diff --git "a/4912.jsonl" "b/4912.jsonl"
new file mode 100644
--- /dev/null
+++ "b/4912.jsonl"
@@ -0,0 +1,710 @@
+{"seq_id":"147221634","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\ndataset = pd.read_csv(\"D:\\Mushroom Classification data\\mushrooms.csv\")\r\ndataset.describe()\r\n\r\ndataset.info()\r\ndataset1 = pd.get_dummies(dataset)\r\n\r\nx = dataset1.iloc[:,2:]\r\ny = dataset1.iloc[:,1]\r\n\r\n# Training and testing set from KNeighborsC\r\nfrom sklearn.neighbors import KNeighborsClassifier \r\n\r\n\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nkn = KNeighborsClassifier(n_neighbors = 5,metric='minkowski', p =1)\r\n\r\nx_train,x_test,y_train,y_test = train_test_split(x,y,test_size=0.1,train_size=0.9,random_state=88,shuffle=True)\r\n\r\n\r\nkn.fit(x_train,y_train)\r\n\r\npredictionKN = kn.predict(x_test)\r\n\r\n\r\n# Training and testing set from Decision tree\r\n\r\n\r\nfrom sklearn.tree import DecisionTreeClassifier\r\n\r\ndt = DecisionTreeClassifier()\r\n\r\ndt.fit(x_train,y_train)\r\n\r\npredictionDT = dt.predict(x_test)\r\n\r\n### Naive Bayes\r\nfrom sklearn.naive_bayes import GaussianNB\r\n\r\nnb = GaussianNB()\r\nnb.fit(x_train,y_train)\r\n\r\npredictionNB = nb.predict(x_test)\r\n\r\n## Logistic regression\r\nfrom sklearn.linear_model import LogisticRegression\r\nlr = LogisticRegression()\r\nlr.fit(x_train,y_train)\r\n\r\npredictionLR = lr.predict(x_test)\r\n\r\n\r\n### Cross validation\r\n\r\nfrom sklearn.model_selection import cross_val_score\r\n\r\n\r\nscoreDT = cross_val_score(dt,x,y,cv = 10)\r\nscoreKN = cross_val_score(kn,x,y,cv=10)\r\nscoreNB = cross_val_score(nb,x,y,cv=10)\r\nscoreLR = cross_val_score(lr,x,y,cv=10)\r\n\r\n### Confusion Metrics and Classification report\r\n\r\nfrom sklearn.metrics import confusion_matrix,classification_report\r\n\r\n#CONFUSSION MATRIX\r\n\r\ncmdt = confusion_matrix(y_test, predictionDT)\r\n\r\ncmkn = confusion_matrix(y_test, predictionKN)\r\n\r\ncmnb = confusion_matrix(y_test, predictionNB)\r\n\r\ncmlr = confusion_matrix(y_test, predictionLR)\r\n\r\n#CLASSIFICATION REPORT\r\n\r\ncrdt = classification_report(y_test, predictionDT)\r\n\r\ncrkn = classification_report(y_test, predictionKN)\r\n\r\ncrnb = classification_report(y_test, predictionNB)\r\n\r\ncrlr = classification_report(y_test, predictionLR)\r\n\r\n# ROC CURVE\r\nfrom sklearn.metrics import roc_curve\r\n\r\ny_prob = lr.predict_proba(x_test)\r\n\r\ny_prob = y_prob[:,1]\r\n\r\nFPR, TPR, Thresholds = roc_curve(y_test, y_prob)\r\n\r\nplt.plot(FPR,TPR)\r\nplt.xlabel('FPR')\r\nplt.ylabel('TPR')\r\n\r\nplt.show()\r\n\r\n# ROC AUC Score\r\nfrom sklearn.metrics import roc_auc_score\r\n\r\nroc_auc_score(y_test,y_prob)\r\n\r\n\r\n\r\n","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"455998259","text":"'''\n그래프 component의 수를 세는 문제.\n모든 배열을 돌면서 몇번의 dfs가 실행되었는지 수를 세면 된다.\n'''\n\nimport sys\ninput = sys.stdin.readline\nT=int(input())\n\n\ndx=[0,0,-1,1]\ndy=[-1,1,0,0]\n\ndef in_range(i,j):\n if i==N or j==M or i<0 or j < 0:\n return False\n return True\n\ndef dfs(i,j):\n need_visit=[[i,j]]\n while need_visit:\n x,y = need_visit.pop()\n\n Map[x][y] = 0\n\n for i in range(4):\n new_x = x+dx[i]\n new_y = y+dy[i]\n\n if not in_range(new_x,new_y):\n continue\n \n if Map[new_x][new_y] == 0:\n continue\n \n need_visit.append([new_x,new_y])\n \n\nfor _ in range(T):\n M,N,K = map(int,input().split())\n Map = [[0 for i in range(M+1)] for j in range(N+1)]\n \n #배추밭 \n for _ in range(K):\n y,x = map(int,input().split())\n Map[x][y] = 1\n \n cnt=0\n for i in range(N):#세로\n for j in range(M):#가로\n if Map[i][j] == 1:\n dfs(i,j)\n cnt+=1\n\n print(cnt)","sub_path":"백준/Python/카테고리/DFS/1012(유기농 배추).py","file_name":"1012(유기농 배추).py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"91670960","text":"from http.server import HTTPServer, BaseHTTPRequestHandler\nfrom services.telegram_api_utils import TelegramApiUtils\nfrom datetime import datetime, timedelta, timezone\nimport json\nimport re\n\n\nclass TrelloActivityHandler(BaseHTTPRequestHandler):\n\n\n MESSAGE_PATTERN_FOR_UPDATED_OR_TRANSFERED_TICKETS = \"'{issue}' issue was {action}. \\\n \\n\\t\\t\" + u'\\U0000270F' + \" Previous {eventValue}: {before} \\\n \\n\\t\\t\" + u'\\U0001F4CC' + \" New one: {after} \\\n \\n---\\\n \\nChange was made by \\\"{initiatorFullName}\\\"\\n\" + u'\\U000027A1' + \"\\\n click here to see more\"\n\n MESSAGE_PATTERN_FOR_COMMENTED_ON_TICKETS = \"'{issue}' issue was commented with: \\\n \\n\\t\\t\" + u'\\U0001F4AC' + \" {comment} \\\n \\n---\\\n \\nBy \\\"{initiatorFullName}\\\"\\n\" + u'\\U000027A1' + \"\\\n click here to see more\"\n\n MESSAGE_PATTERN_FOR_DONE_CHECK_IN_CHECK_LIST = \"Element of check-list at issue '{issue}' was marked as DONE:\\\n \\n\\t\\t\" + u'\\U00002705' + \" {checkListElement} \\\n \\n---\\\n \\nBy \\\"{initiatorFullName}\\\"\\n\" + u'\\U000027A1' + \"\\\n click here to see more\"\n\n MESSAGE_PATTERN_FOR_UNDONE_CHECK_IN_CHECK_LIST = \"Element of check-list at issue '{issue}' was marked as UNDONE:\\\n \\n\\t\\t\" + u'\\U0000274C' + \" {checkListElement}\\\n \\n---\\\n \\nBy \\\"{initiatorFullName}\\\"\\n\" + u'\\U000027A1' + \"\\\n click here to see more\"\n\n MESSAGE_PATTERN_FROM_ADDED_ATTACHMENTS = \"'{issue}' issue has been updated by adding an attachment\\\n \\n---\\\n \\nBy \\\"{initiatorFullName}\\\"\\n\" + u'\\U000027A1' + \"\\\n click here to see more\"\n\n MESSAGE_PATTERN_FOR_CUD_CHECK_LIST_ELEMENTS = \"The element of checklist at issue '{issue}' was {action}:\\\n \\n\\t\\t{emojiAction} {checkListElement} \\\n \\n---\\\n \\nBy \\\"{initiatorFullName}\\\"\\n\" + u'\\U000027A1' + \"\\\n click here to see more\"\n\n MESSAGE_PATTERN_FOR_RENAMED_ELEMENTS_OF_CHECK_LISTS = \"The element of checklist at issue '{issue}' was renamed. 
\\\n \\n\\t\\t\" + u'\\U0000270F' + \" Previous {eventValue}: {before} \\\n \\n\\t\\t\" + u'\\U0001F4CC' + \" New one: {after} \\\n \\n---\\\n \\nChange was made by \\\"{initiatorFullName}\\\"\\n\" + u'\\U000027A1' + \"\\\n click here to see more\"\n\n MESSAGE_PATTERN_FOR_ASSIGNED_OR_UNASSIGNED_ON_TICKETS = \"You were {action} issue '{issue}'.\\\n \\n---\\\n \\nBy \\\"{initiatorFullName}\\\"\\n\" + u'\\U000027A1' + \"\\\n click here to see more\"\n\n MESSAGE_PATTERN_TICKET_WAS_CREATED_OR_DELETED = u'\\U0001F5D2' + \" Issue '{issue}' was {action}.\\\n \\n---\\\n \\nBy \\\"{initiatorFullName}\\\"\\n\" + u'\\U000027A1' + \"\\\n click here to see more\"\n\n def do_HEAD(self):\n print(\"[INFO] Received HEAD request on {path} from {ip}\".format(path = self.path, ip = self.address_string()))\n\n if (self.address_string() not in self.server.trello_api_utils.trello_api_webhook_declared_official_ips\n or self.path[1:] != self.server.trello_secured_endpoint):\n self.send_response(401)\n self.send_header('Content-type','text/html')\n self.end_headers()\n return \n\n self.send_response(200)\n self.send_header('Content-type','text/html')\n self.end_headers()\n return \n\n def do_GET(self):\n print(\"[INFO] Received GET request on {path} from {ip}\".format(path = self.path, ip = self.address_string()))\n\n if (self.address_string() not in self.server.trello_api_utils.trello_api_webhook_declared_official_ips\n or self.path[1:] != self.server.trello_secured_endpoint):\n self.send_response(401)\n self.send_header('Content-type','text/html')\n self.end_headers()\n return \n\n self.send_response(200)\n self.send_header('Content-type','text/html')\n self.end_headers()\n return\n\n def do_POST(self):\n print(\"[INFO] Received POST request on {path} from {ip}\".format(path = self.path, ip = self.address_string()))\n\n if (self.address_string() not in self.server.trello_api_utils.trello_api_webhook_declared_official_ips\n or self.path[1:] != self.server.trello_secured_endpoint):\n self.send_response(401)\n self.send_header('Content-type','text/html')\n self.end_headers()\n return \n\n trello_dashboard_update_info = json.loads(self.rfile.read(int(self.headers['Content-Length'])))\n print(\"[DEBUG] Received the next JSON payload: \\n{}\".format(json.dumps(trello_dashboard_update_info, indent = 2)))\n \n received_action_type = trello_dashboard_update_info['action']['type']\n received_action_subtype = trello_dashboard_update_info['action']['display']['translationKey']\n print(\"[INFO] Received action from trello: {actionType} _ {actionSubtype}\".format(\n actionType = received_action_type, actionSubtype = received_action_subtype\n ))\n\n action_initiator_fullname = trello_dashboard_update_info['action']['memberCreator']['fullName']\n action_initiator_username = trello_dashboard_update_info['action']['memberCreator']['username']\n\n telegramApiUtils = TelegramApiUtils(self.server.telegram_token)\n result_message = ''\n \n card_id = ''\n try: card_id = trello_dashboard_update_info['action']['data']['card']['id']\n except: print(\"[ERROR] An exception occurred: can't get card identifier from the received update\")\n if received_action_type == 'updateCard':\n action_card_shortlink = trello_dashboard_update_info['action']['data']['card']['shortLink']\n if received_action_subtype == 'action_move_card_from_list_to_list':\n\n result_object = {'card_title': '', 'from': '', 'to': ''}\n result_object['card_title'] = trello_dashboard_update_info['action']['display']['entities']['card']['text']\n result_object['from'] = 
trello_dashboard_update_info['action']['display']['entities']['listBefore']['text']\n result_object['to'] = trello_dashboard_update_info['action']['display']['entities']['listAfter']['text']\n\n # result_markers = ''\n # for v in trello_dashboard_update_info['model']['labelNames'].items():\n # if v != '': result_markers = result_markers + ',' + str(v)\n # result_markers = result_markers[1:-1]\n\n if telegramApiUtils.get_me():\n print('[INFO] Connection with telegram bot has been installed successfully')\n else:\n print('[ERROR] Connection with telegram bot has not been installed')\n exit()\n\n result_message = TrelloActivityHandler.MESSAGE_PATTERN_FOR_UPDATED_OR_TRANSFERED_TICKETS.format(\n issue = result_object['card_title'],\n action = \"moved\",\n eventValue = \"column\",\n before = result_object['from'],\n after = result_object['to'],\n initiatorFullName = action_initiator_fullname,\n issueShortLink = action_card_shortlink\n )\n if received_action_subtype == 'action_changed_description_of_card':\n result_message = TrelloActivityHandler.MESSAGE_PATTERN_FOR_UPDATED_OR_TRANSFERED_TICKETS.format(\n issue = trello_dashboard_update_info['action']['data']['card']['name'],\n action = \"updated (description)\",\n eventValue = \"description\",\n before = trello_dashboard_update_info['action']['data']['old']['desc'],\n after = trello_dashboard_update_info['action']['data']['card']['desc'],\n initiatorFullName = action_initiator_fullname,\n issueShortLink = action_card_shortlink\n )\n if received_action_subtype == 'action_renamed_card':\n result_message = TrelloActivityHandler.MESSAGE_PATTERN_FOR_UPDATED_OR_TRANSFERED_TICKETS.format(\n issue = trello_dashboard_update_info['action']['data']['card']['name'],\n action = \"renamed\",\n eventValue = \"title\",\n before = trello_dashboard_update_info['action']['data']['old']['name'],\n after = trello_dashboard_update_info['action']['data']['card']['name'],\n initiatorFullName = action_initiator_fullname,\n issueShortLink = action_card_shortlink\n )\n if received_action_subtype == 'action_added_a_due_date' or received_action_subtype == 'action_changed_a_due_date':\n # ticket shoul be moved to the corresponding column\n self.server.trello_api_utils.transfer_ticket_to_corresponding_column_by_its_due_date(card_id)\n\n ### handle complete/incomplete stats of check-list elements\n if received_action_type == 'updateCheckItemStateOnCard':\n # print(trello_dashboard_update_info['action']['data']['checklist']['name'])\n action_card_shortlink = trello_dashboard_update_info['action']['data']['card']['shortLink']\n if received_action_subtype == 'action_completed_checkitem':\n result_message = TrelloActivityHandler.MESSAGE_PATTERN_FOR_DONE_CHECK_IN_CHECK_LIST.format(\n issue = trello_dashboard_update_info['action']['data']['card']['name'],\n checkListElement = trello_dashboard_update_info['action']['data']['checkItem']['name'],\n initiatorFullName = action_initiator_fullname,\n issueShortLink = action_card_shortlink\n )\n elif received_action_subtype == 'action_marked_checkitem_incomplete':\n result_message = TrelloActivityHandler.MESSAGE_PATTERN_FOR_UNDONE_CHECK_IN_CHECK_LIST.format(\n issue = trello_dashboard_update_info['action']['data']['card']['name'],\n checkListElement = trello_dashboard_update_info['action']['data']['checkItem']['name'],\n initiatorFullName = action_initiator_fullname,\n issueShortLink = action_card_shortlink\n )\n \n ### actions of check-list elements\n if received_action_type == 'createCheckItem':\n action_card_shortlink = 
trello_dashboard_update_info['action']['data']['card']['shortLink']\n card_title = trello_dashboard_update_info['action']['data']['card']['name']\n result_message = TrelloActivityHandler.MESSAGE_PATTERN_FOR_CUD_CHECK_LIST_ELEMENTS.format(\n issue = card_title,\n action = \"created\",\n emojiAction = u'\\U00002795',\n checkListElement = trello_dashboard_update_info['action']['data']['checkItem']['name'],\n initiatorFullName = action_initiator_fullname,\n issueShortLink = action_card_shortlink\n )\n if received_action_type == 'deleteCheckItem':\n action_card_shortlink = trello_dashboard_update_info['action']['data']['card']['shortLink']\n card_title = trello_dashboard_update_info['action']['data']['card']['name']\n result_message = TrelloActivityHandler.MESSAGE_PATTERN_FOR_CUD_CHECK_LIST_ELEMENTS.format(\n issue = card_title,\n action = \"removed\",\n emojiAction = u'\\U00002796',\n checkListElement = trello_dashboard_update_info['action']['data']['checkItem']['name'],\n initiatorFullName = action_initiator_fullname,\n issueShortLink = action_card_shortlink\n )\n if received_action_type == 'updateCheckItem' and received_action_subtype == 'action_renamed_checkitem':\n action_card_shortlink = trello_dashboard_update_info['action']['data']['card']['shortLink']\n card_title = trello_dashboard_update_info['action']['data']['card']['name']\n result_message = TrelloActivityHandler.MESSAGE_PATTERN_FOR_RENAMED_ELEMENTS_OF_CHECK_LISTS.format(\n issue = card_title,\n checkListElement = trello_dashboard_update_info['action']['data']['checkItem']['name'],\n eventValue = 'content',\n before = trello_dashboard_update_info['action']['data']['old']['name'],\n after = trello_dashboard_update_info['action']['data']['checkItem']['name'],\n initiatorFullName = action_initiator_fullname,\n issueShortLink = action_card_shortlink\n )\n\n ### comments on the cards\n if received_action_type == 'commentCard' and received_action_subtype == 'action_comment_on_card':\n action_card_shortlink = trello_dashboard_update_info['action']['data']['card']['shortLink']\n card_title = trello_dashboard_update_info['action']['data']['card']['name']\n result_message = TrelloActivityHandler.MESSAGE_PATTERN_FOR_COMMENTED_ON_TICKETS.format(\n issue = card_title,\n comment = trello_dashboard_update_info['action']['data']['text'],\n initiatorFullName = action_initiator_fullname,\n issueShortLink = action_card_shortlink\n )\n\n ### card attachments\n if received_action_type == 'addAttachmentToCard' and received_action_subtype == 'action_add_attachment_to_card':\n action_card_shortlink = trello_dashboard_update_info['action']['data']['card']['shortLink']\n card_title = trello_dashboard_update_info['action']['data']['card']['name']\n result_message = TrelloActivityHandler.MESSAGE_PATTERN_FROM_ADDED_ATTACHMENTS.format(\n issue = card_title,\n attachmentPreviewLink = trello_dashboard_update_info['action']['data']['attachment']['previewUrl'],\n initiatorFullName = action_initiator_fullname,\n issueShortLink = action_card_shortlink\n )\n\n ### actions with un/assigned members on the cards\n if received_action_type == 'addMemberToCard' or received_action_type == 'removeMemberFromCard':\n action_card_shortlink = trello_dashboard_update_info['action']['data']['card']['shortLink']\n card_title = trello_dashboard_update_info['action']['data']['card']['name']\n if received_action_subtype == 'action_added_member_to_card':\n result_message = TrelloActivityHandler.MESSAGE_PATTERN_FOR_ASSIGNED_OR_UNASSIGNED_ON_TICKETS.format(\n action = u'\\U0000270C' + ' 
assigned to the',\n issue = card_title,\n initiatorFullName = action_initiator_fullname,\n issueShortLink = action_card_shortlink\n )\n if received_action_subtype == 'action_removed_member_from_card':\n result_message = TrelloActivityHandler.MESSAGE_PATTERN_FOR_ASSIGNED_OR_UNASSIGNED_ON_TICKETS.format(\n action = u'\\U0001F91E' + ' unassigned from the',\n issue = card_title,\n initiatorFullName = action_initiator_fullname,\n issueShortLink = action_card_shortlink\n )\n member_id = trello_dashboard_update_info['action']['data']['idMember']\n member_username = self.server.trello_api_utils.getMemberById(member_id)['username']\n member_telegram_trello_assignment = self.server.mongodb_utils.findUserTelegramTrelloAssignmentByTrelloUsername(\n member_username\n )\n if member_telegram_trello_assignment != None:\n telegramApiUtils.send_message(member_telegram_trello_assignment['telegram_chat_id'], result_message)\n else:\n print(\"[DEBUG] Assignment between trello username and telegram chat id has not been found. Expected username is = {}\".format(\n member_username\n ))\n # should be empty because we have already sent necessary message and we need to answer to webhook with 200 OK\n result_message = ''\n \n ### action for create or archive tickets\n if received_action_type == 'createCard' or (received_action_type == 'updateCard' and received_action_subtype == 'action_archived_card'):\n\n action_card_shortlink = trello_dashboard_update_info['action']['data']['card']['shortLink']\n card_title = trello_dashboard_update_info['action']['data']['card']['name']\n card_action = ''\n if received_action_subtype == 'action_archived_card':\n card_action = 'archived'\n if received_action_subtype == 'action_create_card':\n card_action = 'created'\n result_message = TrelloActivityHandler.MESSAGE_PATTERN_TICKET_WAS_CREATED_OR_DELETED.format(\n issue = card_title,\n action = u'\\U0001F9E0' + ' {}'.format(card_action),\n initiatorFullName = action_initiator_fullname,\n issueShortLink = action_card_shortlink\n )\n\n # send result message after result_message created\n if result_message:\n # get card members\n card_members_usernames = []\n for member_id in self.server.trello_api_utils.getCardById(card_id)['idMembers']:\n card_members_usernames.append(self.server.trello_api_utils.getMemberById(member_id)['username'])\n # notify users which subscribed for ALL changes\n subscribers_for_all = self.server.mongodb_utils.findTrellloSubscribersByTheirSubscription(['ALL'])\n for subscriber_for_all in subscribers_for_all:\n if action_initiator_username != subscriber_for_all['trello_username']:\n telegramApiUtils.send_message(subscriber_for_all['telegram_chat_id'], result_message)\n # notify specific subscribers\n for card_member_username in card_members_usernames:\n subscribers_for_specific_changes = self.server.mongodb_utils.findTrellloSubscribersByTheirSubscription([card_member_username])\n for subscriber_for_specific_subscription in subscribers_for_specific_changes:\n if action_initiator_username != subscriber_for_specific_subscription['trello_username']:\n telegramApiUtils.send_message(subscriber_for_specific_subscription['telegram_chat_id'], result_message)\n\n self.send_response(200)\n self.send_header('Content-type','text/html')\n self.end_headers()\n return\n\n def getUsernamesOfTicketAssigners(self, card_id):\n for memberId in self.server.trello_api_utils.getCardById(card_id)['idMembers']:\n 
print(self.server.trello_api_utils.getMemberById(memberId)['username'])","sub_path":"trello_bot/handler_trello_activity.py","file_name":"handler_trello_activity.py","file_ext":"py","file_size_in_byte":19614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"8596902","text":"# Transmission Control Protocol\n\nimport socket\nimport traceback\n\n\ndef connect(host, port):\n s = socket.socket()\n s.connect((host, port))\n\n return s\n\n\ndef listen(host, port, queue_len):\n s = socket.socket()\n s.bind((host, port))\n s.listen(queue_len)\n\n return s\n\n\ndef recv_ex(s, n):\n msg = b''\n\n while True:\n ml = len(msg)\n rem = n - ml\n\n if rem < 0:\n raise Exception('impossible to reach', rem)\n\n if rem == 0:\n return msg\n\n # print('REM', rem, '\\told', msg)\n\n msg += s.recv(rem)\n\n # print('\\tnew', msg)\n\n if len(msg) == ml:\n raise EOFError\n\n\ndef accept_all(s, fn):\n try:\n while True:\n cs, _ = s.accept()\n fn(cs)\n except KeyboardInterrupt:\n s.close()\n except Exception as e:\n traceback.print_exception(e)\n","sub_path":"src/tcp.py","file_name":"tcp.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"579476700","text":"import bpy\nfrom .addon import prefs\n\n\nclass SUV_OT_uv_rotate(bpy.types.Operator):\n bl_idname = \"suv.uv_rotate\"\n bl_label = \"Rotate UV Islands\"\n\n delta = bpy.props.IntProperty(options={'SKIP_SAVE'}, default=1)\n\n @classmethod\n def poll(self, context):\n obj = context.active_object\n return obj and obj.type == 'MESH' and \\\n context.scene.tool_settings.uv_select_mode not in {'EDGE'}\n\n def execute(self, context):\n if self.delta < 0:\n valor = -prefs().uv_rotate_step\n else:\n valor = prefs().uv_rotate_step\n bpy.ops.transform.rotate(\n value=valor,\n axis=(-0, -0, -1),\n constraint_axis=(False, False, False),\n constraint_orientation='GLOBAL',\n mirror=False,\n proportional='DISABLED',\n proportional_edit_falloff='SMOOTH',\n proportional_size=1)\n return {'FINISHED'}\n","sub_path":"scripts/addons/smart_uv/uv_rotate.py","file_name":"uv_rotate.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"403812811","text":"from elasticsearch.exceptions import NotFoundError\nfrom faceduck.models.user import User\nfrom faceduck.utils import FaceduckError\nfrom .signup import email_already_exists\n\n\ndef get_user(user_id):\n try:\n user = User.get(id=user_id)\n except NotFoundError:\n raise FaceduckError(\"001\")\n \n return user\n\n\ndef get_all_users():\n return User.search().query(\"match_all\").scan()\n\ndef get_login_logs(user_id):\n user = get_user(user_id)\n return user.get_login_logs()\n\ndef edit_user(user_id, newData):\n user = get_user(user_id)\n for key in newData.keys():\n if key == \"username\":\n user.username = newData[key]\n elif key == \"email\":\n if email_already_exists(newData[key]):\n raise FaceduckError(\"003\") \n user.email = newData[key]\n elif key == \"name\":\n user.name = newData[key]\n elif key == \"surname\":\n user.surname = newData[key]\n elif key == \"birthday\":\n user.birthday = newData[key]\n elif key == \"gender\":\n user.gender = newData[key]\n elif key == \"image-url\":\n user.image_url = newData[key]\n user.save()\n return user\n\ndef get_groups(user_id):\n user = get_user(user_id)\n return user.getGroups()","sub_path":"backend/app/faceduck/core/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"206935378","text":"\n# We run several classifiers on the ESC-50 database\n\n# The aim is to evaluate run-time and efficiency in\n# the prediction.\n\n\n# Usual imports\nimport scipy as sp\nfrom IPython import embed\nfrom os import path\nimport time\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport sys\nimport os\n\n# To import the datasets\nfrom utils import ESC50\n\n# To write down results from classification\nfrom sklearn.metrics import classification_report, confusion_matrix \n\n# First, we import the dataset.\n# We have to decide the sizes of the train/test sets:\nSIZE_TRAIN = 100\nSIZE_TEST = 25\n\nshared_params = {'csv_path': 'esc50.csv',\n 'wav_dir': 'audio',\n 'dest_dir': 'audio/16000',\n 'audio_rate': 16000,\n # We use ESC-50 (so we classify 50 classes, not 10)!\n 'only_ESC10': False,\n 'pad': 0,\n 'normalize': True}\n\n# We can retrieve a batch of data\ntrain_data = ESC50(folds=[1],\n\t\t\t\t # We randomize the choice of data\n randomize=True,\n # This I am not sure what id does\n strongAugment=False,\n # This I am also not sure, but it appears harmless so far\n random_crop=False,\n # This reduces the input to two (out of 5) seconds\n inputLength=2,\n # This mixes two samples every time.\n # Not sure why this should be convenient\n mix=False,\n **shared_params).batch_gen(SIZE_TRAIN)\n\n# We can retrieve a batch of data\ntest_data = ESC50(folds=[1],\n randomize=True,\n strongAugment=False,\n random_crop=False,\n # The original kaggle script puts 4 secs\n # But we cannot contron data of different length.\n inputLength=2,\n mix=False,\n **shared_params).batch_gen(SIZE_TEST)\n\n# This is our data\nX, Y = next(train_data)\nX_test, Y_test = next(test_data)\n\nimport torch\n\n# We import a linear net:\nfrom my_nn import my_nn\n# And a convolution net:\nfrom my_nn import my_cnn\n\n# We have to put the input into the shape (dataset_size, 1, data_point_size)\n# where the 1 represents the number of input channels\n\nX_train = np.swapaxes(X, 1, 2)\nX_test = np.swapaxes(X_test, 1, 2)\n\n# We try with a convolution net.\n\ncnet = my_cnn(X_train, Y)\n\nembed()\n\ncnet.fit()\n\ncnn_prediction = cnet.predict(X_test)\n\nprint(classification_report(cnn_prediction, Y_test))\n\nembed()\n\n# We define the network:\n#net = my_nn(X_train, Y)\n#net.fit()\n#nn_prediction = net.predict(X_test)\n\n#print(classification_report(nn_prediction, Y_test))\n\n# With 6 layers, 500 data points it predicted 60 data points correctly to 27%.\n# But it classified the training set to 100%. So we overfitted the data.\n# Single layer hat approximately 200 vs 200 shape.\n\n","sub_path":"Audio/classification_nn.py","file_name":"classification_nn.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"107991432","text":"from paste import httpserver\nimport argparse\n\nfrom .app import app\nfrom .version import __version__\n\nfrom youtube_dl.utils import YoutubeDLHandler, compat_urllib_request\n\n\"\"\"\n A server for providing the app anywhere, no need for GAE\n\"\"\"\n\n\ndef setup_url_handlers():\n opener = compat_urllib_request.build_opener( YoutubeDLHandler())\n opener.addheaders =[]\n compat_urllib_request.install_opener(opener)\n\ndef setup():\n setup_url_handlers()\n\ndef main():\n desc = \"\"\"\n The youtube-dl API server.\n \"\"\"\n\n parser = argparse.ArgumentParser(description=desc)\n\n default_port = 9191\n port_help = 'The port the server will use. The default is: {}'\n port_help = port_help.format(default_port)\n parser.add_argument('-p', '--port',\n default=default_port,\n type=int,\n help=port_help\n )\n\n parser.add_argument('--version', action='store_true',\n help='Print the version of the server')\n\n args = parser.parse_args()\n if args.version:\n print(__version__)\n exit(0)\n \n setup()\n httpserver.serve(app, host='localhost', port=args.port)\n\nif __name__ == '__main__':\n main()\n","sub_path":"youtube_dl_server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"205074265","text":"from re import sub\n\n\ndef count_words(text):\n \"\"\"Return a mapping that has words as the keys and the number of times each\n word was seen as the values.\n \"\"\"\n words = [sub(r\"[^A-z|']\", \"\", word).casefold() for word in text.split(\" \")]\n count = {}\n for word in words:\n count[f\"{word}\"] = count[f\"{word}\"] + 1 if count.get(f\"{word}\") else 1\n return count\n","sub_path":"count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"488443078","text":"#coding: utf-8\n\nimport argparse\nfrom bcompiler.bcompiler import asm_to_bocde\n\ndef options(cmdargs,groupMode):\n\n argParser = argparse.ArgumentParser()\n argParser.add_argument(\"--debug\", dest=\"debug\", action='store_true', help=\"默认关闭debug\")\n\n args = argParser.parse_args(cmdargs)\n return args\n\nif __name__ == \"__main__\":\n\n asm_to_bocde()","sub_path":"ibsm/asm2bcode.py","file_name":"asm2bcode.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"42521414","text":"\"\"\"\n@Project : text-classification-cnn-rnn\n@Module : map_concurrent_futures.py\n@Author : Deco [deco@cubee.com]\n@Created : 7/10/18 3:14 PM\n@Desc : \n\"\"\"\nimport spacy\nfrom concurrent.futures import ProcessPoolExecutor\n\n\ndef pipeline_tagger_parser_ner(cls, st):\n nlp = cls()\n # 2. initialise it\n pipeline = ['tagger', 'parser', 'ner']\n for name in pipeline:\n component = nlp.create_pipe(name)\n # 3. create the pipeline components\n nlp.add_pipe(component)\n # 4. add the component to the pipeline\n model_data_path = ('/home/deco/miniconda2/envs/tf17/lib/python3.6/'\n 'site-packages/en_core_web_md/en_core_web_md-2.0.0')\n nlp.from_disk(model_data_path)\n # 5. load in the binary data\n\n doc = nlp.make_doc(st)\n print('tokens in pipeline_tagger_parser_ner:')\n print([token.text for token in doc])\n return 'tagger_parser_ner'\n\n\ndef pipeline_tokenizer(cls, st):\n nlp = cls()\n model_data_path = ('/home/deco/miniconda2/envs/tf17/lib/python3.6/'\n 'site-packages/en_core_web_md/en_core_web_md-2.0.0')\n nlp.from_disk(model_data_path)\n\n doc = nlp.make_doc(st)\n print('tokens in pipeline_tokenizer:')\n print([token.text for token in doc])\n return 'tokenizer'\n\n\nclass RunPipeline:\n\n def __init__(self, cls, st):\n self.cls = cls\n self.st = st\n\n def __call__(self, func):\n return func(self.cls, self.st)\n\n\ndef map_func_multi_process(cls, st):\n funcs = [pipeline_tagger_parser_ner, pipeline_tokenizer]\n with ProcessPoolExecutor(2) as p:\n res = p.map(RunPipeline(cls, st), funcs)\n # future = p.map(RunPipeline(cls, st), funcs)\n # no future.result()\n # syncronization, blocking\n print('Type of future:', type(res))\n # an iterator or generator\n # an iterator in which __next__ calls the result method of each future,\n # so what we get are the results of the futures, and not the\n # futures themselves.\n\n\nif __name__ == '__main__':\n\n lang0 = 'en'\n cls0 = spacy.util.get_lang_class(lang0)\n st0 = 'This is a sentence'\n\n map_func_multi_process(cls0, st0)\n\n print('finished.')\n","sub_path":"nlp_models/concurrent/map_concurrent_futures.py","file_name":"map_concurrent_futures.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"153461135","text":"import argparse\nimport os\nimport json\nimport sys\nimport platform\nimport psutil\nimport subprocess\nfrom datetime import datetime\nfrom shutil import copyfile, SameFileError\nfrom pathlib import Path\n\n# Configure script context and importing jobs_launcher and logger to it (DO NOT REPLACE THIS CODE)\nROOT_DIR = os.path.abspath(\n os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)\n)\nsys.path.append(ROOT_DIR)\nimport jobs_launcher.core.config as core_config\nimport jobs_launcher.core.system_info as system_info\n\nLOG = core_config.main_logger\n\n\n# Makes report and execute case\nclass Renderer:\n\n PLATFORM = None\n LOG = None\n TOOL = None\n ASSETS_PATH = None\n BASELINE_PATH = None\n PACKAGE = None\n COMMON_REPORT_PATH = None\n\n # case - render scenario; output_dir - output directory for report and images\n def __init__(self, case, output_dir, update_refs, res_x, res_y, retries):\n self.case = case\n self.output = output_dir\n self.update_refs = update_refs\n self.retries = retries\n self.scene_path = os.path.join(Renderer.ASSETS_PATH, Renderer.PACKAGE, case['case'], case['scene'])\n self.case_report_path = os.path.join(self.output, case['case'] + core_config.CASE_REPORT_SUFFIX)\n for d in ['Color', 'render_tool_logs']:\n Path(os.path.join(output_dir, d)).mkdir(parents=True, exist_ok=True)\n Renderer.COMMON_REPORT_PATH = os.path.join(output_dir, 'renderTool.log')\n self.width = res_x\n self.height = res_y\n if Renderer.TOOL is None or Renderer.ASSETS_PATH is None:\n raise Exception(\"Path to tool executable didn't set\")\n else:\n self.__prepare_report()\n\n # Copy baselines images to work dirs\n def __copy_baseline(self):\n # Get original baseline json report from assets folder\n orig_baselines_dir = os.path.join(Renderer.BASELINE_PATH, self.PACKAGE)\n orig_baseline_path = os.path.join(orig_baselines_dir, self.case['case'] + core_config.CASE_REPORT_SUFFIX)\n # Create dir for baselines json for current case group in Work/Baseline/group_name\n copied_baselines_dir = os.path.join(self.output, os.pardir, os.pardir, os.pardir, 'Baseline', self.PACKAGE)\n if not os.path.exists(copied_baselines_dir):\n os.makedirs(copied_baselines_dir)\n # Create dir for baselines images for current case group in Work/Baseline/group_name/Color\n os.makedirs(os.path.join(copied_baselines_dir, 'Color'))\n copied_baseline_path = os.path.join(copied_baselines_dir, self.case['case'] + core_config.CASE_REPORT_SUFFIX)\n try:\n copyfile(orig_baseline_path, copied_baseline_path)\n with open(os.path.join(copied_baseline_path)) as f:\n baseline_json = json.load(f)\n for thumb in [''] + core_config.THUMBNAIL_PREFIXES:\n orig_thumbnail = os.path.join(orig_baselines_dir, baseline_json[thumb + 'render_color_path'])\n copied_thumbnail = os.path.join(copied_baselines_dir, baseline_json[thumb + 'render_color_path'])\n if thumb + 'render_color_path' and os.path.exists(orig_thumbnail):\n copyfile(orig_thumbnail, copied_thumbnail)\n except Exception as e:\n LOG.error('Failed to copy baseline ' + repr(e) + ' from: ' + orig_baseline_path + ' to: ' + copied_baseline_path)\n\n # Creates stub image which will be replaced on success render\n def __copy_stub_image(self, status):\n try:\n root_dir_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir))\n orig_stub_path = os.path.join(root_dir_path, 'jobs_launcher', 'common', 'img', status + '.png')\n copied_stub_path = os.path.join(self.output, 'Color', self.case['case'] + '.png')\n 
copyfile(orig_stub_path, copied_stub_path)\n except OSError or FileNotFoundError as e:\n LOG.error(\"Can't create img stub: \" + str(e))\n\n def __is_case_skipped(self):\n skip_pass = sum(set(Renderer.PLATFORM.values()) &\n set(skip_config) == set(skip_config) for skip_config in self.case.get('skip_on', ''))\n return True if (skip_pass or self.case['status'] == core_config.TEST_IGNORE_STATUS) else False\n\n def __get_tool_version(self):\n if Renderer.is_windows():\n return str(Renderer.TOOL).split(\"\\\\\")[-3]\n elif Renderer.is_macos():\n return str(Renderer.TOOL).split(\"/\")[3]\n else:\n return str(Renderer.TOOL).split(\"/\")[-3]\n\n def __prepare_report(self):\n skipped = core_config.TEST_IGNORE_STATUS\n if self.__is_case_skipped():\n self.case['status'] = skipped\n if 'frame' not in self.case:\n self.case['frame'] = 1\n report = core_config.RENDER_REPORT_BASE.copy()\n plugin_info = Renderer.PLATFORM['PLUGIN']\n report.update({\n 'test_case': self.case['case'],\n 'test_group': Renderer.PACKAGE,\n 'script_info': self.case['script_info'] if 'script_info' in self.case else [],\n 'render_device': Renderer.PLATFORM.get('GPU', 'Unknown'),\n 'scene_name': self.case['scene'],\n 'width': self.width,\n 'height': self.height,\n 'tool': self.__get_tool_version(),\n 'date_time': datetime.now().strftime('%m/%d/%Y %H:%M:%S'),\n 'file_name': self.case['case'] + self.case.get('extension', '.png'),\n 'render_color_path': os.path.join('Color', self.case['case'] + self.case.get('extension', '.png')),\n 'render_version': plugin_info['plugin_version'],\n 'core_version': plugin_info['core_version'],\n 'frame': self.case['frame']\n })\n if self.case['status'] == skipped:\n report['test_status'] = skipped\n report['group_timeout_exceeded'] = False\n self.__copy_stub_image(skipped)\n else:\n report['test_status'] = core_config.TEST_CRASH_STATUS\n self.__copy_stub_image('error')\n with open(self.case_report_path, 'w') as f:\n json.dump([report], f, indent=4)\n if 'Update' not in self.update_refs:\n self.__copy_baseline()\n\n def __complete_report(self, try_number):\n case_log_path = os.path.join('render_tool_logs', self.case['case'] + '.log')\n with open(Renderer.COMMON_REPORT_PATH, \"a\") as common_log:\n with open(case_log_path, 'r') as case_log:\n common_log.write(case_log.read())\n with open(self.case_report_path, 'r') as f:\n report = json.load(f)[0]\n if self.case['status'] == 'done' and os.path.isfile(report['render_color_path']):\n self.case['status'] = core_config.TEST_SUCCESS_STATUS\n with open(case_log_path, 'r') as f:\n tool_log = [line.strip() for line in f]\n for line in tool_log:\n if \"100% Lap=\" in line:\n time = datetime.strptime(line.split()[2].replace('Lap=', ''), '%H:%M:%S.%f')\n total_seconds = float(time.second + time.minute * 60 + time.hour * 3600) + (time.microsecond / 100000)\n report['render_time'] = total_seconds\n if 'Peak Memory Usage' in line: report[\"gpu_memory_max\"] = ' '.join(line.split()[-2:])\n if 'Current Memory Usage' in line: report[\"gpu_memory_usage\"] = ' '.join(line.split()[-2:])\n elif self.case['status'] == core_config.TEST_CRASH_STATUS:\n report['message'] = [\"Testcase wasn't executed successfully. 
Number of tries: \" + str(try_number)]\n report['render_log'] = case_log_path\n report['test_status'] = self.case['status']\n report['group_timeout_exceeded'] = self.case['group_timeout_exceeded']\n report['number_of_tries'] = try_number\n report['render_mode'] = 'GPU'\n with open(self.case_report_path, 'w') as f:\n json.dump([report], f, indent=4)\n\n def render(self):\n if self.case['status'] != core_config.TEST_IGNORE_STATUS:\n self.case['status'] = 'inprogress'\n cmd_template = '\"{tool}\" ' \\\n '\"{scene}\" ' \\\n '-R RPR -V 9 ' \\\n '-o \"{file}\" ' \\\n '{resolution}' \\\n '--frame {frame_number} ' \\\n '--append-stderr \"{log_file}\" --append-stdout \"{log_file}\"'\n shell_command = cmd_template.format(tool=Renderer.TOOL,\n scene=self.scene_path,\n file=(os.path.join('Color', self.case['case'] + '.png')),\n resolution=\"--res {} {} \".format(self.width, self.height) if int(self.width) > 0 and int(self.height) > 0 else \"\",\n log_file=os.path.join('render_tool_logs', self.case['case'] + '.log'),\n frame_number=self.case['frame'])\n # saving render command to script for debugging purpose\n shell_script_path = os.path.join(self.output, (self.case['case'] + '_render') + '.bat' if Renderer.is_windows() else '.sh')\n with open(shell_script_path, 'w') as f:\n f.write(shell_command)\n if not Renderer.is_windows():\n try:\n os.system('chmod +x ' + shell_script_path)\n except OSError as e:\n LOG.error('Error while setting right for script execution ' + str(e))\n os.chdir(self.output)\n def execute_task():\n p = subprocess.Popen(shell_script_path, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n try:\n p.communicate()\n return p.returncode\n except psutil.TimeoutExpired as e:\n LOG.error('Render has been aborted by timeout ', str(e))\n return 1\n success_flag = False\n try_number = 0\n while try_number < self.retries:\n try_number += 1\n rc = execute_task()\n LOG.info('Husk return code {}'.format(str(rc)))\n if rc == 0:\n success_flag = True\n break\n self.case['status'] = 'done' if success_flag else core_config.TEST_CRASH_STATUS\n self.case['group_timeout_exceeded'] = False\n test_cases_path = os.path.join(self.output, 'test_cases.json')\n with open(test_cases_path, 'r') as f:\n test_cases = json.load(f)\n for case in test_cases:\n if case['case'] == self.case['case']:\n case['status'] = self.case['status']\n with open(test_cases_path, 'w') as f:\n json.dump(test_cases, f, indent=4)\n self.__complete_report(try_number)\n\n\n @staticmethod\n def is_windows():\n return platform.system() == \"Windows\"\n\n @staticmethod\n def is_macos():\n return platform.system() == \"Darwin\"\n\n\n# Sets up the script parser\ndef create_parser():\n args = argparse.ArgumentParser()\n args.add_argument('--resolution_x', required=True, help='Width of image')\n args.add_argument('--resolution_y', required=True, help='Height of image')\n args.add_argument('--update_refs', required=True, help='Update or not references')\n args.add_argument('--tool', required=True, metavar='', help='Path to render executable file')\n args.add_argument('--res_path', required=True, help='Path to folder with scenes')\n args.add_argument('--output', required=True, metavar='', help='Path to folder where will be stored images and logs')\n args.add_argument('--test_cases', required=True, help='Path to json-file with test cases')\n args.add_argument('--package_name', required=True, help='Name of group of test cases')\n args.add_argument('--retries', required=False, default=2, type=int, help='The number of attempts to launch 
the case.')\n return args\n\n\n# Configure output_dir\ndef configure_output_dir(output, tests):\n try:\n os.makedirs(output)\n test_cases_path = os.path.realpath(os.path.join(os.path.abspath(output), 'test_cases.json'))\n copyfile(tests, test_cases_path)\n with open(test_cases_path, 'r') as f:\n test_cases = json.load(f)\n for case in test_cases:\n if 'status' not in case:\n case['status'] = 'active'\n with open(test_cases_path, 'w') as copied_file:\n json.dump(test_cases, copied_file, indent=4)\n LOG.info(\"Scenes to render: {}\".format([name['scene'] for name in test_cases]))\n return test_cases\n except OSError as e:\n LOG.error(\"Failed to read test_cases.json\")\n raise e\n except (SameFileError, IOError) as e:\n LOG.error(\"Can't copy test_cases.json\")\n raise e\n\n\ndef extract_plugin_versions():\n v = {\n 'core_version': '0',\n 'plugin_version': '0'\n }\n for dir in os.listdir(ROOT_DIR):\n if 'hdRpr' in dir and not \"tar.gz\" in dir:\n try:\n with open(os.path.join(ROOT_DIR, dir, 'version'), 'r') as f:\n raw = [line.strip().split(':') for line in f.readlines()]\n for r in raw:\n r[0] += '_version'\n r[1] = str(r[1])\n v = dict(raw)\n except FileNotFoundError as e:\n LOG.error(\"Can't find file with info about versions \" + repr(e))\n return v\n\n\ndef main():\n args = create_parser().parse_args()\n test_cases = []\n try:\n test_cases = configure_output_dir(args.output, args.test_cases)\n except Exception as e:\n LOG.error(repr(e))\n return 1\n # Defines the characteristics of machines which used to execute this script\n try:\n gpu = system_info.get_gpu()\n except:\n LOG.error(\"Can't get gpu name\")\n gpu = 'Unknown'\n Renderer.PLATFORM = {\n 'GPU': gpu,\n 'OS': platform.system(),\n 'PLUGIN': extract_plugin_versions()\n }\n Renderer.TOOL = args.tool\n Renderer.LOG = LOG\n Renderer.ASSETS_PATH = args.res_path\n Renderer.BASELINE_PATH = os.path.join(args.res_path, \"..\", \"rpr_houdini_autotests_baselines\")\n Renderer.PACKAGE = args.package_name\n [case.render() for case in\n [Renderer(case, args.output, args.update_refs, args.resolution_x, args.resolution_y, args.retries) for case in\n test_cases]\n ]\n return 0\n\n\nif __name__ == '__main__':\n exit(main())\n","sub_path":"jobs/Scripts/simpleRender.py","file_name":"simpleRender.py","file_ext":"py","file_size_in_byte":14769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"539663826","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport GestureAgentsTUIO.Tuio as Tuio\nfrom math import sqrt, fabs\nfrom GestureAgents.Recognizer import Recognizer, newHypothesis\nfrom GestureAgents.Events import Event\nfrom GestureAgents.Agent import Agent\n\n\nclass AgentStick(Agent):\n eventnames = (\"newStick\",)\n\n\nclass RecognizerStick (Recognizer):\n\n def __init__(self, system):\n Recognizer.__init__(self, system)\n self.finger = None\n self.cursorEvents = Tuio.TuioCursorEvents\n self.register_event(\n system.newAgent(self.cursorEvents), RecognizerStick.EventNewAgent)\n self.positions = []\n\n @newHypothesis\n def EventNewAgent(self, Cursor):\n if Cursor.recycled:\n self.fail(cause=\"Agent is recycled\")\n self.agent = AgentStick(self)\n self.agent.pos = Cursor.pos\n self.announce()\n self.unregister_all()\n self.register_event(\n Cursor.newCursor, RecognizerStick.EventNewCursor)\n\n def EventNewCursor(self, Cursor):\n #cursor is an Agent\n self.finger = Cursor\n self.positions.append(Cursor.pos)\n self.unregister_event(Cursor.newCursor)\n self.register_event(\n Cursor.updateCursor, RecognizerStick.EventMoveCursor)\n self.register_event(\n Cursor.removeCursor, RecognizerStick.EventRemoveCursor)\n #acquire should be the last thing to do\n self.acquire(Cursor)\n\n def EventMoveCursor(self, Cursor):\n self.positions.append(Cursor.pos)\n if not self.is_line():\n self.fail(cause=\"Is not line\")\n\n def is_line(self):\n first = self.positions[0]\n last = self.positions[-1]\n dist = sqrt((last[0] - first[0]) ** 2 + (last[1] - first[1]) ** 2)\n if dist < 50:\n return True\n maxdist = dist / 20.0\n for p in self.positions:\n d = self.pdis(first, last, p)\n if abs(d) > maxdist:\n return False\n return True\n\n def EventRemoveCursor(self, Cursor):\n self.unregister_event(Cursor.updateCursor)\n self.unregister_event(Cursor.removeCursor)\n first = self.positions[0]\n last = self.positions[-1]\n dist = sqrt((last[0] - first[0]) ** 2 + (last[1] - first[1]) ** 2)\n if self.is_line() and dist > 30:\n self.complete()\n else:\n self.fail(cause=\"Is not line\")\n\n def duplicate(self):\n d = self.get_copy(self.system)\n d.finger = self.finger\n d.positions = list(self.positions)\n return d\n\n def execute(self):\n self.agent.pos1 = self.positions[0]\n self.agent.pos2 = self.positions[-1]\n\n self.agent.newStick.call(self.agent)\n self.finish()\n\n @staticmethod\n def pdis(a, b, c):\n t = b[0] - a[0], b[1] - a[1] # Vector ab\n dd = sqrt(t[0] ** 2 + t[1] ** 2) # Length of ab\n t = t[0] / dd, t[1] / dd # unit vector of ab\n n = -t[1], t[0] # normal unit vector to ab\n ac = c[0] - a[0], c[1] - a[1] # vector ac\n return fabs(ac[0] * n[0] + ac[1] * n[1]) # Projection of ac to n (the minimum distance)\n\n\n","sub_path":"GestureAgentsTUIO/Gestures2D/RecognizerStick.py","file_name":"RecognizerStick.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"79869052","text":"# Project 5 - Earthquakes\n#\n# Name: Taylor Morris\n# Instructor: Brian Jones\n# Section 17\n\nfrom quake_funcs import *\nfrom operator import attrgetter\n\n\ndef printing(quakes):\n print(\"Earthquakes:\\n------------\")\n\n for quake in quakes:\n print(\"({:.2f}) {:>40} {:s} at ({:8.3f}, {:8.3f})\".format(quake.mag, quake.place, \n time_to_str(quake.time), quake.longitude, quake.latitude))\n print()\n\ndef write_to_file(quakes, quakes_file):\n out_file = open(quakes_file, 'w')\n for quake in quakes:\n out_file.write(\"{:f} {:f} {:f} {:d} {:s}\\n\".format(quake.mag, quake.longitude, quake.latitude, quake.time, quake.place))\n\n \n\ndef main():\n option = \"\"\n quakes = read_quakes_from_file(\"quakes.txt\")\n printing(quakes)\n\n\n while option != \"q\" and option != \"Q\":\n option = input(\"Options:\\n (s)ort\\n (f)ilter\\n (n)ew quakes\\n (q)uit\\n\\nChoice: \")\n\n if option == \"s\" or option == \"S\":\n sort_by = input(\"Sort by (m)agnitude, (t)ime, (l)ongtide, or l(a)titude? \")\n\n if sort_by == \"m\" or sort_by == \"M\":\n quakes.sort(key=attrgetter('mag'), reverse=True)\n print()\n printing(quakes) \n elif sort_by == \"t\" or sort_by == \"T\":\n quakes.sort(key=attrgetter(\"time\"), reverse=True)\n print()\n printing(quakes)\n elif sort_by == \"l\" or sort_by == \"L\":\n quakes.sort(key=attrgetter(\"longitude\"))\n print()\n printing(quakes)\n elif sort_by == \"a\" or sort_by == \"A\":\n quakes.sort(key=attrgetter(\"latitude\"))\n print()\n printing(quakes)\n\n elif option == \"f\" or option == \"F\":\n filter_by = input(\"Filter by (m)agnitude or (p)lace? \")\n\n if filter_by == \"m\" or filter_by == \"M\":\n lower = float(input(\"Lower bound: \"))\n upper = float(input(\"Upper bound: \"))\n filtered = filter_by_mag(quakes, lower, upper)\n print()\n printing(filtered)\n\n elif filter_by == \"p\" or filter_by == \"P\":\n string = input(\"Search for what string? \")\n filtered = filter_by_place(quakes, string)\n print()\n printing(filtered)\n\n elif option == \"n\" or option == \"N\":\n quakes_dict = get_json(\"http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/1.0_hour.geojson\")\n found = False\n for feature in quakes_dict[\"features\"]:\n if quake_from_feature(feature) not in quakes:\n quakes.append(quake_from_feature(feature))\n found = True\n\n if found == True:\n print(\"New quakes found!!!\")\n print()\n printing(quakes) \n\n elif option == \"q\" or option == \"Q\":\n write_to_file(quakes, \"saved_quakes.txt\")\n \n \n \n\n\n \n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"quakes.py","file_name":"quakes.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"502312339","text":"cus = [int(i) for i in input().split(',')]\ngru = [int(i) for i in input().split(',')]\nx = int(input())\nout = 0\nfor i in range(len(gru)-x+1):\n temp = 0\n for j in range(len(gru)):\n if(i<=j and j<=i+x-1):\n temp+=cus[j]*1\n else:temp+=cus[j]*gru[j]\n out = max(temp,out)\nprint(out)","sub_path":"Code/CodeRecords/2643/60642/316058.py","file_name":"316058.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"33574205","text":"import os\nimport struct\nimport numpy as np\nimport matplotlib.pyplot as plt\n# from PIL import Image\n\nimport tensorflow as tf\nimport cv2\n\naph = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\n\ndef load_emnist_image(path, imagefilename, labelfilename, type = 'train', batch_size=64):\n\n image_full_name = os.path.join(path, imagefilename)\n label_full_name = os.path.join(path, labelfilename)\n fp1 = open(image_full_name, 'rb')\n fp2 = open(label_full_name, 'rb')\n buf1 = fp1.read()\n buf2 = fp2.read()\n\n # 处理labels\n index = 0;\n magic, num = struct.unpack_from('>II', buf2, index)\n index += struct.calcsize('>II')\n labels = []\n\n for i in range(num):\n l = int(struct.unpack_from('>B',buf2, index)[0])\n labels.append(l)\n index += struct.calcsize('>B')\n \n\n # 处理images\n index = 0;\n magic_image, num_image, rows_image, cols_image = struct.unpack_from('>IIII', buf1, index)\n magic, num = struct.unpack_from('>II', buf1, index)\n index += struct.calcsize('>IIII')\n images = []\n\n for image in range(0, num):\n im = struct.unpack_from('>784B', buf1, index)\n index += struct.calcsize('>784B')\n im = np.array(im, dtype = 'uint8')\n im = im.reshape(28, 28)\n im_rot90 = np.rot90(im, -1)\n im_mirror = np.fliplr(im_rot90)\n # im_mirror = Image.fromarray(im_mirror)\n images.append(im_mirror)\n # if (type == 'train'):\n # print(image)\n # im_mirror.save(\"/home/rinz/Documents/buysell/read_picture/handwriting/datasets/rawdata/aa/train_{a}_{b}.png\".format(a=aph[int(labels[image])-1], b=image), 'png')\n # if (type == 'test'):\n # im_mirror.save('/home/rinz/Documents/buysell/read_picture/handwriting/datasets/rawdata/bb/test_%s.png' %image, 'png')\n\n # 构建dataset\n def _fixed_sides_resize(image, output_height, output_width):\n \"\"\"Resize images by fixed sides.\n\n Args:\n image: A 3-D image `Tensor`.\n output_height: The height of the image after preprocessing.\n output_width: The width of the image after preprocessing.\n\n Returns:\n resized_image: A 3-D tensor containing the resized image.\n \"\"\"\n output_height = tf.convert_to_tensor(output_height, dtype=tf.int32)\n output_width = tf.convert_to_tensor(output_width, dtype=tf.int32)\n\n image = tf.expand_dims(image, 0)\n # resized_image = tf.image.resize_nearest_neighbor(\n # image, [output_height, output_width], align_corners=False) # 返回[batch, height, width, channels]\n resized_image = tf.image.resize_images(\n image, [output_height, output_width], method=0)\n resized_image = tf.squeeze(resized_image, 0) # 去掉batch,留下[224, 224, 1]\n resized_image = tf.concat([resized_image, resized_image, resized_image], -1) # 单通道叠到3通道\n # resized_image = tf.expand_dims(resized_image, 2)\n # resized_image.set_shape([None, None, 1])\n return resized_image\n\n def _parse_function(image, label):\n img = tf.reshape(image, [28, 28, 1])\n image_raw = _fixed_sides_resize(img, 224, 224)\n return tf.to_float(image_raw), label-1\n\n images_array = np.array(images)\n dataset = tf.data.Dataset.from_tensor_slices((images_array, labels))\n dataset = dataset.map(_parse_function)\n if type == 'train':\n dataset = dataset.repeat(10)\n # dataset = dataset.batch(64)\n dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))\n\n return dataset\n# iterator = dataset.make_one_shot_iterator()\n# image, label = iterator.get_next()\n# return image, label\n\n\n# with tf.Session() as sess:\n# sess.run(tf.global_variables_initializer())\n\n# path = 
'/tf/handwriting/datasets/'\n# train_images = 'emnist-letters-train-images-idx3-ubyte'\n# train_labels = 'emnist-letters-train-labels-idx1-ubyte'\n\n# reshaped_image = load_emnist_image(path, train_images, train_labels)\n\n# for i in range(1):\n# # 每次sess.run(reshaped_image),都会取出一张图片\n# imgs, labels = sess.run(reshaped_image)\n# # print(labels)\n# # print('--------------------------------')\n# # labels_max = tf.reduce_max(labels)\n# # if(sess.run(labels_max) > 26):\n# # print('bad')\n# # print('--------------------------------')\n# index=0\n# print(len(imgs))\n# print(\"------start-------\")\n# for img in imgs:\n# print(labels[index])\n# # print(img)\n# cv2.imwrite(\"/tf/handwriting/datasets/rawdata/raw_test/train_{a}_{b}.png\".format(\n# a = aph[labels[index]], b = index), img)\n# index += 1","sub_path":"read_picture/handwriting/input_data.py","file_name":"input_data.py","file_ext":"py","file_size_in_byte":4881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"310965706","text":"from enum import Enum\n\nfrom core.inputfile import Section\n\n\nclass QualityAnalysisType(Enum):\n \"\"\"Type of Water Quality Analysis\"\"\"\n NONE = 1\n CHEMICAL = 2\n AGE = 3\n TRACE = 4\n\n\nclass QualityOptions(Section):\n \"\"\"EPANET Quality Options\"\"\"\n\n SECTION_NAME = \"[OPTIONS]\"\n\n field_format = \" {:20}\\t{}\\n\"\n\n def __init__(self):\n Section.__init__(self)\n\n self.quality = QualityAnalysisType.NONE\n \"\"\"Type of water quality analysis to perform\"\"\"\n\n self.chemical_name = \"\"\n \"\"\"Name of chemical to be analyzed in quality section\"\"\"\n\n self.mass_units = \"\"\n \"\"\"Units of chemical to be analyzed in quality section\"\"\"\n\n self.diffusivity = 1.0\n \"\"\"Molecular diffusivity of the chemical being analyzed relative to that of chlorine in water\"\"\"\n\n self.trace_node = \"\"\n \"\"\"Node id to use in a quality trace\"\"\"\n\n self.tolerance = 0.0\n \"\"\"Difference in water quality level below one parcel of water is essentially the same as another\"\"\"\n\n def get_text(self):\n \"\"\"Contents of this item formatted for writing to file\"\"\"\n txt = \" Quality \\t\"\n if self.quality is None or self.quality == QualityAnalysisType.NONE:\n txt = \"\"\n elif self.quality == QualityAnalysisType.AGE:\n txt += \"AGE\"\n elif self.quality == QualityAnalysisType.TRACE:\n txt += \"Trace\"\n if self.trace_node:\n txt += \" \" + self.trace_node\n elif self.quality == QualityAnalysisType.CHEMICAL:\n if self.chemical_name:\n txt += self.chemical_name\n else:\n txt += \"CHEMICAL\"\n if txt and self.mass_units:\n txt += \" \" + self.mass_units\n if txt:\n txt += \"\\n\"\n txt += self.field_format.format(\"Diffusivity\", str(self.diffusivity))\n txt += self.field_format.format(\"Tolerance\", str(self.tolerance))\n return txt\n\n def set_text(self, new_text):\n \"\"\"Read properties from text.\n Args:\n new_text (str): Text to parse into properties.\n \"\"\"\n self.quality = QualityAnalysisType.NONE # default to NONE until found below\n self.chemical_name = \"\"\n self.mass_units = \"\"\n self.trace_node = \"\"\n\n for line in new_text.splitlines():\n line_list = line.split()\n if line_list:\n if str(line_list[0]).strip().upper() == \"QUALITY\":\n quality_type = str(line_list[1]).strip().upper()\n try:\n self.quality = QualityAnalysisType[quality_type]\n except:\n self.quality = QualityAnalysisType.CHEMICAL\n self.chemical_name = str(line_list[1])\n if self.quality == QualityAnalysisType.TRACE:\n self.trace_node = line_list[2]\n elif len(line_list) > 2:\n self.mass_units = line_list[2]\n elif str(line_list[0]).strip().upper() == \"DIFFUSIVITY\":\n self.diffusivity = float(line_list[1])\n elif str(line_list[0]).strip().upper() == \"TOLERANCE\":\n self.tolerance = float(line_list[1])\n","sub_path":"src/core/epanet/options/quality.py","file_name":"quality.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"474778179","text":"from sys import stdin\n\ndef main():\n noFirst = False\n while True:\n try:\n n = int(input())\n except:\n break\n if noFirst:\n print()\n noFirst=True\n gainLoss=[]\n toPosition={}\n #register people\n personNames=input().split()\n for i,personName in enumerate(personNames):\n gainLoss.append(0)\n toPosition[personName]=i\n #simulate\n for _ in range(0,n):\n tk = input().split()\n person,money,numberOfGifts = tk[0],int(tk[1]),int(tk[2])\n evenMoney = money//numberOfGifts if numberOfGifts!=0 else 0\n position = toPosition[person]\n gainLoss[position]+=-evenMoney*numberOfGifts\n for j in range(0,numberOfGifts):\n gainLoss[toPosition[tk[3+j]]]+=evenMoney\n for personName,gainLossI in zip(personNames,gainLoss):\n print(str(personName)+\" \"+str(gainLossI))\nmain()","sub_path":"Section 1.3.3/119 - Greedy Gift Givers/Python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"344759816","text":"'''\r\npygetmap:\r\n\r\nDownload web map by cooridinates\r\n\r\n'''\r\n\r\n#Longitude 经度\r\n#Latitude 纬度\r\n#Mecator x = y = [-20037508.3427892,20037508.3427892]\r\n#Mecator Latitue = [-85.05112877980659,85.05112877980659]\r\n\r\n\r\nfrom math import floor,pi,log,tan,atan,exp\r\nimport urllib.request as ur\r\nimport PIL.Image as pil\r\nimport io, asyncio\r\n\r\nHEADERS = 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.76 Safari/537.36'\r\n\r\nMAP_URLS={\r\n\"google\":{\"domain\":\"mt2.google.cn\",\"url\":\"/vt/lyrs={style}&hl=zh-CN&gl=CN&src=app&x={x}&y={y}&z={z}\"},\r\n\"amap\":{\"domain\":\"wprd02.is.autonavi.com\",\"url\":\"/appmaptile?style={style}&x={x}&y={y}&z={z}\"},\r\n\"tencent_s\":{\"domain\":\"p3.map.gtimg.com\",\"url\":\"/sateTiles/{z}/{fx}/{fy}/{x}_{y}.jpg\"},\r\n\"tencent_m\":{\"domain\":\"rt0.map.gtimg.com\",\"url\":\"/tile?z={z}&x={x}&y={y}&styleid=3\" }}\r\n\r\nEXIT = False\r\n\r\ndef geturl(source,x,y,z,style):\r\n '''\r\n Get the picture url for download.\r\n style:\r\n m for map\r\n s for satellite\r\n source:\r\n goole or amap or tencent\r\n x y:\r\n google-style tile coordinate system\r\n z:\r\n zoom \r\n '''\r\n if source == 'google':\r\n furl=MAP_URLS[\"google\"][\"url\"].format(x=x,y=y,z=z,style=style)\r\n elif source == 'amap':\r\n style=6 if style=='s' else 7 # for amap 6 is satellite and 7 is map.\r\n furl=MAP_URLS[\"amap\"][\"url\"].format(x=x,y=y,z=z,style=style)\r\n elif source == 'tencent':\r\n y=2**z-1-y\r\n if style == 's':\r\n furl=MAP_URLS[\"tencent_s\"][\"url\"].format(x=x,y=y,z=z,fx=floor(x/16),fy=floor(y/16))\r\n else:\r\n furl=MAP_URLS[\"tencent_m\"][\"url\"].format(x=x,y=y,z=z)\r\n else:\r\n raise Exception(\"Unknown Map Source ! \")\r\n\r\n return furl\r\n\r\ndef getdomain(source,style):\r\n if source == 'tencent':\r\n if style == \"s\":\r\n return MAP_URLS[\"tencent_s\"][\"domain\"]\r\n else:\r\n return MAP_URLS[\"tencent_m\"][\"domain\"]\r\n elif source == \"amap\" or source == \"google\":\r\n return MAP_URLS[source][\"domain\"]\r\n else:\r\n raise Exception(\"Unkonwn Map Source ! 
\")\r\n\r\n\r\n#WGS-84经纬度转Web墨卡托\r\ndef wgs2macator(x,y):\r\n y = 85.0511287798 if y > 85.0511287798 else y\r\n y = -85.0511287798 if y < -85.0511287798 else y\r\n\r\n x2 = x * 20037508.34 / 180\r\n y2 = log(tan((90+y)*pi/360))/(pi/180)\r\n y2 = y2*20037508.34/180\r\n return x2, y2\r\n\r\n#Web墨卡托转经纬度\r\ndef mecator2wgs(x,y):\r\n x2 = x / 20037508.34 * 180\r\n y2 = y / 20037508.34 * 180\r\n y2= 180/pi*(2*atan(exp(y2*pi/180))-pi/2)\r\n return x2,y2\r\n\r\n\r\n'''\r\n东经为正,西经为负。北纬为正,南纬为负\r\nj经度 w纬度 z缩放比例[0-22] ,对于卫星图并不能取到最大,测试值是20最大,再大会返回404.\r\n'''\r\n# 根据WGS-84 的经纬度获取谷歌地图中的瓦片坐标\r\ndef getpos(j,w,z):\r\n '''\r\n Get google-style tile cooridinate from geographical coordinate\r\n j : Longittude\r\n w : Latitude\r\n z : zoom\r\n '''\r\n isnum=lambda x: isinstance(x,int) or isinstance(x,float)\r\n if not(isnum(j) and isnum(w)):\r\n raise TypeError(\"j and w must be int or float!\")\r\n return None\r\n\r\n if not isinstance(z,int) or z<0 or z>22:\r\n raise TypeError(\"z must be int and between 0 to 22.\")\r\n return None\r\n\r\n if j<0:\r\n j=180+j\r\n else:\r\n j+=180\r\n j/=360 # make j to (0,1)\r\n\r\n w=85.0511287798 if w>85.0511287798 else w\r\n w=-85.0511287798 if w<-85.0511287798 else w\r\n w=log(tan((90+w)*pi/360))/(pi/180)\r\n w/=180 # make w to (-1,1)\r\n w=1-(w+1)/2 # make w to (0,1) and left top is 0-point\r\n\r\n num=2**z\r\n x=floor(j*num)\r\n y=floor(w*num)\r\n return x,y\r\n\r\n\r\n#根据瓦片坐标范围,获得该区域四个角的web墨卡托投影坐标\r\ndef getframeM(inx,iny,inx2,iny2,z):\r\n '''\r\n Get the frame of region \r\n input lefttop and rightbutton tile cooridinates\r\n output WebMecator cooridinates of the frame\r\n '''\r\n length = 20037508.3427892\r\n sum=2**z\r\n LTx=inx / sum*length*2 - length\r\n LTy= -(iny / sum*length*2) + length\r\n\r\n RBx=(inx2+1) / sum*length*2 - length\r\n RBy= -((iny2+1) / sum*length*2) + length\r\n\r\n #LT=left top,RB=right buttom\r\n #返回四个角的投影坐标\r\n res={'LT':(LTx,LTy),'RB':(RBx,RBy),'LB':(LTx,RBy),'RT':(RBx,LTy)}\r\n return res\r\n\r\n#根据瓦片坐标范围,获得该区域四个角的地理经纬度坐标\r\ndef getframeW(inx,iny,inx2,iny2,z):\r\n '''\r\n Get the frame of region \r\n input lefttop and rightbutton tile cooridinates\r\n output geographical cooridinates of the frame\r\n '''\r\n zb=getframeM(inx,iny,inx2,iny2,z)\r\n for index,xy in zb.items():\r\n zb[index]=mecator2wgs(*xy)\r\n #返回四个角的经纬度坐标\r\n return zb\r\n\r\ndef printzb(zb):\r\n if not zb:\r\n return\r\n print(\"左上:({0:.7f},{1:.7f})\".format(*zb['LT']))\r\n print(\"右上:({0:.7f},{1:.7f})\".format(*zb['RT']))\r\n print(\"左下:({0:.7f},{1:.7f})\".format(*zb['LB']))\r\n print(\"右下:({0:.7f},{1:.7f})\".format(*zb['RB']))\r\n\r\n\r\n\r\nasync def async_getpic(domain, urls, out_pics, index, multiple): # 通过协程来下载图像,更具效率\r\n length=len(urls)\r\n global EXIT\r\n for i in range(index,length,multiple):\r\n if EXIT:\r\n return\r\n connect = asyncio.open_connection(host=domain, port=80)\r\n try:\r\n reader, writer = await connect\r\n except:\r\n EXIT = True\r\n return\r\n header = 'GET {url} HTTP/1.0\\r\\nHost: {domain}\\r\\n{header}\\r\\n\\r\\n'.format(url=urls[i], \r\n domain=domain, header=HEADERS)\r\n writer.write(header.encode('utf-8'))\r\n await writer.drain()\r\n msg = await reader.read()\r\n if msg.find(b\"Content-Type: image\")>1:\r\n pl=msg.find(b\"\\r\\n\\r\\n\")\r\n out_pics[i] = msg[pl+4:]\r\n else:\r\n out_pics[i] = None\r\n EXIT = True\r\n \r\n writer.close()\r\n\r\n\r\ndef async_getpics(urls,domain, multiple=10): # 根据urls列表下载图片数据\r\n loop = asyncio.get_event_loop() # 得到一个事件循环模型\r\n minor_pics=[None for i in range(len(urls))] #预留list空间\r\n tasks=[]\r\n global 
EXIT\r\n EXIT = False\r\n for i in range(multiple):\r\n tasks.append(async_getpic(domain,urls,minor_pics,i,multiple))\r\n\r\n loop.run_until_complete(asyncio.wait(tasks)) # 执行任务\r\n loop.close()\r\n return minor_pics # 返回图片数据\r\n\r\n\r\ndef getpic(x1,y1,x2,y2,z,source='google',outfile=\"MAP_OUT.png\",style='s'):\r\n '''\r\n 依次输入左上角的经度、纬度,右下角的经度、纬度,缩放级别,地图源,输出文件,影像类型(默认为卫星图)\r\n 获取区域内的瓦片并自动拼合图像。\r\n '''\r\n pos1x, pos1y = getpos(x1, y1, z)\r\n pos2x, pos2y = getpos(x2, y2, z)\r\n frame=getframeW(pos1x,pos1y,pos2x,pos2y,z)\r\n lenx = pos2x - pos1x + 1\r\n leny = pos2y - pos1y + 1\r\n print(\"瓦片总数量:{x} X {y}\".format(x=lenx,y=leny))\r\n\r\n domain = getdomain(source,style)\r\n urls=[geturl(source,i,j,z,style) for j in range(pos1y, pos1y + leny) for i in range(pos1x, pos1x + lenx)]\r\n print(\"正在下载......\")\r\n datas = async_getpics(urls, domain)\r\n \r\n if EXIT:\r\n print(\"下载出错!\\n可能是缩放级别z过大,或者未连接到网络。\")\r\n return\r\n\r\n print(\"下载完成!\\n开始拼合图像......\") \r\n outpic = pil.new('RGBA',(lenx*256,leny*256))\r\n for i,data in enumerate(datas):\r\n picio=io.BytesIO(data)\r\n try:\r\n small_pic=pil.open(picio)\r\n except:\r\n print(data)\r\n y,x = i // lenx,i % lenx\r\n outpic.paste(small_pic,(x*256,y*256))\r\n\r\n print('拼合完成!\\n正在导出...')\r\n outpic.save(outfile)\r\n print('导出完成!')\r\n return frame\r\n\r\n\r\ndef getpic_s(x,y,z,source='google',outfile=\"out_single.png\",style=\"s\"):\r\n '''获得单幅瓦片图像'''\r\n getpic(x,y,x,y,z,source,outfile,style)\r\n\r\n\r\nif __name__ == '__main__':\r\n #下载西安 青龙寺地块 卫星地图\r\n mm=getpic(108.9797845,34.2356831,108.9949663,34.2275018,\r\n 18,source='google',style='s',outfile=\"myout.png\")\r\n printzb(mm)\r\n \r\n","sub_path":"getmap.py","file_name":"getmap.py","file_ext":"py","file_size_in_byte":8276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
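The getpos function in getmap.py converts WGS-84 longitude/latitude to Google/OSM-style tile indices through a Web Mercator projection. The snippet below is an independent sketch of the same slippy-map tile math in its more common textbook form (using ln(tan(lat) + sec(lat)) instead of log(tan((90+lat)*pi/360))); it is not taken from the record, but for latitudes inside the Mercator limits the two formulations should give the same tiles.

from math import floor, log, tan, cos, pi

def deg_to_tile(lon_deg, lat_deg, zoom):
    """Standard slippy-map tiling: lon/lat in degrees -> (x, y) tile indices."""
    lat = lat_deg * pi / 180.0
    n = 2 ** zoom
    x = floor((lon_deg + 180.0) / 360.0 * n)
    y = floor((1.0 - log(tan(lat) + 1.0 / cos(lat)) / pi) / 2.0 * n)
    return x, y

print(deg_to_tile(0.0, 0.0, 1))                  # (1, 1): the equator / prime-meridian tile
print(deg_to_tile(108.9797845, 34.2356831, 18))  # north-west corner passed to getpic in __main__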
+{"seq_id":"170453703","text":"# -*- coding: utf-8 -*-\n\nimport PyPDF2, os\n\n\npdfFiles = []\n\nfor filename in os.listdir('.'):\n\tif filename.endswith('.pdf'):\n\t\tpdfFiles.append(filename)\npdfFiles.sort()\npdfWriter = PyPDF2.PdfFileWriter()\nprint(pdfFiles)\n\nfor filename in pdfFiles:\n\tpdfFileObj = open(filename, 'rb')\n\tpdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n\tpageObj = pdfReader.getPage(0).rotateClockwise(270)\n\t# pageObj.rotateClockwise(270)\n\tpdfWriter.addPage(pageObj)\n\npdfOutput = open('allFiles.pdf', 'wb')\npdfWriter.write(pdfOutput)\npdfOutput.close()\n\n","sub_path":"pdf(PyPDF2)/unindoArquivos.py","file_name":"unindoArquivos.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"98189999","text":"# pylint: skip-file\n# flake8: noqa\n\n# Authors\n#\n# - pre-alpha 0.0.1 2016 - Matt Comben\n# - GA 1.0.0 2020 - Tomasz Szuster\n#\n# Copyrigh (c)\n#\n# This file is part of pysfdisk.\n#\n# pysfdisk is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 2 of the License, or\n# (at your option) any later version.\n#\n# pysfdisk is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with pysfdisk. If not, see \n\nfrom setuptools import setup\nfrom os import path\n\n# The directory containing this file\nHERE = path.abspath(path.dirname(__file__))\n\n# The text of the README file\nwith open(path.join(HERE, \"README.rst\")) as fid:\n long_description = fid.read()\n\nwith open(path.join(HERE, \"VERSION\")) as version_fh:\n version = version_fh.read()\n\nsetup(\n author=\"Matt Comben, Tomasz Szuster\",\n author_email=\"matthew@dockstudios.co.uk, tomasz.szuster@gmail.com\",\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n classifiers=[\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: POSIX :: Linux\",\n ],\n license=\"GNU GENERAL PUBLIC LICENSE\",\n name=\"py-disk-imager\",\n packages=[\"pysfdisk\"],\n url=\"https://github.com/beskidinstruments/python-sfdisk\",\n version=version,\n include_package_data=True,\n)\n","sub_path":"pypi_install_script/py-disk-imager-1.0.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"590423759","text":"import mysql.connector\nimport pandas as pd\nfrom utils import lin_log_interp\nimport numpy as np\nimport ast\n\nclass DBQuerier(object):\n\n def __init__(self,database,assetId,connection='Personal',debug=True,limit=None):\n\n if debug:\n self.debug_str = \"\"\n else:\n self.debug_str = \"and VibrationState.PCAFit IS NULL\"\n\n self.debug = debug\n\n self.database = database\n self.assetId = assetId\n self.reset_min_date()\n\n if connection.lower() == 'personal':\n self.user = 'dnewman'\n self.password = 'Convolve7691!'\n self.host = '10.8.0.1'\n elif connection.lower() == 'boeing':\n self.user = 'searcher'\n self.password = 'oDxAYdZaC3FWhY66'\n self.host = 'db18.iotfm.org'\n self.limit = limit\n \n def connect(self):\n self.mydb = mysql.connector.connect(user=self.user,password=self.password,\n host=self.host,database=self.database)\n\n def disconnect(self):\n self.mydb.close()\n\n def reset_min_date(self):\n self.minQueriedDate = \"2999-01-01 00:00:00.000000\"\n\n def insert_labels(self,dates,labels):\n \n insert_vals = [\"('\" + dates[i] + \"', '\" + self.assetId + \"', '\" + labels[i] + \"'),\" for i in range(len(dates))]\n insert_vals = ''.join(insert_vals)[:-1]\n \n query = \"\"\"INSERT INTO \"\"\" + self.database + \"\"\".VibrationState\n (`dateTime`,`assetId`,`values`) VALUES \n \"\"\" + insert_vals + \"\"\";\"\"\"\n \n if self.debug:\n print(query)\n else:\n self.execute_query(query)\n \n def insert_labels_program(self,dates,labels, programName):\n \n insert_vals = [\"('\" + dates[i] + \"', '\" + self.assetId + \"', '\" + labels[i] + \"', '\" + programName + \"'),\" for i in range(len(dates))]\n insert_vals = ''.join(insert_vals)[:-1]\n \n query = \"\"\"INSERT INTO \"\"\" + self.database + \"\"\".VibrationState\n (`dateTime`,`assetId`,`values`,`programName`) VALUES \n \"\"\" + insert_vals + \"\"\";\"\"\"\n \n if self.debug:\n print(query)\n else:\n self.execute_query(query)\n \n def insert_labels_experiment(self,table,dates,\n experimentNumber,\n experimentName,\n toolStatus,\n toolSize,\n material,\n depthOfCut,\n surfaceSpeed,\n feedRate):\n \n insert_vals = [\"('\" + dates[i] + \"', '\" + self.assetId + \"', \" + str(experimentNumber) + \", '\" + experimentName +\"', '\" \n + toolStatus + \"', '\" + toolSize + \"', '\" + material + \"', '\" + depthOfCut + \"', '\"\n + surfaceSpeed + \"', '\" + feedRate + \"'),\" for i in range(len(dates))]\n \n insert_vals = ''.join(insert_vals)[:-1]\n \n query = \"\"\"INSERT INTO \"\"\" + self.database + \"\"\".\"\"\" + table + \"\"\"\n (`dateTime`,`assetId`,`experimentSample`, `experimentName`, `toolStatus`,\n `toolSize`,`material`,`depthOfCut`,`surfaceSpeed`,`feedRate`) VALUES \n \"\"\" + insert_vals + \"\"\";\"\"\"\n \n if self.debug:\n print(query)\n else:\n self.execute_query(query)\n\n def select_labels(self):\n query = \"\"\"SELECT VibrationState.values, VibrationState.dateTime, RMS.values as rmsVals\n FROM \"\"\" + self.database + \"\"\".VibrationState\n INNER JOIN \"\"\" + self.database + \"\"\".RMS ON\n \"\"\" + self.database + \"\"\".RMS.dateTime = \"\"\" + self.database + \"\"\".VibrationState.dateTime \n where VibrationState.assetId = '\"\"\" + self.assetId + \"\"\"'\n order by dateTime desc;\"\"\"\n \n cursor = self.execute_query(query)\n data=cursor.fetchall()\n columns = ['dateTime','RMS','VibState']\n vibState = np.array([[data[i][0] for i in range(len(data))]]).T\n dateTime = np.array([[data[i][1] for i in range(len(data))]]).T\n rmsVals = np.array([[data[i][2] for i in range(len(data))]]).T\n\n 
Data = np.hstack((dateTime,rmsVals,vibState))\n\n resultDF = pd.DataFrame(data=Data,columns=columns)\n # resultDF['dateTime'] = pd.to_datetime(resultDF['dateTime'])\n\n return resultDF\n\n def select_unique_sensorId(self):\n\n query = \"\"\"SELECT DISTINCT(sensorId) FROM \"\"\" + self.database + \"\"\".RMS\n where assetId = '\"\"\" + self.assetId + \"\"\"';\"\"\"\n\n cursor = self.execute_query(query)\n data=cursor.fetchall()\n\n sensorId = [data[i][0] for i in range(len(data))]\n\n return sensorId\n \n def select_frequency_interval(self):\n query = \"\"\"SELECT frequencyInterval FROM \"\"\" + self.database + \"\"\".FFT\n where assetId = '\"\"\" + self.assetId + \"\"\"'\n order by id desc limit 1;\"\"\"\n\n cursor = self.execute_query(query)\n data=cursor.fetchall()\n\n frequencyInterval = float(data[0][0])\n\n return frequencyInterval\n \n def select_fft_features(self,\n minDate=None,\n stdev=False,\n labeled=True,\n sensorId=None,\n limit=None,\n descending_order=True,\n fft_interval=None,\n extra_condition = '',\n ):\n if self.debug:\n print(self.minQueriedDate)\n \n if fft_interval is not None:\n interval_str = \"and frequencyInterval\" + fft_interval\n else:\n interval_str = \"\"\n \n if stdev == True:\n table = 'FFTSTD'\n else:\n table = 'FFT'\n\n if labeled == True:\n vibStateSelect = \"\"\"exists \n (SELECT VibrationState.values from \"\"\" + self.database + \"\"\".VibrationState WHERE VibrationState.dateTime = \"\"\" + table + \"\"\".dateTime)\"\"\"\n else:\n vibStateSelect = table + \"\"\".dateTime < '\"\"\" + self.minQueriedDate + \"\"\"'\n and not exists \n (SELECT VibrationState.values from \"\"\" + self.database + \"\"\".VibrationState WHERE VibrationState.dateTime = \"\"\" + table + \"\"\".dateTime)\"\"\"\n\n if sensorId is not None:\n sensorId_str = \"and \" + self.database + \".RMS.sensorId = '\" + sensorId + \"'\"\n else: \n sensorId_str = ''\n \n if minDate is not None:\n minDate_str = \"and \" + table + \".dateTime > \" + minDate\n else:\n minDate_str = \"\"\n\n if self.limit is not None:\n limit_str = \"limit {}\".format(self.limit)\n else:\n limit_str = \"\"\n \n if descending_order == True:\n desc_str = \"desc\"\n else:\n desc_str = \"asc\"\n\n query = \"\"\"SELECT \"\"\" + table + \"\"\".sensorId, \n \"\"\" + table + \"\"\".dateTime AS dateTime, \n \"\"\" + table + \"\"\".values AS fftVals, \n RMS.values AS rmsVals,\n VibrationState.values as vibState,\n VibrationState.programName as programName\n FROM \"\"\" + self.database + \"\"\".\"\"\" + table + \"\"\"\n INNER JOIN \"\"\" + self.database + \"\"\".RMS ON \n \"\"\" + self.database + \"\"\".RMS.dateTime = \"\"\" + self.database + \"\"\".\"\"\" + table + \"\"\".dateTime \n and \"\"\" + self.database + \"\"\".RMS.assetId = '\"\"\" + self.assetId + \"\"\"' \n \"\"\" + sensorId_str + \"\"\"\n LEFT JOIN \"\"\" + self.database + \"\"\".VibrationState ON \n \"\"\" + self.database + \"\"\".VibrationState.dateTime = \"\"\" + self.database + \"\"\".\"\"\" + table + \"\"\".dateTime \n where \"\"\" + vibStateSelect + \"\"\"\n \"\"\" + self.debug_str + \"\"\" \n and \"\"\" + table + \"\"\".assetId = '\"\"\" + self.assetId + \"\"\"'\n \"\"\" + minDate_str + \"\"\" \"\"\" + interval_str + \"\"\" \"\"\" + extra_condition + \"\"\"\n order by \"\"\" + table + \"\"\".dateTime \"\"\" + desc_str + \" \" + limit_str + \"\"\"; \"\"\"\n \n print(query)\n\n cursor = self.execute_query(query)\n data=cursor.fetchall()\n\n if len(data) <= 0:\n return pd.DataFrame(),pd.DataFrame(),pd.DataFrame()\n \n fftVals = np.array([[]])\n \n \n sensorId 
= np.array([[data[i][0] for i in range(len(data))]]).T\n dateTime = np.array([[data[i][1] for i in range(len(data))]]).T\n fftVals = np.array([np.array(ast.literal_eval(data[i][2])) for i in range(len(data))])\n rmsVals = np.array([[data[i][3] for i in range(len(data))]]).T.astype(float)\n vibState = np.array([[data[i][4] for i in range(len(data))]]).T\n programName = np.array([[data[i][5] for i in range(len(data))]]).T\n\n columns = ['FFT-{}'.format(i) for i in range(fftVals.shape[1])]\n columns = ['dateTime'] + columns + ['RMS','sensorId','VibState','programName']\n\n \n fftVals = lin_log_interp(fftVals).astype(float)\n\n statsFeatures = np.hstack((dateTime,fftVals,rmsVals,sensorId,vibState,programName))\n\n featuresDF = pd.DataFrame(data=statsFeatures,columns=columns)\n featuresDF = featuresDF.set_index('dateTime')\n featuresDF.index = pd.to_datetime(featuresDF.index)\n \n cursor.close()\n self.disconnect()\n\n sensorIdDF = featuresDF.loc[:, featuresDF.columns == 'sensorId']\n vibStateDF = featuresDF.loc[:, featuresDF.columns == 'VibState']\n progNameDF = featuresDF.loc[:, featuresDF.columns == 'programName']\n featuresDF = featuresDF.drop(['RMS','sensorId','VibState'],axis=1)\n \n self.minQueriedDate = np.amin(dateTime).strftime('%Y-%m-%d %H:%M:%S.%f')\n\n \n return featuresDF,sensorIdDF,vibStateDF, programName\n\n def select_ml_features(self,labeled=True,sensorId=None):\n\n if self.debug:\n print(self.minQueriedDate)\n\n if labeled == True:\n vibStateSelect = \"\"\"exists \n (SELECT VibrationState.values from db18.VibrationState WHERE VibrationState.dateTime = FFT.dateTime)\"\"\"\n else:\n vibStateSelect = \"\"\"FFT.dateTime < '\"\"\" + self.minQueriedDate + \"\"\"'\n and not exists \n (SELECT VibrationState.values from db18.VibrationState WHERE VibrationState.dateTime = FFT.dateTime)\"\"\"\n\n if sensorId is not None:\n sensorId_str = \"and db18.VibrationSkewness.sensorId = '\" + sensorId + \"'\"\n else: \n sensorId_str = ''\n\n if limit is not None:\n limit_str = \"limit {}\".format(self.limit)\n else:\n limit_str = \"\"\n\n query = \"\"\"SELECT FFT.sensorId, \n FFT.dateTime AS dateTime, \n FFT.values AS fftVals, \n RMS.values AS rmsVals, \n VibrationMean.values as vibMeanVals, \n VibrationSkewness.values as vibSkewVals, \n VibrationKurtosis.values as vibKurtVals, \n VibrationVariance.values as vibVarVals, \n VibrationState.values as vibState\n FROM db18.VibrationSkewness\n INNER JOIN db18.FFT ON\n db18.FFT.dateTime = db18.VibrationSkewness.dateTime \n and db18.VibrationSkewness.assetId = '\"\"\" + self.assetId + \"\"\"' \n \"\"\" + sensorId_str + \"\"\"\n INNER JOIN db18.RMS ON \n db18.RMS.dateTime = db18.VibrationSkewness.dateTime \n INNER JOIN db18.VibrationMean ON \n db18.VibrationMean.dateTime = db18.VibrationSkewness.dateTime \n INNER JOIN db18.VibrationKurtosis ON \n db18.VibrationKurtosis.dateTime = db18.VibrationSkewness.dateTime \n INNER JOIN db18.VibrationVariance ON \n db18.VibrationVariance.dateTime = db18.VibrationSkewness.dateTime \n LEFT JOIN db18.VibrationState ON \n db18.VibrationState.dateTime = db18.VibrationSkewness.dateTime \n where \"\"\" + vibStateSelect + \"\"\"\n \"\"\" + self.debug_str + \"\"\"\n order by FFT.dateTime desc \"\"\" + limit_str + \"\"\"; \"\"\"\n\n cursor = self.execute_query(query)\n data=cursor.fetchall()\n\n if len(data) <= 0:\n return pd.DataFrame(),pd.DataFrame(),pd.DataFrame()\n \n columns = ['FFT-{}'.format(i) for i in range(257)]\n columns = ['dateTime'] + columns + ['RMS', 'Mean', 'Skew', 'Kurtosis', 
'Variance','sensorId','VibState']\n \n sensorId = np.array([[data[i][0] for i in range(len(data))]]).T\n dateTime = np.array([[data[i][1] for i in range(len(data))]]).T\n fftVals = np.array([ast.literal_eval(data[i][2]) for i in range(len(data))])\n rmsVals = np.array([[data[i][3] for i in range(len(data))]]).T\n vibMeanVals = np.array([[data[i][4] for i in range(len(data))]]).T\n vibSkewVals = np.array([[data[i][5] for i in range(len(data))]]).T\n vibKurtVals = np.array([[data[i][6] for i in range(len(data))]]).T\n vibVarVals = np.array([[data[i][7] for i in range(len(data))]]).T\n vibState = np.array([[data[i][8] for i in range(len(data))]]).T\n\n vibKurtVals = np.abs(vibKurtVals)\n vibSkewVals = np.abs(vibSkewVals)\n \n fftVals = lin_log_interp(fftVals)\n\n self.minQueriedDate = np.amin(dateTime).strftime('%Y-%m-%d %H:%M:%S.%f')\n\n statsFeatures = np.hstack((dateTime,fftVals,rmsVals,vibMeanVals,vibSkewVals,vibKurtVals,vibVarVals,sensorId,vibState))\n\n featuresDF = pd.DataFrame(data=statsFeatures,columns=columns)\n featuresDF = featuresDF.set_index('dateTime')\n featuresDF.index = pd.to_datetime(featuresDF.index)\n \n cursor.close()\n self.disconnect()\n\n sensorIdDF = featuresDF.loc[:, featuresDF.columns == 'sensorId']\n vibStateDF = featuresDF.loc[:, featuresDF.columns == 'VibState']\n featuresDF = featuresDF.drop(['sensorId','VibState'],axis=1).astype(float)\n \n return featuresDF,sensorIdDF,vibStateDF\n\n def execute_query(self,query):\n\n self.connect()\n cursor = self.mydb.cursor()\n cursor.execute(query)\n\n if query.lower().find('insert') != -1:\n self.mydb.commit()\n cursor.close()\n self.disconnect()\n return True\n else:\n return cursor\n","sub_path":"Dissertation-Notebooks/Chapter-3/Emco_Warmup_Monitoring/DBQuerier.py","file_name":"DBQuerier.py","file_ext":"py","file_size_in_byte":14338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
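DBQuerier.py assembles its SQL by string concatenation. As a hedged aside (this is not part of the record, and the table/column names and connection details below are placeholders), mysql-connector-python also accepts parameterized queries via %s placeholders, which avoids quoting problems when values such as assetId arrive from outside:

import mysql.connector

# Placeholder connection details, not the ones used in the record.
conn = mysql.connector.connect(user="user", password="secret",
                               host="localhost", database="exampledb")
cursor = conn.cursor()

asset_id = "asset-42"  # value supplied at runtime
cursor.execute(
    "SELECT dateTime, `values` FROM RMS WHERE assetId = %s ORDER BY dateTime DESC LIMIT 10",
    (asset_id,),  # driver handles escaping/quoting of the bound value
)
for date_time, values in cursor.fetchall():
    print(date_time, values)

cursor.close()
conn.close()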
+{"seq_id":"461789637","text":"#!/usr/bin/python3\n# -*- coding: utf8 -*-\n#######################\n#\n# 电表自动读数系统\n#\n#######################\n\nimport numpy as np\nimport imutils\nimport os\n\nimport pytesseract\nfrom PIL import Image\n\nimport cv2\n\nfrom skimage.morphology import disk\nfrom skimage.filter import rank\nimport sys\n\n\nfrom skimage import exposure\nimport argparse\n\nshow_img = True\n\ndef img_show_hook(title, img):\n global show_img\n type = sys.getfilesystemencoding()\n if show_img:\n #cv2.imshow(title, img)\n cv2.imshow(title.decode('utf-8').encode(type), img)\n cv2.waitKey(0) \n return\n\n\ndef img_sobel_binary(im, blur_sz):\n \n # 高斯模糊,滤除多余的直角干扰\n img_blur = cv2.GaussianBlur(im,blur_sz,0)\n if len(img_blur.shape) == 3:\n # 转换成灰度图\n blur_gray = cv2.cvtColor(img_blur,cv2.COLOR_BGR2GRAY)\n else:\n blur_gray = img_blur\n\n # 提取Sobel直角特征\n sobelx = cv2.Sobel(blur_gray, cv2.CV_16S, 1, 0, ksize=1)\n sobely = cv2.Sobel(blur_gray, cv2.CV_16S, 0, 1, ksize=1)\n abs_sobelx = np.absolute(sobelx)\n abs_sobely = np.absolute(sobely)\n sobel_8ux = np.uint8(abs_sobelx)\n sobel_8uy = np.uint8(abs_sobely)\n # img_show_hook(\"Sobelx特征\", sobel_8ux)\n # img_show_hook(\"Sobely特征\", sobel_8uy)\n \n # OTSU提取二值图像 \n #ret, thdx = cv2.threshold(sobel_8ux, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n #ret, thdy = cv2.threshold(sobel_8uy, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n ret, thdx = cv2.threshold(sobel_8ux, 12, 255, cv2.THRESH_BINARY)\n ret, thdy = cv2.threshold(sobel_8uy, 12, 255, cv2.THRESH_BINARY)\n\n thd_absx = cv2.convertScaleAbs(thdx)\n thd_absy = cv2.convertScaleAbs(thdy)\n bgimg = cv2.addWeighted(thd_absx, 0.5, thd_absy, 0.5, 0)\n \n img_show_hook(\"OTSU二值图像\", bgimg)\n \n return bgimg\n\n\ndef img_contour_extra(im):\n # 腐蚀和膨胀\n # kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(5,5))\n # bgmask = cv2.morphologyEx(im, cv2.MORPH_CLOSE, kernel)\n bgmask = im\n img_show_hook(\"膨胀腐蚀结果\", bgmask)\n \n # 获得连通区域\n # 该函数会破坏原始参数\n # findContours找到外部轮廓\n contours, hierarchy = cv2.findContours(bgmask.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n cv2.drawContours(im, contours, -1, (0, 0, 255), 3)\n\n cv2.imshow(\"img\", im)\n cv2.waitKey(0)\n return contours\n\n\ndef img_contour_select(ctrs, im):\n # 剔除明显不满足条件的区域\n cand_rect = []\n for item in ctrs:\n # 周长,或者弧长,第二个参数表示该轮廓是否封闭,0.02的精度\n epsilon = 0.02*cv2.arcLength(item, True)\n # 进行轮廓近似\n approx = cv2.approxPolyDP(item, epsilon, True) \n if len(approx) <= 8:\n # minAreaRect 获得这些轮廓的最小外接矩形(旋转的外包络矩形),存储在vector向量中,返回值是RotatedRect\n rect = cv2.minAreaRect(item)\n '''\n if rect[1][0] < 20 or rect[1][1] < 20:\n continue\n if rect[1][0] > 150 or rect[1][1] > 150:\n continue\n '''\n\n # ratio = (rect[1][1]+0.00001) / rect[1][0]\n # if ratio > 1 or ratio < 0.9:\n # continue\n # box = cv2.boxPoints(rect)\n # box是四个点的坐标\n box = cv2.cv.BoxPoints(rect)\n box_d = np.int0(box)\n #画出轮廓,-1,表示所有轮廓(0,表示画出第0个轮廓),画笔颜色为(0, 255, 0),即Green,粗细为3\n cv2.drawContours(im, [box_d], 0, (0,255,0), 3)\n cand_rect.append(box)\n img_show_hook(\"候选区域\", im)\n #img_show_hook(\"候选区域\", im)\n return cand_rect\n\n\n# 轮廓\ndef img_contour_select_one(ctrs, im):\n # 剔除明显不满足条件的区域\n cand_rect = []\n for item in ctrs:\n # 周长,或者弧长,第二个参数表示该轮廓是否封闭,0.02的精度\n epsilon = 0.02*cv2.arcLength(item, True)\n # 进行轮廓近似\n approx = cv2.approxPolyDP(item, epsilon, True)\n if len(approx) == 4:\n # minAreaRect 获得这些轮廓的最小外接矩形(旋转的外包络矩形),存储在vector向量中,返回值是RotatedRect\n rect = cv2.minAreaRect(item)\n '''\n if rect[1][0] < 20 or rect[1][1] < 20:\n continue\n if rect[1][0] > 150 or rect[1][1] > 150:\n 
continue\n '''\n\n # ratio = (rect[1][1]+0.00001) / rect[1][0]\n # if ratio > 1 or ratio < 0.9:\n # continue\n # box = cv2.boxPoints(rect)\n # box是四个点的坐标\n box = cv2.cv.BoxPoints(rect)\n box_d = np.int0(box)\n #画出轮廓,-1,表示所有轮廓(0,表示画出第0个轮廓),画笔颜色为(0, 255, 0),即Green,粗细为3\n cv2.drawContours(im, [box_d], 0, (0,255,0), 2)\n cand_rect.append(box)\n img_show_hook(\"候选区域\", im)\n #img_show_hook(\"候选区域\", im)\n return cand_rect\ndef img_tesseract_detect(c_rect, im):\n # 由于使用minAreaRect获得的图像有-90~0的角度,所以给出的坐标顺序也不一定是\n # 转换时候给的,这里需要判断出图像的左上、左下、右上、右下的坐标,便于后面的变换\n pts = c_rect.reshape(4, 2)\n rect = np.zeros((4, 2), dtype = \"float32\")\n \n # the top-left point has the smallest sum whereas the\n # bottom-right has the largest sum\n s = pts.sum(axis = 1)\n rect[0] = pts[np.argmin(s)]\n rect[3] = pts[np.argmax(s)]\n \n # compute the difference between the points -- the top-right\n # will have the minumum difference and the bottom-left will\n # have the maximum difference\n diff = np.diff(pts, axis = 1)\n rect[2] = pts[np.argmin(diff)]\n rect[1] = pts[np.argmax(diff)] \n\n dst = np.float32([[0,0],[0,100],[200,0],[200,100]])\n\n # 对于投影变换,我们则需要知道四个点,通过cv2.getPerspectiveTransform求得变换矩阵.\n # 之后使用cv2.warpPerspective获得矫正后的图片。\n M = cv2.getPerspectiveTransform(rect, dst)\n warp = cv2.warpPerspective(im, M, (200, 100))\n \n img_show_hook(\"剪裁识别图像\", warp) \n \n warp = np.array(warp, dtype=np.uint8)\n radius = 10\n selem = disk(radius)\n \n # 使用局部自适应OTSU阈值处理\n local_otsu = rank.otsu(warp, selem)\n l_otsu = np.uint8(warp >= local_otsu)\n l_otsu *= 255\n # 定义结构元素\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(4, 4))\n # 膨胀和腐蚀操作的核函数\n l_otsu = cv2.morphologyEx(l_otsu, cv2.MORPH_CLOSE, kernel)\n \n img_show_hook(\"局部自适应OTSU图像\", l_otsu) \n \n print(\"识别结果:\")\n print(pytesseract.image_to_string(Image.fromarray(l_otsu)))\n \n cv2.waitKey(0)\n return\n\n# 检测直线\ndef img_hough_lines(im):\n im = cv2.GaussianBlur(im,(3,3),0)\n edges = cv2.Canny(im, 50, 150, apertureSize=3)\n lines = cv2.HoughLines(edges, 1, np.pi / 180, 118) # 这里对最后一个参数使用了经验型的值\n result = im.copy()\n # 经验参数\n minLineLength = 200\n maxLineGap = 15\n lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 80, minLineLength, maxLineGap)\n print(lines[0].min(axis=0))\n test = lines[0].min(axis=0)\n cv2.line(im, (test[0], test[1]), (test[2], test[3]), (0, 255, 0), 2)\n cv2.imshow('Cannyedgesone', im)\n\n\n for x1, y1, x2, y2 in lines[0]:\n #print(type(lines[0]))\n cv2.line(im, (x1, y1), (x2, y2), (0, 255, 0), 2)\n #cv2.imshow('Cannyedgesone', im)\n\n cv2.imshow('Cannyedges', edges)\n cv2.imshow('Result', im)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n return im\n\n# 找轮廓过滤矩形examples\ndef img_test(im):\n img_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n img_gray = cv2.bilateralFilter(img_gray, 11, 17, 17)\n\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))\n bgmask = cv2.morphologyEx(im, cv2.MORPH_CLOSE, kernel)\n\n im = cv2.GaussianBlur(img_gray, (3, 3), 0)\n edges = cv2.Canny(img_gray, 50, 150, apertureSize=3)\n\n\n\n\n lines = cv2.HoughLines(edges, 1, np.pi / 180, 118) # 这里对最后一个参数使用了经验型的值\n result = im.copy()\n # 经验参数\n minLineLength = 200\n maxLineGap = 15\n lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 80, minLineLength, maxLineGap)\n\n\n for x1, y1, x2, y2 in lines[0]:\n cv2.line(im, (x1, y1), (x2, y2), (0, 255, 0), 2)\n\n cv2.imshow('Canny', edges)\n #edged = cv2.Canny(img_gray, 30, 200)\n\n\n\n #################\n\n # 图片二值化\n imagessss = img_sobel_binary(edges, (3, 3))\n ##################\n\n #cv2.imshow(\"edged\", edged)\n cv2.waitKey(0)\n\n\n 
(cnts, _) = cv2.findContours(edges.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:50]\n screenCnt = None\n # loop over our contours\n\n\n for c in cnts:\n # approximate the contour\n peri = cv2.arcLength(c, True)\n approx = cv2.approxPolyDP(c, 0.1 * peri, True)\n # if our approximated contour has four points, then\n # we can assume that we have found our screen\n if len(approx) == 4:\n screenCnt = approx\n #break\n\n cv2.drawContours(img_gray, [screenCnt], -1, (0, 255, 0), 2)\n cv2.imshow(\"Game Boy Screen\", img_gray)\n cv2.waitKey(0)\n\n\n# 对图片进行裁剪、识别\ndef img_detect_tesseract(im):\n\n # 购买方名称及身份证号码/组织机构代码\n cv2.rectangle(im, (72, 80), (247, 112), (0, 255, 0), 2)\n cv2.imshow(\"购买方名称及身份证号码\", im)\n\n # 创建图像\n #emptyImage = np.zeros((175,32), np.uint8)\n # 扣图像\n # box = (72, 80, 247, 112)\n # region = im.crop(box)\n # region.show()\n # 保存图像\n # cv2.imwrite(\"D:\\\\nameandid.jpg\", img)\n\n # 发动机号码\n cv2.rectangle(im, (72, 162), (247, 185), (0, 255, 0), 2)\n cv2.imshow(\"发动机号码\", im)\n\n # 车辆识别代码/车架号码\n cv2.rectangle(im, (353, 160), (518, 183), (0, 255, 0), 2)\n cv2.imshow(\"车辆识别代码/车架号码\", im)\n\n # 价税合计(小写)\n cv2.rectangle(im, (425, 184), (466, 207), (0, 255, 0), 2)\n cv2.imshow(\"价税合计\", im)\n\n print(pytesseract.image_to_string(Image.open('nameandid.png')))\n\n\n\n\nif __name__ == \"__main__\":\n \n print(\"...图片文字识别系统...\")\n \n #F1 = \"172_79.jpg\"\n #F1 = \"633_88.jpg\"\n #F1 = \"22.png\"\n #F1 = \"reciept.jpg\"\n #F1 = \"reciept11.png\"\n #F1 = \"lion.png\"\n F1 = \"receiptrect.jpg\"\n\n # 对图片进行裁剪、识别\n #img = Image.open(F1)\n\n\n img = cv2.imread(F1)\n img_show_hook(\"原图\", img)\n # 改变图片的长宽比\n img = imutils.resize(img, width=516, height=331)\n img_detect_tesseract(img)\n\n #检测直线\n im = img_hough_lines(img)\n\n\n\n #img_show_hook(\"restest\", img)\n\n\n #test 找轮廓找矩形\n img_test(img)\n\n # 转换成灰度图\n img_gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)\n # img_show_hook(\"灰度图像\", img)\n # 得到二值图像\n sb_img = img_sobel_binary(im, (5,5))\n # img_show_hook(\"二值图像\", sb_img)\n # 腐蚀和膨胀后,找到外部轮廓\n contours = img_contour_extra(sb_img)\n # 选出候选区域\n cand_rect = img_contour_select(contours, img)\n for item in cand_rect:\n # 输出识别结果\n img_tesseract_detect(np.array(item), img_gray)\n \n\n# http://www.pyimagesearch.com/2014/03/10/building-pokedex-python-getting-started-step-1-6/\n","sub_path":"dushu2.py","file_name":"dushu2.py","file_ext":"py","file_size_in_byte":11747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
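The img_tesseract_detect step in dushu2.py rests on OpenCV's four-point perspective transform. A minimal stand-alone illustration of just that step on a synthetic image (the corner coordinates are invented, and the TL/BL/TR/BR ordering mirrors the destination points used in the record):

import numpy as np
import cv2

# Synthetic 300x300 grayscale image with a white tilted quadrilateral drawn on it.
img = np.zeros((300, 300), dtype=np.uint8)
quad = np.array([[60, 40], [250, 70], [230, 260], [40, 220]], dtype=np.int32)
cv2.fillConvexPoly(img, quad, 255)

# Source corners ordered TL, BL, TR, BR, mapped onto a 200x100 upright rectangle.
src = np.float32([[60, 40], [40, 220], [250, 70], [230, 260]])
dst = np.float32([[0, 0], [0, 100], [200, 0], [200, 100]])

M = cv2.getPerspectiveTransform(src, dst)
warp = cv2.warpPerspective(img, M, (200, 100))
print(warp.shape)  # (100, 200): the rectified patch that would be handed to pytesseract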
+{"seq_id":"322237134","text":"from wagtail.contrib.modeladmin.options import ModelAdmin, modeladmin_register, ModelAdminGroup\nfrom django.views.generic.base import TemplateView\nfrom django.urls import reverse\nfrom django.conf.urls import url\nfrom django.http import HttpResponse\nimport logging\nimport ssl\nfrom .models import CarnetDAdresse\nfrom wagtail.admin.menu import MenuItem\n\nif hasattr(ssl, '_create_unverified_context'):\n ssl._create_default_https_context = ssl._create_unverified_context\n\nlogger = logging.getLogger('smeadmin')\n\nfrom django.contrib.auth.models import Permission\n\n\nclass CarnetDAdresseAdmin(ModelAdmin):\n \"\"\"\n Carnet d'adresse - pour la gestion de l'envoi de lettre et de courrier\n \"\"\"\n model = CarnetDAdresse\n menu_label = \"SME - Carnet d'adresse\"\n menu_icon = \"mail\"\n\n\n\nclass EnveloppeView(TemplateView):\n \"\"\"\n Letter DL format pdf generation\n \"\"\"\n\n template_name = \"env.html\"\n\n def post(self, request, *args, **kwargs):\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'attachment; filename=\"env.pdf\"'\n from reportlab.lib.units import mm\n from reportlab.pdfgen import canvas\n if 'dest' in request.POST:\n dest = request.POST['dest']\n if 'exp' in request.POST:\n exp = request.POST['exp']\n canvas = canvas.Canvas(response)\n canvas.setFont(\"Helvetica\", 10)\n canvas.setPageSize((220 * mm, 110 * mm))\n\n index = 100\n for l in exp.splitlines():\n canvas.drawString(10 * mm, index * mm, l)\n index -= 5\n\n canvas.setFont(\"Helvetica\", 12)\n index = 40\n for l in dest.splitlines():\n canvas.drawString(130 * mm, index * mm, l)\n index -= 5\n\n canvas.save()\n\n return response\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = \"Gestion des enveloppes\"\n context['carnet'] = CarnetDAdresse.objects.all()\n return context\n\n def will_modify_explorer_page_queryset(self):\n return False\n\n def get_admin_urls_for_registration(self):\n urls = (url(r'^smeadmin/$', EnveloppeView.as_view(), name='view_env'),)\n return urls\n\n def get_menu_item(self, order=None):\n return MenuItem('SME - Enveloppe', reverse(\"view_env\"), classnames='icon icon-mail', order=10000)\n\n def get_permissions_for_registration(self):\n return Permission.objects.none()\n\n\n\n\nclass LettreView(TemplateView):\n \"\"\"\n Autogenerate letter - French format\n \"\"\"\n\n template_name = \"letter.html\"\n\n def post(self, request, *args, **kwargs):\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'attachment; filename=\"letter.pdf\"'\n from reportlab.lib.units import mm\n from reportlab.lib.pagesizes import A4\n from reportlab.platypus import Paragraph, SimpleDocTemplate, Spacer\n from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle\n from reportlab.lib.enums import TA_JUSTIFY, TA_RIGHT, TA_LEFT\n\n doc = SimpleDocTemplate(response, pagesize=A4)\n\n Env = []\n\n entete = request.POST['exp']\n mystyle = getSampleStyleSheet()\n right = ParagraphStyle(name='Justify', alignment=TA_LEFT, leftIndent=300, parent=mystyle['Normal'])\n Env.append(Paragraph(entete.replace('\\n', '
'), right))\n Env.append(Spacer(1, 3 * mm))\n right = ParagraphStyle(name='Justify', alignment=TA_LEFT, leftIndent=300, parent=mystyle['Normal'])\n from datetime import datetime\n t = datetime.now()\n import locale\n locale.setlocale(locale.LC_ALL, 'fr_FR.UTF-8')\n Env.append(Paragraph(\"À \" + request.POST['loc'] + \", le \" + t.strftime(\"%a %d %b %Y\"), right))\n Env.append(Spacer(1, 12 * mm))\n\n entete = request.POST['dest']\n mystyle = getSampleStyleSheet()\n dest = ParagraphStyle(name='Justify', alignment=TA_LEFT, leftIndent=20 * mm, parent=mystyle['Normal'])\n Env.append(Paragraph(\"À l'attention de : \" + entete.replace('\\n', '
'), dest))\n Env.append(Spacer(1, 12 * mm))\n\n entete = request.POST['base']\n entete = entete.replace('$DEST$', request.POST['dest'])\n mystyle = getSampleStyleSheet()\n base = ParagraphStyle(name='Base', alignment=TA_JUSTIFY, leftIndent=0, parent=mystyle['Normal'])\n Env.append(Paragraph(entete.replace('\\n', '
'), base))\n doc.build(Env)\n\n return response\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['title'] = \"Gestion des lettres\"\n context['carnet'] = CarnetDAdresse.objects.all()\n return context\n\n def will_modify_explorer_page_queryset(self):\n return False\n\n def get_admin_urls_for_registration(self):\n urls = (url(r'^smeadmin-letter/$', LettreView.as_view(), name='view_lettre'),)\n return urls\n\n def get_menu_item(self, order=None):\n return MenuItem('SME - Lettre', reverse(\"view_lettre\"), classnames='icon icon-edit', order=10000)\n\n def get_permissions_for_registration(self):\n return Permission.objects.none()\n\n\nclass SMEAdminGroup(ModelAdminGroup):\n \"\"\"\n SME Admin menu\n \"\"\"\n menu_label = \"SMEAdmin\"\n items = (CarnetDAdresseAdmin,EnveloppeView, LettreView)\n\n# Register button\nmodeladmin_register(SMEAdminGroup)\n\n","sub_path":"smeadmin/wagtail_hooks.py","file_name":"wagtail_hooks.py","file_ext":"py","file_size_in_byte":5477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"272817964","text":"\n\nfrom pymongo import MongoClient\nimport pandas as pd\nimport json\n\nmongo_client = MongoClient(\"mongodb+srv://db-admin-satyaki:admin@cluster0.kkrlk.mongodb.net/\"\n \"MyFirstDatabase?retryWrites=true&w=majority\")\ndatabase = 'TestDb2'\n\n\ndef mongoimport(csv_path, db_name, coll_name):\n\n db = mongo_client[db_name]\n log_collection = db['logging']\n existing_collection = db[coll_name]\n existing_df = existing_collection.find_one()\n\n new_df = pd.read_csv(csv_path)\n\n if not existing_df:\n coll = db[coll_name]\n payload = json.loads(new_df.to_json(orient='records'))\n coll.insert_many(payload)\n return \"All items in csv inserted\"\n\n else:\n existing_df = pd.DataFrame(list(db[coll_name].find()))\n del existing_df['_id']\n\n if new_df.columns.values.tolist() != existing_df.columns.values.tolist():\n print(\"Column names do not match\")\n\n log = {\"file_path\": csv_path,\n \"error\": \"Column names do not match\",\n \"action\": \"Rejected\"\n }\n log_collection.insert_one(log)\n return \"Column names do not match\"\n\n elif new_df.equals(existing_df):\n print(\"csv is already uploaded\")\n\n log = {\"file_path\": csv_path,\n \"error\": \"exact collection already exists\",\n \"action\": \"Rejected\"\n }\n log_collection.insert_one(log)\n return \"csv is already uploaded\"\n\n else:\n merged_df = existing_df.merge(new_df, indicator=True, how='outer')\n changed_rows_df = merged_df[merged_df['_merge'] == 'right_only']\n new_rows = changed_rows_df.drop('_merge', axis=1)\n\n payload = json.loads(new_rows.to_json(orient='records'))\n if not payload:\n\n print(\"DB contains extra items than is given by the csv. DB data was not changed\")\n \"\"\"Log\"\"\"\n log = {\"file_path\": csv_path,\n \"error\": \"DB contains extra items than is given by the csv\",\n \"action\": \"DB data was not changed\"\n }\n log_collection.insert_one(log)\n return \"DB contains extra items than is given by the csv. DB data was not changed\"\n\n else:\n existing_collection.insert_many(payload)\n\n message = \"Part of the csv is new. {} new items are appended to collection\".format(len(new_rows))\n\n print(message)\n \"\"\"Log\"\"\"\n log = {\"file_path\": csv_path,\n \"error\": \"Part of the csv is new\",\n \"action\": message\n }\n log_collection.insert_one(log)\n return message\n\n\nif __name__ == \"__main__\":\n mongoimport('../csv/1/conversions_1.csv', database, 'conversions_1')\n # mongoimport('../csv/2/conversions_2.csv', database, 'conversions_2')\n # mongoimport('../csv/3/conversions_3.csv', database, 'conversions_3')\n # mongoimport('../csv/4/conversions_4.csv', database, 'conversions_4')\n #\n #\n # mongoimport('../csv/1/clicks_1.csv', database, 'clicks_1')\n # mongoimport('../csv/2/clicks_2.csv', database, 'clicks_2')\n # mongoimport('../csv/3/clicks_3.csv', database, 'clicks_3')\n # mongoimport('../csv/4/clicks_4.csv', database, 'clicks_4')\n","sub_path":"prod/populate_db_script.py","file_name":"populate_db_script.py","file_ext":"py","file_size_in_byte":3432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"427202328","text":"#!/usr/bin/env python\n\nimport datetime\nimport sys\n\nif __name__ == \"__main__\":\n\t# this class should extend a framework base class that\n\t# provides default implementations of important methods\n\timport sys\n\tsys.path.append(\"../\")\n\tfrom CondCore.Utilities.CondDBFW import querying_framework_api\n\timport CondCore.Utilities.CondDBFW.data_sources, CondCore.Utilities.CondDBFW.data_formats as format\n\tfrom CondCore.Utilities.CondDBFW.querying import connect\n\n\tclass query_script():\n\n\t\tdef script(self, connection):\n\t\t\teverything = connection.search_everything(sys.argv[1])\n\t\t\treturn everything\n\n\tsecrets_file = \"/afs/cern.ch/cms/DB/conddb/.cms_cond/netrc\"\n\tsecrets_file_1 = \"netrc_test\"\n\n\tconnection_data = {\"db_alias\" : \"orapro\", \"host\" : \"oracle\", \"schema\" : \"cms_conditions\", \"secrets\" : secrets_file}\n\tqf = querying_framework_api(connection_data)\n\tdata = qf.run_script(query_script())\n\n\tdata.get(\"global_tags\").as_table()\n\tdata.get(\"tags\").as_table()\n\tdata.get(\"iovs\").as_table()\n\tdata.get(\"payloads\").as_table()","sub_path":"CondCore/Utilities/python/CondDBFW/examples/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"167524717","text":"import sys, datetime, os\nfrom create_folders import CREATE_FOLDER\nimport traceback\n\ndef ERROR(fun_name):\n logs = 'logs'\n CREATE_FOLDER(logs)\n path_to_err = os.getcwd() + '\\\\' + logs + '\\\\error.txt'\n with open(path_to_err, 'a') as e:\n text = fun_name + ':\\t' + str(datetime.datetime.now()) + '\\t==>\\t' + str(sys.exc_info()[0]) + '\\n'\n text_2 = str(traceback.format_exc())\n print(text)\n print(traceback.format_exc())\n e.write(text)\n e.write(text_2)\n","sub_path":"errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"646328945","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nfrom openerp import netsvc\nfrom openerp.osv import fields, osv\nfrom openerp.tools.translate import _\nimport openerp.addons.decimal_precision as dp\nfrom math import ceil\n\nclass raw_material_procurement(osv.osv_memory):\n _name = \"raw.material.procurement\"\n _description = \"Raw Material Procurement\"\n\n _columns = {\n 'line_ids': fields.one2many('raw.material.procurement.line', 'raw_id', 'Products', ondelete='set null'),\n }\n\n def default_get(self, cr, uid, fields, context):\n \"\"\" To get default values for the object.\n @param self: The object pointer.\n @param cr: A database cursor\n @param uid: ID of the user currently logged in\n @param fields: List of fields for which we want default values\n @param context: A standard dictionary\n @return: A dictionary which of fields with values.\n \"\"\"\n line_obj = self.pool.get('raw.material.procurement.line')\n res = super(raw_material_procurement, self).default_get(cr, uid, fields, context=context)\n line_ids = []\n for forecast in self.pool.get('sale.forecast.line').browse(cr, uid, context['active_ids']):\n proc_for = forecast.product_uom_qty - forecast.raw_material_qty\n val = {'product_id':forecast.product_id.id,'product_uom_qty':proc_for}\n line_ids.append(line_obj.create(cr, uid, val))\n res.update({'line_ids':line_ids})\n return res\n\n ###########################################################################################\n # Create Procurements Orders\n ###########################################################################################\n def create_procurement(self, cr, uid, ids, context=None):\n \"\"\" Creates procurement order for selected product.\n @param self: The object pointer.\n @param cr: A database cursor\n @param uid: ID of the user currently logged in\n @param ids: List of IDs selected\n @param context: A standard dictionary\n @return: A dictionary which loads Procurement orders form view.\n \"\"\"\n user = self.pool.get('res.users').browse(cr, uid, uid, context=context).login\n proc_obj = self.pool.get('procurement.order')\n prod_obj = self.pool.get('product.product')\n ord_obj = self.pool.get('stock.warehouse.orderpoint')\n wf_service = netsvc.LocalService(\"workflow\")\n for_obj = self.pool.get('sale.forecast.line')\n data = self.read(cr, uid, ids, [], context=context)[0]\n procure_ids = []\n vals = {}\n for forecast in for_obj.browse(cr, uid, context.get('active_ids',False), context=context):\n prod_qty = {forecast.product_id.id:forecast.product_uom_qty - forecast.raw_material_qty}\n raws = for_obj.get_all_raw(cr, uid, prod_qty, context=context) # All raw material for 
1 product\n for raw_id in raws.keys():\n key = forecast.date_stop + str(raw_id)\n if vals.get(key,False):\n vals[key]['product_qty'] += raws[raw_id] \n else:\n vals[key] = ({'name':'INT: '+str(user),\n 'date_planned': forecast.date_mrp_planned,\n 'product_id': raw_id,\n 'product_qty': ceil(raws[raw_id]), \n 'product_uom': prod_obj.browse(cr, uid, raw_id).uom_id.id,\n 'location_id': forecast.lot_stock_id.id,\n 'procure_method':'make_to_order'})\n for v in vals.keys():\n prod = prod_obj.browse(cr, uid, vals[v]['product_id'])\n context.update({'location': vals[v]['location_id'], 'to_date': vals[v]['date_planned']})\n virtual_available = prod_obj.browse(cr, uid, prod.id, context=context).virtual_available\n if (virtual_available - vals[v]['product_qty']) >= 0:\n vals.pop(v,None)\n else:\n op = ord_obj.search(cr, uid, [('product_id','=',prod.id)], limit=1)\n if not op:\n raise osv.except_osv(_('Missing Definition!'), _('No order point defined for product %s') % ('['+prod.code+'] '+prod.name))\n for v in vals.keys():\n prod = prod_obj.browse(cr, uid, vals[v]['product_id'])\n context.update({'location': vals[v]['location_id'], 'to_date': vals[v]['date_planned']})\n virtual_available = prod_obj.browse(cr, uid, prod.id, context=context).virtual_available\n op = ord_obj.browse(cr, uid, ord_obj.search(cr, uid, [('product_id','=',prod.id)], limit=1)[0])\n qty = float(vals[v]['product_qty'] - virtual_available)\n qty_multiple = op.qty_multiple\n sub = qty % qty_multiple\n if sub > 0:\n qty += qty_multiple - sub\n if op.product_id.type not in ('consu'):\n if op.procurement_draft_ids:\n # Check draft procurement related to this order point\n pro_ids = [x.id for x in op.procurement_draft_ids]\n procure_datas = proc_obj.read(\n cr, uid, pro_ids, ['id', 'product_qty'], context=context)\n to_procure = qty\n for proc_data in procure_datas:\n if to_procure >= proc_data['product_qty']:\n wf_service.trg_validate(uid, 'procurement.order', proc_data['id'], 'button_confirm', cr)\n proc_obj.write(cr, uid, [proc_data['id']], {'origin': op.name}, context=context)\n to_procure -= proc_data['product_qty']\n if not to_procure:\n break\n qty = to_procure\n vals[v]['product_qty'] = qty\n procure_id = proc_obj.create(cr, uid, vals[v])\n wf_service.trg_validate(uid, 'procurement.order', procure_id, 'button_confirm', cr)\n procure_ids.append(procure_id)\n## proc_obj.run_scheduler(cr, uid)\n return {\n 'view_type': 'form',\n 'view_mode': 'tree,form',\n 'res_model': 'procurement.order',\n 'type': 'ir.actions.act_window',\n 'context': {},\n 'domain': [('id','in',procure_ids)] or [],\n }\n\n\nraw_material_procurement()\n\nclass raw_material_procurement_line(osv.osv_memory):\n _name = \"raw.material.procurement.line\"\n _description = \"Sale Forecast Raw Material Line\"\n\n _columns = {\n 'raw_id': fields.many2one('raw.material.procurement', 'Forecast Raw Material'),\n 'product_uom_qty': fields.float('Sale Order Quantity', digits_compute= dp.get_precision('Product UoS'), readonly=True),\n 'product_id': fields.many2one('product.product', 'Product', readonly=True),\n }\nraw_material_procurement_line()\n\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"bias_sale_mrp_forecast/wizard/create_procurement_order.py","file_name":"create_procurement_order.py","file_ext":"py","file_size_in_byte":7966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
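One detail of create_procurement in create_procurement_order.py that is easy to misread is the order-point rounding: the shortfall is bumped up to the next multiple of qty_multiple. The same three lines in isolation, with invented numbers:

# Round a required quantity up to the next multiple of the order point's qty_multiple.
qty, qty_multiple = 7.0, 5.0
sub = qty % qty_multiple
if sub > 0:
    qty += qty_multiple - sub
print(qty)  # 10.0 — 7 units are needed, but the product is procured in batches of 5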
+{"seq_id":"176409640","text":"import pygame\r\nimport random\r\nfrom myblock import *\r\nfrom player import * \r\nfrom hipster import *\r\nfrom ghoul import *\r\nimport myconst as const\r\n\r\n# Define some colors\r\nblack = ( 0, 0, 0)\r\nwhite = ( 255, 255, 255)\r\nred = ( 255, 0, 0)\r\ngrey = ( 214, 214, 206)\r\n \r\n# Initialize Pygame\r\npygame.init()\r\npygame.mixer.init()\r\n\r\nsize = [const.screen_width,const.screen_height]\r\nscreen = pygame.display.set_mode(size)\r\npygame.display.set_caption(const.screen_title)\r\n\r\n# Helper functions\r\ndef showBannerText(phrase):\r\n y = const.screen_height / 2\r\n pygame.draw.line(screen, red, [0, y], [const.screen_width, y], 200)\r\n font = pygame.font.Font(const.screen_font, 40)\r\n text = font.render(phrase,True, grey)\r\n screen.blit(text, [150, y-20]) \r\n \r\n \r\n# This is a list of 'sprites'. Each block in the program is\r\n# added to this list. The list is managed by a class called 'Group.'\r\nvictim_group = pygame.sprite.Group()\r\nghoul_group = pygame.sprite.Group()\r\n\r\n# This is a list of every sprite. All blocks and the player block as well.\r\nall_sprites_list = pygame.sprite.Group()\r\n\r\n# Need my own indexed list to keep track of ghouls\r\nghoul_indexed_list = []\r\n\r\n# victim image list\r\nvictim_images = const.hipster_img_list\r\n\r\n# create hipsters\r\nfor i in range(const.initHipsters):\r\n victim = Hipster(random.choice(victim_images),\r\n random.randrange(const.screen_width),\r\n random.randrange(const.screen_height))\r\n \r\n # Add the block to the list of objects\r\n victim_group.add(victim)\r\n all_sprites_list.add(victim)\r\n\r\n# create bad sprites\r\nfor i in range(const.initGhouls):\r\n block = Ghoul(const.ghoul_img_src, random.randrange(const.screen_width), random.randrange(const.screen_height))\r\n \r\n # Set a random location for the block\r\n block.rect.x = random.randrange(const.screen_width)\r\n block.rect.y = random.randrange(const.screen_height)\r\n \r\n # Add the block to the list of objects\r\n ghoul_group.add(block)\r\n all_sprites_list.add(block)\r\n ghoul_indexed_list.append(block)\r\n \r\n# Create a player\r\nplayer = Player(const.player_img_src, const.attack_img_src, 350, 250)\r\nall_sprites_list.add(player)\r\n\r\n#Loop until the user clicks the close button.\r\ndone=False\r\n \r\n# Used to manage how fast the screen updates\r\nclock=pygame.time.Clock()\r\n \r\nscore = 0\r\n \r\n# -------- Main Program Loop -----------\r\nwhile done == False:\r\n # ALL EVENT PROCESSING SHOULD GO BELOW THIS COMMENT\r\n for event in pygame.event.get(): # User did something\r\n if event.type == pygame.QUIT: # If user clicked close\r\n done=True # Flag that we are done so we exit this loop\r\n\r\n # Set the speed based on the key pressed\r\n vp = const.vp\r\n \r\n if event.type == pygame.KEYDOWN:\r\n \r\n if event.key == pygame.K_LEFT:\r\n player.changespeed(vp*-1,0)\r\n if event.key == pygame.K_RIGHT:\r\n player.changespeed(vp,0)\r\n if event.key == pygame.K_UP:\r\n player.changespeed(0,vp*-1)\r\n if event.key == pygame.K_DOWN:\r\n player.changespeed(0,vp)\r\n \r\n \r\n #attack using X\r\n if event.key == pygame.K_x:\r\n player.attack(screen, victim_group)\r\n \r\n #use items by pressing Z \r\n if event.key == pygame.K_z:\r\n \r\n print('buying resurrect')\r\n reztarget = player.useLoot(screen, ghoul_indexed_list)\r\n \r\n #if can rez, \r\n if reztarget != None:\r\n \r\n #kill the reztarget after getting its coords\r\n rezx, rezy = reztarget.rect.x, reztarget.rect.y\r\n reztarget.kill()\r\n \r\n #create new 
hipster in ghoul's place \r\n undead = Hipster(random.choice(victim_images),rezx,rezy)\r\n victim_group.add(undead)\r\n all_sprites_list.add(undead)\r\n ghoul_indexed_list.append(undead)\r\n \r\n # Reset speed when key goes up \r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_LEFT:\r\n player.changespeed(vp,0)\r\n if event.key == pygame.K_RIGHT:\r\n player.changespeed(vp*-1,0)\r\n if event.key == pygame.K_UP:\r\n player.changespeed(0,vp)\r\n if event.key == pygame.K_DOWN:\r\n player.changespeed(0,vp*-1)\r\n \r\n # Reset player img \r\n if event.key == pygame.K_x:\r\n player.revert() \r\n\r\n # -----end EVENT PROCESSING-----\r\n \r\n # -----begin GAME LOGIC---------\r\n \r\n numVictimsAlive = 0\r\n # make hipsters move\r\n for victim in victim_group:\r\n if victim.state == 'alive':\r\n victim.vertigo(const.plat_left, const.plat_right, const.plat_top, const.plat_bot)\r\n victim.noOverlap(victim_group)\r\n numVictimsAlive += 1\r\n victim.update()\r\n \r\n # check if victim falls off platform, which is success\r\n if victim.fallsOff():\r\n score += 1\r\n player.updateGPA(score, const.initGPA, const.maxGPA)\r\n #spawn new ghoul at spawn points in mid-air\r\n spawn = Ghoul(const.ghoul_img_src, const.ghoul_spawn_x_offset, random.randrange(const.screen_height))\r\n #add new ghoul to lists\r\n ghoul_group.add(spawn)\r\n all_sprites_list.add(spawn)\r\n ghoul_indexed_list.append(spawn)\r\n \r\n # make ghouls move towards player\r\n for ghoul in ghoul_group:\r\n ghoul.eatbrains(screen, player)\r\n ghoul.noOverlap(ghoul_group)\r\n ghoul.update()\r\n \r\n # make sure player can't walk through walls\r\n player.hitWall(0, const.plat_right, const.plat_top - const.block_height_y_offset, const.plat_bot)\r\n player.update() \r\n # -----end GAME LOGIC-------------\r\n\r\n # -----begin CODE TO DRAW---------\r\n \r\n # Clear the screen and fill black\r\n screen.fill(black)\r\n \r\n # Draw the background\r\n bg = pygame.image.load(const.screen_bg_img_src).convert()\r\n screen.blit(bg,[0,0])\r\n \r\n # Draw the platform\r\n pygame.draw.line(screen, grey, [const.plat_left, const.plat_ymid], [const.plat_right, const.plat_ymid], const.plat_height)\r\n \r\n \r\n # See if the player block has collided with a victim.\r\n # set to False so that we don't remove the victim\r\n good_hit_list = pygame.sprite.spritecollide(player, victim_group, False)\r\n\r\n # See if player has collided with bad sprite.\r\n # set to True so that we remove the ghoul that collided\r\n bad_hit_list = pygame.sprite.spritecollide(player, ghoul_group, True)\r\n \r\n # Check the list of collisions.\r\n for block in good_hit_list:\r\n goodsound = pygame.mixer.Sound(const.sound_good)\r\n goodsound.play()\r\n\r\n for block in bad_hit_list:\r\n player.gpa -= 1\r\n badsound = pygame.mixer.Sound(const.sound_bad)\r\n badsound.play()\r\n \r\n # Game Over if GPA below 2\r\n if player.gpa <= const.failGPA:\r\n showBannerText(const.text_gameover_gpa)\r\n \r\n # Draw all the sprites\r\n all_sprites_list.draw(screen)\r\n \r\n # Limit to 20 frames per second\r\n clock.tick(20)\r\n\r\n # Print score to screen\r\n font = pygame.font.Font(const.screen_sidebar_font, 18)\r\n\r\n text = font.render('Attack: X',True,white)\r\n screen.blit(text, [620,350])\r\n \r\n text = font.render('Spawn: Z',True,white)\r\n screen.blit(text, [620,370])\r\n \r\n #victim\r\n myscore = 'Victims:'+str(score)\r\n text = font.render(myscore,True,white)\r\n screen.blit(text, [620,250])\r\n \r\n #plunder amount\r\n text = font.render('Plunder: 
$'+str(player.inventory),True,white)\r\n screen.blit(text, [620,270])\r\n\r\n #player gpa\r\n text = font.render('GPA:'+str(player.gpa),True,white)\r\n screen.blit(text, [620,290]) \r\n \r\n # Game Over when player falls off\r\n if player.fallsOff():\r\n showBannerText(const.text_gameover_fell) \r\n\r\n # -----end CODE TO DRAW --------\r\n\r\n # Go ahead and update the screen with what we've drawn.\r\n pygame.display.flip()\r\n \r\npygame.quit()","sub_path":"GAME.py","file_name":"GAME.py","file_ext":"py","file_size_in_byte":8348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"9278670","text":"\"\"\"\nA simple application shows how to load(or read) data into spark dataframe\nRun with spark2-submit simple_with_session.py\nAuthor- Andy\n\"\"\"\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import *\n\ndef readdata(spark):\n # Create a dataframe manually, from a list\n df = spark.createDataFrame([(1,'Andy'),(2,'mandy'),(3,'sandy')])\n print(type(df))\n df.show()\n df.printSchema()\n \n # Loading DataFrames: CSV method 1 using spark.read.csv('path')\n player_df =spark.read.option('header', True).csv('gs://dataproc-staging-us-east1-548550014762-rie5an4z/notebooks/jupyter/player.csv')\n player_df.show(5, False)\n player_df.printSchema()\n \n # Loading DataFrames: CSV method 2 using spark.read.format('csv').load('path')\n player_df = spark.read.format(\"csv\").load('gs://dataproc-staging-us-east1-548550014762-rie5an4z/notebooks/jupyter/player.csv')\n player_df.show(5, False)\n player_df.printSchema()\n\n # Loading JSON\n people_jsondf = spark.read.json('gs://dataproc-staging-us-east1-548550014762-rie5an4z/notebooks/jupyter/people.json')\n people_jsondf.printSchema()\n people_jsondf.show(5)\n \n # In the same way we can upload other formats such as parquet etc \n \n # Infer Schema\n player_headersdF = spark.read.option(\"inferSchema\", \"true\").option('header', True).csv('gs://dataproc-staging-us-east1-548550014762-rie5an4z/notebooks/jupyter/player.csv')\n player_headersdF.printSchema()\n player_headersdF.show(5)\n\n\n # Best practice it is explicitly define the schema\n playerSchema = \\\n StructType([\n StructField(\"id\", IntegerType()),\n StructField(\"player_api_id\", IntegerType()),\n StructField(\"player_name\", StringType()),\n StructField(\"player_fifa_api_id\", IntegerType()),\n StructField(\"birthday\", TimestampType()), \n StructField(\"height\", FloatType()),\n StructField(\"weight\", FloatType())\n ])\n\n player_schemadf = spark.read.schema(playerSchema).csv('gs://dataproc-staging-us-east1-548550014762-rie5an4z/notebooks/jupyter/player.csv')\n player_schemadf.printSchema()\n player_schemadf.schema\n player_schemadf.dtypes\n player_schemadf.columns\n\n\n\nif __name__ == \"__main__\":\n spark = SparkSession.builder.appName(\"Simple with Session\").getOrCreate()\n readdata(spark)\n \n spark.stop()\n","sub_path":"Code/load-df.py","file_name":"load-df.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"646471057","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nimport sys\nsys.path.append('../')\n\n\n#import parameters\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef InletArea(m_sulphur,airtofuel,t_cruise,M,a_cruise,rho):\n #function that calculates the required inlet area of the burner, using the altitude, sulphur mass, cruise mach number\n #and air to fuel ratio as an input\n \n v=a_cruise*M #cruise speed \n m_dotS=m_sulphur/t_cruise #mass flow of sulphur [kg/s]\n m_dotair=(airtofuel)*m_dotS #required air mass flow [kg/s]\n m_dottotal=m_dotS+m_dotair\n return m_dottotal/v/rho #required inlet area [m2]\n\n\ndef SulphursMassdisprate(m_sulphur,t_cruise,M_cruise,a_cruise):\n #outputs Sulphur dispersion rate\n #input total sulphur mass, cruise time, cruise Mach number and speed of sound during cruise\n \n v=a_cruise*M_cruise #cruisespeed [m/s]\n return m_sulphur/t_cruise/v #[kg/m]\n\ndef BurnerMass(m_sulphur,t_cruise,airtofuel):\n #function that calculates the burner mass, using the sulphur mass, cruise time and air to fuel ratio\n \n m_dotS=m_sulphur/t_cruise #mass flow of sulphur [kg/s]\n m_dotair=airtofuel*m_dotS #required air mass flow [kg/s]\n m_dottotal=m_dotS+m_dotair #total outflow [kg/s]\n \n return 1.6*(304*m_dottotal**0.9)**0.7 # burner mass [kg]\n\ndef SulphurtankVolume(m_sulphur,rho_sulphur):\n #outputs sulphur tank volume\n #inputs total sulphur mass and sulphur density\n\n return m_sulphur/rho_sulphur #[m^3]\n\ndef SulphurtankLength(m_sulphur,rho_sulphur,d_tank):\n #outputs sulphur tank length\n #inputs total sulphur mass, sulphur density and tank diameter\n V_sphere=d_tank**3/6*np.pi # Volume of 2 half spheres [m^3]\n if V_sphere>SulphurtankVolume(m_sulphur,rho_sulphur):\n raise ValueError('to large d_tank')\n V_cyl=SulphurtankVolume(m_sulphur,rho_sulphur)-V_sphere # Volume of cylindrical part of the tank [m^3]\n l_cyl=V_cyl/(d_tank**2*np.pi/4) # Length of cylindrical part of the tank [m]\n return l_cyl+d_tank #[m]\n\ndef SulphurtankMass(m_sulphur,rho_sulphur,d_tank,t_tank,rho_tank):\n #outputs sulphur tank weight\n #inputs total sulphur mass, sulphur density, tank diameter, tank thickness and tank material density\n V_sphere=d_tank**3/6*np.pi # Volume of 2 half spheres [m^3]\n if V_sphere>SulphurtankVolume(m_sulphur,rho_sulphur):\n raise ValueError('to large d_tank')\n V_cyl=SulphurtankVolume(m_sulphur,rho_sulphur)-V_sphere # Volume of cylindrical part of the tank [m^3]\n l_cyl=V_cyl/(d_tank**2*np.pi/4) # Length of cylindrical part of the tank [m]\n A_sphere=d_tank**2*np.pi # Surface area of 2 half spheres [m^2]\n A_cyl=d_tank*np.pi*l_cyl # Surface area of cylindrical part of the tank [m]\n return (A_sphere+A_cyl)*t_tank*rho_tank #[kg]\n\n\n##diameters=np.arange(1,2.5,0.01)\n##weights=np.array([])\n##\n##for D in diameters:\n## weights=np.append(weights,SulphurtankMass(10000,1121,D,0.003,2700))\n##\n##plt.plot(diameters,weights)\n##plt.show()\n\n","sub_path":"build/lib/A22DSE/Models/POPS/Current/payloadcalculations.py","file_name":"payloadcalculations.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"484981480","text":"# -*- coding: utf-8 -*-\n\"\"\"\n theonestore\n https://github.com/kapokcloud-inc/theonestore\n ~~~~~~~~~~~\n :copyright: © 2018 by the Kapokcloud Inc.\n :license: BSD, see LICENSE for more details.\n\"\"\"\n\nfrom app.helpers import (\n create_app, \n enable_logging,\n register_blueprint\n)\nfrom app.routes import ROOT_ROUTES\n\napp = create_app()\nenable_logging(app)\nregister_blueprint(app, ROOT_ROUTES)\n\nif __name__ == '__main__':\n app.config.from_pyfile('app/config/config.dev.cfg')\n app.run(host='127.0.0.1', debug=True, port=1105)\n\n","sub_path":"runapp.py","file_name":"runapp.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"332987703","text":"# -*- coding: utf8 -*-\nimport base64\nimport os\nimport settings\n\n\ndef create_ga_screen_result(task_uid):\n result_path = os.path.join(settings.RESULTS_FOLDER, task_uid)\n result = {}\n\n for file_name in os.listdir(result_path):\n if '.png' in file_name:\n file_path = os.path.join(result_path, file_name)\n screen_type = file_name.split('.png')[0]\n with open(file_path) as f:\n content = f.read()\n result[screen_type] = base64.b64encode(content)\n\n return result\n","sub_path":"lib/result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"567846832","text":"class State:\n def __init__(self, q, sym):\n self.q = q # state\n self.sym = sym # symbol\n\n\nclass Rule:\n def __init__(self, text, cur_q, next_q, cur_sym, next_sym, cmd):\n self.text = text # content of rules\n self.cur_q = cur_q # current state\n self.next_q = next_q # next state\n self.cur_sym = cur_sym # current symbol\n self.next_sym = next_sym # next symbol\n self.cmd = cmd # move command\n\n\nclass MT:\n rules = []\n cur_state = State('', '')\n\n def __init__(self, mem, beg_state, width):\n self.mem = mem\n self.width = width\n self.cur_pos = 0\n self.cur_state.q = beg_state\n self.cur_state.sym = mem[self.cur_pos]\n self.r_num = 0\n\n def get_sym(self):\n return self.mem[self.cur_pos]\n\n def get_state(self):\n return self.cur_state.q\n\n def get_mem(self):\n return self.mem\n\n def add_rule(self, rule):\n self.rules.append(Rule(rule, rule[0:self.width],\n rule[(rule.find('->') + 2):((rule.find('->') + 2) + self.width)],\n rule[self.width], rule[len(rule) - 2], rule[len(rule) - 1]))\n self.r_num += 1\n return True\n\n def process(self, i):\n if self.rules[i].cmd == 'L':\n self.cur_pos += -1\n elif self.rules[i].cmd == 'R':\n self.cur_pos += 1\n self.cur_state.q = self.rules[i].next_q\n\n if self.cur_pos < 0:\n return False\n\n if self.cur_pos >= len(self.mem):\n self.mem = self.mem + \" \"\n self.cur_state.sym = self.mem[self.cur_pos]\n\n def step(self):\n i = 0\n while i < self.r_num:\n if self.cur_state.q == self.rules[i].cur_q and self.cur_state.sym == self.rules[i].cur_sym:\n with open(\"C:/Users/Admin/PycharmProjects/tvp2/result.txt\", \"a\") as file:\n file.write('Before: {} {} {}\\n'.format(self.get_mem(), self.get_state(), self.get_sym()))\n self.mem = self.mem[0:self.cur_pos] + self.rules[i].next_sym \\\n + self.mem[self.cur_pos + 1:len(self.mem)]\n\n with open(\"C:/Users/Admin/PycharmProjects/tvp2/result.txt\", \"a\") as file:\n file.write('After: {} {} {} {}\\n'.format(self.get_mem(), self.get_state(), self.get_sym(), self.rules[i].cmd))\n\n self.process(i)\n i += 1\n\n\nif __name__ == '__main__':\n with open('C:/Users/Admin/PycharmProjects/tvp2/entrance_tape.txt') as in_file:\n for line in in_file:\n line = line.replace('\\n', '')\n mt = MT(line, 'q00', 3)\n\n print('{} {} {}'.format(mt.get_mem(), mt.get_state(), mt.get_sym()))\n\n with open('C:/Users/Admin/PycharmProjects/tvp2/rules.txt') as in_file:\n for line in in_file:\n line = line.replace('\\n', '')\n mt.add_rule(line)\n\n while mt.get_state() != 'q09':\n mt.step()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"537468901","text":"from __future__ import print_function\n\nimport sys\nimport argparse\nfrom pprint import pprint\n\n\nfrom ssdp import SSDP\nimport upnp\nfrom utils.xml import string_to_xml_to_dict, xml_dict_get\nfrom dlna.contentdirectory import ContentDirectory\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('Basic UPNP/DLNA command line client')\n parser.add_argument('--name', help='Name of device to look for')\n parser.add_argument('--service', help='Service to look for')\n parser.add_argument('-v', '--verbose', action='store_true', help='Service to look for')\n parser.add_argument('command', nargs=\"*\", help='Service to look for')\n args = parser.parse_args()\n\n if 'list' in args.command and len(args.command) > 1:\n print(\"The 'list' command doesn't play well with others. You may not get the expected results.\")\n\n filter_devices = 'list' not in args.command\n\n if len(args.command) == 0:\n print(\"Nothing to do. Please specify a command!\\nAvailable commands:\\n\")\n print(\" list - list all available services\")\n print(\" detail - show details of filtered services (use with --name and --service)\")\n print(\" browse - try and browse a ContentDirectory service\\n\")\n sys.exit(0)\n\n sdp = SSDP()\n sdp.discover()\n if args.verbose:\n sdp.dump()\n\n if len(sdp) == 0:\n print(\"No devices found...\")\n sys.exit(0)\n\n possible = []\n targets = []\n\n if filter_devices and args.name is not None:\n for dvc in sdp:\n if dvc.friendlyName is None:\n if args.verbose:\n print(\"Skipping device {} as no friendly name available.\".format(dvc.host_string()))\n continue\n if args.name in dvc.friendlyName:\n possible.append(dvc)\n else:\n possible = [dvc for dvc in sdp]\n\n if len(possible) == 0:\n print(\"No matches found for name '{}'. Exiting.\".format(args.name))\n sys.exit(0)\n possible = sorted(possible, key=lambda x: x.host_string())\n\n if filter_devices and args.service is not None:\n for dvc in possible:\n for svc in dvc.services:\n if args.service in svc.serviceType:\n targets.append(svc) \n else:\n for poss in possible:\n targets.extend(poss.services)\n\n if len(targets) == 0:\n print(\"No services found that match your criteria. Exiting...\")\n sys.exit(0)\n targets = sorted(targets, key=lambda x: x.serviceType)\n\n if 'browse' in args.command:\n target = None\n for poss in targets:\n if 'ContentDirectory' in poss.serviceType:\n target = poss\n break\n\n if target is None:\n print(\"Unable to find a ContentDirectory to browse...\")\n sys.exit(0)\n\n cdd = ContentDirectory(target)\n print(\"Browsing ContentDirectory for {}\".format(target.device.friendlyName))\n print(\"System Update ID: {}\".format(cdd.updateId))\n\n if cdd.Browse(RequestedCount=10):\n while True:\n cdd.display_containers()\n print(\"\\nEnter folder number to browse. 'U' to go up. 'I' for expanded information. 
Return or 'Q' to exit.\")\n opt = raw_input(\"Enter folder number to browse [return to exit]: \")\n opt = opt.strip()\n if opt == '' or opt in ['Q', 'q']:\n break\n if opt in ['U', 'u']:\n nxt = cdd.parent or 0\n elif opt in ['I', 'i']:\n cdd.display_containers(True)\n continue\n else:\n nxt = cdd.id_for_container(int(opt) - 1)\n if nxt is None:\n break\n if not cdd.Browse(nxt, RequestedCount=10):\n break\n\n if 'list' in args.command:\n print(\"Listing all services found.\")\n typ = None\n for tgt in targets:\n if typ != tgt.serviceType:\n print(\"\\n{}\".format(tgt.serviceType))\n print(\" {:20s} - {}\".format(tgt.device.host_string(),\n tgt.device.friendlyName))\n typ = tgt.serviceType\n\n if 'detail' in args.command:\n for tgt in targets:\n print(tgt.dump())\n\n","sub_path":"simpleupnp/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":4359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"346984458","text":"from pairoptions import pairoptions\nimport MatlabFuncs as m\nfrom WriteData import WriteData\n\nclass verbose(object):\n\t\"\"\"\n\tVERBOSE class definition\n\n\t Available verbosity levels:\n\t mprocessor : model processing \n\t module : modules\n\t solution : solution sequence\n\t solver : solver info (extensive)\n\t convergence : convergence criteria\n\t control : control method\n\t qmu : sensitivity analysis\n\t autodiff : AD analysis\n\t smb : SMB analysis\n\n\t Usage:\n\t verbose=verbose();\n\t verbose=verbose(3);\n\t verbose=verbose('001100');\n\t verbose=verbose('module',True,'solver',False);\n\n\tWARNING: some parts of this file are Synchronized with src/c/shared/Numerics/Verbosity.h\n\t Do not modify these sections. See src/c/shared/Numerics/README for more info\n\t\"\"\"\n\n\tdef __init__(self,*args): # {{{\n\t\t#BEGINFIELDS\n\t\tself.mprocessor = False\n\t\tself.module = False\n\t\tself.solution = False\n\t\tself.solver = False\n\t\tself.convergence = False\n\t\tself.control = False\n\t\tself.qmu = False\n\t\tself.autodiff = False\n\t\tself.smb = False\n\t\t#ENDFIELDS\n\n\t\tif not len(args):\n\t\t\t#Don't do anything\n\t\t\tself.solution=True;\n\t\t\tself.qmu=True;\n\t\t\tself.control=True;\n\t\t\tpass\n\n\t\telif len(args) == 1:\n\t\t\tbinary=args[0]\n\t\t\tif isinstance(binary,(str,unicode)):\n\t\t\t\tif binary.lower()=='all':\n\t\t\t\t\tbinary=2**11-1 #all ones\n\t\t\t\t\tself.BinaryToVerbose(binary)\n\t\t\t\t\tself.solver=False #Do not use by default\n\t\t\t\telse:\n\t\t\t\t\tbinary=int(binary,2)\n\t\t\t\t\tself.BinaryToVerbose(binary)\n\t\t\telif isinstance(binary,(int,long,float)):\n\t\t\t\tself.BinaryToVerbose(int(binary))\n\n\t\telse:\n\t\t\t#Use options to initialize object\n\t\t\tself=pairoptions(*args).AssignObjectFields(self)\n\n\t\t\t#Cast to logicals\n\t\t\tlistproperties=vars(self)\n\t\t\tfor fieldname,fieldvalue in listproperties.iteritems():\n\t\t\t\tif isinstance(fieldvalue,bool) or isinstance(fieldvalue,(int,long,float)):\n\t\t\t\t\tsetattr(self,fieldname,bool(fieldvalue))\n\t\t\t\telse:\n\t\t\t\t\traise TypeError(\"verbose supported field values are logicals only (True or False)\")\n\t# }}}\n\tdef __repr__(self): # {{{\n\t\t\t\n\t\t#BEGINDISP\n\t\ts =\"class '%s' = \\n\" % type(self)\n\t\ts+=\" %15s : %s\\n\" % ('mprocessor',self.mprocessor)\n\t\ts+=\" %15s : %s\\n\" % ('module',self.module)\n\t\ts+=\" %15s : %s\\n\" % ('solution',self.solution)\n\t\ts+=\" %15s : %s\\n\" % ('solver',self.solver)\n\t\ts+=\" %15s : %s\\n\" % ('convergence',self.convergence)\n\t\ts+=\" %15s : %s\\n\" % ('control',self.control)\n\t\ts+=\" %15s : %s\\n\" % ('qmu',self.qmu)\n\t\ts+=\" %15s : %s\\n\" % ('autodiff',self.autodiff)\n\t\ts+=\" %15s : %s\\n\" % ('smb',self.smb)\n\t\t#ENDDISP\n\n\t\treturn s\n\t# }}}\n\tdef VerboseToBinary(self): # {{{\n\n\t\t#BEGINVERB2BIN\n\t\tbinary=0\n\t\tif self.mprocessor:\n\t\t\tbinary=binary | 1\n\t\tif self.module:\n\t\t\tbinary=binary | 2\n\t\tif self.solution:\n\t\t\tbinary=binary | 4\n\t\tif self.solver:\n\t\t\tbinary=binary | 8\n\t\tif self.convergence:\n\t\t\tbinary=binary | 16\n\t\tif self.control:\n\t\t\tbinary=binary | 32\n\t\tif self.qmu:\n\t\t\tbinary=binary | 64\n\t\tif self.autodiff:\n\t\t\tbinary=binary | 128\n\t\tif self.smb:\n\t\t\tbinary=binary | 256\n\t\t#ENDVERB2BIN\n\n\t\treturn binary\n\t# }}}\n\tdef BinaryToVerbose(self,binary): # {{{\n\n\t\t#BEGINBIN2VERB\n\t\tself.mprocessor =bool(binary & 1)\n\t\tself.module =bool(binary & 2)\n\t\tself.solution =bool(binary & 4)\n\t\tself.solver 
=bool(binary & 8)\n\t\tself.convergence=bool(binary & 16)\n\t\tself.control =bool(binary & 32)\n\t\tself.qmu =bool(binary & 64)\n\t\tself.autodiff =bool(binary & 128)\n\t\tself.smb =bool(binary & 256)\n\t\t#ENDBIN2VERB\n\t# }}}\n\tdef checkconsistency(self,md,solution,analyses): # {{{\n\t\treturn md\n\t# }}}\n\tdef marshall(self,prefix,md,fid): # {{{\n\t\tWriteData(fid,prefix,'data',self.VerboseToBinary(),'name','md.verbose','format','Integer')\n\t# }}}\n","sub_path":"issm/verbose.py","file_name":"verbose.py","file_ext":"py","file_size_in_byte":3775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"324823654","text":"from ihsa_oop import Problem\nfrom PyQt4 import QtGui\nimport sys\nimport ihsagui\n\n\nclass IHSAApp(QtGui.QMainWindow, ihsagui.Ui_MainWindow):\n\tdef __init__(self):\n\t\tsuper(self.__class__, self).__init__()\n\t\tself.setupUi(self)\n\t\tself.Oblicz.clicked.connect(self.funkcjaoblicz)\n\t\tself.Zamknij.clicked.connect(self.funkcjazamknij)\n\tdef funkcjaoblicz(self):\n\t\tranges = []\n\t\tranges.append(float(self.dg1.value()))\n\t\tranges.append(float(self.gg1.value()))\n\t\tranges.append(float(self.dg2.value()))\n\t\tranges.append(float(self.gg2.value()))\t\t\n\t\tfunction = self.ownFunction.text()\n\t\tif function: \n\t\t\tif 'x3' in function:\n \t\t\t\tdimensions = 3\n \t\t\t\tranges.append(float(self.dg3.value()))\n \t\t\t\tranges.append(float(self.gg3.value()))\n\t\t\telse:\n \t\t\t\tdimensions = 2\n\t\telse:\n \t\t\tdimensions = 0\n\n\n\t\tprob = Problem(function, dimensions, ranges)\t\t\n\t\tprob.draw_contour(ranges)\n\t\tprob.solve()\n\n\t\twyniki = prob.harmony_memory\n\t\tfor i in range(7):\n\t\t\tx1 = QtGui.QTableWidgetItem(str(wyniki[i][0]))\n\t\t\tx2 = QtGui.QTableWidgetItem(str(wyniki[i][1]))\n\t\t\tself.tableWidget.setItem(i, 0, x1)\n\t\t\tself.tableWidget.setItem(i, 1, x2)\n\t\t\tif dimensions == 3:\n\t\t\t\tx3 = QtGui.QTableWidgetItem(str(wyniki[i][2]))\n\t\t\t\tself.tableWidget.setItem(i, 2, x3)\n\t\twartosci = prob.values\n\t\tfor i in range(7):\n\t\t\ty = QtGui.QTableWidgetItem(str(wartosci[i]))\n\t\t\tself.tableWidget.setItem(i, 3, y)\n\n\n\tdef funkcjazamknij(self):\n\t\tsys.exit()\n\ndef main():\n\tapp = QtGui.QApplication(sys.argv)\n\tform = IHSAApp()\n\tform.show()\n\tapp.exec_()\n\t\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"ihsamain.py","file_name":"ihsamain.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"529984748","text":"#! /usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom contextlib import contextmanager\n\nimport pygame\n\nfrom .config import *\nfrom .utils.display import get_font\nfrom .utils.data_parser import load_game_group, dump_game_group\nfrom .scene.basic_scenes import *\n\n__author__ = 'fyabc'\n\n\nclass Game:\n def __init__(self):\n # Initialize pygame.\n pygame.init()\n self.main_window = pygame.display.set_mode(WindowSize)\n self.timer = pygame.time.Clock()\n self.gFont = get_font()\n\n pygame.display.set_caption(GameTitle)\n\n # Game initialize.\n self.previous_scene_id = None\n self.current_scene_id = 0\n self.args_between_scenes = []\n self.scenes = {}\n\n # Data initialize.\n self.game_groups_data = {\n game_group_name: load_game_group(game_group_name)\n for game_group_name in GameGroups\n }\n\n scene_map = {\n 'MainMenu': 0,\n 'HelpMenu': 1,\n 'GameSelectMenu': 2,\n 'GameMainMenu': 3,\n # ('LevelScene', 'basic'): 'basic',\n }\n\n self.add_scene(0, MainMenu, scene_map)\n self.add_scene(1, HelpMenu)\n self.add_scene(2, GameSelectMenu, scene_map)\n self.add_scene(3, GameMainMenu, scene_map)\n\n @contextmanager\n def _game_manager(self):\n yield\n\n pygame.quit()\n\n # Save some data of the game.\n print('Saving game status... ', end='')\n for game_group_data in self.game_groups_data.values():\n dump_game_group(game_group_data)\n print('done')\n\n print('The game is quited!')\n sys.exit(0)\n\n def add_scene(self, scene_id, scene_type, *args, **kwargs):\n self.scenes[scene_id] = scene_type(self, scene_id, *args, **kwargs)\n\n def run(self):\n with self._game_manager():\n while True:\n scene = self.scenes[self.current_scene_id]\n\n result = scene.run(self.previous_scene_id, *self.args_between_scenes)\n\n if hasattr(result, '__len__'):\n next_scene_id, *self.args_between_scenes = result\n else:\n next_scene_id = result\n self.args_between_scenes = []\n\n if next_scene_id == MainMenu.QuitID:\n break\n\n self.previous_scene_id, self.current_scene_id = self.current_scene_id, next_scene_id\n","sub_path":"Shift_pygame/Shift_pygame/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"82"}
+{"seq_id":"265751468","text":"class Solution(object):\n def superPow(self, a, b):\n \"\"\"\n :type a: int\n :type b: List[int]\n :rtype: int\n \"\"\"\n b.reverse()\n record = a%1337\n remainder = 1\n for i in xrange(len(b)):\n \tif i != 0:\n \t\trecord = pow(record,10)%1337\n \t\tfactor1 = record\n \telse:\n \t\tfactor1 = record\n \tremainder = (remainder*pow(factor1,b[i]))%1337\n return remainder","sub_path":"372_Super_Pow.py","file_name":"372_Super_Pow.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"357585036","text":"# Copyright (c) 2016 Intel Corporation.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport argparse\n\nfrom vpnclient.utils import FH\nfrom vpnclient.v1_0.command_list import ListCommand\nfrom vpnclient.v1_0.command_resource import CommandResource\nfrom vpnclient.v1_0.vpn.utils_vpn import (\n check_lifetime_value, help_algorithm_options, help_dh_options,\n check_name_len, DefaultList)\nfrom vpnclient.v1_0.vpn.vpn_choices import (\n IKEV1_INTEGRITY_ALGORITHM, IKEV2_INTEGRITY_ALGORITHM,\n IKEV1_ENCRYPTION_ALGORITHM, IKEV2_ENCRYPTION_ALGORITHM,\n DH_GROUP, LIFETIME_UNITS\n)\n\n_COMMAND_COLUMNS = [\n 'id',\n 'name',\n 'description',\n 'encryption_algorithm',\n 'integrity_algorithm',\n 'dh_group',\n 'phase1_negotiation_mode',\n 'lifetime_value',\n 'lifetime_units',\n 'ike_version',\n 'rekey',\n 'reauth',\n]\n\n_HTTP_RESOURCE = 'ikepolicies'\n\n# IKEPolicy Attributes' Choices\n\n_IKE_PHASE1_MODE = [\n 'aggressive',\n 'main',\n]\n\n_IKE_VERSION = [\n 'v1',\n 'v2',\n]\n\n_IKE_REKEY = [\n 'yes',\n 'no',\n]\n\n_IKE_REAUTH = [\n 'yes',\n 'no',\n]\n\n\ndef common_verify_ikepolicy_arguments(attrs):\n\n if attrs.get('ike_version', None) == 'v1':\n\n if not set(attrs.get('encryption_algorithm', [])).issubset(\n set(IKEV1_ENCRYPTION_ALGORITHM)):\n error_msg = _(\"encryption-algorithm has invalid choice(s) for IKE \"\n \"version 1\")\n raise argparse.ArgumentTypeError(error_msg)\n\n if not set(attrs.get('integrity_algorithm', [])).issubset(\n set(IKEV1_INTEGRITY_ALGORITHM)):\n error_msg = _(\"integrity-algorithm has invalid choice(s) for IKE \"\n \"version 1\")\n raise argparse.ArgumentTypeError(error_msg)\n\n if attrs.get('ike_version', None) == 'v2':\n\n if not set(attrs.get('encryption_algorithm', [])).issubset(\n set(IKEV2_ENCRYPTION_ALGORITHM)):\n error_msg = _(\"encryption-algorithm has invalid choice(s) for IKE \"\n \"version 2\")\n raise argparse.ArgumentTypeError(error_msg)\n\n if not set(attrs.get('integrity_algorithm', [])).issubset(\n set(IKEV2_INTEGRITY_ALGORITHM)):\n error_msg = _(\"integrity-algorithm has invalid choice(s) for IKE \"\n \"version 2\")\n raise argparse.ArgumentTypeError(error_msg)\n\n dh_group = attrs.get('dh_group')\n if dh_group is not None and not set(dh_group).issubset(\n set(DH_GROUP)):\n error_msg = _(\"dh_group has invalid choice(s)\")\n raise argparse.ArgumentTypeError(error_msg)\n\n if (attrs.get('ike_version', None) == 'v1' and\n attrs.get('reauth', None) == 'no'):\n error_msg = _(\"reauth value 'no' is an invalid choice for IKE \"\n \"version 1\")\n raise argparse.ArgumentTypeError(error_msg)\n\n if (attrs.get('ike_version', None) == 'v1' and\n len(attrs.get('encryption_algorithm', [])) > 1):\n error_msg = _(\"Multiple encryption-algorithm values are not applicable \"\n \"for IKE version 1\")\n raise argparse.ArgumentTypeError(error_msg)\n\n if (attrs.get('ike_version', None) == 'v1' and\n len(attrs.get('auth_algorithm', [])) > 1):\n error_msg = _(\"Multiple integrity-algorithm values are not applicable 
\"\n \"for IKE version 1\")\n raise argparse.ArgumentTypeError(error_msg)\n\n if (attrs.get('ike_version', None) == 'v1' and\n len(attrs.get('dh_group', [])) > 1):\n error_msg = _(\"Multiple dh_group values are not applicable for IKE \"\n \"version 1\")\n raise argparse.ArgumentTypeError(error_msg)\n\n\nclass CreateIKEPolicy(CommandResource):\n \"\"\"Create an IKEPolicy\"\"\"\n resource = 'ikepolicy'\n cmd_columns = _COMMAND_COLUMNS\n http_resource = _HTTP_RESOURCE\n\n @staticmethod\n def add_known_arguments(parser):\n parser.add_argument(\n 'name',\n metavar='NAME',\n type=check_name_len,\n help=FH(_(\"Name of the IKEPolicy\")))\n\n parser.add_argument(\n '--description',\n default='',\n help=FH(_(\"Description of the IKEPolicy\")))\n\n parser.add_argument(\n '--ike-version',\n default='v2',\n choices=_IKE_VERSION,\n help=FH(_(\"IKE version in lowercase, Default: v2\")))\n\n parser.add_argument(\n '--encryption-algorithm',\n default=DefaultList(['aes128']),\n action='append',\n help=FH(_(\n \"Encryption algorithm in lowercase, Default: aes-128 \\n\"\n \"For IKE version 2, repeat this option to specify multiple \\n\"\n \"encryption-algorithms\") +\n help_algorithm_options('ike', 'encryption')))\n\n parser.add_argument(\n '--integrity-algorithm',\n default=DefaultList(['sha1']),\n action='append',\n help=FH(_(\n \"Authentication algorithm in lowercase, Default: sha1 \\n\"\n \"For IKE version 2, repeat this option to specify multiple \\n\"\n \"integrity-algorithms\") +\n help_algorithm_options('ike', 'integrity')))\n\n parser.add_argument(\n '--dh-group',\n default=DefaultList(['modp1536']),\n action='append',\n help=FH(_(\n \"Diffie-Hellman dhgroup in lowercase, Default: modp1536 \\n\"\n \"For IKE version 2, repeat this option to specify multiple \\n\"\n \"dh-groups\") + help_dh_options()))\n\n parser.add_argument(\n '--phase1-negotiation-mode',\n default='main',\n choices=_IKE_PHASE1_MODE,\n help=FH(_(\"IKE Phase1 negotiation mode in lowercase, \\n\"\n \"Default: main\")))\n\n parser.add_argument(\n '--lifetime-value',\n type=check_lifetime_value,\n default='3600',\n help=FH(_(\"IKE lifetime value of the security association, \\n\"\n \"Default: 3600\")))\n\n parser.add_argument(\n '--lifetime-units',\n default='seconds',\n choices=LIFETIME_UNITS,\n help=FH(_(\"IKE lifetime units of the security association in \\n\"\n \"lowercase, Default: seconds\")))\n\n parser.add_argument(\n '--rekey',\n default='yes',\n choices=_IKE_REKEY,\n help=FH(_(\"Whether a connection should be renegotiated when it \\n\"\n \"is about to expire in lowercase, Default: yes\")))\n\n parser.add_argument(\n '--reauth',\n default='yes',\n choices=_IKE_REAUTH,\n help=FH(_(\"whether rekeying should also reauthenticate the peer \\n\"\n \"in lower case, Default: yes \\n\"\n \"Option 'no' is only valid for IKE version 1.\")))\n\n return parser\n\n @staticmethod\n def verify_arguments(attrs):\n common_verify_ikepolicy_arguments(attrs)\n\n\nclass ShowIKEPolicy(CommandResource):\n \"\"\"Show information of a given IKEPolicy\"\"\"\n resource = 'ikepolicy'\n cmd_columns = _COMMAND_COLUMNS\n http_resource = _HTTP_RESOURCE\n\n @staticmethod\n def add_known_arguments(parser):\n parser.add_argument(\n 'id',\n metavar='IKEPOLICY',\n help=FH(_(\"ID or Name of IKEPolicy to search\")))\n\n return parser\n\n\nclass ListIKEPolicy(CommandResource):\n \"\"\"List IKEPolicies\"\"\"\n resource = 'ikepolicy'\n cmd_columns = _COMMAND_COLUMNS\n http_resource = _HTTP_RESOURCE\n\n @staticmethod\n def add_known_arguments(parser):\n return 
ListCommand.add_args(parser)\n\n\nclass UpdateIKEPolicy(CommandResource):\n \"\"\"Update a given IKEPolicy\"\"\"\n resource = 'ikepolicy'\n cmd_columns = _COMMAND_COLUMNS\n http_resource = _HTTP_RESOURCE\n\n @staticmethod\n def add_known_arguments(parser):\n parser.add_argument(\n 'id',\n metavar='IKEPOLICY',\n help=FH(_(\"ID or Name of IKEPolicy to update\")))\n\n parser.add_argument(\n '--name',\n type=check_name_len,\n help=FH(_(\"Name of the IKEPolicy\")))\n\n parser.add_argument(\n '--description',\n help=FH(_(\"Description of the IKEPolicy\")))\n\n parser.add_argument(\n '--ike-version',\n choices=_IKE_VERSION,\n help=FH(_(\"IKE version in lowercase\")))\n\n parser.add_argument(\n '--encryption-algorithm',\n action='append',\n help=FH(_(\n \"Encryption algorithm in lowercase. \\n\"\n \"For IKE version 2, repeat this option to specify multiple \\n\"\n \"encryption-algorithms\") +\n help_algorithm_options('ike', 'encryption')))\n\n parser.add_argument(\n '--integrity-algorithm',\n action='append',\n help=FH(_(\n \"Authentication algorithm in lowercase. \\n\"\n \"For IKE version 2, repeat this option to specify multiple \\n\"\n \"integrity-algorithms\") +\n help_algorithm_options('ike', 'integrity')))\n\n parser.add_argument(\n '--dh-group',\n action='append',\n help=FH(_(\n \"Diffie-Hellman dhgroup in lowercase. \\n\"\n \"For IKE version 2, repeat this option to specify multiple \\n\"\n \"dh-groups\") + help_dh_options()))\n\n parser.add_argument(\n '--phase1-negotiation-mode',\n choices=_IKE_PHASE1_MODE,\n help=FH(_(\"IKE Phase1 negotiation mode in lowercase\")))\n\n parser.add_argument(\n '--lifetime-value',\n type=int,\n help=FH(_(\"IKE lifetime value of the security association\")))\n\n parser.add_argument(\n '--lifetime-units',\n choices=LIFETIME_UNITS,\n help=FH(_(\"IKE lifetime units of the security association in \\n\"\n \"lowercase\")))\n\n parser.add_argument(\n '--rekey',\n choices=_IKE_REKEY,\n help=FH(_(\"Whether a connection should be renegotiated when it \\n\"\n \"is about to expire\")))\n\n parser.add_argument(\n '--reauth',\n choices=_IKE_REAUTH,\n help=FH(_(\"Whether rekeying should also reauthenticate the peer. \\n\"\n \"Option 'no' is only valid for IKE version 1\")))\n\n return parser\n\n @staticmethod\n def verify_arguments(attrs):\n common_verify_ikepolicy_arguments(attrs)\n\n\nclass DeleteIKEPolicy(CommandResource):\n \"\"\"Delete a given IKEPolicy\"\"\"\n resource = 'ikepolicy'\n cmd_columns = _COMMAND_COLUMNS\n http_resource = _HTTP_RESOURCE\n\n @staticmethod\n def add_known_arguments(parser):\n parser.add_argument(\n 'id',\n metavar='IKEPOLICY',\n help=FH(_(\"ID or Name of IKEPolicy to delete\")))\n\n return parser\n","sub_path":"IPSec_EMS/common/vpnclient/v1_0/vpn/ikepolicy.py","file_name":"ikepolicy.py","file_ext":"py","file_size_in_byte":11667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"631293986","text":"\n\n#calss header\nclass _MERIT():\n\tdef __init__(self,): \n\t\tself.name = \"MERIT\"\n\t\tself.definitions = [u'If something merits a particular treatment, it deserves or is considered important enough to be treated in that way: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_merit.py","file_name":"_merit.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"109864215","text":"#!/usr/bin/env python\n#\n# Copyright 2011, Jason Graham\n#\n# Uses python-markdown to convert a markdown document to the body\n# of an HTML document to display with cgit (http://hjemli.net/git/cgit/).\n#\n# Install:\n#\n# 1- Install python-markdown ( sudo apt-get install python-markdown )\n# 2- Copy this script to /usr/local/bin/markdownize_cgit.py (with exec rights)\n# 3- Add this statement into the your cgit configuration:\n# # Implement globally\n# about-filter=/usr/local/bin/markdownize_cgit.py\n#\n# OR\n#\n# # Implement On a per-repo basis (must use\n# # the enable-filter-overrides=1 option)\n# repo.about-filter=/usr/local/bin/markdownize_cgit.py\n#\n\nimport sys\nimport markdown\n\ndef markdownize(in_stream=None, out_stream=None):\n\t# If not provided in_stream will be read from stdin and out_stream \n\t# will be written to stdout.\n\tif in_stream is None:\n\t in_stream = sys.stdin\n\tif out_stream is None:\n\t out_stream = sys.stdout\n\n\tout_stream.write(markdown.markdown(in_stream.read()))\n\nif __name__ == '__main__':\n\tif len(sys.argv) != 1:\n\t sys.exit(1)\n\tmarkdownize()","sub_path":"all-gists/772157/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"97191659","text":"import time\nimport datetime\nimport sys\nimport getopt\n\nfrom poloniex import poloniex\n\ndef main(agrv):\n period = 300\n pair = \"USDT_BTC\"\n prices = []\n current_moving_average = 0\n length_of_ma = 0\n start_time = False\n end_time = False\n historical_data = False\n trade_placed = False\n type_of_trade = False\n data_date = \"\"\n local_max = []\n current_resistance = 0.018\n data_points = []\n\n\n try:\n opts, args = getopt.getopt(agrv, \"hp:c:n:s:e\", [\"period=\",\"points=\"])\n except getopt.GetoptError:\n print(\"trading-bot.py -p -c \")\n sys.exit(2)\n\n for opt, arg in opts:\n if opt == '-h':\n print(\"trading-bot.py -p -c \")\n sys.exit()\n elif opt in (\"-p\", \"--period\"):\n if(int(arg) in [300, 900, 1800, 7200, 14400, 86400]):\n period = int(arg)\n else:\n print(\"Poloniex requires periods in 300, 900, 1800, 7200, 1440 ~\");\n sys.exit(2)\n elif opt in (\"-c\", \"--currency\"):\n pair = arg\n elif opt in (\"-n\", \"--points\"):\n length_of_ma = int(arg)\n elif opt in (\"-s\"):\n start_time = arg\n elif opt in (\"-e\"):\n end_time = arg\n\n conn = poloniex('hello', 'hello')\n\n current_moving_average = 5\n start_time = int(time.time() - 14400)\n end_time = int(time.time())\n print(start_time)\n print(end_time)\n print(current_moving_average)\n print(pair)\n\n\n\n output = open(\"output.html\", \"w\")\n output.truncate()\n output.write(\"\"\" \"\"\")\n\n\n \"\"\"\n while True:\n if(start_time and historical_data):\n print(\"!\")\n next_data_point = historical_data.pop(0)\n last_pair_price = next_data_point['weightedAverage']\n data_date = datetime.datetime.fromtimestamp(int(next_data_point['date'])).strftime('%Y-%m-%d %H:%M:%S')\n elif(start_time and not historical_data):\n for point in data_points:\n output.write( \"['\" + point['date'] + \"',\" + point['price'] + \",\" + point['label'] + \",\" + point['desc'] + \",\" + point['trend'])\n output.write(\"],\\n\")\n #output.write( \"\"]);var options = {title: 'Price Chart',legend: { position: 'bottom' }};var chart = new google.visualization.LineChart(document.getElementById('curve_chart'));chart.draw(data, options);}