diff --git "a/5436.jsonl" "b/5436.jsonl"
new file mode 100644
--- /dev/null
+++ "b/5436.jsonl"
@@ -0,0 +1,642 @@
+{"seq_id":"322935031","text":"#!/usr/bin/env python3\n\"\"\"Validator for challenges\n\nSimple script to validate jinja2 enriched challenges are valid json.\n\"\"\"\n\nimport argparse\nimport glob\nimport json\nimport os\nimport jinja2\n\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        \"fileglob\", type=str,\n        help=\"File or glob pattern to match files\")\n\n    args = parser.parse_args()\n    if args.fileglob:\n        return glob.iglob(args.fileglob)\n\ndef validate_templates(files):\n    error_found = None\n    for file_tmpl in files:\n        abs_dirname = os.path.join(os.getcwd(), os.path.dirname(file_tmpl))\n        filename = os.path.basename(file_tmpl)\n        print(\">>> Parsing file: {}\".format(os.path.join(abs_dirname, filename)))\n        templateLoader = jinja2.FileSystemLoader(searchpath=abs_dirname)\n        templateEnv = jinja2.Environment(loader=templateLoader)\n        template = templateEnv.get_template(filename)\n        try:\n            json_output = json.loads(template.render())\n        except json.JSONDecodeError:\n            rendered_filename = \"rendered\"+filename\n            print(\"Couldn't decode <{}> in json.\".format(file_tmpl))\n            print(\"Saving the rendered json template as <{}> so that you can inspect it in an editor\".format(rendered_filename))\n            with open(rendered_filename, 'wt') as fp:\n                fp.write(template.render())\n            error_found = True\n\n    if not error_found:\n        print(\"Congratulations, no errors found.\")\n\nif __name__ == '__main__':\n    files_to_parse = parse_args()\n    validate_templates(files_to_parse)\n","sub_path":"eventdata/tests/validate_challanges.py","file_name":"validate_challanges.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"329749679","text":"import os\n\nimport unreal\n\nfrom openpype.pipeline import Anatomy\nfrom openpype.hosts.unreal.api import pipeline\n\n\nqueue = None\nexecutor = None\n\n\ndef _queue_finish_callback(exec, success):\n    unreal.log(\"Render completed. Success: \" + str(success))\n\n    # Delete our reference so we don't keep it alive.\n    global executor\n    global queue\n    del executor\n    del queue\n\n\ndef _job_finish_callback(job, success):\n    # You can make any edits you want to the editor world here, and the world\n    # will be duplicated when the next render happens. Make sure you undo your\n    # edits in OnQueueFinishedCallback if you don't want to leak state changes\n    # into the editor world.\n    unreal.log(\"Individual job completed.\")\n\n\ndef start_rendering():\n    \"\"\"\n    Start the rendering process.\n    \"\"\"\n    print(\"Starting rendering...\")\n\n    # Get selected sequences\n    assets = unreal.EditorUtilityLibrary.get_selected_assets()\n\n    # instances = pipeline.ls_inst()\n    instances = [\n        a for a in assets\n        if a.get_class().get_name() == \"OpenPypePublishInstance\"]\n\n    inst_data = []\n\n    for i in instances:\n        data = pipeline.parse_container(i.get_path_name())\n        if data[\"family\"] == \"render\":\n            inst_data.append(data)\n\n    try:\n        project = os.environ.get(\"AVALON_PROJECT\")\n        anatomy = Anatomy(project)\n        root = anatomy.roots['renders']\n    except Exception:\n        raise Exception(\"Could not find render root in anatomy settings.\")\n\n    render_dir = f\"{root}/{project}\"\n\n    # subsystem = unreal.get_editor_subsystem(\n    #     unreal.MoviePipelineQueueSubsystem)\n    # queue = subsystem.get_queue()\n    global queue\n    queue = unreal.MoviePipelineQueue()\n\n    ar = unreal.AssetRegistryHelpers.get_asset_registry()\n\n    for i in inst_data:\n        sequence = ar.get_asset_by_object_path(i[\"sequence\"]).get_asset()\n\n        sequences = [{\n            \"sequence\": sequence,\n            \"output\": f\"{i['output']}\",\n            \"frame_range\": (\n                int(float(i[\"frameStart\"])),\n                int(float(i[\"frameEnd\"])) + 1)\n        }]\n        render_list = []\n\n        # Get all the sequences to render. If there are subsequences,\n        # add them and their frame ranges to the render list. We also\n        # use the names for the output paths.\n        for s in sequences:\n            subscenes = pipeline.get_subsequences(s.get('sequence'))\n\n            if subscenes:\n                for ss in subscenes:\n                    sequences.append({\n                        \"sequence\": ss.get_sequence(),\n                        \"output\": (f\"{s.get('output')}/\"\n                                   f\"{ss.get_sequence().get_name()}\"),\n                        \"frame_range\": (\n                            ss.get_start_frame(), ss.get_end_frame())\n                    })\n            else:\n                # Avoid rendering camera sequences\n                if \"_camera\" not in s.get('sequence').get_name():\n                    render_list.append(s)\n\n        # Create the rendering jobs and add them to the queue.\n        for r in render_list:\n            job = queue.allocate_new_job(unreal.MoviePipelineExecutorJob)\n            job.sequence = unreal.SoftObjectPath(i[\"master_sequence\"])\n            job.map = unreal.SoftObjectPath(i[\"master_level\"])\n            job.author = \"OpenPype\"\n\n            # User data could be used to pass data to the job, that can be\n            # read in the job's OnJobFinished callback. We could,\n            # for instance, pass the AvalonPublishInstance's path to the job.\n            # job.user_data = \"\"\n\n            settings = job.get_configuration().find_or_add_setting_by_class(\n                unreal.MoviePipelineOutputSetting)\n            settings.output_resolution = unreal.IntPoint(1920, 1080)\n            settings.custom_start_frame = r.get(\"frame_range\")[0]\n            settings.custom_end_frame = r.get(\"frame_range\")[1]\n            settings.use_custom_playback_range = True\n            settings.file_name_format = \"{sequence_name}.{frame_number}\"\n            settings.output_directory.path = f\"{render_dir}/{r.get('output')}\"\n\n            renderPass = job.get_configuration().find_or_add_setting_by_class(\n                unreal.MoviePipelineDeferredPassBase)\n            renderPass.disable_multisample_effects = True\n\n            job.get_configuration().find_or_add_setting_by_class(\n                unreal.MoviePipelineImageSequenceOutput_PNG)\n\n    # If there are jobs in the queue, start the rendering process.\n    if queue.get_jobs():\n        global executor\n        executor = unreal.MoviePipelinePIEExecutor()\n        executor.on_executor_finished_delegate.add_callable_unique(\n            _queue_finish_callback)\n        executor.on_individual_job_finished_delegate.add_callable_unique(\n            _job_finish_callback)  # Only available on PIE Executor\n        executor.execute(queue)\n","sub_path":"openpype/hosts/unreal/api/rendering.py","file_name":"rendering.py","file_ext":"py","file_size_in_byte":4895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"561019693","text":"import tkinter\nfrom Board import Board\n\n\nclass Gui:\n    def __init__(self):\n        self.board = Board([0, 1, 2, 3, 4, 5, 6, 7, 8])\n        self.text_font = \"Laksaman 18\"\n        self.background_color = \"#FEFCFB\"\n        self.rectangle_color = \"#FEFCFB\"\n        self.line_color = \"#0A1128\"\n        self.text_color = \"#0A1128\"\n        self.window = tkinter.Tk()\n        self.window.title(\"Specify Board Settings\")\n        self.window.configure(background=self.background_color)\n        self.shuffles = 100\n        self.start_board = [tkinter.StringVar(), tkinter.StringVar(), tkinter.StringVar(), tkinter.StringVar(),\n                            tkinter.StringVar(), tkinter.StringVar(), tkinter.StringVar(), tkinter.StringVar(),\n                            tkinter.StringVar()]\n        self.int_start_board = []\n\n        # beginning label\n        tkinter.Label(self.window, text=\"8 BOARD\", font=self.text_font, bg=self.rectangle_color, borderwidth=2, relief=\"groove\").grid(row=0, columnspan=3, pady=5)  # this is placed in 1 0\n        # the 9 text fields for the board\n        tkinter.Entry(self.window, textvariable=self.start_board[0], justify=tkinter.CENTER, bg=self.rectangle_color).grid(row=1, column=0, pady=5)\n        tkinter.Entry(self.window, textvariable=self.start_board[1], justify=tkinter.CENTER, bg=self.rectangle_color).grid(row=1, column=1, pady=5)\n        tkinter.Entry(self.window, textvariable=self.start_board[2], justify=tkinter.CENTER, bg=self.rectangle_color).grid(row=1, column=2, pady=5)\n\n        tkinter.Entry(self.window, textvariable=self.start_board[3], justify=tkinter.CENTER, bg=self.rectangle_color).grid(row=2, column=0, pady=5)\n        tkinter.Entry(self.window, textvariable=self.start_board[4], justify=tkinter.CENTER, bg=self.rectangle_color).grid(row=2, column=1, pady=5)\n        tkinter.Entry(self.window, textvariable=self.start_board[5], justify=tkinter.CENTER, bg=self.rectangle_color).grid(row=2, column=2, pady=5)\n\n        tkinter.Entry(self.window, textvariable=self.start_board[6], justify=tkinter.CENTER, bg=self.rectangle_color).grid(row=3, column=0, pady=5)\n        tkinter.Entry(self.window, textvariable=self.start_board[7], justify=tkinter.CENTER, bg=self.rectangle_color).grid(row=3, column=1, pady=5)\n        tkinter.Entry(self.window, textvariable=self.start_board[8], justify=tkinter.CENTER, bg=self.rectangle_color).grid(row=3, column=2, pady=5)\n\n        # shuffle and submit buttons\n        tkinter.Button(self.window, text=\"SHUFFLE\", command=self.shuffle_board, bg=self.background_color,\n                       font=self.text_font, width=15, pady=10, borderwidth=2, relief=\"groove\").grid(row=4, columnspan=3, pady=5)\n        tkinter.Button(self.window, text=\"SOLVE\", command=self.set_board, bg=self.background_color,\n                       font=self.text_font, width=15, pady=10, borderwidth=2, relief=\"groove\").grid(row=5, columnspan=3, pady=5)\n        self.window.mainloop()\n\n    def shuffle_board(self):\n        Board.shuffle_board(self.board, self.shuffles)\n        print(self.board)\n        for i in range(0, 9):\n            self.start_board[i].set(self.board.board_list[i])\n\n    def set_board(self):\n        board = []\n        for item in self.start_board:\n            board.append(int(item.get()))\n        self.int_start_board = board.copy()\n        self.window.destroy()\n\n    def get_board(self):\n        return self.int_start_board\n","sub_path":"Gui.py","file_name":"Gui.py","file_ext":"py","file_size_in_byte":3399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"590701901","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 13 16:28:48 2019\r\n\r\n@author: Sean\r\n\"\"\"\r\n\r\n# Motion detection with WeChat notification\r\n\r\nimport cv2\r\nimport datetime\r\nimport numpy as np\r\nimport itchat\r\n\r\n\r\ndef moveDetect(bg, currentImg):\r\n    bgGray = cv2.cvtColor(bg, cv2.COLOR_BGR2GRAY)\r\n    imgGray = cv2.cvtColor(currentImg, cv2.COLOR_BGR2GRAY)\r\n    diff = cv2.absdiff(bgGray, imgGray)\r\n    ret, thresh = cv2.threshold(diff, 100, 255, cv2.THRESH_BINARY)\r\n    k1 = np.ones((5,5), np.uint8)\r\n    k2 = np.ones((15,15), np.uint8)\r\n    erode = cv2.erode(thresh, k1, iterations=3)\r\n    dilate = cv2.dilate(erode, k2, iterations=2)\r\n    thresh, contours, hierarchy = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n    cntNum = 0\r\n    for cnt in contours:\r\n        x,y,w,h = cv2.boundingRect(cnt)\r\n        if w>50 and h>50:\r\n            cv2.rectangle(currentImg, (x,y), (x+w,y+h), (0,255,0), 2)\r\n            cntNum += 1\r\n    if cntNum>0:\r\n        cv2.putText(currentImg, 'Nums=%s'%cntNum, (10,30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0,0,255))\r\n        nowTime=datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\r\n        filename = nowTime + '.jpg'\r\n        cv2.imwrite(filename, currentImg)\r\n        sendMsg('Detected One Movement at '+ nowTime, filename)\r\n#    return cntNum\r\n\r\n\r\nmsgUnique = ''\r\ndef sendMsg(message, file):\r\n    # nickname = input('please input your firends\\' nickname : ' )\r\n    # To message someone, look the friend up first; put the WeChat remark name in `name`. Tested successfully on deepin.\r\n    # users = itchat.search_friends(name=nickname)\r\n    users = itchat.search_friends(name='我的小号')  # look up the actual user by remark name\r\n    # get the friend's full info; returns a list whose items are dicts\r\n#    print(users)\r\n    # get `UserName`, which is used for sending messages\r\n    userName = users[0]['UserName']\r\n    global msgUnique\r\n    if message != msgUnique:\r\n        itchat.send(message, toUserName = userName)\r\n        print('Detected Movement...')\r\n        print('Succeed Sending...')\r\n        msgUnique = message\r\n    try:\r\n        itchat.send_image(file, toUserName=userName)  # for other file types, use send_file directly\r\n        print(\"img sending...\")\r\n    except:\r\n        print(\"img fail sending\")\r\n\r\n\r\ndef logIn():\r\n    itchat.auto_login(hotReload=True)  # after the first QR-code scan login, later logins are automatic\r\n    print('Login...')\r\n\r\n\r\ndef main():\r\n    logIn()\r\n    url = 'rtsp://admin:XXXX@(IP)/Streaming/Channels/1'\r\n    cap=cv2.VideoCapture(url)\r\n    if(cap.isOpened()):  # video opened successfully\r\n        flag = 1\r\n    else:\r\n        flag = 0\r\n    count = 0\r\n    if flag:\r\n        while True:\r\n            ret, frame = cap.read()\r\n            if(frame is None):\r\n                break\r\n            count = count + 1\r\n            if count == 1:\r\n                bg = frame.copy()  # save the first frame as the background\r\n                continue\r\n            else:\r\n                moveDetect(bg, frame)\r\n            cv2.imshow('move',frame)\r\n            if cv2.waitKey(30) & 0xFF == 27:  # press Esc to exit\r\n                break\r\n    cap.release()\r\n    cv2.destroyAllWindows()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","sub_path":"摄像头运动检测_background.py","file_name":"摄像头运动检测_background.py","file_ext":"py","file_size_in_byte":3168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"141817486","text":"import unittest\nfrom typing import List\n\nclass Solution(unittest.TestCase):\n    def addOperators(self, num: str, target: int) -> List[str]:\n        \"\"\"\nGiven a string that contains only digits 0-9 and a target value, return all possibilities to add binary operators (not unary) +, -, or * between the digits so they evaluate to the target value.\n\nExample 1:\n\nInput: num = \"123\", target = 6\nOutput: [\"1+2+3\", \"1*2*3\"]\nExample 2:\n\nInput: num = \"232\", target = 8\nOutput: [\"2*3+2\", \"2+3*2\"]\nExample 3:\n\nInput: num = \"105\", target = 5\nOutput: [\"1*0+5\",\"10-5\"]\nExample 4:\n\nInput: num = \"00\", target = 0\nOutput: [\"0+0\", \"0-0\", \"0*0\"]\nExample 5:\n\nInput: num = \"3456237490\", target = 9191\nOutput: []\n\n---\nBasic Idea: backtrack, deal with 0 starting case\n\"\"\"\n        def backtracking(idx=0, path='', value=0, prev=None):\n            if idx == len(num) and value == target:\n                rtn.append(path)\n                return\n\n            # try each sub num\n            for i in range(idx+1, len(num) + 1):\n                sub = num[idx:i]\n                tmp = int(sub)\n                # allow '0' but no '0..'\n                if i == idx + 1 or (i > idx + 1 and num[idx] != '0'):\n                    if prev is None :\n                        backtracking(i, sub, tmp, tmp)\n                    else:\n                        backtracking(i, path+'+'+sub, value + tmp, tmp)\n                        backtracking(i, path+'-'+sub, value - tmp, -tmp)\n                        # * is special, need to undo pre operation, then apply new one\n                        backtracking(i, path+'*'+sub, value - prev + prev*tmp, prev*tmp)\n\n        rtn = []\n        backtracking()\n\n        return rtn\n\n    def test(self):\n        self.assertEqual([], self.addOperators(num = \"3456237490\", target = 9191))\n        self.assertCountEqual([\"1+2+3\", \"1*2*3\"], self.addOperators(num = \"123\", target = 6))\n        self.assertCountEqual([\"2*3+2\", \"2+3*2\"], self.addOperators(num = \"232\", target = 8))\n","sub_path":"src/main/python/expression_add_operators.py","file_name":"expression_add_operators.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"105051641","text":"# Copyright 2013 Walter Scheper\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n'''\nCreated on Feb 6, 2013\n\n@author: wscheper\n'''\nimport sys\n\nfrom . import app\n\n\ndef main(argv=None):\n    if argv is None:\n        argv = sys.argv[1:]\n    hasher = app.HasherApp()\n    try:\n        return hasher.run(argv)\n    except KeyboardInterrupt:\n        return 130\n\n\nif __name__ == '__main__':\n    sys.exit(main(sys.argv[1:]))\n","sub_path":"venv/Lib/site-packages/hasher/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"91780715","text":"#\n# @lc app=leetcode.cn id=52 lang=python3\n#\n# [52] N-Queens II\n#\n\n# @lc code=start\nclass Solution:\n    def totalNQueens(self, n: int) -> int:\n        self.count=0\n        def dp(row,col,pie,na):\n            if row==n:\n                self.count+=1\n                return\n\n            pmt=(~(col|pie|na))&((1<<n)-1)\n            while pmt:\n                p=pmt&(-pmt)\n                dp(row+1,col|p,(pie|p)<<1,(na|p)>>1)\n                pmt=pmt&(~p)\n\n        dp(0,0,0,0)\n        return self.count\n\n# @lc code=end\n\n","sub_path":"Week_08/G20200389010076/LeetCode_52_0076.py","file_name":"LeetCode_52_0076.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"483480743","text":"# Add the hello module\nimport hello\n\n# Call the function that we made in hello\nhello.world()\n\n# Print a variable ( Hello.shark )\nprint(hello.shark)\n\n# Call a class\njesse = hello.Octopus(\"Jesse\", \"Oranje\")\njesse.tell_me_about_the_octopus()\n","sub_path":"environments/main_program.py","file_name":"main_program.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"347006729","text":"from typing import Iterator, Dict, Union, Sequence, Generator\nfrom elasticsearch_dsl import Index  # type: ignore\nfrom elasticsearch_dsl.connections import connections  # type: ignore\nfrom elasticsearch.helpers import bulk\nfrom es_service.doc_template import BaseDoc\n\n\nclass ESIndex(object):\n    def __init__(\n        self,\n        index_name: str,\n        docs: Union[Iterator[Dict], Sequence[Dict]],\n    ):\n        \"\"\"\n        ES index structure\n        :param index_name: the name of your index\n        :param docs: wapo docs to be loaded\n        \"\"\"\n        # set an elasticsearch connection to your localhost\n        connections.create_connection(hosts=[\"localhost\"], timeout=100, alias=\"default\")\n        self.index = index_name\n        es_index = Index(self.index)  # initialize the index\n\n        # delete existing index that has the same name\n        if es_index.exists():\n            es_index.delete()\n\n        es_index.document(BaseDoc)  # link document mapping to the index\n        es_index.create()  # create the index\n        if docs is not None:\n            self.load(docs)\n\n    @staticmethod\n    def _populate_doc(\n        docs: Union[Iterator[Dict], Sequence[Dict]]\n    ) -> Generator[BaseDoc, None, None]:\n        \"\"\"\n        populate the BaseDoc\n        @modified by: Yunjing Lee\n        :param docs: wapo docs\n        :return:\n        \"\"\"\n        for i, doc in enumerate(docs):\n            es_doc = BaseDoc(_id=i)  # for ES index not the wapo id\n            es_doc.doc_id = doc[\"doc_id\"]\n            es_doc.title = doc[\"title\"]\n            es_doc.author = doc[\"author\"]\n            es_doc.content = doc[\"content_str\"]\n            es_doc.custom_content = doc[\"content_str\"]  # uncomment this to use custom analyzer on this field\n            es_doc.annotation = doc[\"annotation\"]\n            es_doc.date = doc[\"published_date\"]\n            es_doc.ft_vector = doc[\"ft_vector\"]\n            es_doc.sbert_vector = doc[\"sbert_vector\"]\n            es_doc.lf_vector = doc[\"lf_vector\"]\n            yield es_doc\n\n    def load(self, docs: Union[Iterator[Dict], Sequence[Dict]]):\n        # bulk insertion\n        bulk(\n            connections.get_connection(),\n            (\n                d.to_dict(\n                    include_meta=True, skip_empty=False\n                )  # serialize the BaseDoc instance (include meta information and not skip empty documents)\n                for d in self._populate_doc(docs)\n            ),\n        )\n","sub_path":"es_service/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"348380718","text":"from datetime import datetime\nfrom flask_jwt_extended.view_decorators import jwt_required\nfrom flask_restful import Resource, reqparse\n\nfrom models.base_classes import BaseResponse\nfrom models.product import ProductModel\n\n\nclass Product(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('id',\n type=int,\n required=False,\n help='This field cannot be left blank.')\n parser.add_argument('name',\n type=unicode,\n required=False,\n help='This field cannot be left blank.')\n parser.add_argument('description',\n type=unicode,\n required=False,\n help='This field cannot be left blank.')\n parser.add_argument('price',\n type=float,\n required=False,\n help='This field cannot be left blank.')\n parser.add_argument('image',\n type=unicode,\n required=False,\n help='This field cannot be left blank.')\n\n @jwt_required\n def get(self, _id=None):\n try:\n if _id:\n product = ProductModel.find_by_id(_id=_id)\n if product:\n return BaseResponse.ok_response('Successful.', product.json())\n return BaseResponse.bad_request_response('Product does not exists.', {})\n\n products = list(map(lambda x: x.json(), ProductModel.find_all()))\n return BaseResponse.ok_response('Successful.', products)\n except Exception as e:\n return BaseResponse.server_error_response(unicode(e))\n pass\n\n @jwt_required\n def post(self):\n try:\n data = Product.parser.parse_args()\n\n if ProductModel.find_by_name(name=data['name']) is not None:\n return BaseResponse.bad_request_response('This product already exists.', {})\n\n product = ProductModel(name=data['name'], description=data['description'], price=data['price'],\n image=data['image'], created_at=None, updated_on=None, status=None)\n\n product.save_to_db()\n\n return BaseResponse.created_response('Product created successfully.', product.json())\n except Exception as e:\n return BaseResponse.server_error_response(unicode(e))\n pass\n\n @jwt_required\n def put(self, _id=None):\n try:\n data = Product.parser.parse_args()\n\n product = ProductModel.find_by_id(_id=_id)\n if product:\n product.name = data['name'] if (data['name']) else product.name\n product.description = data['description'] if (data['description']) else product.description\n product.price = data['price'] if (data['price']) else product.price\n product.image = data['image'] if (data['image']) else product.image\n product.updatedOn = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n product.save_to_db()\n\n return BaseResponse.ok_response('Product updated successfully.', product.json())\n\n return BaseResponse.not_acceptable_response('Product does not exists.', {})\n except Exception as e:\n return BaseResponse.server_error_response(unicode(e))\n pass\n\n @jwt_required\n def delete(self, _id=None):\n try:\n product = ProductModel.find_by_id(_id=_id)\n if product:\n product.status = 'INA'\n product.updatedOn = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n product.save_to_db()\n\n return BaseResponse.ok_response('Product deleted successfully.', {})\n\n return BaseResponse.not_acceptable_response('Product does not exists.', {})\n except Exception as e:\n return BaseResponse.server_error_response(unicode(e))\n 
pass\n","sub_path":"resources/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":4023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"88395712","text":"# resnet\n\nfrom keras.models import Model\nfrom keras.layers import Input,Dense,Dropout,BatchNormalization,Conv1D,MaxPooling1D,AveragePooling1D,concatenate,Activation,ZeroPadding1D\nfrom keras.layers import add,Flatten\nimport numpy as np\nfrom genarator import KerasBatchGenerator\nseed = 7\nnp.random.seed(seed)\nfrom keras import backend as K\n \npath = '/Users/huangzhuojie/project/Mypy/hefei_data/'\npath_train = '/Users/huangzhuojie/project/Mypy/hefei_data/train_A/'\npath_vali= '/Users/huangzhuojie/project/Mypy/hefei_data/vali_A/'\npath_test = '/Users/huangzhuojie/project/Mypy/hefei_data/test_A/'\npath_pre = '/Users/huangzhuojie/project/Mypy/hefei_data/testA/'\n\n\ndef Conv1d_BN(x, nb_filter,kernel_size, strides=1, padding='same',name=None):\n if name is not None:\n bn_name = name + '_bn'\n conv_name = name + '_conv'\n else:\n bn_name = None\n conv_name = None\n \n x = Conv1D(nb_filter,kernel_size,padding=padding,strides=strides,activation='relu',name=conv_name)(x)\n x = BatchNormalization(axis=2,name=bn_name)(x)\n return x\n \ndef Conv_Block(inpt,nb_filter,kernel_size,strides=1, with_conv_shortcut=False):\n x = Conv1d_BN(inpt,nb_filter=nb_filter,kernel_size=kernel_size,strides=strides,padding='same')\n x = Conv1d_BN(x, nb_filter=nb_filter, kernel_size=kernel_size,padding='same')\n if with_conv_shortcut:\n shortcut = Conv1d_BN(inpt,nb_filter=nb_filter,strides=strides,kernel_size=kernel_size)\n x = add([x,shortcut])\n return x\n else:\n x = add([x,inpt])\n return x\n \n# inpt = Input(shape=(224,224,3))\n\ndef f1(y_true, y_pred):\n def recall(y_true, y_pred):\n \"\"\"Recall metric.\n Only computes a batch-wise average of recall.\n Computes the recall, a metric for multi-label classification of\n how many relevant items are selected.\n \"\"\"\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n recall = true_positives / (possible_positives + K.epsilon())\n return recall\n\n def precision(y_true, y_pred):\n \"\"\"Precision metric.\n Only computes a batch-wise average of precision.\n Computes the precision, a metric for multi-label classification of\n how many selected items are relevant.\n \"\"\"\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision\n \n precision = precision(y_true, y_pred)\n recall = recall(y_true, y_pred)\n return 2*((precision*recall)/(precision+recall+K.epsilon()))\n\n\ninpt = Input(shape=(5000,8))\nx = ZeroPadding1D(3)(inpt)\nx = Conv1d_BN(x,nb_filter=64,kernel_size=7,strides=2,padding='valid')\nx = MaxPooling1D(pool_size=3,strides=2,padding='same')(x)\n#(56,56,64)\nx = Conv_Block(x,nb_filter=64,kernel_size=3)\nx = Conv_Block(x,nb_filter=64,kernel_size=3)\nx = Conv_Block(x,nb_filter=64,kernel_size=3)\n#(28,28,128)\nx = Conv_Block(x,nb_filter=128,kernel_size=3,strides=2,with_conv_shortcut=True)\nx = Conv_Block(x,nb_filter=128,kernel_size=3)\nx = Conv_Block(x,nb_filter=128,kernel_size=3)\nx = Conv_Block(x,nb_filter=128,kernel_size=3)\n#(14,14,256)\nx = Conv_Block(x,nb_filter=256,kernel_size=3,strides=2,with_conv_shortcut=True)\nx = Conv_Block(x,nb_filter=256,kernel_size=3)\nx = 
Conv_Block(x,nb_filter=256,kernel_size=3)\nx = Conv_Block(x,nb_filter=256,kernel_size=3)\nx = Conv_Block(x,nb_filter=256,kernel_size=3)\nx = Conv_Block(x,nb_filter=256,kernel_size=3)\n#(7,7,512)\nx = Conv_Block(x,nb_filter=512,kernel_size=3,strides=2,with_conv_shortcut=True)\nx = Conv_Block(x,nb_filter=512,kernel_size=3)\nx = Conv_Block(x,nb_filter=512,kernel_size=3)\nx = AveragePooling1D(pool_size=7)(x)\nx = Flatten()(x)\n# x = Dense(1000,activation='softmax')(x)\nx = Dense(55,activation='sigmoid')(x)\n \nmodel = Model(inputs=inpt,outputs=x)\n\n\nmodel.compile(loss=\"binary_crossentropy\", optimizer='rmsprop',metrics= ['acc',f1])\n# dir, batch_size,shuffle = False,min_index =None,max_index = None,skip_step=5\n# 24105\ntrain_genarator = KerasBatchGenerator(\n dir = path_train,\n batch_size = 128,\n shuffle = True,\n# min_index =3000,\n# max_index =20000,\n skip_step = 6).generate()\nvali_genarator = KerasBatchGenerator(\n dir = path_vali,\n batch_size = 128,\n# shuffle = True,\n# min_index =0,\n# max_index =1500,\n skip_step = 6).generate()\ntest_genarator = KerasBatchGenerator(\n dir = path_test,\n batch_size = 128,\n# shuffle = True,\n# min_index =1500,\n# max_index =3000,\n skip_step = 6).generate()\n# pre_genarator = KerasBatchGenerator(path_pre,20).pre_generate()\n\n# model.summary()\n\nhistory = model.fit_generator(\n train_genarator,\n steps_per_epoch = 350,\n epochs = 10,\n validation_data = vali_genarator,\n validation_steps = 20)\nmodel.save('/content/resnet_hefei.h5')\nhistory_dict = history.history\nprint (history_dict)\npre = model.evaluate_generator(test_genarator,steps =20 )\n# c = model.predict_generator(pre_genarator,steps =2)\nprint (pre)\n\n","sub_path":"tianchi/hefei/hefei_resnet.py","file_name":"hefei_resnet.py","file_ext":"py","file_size_in_byte":5130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"19974634","text":"\"\"\"SaleDAC URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path\nfrom django.conf.urls import include\n\nfrom rest_framework.routers import DefaultRouter\n\nimport common_dac.party.views as PartyViews\n\nrouter = DefaultRouter()\nrouter.register(r'individual', PartyViews.IndividualViewSet)\nrouter.register(r'company', PartyViews.CompanyViewSet)\nrouter.register(r'department', PartyViews.DepartmentViewSet)\nrouter.register(r'individual_contact_medium', PartyViews.IndividualContactMediumViewSet)\nrouter.register(r'individual_identifier', PartyViews.IndividualIdentifierViewSet)\nrouter.register(r'company_identifier', PartyViews.CompanyIdentifierViewSet)\nrouter.register(r'department_identifier', PartyViews.DepartmentIdentifierViewSet)\n\nurlpatterns = [\n path('', include(router.urls)),\n]\n","sub_path":"common_dac/party/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"253543922","text":"import cv2\r\n\r\ncar_cascade = cv2.CascadeClassifier('cars.xml') \r\n\r\ndef detect(img):\r\n\r\n\t# gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\tdist = []\r\n\tcars = car_cascade.detectMultiScale(img, scaleFactor = 1.2, minNeighbors = 4, minSize = (80, 80))\r\n\t\r\n\tfor (x,y,w,h) in cars: \r\n\t\t# cv2.rectangle(img,(x,y),(x + w, y + h),(0,0,255),2)\r\n\t\tdist.append([x + (w//2), y + (h//2), w * h])\r\n\t\t# cv2.putText(img, f\"{dist[-1][2]}\", (dist[-1][0], dist[-1][1]), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0,0,255), thickness = 2)\r\n\r\n\treturn cars, dist\r\n\r\nif __name__ == '__main__':\r\n img = cv2.imread(\"toycar4.jpg\")\r\n cars, dist = detect(img)\r\n print(cars, dist)\r\n \r\n","sub_path":"server/car_dist_detect.py","file_name":"car_dist_detect.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"365758656","text":"import os, sys\nimport time\nimport argparse\nimport numpy as np\nimport tensorflow as tf\n\nsys.path.append(\"strands_qsr_lib\\qsr_lib\\src3\")\ntry:\n from tensorflow.nn.rnn_cell import BasicLSTMCell, DropoutWrapper, MultiRNNCell\nexcept:\n from tensorflow.contrib.rnn import BasicLSTMCell, DropoutWrapper, MultiRNNCell\n\nimport stateful_lstm\n\nfrom config import Config, Qual_Config, Quan_Config\nfrom project import Project\nfrom generate_utils import gothrough\n\nclass EventProgressEstimator(object):\n \"\"\"\n Estimate the progress of event using LSTM\n \"\"\"\n def __init__(self, is_training = True, is_dropout = True, name=None, config = Qual_Config()):\n print ('is_training', is_training)\n print ('is_dropout', is_dropout)\n self.config = config\n self.num_steps = num_steps = config.num_steps\n self.n_input = n_input = config.n_input\n self.size = size = config.hidden_size\n # This is an option, if self.s2s = True -> Use all progress values\n # otherwise just use the last progress value\n self.s2s = config.s2s\n \n with tf.variable_scope(name):\n \"Declare all placeholders\"\n \"Placeholder for input\"\n \n \"\"\"\n batch_size\n num_steps: length of sequence\n n_input: size of feature vectors\n \"\"\"\n self._input_data = tf.placeholder(tf.float32, [None, num_steps, n_input])\n \n \"\"\"\n (batch_size x num_steps) for sequence to sequence\n batch_size for \n \"\"\"\n if self.s2s:\n self._targets = tf.placeholder(tf.float32, [None, num_steps])\n else:\n self._targets = tf.placeholder(tf.float32, [None])\n\n # Sample's 
weights, NOT network's weights\n self._weights = tf.placeholder(tf.float32, [None])\n \n \n if is_training:\n print ('Set lr')\n self.lr = tf.Variable(initial_value=0.0, trainable=False)\n \n lstm_cell = BasicLSTMCell(size, forget_bias = 1.0, state_is_tuple=True)\n \n if is_dropout and config.keep_prob < 1:\n lstm_cell = DropoutWrapper(lstm_cell, output_keep_prob=config.keep_prob)\n \n multi_lstm_cell = MultiRNNCell([lstm_cell] * config.num_layers, state_is_tuple=True)\n\n # Create variable wrapper for self.initial_state\n self.create_state( multi_lstm_cell )\n \n # Initial states of the cells\n # cell.state_size = config.num_layers * 2 * size\n # Size = ( batch_size x cell.state_size )\n \n # We don't know the batch_size here, so don't need\n # to specify initial_state\n # self._initial_state = multi_lstm_cell.zero_state(batch_size, tf.float32)\n \n inputs = tf.reshape(self._input_data, [-1, n_input]) # (batch_size * num_steps, n_input)\n \n with tf.variable_scope(\"linear\"):\n weight = tf.get_variable(\"weight\", [n_input, size])\n bias = tf.get_variable(\"bias\", [size])\n\n # (batch_size * num_steps, size)\n inputs = tf.matmul(inputs, weight) + bias\n \n inputs = tf.reshape(inputs, (-1, num_steps, size)) # (batch_size, num_steps, size)\n \n print (\"self.inputs.shape = %s \" % str(inputs.shape) + \" after linear layer\")\n \n # (output, state)\n # output is of size: ( batch_size, num_steps, size )\n # state is of size: ( batch_size, cell.state_size ) (last state only)\n with tf.variable_scope(\"lstm\"):\n output_and_state = tf.nn.dynamic_rnn(multi_lstm_cell, inputs, dtype=tf.float32,\n initial_state = self.initial_state)\n # output_and_state = tf.nn.dynamic_rnn(multi_lstm_cell, inputs, dtype=tf.float32)\n # output_and_state = tf.nn.dynamic_rnn(multi_lstm_cell, inputs, dtype=tf.float32)\n \n self.final_state = output_and_state[1]\n \n if self.s2s:\n # ( batch_size, num_steps, size )\n output = output_and_state[0]\n else:\n # ( num_steps, batch_size, size )\n output = tf.transpose(output_and_state[0], [1, 0, 2])\n \n # ( batch_size, size )\n output = tf.gather(output, int(output.get_shape()[0]) - 1)\n \n \n print (\"output.shape = %s after LSTM\" % str(output.shape))\n \n with tf.variable_scope(\"output_linear\"):\n weight = tf.get_variable(\"weight\", [size, 1])\n bias = tf.get_variable(\"bias\", [1])\n\n \n if self.s2s:\n # Need to reshape to 2 dimensions\n output = tf.reshape(output, [-1, size])\n output = tf.matmul(output, weight) + bias\n # ( batch_size, num_steps ) \n output = tf.reshape(output, [-1, num_steps])\n else:\n #( batch_size, 1)\n # @ is the same as matmul\n output = tf.matmul(output, weight) + bias\n \n # Remove all 1 dimension and squash the function down to [0..1]\n # ( batch_size, num_steps ) or (batch_size)\n self.output = tf.sigmoid(tf.squeeze(output))\n \n print (\"self.output.shape = %s after linear\" % str(self.output.shape))\n \n print (\"self._targets.shape = %s \" % str(self._targets.shape))\n \n # Loss = mean squared error of target and predictions\n self.loss = tf.losses.mean_squared_error(self._targets, self.output, self._weights)\n \n if is_training:\n # \n # optimizer = tf.train.AdagradOptimizer(learning_rate=self.lr)\n \n # self.train_op = optimizer.minimize(self.loss)\n\n if self.config.optimizer == 'sgd':\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.lr)\n tvars = tf.trainable_variables()\n self.train_op = []\n \n grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars),\n self.config.max_grad_norm)\n self.train_op = 
optimizer.apply_gradients(zip(grads, tvars))\n\n elif self.config.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(learning_rate=self.lr)\n \n self.train_op = optimizer.minimize(self.loss)\n\n elif self.config.optimizer == 'adagrad':\n optimizer = tf.train.AdagradOptimizer(learning_rate=self.lr)\n \n self.train_op = optimizer.minimize(self.loss)\n \n \n def checkInputs(self, inputs):\n assert isinstance(inputs, np.ndarray)\n \n assert len(inputs.shape) == 3\n assert inputs.shape[1] == self.num_steps\n assert inputs.shape[2] == self.n_input\n \n def checkOutputs(self, outputs, batch_size):\n assert isinstance(outputs, np.ndarray)\n if self.s2s:\n assert len(outputs.shape) == 2\n assert outputs.shape[0] == batch_size\n assert outputs.shape[1] == self.num_steps\n else:\n assert len(outputs.shape) == 1\n assert outputs.shape[0] == batch_size\n \n def update(self, inputs, outputs, weights = None, initial_state = None, sess=None):\n \"\"\"\n inputs: np.array (batch_size, num_steps, n_input)\n outputs: np.array (batch_size, num_steps) or (batch_size)\n weights: (Optional) weight of samples np.array (batch_size)\n \n We need to run train_op to update the parameters\n We also need to return its loss\n \"\"\"\n self.checkInputs(inputs)\n \n batch_size = inputs.shape[0]\n \n self.checkOutputs(outputs, batch_size)\n \n sess = sess or tf.get_default_session()\n \n feed_dict = {self._input_data: inputs, self._targets: outputs}\n \n # if not initial_state is None:\n # feed_dict[self.initial_state] = initial_state\n\n if weights is None:\n weights = np.ones(batch_size, dtype=np.float32)\n \n feed_dict[self._weights] = weights\n \n _, loss, state = sess.run([self.train_op, self.loss, self.final_state], \n feed_dict)\n \n return loss, state\n \n def predict(self, inputs, outputs = None, weights = None, sess=None):\n \"\"\"\n inputs: np.array (batch_size, num_steps, n_input)\n outputs: np.array (batch_size, num_steps) or (batch_size)\n \n This function would not run train_op\n outputs is only optional if we want to get the loss\n \"\"\"\n self.checkInputs(inputs)\n \n batch_size = inputs.shape[0]\n\n if not outputs is None:\n self.checkOutputs(outputs, batch_size)\n \n sess = sess or tf.get_default_session()\n # self.reset_state(sess = sess)\n \n if weights is None:\n weights = np.ones(batch_size, dtype=np.float32)\n\n feed_dict = {self._input_data: inputs}\n\n if not outputs is None:\n # You can use self.initial_state: tf.zeros_like(self.initial_state)\n feed_dict [self._targets] = outputs\n feed_dict [self._weights] = weights\n\n predicted, loss = sess.run([self.output, self.loss], feed_dict)\n return (predicted, loss)\n else:\n predicted = sess.run(self.output,\n feed_dict)\n\n return predicted\n\n def create_state( self, cell ):\n # multi_lstm_cell.state_size = config.num_layers * 2 * size\n # ( batch_size x cell.state_size )\n # This initial state will not be updateable because it is not a variable, we have to create a variable wrapper\n self.initial_state = cell.zero_state(self.config.batch_size, tf.float32)\n \n def assign_lr(self, lr_value, sess=None):\n sess = sess or tf.get_default_session()\n \n sess.run(tf.assign(self.lr, lr_value))\n \n def get_state(self, sess=None):\n \"\"\"\n This basically gives the state of the cell\n \"\"\"\n sess = sess or tf.get_default_session()\n\n# LSTM cell states \n# state = None\n\ndef none_info ():\n while True:\n yield None\n\ndef run_epoch(m, data, lbl, info = none_info(), verbose=False, training = True):\n state = None\n costs = 0\n cost_iters = 0\n \n 
for step, (x, y, z) in enumerate( zip(data, lbl, info) ):\n y_prime = y\n if not m.config.s2s:\n # Just use the last label\n y_prime = y[:,-1]\n \n if training:\n cost, state = m.update(x, y_prime, weights = z, initial_state = state)\n else:\n predicted, cost = m.predict(x, y_prime, weights = z)\n \n if verbose:\n print ('Predicted = ' +str(predicted))\n print ('Labels = ' +str(y_prime))\n print ('Infos = ' +str(z))\n \n costs += cost\n cost_iters += 1\n \n print(\"costs %.3f, cost_iters %d, cost %.3f\" % \n (costs, cost_iters, costs / cost_iters))\n\n return costs / cost_iters\n \nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Test performance of progress learners for TRAIN/VALIDATE/TEST data.')\n\n parser.add_argument('-a', '--action', action='store', metavar = ('ACTION'),\n help = \"Action type. Choose from 'SlideToward', 'SlideAway', 'SlideNext', 'SlidePast', 'SlideAround'\" )\n parser.add_argument('-d', '--data', action='store', metavar = ('DATA'), default='FULL',\n help = \"Choose one of the followings: FULL, HALF, QUARTER. Default is FULL\" )\n parser.add_argument('-t', '--type', action='store', metavar = ('TYPE'), default='QUAL',\n help = \"Choose one of the followings: QUAL (qualitative), QUAN (quantitative). Default is QUAL\" )\n parser.add_argument('-p', '--project', action='store', metavar = ('PROJECT'),\n help = \"Location of project file.\" )\n\n args = parser.parse_args()\n \n project_name = args.action\n\n data_type = args.data\n if data_type == 'FULL':\n factor = 1\n elif data_type == 'HALF':\n factor = 2\n elif data_type == 'QUARTER':\n factor = 4\n\n feature_type = args.type\n if feature_type == 'QUAL':\n config = Qual_Config()\n elif feature_type == 'QUAN':\n config = Quan_Config()\n\n project_file = args.project\n\n if project_file is None:\n if feature_type == 'QUAL':\n project_file = os.path.join('learned_models', project_name.lower() + \"_project.proj\")\n elif feature_type == 'QUAN':\n project_file = os.path.join('learned_models', project_name.lower() + \"_raw.proj\")\n \n p = Project.load(project_file)\n \n np.random.seed()\n\n print ('p.training_data.shape = ' + str(p.training_data.shape))\n print ('p.validation_data.shape = ' + str(p.validation_data.shape))\n print ('p.testing_data.shape = ' + str(p.testing_data.shape))\n\n with tf.Graph().as_default(), tf.Session() as session:\n with tf.variable_scope(\"model\") as scope:\n print('-------- Setup m model ---------')\n m = EventProgressEstimator(is_training=True, name = p.name, config = config)\n \n with tf.variable_scope(\"model\", reuse = True) as scope: \n print('-------- Setup mtest model ---------')\n mtest = EventProgressEstimator(is_training=False, name = p.name, config = config)\n \n session.run(tf.global_variables_initializer())\n \n \"\"\"\n Training first\n \"\"\"\n train_losses = []\n validate_losses = []\n\n for i in range(config.max_max_epoch):\n print('-------------------------------')\n start_time = time.time()\n lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)\n m.assign_lr(config.learning_rate * lr_decay)\n\n print(\"Epoch: %d Learning rate: %.6f\" % (i + 1, session.run(m.lr)))\n \n indices = np.arange(p.training_data.shape[0] // factor)\n\n if config.epoch_shuffle:\n np.random.shuffle(indices)\n\n train_loss = run_epoch(m, p.training_data[indices], p.training_lbl[indices], training = True)\n \n \"Validating\"\n # [:,:,:,:8]\n validate_loss = run_epoch(mtest, p.validation_data, p.validation_lbl, training = False)\n\n train_losses.append(train_loss)\n 
validate_losses.append(validate_loss)\n\n print (repr(train_losses))\n print (repr(validate_losses))\n\n print ('------- TEST -------')\n run_epoch(mtest, p.testing_data, p.testing_lbl, training = False, verbose = True)\n","sub_path":"progress_learner.py","file_name":"progress_learner.py","file_ext":"py","file_size_in_byte":15396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"289357134","text":"#!Celsius_To_Fahrenheit_Calculator \nimport sys\nfrom Temperature import CtoF, FtoC, KtoF\n \ndef main(): \n convertType = sys.argv[1]\n valueString = sys.argv[2]\n value = float(valueString)\n if convertType == ('c'):\n fahrenheitOutput = CtoF(value)\n print(str(round(fahrenheitOutput, 1)) + ' F')\n elif convertType == ('f'):\n celsiusOutput = FtoC(value)\n print(str(round(celsiusOutput, 1)) + ' C') \n elif convertType == ('k'):\n kelvinOutput = KtoF(value)\n print(str(round(kelvinOutput, 1)) + 'F') \n else:\n print('Please choose either (C)elsius or (F)ahrenheit')\n \nif __name__ == '__main__': \n try:\n main()\n except KeyboardInterrupt:\n print('\\nok. bye!\\n')\n exit()\n except:\n print('Exception encountered. Restart program and try again.')\n exit()\n\n\n\n\n\n","sub_path":"rayshell/tempcalc.py","file_name":"tempcalc.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"568563512","text":"import sys\nimport os\nimport wget\nimport moviepy.editor as mp\n\ndef main():\n\n\tif len(sys.argv) > 1 :\n\n\t\tdownloadedFile = wget.download(str(sys.argv[1]))\n\t\tmovedDownloadedFile = \"files/\"+downloadedFile\n\t\t# move and rename file\n\t\tif len(sys.argv) > 2 :\n\t\t\tmovedDownloadedFile = \"files/\"+str(sys.argv[2])+\".mp4\"\n\t\tos.rename(downloadedFile, movedDownloadedFile)\n\t\tprint(\"Download Done\" + movedDownloadedFile)\n\n\t\tprint(\"Now converting to mp3 ...\")\n\n\t\tclip = mp.VideoFileClip(movedDownloadedFile)\n\t\tclip.audio.write_audiofile(movedDownloadedFile+\".mp3\")\n\n\t\t# delete the video file - not needed anymore\n\t\tos.remove(movedDownloadedFile)\n\n\t\tprint(\"Converting to mp3 done!\")\n\telse :\n\t\tprint(\"Error - enter video url.\")\n\nif __name__ == \"__main__\":\n main()","sub_path":"directVideoToAudio.py","file_name":"directVideoToAudio.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"516134869","text":"from __future__ import division, print_function, unicode_literals\nimport io\nimport re\nimport os\nimport difflib\nimport cv2\nimport json\n\nfrom django.conf import settings\n\nfrom idocr.recognition import model_handler\nfrom idocr.recognition.constants import (\n\tHEIGHT,\n\tWIDTH,\n\tTRAINED_MODELS,\n\tMODEL,\n\tLABELS)\n\n\nclass Recognizer():\n\tdef __init__(self, prefix=\"\"):\n\t\tself.model = model_handler.load_model(prefix + TRAINED_MODELS + MODEL)\n\t\tself.all_labels = LABELS\n\n\t\twith io.open(os.path.join(settings.PROJECT_ROOT, \"idocr/label_char_map.json\"), \"r\", encoding=\"utf8\") as f:\n\t\t\tself.label_char_map = json.loads(f.read(), encoding=\"utf8\")\n\n\tdef recognize_as_predict(self, image):\n\t\t\"\"\"\n\t\timage: models.Image object\n\t\t\"\"\"\n\t\tfor j in range(len(image.fields)): # constant 5\n\t\t\tfor k in range(len(image.fields[j].spans)): # constant 2-3\n\t\t\t\timage.fields[j].spans[k].predict_characters = []\n\t\t\t\trefine_segments = 
image.fields[j].spans[k].refine_segments\n\n\t\t\t\tstart = 0\n\t\t\t\tfor c in range(len(refine_segments)):\n\t\t\t\t\timg = image.fields[j].spans[k].image[:, start:refine_segments[c]]\n\t\t\t\t\tself._recognize_img(img, image.fields[j].spans[k].predict_characters)\n\t\t\t\t\tstart = refine_segments[c]\n\n\t\t\t\tlast_img = image.fields[j].spans[k].image[:, start:]\n\t\t\t\tself._recognize_img(last_img, image.fields[j].spans[k].predict_characters)\n\t\t\t\t\t\n\tdef post_process(self, image):\n\t\t# post process num field\n\t\ttext = image.fields[0].get_raw_text()\n\t\ttext = re.sub(r'( |\\n)', '', text) # remove all white spaces\n\t\timage.fields[0].postprocessed_text = re.sub(r'[^0-9]', '', text) # remove all non-number\n\t\t\n\t\t# post process name field\n\t\ttext = image.fields[1].get_raw_text()\n\t\t\n\t\ttext = re.sub(r',+', ' ', text)\n\t\ttext = re.sub(r'[0-9]', '', text)\n\t\ttext = re.sub(r'II(?=[N])', 'U', text)\n\t\ttext = re.sub(r'(? best_ratio:\n\t\t\t\t\tbest_string = s\n\t\t\t\t\tbest_ratio = ratio\n\t\t\t\n\t\t\treturn best_string\n\n\t\twith io.open(os.path.join(settings.PROJECT_ROOT, \"idocr/all_data/places\"), \"r\", encoding='utf8') as f:\n\t\t\ttext = f.read()\n\t\tplaces = text.split('\\n') \n\t\t\n\t\t# post process bplace field\n\t\tbplace = image.fields[3].get_raw_text()\n\t\tbplace = re.sub(r'\\n', ', ', bplace)\n\t\timage.fields[3].postprocessed_text = nearest_string(places, bplace)\n\t\t\n\t\t# post process cplace field\n\t\tcplace = image.fields[4].get_raw_text()\n\t\tcplace = re.sub(r'\\n', ', ', cplace)\n\t\timage.fields[4].postprocessed_text = nearest_string(places, cplace)\n\n\tdef _recognize_img(self, img, predict_characters):\n\t\tX = self._img_to_X(img)\n\t\tpred = self._predict(X)\n\t\tchar = self._pred_to_char(pred)\n\t\tpredict_characters.append(char)\n\n\tdef _pred_to_char(self, pred):\n\t\tlabel = self.all_labels[pred]\n\t\tchar = self.label_char_map.get(label)\n\t\tif char == None:\n\t\t\treturn \"\"\n\t\telse:\n\t\t\treturn char\n\n\tdef _img_to_X(self, img):\n\t\t# load image as grayscale\n\t\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\t\t# resize image\n\t\tres = cv2.resize(gray, dsize=(WIDTH, HEIGHT), interpolation=cv2.INTER_CUBIC)\n\t\t# flat image\n\t\timg_flat = res.flatten() / 255.0 # normalize from [0, 255] to [0, 1]\n\t\tX = img_flat.reshape(1, HEIGHT, WIDTH, 1).astype('float32')\n\t\treturn X\n\n\tdef _predict(self, X):\n\t\tpredictions = self.model.predict_classes(X, verbose=0)\n\t\treturn predictions[0]\n\t","sub_path":"IdRecDemo/idocr/recognition/recognizer.py","file_name":"recognizer.py","file_ext":"py","file_size_in_byte":4163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"516588016","text":"\"\"\" Utility module for persisting and retrieving Events, and information related to Events. \"\"\"\n\nfrom collections import OrderedDict\n\nfrom app import DB\nfrom app.persistence.models import Event, CompetitionEvent, UserEventResults\nfrom app.util.events_resources import get_WCA_event_names, get_non_WCA_event_names\n\n# -------------------------------------------------------------------------------------------------\n\ndef get_event_by_name(name):\n \"\"\" Returns an event by name. \"\"\"\n\n return Event.query.\\\n filter(Event.name == name).\\\n first()\n\n\ndef get_all_events():\n \"\"\" Returns a list of all events. 
\"\"\"\n\n return DB.session.\\\n query(Event).\\\n order_by(Event.id).\\\n all()\n\n\n# pylint: disable=C0103\ndef get_all_WCA_events():\n \"\"\" Returns a list of all WCA events. \"\"\"\n\n wca_names = set(get_WCA_event_names())\n return [e for e in get_all_events() if e.name in wca_names]\n\n\n# pylint: disable=C0103\ndef get_all_non_WCA_events():\n \"\"\" Returns a list of all non-WCA events. \"\"\"\n\n non_wca_names = set(get_non_WCA_event_names())\n return [e for e in get_all_events() if e.name in non_wca_names]\n\n\ndef get_events_id_name_mapping():\n \"\"\" Returns a dictionary of event ID to name mappings. \"\"\"\n\n mapping = OrderedDict()\n for event in get_all_events():\n mapping[event.id] = event.name\n\n return mapping\n\n\ndef get_events_name_id_mapping():\n \"\"\" Returns a dictionary of event name to ID mappings. \"\"\"\n\n mapping = OrderedDict()\n for event in get_all_events():\n mapping[event.name] = event.id\n\n return mapping\n\n\ndef get_all_events_user_has_participated_in(user_id):\n \"\"\" Returns a list of all events. \"\"\"\n\n return DB.session.\\\n query(Event).\\\n join(CompetitionEvent).\\\n join(UserEventResults).\\\n filter(UserEventResults.user_id == user_id).\\\n filter(UserEventResults.is_complete).\\\n distinct(Event.id).\\\n all()\n","sub_path":"app/persistence/events_manager.py","file_name":"events_manager.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"96583999","text":"# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom servicecatalog_puppet import constants\nfrom servicecatalog_puppet.workflow.manifest import section_task\nfrom servicecatalog_puppet.workflow.generic import (\n generic_schedule_run_deploy_in_spoke_task,\n)\n\n\nclass GenericSectionTask(section_task.SectionTask):\n\n section_name_singular = \"not_set\"\n section_name = \"not_set\"\n for_region_task_klass = \"not_set\"\n for_account_task_klass = \"not_set\"\n for_account_and_region_task_klass = \"not_set\"\n task_klass = \"not_set\"\n item_name = \"not_set\"\n supports_spoke_mode = False\n\n def params_for_results_display(self):\n return {\n \"puppet_account_id\": self.puppet_account_id,\n \"cache_invalidator\": self.cache_invalidator,\n }\n\n def requires(self):\n requirements = list()\n common_args = dict(\n puppet_account_id=self.puppet_account_id,\n manifest_file_path=self.manifest_file_path,\n )\n\n for name, details in self.manifest.get(self.section_name, {}).items():\n if (\n details.get(constants.MANIFEST_STATUS_FIELD_NAME)\n != constants.MANIFEST_STATUS_FIELD_VALUE_IGNORED\n ):\n common_args[self.item_name] = name\n requirements += self.handle_requirements_for(\n name,\n self.section_name_singular,\n self.section_name,\n self.for_region_task_klass,\n self.for_account_task_klass,\n self.for_account_and_region_task_klass,\n self.task_klass,\n common_args,\n self.supports_spoke_mode,\n )\n\n return requirements\n\n def run(self):\n if self.supports_spoke_mode and not self.is_running_in_spoke():\n yield generic_schedule_run_deploy_in_spoke_task.GenericScheduleRunDeployInSpokeTask(\n manifest_file_path=self.manifest_file_path,\n puppet_account_id=self.puppet_account_id,\n section_name=self.section_name,\n )\n self.write_output(self.manifest.get(self.section_name, 
{}))\n","sub_path":"servicecatalog_puppet/workflow/generic/generic_section_task.py","file_name":"generic_section_task.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"257017133","text":"class Solution(object):\n def arrayPairSum(self, nums):\n total = 0\n nums.sort()\n numgroup = len(nums) / 2\n j = 0\n for i in range(numgroup):\n \n total += nums[j]\n j = j + 2\n\n return total\n\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n ","sub_path":"561.py","file_name":"561.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"421247501","text":"from utils.listNode import ListNode\n\nclass Solution:\n def reverse(self, head, m, n):\n if not head:\n return head\n \n dummy = prev = ListNode(None)\n node = head\n rev, rev_tail = None, None\n\n count = 1\n while node:\n if count > n:\n rev_tail.next = node\n break\n \n if count >= m:\n if count == m: # set the rev tail\n rev_tail = node\n rev, rev.next, node = node, rev, node.next\n prev.next = rev\n \n else:\n prev.next = node\n prev = prev.next\n node = node.next\n \n count += 1\n\n return dummy.next\n\none = ListNode(1)\ntwo = ListNode(2)\nthree = ListNode(3)\nfour = ListNode(4)\nfive = ListNode(5)\n\none.next = two\ntwo.next = three\nthree.next = four\nfour.next = five\n\nprint(one)\n\nsolution = Solution()\nprint(solution.reverse(one, 2, 4))\n \n\n","sub_path":"linkedList/reverse_linked_list2.py","file_name":"reverse_linked_list2.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"45871088","text":"\"\"\"nottrello URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path\nfrom app.views import home, entrar, cadastrar, usuarioLogado, editarTarefa, excluirTarefa, excluirProjeto, marcarConcluido,listarTarefaProjeto, logout, editarPerfil, editarProjeto\nfrom nottrello import settings\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', home),\n path('home/', home),\n path('usuario/entrar', entrar),\n path('usuario/cadastrar', cadastrar),\n path('usuario/logado/', usuarioLogado, name='usuarioLogado'),\n path('usuario/perfil/', editarPerfil),\n path('/projeto/', listarTarefaProjeto),\n path('projeto/excluir/', excluirProjeto),\n path('projeto/editar/', editarProjeto),\n path('tarefa/editar/', editarTarefa),\n path('tarefa/excluir/', excluirTarefa),\n path('tarefa/concluido/', marcarConcluido),\n path('usuario/logout', logout)\n\n\n]\n\nif settings.DEBUG:\n urlpatterns+=static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)\n","sub_path":"nottrello/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"243182993","text":"from file_handling import *\n'''\n There are the tasks wich you need to make in order to pass your PA\n'''\n\n\ndef total_box_office():\n # Calculates the total sum of every box office in the list\n money = 0\n with open(\"movies_data.ini\", \"r\") as data:\n for line in data:\n line = line.strip()\n if \"box_office\" in line:\n money += int(line[11:])\n print(money)\n\n\ndef total_box_office_dict():\n sum_of_boxes = []\n sum_nums = 0\n sum_of_nums = []\n movies = file_import_dictionary()\n for key in movies.items():\n for v in key:\n if 'box_office' in v:\n sum_of_boxes.append(v['box_office'])\n for item in sum_of_boxes:\n for nums in item:\n nums = int(nums)\n sum_nums += nums\n sum_of_nums.append(sum_nums)\n print('$', sum_nums)\n\n\ndef highest_box_office():\n # Displays the highest grossing movie\n names = []\n directors = []\n stars = []\n release_years = []\n box_offices = []\n money = []\n with open(\"movies_data.ini\", \"r\") as data:\n for line in data:\n line = line.strip()\n if \"box_office\" in line:\n money.append(int(line[11:]))\n biggest = max(money)\n biggest = str(biggest)\n with open(\"movies_data.ini\", \"r\") as data:\n for line in data:\n line = line.strip()\n if \"name\" in line:\n names.append(line[5:])\n elif \"director\" in line:\n directors.append(line[11:])\n elif \"stars\" in line:\n stars.append(line[8:])\n elif \"release_year\" in line:\n release_years.append(line[-4:])\n elif \"box_office\" in line:\n box_offices.append(line[11:])\n index = box_offices.index(biggest)\n print(names[index])\n print(directors[index])\n print(stars[index])\n print(release_years[index])\n print(box_offices[index])\n\n\ndef highest_box_office_dict():\n movies = file_import_dictionary()\n num = 0\n a = ''\n for k in movies.items():\n for v in k:\n if 'box_office' in v:\n # print(v['box_office'])\n a = v['box_office']\n for element in a:\n if int(element) > num:\n num = int(element)\n bla = k[0]\n name = k[1] \n print(bla, name)\n\n\ndef oldest_and_newest():\n # Displays the oldest and the newest movie\n names = []\n directors = []\n stars = []\n release_years = []\n box_offices = []\n with open(\"movies_data.ini\", \"r\") as data:\n for line in data:\n line = line.strip()\n if \"name\" in line:\n names.append(line[5:])\n 
elif \"director\" in line:\n directors.append(line[11:])\n elif \"stars\" in line:\n stars.append(line[8:])\n elif \"release_year\" in line:\n release_years.append(int(line[-4:]))\n elif \"box_office\" in line:\n box_offices.append(line[11:])\n earliest = min(release_years)\n latest = max(release_years)\n earliest_index = release_years.index(earliest)\n latest_index = release_years.index(latest)\n print(\"\\n\")\n print(names[earliest_index])\n print(directors[earliest_index])\n print(stars[earliest_index])\n print(release_years[earliest_index])\n print(box_offices[earliest_index])\n print(\"\\n\")\n print(names[latest_index])\n print(directors[latest_index])\n print(stars[latest_index])\n print(release_years[latest_index])\n print(box_offices[latest_index])\n\n\ndef oldest_and_newest_dict():\n movies = file_import_dictionary()\n a = ''\n name = ''\n num = 0\n rnd_num = 4000\n its_me = ''\n for k in movies.items():\n for v in k:\n if 'release_year' in v:\n a = v['release_year']\n for element in a:\n if int(element) > num:\n num = int(element)\n its_me = k[0]\n name = k[1]\n elif int(element) < rnd_num:\n rnd_num = int(element)\n title = k[0]\n info = k[1]\n print(its_me, name, '\\n')\n print(title, info)\n\n\ndef alphabetically_last():\n # Displays the last movie alphabetically\n names = []\n directors = []\n stars = []\n release_years = []\n box_offices = []\n with open(\"movies_data.ini\", \"r\") as data:\n for line in data:\n line = line.strip()\n if \"name\" in line:\n names.append(line[5:])\n elif \"director\" in line:\n directors.append(line[11:])\n elif \"stars\" in line:\n stars.append(line[8:])\n elif \"release_year\" in line:\n release_years.append(int(line[-4:]))\n elif \"box_office\" in line:\n box_offices.append(line[11:])\n last = max(names)\n last_index = names.index(last)\n print(names[last_index])\n print(directors[last_index])\n print(stars[last_index])\n print(release_years[last_index])\n print(box_offices[last_index])\n\n\ndef alphabetically_last_dict():\n movies = file_import_dictionary()\n a = []\n for k in movies.items():\n for v in k:\n if 'name' in v:\n a.append(v['name'])\n print(max(a))\n\n \ndef average_year():\n # Calculates the average year the movies were made\n years = []\n with open(\"movies_data.ini\", \"r\") as data:\n for line in data:\n line = line.strip()\n if \"release_year\" in line:\n years.append(int(line[-4:]))\n lenght = len(years)\n total_years = sum(years)\n print(round(total_years / lenght))\n\n\ndef average_year_dict():\n print(\"¯\\_(ツ)_/¯\")\n\n\ndef longest_director():\n # Displays the movie with the longest named director\n names = []\n directors = []\n stars = []\n release_years = []\n box_offices = []\n with open(\"movies_data.ini\", \"r\") as data:\n for line in data:\n line = line.strip()\n if \"name\" in line:\n names.append(line[5:])\n elif \"director\" in line:\n directors.append(line[11:])\n elif \"stars\" in line:\n stars.append(line[8:])\n elif \"release_year\" in line:\n release_years.append(int(line[-4:]))\n elif \"box_office\" in line:\n box_offices.append(line[11:])\n last = max(names, key=len)\n last_index = names.index(last)\n print(names[last_index])\n print(directors[last_index])\n print(stars[last_index])\n print(release_years[last_index])\n print(box_offices[last_index])\n\n\ndef longest_director_dict():\n print(\"¯\\_(ツ)_/¯\")\n","sub_path":"statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":6868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} 
+{"seq_id":"353244923","text":"# -*- coding: utf-8 -*-\n\nfrom sqlite_orm.database import Database\nfrom sqlite_orm.field import IntegerField, BooleanField, TextField\nfrom sqlite_orm.table import BaseTable\n\nimport logging, os\n\n\nclass User(BaseTable):\n __table_name__ = 'users'\n\n id = IntegerField(primary_key=True, auto_increment=True)\n name = TextField(not_null=True)\n active = BooleanField(not_null=True, default_value=1)\n\n\nclass Post(BaseTable):\n __table_name__ = 'posts'\n\n id = IntegerField(primary_key=True)\n name = TextField(not_null=True)\n id_user = IntegerField(foreign_key=User.id)\n\n\nclass Word(BaseTable):\n __table_name__ = 'words'\n\n id = IntegerField(primary_key=True, auto_increment=True)\n lan = TextField(not_null=True)\n word = TextField(not_null=True)\n meaning = TextField(not_null=True)\n\n\n\n\nif __name__ == '__main__':\n\n #logger configure:\n logging.basicConfig(filename=\"info.log\", \n level=logging.DEBUG,\n format=('%(asctime)s: '\n '%(filename)s: '\n '%(levelname)s: '\n '%(funcName)s(): '\n '%(lineno)d: '\n '%(message)s'), \n datefmt=\"%Y-%m-%d %H:%M:%S\")\n with Database(\"test.db\") as db:\n\n if os.path.exists('./test.db'):\n # create table\n db.query(Post, User, Word).create().execute()\n\n user1 = User(id=1, name='User1')\n user2 = User(id=2, name='User2')\n user3 = User(id=3, name='User3')\n\n post1 = Post(id=1, name='Post1', id_user=user1.id)\n post2 = Post(id=2, name='Post2', id_user=user2.id)\n post3 = Post(id=3, name='Post3', id_user=user3.id)\n\n word1 = Word(id=1, lan='en', word='Hello', meaning='hola')\n\n #insert data\n db.query().insert(user1, user2, user3, post1, post2, post3, word1).execute()\n\n # select with columns + autojoin with fk;\n print('\\n=======SELECT + Auto Join=======')\n for row in db.query(User, Post.name).select().join(Post).execute():\n print(row)\n\n # update\n db.query(User).update(name='User3_UPDATED').filter(User.name == 'User3').execute()\n\n print('\\n=======SELECT after update=======')\n for row in db.query(User, Post.name).select().join(Post).execute():\n print(row)\n\n #db.query(User).delete().filter(User.name == 'User3_UPDATED').execute()\n\n print('\\n=======SELECT after delete=======')\n for row in db.query(User, Post.name).select().join(Post).execute():\n print(row)\n\n\n print('\\n=======SELECT after delete=======')\n for row in db.query(Word).select().execute():\n print(row)\n\n # delete\n #db.query(User, Post).drop().execute()\n else:\n print(\"LA BASE DE DATOS NO ESTÁ VACÍA\")\n for row in db.query(Word).select().execute():\n print(row)\n\n print('\\n=======SELECT after delete=======')\n for row in db.query(User).select().execute():\n print(row)","sub_path":"vocabulary_reminder/connect_database.py","file_name":"connect_database.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"201493929","text":"import random\n\nfrom .carta import Carta\nfrom . 
import ESPADA, BASTO, ORO, COPA\n\n\nclass Mazo():\n\n def __init__(self):\n self.mazo = [\n Carta(ESPADA, 1), Carta(BASTO, 1), Carta(ESPADA, 7),\n Carta(ORO, 7), Carta(ESPADA, 3), Carta(BASTO, 3), Carta(ORO, 3), Carta(COPA, 3),\n Carta(ESPADA, 2), Carta(BASTO, 2), Carta(ORO, 2), Carta(COPA, 2),\n Carta(ORO, 1), Carta(COPA, 1),\n Carta(ESPADA, 12), Carta(BASTO, 12), Carta(ORO, 12), Carta(COPA, 12),\n Carta(ESPADA, 11), Carta(BASTO, 11), Carta(ORO, 11), Carta(COPA, 11),\n Carta(ESPADA, 10), Carta(BASTO, 10), Carta(ORO, 10), Carta(COPA, 10),\n Carta(BASTO, 7), Carta(COPA, 7),\n Carta(ESPADA, 6), Carta(BASTO, 6), Carta(ORO, 6), Carta(COPA, 6),\n Carta(ESPADA, 5), Carta(BASTO, 5), Carta(ORO, 5), Carta(COPA, 5),\n Carta(ESPADA, 4), Carta(BASTO, 4), Carta(ORO, 4), Carta(COPA, 4)\n ]\n\n def get_card(self):\n indice = random.randint(0, len(self.mazo) - 1)\n aux = self.mazo[indice]\n del self.mazo[indice]\n return aux\n","sub_path":"truco/mazo.py","file_name":"mazo.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"633417719","text":"\"\"\"\n@difficulty: medium\n@tags: greedy\n@notes: Always picking up the max clip[1] from clips, the solution can be improved to O(nlogn) with sorting in advance.\n\"\"\"\nclass Solution(object):\n def videoStitching(self, clips, T):\n \"\"\"\n :type clips: List[List[int]]\n :type T: int\n :rtype: int\n \"\"\"\n cur, count, nextCur = 0, 0, 0\n flag = True\n while flag:\n flag = False\n for clip in clips:\n if clip[0] <= cur and clip[1] >= T:\n return count + 1\n if clip[0] <= cur and clip[1] > nextCur:\n nextCur = clip[1]\n flag = True\n cur = nextCur\n count += 1\n return -1\n","sub_path":"solution/python/1024.py","file_name":"1024.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"289030477","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom keras.models import Sequential\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom keras.layers import LSTM, Dense, Dropout, Masking, Embedding\nimport dataset\nimport neural_network_evaluator\nimport visualiser\n# import data_picker\ndId = 57975963\nyear = 2019\nmonth = 6\nday = 21\nhour = 13\n\n\n\ncomputation_range = np.arange(1,5, 1)\nwhat_hour = np.arange(1,3, 1)\ndataset = dataset.main()\n\n# x_predict = x_predict.values.ravel()\n# print(\"x_predict shape :\", x_predict.shape)\n# ========================= filtering the dataset accoring to id, year, month, day, hour\n# print(\"dataset : \", dataset)\ndId_list = dataset.DeviceId.unique()\n# print(\"dId_list : \", dId_list)\n\n# ===============================\n# print(\"dataset : \", dataset)\ny_dataset = dataset.Value\nx_dataset = dataset.drop([\"Value\"], axis=1)\n\n# =============\nindexH = dataset[(dataset[\"hour\"] == hour) & (dataset[\"Day\"] == day) & (dataset[\"Month\"] == month)].index\nprint(\"index :: \", indexH)\nprint(\"index :: \", indexH[0])\nx_predict = x_dataset.iloc[indexH[0]]\nprint(\"x_predict shape :\", x_predict.shape)\n\nx_predict = x_predict.ravel()\nprint(\"x_predict shape :\", x_predict.shape)\n\nx_predict = x_predict.reshape(-1, 1)\nprint(\"x_predict shape :\", x_predict.shape)\n\nx_predict = x_predict.reshape(1, -1)\nprint(\"x_predict shape :\", x_predict.shape)\n# =============\n\n# print(\"y_dataset : \",y_dataset)\n# print(\"x_dataset : \", 
x_dataset)\nx_train, x_test, y_train, y_test = train_test_split(x_dataset, y_dataset, shuffle=False, test_size=0.2, random_state=42)\n\nx_train, x_cv, y_train, y_cv = train_test_split(x_train, y_train, shuffle=False, test_size=0.2, random_state=42)\nprint(\"x_train : \", x_train)\nprint(\"y_train : \", y_train)\nprint(\"x_train shape : \", x_train.shape)\nprint(\"y_train shape : \", y_train.shape)\n\n#////////////////////////////////////////////////////////////\n# x_train,y_train=np.array(x_train),np.array(y_train)\n# x_train.to_numpy()\nx_train = x_train.to_numpy()\nprint(\"x_train : \\n\", x_train)\nx_train=np.reshape(x_train,(x_train.shape[0],7,1))\nprint(\"x_train shape : \\n\", x_train.shape)\n\nmodel=Sequential()\n# lstm_model.add(LSTM(100, input_shape=(7,1)))\nmodel.add(LSTM(units=50,return_sequences=True,input_shape=(7, 1)))\n# lstm_model.add(LSTM(units = 50, return_sequences = True, input_shape = (x_train.shape[1], 1)))\nmodel.add(LSTM(units=50))\nmodel.add(Dense(1))\n\n# =============\n# def keras_model(input):\n# inputs = keras.Input(shape=(input, 1))\n# model = layers.LSTM(8)(inputs)\n# model = layers.LSTM(8)(model)\n# outputs = layers.Dense(1)(model)\n# model = keras.Model(inputs=inputs, outputs=outputs, name=\"mnist_model\")\n# return model\n\n\n# model = keras_model(7)\n\n# ===============\n\nopt = tf.keras.optimizers.Adam(learning_rate=0.0001)\nmodel.compile(loss='mse', optimizer=opt, metrics=['mse', 'mae', 'mape', 'cosine'])\nhistory = model.fit(x_train, y_train, epochs=5, batch_size=1, verbose=2)\n\n","sub_path":"neural networks - Copy/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"481794943","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#Import dataset and store it in two separate vectors\ndataset = pd.read_csv('./A-PreprocessingSteps/data/Data.csv')\nx = dataset.iloc[:, :-1].values #Matrix of features\ny = dataset.iloc[:, 3].values #Dependent variables set\n\n#Treat missing fields replacing them by the row's mean\nfrom sklearn.preprocessing import Imputer\nimputer = Imputer(missing_values = 'NaN',strategy = 'mean', axis = 0) #Defines the imputer strategy\nimputer = imputer.fit(x[:, 1:3])\nx[:, 1:3] = imputer.transform(x[:, 1:3]) #Replaces the missing data\n\n#Encoding non numerical data\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nle_x = LabelEncoder()#Associates country names to numerical labels\nx[:, 0] = le_x.fit_transform(x[:, 0])#Applies the transformation to the first column only\n\nohe = OneHotEncoder(categorical_features=[0])#Dummy variables, prevents the algorithm to order the data as for ex France > Germany\nx = ohe.fit_transform(x).toarray()\n\nle_y = LabelEncoder()#Associates country names to numerical labels\ny = le_y.fit_transform(y)#Applies the transformation to the first column only\n\n#Split the dataset into Training set and a Test set\n#The training set is the set the machine learning model uses to learn\n#The test set is the set used to validate if the machine learning model did indeed learn\nfrom sklearn.cross_validation import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 0)\n\n#Feature scaling\n#transforms every single column to a range of -1 and 1\nfrom sklearn.preprocessing import StandardScaler\nsc_x = StandardScaler()\nx_train = sc_x.fit_transform(x_train)\nx_test = 
sc_x.transform(x_test)\nprint(x_train)\nprint(x_test)\n\nprint(x)\nprint(y)","sub_path":"A-PreprocessingSteps/dataPreProcessing.py","file_name":"dataPreProcessing.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"32591542","text":"# coding=utf-8\n\"\"\"\nNAME\n main - library system program\nFILE\n main.py\nCLASSES\n NumID\n\"\"\"\n# note that print statements should not normally be in functions - they are\n# in some here for clarity in interpreting the behaviour.\n\nfrom Level2.IEA_2018_redsquirrel.src import Library\nfrom Level2.IEA_2018_redsquirrel.src import UserManager as um, ItemManager as im\nimport datetime as dt\n# import lib_exceptions\n\n\nclass NumbID:\n \"\"\"\n Utility class to provide unique identifier.\n\n Methods defined here:\n new_id(...)\n Provide next unique identifier, starts at 1.\n\n reset_id(...)\n Reset unique identifier.\n \"\"\"\n id_number = 0\n\n def __init__(self):\n pass\n\n def __del__(self):\n pass\n\n # note that these use the @staticmethod adornment since no instance\n # attributes are used.\n @staticmethod\n def new_id():\n NumbID.id_number += 1\n return NumbID.id_number\n\n @staticmethod\n def reset_id():\n NumbID.id_number = 0\n\n\ndef create_library_catalogue(lib_controller, infile):\n \"\"\"\n Populate library's list of loanable items.\n\n :param lib_controller: LibraryController object\n :param infile: text file containing book titles\n :return: no return\n \"\"\"\n item_manager = im.ItemManager(library_controller=lib_controller)\n file_ = open(infile, 'r')\n for line in file_:\n item_manager.create_book(line.strip(), NumbID.new_id())\n print('Number of books added: ', NumbID.id_number)\n item_manager.create_journal(\"Amazing Clouds\", NumbID.new_id())\n item_manager.create_journal(\"Sleuthing in C#\", NumbID.new_id())\n item_manager.create_dvd(\"Dad's Army\", NumbID.new_id())\n item_manager.create_dvd(\"Debugging to music\", NumbID.new_id())\n print('Total number of items: ', NumbID.id_number)\n\n\ndef create_library_members(lib_controller):\n \"\"\"\n Create some users for the library system's list.\n\n :param lib_controller: LibraryController object\n :return: no return\n \"\"\"\n user_manager = um.UserManager(lib_controller)\n #user_manager.set_library_controller(lib_controller)\n NumbID.reset_id()\n # create 4 users\n for count in range(0, 4):\n user_manager.create_user(NumbID.new_id())\n print('User ID created: ', NumbID.id_number)\n\n\ndef exercise1(user_id, title, lib_controller):\n \"\"\"\n Checkout an item.\n\n :param user_id: user's unique identifier\n :param title: item to borrow\n :param lib_controller: LibraryController object\n :return: no return\n \"\"\"\n lib_controller.user_checkout(user_id, title)\n\n\ndef exercise2(user_id, return_id, title, lib_controller):\n \"\"\"\n Return an item and checkout another.\n\n :param user_id: user's unique identifier\n :param return_id: unique id of item returned\n :param title: item to borrow\n :param lib_controller: LibraryController object\n :return: no return\n \"\"\"\n fine = lib_controller.user_fine(user_id)\n print(\"User: \", user_id)\n print(\"Total Fine: \", fine)\n \n lib_controller.user_return(user_id, return_id)\n lib_controller.user_checkout(user_id, title)\n\n\ndef johnny_codewarrior(lib_controller):\n \"\"\"\n Use case 1: user with no accrued fines, one book out (not overdue)\n checks out \"Document, Your job depends on it\"\n :param lib_controller: LibraryController object\n 
:return:\n \"\"\"\n try:\n # set up Johnny with a book already checked out, which obviously can't be overdue.\n # he'll not have any accrued fines either\n lib_controller.user_checkout(1, \"The Curious Incident of the Dog in the Night-time\")\n # now get the new book\n lib_controller.user_checkout(1, \"Document, Your job depends on it\")\n fine = lib_controller.user_fine(1)\n print(\"User: Johnny, ID \", str(1))\n print(\"Total Fine: \", fine)\n except Exception:\n # lib_exceptions.CannotBorrowException:\n print(\"User 1 may not borrow book\")\n\n\ndef judy_hacker(lib_controller):\n \"\"\"\n Use case 2: user has fines of £2, has one book out (not overdue),\n bringing back a journal, and would like a DVD.\n :param lib_controller: LibraryController object\n :return:\n \"\"\"\n try:\n # set up Judy with a book, an overdue journal, and with an accumulated fine\n lib_controller.user_checkout(2, \"The Time Traveler's Wife\")\n # we can make the journal overdue using the optional 'date' parameter in the library's checkout method\n lib_controller.user_checkout(2, \"Sleuthing in C#\", date=(dt.datetime.now() - dt.timedelta(days=16)))\n # we can be certain of a fine of £2 by having a book overdue by 4 days\n # but immediately return it to comply with the scenario\n lib_controller.user_checkout(2, \"Atonement\", date=(dt.datetime.now() - dt.timedelta(days=32)))\n lib_controller.user_return(2, 22)\n # now bring back the journal - which will have added to our fine total\n lib_controller.user_return(2, 102)\n # and finally, ask for the DVD - hopefully the accrued fines don't prevent it!\n lib_controller.user_checkout(2, \"Debugging to music\")\n\n fine = lib_controller.user_fine(2)\n print(\"User: Judy, ID \", str(2))\n print(\"Total Fine: \", fine)\n\n except Exception:\n # lib_exceptions.CannotBorrowException:\n print(\"User 2 may not borrow DVD\")\n\n\ndef miss_marple(lib_controller):\n \"\"\"\n Use case 3: user cannot find journal so needs to find out if it's already out.\n :param lib_controller: LibraryController object\n :return:\n \"\"\"\n # let's set up this item as loaned out to someone else\n lib_controller.user_checkout(2, \"Sleuthing in C#\")\n # now we'll do the query\n status = lib_controller.is_on_loan(\"Sleuthing in C#\")\n if status:\n print(\"Sleuthing in C# already on loan.\")\n else:\n print(\"Sleuthing in C# available.\")\n # now let's return it and try again\n lib_controller.user_return(2, 102)\n status = lib_controller.is_on_loan(\"Sleuthing in C#\")\n if status:\n print(\"Sleuthing in C# already on loan.\")\n else:\n print(\"Sleuthing in C# available.\")\n\n\ndef eric_halfbee(lib_controller):\n \"\"\"\n Use case 4: user returns overdue items but needs to be able to pay off before borrowing DVD.\n :param lib_controller: LibraryController object\n :return:\n \"\"\"\n try:\n # Let's borrow a few books, make their checkout date ages ago...\n lib_controller.user_checkout(4, \"The Da Vinci Code\", date=(dt.datetime.now() - dt.timedelta(days=75)))\n lib_controller.user_checkout(4, \"Harry Potter and the Philosopher's Stone\", date=(dt.datetime.now() - dt.timedelta(days=55)))\n lib_controller.user_checkout(4, \"Harry Potter and the Chamber of Secrets\", date=(dt.datetime.now() - dt.timedelta(days=65)))\n fine = lib_controller.user_fine(4)\n print(\"User: Eric, ID \", str(4))\n print(\"Total Fine: \", fine)\n # try to borrow something - this should throw an exception since Eric owes a pile of dosh...\n lib_controller.user_checkout(4, \"Angels and Demons\")\n\n except Exception as e:\n # 
lib_exceptions.CannotBorrowException as e:\n print(e)\n\n try:\n # so now pay off some of the money\n lib_controller.pay_fine(4, 35.50)\n fine = lib_controller.user_fine(4)\n print(\"User: Eric, ID \", str(4))\n print(\"Total Fine: \", fine)\n # and try again\n lib_controller.user_checkout(4, \"Angels and Demons\")\n lib_controller.user_checkout(4, \"Dad's Army\")\n print(\"Borrowed trashy novel & DVD successfully\")\n\n except Exception as e:\n # lib_exceptions.CannotBorrowException as e:\n print(e)\n\n\ndef main():\n \"\"\"\n Program initialisation and execution.\n :return: no return\n \"\"\"\n print(\"Initialising library controller...\")\n lib_controller = Library.Library()\n\n print(\"Populating library catalogue...\")\n infile = 'top100t.txt'\n try:\n create_library_catalogue(lib_controller, infile)\n except:\n print(\"Catalogue populating failed\")\n raise\n\n print(\"Populating library members...\")\n try:\n create_library_members(lib_controller)\n except:\n print(\"User populating failed\")\n raise\n\n print(\"Exercise 1...\")\n try:\n exercise1(1, 'The Kite Runner', lib_controller)\n except:\n print(\"Exercise 1 failed\")\n raise\n\n print(\"Exercise 2...\")\n try:\n exercise2(1, 19, 'Sleuthing in C#', lib_controller)\n except:\n print(\"Exercise 2 failed\")\n raise\n\n try:\n johnny_codewarrior(lib_controller)\n except:\n print(\"Johnny Codewarrior failed\")\n raise\n\n try:\n judy_hacker(lib_controller)\n except:\n print(\"Judy Hacker failed\")\n raise\n\n\n try:\n miss_marple(lib_controller)\n except:\n print(\"Miss Marple failed\")\n raise\n\n try:\n eric_halfbee(lib_controller)\n except:\n print(\"Eric Halfbee failed\")\n raise\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Level2/IEA_2018_redsquirrel/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"539438064","text":"import os\nfrom flask import Flask\nfrom flask_login import LoginManager\nimport views\nfrom database import Database\nfrom user import User,get_user\n\nlm=LoginManager()\n\n@lm.user_loader\ndef load_user(user_id):\n return get_user(user_id)\n\n\ndef create_app():\n app = Flask(__name__)\n app.config.from_object(\"settings\")\n\n\n app.add_url_rule(\"/\", view_func=views.home_page, methods=[\"GET\", \"POST\"])\n app.add_url_rule(\"/guide\",view_func=views.guide_page)\n app.add_url_rule(\"/courses\", view_func=views.courses_page, methods=[\"GET\", \"POST\"])\n app.add_url_rule(\"/logout\", view_func=views.logout_page)\n app.add_url_rule(\"/courses/\", view_func=views.course_page)\n app.add_url_rule(\"/courses//edit\",view_func=views.course_edit_page,methods=[\"GET\", \"POST\"])\n app.add_url_rule(\"/new-course\",view_func=views.course_add_page, methods=[\"GET\",\"POST\"])\n app.add_url_rule(\"/user\", view_func=views.user_page)\n app.add_url_rule(\"/courses//VF_conditions_add\",view_func=views.conditionAdding_page,methods=[\"GET\", \"POST\"])\n app.add_url_rule(\"/courses//VF_conditions_edit\",view_func=views.conditionEditing_page,methods=[\"GET\", \"POST\"])\n app.add_url_rule(\"/courses//VF_conditions\",view_func=views.conditions_page,methods=[\"GET\", \"POST\"])\n app.add_url_rule(\"/Register\",view_func=views.register_page,methods=[\"GET\", \"POST\"])\n\n lm.init_app(app)\n lm.login_view = \"home_page\"\n\n 
url=\"postgres://eqxokbcjiseyei:3bc64a91ec58aab73ba937f8652296acf5ab9b2671aba9deb9420dfbe25e5cf6@ec2-46-137-188-105.eu-west-1.compute.amazonaws.com:5432/d1f2968dk53lod\"\n db = Database(url)\n app.config[\"db\"] = db\n\n return app\n\napp = create_app()\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"431752146","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport time\nimport json\nimport logging\nimport shutil\nimport tarfile\n\nimport requests\nfrom tornado import web\nfrom tornado import gen\n\nfrom litepipeline.manager.handlers.base import BaseHandler, StreamBaseHandler, auth_check\nfrom litepipeline.manager.utils.venv_manager import VenvManager\nfrom litepipeline.manager.utils.common import file_sha1sum, file_md5sum, Errors, splitall\nfrom litepipeline.manager.config import CONFIG\n\nLOG = logging.getLogger(\"__name__\")\n\n\nclass CreateVenvHandler(StreamBaseHandler):\n @auth_check\n @gen.coroutine\n def post(self):\n result = {\"result\": Errors.OK}\n try:\n name = self.get_form_argument(\"name\", \"\")\n description = self.get_form_argument(\"description\", \"\")\n file_path = self.file_path.decode(\"utf-8\")\n if name and os.path.exists(file_path) and os.path.isfile(file_path):\n file_name = os.path.split(file_path)[-1].lower()\n if ((CONFIG[\"venv_store\"].endswith(\"tar.gz\") and file_name.endswith(\"tar.gz\")) or\n (CONFIG[\"venv_store\"].endswith(\"zip\") and file_name.endswith(\"zip\"))):\n result[\"venv_id\"] = VenvManager.instance().create(name, description, file_path)\n else:\n LOG.warning(\"venv wrong format\")\n Errors.set_result_error(\"VenvWrongFormat\", result)\n else:\n LOG.warning(\"invalid arguments\")\n Errors.set_result_error(\"InvalidParameters\", result)\n except Exception as e:\n LOG.exception(e)\n Errors.set_result_error(\"ServerException\", result)\n LOG.debug(\"deploy venv package use: %ss\", time.time() - self.start)\n self.write(result)\n self.finish()\n\n\nclass ListVenvHandler(BaseHandler):\n @auth_check\n @gen.coroutine\n def get(self):\n result = {\"result\": Errors.OK}\n try:\n offset = int(self.get_argument(\"offset\", \"0\"))\n limit = int(self.get_argument(\"limit\", \"0\"))\n filters = {}\n name = self.get_argument(\"name\", \"\")\n if name:\n filters[\"name\"] = name\n venv_id = self.get_argument(\"id\", \"\")\n if venv_id:\n filters[\"id\"] = venv_id\n LOG.debug(\"ListVenvHandler offset: %s, limit: %s\", offset, limit)\n r = VenvManager.instance().list(offset, limit, filters = filters)\n result[\"venvs\"] = r[\"venvs\"]\n result[\"total\"] = r[\"total\"]\n result[\"offset\"] = offset\n result[\"limit\"] = limit\n except Exception as e:\n LOG.exception(e)\n Errors.set_result_error(\"ServerException\", result)\n self.write(result)\n self.finish()\n\n\nclass DeleteVenvHandler(BaseHandler):\n @auth_check\n @gen.coroutine\n def delete(self):\n result = {\"result\": Errors.OK}\n try:\n venv_id = self.get_argument(\"venv_id\", \"\")\n if venv_id:\n success = VenvManager.instance().delete(venv_id)\n if not success:\n Errors.set_result_error(\"OperationFailed\", result)\n except Exception as e:\n LOG.exception(e)\n Errors.set_result_error(\"ServerException\", result)\n self.write(result)\n self.finish()\n\n\nclass UpdateVenvHandler(StreamBaseHandler):\n @auth_check\n @gen.coroutine\n def post(self):\n result = {\"result\": Errors.OK}\n try:\n venv_id = 
self.get_form_argument(\"venv_id\", \"\")\n name = self.get_form_argument(\"name\", \"\")\n description = self.get_form_argument(\"description\", \"\")\n LOG.debug(\"UpdateVenvHandler venv_id: %s, name: %s, description: %s\", venv_id, name, description)\n if venv_id and VenvManager.instance().info(venv_id):\n success = VenvManager.instance().update(venv_id, name, description, self.file_path.decode(\"utf-8\"))\n if not success:\n Errors.set_result_error(\"OperationFailed\", result)\n else:\n LOG.warning(\"invalid arguments\")\n Errors.set_result_error(\"InvalidParameters\", result)\n except Exception as e:\n LOG.exception(e)\n Errors.set_result_error(\"ServerException\", result)\n LOG.debug(\"update venv package use: %ss\", time.time() - self.start)\n self.write(result)\n self.finish()\n\n\nclass InfoVenvHandler(BaseHandler):\n @auth_check\n @gen.coroutine\n def get(self):\n result = {\"result\": Errors.OK}\n try:\n venv_id = self.get_argument(\"venv_id\", \"\")\n if venv_id:\n venv_info = VenvManager.instance().info(venv_id)\n if venv_info:\n result[\"venv_info\"] = venv_info\n elif venv_info is None:\n Errors.set_result_error(\"VenvNotExists\", result)\n else:\n Errors.set_result_error(\"OperationFailed\", result)\n else:\n LOG.warning(\"invalid arguments\")\n Errors.set_result_error(\"InvalidParameters\", result)\n except Exception as e:\n LOG.exception(e)\n Errors.set_result_error(\"ServerException\", result)\n self.write(result)\n self.finish()\n\n\nclass DownloadVenvHandler(BaseHandler):\n @auth_check\n @gen.coroutine\n def get(self):\n result = {\"result\": Errors.OK}\n try:\n venv_id = self.get_argument(\"venv_id\", \"\")\n sha1 = self.get_argument(\"sha1\", \"\")\n if venv_id:\n venv_info = VenvManager.instance().info(venv_id)\n if venv_info:\n if sha1 == \"\":\n sha1 = venv_info[\"sha1\"]\n f = VenvManager.instance().open(venv_id, sha1)\n if f:\n self.set_header('Content-Type', 'application/octet-stream')\n if \"venv_store\" in CONFIG:\n if \"tar.gz\" in CONFIG[\"venv_store\"]:\n self.set_header('Content-Disposition', 'attachment; filename=%s.tar.gz' % venv_id)\n elif \"zip\" in CONFIG[\"venv_store\"]:\n self.set_header('Content-Disposition', 'attachment; filename=%s.zip' % venv_id)\n else:\n self.set_header('Content-Disposition', 'attachment; filename=%s.tar.gz' % venv_id)\n else:\n self.set_header('Content-Disposition', 'attachment; filename=%s.tar.gz' % venv_id)\n buf_size = 1024 * 1024\n while True:\n data = f.read(buf_size)\n if not data:\n break\n self.write(data)\n self.flush()\n yield gen.moment\n f.close()\n self.finish()\n return\n else:\n Errors.set_result_error(\"OperationFailed\", result)\n elif venv_info is None:\n Errors.set_result_error(\"VenvNotExists\", result)\n else:\n Errors.set_result_error(\"OperationFailed\", result)\n except Exception as e:\n LOG.exception(e)\n Errors.set_result_error(\"ServerException\", result)\n self.set_status(400)\n self.write(result)\n self.finish()\n\n\nclass VenvHistoryListHandler(BaseHandler):\n @auth_check\n @gen.coroutine\n def get(self):\n result = {\"result\": Errors.OK, \"venv_histories\": [], \"total\": 0}\n try:\n venv_id = self.get_argument(\"venv_id\", \"\")\n offset = int(self.get_argument(\"offset\", \"0\"))\n limit = int(self.get_argument(\"limit\", \"0\"))\n if venv_id:\n venv_info = VenvManager.instance().info(venv_id)\n if venv_info:\n venv_histories = VenvManager.instance().list_history(offset, limit, {\"venv_id\": venv_id})\n if venv_histories:\n result[\"venv_histories\"] = venv_histories[\"histories\"]\n 
result[\"total\"] = venv_histories[\"total\"]\n result[\"offset\"] = offset\n result[\"limit\"] = limit\n elif venv_info is None:\n Errors.set_result_error(\"VenvNotExists\", result)\n else:\n Errors.set_result_error(\"OperationFailed\", result)\n else:\n LOG.warning(\"invalid arguments\")\n Errors.set_result_error(\"InvalidParameters\", result)\n except Exception as e:\n LOG.exception(e)\n Errors.set_result_error(\"ServerException\", result)\n self.write(result)\n self.finish()\n\n\nclass VenvHistoryInfoHandler(BaseHandler):\n @auth_check\n @gen.coroutine\n def get(self):\n result = {\"result\": Errors.OK}\n try:\n venv_id = self.get_argument(\"venv_id\", \"\")\n history_id = int(self.get_argument(\"history_id\", \"-1\"))\n if history_id != -1:\n venv_history = VenvManager.instance().info_history(history_id, venv_id)\n if venv_history:\n result[\"history_info\"] = venv_history\n elif venv_history is None:\n Errors.set_result_error(\"VenvHistoryNotExists\", result)\n else:\n Errors.set_result_error(\"OperationFailed\", result)\n else:\n LOG.warning(\"invalid arguments\")\n Errors.set_result_error(\"InvalidParameters\", result)\n except Exception as e:\n LOG.exception(e)\n Errors.set_result_error(\"ServerException\", result)\n self.write(result)\n self.finish()\n\n\nclass VenvHistoryActivateHandler(BaseHandler):\n @auth_check\n @gen.coroutine\n def put(self):\n result = {\"result\": Errors.OK}\n try:\n self.json_data = json.loads(self.request.body.decode(\"utf-8\"))\n history_id = self.get_json_argument(\"history_id\", \"\")\n venv_id = self.get_json_argument(\"venv_id\", \"\")\n if history_id and venv_id:\n success = VenvManager.instance().activate_history(history_id, venv_id = venv_id)\n if not success:\n Errors.set_result_error(\"OperationFailed\", result)\n else:\n LOG.warning(\"invalid arguments\")\n Errors.set_result_error(\"InvalidParameters\", result)\n except Exception as e:\n LOG.exception(e)\n Errors.set_result_error(\"ServerException\", result)\n self.write(result)\n self.finish()\n\n\nclass VenvHistoryDeleteHandler(BaseHandler):\n @auth_check\n @gen.coroutine\n def delete(self):\n result = {\"result\": Errors.OK}\n try:\n venv_id = self.get_argument(\"venv_id\", \"\")\n history_id = int(self.get_argument(\"history_id\", \"-1\"))\n if history_id != -1 and venv_id:\n success = VenvManager.instance().delete_history(history_id, venv_id)\n if not success:\n Errors.set_result_error(\"OperationFailed\", result)\n else:\n LOG.warning(\"invalid arguments\")\n Errors.set_result_error(\"InvalidParameters\", result)\n except Exception as e:\n LOG.exception(e)\n Errors.set_result_error(\"ServerException\", result)\n self.write(result)\n self.finish()","sub_path":"litepipeline/litepipeline/manager/handlers/venv.py","file_name":"venv.py","file_ext":"py","file_size_in_byte":11723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"536705492","text":"import os\nimport sys\nimport re\nimport glob\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom google.protobuf import text_format\nsys.path.append(os.path.dirname(os.path.realpath(__file__))+\"/../../build\")\nimport gsbn_pb2\n\n# command: ./plot_trace_t.py \n\nif len(sys.argv) < 7:\n\tprint(\"Arguments wrong! 
Please retry with command :\")\n\tprint(\"python \"+os.path.realpath(__file__)+\" [pij|eij|zi2|zj2|tij|wij]\")\n\texit(-1)\n\t\nnetwork = sys.argv[1]\nsnapshot = sys.argv[2]\nprojection = int(sys.argv[3])\nparameter = sys.argv[4]\ncord_i = int(sys.argv[5])\ncord_j = int(sys.argv[6])\n\n# Read the network\nsolver_param = gsbn_pb2.SolverParam()\ntry:\n\tf = open(network, \"r\")\n\ttext_format.Parse(f.read(), solver_param)\n\tf.close()\nexcept IOError:\n\tprint(sys.argv[1] + \": Could not open file.\")\n\texit(-1)\n\nnet_param = solver_param.net_param\nif projection >= len(net_param.proj_param) or projection < 0 :\n\tprint(\"Error Argument: projection id wrong!\")\n\texit(-1)\n\npop_param_list = net_param.pop_param\npop_param_size = len(pop_param_list)\n\nproj_param = net_param.proj_param[projection]\nsrc_pop = proj_param.src_pop\ndest_pop = proj_param.dest_pop\nif src_pop > pop_param_size or dest_pop > pop_param_size :\n\tprint(\"Error Argument: network description file is wrong!\")\n\texit(-1)\n\nfor i in range(pop_param_size):\n\tif i==src_pop:\n\t\tsrc_pop_dim_hcu = pop_param_list[i].hcu_num;\n\t\tsrc_pop_dim_mcu = pop_param_list[i].mcu_num;\n\tif i==dest_pop:\n\t\tdest_pop_dim_hcu = pop_param_list[i].hcu_num;\n\t\tdest_pop_dim_mcu = pop_param_list[i].mcu_num;\n\t\tdest_pop_slot = pop_param_list[i].slot_num;\n\ndest_pop_dim_conn = src_pop_dim_hcu*src_pop_dim_mcu\nif(dest_pop_slot < dest_pop_dim_conn):\n\tdest_pop_dim_conn = dest_pop_slot\n\nif src_pop_dim_hcu<0 or src_pop_dim_mcu<0 or dest_pop_dim_hcu<0 or dest_pop_dim_mcu<0 or dest_pop_dim_conn<0:\n\tprint(\"Error Argument: network description file is wrong!\")\n\texit(-1)\n\n# READ SNAPSHOT\ntrace=[]\n\nos.chdir(snapshot)\nfor f in glob.glob(\"SolverState*.bin\"):\n\tprint(f)\n\tsolver_state = gsbn_pb2.SolverState()\n\ttry:\n\t\tf = open(f, \"rb\")\n\t\tsolver_state.ParseFromString(f.read())\n\t\tf.close()\n\texcept IOError:\n\t\tprint(sys.argv[1] + \": Could not open snapshot file.\")\n\t\texit(-1)\n\t\n\ttimestamp = solver_state.timestamp\n\tii = np.zeros([dest_pop_dim_hcu*dest_pop_dim_conn])\n\tvector_state_i32_list = solver_state.vector_state_i32\n\tfor i in range(len(vector_state_i32_list)):\n\t\tvector_state_i32 = vector_state_i32_list[i]\n\t\tif vector_state_i32.name==\"ii_\"+str(projection):\n\t\t\tdata = vector_state_i32.data\n\t\t\tfor j in range(len(data)):\n\t\t\t\tii[j]=int(data[j])\n\t\t\n\tif parameter==\"pi\" or parameter==\"ei\" or parameter==\"zi\":\n\t\tvector_state_i16_list = solver_state.vector_state_i16\n\t\tfor i in range(len(vector_state_i16_list)):\n\t\t\tvector_state_i16 = vector_state_i16_list[i]\n\t\t\tif vector_state_i16.name==parameter+\"_\"+str(projection):\n\t\t\t\tdata = vector_state_i16.data\n\t\t\t\tfor j in range(len(data)):\n\t\t\t\t\ty=ii[j]\n\t\t\t\t\tx=j//dest_pop_dim_conn\n\t\t\t\t\tif y==cord_i and x==cord_j and y>=0:\n\t\t\t\t\t\tif parameter==\"pi\":\n\t\t\t\t\t\t\ttrace.append([timestamp, data[j]/2**18])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttrace.append([timestamp, data[j]/2**4])\n\tif parameter==\"pij\" or parameter==\"eij\" or parameter==\"zi2\" or parameter==\"zj2\":\n\t\tvector_state_i16_list = solver_state.vector_state_i16\n\t\tfor i in range(len(vector_state_i16_list)):\n\t\t\tvector_state_i16 = vector_state_i16_list[i]\n\t\t\tif vector_state_i16.name==parameter+\"_\"+str(projection):\n\t\t\t\tdata = vector_state_i16.data\n\t\t\t\tfor j in 
range(len(data)):\n\t\t\t\t\th=j//dest_pop_dim_mcu\n\t\t\t\t\tw=j%dest_pop_dim_mcu\n\t\t\t\t\ty=ii[h];\n\t\t\t\t\tx=h//dest_pop_dim_conn*dest_pop_dim_mcu+w\n\t\t\t\t\tif y==cord_i and x==cord_j and y>=0:\n\t\t\t\t\t\tif parameter==\"pij\":\n\t\t\t\t\t\t\ttrace.append([timestamp, data[j]/2**18])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttrace.append([timestamp, data[j]/2**4])\n\t\t\t\t\t\t\nprint(trace)\ntime=[]\nvalue=[]\ntrace.sort(key=lambda x: x[0])\nfor v in trace:\n\ttime.append(v[0])\n\tvalue.append(v[1])\nprint(time)\nprint(value)\nplt.plot(time, value)\nplt.show()\n","sub_path":"tools/plot/plot_trace_t.py","file_name":"plot_trace_t.py","file_ext":"py","file_size_in_byte":4064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"176057435","text":"from openpyxl import Workbook\r\nfrom openpyxl import load_workbook\r\nfrom openpyxl.compat import range\r\nfrom openpyxl.utils import get_column_letter\r\nfrom openpyxl.styles import colors\r\nfrom openpyxl.styles import Font\r\nfrom openpyxl.styles.colors import RED\r\nimport click\r\nimport os.path\r\n\r\n\r\n\r\n@click.option('--capitalize',help='Converts all strings to uppercase',is_flag = True)\r\n@click.option('--preservestyles',help='Converts all strings to uppercase',is_flag = True)\r\n@click.argument('dest_excel',nargs=1)\r\n@click.argument('source_excel', nargs=1)\r\n\r\n@click.command()\r\ndef cli(capitalize,preservestyles,source_excel,dest_excel):\r\n wb1 = Workbook()\r\n dest_filename = dest_excel\r\n ws1 = wb1.active\r\n ws = wb1.active\r\n s1 = \"C:/Users/susmitha/Desktop/MRNDSummer/pca5/\"\r\n s2 = source_excel\r\n if os.path.exists(s1 + dest_filename):\r\n if click.prompt('File Already Exists...Do You Want To Replace It???(Yes - Press any key No - press ctrl+c to exit)'):\r\n dest_filename = dest_excel\r\n else:\r\n v = click.prompt('NFN')\r\n\r\n book = load_workbook(s1+s2)\r\n ws = book.active\r\n listofsheets = book.sheetnames\r\n sheets = []\r\n sheets1 = []\r\n ft = Font(color=colors.RED)\r\n for diffsheet in listofsheets:\r\n sheets.append(diffsheet)\r\n sheets1.append(diffsheet)\r\n for i in range(0, len(sheets) - 1):\r\n sheets1.append(sheets1[0] + '1')\r\n\r\n for i in range(1, len(sheets)):\r\n ans = str(sheets1[i])\r\n if capitalize:\r\n ans = wb1.create_sheet(title=sheets[i])\r\n else :\r\n ans = wb1.create_sheet(title=sheets[i])\r\n wb = book.worksheets[i]\r\n # ans.title=sheets[i+1]\r\n for r in range(1, wb.max_row):\r\n res = []\r\n y = str(r)\r\n # print(x+y)\r\n cell = wb['A' + y]\r\n a = cell.value\r\n b = a.split('_')\r\n #print(b[2])\r\n sheet = wb1.get_sheet_by_name(sheets[i])\r\n #res.append(a)\r\n for c in range(1, 2):\r\n x = get_column_letter(c)\r\n y = str(r)\r\n # print(x+y)\r\n cell = wb['A' + y]\r\n sheet = wb1.get_sheet_by_name(sheets[i])\r\n res.append(cell.value)\r\n print(ans)\r\n print(res)\r\n ans.append(res)\r\n wb1.save(filename=dest_filename)\r\n\r\nif __name__ == '__main__':\r\n cli()\r\n","sub_path":"pca2/c2.py","file_name":"c2.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"506256515","text":"# Write a python program that reads this dataset and computes the following statistics:\r\n#1 How many distinct users are there\r\n#2 How many distinct movies are there\r\n#3 How many distinct movie genres are there\r\n#4 How many movies have no external link\r\n#5 How many users rated a movie in 2018\r\n\r\n# Import the data files\r\nimport pandas as pd\r\nfile1 = open('E:/技术培训/ml-25m/genome-scores.csv', encoding='utf-8')\r\nfile2 = open('E:/技术培训/ml-25m/genome-tags.csv', encoding='utf-8')\r\nfile3 = open('E:/技术培训/ml-25m/links.csv', 
encoding='utf-8')\r\nfile4 = open('E:/技术培训/ml-25m/movies.csv', encoding='utf-8')\r\nfile5 = open('E:/技术培训/ml-25m/ratings.csv', encoding='utf-8')\r\nfile6 = open('E:/技术培训/ml-25m/tags.csv', encoding='utf-8')\r\n\r\ngenome_scores = pd.read_csv(file1)\r\ngenome_tags = pd.read_csv(file2)\r\nlinks = pd.read_csv(file3)\r\nmovies = pd.read_csv(file4)\r\nratings = pd.read_csv(file5)\r\ntags = pd.read_csv(file6)\r\n\r\n\r\n#1\r\nuser1 = tags['userId']\r\nuser2 = ratings['userId']\r\nuser3 = user1.append(user2)\r\nuser = user3.drop_duplicates()\r\nprint('Number of distinct users:',len(user))\r\n\r\n#2\r\nmovie1 = genome_scores['movieId']\r\nmovie2 = links['movieId']\r\nmovie3 = movies['movieId']\r\nmovie4 = movie1.append(movie2)\r\nmovie5 = movie4.append(movie3)\r\nmovie = movie5.drop_duplicates() \r\nprint('Number of distinct movies:',len(movie))\r\n\r\n#3\r\ngenres0 = movies['genres']\r\ngenres1 = list(genres0.str.split('|'));\r\ngenres=list(set([i for j in genres1 for i in j ])) # split each genres string on the delimiter and flatten into a set of unique genres\r\nprint('Number of distinct movie genres:',len(genres))\r\n#4\r\ntm = links['tmdbId']\r\ntmna = tm.dropna()\r\nd= len(links)-len(tmna)\r\nprint('Number of movies without an external link',d)\r\n#5\r\nimport time\r\n\r\ntime_start= time.strptime(\"2018-01-01 00:00:00\", \"%Y-%m-%d %H:%M:%S\")\r\ntime_end = time.strptime(\"2019-01-01 00:00:00\", \"%Y-%m-%d %H:%M:%S\")\r\ntimestamp_start = int(time.mktime(time_start))\r\ntimestamp_end = int(time.mktime(time_end))\r\ntimestamp = ratings[\"timestamp\"] \r\nratings_2018 = ratings.loc[(timestamp < timestamp_end) & (timestamp>= timestamp_start)]\r\nuser_2018 = ratings_2018['userId']\r\nuser_2018 = user_2018.drop_duplicates()\r\nprint('Number of users who rated movies in 2018:',len(user_2018))\r\n\r\n\r\n\r\n\r\n","sub_path":"python9.py","file_name":"python9.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"127407775","text":"import random \nimport statistics\nimport plotly.figure_factory as ff\nimport pandas as pd \nimport csv\nimport plotly.graph_objects as go \n\ndf = pd.read_csv(\"sp.csv\")\ndata = df[\"reading score\"].tolist()\nmean = sum(data)/len(data)\nstd_deviation = statistics.stdev(data)\nprint(f\"mean of the data is : {mean}\")\nprint(f\"standard deviation of data is : {std_deviation}\")\n\nmedian = statistics.median(data)\nmode = statistics.mode(data)\nprint(f\"median of the data is : {median}\")\nprint(f\"mode of the data is : {mode}\")\nfirst_sd_start, first_sd_end = mean - std_deviation, mean + std_deviation\nsecond_sd_start, second_sd_end = mean - 2*std_deviation, mean + 2*std_deviation\nthird_sd_start, third_sd_end = mean - 3*std_deviation, mean + 3*std_deviation\n\nfig = ff.create_distplot([data],[\"reading score\"], show_hist = False)\nfig.add_trace(go.Scatter(x = [mean, mean], y = [0, 0.20], mode = \"lines\", name = \"mean\"))\nfig.add_trace(go.Scatter(x = [first_sd_start, first_sd_start], y = [0, 0.20], mode = \"lines\", name = \"first std deviation\"))\nfig.add_trace(go.Scatter(x = [first_sd_end, first_sd_end], y = [0, 0.20], mode = \"lines\", name = \"first std deviation\"))\nfig.add_trace(go.Scatter(x = [second_sd_start, second_sd_start], y = [0, 0.20], mode = \"lines\", name = \"second std deviation\"))\nfig.add_trace(go.Scatter(x = [second_sd_end, second_sd_end], y = [0, 0.20], mode = \"lines\", name = \"second std deviation\"))\nfig.show()","sub_path":"sa.py","file_name":"sa.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"148449220","text":"from 
sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import KFold\nfrom sklearn.inspection import permutation_importance\nimport numpy as np\nimport pandas as pd # needed for pd.Series, pd.DataFrame and pd.concat used below\n\ndef prep_for_rf(response_col, df, colsub = 5, nested = 5):\n '''\n INPUT:\n y var/response col, the dataframe, a substring (of related variables one might want to omit, \n in this case the sub-components of total review scores), and possibly a nested var.\n \n Defaults are set to arbitrary numeric values for colsub and nested for the isinstance if statements \n \n OUTPUT:\n X and ys that have had nans properly addressed\n \n STEPS:\n 1. This function drops all rows that are missing the dependent variable/y variable.\n 2. If colsub has a value, it drops columns with that regex\n 3. If nested has a value, it groups by this value and finds the mean within the nested value\n (along the lines of person-mean imputation). This is to try to improve the accuracy of the\n mean imputation, and preserve differences between people. So in this case, if a listing appears\n twice, and the value for beds is for some reason missing in one, it is imputed with the average \n # of beds (Nas ignored by default) for this listing, not the average number of beds overall.\n 4. Finally, if there is no other option, it will fill nas with the general mean\n 5. IF there is no nested variable, it will simply fill the nas with the mean.\n 6. returns as X, y\n '''\n fillmean = lambda col: col.fillna(col.mean())\n df2 = df.copy()\n df2 = df2.dropna(subset = [response_col], axis = 0).reset_index(drop = True)\n if isinstance(colsub, str):\n X = df2[df2.columns.drop(list(df2.filter(regex=colsub)))]\n X = X.reset_index(drop = True)\n if isinstance(nested, str):\n X = X.groupby([nested]).transform(lambda x: x.fillna(x.mean()))\n X[nested] = df2[nested]\n X = X.apply(fillmean)\n else:\n X = X.apply(fillmean)\n y = df2[response_col]\n return(X, y)\n\ndef run_models_kfold(function, X, y, kfoldn = 10, n_rep = 15, pi_bool = True):\n '''\n INPUT:\n scikit rf regressor model, x vals, y vals, number of kfold splits, # of permutations,\n boolean whether or not to find permutation importances for kfold across trials\n \n OUTPUT:\n mae scores, mse scores, feature ranking\n \n STEPS:\n 1. Initialize the accuracy of the models to blank lists. Respective vals will be appended to this list\n 2. iterate over kfold splits\n 3. if pi_bool == True \n a. then use scikit's permutation importance to see which features are most important\n b. concatenate into labeled df.\n c. transform into long format\n d. label the sum of scores as 'importance' and indexers as 'labels'(will feed into further functions better in this format)\n 4. return list containing (1. list of mean absolute error, 2. list of mean squared error, 3. 
list of dfs of permutation importance)\n \n \n '''\n kf = KFold(n_splits=kfoldn,shuffle=True, random_state = 15)\n mae_ = []\n mse_ = []\n per_imps = []\n for train_index, test_index in kf.split(X):\n X_train, X_test = X.iloc[train_index], X.iloc[test_index]\n y_train, y_test = y.iloc[train_index], y.iloc[test_index]\n model = function.fit(X_train, y_train)\n mae_.append(mean_absolute_error(y_test, model.predict(X_test)))\n mse_.append(mean_squared_error(y_test, model.predict(X_test)))\n \n if pi_bool == True:\n result = permutation_importance(function, X_test, y_test, n_repeats=n_rep,\n random_state=15, n_jobs=2)\n sorted_idx = result.importances_mean.argsort()\n perm_imp = pd.DataFrame(result.importances[sorted_idx].T)\n perm_imp.columns = X_test.columns[sorted_idx]\n perm_imp = perm_imp.T.rename_axis(\"labels\")\n perm_imp['importance'] = perm_imp.mean(axis = 1)\n perm_imp = perm_imp.reset_index()\n \n per_imps.append(perm_imp)\n return([mae_, mse_, per_imps])\n\ndef run_each_participant(function, X, y, feat_bool = True):\n '''\n INPUT:\n scikit rf regressor model, x vals, y vals, boolean whether or not to find feature importances\n \n OUTPUT:\n mae scores, mse scores, feature ranking\n '''\n X = X.copy()\n y = y.copy()\n accuracy_model = []\n mse_ = []\n feat_imps = []\n \n for i in range(0,len(X[X.columns[0]])):\n \n X_test = X.loc[i]\n y_test = pd.Series(y[i])\n X_test = X_test.values.reshape(1, -1)\n \n X_train = X.drop(X.index[i])\n y_train =np.delete(y, i)\n model = function.fit(X_train, y_train)\n \n accuracy_model.append(mean_absolute_error(y_test, model.predict(X_test)))\n mse_.append(mean_squared_error(y_test, model.predict(X_test)))\n if feat_bool == True:\n feat_imp = pd.DataFrame({\n \n 'labels': X_train.columns,\n 'importance':model.feature_importances_\n })\n\n feat_imp = feat_imp.reset_index(drop = True)\n feat_imp = feat_imp.sort_values(by = 'importance', axis = 0)\n feat_imps.append(feat_imp)\n \n return([accuracy_model, mse_, feat_imps])\n\ndef specify_best(model, X, y, typ = 'RF'):\n '''\n INPUT:\n scikit regressor model, x vals, y vals, str telling if it is rf or svr\n \n OUTPUT:\n best parameter values for min samples split, max features, or kernel gamma and C\n '''\n if typ == 'RF':\n params = {'min_samples_split': np.arange(2, 100, 20), 'max_features': np.arange(2,15,4)}\n elif typ == 'svr':\n params = {'kernel': ['rbf', 'linear', 'sigmoid'], 'gamma': np.arange(0.001,0.9,0.1), 'C': [0.0001,0.01,1,10,100]}\n clf = GridSearchCV(model, params,cv = 5)\n clf.fit(X, y)\n print(clf.best_params_)\n return(clf.best_params_)\n\ndef top_features(feat_imps, featn = 15, mean = False):\n '''\n INPUT:\n result df from run_each_participant, number of features you want returned, or if you want cutoff to be mean\n only can use one at a time.\n \n OUTPUT:\n most important features, their importances, and frequency of models that they appeared in (as greater than mean or top featn)\n \n STEPS:\n 1. iterate through feat_imps, creat index var showing which trial it came from and reformat\n 2. concatenate this into one dataframe\n 3. fill nas (instances where a feature didn't occur) with zero (same as feature not occuring)\n 4. create a series of mean_vals\n 5. whichever selection method you chose, will use this series to select the topn or the > mean features\n 6. using the names/index from this series it will count how often they occur\n 6. 
create and return new dataframe.\n '''\n all_feat = []\n count = 0\n \n for i in feat_imps:\n i = i.reset_index(drop = False)\n i['index'] = [count]*len(i['index'])\n i = i.pivot(index = 'index', columns='labels', values='importance')\n all_feat.append(i)\n count += 1\n \n all_feat_df = pd.concat(all_feat, axis = 0)\n all_feat_df_filled = all_feat_df.fillna(0)\n mean_feat = all_feat_df_filled.mean(axis = 0)\n if mean == False:\n top_feat = mean_feat.sort_values().iloc[len(mean_feat)-featn: len(mean_feat)]\n else:\n top_feat = mean_feat.sort_values()\n top_feat = top_feat.where(top_feat > np.mean(top_feat.values))\n top_feat = top_feat.dropna()\n \n names = top_feat.index\n count_feat = all_feat_df.count(axis = 0)\n count_feat_sub = count_feat.loc[list(names)]\n \n top_feat_df = pd.DataFrame({\n 'features':names,\n 'feat_imps':top_feat.values,\n 'count_models': count_feat_sub.values\n })\n return(top_feat_df)\n \ndef print_vals(best_param, run_list, top_feat_df):\n '''\n INPUT:\n set of results from above functions\n \n OUTPUT:\n prints them into notebook to give an initial idea of what results are (not intended to be final form)\n '''\n print('mss: ' + str(best_param.get('min_samples_split')))\n print('max_feat: ' + str(best_param.get('max_features')))\n print('mean_mae: ' + str(np.mean(run_list[0])))\n print('mean_mse: '+ str(np.mean(run_list[1])))\n print('\\n' + 'frequency: ' + '\\n')\n for i in list(top_feat_df['count_models']):\n print(i) \n print('\\n' + 'feat_imp: ' + '\\n')\n for i in list(top_feat_df['feat_imps']):\n print(i)\n print('\\n' + 'top_ features' + '\\n')\n for i in list(top_feat_df['features']):\n print(i)","sub_path":"randomforestfunctions.py","file_name":"randomforestfunctions.py","file_ext":"py","file_size_in_byte":8819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"395383897","text":"import gzip\nimport os\nimport sys\nimport time\nimport zipfile\n\nimport wget\n\ndef download(url, destination=\".\", verbose=True):\n if verbose: sys.stdout.write(\"Downloading \" + url + \"...\"); sys.stdout.flush()\n startMillis = int(round(time.time() * 1000))\n filename = wget.download(url, destination)\n if verbose: print(\" done in \" + str(int(round(time.time() * 1000))-startMillis) + \"ms; \" + str(os.path.getsize(filename)/1000000) + \"MB\")\n\ndef extract(file, destination, verbose=True):\n file = str(file)\n startMillis = int(round(time.time() * 1000))\n if verbose: sys.stdout.write(\"Extracting \" + file + \" to \" + destination + \"...\"); sys.stdout.flush()\n\n if file.endswith(\".zip\"):\n with zipfile.ZipFile(file, \"r\") as archive:\n archive.extractall(destination)\n elif file.endswith(\".gz\"):\n with gzip.open(file, 'rb') as archive:\n with open(destination, 'wb') as outfile: # binary mode: gzip.open in 'rb' mode yields bytes\n for line in archive:\n outfile.write(line)\n\n if verbose: print(\" done in \" + str(int(round(time.time() * 1000))-startMillis) + \"ms; \" + str(os.path.getsize(file)/1000000) + \"MB -> \" + str(getFolderSize(destination)/1000000) + \"MB\")\n\ndef getFolderSize(folder):\n total_size = os.path.getsize(folder)\n for item in os.listdir(folder):\n itemPath = os.path.join(folder, item)\n if os.path.isfile(itemPath):\n total_size += os.path.getsize(itemPath)\n elif os.path.isdir(itemPath):\n total_size += getFolderSize(itemPath)\n return 
total_size\n","sub_path":"gsm/DownloadUtil.py","file_name":"DownloadUtil.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"137626372","text":"import os\nfrom flask import Flask\n\n\ndef create_app():\n '''Flask package entry point. Create, initialize and return the\n applicaiton object.'''\n # create and configure the application\n app = Flask(__name__, instance_relative_config=True)\n dbpath = os.path.join(app.instance_path, 'songcat.db')\n app.config.from_mapping(\n SECRET_KEY='dev',\n SQLALCHEMY_DATABASE_URI='sqlite:///' + dbpath,\n SQLALCHEMY_TRACK_MODIFICATIONS=False\n )\n\n # ensure the instance folder exists\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n # attach the database\n from . import model\n model.db.init_app(app)\n app.cli.add_command(model.init_model_command)\n app.cli.add_command(model.init_test_data_command)\n\n # register blueprints\n from . import auth\n app.register_blueprint(auth.bp)\n\n from . import catalog\n app.register_blueprint(catalog.bp)\n\n from . import api\n app.register_blueprint(api.bp)\n\n return app\n","sub_path":"songcat/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"436794997","text":"from CTC_Target.Loader.IEMOCAP_Loader import Load_FAU\nimport tensorflow\nfrom CTC_Target.Model.CTC_BLSTM_LC_Attention import CTC_LC_Attention\nimport os\nfrom CTC_Target.Train.FAU_TRAIN.LabelBalance import LabelBalance\n\nif __name__ == '__main__':\n for bands in [30, 40]:\n for attentionScope in [3, 5, 7]:\n loadpath = '/mnt/external/Bobs/CTC_Target_FAU/Features/Bands%d/' % bands\n savepath = 'CTC-LA-%d/Bands-%d/' % (attentionScope, bands)\n if os.path.exists(savepath): continue\n os.makedirs(savepath)\n trainData, trainLabel, trainSeq, trainScription, testData, testlabel, testSeq, testScription = Load_FAU(\n loadpath=loadpath)\n trainData, trainLabel, trainSeq, trainScription = LabelBalance(trainData=trainData, trainLabel=trainLabel,\n trainSeq=trainSeq,\n trainScription=trainScription)\n\n graph = tensorflow.Graph()\n with graph.as_default():\n classifier = CTC_LC_Attention(trainData=trainData, trainLabel=trainScription,\n trainSeqLength=trainSeq, featureShape=bands, numClass=6, rnnLayers=2,\n graphRevealFlag=False, attentionScope=attentionScope)\n print(classifier.information)\n for episode in range(100):\n print('\\nEpisode %d/100 : Total Loss = %f\\n' % (episode, classifier.Train()), end='')\n classifier.Save(savepath=savepath + '%04d-Network' % episode)\n # exit()\n","sub_path":"CTC_Target/Train/FAU_TRAIN/Exp_FAU_LocalAttention.py","file_name":"Exp_FAU_LocalAttention.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"215906637","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2021/1/29 9:58\n# @Author : Warren.wang\n# @File : test_calculator.py\n# @Software: PyCharm\nimport sys\n\nimport pytest\nimport yaml\n\nsys.path.append('..')\nprint(\"rootpath\", sys.path)\n\nfrom pythoncode.Calculator import Calculator\n\n# yaml json excel csv xml\n#解析测试数据\ndef get_datas(type):\n with open(\"./datas/calc.yml\", encoding=\"utf-8\") as f:\n datas = yaml.safe_load(f)\n # print(datas)\n return (datas[type][\"datas\"], datas[type]['ids'])\n\n#测试类\nclass TestCalc:\n datas_add:list = get_datas('add')\n 
datas_div:list = get_datas('div')\n datas_mul:list = get_datas('multiply')\n datas_sub:list = get_datas('subtract')\n\n #前置条件\n def setup_class(self):\n print(\"开始计算\")\n self.calc = Calculator()\n\n #后置条件\n def teardown_class(self):\n print(\"结束计算\")\n\n # @pytest.mark.login #加标签\n # @pytest.mark.parametrize(\"a, b, result\",[\n # [1,1,2],[100,400,300],[1,0,1]\n # ])\n @pytest.mark.parametrize(\"a, b, expect\", datas_add[0], ids= datas_add[1])\n def test_add(self, a, b, expect):\n print(f\"a={a}, b={b}, result={expect}\")\n assert expect == self.calc.add(a, b)\n\n #浮点数:特殊处理\n @pytest.mark.parametrize(\"a, b, expect\", [\n [0.11,0.12,0.23],[0.1,0.2,0.3]\n ])\n def test_add_float(self, a, b, expect):\n print(f\"a={a}, b={b}, result={expect}\")\n assert expect == round(self.calc.add(a, b), 2)\n\n #使用for循环,当里面出现fail就会break掉,不推荐\n # def test_add1(self):\n # datas = [[1,1,2],[100,400,300],[1,0,1]]\n # for data in datas:\n # print(data)\n # assert data[2] == self.calc.add(data[0], data[1])\n\n @pytest.mark.parametrize(\"a, b, expect\", datas_div[0], ids= datas_div[1])\n def test_div(self, a, b, expect):\n # with pytest.raises(ZeroDivisionError):\n print(f\"a={a}, b={b}, result={expect}\")\n assert expect == self.calc.div(a, b)\n\n #除不尽\n @pytest.mark.parametrize(\"a, b, expect\", [\n [10,3,3.33],[10,-3,-3.33],[-10,-3,3.33]\n ])\n def test_div_indivisible(self, a, b, expect):\n print(f\"a={a}, b={b}, result={expect}\")\n assert expect == round(self.calc.div(a, b), 2)\n\n #被除数是0\n @pytest.mark.parametrize(\"a, b\", [\n [0.1, 0], [10, 0], [-10, 0]\n ])\n def test_div_zero(self, a, b):\n print(f\"a={a}, b={b}\")\n with pytest.raises(ZeroDivisionError): #抛出这个异常ZeroDivisionError\n self.calc.div(a, b)\n\n @pytest.mark.parametrize(\"a, b, expect\", datas_mul[0], ids=datas_mul[1])\n def test_mul(self, a, b, expect):\n print(f\"a={a}, b={b}, result={expect}\")\n assert expect == self.calc.mul(a, b)\n\n @pytest.mark.parametrize(\"a, b, expect\", [\n [0.11, 0.12, 0.01], [0.1, 0.2, 0.02]\n ])\n def test_mul_float(self, a, b, expect):\n print(f\"a={a}, b={b}, result={expect}\")\n assert expect == round(self.calc.mul(a, b), 2)\n\n\n @pytest.mark.parametrize(\"a, b, expect\", datas_sub[0], ids=datas_sub[1])\n def test_sub(self, a, b, expect):\n print(f\"a={a}, b={b}, result={expect}\")\n assert expect == self.calc.sub(a, b)\n\n @pytest.mark.parametrize(\"a, b, expect\", [\n [0.11, 0.12, -0.01], [0.1, 0.2, -0.1]\n ])\n def test_sub_float(self, a, b, expect):\n print(f\"a={a}, b={b}, result={expect}\")\n assert expect == round(self.calc.sub(a, b), 2)\n\nif __name__ == '__main__':\n pytest.main([\"-v\",\"-s\"])","sub_path":"testing/test_calculator.py","file_name":"test_calculator.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"641559696","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 15 22:30:53 2020\n\n@author: encry973r\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndata = pd.read_csv('Social_Network_Ads.csv')\n\n# matrices of feature\nX = data.iloc[:, 2:4].values\ny = data.iloc[:, 4].values\n\n# train and test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.25, random_state=0)\n\n# feature scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n# fit to naive bayes model\nfrom 
sklearn.naive_bayes import GaussianNB\nclassifier = GaussianNB()\nclassifier.fit(X_train, y_train)\n\n# predicting the Test set result\ny_pred = classifier.predict(X_test)\n\n# confusion metric\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\ntruth = cm[0, 0] + cm[1, 1]\n\ncorrect_values = cm[0,0] + cm[1,1]\nwrong_values = cm[0,1] + cm[0,1]\naccuracy = np.round((correct_values/(correct_values + wrong_values))*100, 2)\n\nprint(\"Correct values : {0}\\nWrong values : {1}\\nAccuracy : {2}%\"\n .format(correct_values, wrong_values, accuracy))\n","sub_path":"2Classification/naiveBayes/testing12.py","file_name":"testing12.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"108495530","text":"import json\n\nclass NameModifier():\n def __init__(self, name, namesFile):\n self.name = name\n self.names = self.load_names(namesFile)\n\n def load_names(self, namesFile):\n namesJson = open(namesFile)\n names = json.load(namesJson)\n print('INFO loaded names for name translation')\n return names\n\n def modify(self, str):\n modified = str\n for x in self.names['names']:\n modified = modified.replace(' ' + x, ' ' + self.name)\n modified = modified.replace(' ' + x.lower(), ' ' + self.name)\n modified = modified.replace(x + ' ', self.name + ' ')\n modified = modified.replace(x.lower() + ' ', self.name + ' ')\n return modified\n\nclass OtherModifier():\n def __init__(self, otherFile):\n self.other = self.load_other(otherFile)\n\n def load_other(self, otherFile):\n otherJson = open(otherFile)\n other = json.load(otherJson)\n print('INFO loaded other substitutions')\n return other\n\n def modify(self, str):\n modified = str\n for x in self.other:\n modified = modified.replace(x, self.other[x])\n modified = modified.replace(x.lower(), self.other[x].lower())\n return modified\n\nclass TextModifier():\n def __init__(self, name, namesFile, otherFile):\n self.nameModifier = NameModifier(name, namesFile)\n self.otherModifier = OtherModifier(otherFile)\n\n def modify(self, str):\n modified = str\n modified = self.nameModifier.modify(modified)\n modified = self.otherModifier.modify(modified)\n return modified\n","sub_path":"src/cleverbot_text_modification.py","file_name":"cleverbot_text_modification.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"107318939","text":"from base64 import b64decode\nfrom json import loads\n\nfrom openmtc_etsi.model import ContentInstance\nfrom openmtc_etsi.scl import CreateRequestIndication, DeleteRequestIndication\nfrom openmtc_server.Plugin import Plugin\n\n\n# curl request for creating ContentInstance\n\n# curl -X POST -H \"Content-Type:application/json\"\n# -d '{\"test1\":{\"a\":1, \"b\":5}}'\n# localhost:14000/m2m/applications/ScalableDynamicApp/containers/mycontainer/subcontainers/emulated_device_nb_4/subcontainers/measurements/contentInstances\n\nclass SampleBackEndAppPlugin(Plugin):\n def _init(self):\n self.config_parameters()\n self._initialized()\n\n def config_parameters(self, ):\n self.app_name = self.config.get(\"app_name\", \"ScalableDynamicApp\");\n self.container_name_list = self.config[\"container_name_list\"]\n\n def create_container(self, result):\n\n self.logger.info(\"registering application [%s] containers %s\" % (self.app_name, self.container_name_list))\n path = result.resourceURI + \"/containers\"\n\n container = 
self.container_name_list[0]\n req_indication = CreateRequestIndication(path=path,\n resource=\"{\\\"container\\\":{\\\"id\\\":\\\"\"+container+\"\\\"}}\",\n content_type=\"application/json\")\n promise = self.api.handle_request_indication(req_indication)\n\n def register_app(self):\n self.logger.info(\"registering application [%s]\" % (self.app_name))\n path = \"/m2m/applications/\"\n self.app_path = path+self.app_name\n\n req_indication = CreateRequestIndication(path=path,\n resource=\"{\\\"application\\\":{\\\"appId\\\":\\\"\"+self.app_name+\"\\\"}}\",\n content_type=\"application/json\")\n promise = self.api.handle_request_indication(req_indication)\n promise.then(self.create_container)\n\n def _handle_contentInstances_created(self, instance, request_indication):\n self.logger.debug(\"Content Instance Created function is called\\n\")\n\n try:\n content_dict = loads(b64decode(request_indication.resource[\"content\"][\"$t\"]))\n except KeyError:\n content_dict = loads(b64decode(request_indication.resource[\"content\"][\"binaryContent\"]))\n\n self.logger.info(\"The ContentInstance is \" + str(content_dict))\n\n def deregister_app(self):\n try:\n req_indication = DeleteRequestIndication(path = self.app_path)\n promise = self.api.handle_request_indication(req_indication)\n except:\n pass\n\n def _start(self):\n self.register_app()\n self.events.resource_created.register_handler(self._handle_contentInstances_created, ContentInstance)\n self._started()\n\n def _stop(self):\n self.deregister_app()\n self._stopped()\n","sub_path":"eds/FrontEnd/server/openmtc-scl/src/openmtc_scl/plugins_eu_projects/scalable_sample_app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"327219645","text":"import logging\n\nfrom kommunitas.exceptions import StrategyError\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef strategy_safe_wrapper(f, message: str = \"\", default_retval=None, supress_error=False):\n \"\"\"\n Wrapper around user-provided methods and functions.\n Caches all exceptions and returns either the default_retval (if it's not None) or raises\n a StrategyError exception, which then needs to be handled by the calling method.\n \"\"\"\n def wrapper(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except ValueError as error:\n logger.warning(\n f\"{message}\"\n f\"Strategy caused the following exception: {error}\"\n f\"{f}\"\n )\n if default_retval is None and not supress_error:\n raise StrategyError(str(error)) from error\n return default_retval\n except Exception as error:\n logger.exception(\n f\"{message}\"\n f\"Unexpected error {error} calling {f}\"\n )\n if default_retval is None and not supress_error:\n raise StrategyError(str(error)) from error\n return default_retval\n\n return wrapper\n","sub_path":"kommunitas/strategy/strategy_wrapper.py","file_name":"strategy_wrapper.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"218998212","text":"'''\nCreated on 2021. 5. 
9.\n\n@author: clomia2\n'''\nfrom flask import Flask, request\nfrom tensorflow import keras\nimport numpy as np \nimport pandas as pd\n\n# Access-Control-Allow-Origin에 대한 처리\nfrom flask_cors import CORS\n\n# Flask 객체 생성\napp = Flask(__name__)\n\n# Access-Control-Allow-Origin에 대한 처리\nCORS(app)\n\n# 스프링에서 오는 회원 리스트 요청을 json으로 응답하는 함수\n@app.route(\"/selfTest\", methods=[\"POST\"])\ndef selfTest():\n # 증상 받기 \n symptom = request.form.get(\"symptom\")\n print(\"symptom : \",symptom)\n \n # 증상 머신러닝 데이터로 치환\n test_data = changeFormat(symptom)\n \n # 머신러닝으로 예측\n result = selfTestMechineLearning(test_data)\n \n # 병명 글지로 바꾸기\n disease = diseaseNameSearch(result-1)\n \n return disease \n\n\n# 증상을 머신러닝 데이터로 바꾸기\ndef changeFormat(data):\n # 증상 데이터 불러오기\n df = pd.read_json(\"../testData/symptom.json\")\n \n # 증상 중복 데이터 제거\n symptom = df.drop_duplicates(['case1'])\n\n # 증상 한개한개를 리스트로 만들기\n symptom_list = symptom['case1'].values.tolist()\n #print(\"symptom_list:\",symptom_list)\n \n # return data 선언\n test_data = []\n \n # 증상 분할 및 전처리\n data_spilt = data.split(',')\n for j in range(len(data_spilt)):\n data_spilt[j] = data_spilt[j].strip()\n #print(\"data_spilt:\",data_spilt)\n #증상 갯수 for\n for i in range(len(symptom_list)): \n if symptom_list[i] in data_spilt:\n test_data.append(1)\n else:\n test_data.append(0)\n \n test_data = [test_data]\n print(\"test_data : \",test_data)\n # 예측 데이터 반환\n # [[1, 0, 0, ...,1, 0]]\n return test_data\n\ndef diseaseNameSearch(result):\n df = pd.read_csv(\"../testData/diseaseData.csv\", encoding=\"cp949\")\n print('df[\"data1\"][result] : ',df[\"data1\"][result])\n return df[\"data1\"][result]\n\ndef selfTestMechineLearning(result):\n model = keras.models.load_model(\"../model_save/machinelearning.h5\")\n #test_loss, test_acc = model.evaluate(X, Y)\n #print(\"evaluate accuracy \", test_acc)\n predictions = model.predict(result)\n print(predictions[0])\n print(\"유력후보 : \", np.argmax(predictions[0]))\n return np.argmax(predictions[0])\n\nif __name__ == \"__main__\": \n app.run(host=\"0.0.0.0\", port=9000, debug=True)\n ","sub_path":"selftest/mechinelearning/selftest.py","file_name":"selftest.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"616831416","text":"import networkx as nx #Por utilizarmos uma biblioteca para constuir o grafo, é preciso importá-la, no primeiro run é lento (não sabemos o porquê, cremos que seja porque esteja importando toda a biblioteca)\n#Para importar a biblioteca use os seguinte comandos: \n# 1- sudo apt install python3-pip\n# 2- pip3 install networkx\n\ndef inicializar(residual):\n\trelacao = input()\n\trelacao = relacao.split(\" \")\n\tvertices = int(relacao[0])\n\taretas = int(relacao[1])\n\torigem = int(relacao[2])\n\tdestino = int(relacao[3])\n\n\t# criando os vertices com os atributos cor e predessesor\n\tfor x in range(vertices):\n\t\tresidual.add_node(x,cor = \"white\", pred = None)\n\t# criando as arestas com o fluxo inicial\n\tfor i in range(aretas):\n\t\tentrada = input()\n\t\tentrada = entrada.split(\" \")\n\t\tresidual.add_edge(int(entrada[0]),int(entrada[1]),fluxo = int(entrada[2]))\n\t# criando as arestas de retorno e criando vertices artificias \n\tarestas = residual.edges()\n\tfor i in list(arestas):\n\t\tif not residual.has_edge(i[1],i[0]):\n\t\t\tresidual.add_edge(int(i[1]),int(i[0]),fluxo = 0)\n\t\telse:\n\t\t\tresidual.add_node(vertices, cor = \"white\",pred = None)\n\n\t\t\tresidual.add_edge(i[1],vertices,fluxo = 
residual[i[1]][i[0]][\"fluxo\"])\n\t\t\tresidual.add_edge(vertices,i[1],fluxo = 0)\n\n\t\t\tresidual.add_edge(vertices,i[0], fluxo = residual[i[1]][i[0]][\"fluxo\"])\t\n\t\t\tresidual.add_edge(i[0],vertices, fluxo = 0)\n\n\t\t\tresidual[i[1]][i[0]][\"fluxo\"] = 0\n\t\t\tvertices = vertices + 1\n\n\n\n\treturn [origem,destino]\t\n\ndef inicializarBFS(G):\n\tfor i in G.nodes():\n\t\tG.node[i][\"cor\"] = \"white\"\n\t\tG.node[i][\"pred\"] = None\n\ndef BFS(G,raiz,destino):\n\tinicializarBFS(G)\n\tG.node[raiz][\"cor\"] = \"gray\"\n\tfila = []\n\tfila.append(raiz)\n\twhile fila != []:\n\t\tu = fila.pop(0)\n\t\tfor elemento in G.neighbors(u):\n\t\t\tif G.node[elemento][\"cor\"] == \"white\" and G[u][elemento][\"fluxo\"] > 0: #verifica-se, além de ser branca, se há fluxo na aresta\n\t\t\t\tG.node[elemento][\"cor\"] = \"gray\"\n\t\t\t\tG.node[elemento][\"pred\"] = u\n\t\t\t\tfila.append(elemento)\n\t\tG.node[u][\"cor\"] = \"black\"\t\n\treturn True if G.node[destino][\"pred\"] != None else False \n\ndef Ford_Fulkerson(residual,origem,destino):\n\tfluxoMax = 0\n\twhile BFS(residual,origem,destino): # se tiver caminho entre origem -> destino retorna true\n\t\tflowMinimo = float(\"inf\")\n\t\tcaso_base = destino\n\t\twhile caso_base != origem:\n\t\t\tpred = residual.node[caso_base][\"pred\"]\n\t\t\tflowMinimo = min(flowMinimo,residual[pred][caso_base][\"fluxo\"]) #verifica qual o fluxo min no caminho aumentante (gargalo) \n\t\t\tcaso_base = residual.node[caso_base][\"pred\"]\n\n\t\tfluxoMax = fluxoMax + flowMinimo # ao encontrar um caminho, somar para fluxoMax o caminho aumentante\n\n\t\tcaso_base = destino\n\t\twhile caso_base != origem: # atualizar o grafo residual, aresta de avanço = fluxo - gargalo aresta de retorno = fluxo + gargalo\n\t\t\tpred = residual.node[caso_base][\"pred\"]\n\t\t\tresidual[pred][caso_base][\"fluxo\"] -= flowMinimo\n\t\t\tresidual[caso_base][pred][\"fluxo\"] += flowMinimo\n\t\t\tcaso_base = residual.node[caso_base][\"pred\"]\n\treturn fluxoMax\t\n\t\t \t\n\ndef main():\n\tresidual = nx.DiGraph()\n\tinputs = inicializar(residual) # inputs[0] = origem inputs[1] = destino\n\tprint(Ford_Fulkerson(residual,inputs[0],inputs[1]))\n\t#respostaCerta =nx.maximum_flow(residual, inputs[0], inputs[1], capacity = \"fluxo\") #Caso queira comparar com a resposta, utilize essa funcao e comente a de cima\n\t#print(respostaCerta) #será exibido o fluxo e uma relacao de quanto fluxo passa pelas arestas\nif __name__ == '__main__':\n\tmain()\n","sub_path":"Fulkerson.py","file_name":"Fulkerson.py","file_ext":"py","file_size_in_byte":3434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"417455138","text":"import Store\nimport googlemaps\nfrom pprint import pprint\nfrom functools import reduce\n\nCLOUD_KEY = \"INSERT KEY HERE\"\nzips = Store.zipcodes\n\n\ndef find_roads(rests):\n \"\"\"\n Using the googlemaps api, with geocoding and roads apis, find the\n most popular roads travelled on to reach fast food restaurants in given zipcode\n\n :param rests: list of restaurants in a specified zipcode\n :return: list of most popular roads travelled to get to fast food restaurants in a certain zipcode\n \"\"\"\n gmaps = googlemaps.Client(key=CLOUD_KEY)\n paths = []\n\n for restaurant in rests:\n address = restaurant.address + \", \" + restaurant.getCity() + \", \" + \"CA \" + str(restaurant.getZipCode())\n geocode_result = gmaps.geocode(address)\n\n road_dict = dict(geocode_result[0])\n\n lat = road_dict[\"geometry\"][\"location\"][\"lat\"]\n lng = 
road_dict[\"geometry\"][\"location\"][\"lng\"]\n\n coords = str(lat) + \",\" + str(lng) + \"|\"\n\n paths.append(coords)\n\n if len(paths) == 0:\n print(\"NO DATA AVAILABLE\")\n return\n\n # paths is now a list of coords\n n = 2\n pairs = [paths[i:i + n] for i in range(0, len(paths), n) if len(paths[i:i + n]) != 0]\n results = []\n for pair in pairs:\n temp = \"\"\n for element in pair:\n temp += element\n result = gmaps.snap_to_roads(temp[:-1])\n results.append(result)\n\n addresses = []\n for result in results:\n for location in result:\n temp_dict = dict(location)\n lat = temp_dict[\"location\"][\"latitude\"]\n lng = temp_dict[\"location\"][\"longitude\"]\n address = gmaps.reverse_geocode((lat, lng))\n addresses.append(address)\n\n to_return = []\n\n for address in addresses:\n for addr in address:\n dic = dict(addr) # each address component\n if \"food\" and \"restaurant\" in dic[\"types\"]:\n for element in dic[\"address_components\"]: # iterate through all long/short names\n temp_dic = dict(element)\n if temp_dic['types'] == ['route']:\n if temp_dic[\"long_name\"] not in to_return:\n to_return.append(temp_dic[\"long_name\"])\n break\n print(list(set(to_return)))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Process.py","file_name":"Process.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"190635607","text":"from distutils.core import setup\nimport matplotlib\nimport py2exe\n\ndata_files = []\ndata_files += matplotlib.get_py2exe_datafiles()\ndata_files += ['test.ico','background.jpg','popup.jpg','appIcon.ico']\n\nsetup(\n windows=[{'script':'appconpopup.py',\n 'version':'1.00',\n 'author' : 'guoz',\n 'icon_resources': [(1,'appIcon.ico')],\n 'dest_base':'Hs Cointoss Counter'}],\n options={'py2exe': {'packages': ['Tkinter','FileDialog']}},\n \n \n data_files=data_files\n )","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"113372758","text":"#chapter 4_data structure\n#problem_14_hash 동명이인\n\n# hashing with dictionary\ndef find_same_name(a):\n \"\"\"\n 두 번 이상 나온 이름 찾기\n 입력 : 이름 n개 들어있는 리스트 a\n 출력 : n개 중 반복되는 이름의 집합\n \"\"\"\n name_dict = {}\n for name in a:\n name_dict[name] = name_dict.get(name,0) + 1\n result = set()\n for name in name_dict:\n if name_dict[name] >= 2:\n result.add(name)\n return result\n\nname = [\"Tom\", \"Jerry\", \"Mike\", \"Tom\"]\nprint(find_same_name(name))\n# Tom\n\nname2 = [\"Tom\", \"Jerry\", \"Mike\", \"Tom\", \"Mike\"]\nprint(find_same_name(name2))\n# Tom, Mike\n\n# exercise 14-1\ndef name_return(info,num):\n \"\"\"\n 학생 번호로 이름 찾기\n 입력 : 번호와 이름으로 구성된 딕셔너리 info\n 출력 : 이름 혹은 없을 경우 ?\n \"\"\"\n try:\n return info[num]\n except:\n return \"?\"\n\nnum_hum = {39: \"Justin\", 14:\"John\", 67:\"Mike\", 105:\"Summer\"}\nprint(name_return(num_hum,105))\n# Summer\nprint(name_return(num_hum,777))\n# ?","sub_path":"chapter 4_data structure_problem_14_hash 동명이인.py","file_name":"chapter 4_data structure_problem_14_hash 동명이인.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"289909348","text":"\"\"\"\n\nThe functions lt and rt make 90-degree turns by default, but you can provide a\nsecond argument that specifies the number of degrees. 
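[Annotator's note, not part of the original exercise text: the polygon()
defined further below hard-codes range(8) and turns by n degrees, so it
ignores the hint. One sketch that follows the 360.0 / n hint, assuming the
same TurtleWorld helpers fd() and lt(), with len renamed to length to avoid
shadowing the built-in:

    def polygon(t, length, n):
        angle = 360.0 / n   # exterior angle of an n-sided regular polygon
        for i in range(n):
            fd(t, length)
            lt(t, angle)

    # e.g. polygon(bob, 20, 8) draws a regular octagon with side length 20
]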
For example, lt(bob, 45)\nturns bob 45 degrees to the left.\n\nMake a copy of square and change the name to polygon. Add another parameter\nnamed n and modify the body so it draws an n-sided regular polygon. Hint: The\nexterior angles of an n-sided regular polygon are 360.0 / n degrees.\n\n\n\"\"\"\n\nfrom TurtleWorld import *\n\ndef polygon(t,len,n):\n\n for i in range(8):\n fd(t,len)\n lt(t,n)\n\nworld = TurtleWorld()\nbob = Turtle()\n\npolygon(bob,20,45)\n","sub_path":"thinkpython/swampy.1.1/exer3.py","file_name":"exer3.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"11555261","text":"from contextlib import nested\n\nfrom django.conf import settings\nfrom django.utils.unittest import skip\n\nimport requests\nfrom elasticutils.tests import ESTestCase\nfrom mock import patch\nfrom nose.tools import eq_\nfrom requests.exceptions import RequestException\nfrom requests.models import Response\n\nfrom flicks.base.tests import TestCase\nfrom flicks.videos.forms import SearchForm, UploadForm\nfrom flicks.videos.tests import build_video\n\n\nclass SearchFormTests(TestCase, ESTestCase):\n def _videos(self, **kwargs):\n params = {\n 'search': '',\n 'category': 'all',\n 'region': 'all'\n }\n params.update(kwargs)\n f = SearchForm(params)\n return f.videos()\n\n def setUp(self):\n self.user = self.build_user()\n\n def test_search(self):\n \"\"\"Test that basic title searching works.\"\"\"\n with nested(build_video(self.user, title='Honey badger'),\n build_video(self.user, title='Lolcats')) as (v1, v2):\n eq_(list(self._videos(search='badger')), [v1])\n\n def test_category_filter(self):\n \"\"\"Test that search results can be filtered by category.\"\"\"\n with nested(build_video(self.user, category='animation'),\n build_video(self.user, category='psa')) as (v1, v2):\n eq_(list(self._videos(category='psa')), [v2])\n\n def test_region_filter(self):\n \"\"\"Test that search results can be filtered by region.\"\"\"\n with nested(build_video(self.user, region='america'),\n build_video(self.user, region='europe')) as (v1, v2):\n eq_(list(self._videos(region='europe')), [v2])\n\n def test_invalid_search(self):\n \"\"\"Test that an invalid form will return an empty list.\"\"\"\n eq_(self._videos(category=''), [])\n\n\n@skip\nclass UploadFormTests(TestCase):\n def _form(self, **kwargs):\n params = {\n 'title': 'Test',\n 'upload_url': 'http://test.com',\n 'category': 'psa',\n 'region': 'america',\n 'agreement': True\n }\n params.update(kwargs)\n return UploadForm(params)\n\n def _response(self, status_code, content_type):\n response = Response()\n response.status_code = status_code\n if content_type is not None:\n response.headers['content-type'] = content_type\n return response\n\n @patch.object(requests, 'head')\n def test_request_exception(self, head):\n \"\"\"If an error occurs while testing the upload url, the form is not\n valid.\n \"\"\"\n head.side_effect = RequestException\n form = self._form()\n eq_(form.is_valid(), False)\n eq_(form.errors.keys(), ['upload_url'])\n\n @patch.object(requests, 'head')\n def test_invalid_status_code(self, head):\n \"\"\"If the url returns a non-200 status code, the form is not valid.\"\"\"\n head.return_value = self._response(404, '')\n form = self._form()\n eq_(form.is_valid(), False)\n eq_(form.errors.keys(), ['upload_url'])\n\n @patch.object(requests, 'head')\n @patch.object(settings, 'INVALID_VIDEO_CONTENT_TYPES', ['invalid/type'])\n def 
test_invalid_content_type(self, head):\n \"\"\"If the url returns an invalid content-type, the form is not\n valid.\n \"\"\"\n head.return_value = self._response(200, 'invalid/type; charset=UTF-8')\n form = self._form()\n eq_(form.is_valid(), False)\n eq_(form.errors.keys(), ['upload_url'])\n\n @patch.object(requests, 'head')\n def test_no_content_type(self, head):\n \"\"\"If the url does not return a content-type, the form is valid.\"\"\"\n head.return_value = self._response(200, None)\n form = self._form()\n eq_(form.is_valid(), True)\n\n @patch.object(requests, 'head')\n @patch.object(settings, 'INVALID_VIDEO_CONTENT_TYPES', ['invalid/type'])\n def test_success(self, head):\n \"\"\"If the url returns a valid content-type and a 200 OK, the form is\n valid.\n \"\"\"\n head.return_value = self._response(200, 'video/mpeg')\n form = self._form()\n eq_(form.is_valid(), True)\n","sub_path":"flicks/videos/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":4147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"486818489","text":"# BT3051 Assignment 3\n#Roll number : BE14B002\n# Collaborators : None\n#Time: 1:30\nfrom bs4 import BeautifulSoup as bs\ndef parse_SBML(name):\n\twith open(name) as content:\n\t\tsoup = bs(content, 'xml')\n\t\tname={}\n\tfor ispecies in soup.listOfSpecies:\n\t\tif(ispecies.name ==\"species\"):\n\t\t\tname[ispecies.attrs['id']]=ispecies.attrs['name']\n\tfor ireactions in soup.listOfReactions:\n\t\treactflag=0\n\t\tprodflag=0\n\t\tif ireactions.name == \"reaction\" :\n\t\t\tprint(ireactions.attrs['id'],\":\",end=\" \",sep=\"\")\n\t\t\tfor ireactants in ireactions.listOfReactants:\t\t\t\t\n\t\t\t\tif ireactants.name == \"speciesReference\":\n\t\t\t\t\tif reactflag == 1:\n\t\t\t\t\t\tprint(\"+\",end=\" \")\n\t\t\t\t\tif int(float(ireactants.attrs['stoichiometry']))==1:\n\t\t\t\t\t\tprint(name[ireactants.attrs['species']],end=\" \")\n\t\t\t\t\t\treactflag=1\n\t\t\t\t\telif float(ireactants.attrs['stoichiometry']).is_integer():\n\t\t\t\t\t\tprint(int(float(ireactants.attrs['stoichiometry'])),name[ireactants.attrs['species']],end=\" \")\n\t\t\t\t\t\treactflag=1\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(ireactants.attrs['stoichiometry'],name[ireactants.attrs['species']],end=\" \")\n\t\t\t\t\t\treactflag=1\n\t\t\tif ireactions.attrs['reversible'] == \"true\":\n\t\t\t\tprint(\"<==>\",end=\" \")\n\t\t\telse :\n\t\t\t\tprint(\"==>\",end=\" \")\n\t\t\tfor iproducts in ireactions.listOfProducts:\t\t\t\t\n\t\t\t\tif iproducts.name == \"speciesReference\":\n\t\t\t\t\tif prodflag == 1:\n\t\t\t\t\t\tprint(\"+\",end=\" \")\n\t\t\t\t\tif int(float(iproducts.attrs['stoichiometry']))==1:\n\t\t\t\t\t\tprint(name[iproducts.attrs['species']],end=\" \")\n\t\t\t\t\t\tprodflag=1\n\t\t\t\t\telif float(iproducts.attrs['stoichiometry']).is_integer():\n\t\t\t\t\t\tprint(int(float(iproducts.attrs['stoichiometry'])),name[iproducts.attrs['species']],end=\" \")\n\t\t\t\t\t\tprodflag=1\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(iproducts.attrs['stoichiometry'],name[iproducts.attrs['species']],end=\" \")\n\t\t\t\t\t\tprodflag=1\n\t\t\tprint()\nparse_SBML(\"Ec_iAF1260_flux1.xml\")","sub_path":"hw/hw3/hw3.py","file_name":"hw3.py","file_ext":"py","file_size_in_byte":1788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"509949371","text":"import sys\nfrom collections import deque\n\n\nN, M = map(int, sys.stdin.readline().split())\n\nocean = [[0] * (N + 1) for _ in range(N + 
1)]\nfor idx in range(1, N+1):\n link = list(map(int, sys.stdin.readline().split()))\n for i, li in enumerate(link):\n if li:\n ocean[idx][i+1] = 1\n\ndist = [0] * (N + 1)\n\ndef bfs(start: int):\n visited = [start]\n queue = deque([start])\n dist[start] = 1\n \n while queue:\n current_node = queue.popleft()\n ans = []\n for search_node in range(len(ocean[current_node])):\n if ocean[current_node][search_node] and not search_node in visited:\n queue.append(search_node)\n visited.append(search_node)\n dist[search_node] += dist[current_node] + 1\n \n rel_ans = []\n for i in range(1, N+1):\n ans = []\n for j in range(1, N+1):\n if dist[j] == i:\n ans.append(j)\n if ans: \n rel_ans.append(ans)\n \n for rel in rel_ans:\n print(*rel)\n\nif __name__ == \"__main__\":\n bfs(M)\n","sub_path":"week_4/신윤재/5975.py","file_name":"5975.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"599605980","text":"'''\nplay_sync.py\n\nPlay back an audio file synchronously.\n'''\n\nimport pygame\n\ndef sync_playback(filename):\n # takes in a file and plays it back \n pygame.mixer.init()\n pygame.mixer.music.load(filename)\n pygame.mixer.music.play()\n\nsync_playback('one.wav')\n","sub_path":"chapter_1_fundamentals/play_sync.py","file_name":"play_sync.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"559087860","text":"import socket\n\nclass serveur():\n\n def __init__(self):\n self.port = 0\n self.hote = \"localhost\"\n self.identifiant = \"\"\n self.mot_de_passe = \"\"\n\n #assure une premiere connection avec le serveur ( demande port & adresse )\n def connecter(self):\n while 1:\n while 1: #tant que l'utilisateur ne rentre pas un port correcte :\n try:\n self.port = input(\"Port du serveur : \")\n self.hote = input(\"Adresse du serveur : \")\n if self.port == \"\":\n self.port = 40000\n else:\n self.port = int(self.port)\n if self.hote == \"\":\n self.hote = \"localhost\"\n break\n\n except ValueError:\n print(\"Erreur : numero de port invalide.\")\n\n\n self.connexion = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n try:\n self.connexion.connect((self.hote, self.port))\n print(\"Info : connexion établie avec le serveur ({}) sur le port({})\".format(self.hote,self.port))\n return\n except socket.error:\n print(\"Erreur : impossible de se connecter au serveur.\")\n\n #se charge de la communication avec le serveur\n def communiquer(self):\n while 1:\n try:\n message_reçu = self.connexion.recv(1024).decode()\n if \"ask.\" in message_reçu:\n message_reçu = message_reçu.replace(\"ask.\", \"\")\n reponse = input(message_reçu)\n self.connexion.send(bytes(reponse, 'utf-8'))\n\n elif \"end.\" in message_reçu:\n self.fermer()\n else:\n print(message_reçu)\n except ConnectionResetError:\n print(\"Le serveur vous a deconnecté.\")\n\n #se charge de fermer le serveur proprement\n def fermer(self):\n print(\"Info : Fermeture de la connexion\")\n self.connexion.close()\n print(\"Info : Connexion interrompue\")\n\n\nserveur = serveur()\nserveur.connecter()\nserveur.communiquer()\nserveur.fermer()","sub_path":"chat/rsa_client.py","file_name":"rsa_client.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"292019778","text":"#####################################\n######## FILE ABANDONED 
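# Annotator's note (not in the original source): likely reasons this file
# was abandoned show up in the training loop further down -- `cost` applies
# tf.nn.softmax() to the logits instead of computing a loss, and the
# sess.run() call feeds only X although the graph also needs the Y
# placeholder. A minimal corrected loss for this binary cat/dog setup,
# assuming Z3 holds raw logits and Y the 0/1 labels:
#
#   cost = tf.reduce_mean(
#       tf.nn.sigmoid_cross_entropy_with_logits(labels=Y, logits=Z3))
#   _, t_cost = sess.run([optimizer, cost],
#                        feed_dict={X: minibatches_X[i], Y: minibatches_Y[i]})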
#############\n#####################################\n\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport cv2\nfrom sklearn.utils import shuffle\nimport tensorflow as tf\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\ntf.compat.v1.disable_eager_execution()\n\n\n#import pickle -- not used (instead used np.save and np.load)\n\n####################\n### LOAD DATASET ###\n####################\nDOGDATADIR = \"D:\\\\Work\\\\College\\\\Spring 2020\\\\DeepLearningCoursera-2\\\\training_set\\\\training_set\\\\dogs\"\nCATDATADIR = \"D:\\\\Work\\\\College\\\\Spring 2020\\\\DeepLearningCoursera-2\\\\training_set\\\\training_set\\\\cats\"\n\nIMG_SIZE = 224\ndog_arr = []\ncat_arr = []\n\ndog_data = np.empty(shape=(4000, 224, 224))\ncat_data = np.empty(shape=(4000, 224, 224))\n\n\ntry:\n dogpath = \"D:\\\\Work\\\\College\\\\Spring 2020\\\\DeepLearningCoursera-2\\\\dogsSaved\"\n catpath = \"D:\\\\Work\\\\College\\\\Spring 2020\\\\DeepLearningCoursera-2\\\\catsSaved\"\n dogs = open(dogpath, 'rb')\n cats = open(catpath, 'rb')\n dog_data = np.load(dogs)\n cat_data = np.load(cats)\n print('Existing save files found....')\nexcept IOError:\n \n for img in os.listdir(DOGDATADIR):\n try: \n dog_img = cv2.imread(os.path.join(DOGDATADIR, img), cv2.IMREAD_GRAYSCALE)\n dog_img = cv2.resize(dog_img, (IMG_SIZE, IMG_SIZE))\n dog_arr.append(dog_img)\n except:\n continue\n \n for img in os.listdir(CATDATADIR):\n try:\n cat_img = cv2.imread(os.path.join(CATDATADIR, img), cv2.IMREAD_GRAYSCALE)\n cat_img = cv2.resize(cat_img,(IMG_SIZE, IMG_SIZE))\n cat_arr.append(cat_img)\n except:\n continue\n \n\n dog_data = np.asanyarray(dog_arr)\n cat_data = np.asanyarray(cat_arr)\n dog_data = np.expand_dims(dog_data, axis=4)\n cat_data = np.expand_dims(cat_data, axis=4)\n\n outfile = open('dogsSaved', 'wb')\n np.save(outfile, dog_data)\n outfile.close()\n outfile2 = open('catsSaved', 'wb')\n np.save(outfile2, cat_data)\n outfile2.close()\n print(\"save files created for future runs.\")\n\n\n\ndog_label = np.ones((dog_data.shape[0], 1))\ncat_label = np.zeros((cat_data.shape[0],1))\n\nX_train_orig = np.concatenate((dog_data, cat_data), axis = 0)\nY_train_orig = np.concatenate((dog_label, cat_label), axis = 0)\n\nX_train, Y_train = shuffle(X_train_orig, Y_train_orig, random_state = 0)\nX_train = X_train/225\n\n\n## Create mini-batches\nminibatches_X = np.array_split(X_train, 16, axis = 0)\nminibatches_Y = np.array_split(Y_train, 16, axis = 0)\n\n\nprint(minibatches_Y[0].shape)\n\n\n### Sanity check ###\n'''\nplt.imshow(X_train[1], cmap = 'gray')\nplt.show()\nprint(Y_train[1])\n'''\n\n## Create placeholders\nX = tf.compat.v1.placeholder(tf.float32, (500, 224, 224, 1), name = 'X')\nY = tf.compat.v1.placeholder(tf.float32, (500, 1), name = 'Y')\n\n## Initializing parameters\ninitializer = tf.initializers.GlorotUniform()\nW1 = tf.Variable(initializer(shape = (24, 24, 1, 4)), name = \"W1\")\n# output = (201, 201, 4)\nW2 = tf.Variable(initializer(shape = (3, 3, 4, 4)), name = \"W2\")\n\n### Forw prop ###\n\nZ1 = tf.nn.conv2d(X, W1, strides = [1, 1, 1, 1], padding = 'SAME')\nA1 = tf.nn.relu(Z1)\nP1 = tf.nn.max_pool(A1, ksize = [1, 8, 8, 1], strides= [1,8,8,1], padding = 'SAME')\n\nZ2 = tf.nn.conv2d(P1, W2, strides = [1, 1, 1, 1], padding = \"SAME\")\nA2 = tf.nn.relu(Z2)\nP2 = tf.nn.max_pool(A2, ksize = [1, 4, 4, 1], strides = [1, 4, 4, 1], padding = \"SAME\")\n\nF = tf.compat.v1.layers.flatten(P2)\nZ3 = tf.compat.v1.layers.dense(F, 1)\n\ncost = tf.reduce_mean(tf.nn.softmax(Z3))\noptimizer = 
tf.compat.v1.train.AdamOptimizer(learning_rate=0.01).minimize(cost)\ninit = tf.compat.v1.global_variables_initializer()\nwith tf.compat.v1.Session() as sess:\n sess.run(init)\n minibatch_cost = 0\n i=0\n for i in range(16):\n print(\"HERE\")\n # for epoch in range(10):\n # minibatch_cost = 0\n # for i in range(16):\n # _, t_cost = sess.run([optimizer, cost], feed_dict={X:minibatches_X[i], Y:minibatches_Y[i]})\n # minibatch_cost += t_cost/16\n # print(\"Cost after epoch %i: %f\"%(epoch, minibatch_cost))\n l , t_cost = sess.run([optimizer, cost], feed_dict={X:minibatches_X[i]})\n \n print(\"Done\")\n #minibatch_cost+=t_cost/16\n print(\"Cost :\"+ t_cost)\n \n \n prediction = tf.argmax(Z3, 1)\n correct_pred = tf.equal(prediction, tf.argmax(Y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, \"float\"))\n print(accuracy)\n train_acc = accuracy.eval({X:X_train, Y:Y_train})\n print(\"Train accuracy: \"+train_acc)","sub_path":"catvdog.py","file_name":"catvdog.py","file_ext":"py","file_size_in_byte":4591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"566797914","text":"# ARC005C\n# 0-1BFS\n# 道は0,壁への移動を1として考える\n# 次が壁なら末尾に入れて,次が道なら先頭に入れる\n# 先頭から取り出す\n# ゴールにたどり着くまでに通った壁の個数を数える\n\nfrom collections import deque\n# 入力\nH, W = map(int, input().split())\nfield = [list(input()) for _ in range(H)]\n\n# 4方向の移動\ndx = [1, 0, -1, 0]\ndy = [0, 1, 0, -1]\n\n# BFSのためのデータ構造\ndist = [[-1] * W for _ in range(H)]\nque_x = deque()\nque_y = deque()\n\n# 探索中に各マスはどこからきたのかを表す配列\nprev_x = [[-1] * W for _ in range(H)]\nprev_y = [[-1] * W for _ in range(H)]\n\n\n# スタートとゴール\nfor i in range(H):\n for k in range(W):\n if field[i][k] == \"s\":\n s_x = i\n s_y = k\n if field[i][k] == \"g\":\n g_x = i\n g_y = k\n\n# 初期条件\ndist[s_x][s_y] = 0\nque_x.append(s_x)\nque_y.append(s_y)\n\n\n# BFS開始\nwhile que_x and que_y:\n x = que_x[0]\n y = que_y[0]\n que_x.popleft()\n que_y.popleft()\n for direct in range(4):\n nx = x + dx[direct]\n ny = y + dy[direct]\n if nx < 0 or H <= nx or ny < 0 or W <= ny:\n continue\n if dist[nx][ny] != -1:\n continue\n if field[nx][ny] == \"#\":\n dist[nx][ny] = 1\n que_x.append(nx)\n que_y.append(ny)\n prev_x[nx][ny] = x\n prev_y[nx][ny] = y\n else:\n dist[nx][ny] = 0\n que_x.appendleft(nx)\n que_y.appendleft(ny)\n prev_x[nx][ny] = x\n prev_y[nx][ny] = y\n\nx = g_x\ny = g_y\ncnt = 0\n\nwhile x != -1 and y != -1:\n if field[x][y] == \"#\":\n cnt += 1\n px = prev_x[x][y]\n py = prev_y[x][y]\n x, y = px, py\n\n##print(cnt)\nif 0 <= cnt <= 2:\n print(\"YES\")\nelse:\n print(\"NO\")","sub_path":"AtCoder_Ant/Beginner/2_1_3/ARC_005_C.py","file_name":"ARC_005_C.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"141333643","text":"import os\nimport ast\nimport logging\nimport argument_parser\nfrom support_methods import *\n\nlogging.basicConfig(level=logging.INFO)\n\n\ndef find_files_by_extention(from_path, extention, amount):\n files_list = []\n for whole_path, dirs, files in os.walk(from_path, topdown=True):\n for file in files:\n files_list.append(extention_only(file, whole_path, extention))\n if len(files_list) >= amount:\n break\n list = filter(None, files_list)\n logging.info('Found %s *.%s files' % (len(list), extention))\n return list\n\n\ndef get_nodes(files):\n trees = [ast_file_parser(f) for f in files]\n trees = filter(None, trees)\n nodes = list(flattening([ast.walk(t) for t in trees]))\n return nodes\n\n\ndef run(args=argument_parser.args):\n 
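    # Annotator's sketch of the pipeline below (comments only, not original):
    # 1. resolve where to search from args.source / args.path,
    # 2. collect up to args.amount files matching args.extention,
    # 3. walk their ASTs and pull function or variable names per args.entities,
    # 4. filter to args.part_of_speech, count the most common words, and hand
    #    the resulting dict to the chosen args.output method.
    # Note: the default `args=argument_parser.args` is evaluated once when the
    # function is defined, so callers wanting different options should pass
    # args explicitly.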
location = location_determining(args.source, args.path)\n files = find_files_by_extention(location, args.extention, args.amount)\n if args.entities == 'functions':\n entity_list = functions_names(get_nodes(files))\n else:\n entity_list = variables_names(get_nodes(files))\n logging.info('Searching in %s...' % args.entities)\n words = search_in(entity_list, args.part_of_speech)\n logging.info('Looking for %s' % args.part_of_speech)\n semi_result = the_most_common_of(words, len(words))\n dictionary = dict((x, y) for x, y in semi_result)\n output_method(args.output, dictionary)\n","sub_path":"code_observer.py","file_name":"code_observer.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"596538736","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport glob\n\n#l = [pd.read_csv(filename) for filename in glob.glob(\"/path/*.txt\")]\n\n\n#file_02Nm = 'W:/Projekte/MAXCoat_61906/04_Bearbeitung/GDL-Analyse/Abbott_Analyse/abbott_0,2_curve.txt'\n\nfor filename in glob.glob('W:/Projekte/MAXCoat_61906/04_Bearbeitung/GDL-Analyse/Abbott_Analyse/*.txt'):\n df_abbott_specs = pd.read_csv(filename, decimal=',', encoding='cp1252',\n error_bad_lines=False, delim_whitespace=True,\n index_col=False, keep_default_na=False, skiprows=5)\n\n print(df_abbott_specs)\n\n\n df_xvalues = df_abbott_specs['%']\n df_yvalues = df_abbott_specs['µm']\n\n\n x_values = np.asarray(df_abbott_specs['%'])\n y_values = np.asarray(df_abbott_specs['µm'])\n\n graphname = filename.split('/')[-1].split('\\\\')[-1].strip('abbott_').strip('_curve.tx')\n plt.plot(x_values, y_values, label=graphname)\n plt.gca().invert_yaxis()\n\nplt.legend()\nplt.show()","sub_path":"abbott_new.py","file_name":"abbott_new.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"97312464","text":"from django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import render_to_string\nfrom main.models import Advert\nfrom django.utils import timezone\n\nfrom conf import settings\nfrom main.celery import app\nfrom main.models import Subscribe\n\n\n@app.task()\ndef newsletter(subject, object_list=None, **kwargs):\n mail_list = Subscribe.objects.values_list('user__email', flat=True)\n from_email = settings.DEFAULT_FROM_EMAIL\n if object_list:\n context = {'subject': subject, 'object_list': object_list}\n else:\n title = kwargs.get('title')\n price = kwargs['price']\n context = {'subject': subject, 'title': title, 'price': price}\n html_content = render_to_string('news/news.html', context=context)\n\n for email in mail_list:\n msg = EmailMultiAlternatives(\n subject,\n html_content,\n from_email,\n [email])\n msg.content_subtype = 'html'\n msg.send()\n\n\n@app.task()\ndef news():\n # Отправлять сводку еженедельных подборок товаров через celery\n tm = timezone.now() - timezone.timedelta(days=7)\n object_list = Advert.objects.filter(created__gte=tm).only(\n 'advert_title', 'price')[:9]\n newsletter(subject='Новое за неделю', object_list=object_list)\n","sub_path":"main/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"328026242","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractUser,User\n# Create your models here.\nclass UserInfo(models.Model):\n name = 
models.CharField(max_length=32,verbose_name=\"用户名\",unique=True,null=False)\n password = models.CharField(max_length=32,verbose_name=\"密码\")\n email = models.CharField(max_length=32,verbose_name=\"邮箱\")\n\n class Meta:\n verbose_name = \"用户信息表\"\n\nclass Blog(models.Model):\n surfix = models.CharField(max_length=32,verbose_name=\"博客后缀\")\n theme = models.CharField(max_length=32,verbose_name=\"主题 \")\n title = models.CharField(max_length=32,verbose_name=\"标题\")\n summary = models.CharField(max_length=256,verbose_name=\"简介\")\n user = models.OneToOneField(to=\"UserInfo\")\n\nclass FansEachother(models.Model):\n star_id = models.ForeignKey(to=\"UserInfo\",related_name=\"s_id\",verbose_name=\"明星ID\")\n fans_id = models.ForeignKey(to=\"UserInfo\",related_name=\"f_id\",verbose_name=\"粉丝ID\")\n\nclass MaintenlanceForm(models.Model):\n uuid = models.CharField(max_length=32,verbose_name=\"报障单号\")\n title = models.CharField(max_length=32,verbose_name=\"标题\")\n ctime = models.DateTimeField(auto_now=True,verbose_name=\"创建时间\")\n dtime = models.DateTimeField(verbose_name=\"处理时间\",null=True)\n initiator = models.ForeignKey(to=\"UserInfo\",related_name=\"initiator\",verbose_name=\"发起人\")\n processor = models.ForeignKey(to=\"UserInfo\",related_name=\"processor\",verbose_name=\"处理人\")\n status_choices = (\n (1,\"待处理\"),\n (2,\"处理中\"),\n (3,\"已完成\"),\n )\n status = models.IntegerField(choices=status_choices,default=1,null=False)\n\nclass Classify(models.Model):\n caption = models.CharField(max_length=32,verbose_name=\"标题\")\n bid = models.ForeignKey(to=\"Blog\")\n\n def __str__(self):\n return self.caption\n\nclass Tags(models.Model):\n caption = models.CharField(max_length=32,verbose_name=\"标题\")\n bid = models.ForeignKey(to=\"Blog\")\n\n def __str__(self):\n return self.caption\n\nclass Article(models.Model):\n # id = models.AutoField()\n title = models.CharField(max_length=32,verbose_name=\"标题\")\n summary = models.CharField(max_length=128,verbose_name=\"简介\")\n detail = models.TextField(verbose_name=\"文章详情\")\n ctime = models.DateTimeField(auto_now=True)\n type_of_article = (\n (1,\"Python\"),\n (2,\"OpenStack\"),\n (3,\"GoLang\"),\n (4,\"MySQL\"),\n )\n types = models.IntegerField(choices=type_of_article,default=1)\n\nclass Article_to_Tags(models.Model):\n # id = models.AutoField()\n article_id = models.ForeignKey(to=\"Article\")\n tags_id = models.ForeignKey(to=\"Tags\")\n\nclass Comment(models.Model):\n # id = models.AutoField()\n content = models.TextField()\n user = models.ForeignKey(to=\"UserInfo\")\n ctime = models.DateTimeField(auto_now=True,null=False)\n parent_comment_id = models.ForeignKey(to=\"Comment\",null=True,default=\"\")\n article = models.ForeignKey(to=\"Article\")\n\nclass Up_Down(models.Model):\n # id = models.AutoField()\n article_id = models.ForeignKey(to=\"Article\")\n user_id = models.ForeignKey(to=\"UserInfo\")\n is_up = models.BooleanField(default=\"\")\n #联合唯一索引\n\n","sub_path":"repository/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"329048551","text":"from github import Github\nimport os\n\n\n# Please replace \"\" with your own GitHub Access Token\ng = Github(\"\")\n# g = Github('9fc70c72f642a8979bc0788cdd90d3c9f23cd2a2')\n\n\nrepo_full_name = 'eclipse/eclipse.platform.ui'\n\n\nrepo_name = repo_full_name.split('/')[1]\n\nrepo = g.get_repo(repo_full_name)\ntag_list = repo.get_tags()\nprint('==============================')\nprint('Accessing 
repo: [' + repo.name + ']')\n\n\nrelease_date_file_name = os.path.join('./step2_results/', repo_name + \"_release_date.csv\" )\nrelease_date_file = open(release_date_file_name, \"a\")\n\nprint('Continue getting release date in repo: ' + repo_name + ' after 1 hour.')\nprint('==============================')\n\n\ntotal_count = tag_list.totalCount\n\nfor t in tag_list[4500:]: \n release_date_file.write(str(t.commit.commit.committer.date) + '\\n')\n\nrelease_date_file.close()","sub_path":"step2/step2_3.py","file_name":"step2_3.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"142947790","text":"import heapq\nfrom collections import Counter,deque\nclass key_val():\n def __init__(self, key, val):\n self.key = key\n self.val = val\n def __gt__(self, other):\n if self.val > other.val:\n return True\n elif self.val == other.val:\n return self.key < other.key\n return False\n\nclass Solution(object):\n def topKFrequent(self, words, k):\n \"\"\"\n :type words: List[str]\n :type k: int\n :rtype: List[str]\n \"\"\"\n if len(words) == 0 or k == 0:\n return []\n dic = Counter(words)\n heap = []\n for key, val in dic.items():\n heapq.heappush(heap, key_val(key, val))\n if len(heap) > k:\n heapq.heappop(heap)\n res = deque()\n for _ in range(k):\n res.appendleft(heapq.heappop(heap).key)\n return res\na = Solution()\ns = [\"i\", \"love\", \"leetcode\", \"i\", \"love\", \"coding\"]\nprint(a.topKFrequent(s, 4))\nprint(\"Done\")\n\n\n\n","sub_path":"All_about_sorted/quickSort/Leetcode_692/lc_692.py","file_name":"lc_692.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"153280919","text":"import os\nimport shutil\n\npath='./data/noise256_0.915'\n\nwith open('noise.txt','a') as n:\n for i in os.listdir(path):\n src='./data/train256/' + i[:4] + '/' + i\n dst='./data/noise256/' + i\n n.writelines('./data/train256/'+i[:4]+'/'+i+'\\n')\n if not os.path.exists(src):\n print(src)\n continue\n shutil.move(src,dst)\n\n\n\n\n","sub_path":"listnoise.py","file_name":"listnoise.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"256004779","text":"import re\r\nimport os\r\nimport bs4\r\nimport time\r\nimport logger\r\nimport random\r\nimport hashlib\r\nimport threading\r\nimport urllib.parse as urlpar\r\nimport urllib.request as urlreq\r\n\r\n\r\nclass crawler:\r\n\tdef __init__(self, print_function = print):\r\n\t\t# initialize environment variables\r\n\t\tself.__attack_payloads = [\r\n\t\t\t\"\\\"><%s><\\\"\",\r\n\t\t\t\"\\'><%s><\\'\",\r\n\t\t\t\"\\\" %s=\\\"\",\r\n\t\t\t\"\\' %s=\\'\",\r\n\t\t]\r\n\t\tself.__try_count = 0\r\n\t\tself.__find_count = 0\r\n\r\n\t\tself.__target_url = \"\"\r\n\t\tself.__http_headers = {}\r\n\t\tself.__allowed_hosts = []\r\n\t\tself.__not_allowed_paths = []\r\n\t\tself.__log_file_path = \"\"\r\n\t\tself.__max_scan_depth = 1\r\n\r\n\t\tself.__print = print_function\r\n\t\treturn\r\n\r\n\tdef get_try_count(self):\r\n\t\treturn self.__try_count\r\n\r\n\tdef get_find_count(self):\r\n\t\treturn self.__find_count\r\n\r\n\tdef set_log_file_path(self, path):\r\n\t\t# valid check parameter\r\n\t\tif type(path) != str:\r\n\t\t\treturn False\r\n\r\n\t\t# assign variable value\r\n\t\tself.__log_file_path = path\r\n\t\treturn True\r\n\r\n\tdef set_max_scan_depth(self, depth):\r\n\t\t# type convert\r\n\t\tif 
type(depth) == str and depth.isdigit():\r\n\t\t\tdepth = int(depth)\r\n\r\n\t\t# valid check parameter\r\n\t\tif type(depth) != int or depth < 1:\r\n\t\t\treturn False\r\n\r\n\t\t# assign variable value\r\n\t\tself.__max_scan_depth = depth\r\n\t\treturn True\r\n\r\n\tdef set_target_url(self, url):\r\n\t\t# valid check parameter\r\n\t\tif type(url) != str or not self.__is_allowed_url(url):\r\n\t\t\treturn False\r\n\r\n\t\tself.__target_url = url\r\n\t\treturn True\r\n\r\n\tdef set_http_header(self, headers):\r\n\t\t# valid check parameter\r\n\t\tif type(headers) != dict:\r\n\t\t\treturn False\r\n\r\n\t\tfor key in headers.keys():\r\n\t\t\tif type(key) != str or type(headers[key]) != str:\r\n\t\t\t\treturn False\r\n\r\n\t\t# assign variable value\r\n\t\tself.__http_headers = headers\r\n\t\treturn True\r\n\r\n\tdef set_allowed_host(self, hosts):\r\n\t\t# valid check parameter\r\n\t\tif type(hosts) != list:\r\n\t\t\treturn False\r\n\r\n\t\tself.__allowed_hosts = []\r\n\t\tfor host in hosts:\r\n\t\t\t# valid check parameter\r\n\t\t\tif type(host) != str:\r\n\t\t\t\treturn False\r\n\r\n\t\t\t# parse the host\r\n\t\t\tparsed_url = urlpar.urlparse(host)\r\n\t\t\tif parsed_url == False:\r\n\t\t\t\treturn False\r\n\r\n\t\t\t# assign variable value\r\n\t\t\tself.__allowed_hosts.append((parsed_url.scheme.lower(), parsed_url.netloc.lower()))\r\n\t\treturn True\r\n\r\n\tdef set_not_allowed_path(self, paths):\r\n\t\t# valid check parameter\r\n\t\tif type(paths) != list:\r\n\t\t\treturn False\r\n\r\n\t\tself.__not_allowed_paths = []\r\n\t\tfor path in paths:\r\n\t\t\tif type(path) != str:\r\n\t\t\t\treturn False\r\n\r\n\t\t\t\t# assign variable value\r\n\t\t\t\tself.__not_allowed_paths.append(path.lower())\r\n\t\treturn True\r\n\r\n\tdef scan(self):\r\n\r\n\t\tself.__print(\"Started to scan.\")\r\n\r\n\t\t# assign variables\r\n\t\tself.__attack_check_string = self.__get_rand_str(5)\r\n\t\tself.__visited = []\r\n\t\tself.__thread_count = 0\r\n\t\tself.__try_count = 0\r\n\t\tself.__find_count = 0\r\n\r\n\t\t# initialize logger\r\n\t\tself.__log = logger.logger(self.__log_file_path, {\r\n\t\t\t'url': self.__target_url,\r\n\t\t\t'attack_check_string': self.__attack_check_string,\r\n\t\t})\r\n\r\n\t\t# start crawling\r\n\t\tself.__crawling({\r\n\t\t\t'url': self.__target_url,\r\n\t\t\t'method': \"get\",\r\n\t\t\t'referer': \"\",\r\n\t\t\t\"get_params\": {},\r\n\t\t\t\"post_params\": {},\r\n\t\t})\r\n\r\n\t\t# wait while working\r\n\t\twhile self.__thread_count > 0:\r\n\t\t\ttime.sleep(0.001)\r\n\r\n\t\t# close log\r\n\t\tself.__log.close()\r\n\r\n\t\tself.__print(\"Succeed to scan. 
( Try: %d / Find : %d )\" % (self.get_try_count(), self.get_find_count()))\r\n\t\treturn True\r\n\r\n\tdef __request_web(self, target):\r\n\t\ttry:\r\n\t\t\t# parse the url of target\r\n\t\t\tparsed_url = urlpar.urlparse(target['url'])\r\n\t\t\turl = urlpar.urlunparse((parsed_url.scheme, parsed_url.netloc, parsed_url.path, \"\", urlpar.urlencode(target['get_params']), \"\"))\r\n\t\t\tdel parsed_url\r\n\r\n\t\t\t# if http method is post, assign post parameters\r\n\t\t\tpost_params = urlpar.urlencode(target['post_params']).encode() if target['method'] == \"post\" else None\r\n\r\n\t\t\t# if not set referer header, assign referer header\r\n\t\t\theaders = self.__http_headers\r\n\t\t\tif \"referer\" not in list(map(lambda x: x.lower(), headers.keys())):\r\n\t\t\t\theaders['referer'] = target['referer']\r\n\r\n\t\t\t# request from target and get response\r\n\t\t\tres = urlreq.urlopen(urlreq.Request(url, post_params, headers))\r\n\r\n\t\t\t# check if content-type header is text/html\r\n\t\t\tif res.info().get_content_type() != \"text/html\":\r\n\t\t\t\treturn False\r\n\r\n\t\t\t# encode response\r\n\t\t\thtml = res.read().decode(\"utf-8\", \"ignore\")\r\n\t\t\treturn html\r\n\r\n\t\texcept Exception as err_msg:\r\n\t\t\t#print(\"Error:\", target['url'], err_msg)\r\n\t\t\treturn False\r\n\r\n\tdef __get_rand_str(self, min_len, max_len = None, table = \"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"):\r\n\t\t# get random string\r\n\t\tres = \"\"\r\n\t\tfor _ in range(min_len if max_len == None else random.randint(min_len, max_len)):\r\n\t\t\tres += random.choice(table)\r\n\t\treturn res\r\n\r\n\tdef __parse_target_from_html(self, target):\r\n\t\tres = []\r\n\r\n\t\t# get html source from target\r\n\t\thtml = self.__request_web(target)\r\n\t\tif html == False:\r\n\t\t\treturn False\r\n\r\n\t\t# parse html source\r\n\t\ttry:\r\n\t\t\tsoup = bs4.BeautifulSoup(html, \"html.parser\")\r\n\t\texcept:\r\n\t\t\treturn False\r\n\t\tdel html\r\n\r\n\t\t# parse url from html tags and strings\r\n\t\turls = []\r\n\t\turls += [urlpar.urljoin(target['url'], url) for url in re.findall(\"(http[s]?:)?//(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+\", soup.prettify())]\r\n\t\turls += [urlpar.urljoin(target['url'], tag.attrs.get(\"href\", \"\")) for tag in soup.select(\"a\")]\r\n\t\turls += [urlpar.urljoin(target['url'], tag.attrs.get(\"src\", \"\")) for tag in soup.select(\"iframe\")]\r\n\t\turls += [urlpar.urljoin(target['url'], tag.attrs.get(\"data\", \"\")) for tag in soup.select(\"object\")]\r\n\t\turls += [urlpar.urljoin(target['url'], tag.attrs.get(\"src\", \"\")) for tag in soup.select(\"script\")]\r\n\t\turls += [urlpar.urljoin(target['url'], tag.attrs.get(\"href\", \"\")) for tag in soup.select(\"link\")]\r\n\r\n\t\tfor url in urls:\r\n\t\t\t# check if url is valid\r\n\t\t\tif not self.__is_allowed_url(url):\r\n\t\t\t\tcontinue\r\n\r\n\t\t\t# append to results\r\n\t\t\tparsed_url = urlpar.urlparse(url)\r\n\t\t\tres.append({\r\n\t\t\t\t\"method\": \"get\",\r\n\t\t\t\t\"url\": urlpar.urlunparse((parsed_url.scheme, parsed_url.netloc, parsed_url.path, \"\", \"\", \"\")),\r\n\t\t\t\t\"get_params\": dict(urlpar.parse_qsl(parsed_url.query)),\r\n\t\t\t\t\"post_params\": {},\r\n\t\t\t\t\"referer\": target['url'],\r\n\t\t\t\t\"depth\": target['depth'] + 1\r\n\t\t\t})\r\n\t\t\tdel parsed_url\r\n\r\n\t\tdel urls\r\n\r\n\t\t# parse url from form tag\r\n\t\tfor form_tag in soup.select(\"form\"):\r\n\t\t\t# get url from action attribute\r\n\t\t\turl = urlpar.urljoin(target['url'], 
form_tag.attrs.get(\"action\", \"\"))\r\n\r\n\t\t\t# check if url is allowed\r\n\t\t\tif not self.__is_allowed_url(url):\r\n\t\t\t\tcontinue\r\n\r\n\t\t\t# get http method from method attribute\r\n\t\t\tmethod = form_tag.attrs.get(\"method\", \"get\").lower()\r\n\r\n\t\t\t# parse input tags\r\n\t\t\tparams = {}\r\n\t\t\tfor input_tag in form_tag.select(\"input\"):\r\n\r\n\t\t\t\t# exclude no named parameter and button type\r\n\t\t\t\tif len(input_tag.attrs.get(\"name\", \"\")) == 0 or input_tag.attrs.get(\"type\", \"\").lower() in [\"button\", \"submit\"]:\r\n\t\t\t\t\tcontinue\r\n\r\n\t\t\t\t# parse default value of parameter\r\n\t\t\t\tif len(input_tag.attrs.get(\"value\", \"\")) > 0:\r\n\t\t\t\t\tparams[input_tag.attrs['name']] = input_tag.attrs['value']\r\n\r\n\t\t\t\t# set value automatically\r\n\t\t\t\telse:\r\n\t\t\t\t\tinput_tag_type = input_tag.attrs.get(\"type\", \"\").lower()\r\n\r\n\t\t\t\t\t# set random email\r\n\t\t\t\t\tif input_tag_type == \"email\":\r\n\t\t\t\t\t\tusername = self.__get_rand_str(3, 5)\r\n\t\t\t\t\t\tdomain = self.__get_rand_str(3, 4) + \".\" + self.__get_rand_str(3, 4)\r\n\t\t\t\t\t\tparams[input_tag.attrs['name']] = username + \"@\" + domain\r\n\r\n\t\t\t\t\t# set random number\r\n\t\t\t\t\telif input_tag_type == \"number\":\r\n\t\t\t\t\t\tmin_val = int(input_tag.attrs.get(\"min\", 1))\r\n\t\t\t\t\t\tmax_val = int(input_tag.attrs.get(\"max\", 99999))\r\n\t\t\t\t\t\tparams[input_tag.attrs['name']] = str(random.randint(min_val, max_val))\r\n\t\t\t\t\t\tdel min_val, max_val\r\n\r\n\t\t\t\t\t# set random value\r\n\t\t\t\t\telif \"required\" in input_tag.attrs or \"pattern\" in input_tag.attrs or \"minlength\" in input_tag.attrs:\r\n\t\t\t\t\t\tmin_len = int(input_tag.attrs.get(\"minlength\", 6))\r\n\t\t\t\t\t\tmax_len = int(input_tag.attrs.get(\"maxlength\", 8))\r\n\t\t\t\t\t\tparams[input_tag.attrs['name']] = self.__get_rand_str(min_len, max_len)\r\n\t\t\t\t\t\tdel min_len, max_len\r\n\r\n\t\t\t\t\t# set null value\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tparams[input_tag.attrs['name']] = \"\"\r\n\r\n\t\t\t\t\tdel input_tag_type\r\n\r\n\t\t\t# if http method isn't post, params change to url query\r\n\t\t\tif method == \"post\":\r\n\t\t\t\tget_params = {}\r\n\t\t\t\tpost_params = params\r\n\t\t\telse:\r\n\t\t\t\tget_params = params\r\n\t\t\t\tpost_params = {}\r\n\r\n\t\t\t# append to results\r\n\t\t\tparsed_url = urlpar.urlparse(url)\r\n\t\t\tget_params.update(dict(urlpar.parse_qsl(parsed_url.query)))\r\n\t\t\tres.append({\r\n\t\t\t\t\"method\": method,\r\n\t\t\t\t\"url\": urlpar.urlunparse((parsed_url.scheme, parsed_url.netloc, parsed_url.path, \"\", \"\", \"\")),\r\n\t\t\t\t\"get_params\": get_params,\r\n\t\t\t\t\"post_params\": post_params,\r\n\t\t\t\t\"referer\": target['url'],\r\n\t\t\t\t\"depth\": target['depth'] + 1\r\n\t\t\t})\r\n\t\t\tdel method, parsed_url, params\r\n\r\n\t\treturn res\r\n\r\n\tdef __is_allowed_url(self, url):\r\n\t\ttry:\r\n\t\t\tparsed_url = urlpar.urlparse(url)\r\n\r\n\t\t\t# check if host is allowed\r\n\t\t\tif (parsed_url.scheme.lower(), parsed_url.netloc.lower()) not in self.__allowed_hosts:\r\n\t\t\t\treturn False\r\n\r\n\t\t\t# check if path is not allowed\r\n\t\t\tif parsed_url.path.lower() in self.__not_allowed_paths:\r\n\t\t\t\treturn False\r\n\r\n\t\t\t# check if path is possible parse extension\r\n\t\t\t#if re.match(\".*?\\.(png|jpe?g|gif|bmp|asx|asf|wmv|wma|mpe?g|mov|avi|mp3|mp4||swf|css|sass|less|js|tar|gz|zip|rar|txt)$\", parsed_url.path, re.I):\r\n\t\t\t#\treturn False\r\n\r\n\t\t\treturn True\r\n\t\texcept:\r\n\t\t\treturn 
False\r\n\r\n\tdef __check_vuln(self, target):\r\n\t\tself.__thread_count += 1\r\n\t\tself.__try_count += 1\r\n\r\n\t\tself.__print(\"Tried to attack with %s\" % target['url'])\r\n\r\n\t\t# get html source from target\r\n\t\thtml = self.__request_web(target)\r\n\t\tif html == False:\r\n\t\t\ttarget['is_find'] = False\r\n\r\n\t\telse:\r\n\t\t\ttry:\r\n\t\t\t\t# parse html source\r\n\t\t\t\tsoup = bs4.BeautifulSoup(html, \"html.parser\")\r\n\t\t\t\tdel html\r\n\t\t\t\t# check\r\n\t\t\t\tif soup.find(self.__attack_check_string) != None or soup.findAll(lambda tag: self.__attack_check_string.lower() in list(map(lambda x: x.lower(), tag.attrs.keys()))) != []:\r\n\t\t\t\t\tself.__print(\"Found vulnerability from \", target['url'])\r\n\t\t\t\t\tself.__find_count += 1\r\n\t\t\t\t\ttarget['is_find'] = True\r\n\t\t\t\telse:\r\n\t\t\t\t\ttarget['is_find'] = False\r\n\t\t\texcept:\r\n\t\t\t\ttarget['is_find'] = False\r\n\r\n\t\tself.__log.write(target)\r\n\t\tself.__thread_count -= 1\r\n\t\treturn target['is_find']\r\n\r\n\tdef __crawling(self, main_target):\r\n\t\tmain_target['depth'] = 1\r\n\t\ttargets = [ main_target ]\r\n\r\n\t\twhile len(targets) > 0:\r\n\t\t\t# pop first target from targets list\r\n\t\t\ttarget = targets[0]\r\n\t\t\tdel targets[0]\r\n\r\n\t\t\ttext = target['url'] + \"\\n\" + str(target['get_params'].keys()) + \"\\n\" + str(target['post_params'].keys())\r\n\t\t\thashed = int(hashlib.md5(text.encode()).hexdigest(), 16)\r\n\t\t\tif hashed in self.__visited:\r\n\t\t\t\tcontinue\r\n\t\t\tself.__visited.append(hashed)\r\n\t\t\tdel text, hashed\r\n\r\n\t\t\t# append parsed target\r\n\t\t\tif target['depth'] < self.__max_scan_depth:\r\n\t\t\t\ttemp_target = self.__parse_target_from_html(target)\r\n\t\t\t\tif temp_target != False:\r\n\t\t\t\t\ttargets += temp_target\r\n\t\t\t\tdel temp_target\r\n\r\n\t\t\t# if http method is post..\r\n\t\t\tif target['method'].lower() == \"post\":\r\n\r\n\t\t\t\tfor payload in self.__attack_payloads:\r\n\t\t\t\t\tfor key in target['post_params'].keys():\r\n\t\t\t\t\t\ttemp_target = target.copy()\r\n\r\n\t\t\t\t\t\t# set attack payload to parameters\r\n\t\t\t\t\t\ttemp_post_param = target['post_params'].copy()\r\n\t\t\t\t\t\ttemp_post_param[key] = payload % self.__attack_check_string\r\n\t\t\t\t\t\ttemp_target['post_params'] = temp_post_param\r\n\t\t\t\t\t\tdel temp_post_param\r\n\r\n\t\t\t\t\t\t# check vulnerability\r\n\t\t\t\t\t\tthreading.Thread(target = self.__check_vuln, args = (temp_target, )).start()\r\n\t\t\t\t\t\tdel temp_target\r\n\r\n\t\t\t# attack by url query\r\n\t\t\tfor payload in self.__attack_payloads:\r\n\t\t\t\tfor key in target['get_params'].keys():\r\n\t\t\t\t\ttemp_target = target.copy()\r\n\r\n\t\t\t\t\t# set attack payload to parameters\r\n\t\t\t\t\ttemp_get_param = target['get_params'].copy()\r\n\t\t\t\t\ttemp_get_param[key] = payload % self.__attack_check_string\r\n\t\t\t\t\ttemp_target['get_params'] = temp_get_param\r\n\t\t\t\t\tdel temp_get_param\r\n\r\n\t\t\t\t\t# check vulnerability\r\n\t\t\t\t\tthreading.Thread(target = self.__check_vuln, args = (temp_target, )).start()\r\n\t\t\t\t\tdel temp_target\r\n\r\n\t\treturn True","sub_path":"crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":12037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"571699334","text":"import pyeccodes.accessors as _\n\n\ndef load(h):\n\n def wrapped(h):\n\n discipline = h.get_l('discipline')\n parameterCategory = h.get_l('parameterCategory')\n parameterNumber = 
h.get_l('parameterNumber')\n\n        if discipline == 0 and parameterCategory == 7 and parameterNumber == 6:\n            return 'm**2 s**-2'\n\n        stepType = h.get_s('stepType')\n\n        if discipline == 0 and parameterCategory == 1 and parameterNumber == 9 and stepType == \"accum\":\n            return 'kg m**-2'\n\n        if discipline == 0 and parameterCategory == 1 and parameterNumber == 10 and stepType == \"accum\":\n            return 'kg m**-2'\n\n    return wrapped\n","sub_path":"pyeccodes/defs/grib2/localConcepts/lfpw/units_def.py","file_name":"units_def.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"631963076","text":"\n\n\n################# QuickSort Lomuto Partition\n\ndef partition(arr,l,h):\n\n    pivot=arr[h]\n    i=l-1\n    for j in range(l,h):\n        if arr[j]<=pivot:\n            i=i+1\n            arr[j],arr[i]=arr[i],arr[j]\n    arr[i+1],arr[h]=arr[h],arr[i+1]\n    return i+1\n    \n\n\ndef qSort(arr,l,h):\n    if l= 1:\n            spot_exponent -= 1\n            print('exponent:', spot_exponent)\n    elif key == b'e':\n        if spot_exponent <= 127:\n            spot_exponent += 1\n            print('exponent:', spot_exponent)\n    elif key == b'u':\n        if cutoff >= 1:\n            cutoff -= 1\n            print('cutoff:', cutoff)\n    elif key == b'o':\n        if cutoff <= 89:\n            cutoff += 1\n            print('cutoff:', cutoff)\n    glutPostRedisplay()\n\n# Program execution starts here\n# Use double buffering and RGB (Red, Green, Blue) color format\nglutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)\n# Set the initial window size (width, height)\nglutInitWindowSize(500, 500)\n# Set the initial window position relative to the top-left corner of the screen\nglutInitWindowPosition(50, 50)\n# Initialize OpenGl\nglutInit(sys.argv)\n# Create a window with the title \"Happy New Year!\"\nglutCreateWindow(\"Happy New Year!\")\n# Register the procedure responsible for redrawing\nglutDisplayFunc(draw)\n# Register the procedures responsible for key handling\nglutSpecialFunc(specialkeys)\nglutKeyboardFunc(keyPressed)\n# Call our initialization function\ninit()\n# Start the main loop\nglutMainLoop()\n","sub_path":"lab5/lab5.py","file_name":"lab5.py","file_ext":"py","file_size_in_byte":6218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"269908215","text":"\"\"\"This module is used to get the number of input files, filenames, original websearch \nfilename, read all files into separate dataframes and then combine the results dataframes\ninto one overall dataframe that will be processed further\n\"\"\"\nfrom __future__ import print_function\nfrom builtins import str\nfrom builtins import input\nfrom builtins import range\nimport pandas as pd\nimport time\nimport os\n\n\ndef numFiles():\n    \"\"\"This is to get the number of database results input files that need to be processed\n\n    Returns:\n        int: number of input files\n    \"\"\"\n    while True:\n        try:\n            countFiles = int(input(\"Number of input files:-\\n\\n \"))\n            return countFiles # need to make sure have more than one file!!\n        except ValueError:\n            print (\"Integer needed, try again!\")\n\n\ndef inputFiles(numFiles):\n    \"\"\"Asks user to input the name of each of the database results files\n    separately, reads them into separate dataframes and then concatenates them\n    into one new dataframe\n\n    Args:\n        number of input files (int): the number of database results files \n        that will be read into dataframes\n\n    Returns:\n        dataframe: dataframe of concatenated database results dataframe\n    \"\"\"\n    d = {}\n    inputFile = {}\n\n    for i in range(1, numFiles + 1):\n        d[i] = \"inputFile{0}\".format(i)\n        inputFile[i] = d[i]\n\n    for i in range(1, numFiles + 1):\n        inputF = input(\"Name of file \" + str(i) + \":-\\n\\n \")\n        inputFile[i] = pd.read_table(inputF, sep=',')\n\n# default is to concatenate along axis=0, i.e. rows - this is what I want\n# ignore index, as want to generate a new one based on concatenated data\n    dfCombined = pd.concat(inputFile, ignore_index=True).fillna(\"NA\")\n\n    return (dfCombined)\n\n\ndef inputOriginalfile():\n    \"\"\"Asks user to input the name of the original websearch input file \n    and reads it into a dataframe\n\n    Returns:\n        dataframe, str: original websearch file as dataframe, original websearch \n        filename as string\n    \"\"\"\n    originalFile = input(\n        \"Name of original masses/retention times file:-\\n\\n \")\n    originalDF = pd.read_table(originalFile, sep=',')\n    # rename MZ and Time from the lipidFinder output\n    originalDF.rename(columns={'MZ': 'ORIGINAL_MASS'}, inplace=True)\n    originalDF.rename(columns={'Time': 'RETENTION_TIME'}, inplace=True)\n\n    return originalDF, originalFile\n\n\ndef readParameters():\n    \"\"\"Read the parameters file and extract parameter(s) used during processing\n\n    Returns:\n        int: delta ppm value used as a cutoff during processing\n    \"\"\"\n    parmsFile = pd.read_table('websearch_parameters.csv', sep=',')\n\n    deltaPPM = int((parmsFile.ix[3][1]))\n\n    return (deltaPPM)\n","sub_path":"File Processing/processingSteps.py","file_name":"processingSteps.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"627371001","text":"# The MIT License (MIT)\n# Copyright (c) 2014 Microsoft Corporation\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"Internal class for global endpoint manager implementation in the Azure Cosmos\ndatabase service.\n\"\"\"\n\nimport threading\n\nfrom urllib.parse import urlparse\n\nfrom . import _constants as constants\nfrom . 
import exceptions\nfrom ._location_cache import LocationCache\n\n# pylint: disable=protected-access\n\n\nclass _GlobalEndpointManager(object):\n \"\"\"\n This internal class implements the logic for endpoint management for\n geo-replicated database accounts.\n \"\"\"\n\n def __init__(self, client):\n self.Client = client\n self.EnableEndpointDiscovery = client.connection_policy.EnableEndpointDiscovery\n self.PreferredLocations = client.connection_policy.PreferredLocations\n self.DefaultEndpoint = client.url_connection\n self.refresh_time_interval_in_ms = self.get_refresh_time_interval_in_ms_stub()\n self.location_cache = LocationCache(\n self.PreferredLocations,\n self.DefaultEndpoint,\n self.EnableEndpointDiscovery,\n client.connection_policy.UseMultipleWriteLocations,\n self.refresh_time_interval_in_ms,\n )\n self.refresh_needed = False\n self.refresh_lock = threading.RLock()\n self.last_refresh_time = 0\n\n def get_refresh_time_interval_in_ms_stub(self): # pylint: disable=no-self-use\n return constants._Constants.DefaultUnavailableLocationExpirationTime\n\n def get_write_endpoint(self):\n return self.location_cache.get_write_endpoint()\n\n def get_read_endpoint(self):\n return self.location_cache.get_read_endpoint()\n\n def resolve_service_endpoint(self, request):\n return self.location_cache.resolve_service_endpoint(request)\n\n def mark_endpoint_unavailable_for_read(self, endpoint):\n self.location_cache.mark_endpoint_unavailable_for_read(endpoint)\n\n def mark_endpoint_unavailable_for_write(self, endpoint):\n self.location_cache.mark_endpoint_unavailable_for_write(endpoint)\n\n def get_ordered_write_endpoints(self):\n return self.location_cache.get_ordered_write_endpoints()\n\n def get_ordered_read_endpoints(self):\n return self.location_cache.get_ordered_read_endpoints()\n\n def can_use_multiple_write_locations(self, request):\n return self.location_cache.can_use_multiple_write_locations_for_request(request)\n\n def force_refresh(self, database_account):\n self.refresh_needed = True\n self.refresh_endpoint_list(database_account)\n\n def refresh_endpoint_list(self, database_account, **kwargs):\n with self.refresh_lock:\n # if refresh is not needed or refresh is already taking place, return\n if not self.refresh_needed:\n return\n try:\n self._refresh_endpoint_list_private(database_account, **kwargs)\n except Exception as e:\n raise e\n\n def _refresh_endpoint_list_private(self, database_account=None, **kwargs):\n if database_account:\n self.location_cache.perform_on_database_account_read(database_account)\n self.refresh_needed = False\n\n if (\n self.location_cache.should_refresh_endpoints()\n and self.location_cache.current_time_millis() - self.last_refresh_time > self.refresh_time_interval_in_ms\n ):\n if not database_account:\n database_account = self._GetDatabaseAccount(**kwargs)\n self.location_cache.perform_on_database_account_read(database_account)\n self.last_refresh_time = self.location_cache.current_time_millis()\n self.refresh_needed = False\n\n def _GetDatabaseAccount(self, **kwargs):\n \"\"\"Gets the database account.\n\n First tries by using the default endpoint, and if that doesn't work,\n use the endpoints for the preferred locations in the order they are\n specified, to get the database account.\n \"\"\"\n try:\n database_account = self._GetDatabaseAccountStub(self.DefaultEndpoint, **kwargs)\n return database_account\n # If for any reason(non-globaldb related), we are not able to get the database\n # account from the above call to GetDatabaseAccount, we would try to 
get this\n # information from any of the preferred locations that the user might have\n # specified (by creating a locational endpoint) and keeping eating the exception\n # until we get the database account and return None at the end, if we are not able\n # to get that info from any endpoints\n except exceptions.CosmosHttpResponseError:\n for location_name in self.PreferredLocations:\n locational_endpoint = _GlobalEndpointManager.GetLocationalEndpoint(self.DefaultEndpoint, location_name)\n try:\n database_account = self._GetDatabaseAccountStub(locational_endpoint, **kwargs)\n return database_account\n except exceptions.CosmosHttpResponseError:\n pass\n raise\n\n def _GetDatabaseAccountStub(self, endpoint, **kwargs):\n \"\"\"Stub for getting database account from the client.\n\n This can be used for mocking purposes as well.\n \"\"\"\n return self.Client.GetDatabaseAccount(endpoint, **kwargs)\n\n @staticmethod\n def GetLocationalEndpoint(default_endpoint, location_name):\n # For default_endpoint like 'https://contoso.documents.azure.com:443/' parse it to\n # generate URL format. This default_endpoint should be global endpoint(and cannot\n # be a locational endpoint) and we agreed to document that\n endpoint_url = urlparse(default_endpoint)\n\n # hostname attribute in endpoint_url will return 'contoso.documents.azure.com'\n if endpoint_url.hostname is not None:\n hostname_parts = str(endpoint_url.hostname).lower().split(\".\")\n if hostname_parts is not None:\n # global_database_account_name will return 'contoso'\n global_database_account_name = hostname_parts[0]\n\n # Prepare the locational_database_account_name as contoso-EastUS for location_name 'East US'\n locational_database_account_name = global_database_account_name + \"-\" + location_name.replace(\" \", \"\")\n\n # Replace 'contoso' with 'contoso-EastUS' and return locational_endpoint\n # as https://contoso-EastUS.documents.azure.com:443/\n locational_endpoint = default_endpoint.lower().replace(\n global_database_account_name, locational_database_account_name, 1\n )\n return locational_endpoint\n\n return None\n","sub_path":"sdk/cosmos/azure-cosmos/azure/cosmos/_global_endpoint_manager.py","file_name":"_global_endpoint_manager.py","file_ext":"py","file_size_in_byte":7801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"580872648","text":"\"\"\"\nModule for guiding Arc/Sky line tracing\n\n.. include common links, assuming primary doc root is up one directory\n.. include:: ../include/links.rst\n\n\"\"\"\nimport os\nimport copy\nimport inspect\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom astropy import stats, visualization\n\nfrom pypeit import msgs, datamodel\nfrom pypeit.display import display\nfrom pypeit.core import arc\nfrom pypeit.core import tracewave\n\nfrom IPython import embed\n\n\nclass WaveTilts(datamodel.DataContainer):\n \"\"\"\n Simple DataContainer for the output from BuildWaveTilts\n\n All of the items in the datamodel are required for instantiation,\n although they can be None (but shouldn't be)\n\n \"\"\"\n version = '1.1.0'\n\n # MasterFrame fun\n master_type = 'Tilts'\n master_file_format = 'fits'\n\n datamodel = {'coeffs': dict(otype=np.ndarray, atype=np.floating,\n descr='2D coefficents for the fit on the initial slits. One '\n 'set per slit/order (3D array).'),\n 'bpmtilts': dict(otype=np.ndarray, atype=np.integer,\n descr='Bad pixel mask for tilt solutions. 
Keys are taken from '\n 'SlitTraceSetBitmask'),\n 'nslit': dict(otype=int,\n descr='Total number of slits. This can include masked slits'),\n 'spat_id': dict(otype=np.ndarray, atype=np.integer, descr='Slit spat_id '),\n 'spat_order': dict(otype=np.ndarray, atype=np.integer,\n descr='Order for spatial fit (nslit)'),\n 'spec_order': dict(otype=np.ndarray, atype=np.integer,\n descr='Order for spectral fit (nslit)'),\n 'func2d': dict(otype=str, descr='Function used for the 2D fit'),\n 'PYP_SPEC': dict(otype=str, descr='PypeIt spectrograph name'),\n 'spat_flexure': dict(otype=float, descr='Flexure shift from the input TiltImage')}\n\n def __init__(self, coeffs, nslit, spat_id, spat_order, spec_order, func2d, bpmtilts=None,\n spat_flexure=None, PYP_SPEC=None):\n\n # Parse\n args, _, _, values = inspect.getargvalues(inspect.currentframe())\n d = dict([(k,values[k]) for k in args[1:]])\n # Setup the DataContainer\n datamodel.DataContainer.__init__(self, d=d)\n\n def _init_internals(self):\n # Master stuff\n self.master_key = None\n self.master_dir = None\n\n def _bundle(self):\n \"\"\"\n Bundle the data in preparation for writing to a fits file.\n\n See :func:`pypeit.datamodel.DataContainer._bundle`. Data is\n always written to a 'TILTS' extension.\n \"\"\"\n return super(WaveTilts, self)._bundle(ext='TILTS')\n\n def is_synced(self, slits):\n \"\"\"\n Confirm the slits in WaveTilts are aligned to that in SlitTraceSet\n\n Barfs if not\n\n Args:\n slits (:class:`pypeit.slittrace.SlitTraceSet`):\n\n \"\"\"\n if not np.array_equal(self.spat_id, slits.spat_id):\n msgs.error(\"Your tilt solutions are out of sync with your slits. Remove Masters and start from scratch\")\n\n def fit2tiltimg(self, slitmask, flexure=None):\n \"\"\"\n Generate a tilt image from the fit parameters\n\n Mainly to allow for flexure\n\n Args:\n slitmask (`numpy.ndarray`_):\n flexure (float, optional):\n Spatial shift of the tilt image onto the desired frame\n (typically a science image)\n\n Returns:\n `numpy.ndarray`_: New tilt image\n\n \"\"\"\n _flexure = 0. 
if flexure is None else flexure\n\n final_tilts = np.zeros_like(slitmask).astype(float)\n gdslit_spat = np.unique(slitmask[slitmask >= 0]).astype(int)\n # Loop\n for slit_spat in gdslit_spat:\n slit_idx = self.spatid_to_zero(slit_spat)\n # Calculate\n coeff_out = self.coeffs[:self.spec_order[slit_idx]+1,:self.spat_order[slit_idx]+1,slit_idx]\n _tilts = tracewave.fit2tilts(final_tilts.shape, coeff_out, self.func2d, spat_shift=-1*_flexure)\n # Fill\n thismask_science = slitmask == slit_spat\n final_tilts[thismask_science] = _tilts[thismask_science]\n # Return\n return final_tilts\n\n def spatid_to_zero(self, spat_id):\n \"\"\"\n Convert slit spat_id to zero-based\n Mainly for coeffs\n\n Args:\n spat_id (int):\n\n Returns:\n int:\n\n \"\"\"\n mtch = self.spat_id == spat_id\n return np.where(mtch)[0][0]\n\n\nclass BuildWaveTilts:\n \"\"\"\n Class to guide slit/order tracing\n\n Args:\n mstilt (:class:`pypeit.images.buildimage.TiltImage`): Tilt image\n slits (:class:`pypeit.slittrace.SlitTraceSet`):\n Slit edges\n spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`):\n Spectrograph object\n par (:class:`pypeit.par.pypeitpar.WaveTiltsPar` or None):\n The parameters used to fuss with the tilts\n wavepar (:class:`pypeit.par.pypeitpar.WaveSolutionPar` or None):\n The parameters used for the wavelength solution\n det (int): Detector index\n qa_path (:obj:`str`, optional):\n Directory for QA output.\n master_key (:obj:`str`, optional): For naming QA only\n spat_flexure (float, optional):\n If input, the slitmask and slit edges are shifted prior\n to tilt analysis.\n\n\n Attributes:\n spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`):\n tilts_dict (dict):\n Holds the tilts data\n steps : list\n mask : ndarray, bool\n True = Ignore this slit\n all_trcdict : list of dict\n All trace dict's\n tilts : ndarray\n Tilts for a single slit/order\n all_ttilts : list of tuples\n Tuple of tilts ndarray's\n final_tilts : ndarray\n Final tilts image\n gpm (`numpy.ndarray`_):\n Good pixel mask\n Eventually, we might attach this to self.mstilt although that would then\n require that we write it to disk with self.mstilt.image\n \"\"\"\n\n # TODO This needs to be modified to take an inmask\n def __init__(self, mstilt, slits, spectrograph, par, wavepar, det=1, qa_path=None,\n master_key=None, spat_flexure=None):\n\n # TODO: Perform type checking\n self.spectrograph = spectrograph\n self.par = par\n self.wavepar = wavepar\n\n self.mstilt = mstilt\n self.slits = slits\n self.det = det\n self.qa_path = qa_path\n self.master_key = master_key\n self.spat_flexure = spat_flexure\n\n # --------------------------------------------------------------\n # TODO: Build another base class that does these things for both\n # WaveTilts and WaveCalib?\n\n # Get the non-linear count level\n self.nonlinear_counts = 1e10 if self.spectrograph is None \\\n else self.spectrograph.nonlinear_counts(self.mstilt.detector)\n\n # Set the slitmask and slit boundary related attributes that the\n # code needs for execution. 
This also deals with arc images that\n        # have a different binning than the trace images used to define\n        # the slits\n\n        # TODO -- Tidy this up into one or two methods?\n        # Load up all slits\n        # TODO -- Discuss further with JFH\n        all_left, all_right, mask = self.slits.select_edges(initial=True, flexure=self.spat_flexure)  # Grabs all, initial slits\n        self.tilt_bpm = np.invert(mask == 0)\n        self.tilt_bpm_init = self.tilt_bpm.copy()\n        # Slitmask\n        # TODO -- Discuss further with JFH\n        self.slitmask_science = self.slits.slit_img(initial=True, flexure=self.spat_flexure)  # All unmasked slits\n        # Resize\n        gpm = (self.mstilt.bpm == 0) if self.mstilt.bpm is not None \\\n            else np.ones_like(self.slitmask_science, dtype=bool)\n        self.shape_science = self.slitmask_science.shape\n        self.shape_tilt = self.mstilt.image.shape\n        self.slitcen = arc.resize_slits2arc(self.shape_tilt, self.shape_science, (all_left+all_right)/2)\n        self.slitmask = arc.resize_mask2arc(self.shape_tilt, self.slitmask_science)\n        self.gpm = (arc.resize_mask2arc(self.shape_tilt, gpm)) & (self.mstilt.image < self.nonlinear_counts)\n        # --------------------------------------------------------------\n\n        # Key Internals\n        self.mask = None\n        self.all_trace_dict = [None]*self.slits.nslits\n        self.tilts = None\n        # 2D fits are stored as a dictionary rather than list because we will jsonify the dict\n        self.all_fit_dict = [None]*self.slits.nslits\n        self.steps = []\n        # Main outputs\n        self.final_tilts = None\n        self.fit_dict = None\n        self.trace_dict = None\n\n    def extract_arcs(self):\n        \"\"\"\n        Extract the arcs down each slit/order\n\n        Wrapper to arc.get_censpec()\n\n        Returns:\n            :obj:`tuple`: Extracted arcs in two `numpy.ndarray`_ objects\n        \"\"\"\n        arccen, arccen_bpm, arc_maskslit = arc.get_censpec(self.slitcen, self.slitmask,\n                                                           self.mstilt.image, gpm=self.gpm,\n                                                           slit_bpm=self.tilt_bpm)\n        #, nonlinear_counts=self.nonlinear_counts)\n        # Step\n        self.steps.append(inspect.stack()[0][3])\n\n        # Update the mask\n        self.tilt_bpm |= arc_maskslit\n\n        return arccen, arccen_bpm\n\n    def find_lines(self, arcspec, slit_cen, slit_idx, bpm=None, debug=False):\n        \"\"\"\n        Find the lines for tracing\n\n        Wrapper to tracewave.tilts_find_lines()\n\n        Args:\n            arcspec:\n            slit_cen:\n            slit_idx (int):\n                Slit index, zero-based\n            bpm (`numpy.ndarray`_, optional):\n            debug (bool, optional):\n\n        Returns:\n            tuple: 2 objects\n                - `numpy.ndarray`_ or None:  Spectral positions of lines to trace\n                - `numpy.ndarray`_ or None:  Spatial positions of lines to trace\n\n        \"\"\"\n        # TODO: Implement this!\n        only_these_lines = None\n        if self.par['idsonly']:\n            # Put in some hook here for getting the lines out of the\n            # wave calib for i.e. LRIS ghosts.\n            raise NotImplementedError('Select lines with IDs for tracing not yet implemented.')\n\n        # TODO -- This should be order not slit!\n        tracethresh = self._parse_param(self.par, 'tracethresh', slit_idx)\n        lines_spec, lines_spat, good \\\n                = tracewave.tilts_find_lines(arcspec, slit_cen, tracethresh=tracethresh,\n                                             sig_neigh=self.par['sig_neigh'],\n                                             nfwhm_neigh=self.par['nfwhm_neigh'],\n                                             only_these_lines=only_these_lines,\n                                             fwhm=self.wavepar['fwhm'],\n                                             nonlinear_counts=self.nonlinear_counts,\n                                             bpm=bpm, debug_peaks=False, debug_lines=debug)\n\n        if debug:\n            mean, median, stddev = stats.sigma_clipped_stats(self.mstilt.image, sigma=3.)\n#            vmin, vmax = visualization.ZScaleInterval().get_limits(self.mstilt.image)\n            vmin = median - 2*stddev\n            vmax = median + 2*stddev\n            plt.imshow(self.mstilt.image, origin='lower', interpolation='nearest', aspect='auto',\n                       vmin=vmin, vmax=vmax)\n            plt.scatter(lines_spat[good], lines_spec[good], marker='x', color='k', lw=2, s=50)\n            plt.scatter(lines_spat[np.invert(good)], lines_spec[np.invert(good)], marker='x', color='C3', lw=2, s=50)\n            plt.show()\n\n        self.steps.append(inspect.stack()[0][3])\n        return (None, None) if lines_spec is None else (lines_spec[good], lines_spat[good])\n\n\n\n    def fit_tilts(self, trc_tilt_dict, thismask, slit_cen, spat_order, spec_order, slit_idx,\n                  show_QA=False, doqa=True):\n        \"\"\"\n        Fit the tilts\n\n        all_fit_dict and all_trace_dict are filled in place\n\n        Args:\n            trc_tilt_dict (dict): Contains information from tilt tracing\n            slit_cen (ndarray): (nspec,) Central trace for this slit\n            spat_order (int): Order of the 2d polynomial fit for the spatial direction\n            spec_order (int): Order of the 2d polynomial fit for the spectral direction\n            slit_idx (int): zero-based, integer index for the slit in question\n\n        Optional Args:\n            show_QA: bool, default = False\n                show the QA instead of writing it out to the outfile\n            doqa: bool, default = True\n                Construct the QA plot\n\n        Returns:\n            `numpy.ndarray`_: coeff: ndarray (spat_order + 1, spec_order+1)\n                Array containing the coefficients for the 2d legendre polynomial fit\n        \"\"\"\n        # Index\n        self.all_fit_dict[slit_idx], self.all_trace_dict[slit_idx] \\\n                = tracewave.fit_tilts(trc_tilt_dict, thismask, slit_cen, spat_order=spat_order,\n                                      spec_order=spec_order,maxdev=self.par['maxdev2d'],\n                                      sigrej=self.par['sigrej2d'], func2d=self.par['func2d'],\n                                      doqa=doqa, master_key=self.master_key,\n                                      slitord_id=self.slits.slitord_id[slit_idx],\n                                      minmax_extrap=self.par['minmax_extrap'],\n                                      show_QA=show_QA, out_dir=self.qa_path)\n\n        self.steps.append(inspect.stack()[0][3])\n        return self.all_fit_dict[slit_idx]['coeff2']\n\n    def trace_tilts(self, arcimg, lines_spec, lines_spat, thismask, slit_cen,\n                    debug_pca=False, show_tracefits=False):\n        \"\"\"\n        Trace the tilts\n\n        Args:\n\n            arcimg (`numpy.ndarray`_):\n                Arc image.  Shape is (nspec, nspat).\n            lines_spec (`numpy.ndarray`_):\n                Array containing the spectral pixel location of each\n                line found for this slit.  Shape is (nlines,).\n            lines_spat (`numpy.ndarray`_):\n                Array containing the spatial pixel location of each line,\n                which is the slitcen evaluated at the spectral\n                position of the line stored in lines_spec. Shape is\n                (nlines,).\n            thismask (`numpy.ndarray`_):\n                Image indicating which pixels lie on the slit in\n                question. True = on the slit. False = not on slit.  Shape\n                is (nspec, nspat) with dtype=bool.\n            slit_cen (:obj:`int`):\n                Integer index indicating the slit in question.\n\n        Returns:\n            dict: Dictionary containing information on the traced tilts required to fit the tilts.\n\n        \"\"\"\n        trace_dict = tracewave.trace_tilts(arcimg, lines_spec, lines_spat, thismask, slit_cen,\n                                           inmask=self.gpm, fwhm=self.wavepar['fwhm'],\n                                           spat_order=self.par['spat_order'],\n                                           maxdev_tracefit=self.par['maxdev_tracefit'],\n                                           sigrej_trace=self.par['sigrej_trace'],\n                                           debug_pca=debug_pca, show_tracefits=show_tracefits)\n\n        # Return\n        self.steps.append(inspect.stack()[0][3])\n        return trace_dict\n\n    def model_arc_continuum(self, debug=False):\n        \"\"\"\n        Model the continuum of the arc image.\n\n        The method uses the arc spectra extracted using\n        :attr:`extract_arcs` and fits a characteristic low-order\n        continuum for each slit/order using\n        :func:`pypeit.util.robust_polyfit_djs` and the parameters\n        `cont_function`, `cont_order`, and `cont_rej` from\n        :attr:`par`. The characteristic continuum is then rescaled to\n        match the continuum at each spatial position in the\n        slit/order.\n        \n        .. note::\n            The approach used here may be too simplistic (in the\n            robustness of the continuum fit and then how the\n            continuum is rescaled and projected for each slit/order).\n            Tests should be performed to make sure that the approach\n            is good enough to trace the centroid of the arc/sky lines\n            without biasing the centroids due to a non-zero continuum\n            level.\n\n        Args:\n            debug (:obj:`bool`, optional):\n                Run the method in debug mode.\n\n        Returns:\n            numpy.ndarray: Returns a 2D image with the same shape as\n            :attr:`mstilt` with the model continuum.\n        \"\"\"\n        # TODO: Should make this operation part of WaveTiltsPar ...\n        # Parse the upper and lower sigma rejection thresholds; used\n        # when rescaling continuum from center spectrum.\n        lower_rej, upper_rej = self.par['cont_rej'] if hasattr(self.par['cont_rej'], '__len__') \\\n                                    else np.repeat(self.par['cont_rej'], 2)\n\n        # Fit the continuum of the extracted arc spectra for each slit\n        nspec, nslits = self.arccen.shape\n        spec = np.arange(nspec, dtype=float)\n        arc_continuum = np.zeros(self.arccen.shape, dtype=float)\n        arc_fitmask = np.zeros(self.arccen.shape, dtype=bool)\n        for i in range(nslits):\n            if self.tilt_bpm[i]:\n                continue\n            # TODO: What to do with the following iter_continuum parameters?:\n            #  sigthresh, sigrej, niter_cont, cont_samp, cont_frac_fwhm\n            arc_continuum[:,i], arc_fitmask[:,i] \\\n                    = arc.iter_continuum(self.arccen[:,i], inmask=np.invert(self.arccen_bpm[:,i]),\n                                         fwhm=self.wavepar['fwhm'])\n            # TODO: Original version.  Please leave it for now.\n#            arc_fitmask[:,i], coeff \\\n#                    = utils.robust_polyfit_djs(spec, self.arccen[:,i], self.par['cont_order'],\n#                                               function=self.par['cont_function'],\n#                                               minx=spec[0], maxx=spec[-1],\n#                                               upper=upper_rej, lower=lower_rej, use_mad=True,\n#                                               sticky=True)\n#            arc_continuum[:,i] = utils.func_val(coeff, spec, self.par['cont_function'],\n#                                                minx=spec[0], maxx=spec[-1])\n\n            if debug:\n                plt.plot(spec, self.arccen[:,i], color='k', label='Arc')\n                plt.scatter(spec, np.ma.MaskedArray(self.arccen[:,i], mask=arc_fitmask[:,i]),\n                            marker='x', s=10, color='C1', label='Ignored')\n                plt.plot(arc_continuum[:,i], color='C3', label='Cont. Fit')\n                plt.xlabel('Spectral pixel')\n                plt.ylabel('Counts')\n                plt.legend()\n                plt.show()\n\n        # For each slit, rescale the continuum to the spectrum at a\n        # given spatial position along the slit/order. 
This\n # implementation may be too simplistic in how it treats the\n # spatial axis.\n nspat = self.slitmask.shape[1]\n cont_image = np.zeros(self.mstilt.image.shape, dtype=float)\n # TODO: Can probably do this without the for loop but this\n # still may be faster.\n for i in range(nslits):\n # Masked?\n if self.tilt_bpm[i]:\n continue\n # Find the pixels in this slit\n indx = self.slitmask == self.slits.spat_id[i]\n\n # Set a single width for the slit to simplify the\n # calculation\n width = np.sum(indx, axis=1)\n width = int(np.amax(width[np.invert(self.arccen_bpm[:,i])]))\n\n # Get the spatial indices for spectral pixels in the\n # spatial dimension that follow the curvature of the slit\n # center.\n # TODO: May need to be more sophisticated.\n _spat = (self.slitcen[:,i,None] + np.arange(width)[None,:] - width//2).astype(int)\n\n # Set the index of masked pixels or those off the detector\n # to -1 so that they don't cause the image indexing to\n # fault and can be selected for masking\n _spat[(_spat < 0) | (_spat >= nspat) | self.arccen_bpm[:,i,None]] = -1\n\n # Pull out the slit pixels into a square array and mask\n # pixels off of the slit\n aligned_spec = np.tile(np.arange(nspec), (width,1)).T\n aligned_flux = np.ma.MaskedArray(self.mstilt.image[aligned_spec, _spat],\n mask=_spat==-1)\n\n # Use a sigma-clipped median to determine the scaling of\n # the continuum fit to the central extracted spectrum to\n # match the spectrum at each spatial position.\n # TODO: Instead of determining the scale factor directly,\n # use the slit-illumination profile?\n cont_renorm = stats.sigma_clipped_stats(aligned_flux/arc_continuum[:,i,None],\n sigma_lower=lower_rej, sigma_upper=upper_rej,\n axis=0)[1]\n\n # Fill the image with the continuum for this slit\n indx = np.invert(aligned_flux.mask)\n cont_image[aligned_spec[indx], _spat[indx]] \\\n = (arc_continuum[:,i,None] * cont_renorm[None,:])[indx]\n\n # Remove continuum measurements that are off any slits (because\n # of the fixed width assumption)\n cont_image[self.slitmask == -1] = 0.\n return cont_image\n\n def run(self, doqa=True, debug=False, show=False):\n \"\"\"\n Main driver for tracing arc lines\n\n Code flow:\n\n #. Extract an arc spectrum down the center of each slit/order\n #. Loop on slits/orders\n #. Trace and fit the arc lines (This is done twice, once\n with trace_crude as the tracing crutch, then again\n with a PCA model fit as the crutch).\n #. Repeat trace.\n #. 2D Fit to the offset from slitcen\n #. Save\n\n Args:\n doqa (bool):\n debug (bool):\n show (bool):\n\n Returns:\n :class:`WaveTilts`:\n\n \"\"\"\n # Extract the arc spectra for all slits\n self.arccen, self.arccen_bpm = self.extract_arcs()\n\n # TODO: Leave for now. 
Used for debugging\n# self.par['rm_continuum'] = True\n# debug = True\n# show = True\n\n # Subtract arc continuum\n _mstilt = self.mstilt.image.copy()\n if self.par['rm_continuum']:\n continuum = self.model_arc_continuum(debug=debug)\n _mstilt -= continuum\n if debug:\n # TODO: Put this into a function\n vmin, vmax = visualization.ZScaleInterval().get_limits(_mstilt)\n w,h = plt.figaspect(1)\n fig = plt.figure(figsize=(3*w,h))\n ax = fig.add_axes([0.15/3, 0.1, 0.8/3, 0.8])\n ax.imshow(self.mstilt.image, origin='lower', interpolation='nearest',\n aspect='auto', vmin=vmin, vmax=vmax)\n ax.set_title('MasterArc')\n ax = fig.add_axes([1.15/3, 0.1, 0.8/3, 0.8])\n ax.imshow(continuum, origin='lower', interpolation='nearest',\n aspect='auto', vmin=vmin, vmax=vmax)\n ax.set_title('Continuum')\n ax = fig.add_axes([2.15/3, 0.1, 0.8/3, 0.8])\n ax.imshow(_mstilt, origin='lower', interpolation='nearest',\n aspect='auto', vmin=vmin, vmax=vmax)\n ax.set_title('MasterArc - Continuum')\n plt.show()\n\n # Final tilts image\n self.final_tilts = np.zeros(self.shape_science,dtype=float)\n max_spat_dim = (np.asarray(self.par['spat_order']) + 1).max()\n max_spec_dim = (np.asarray(self.par['spec_order']) + 1).max()\n self.coeffs = np.zeros((max_spec_dim, max_spat_dim,self.slits.nslits))\n self.spat_order = np.zeros(self.slits.nslits, dtype=int)\n self.spec_order = np.zeros(self.slits.nslits, dtype=int)\n\n # TODO sort out show methods for debugging\n if show:\n viewer,ch = display.show_image(self.mstilt.image*(self.slitmask > -1),chname='tilts')\n\n # Loop on all slits\n for slit_idx, slit_spat in enumerate(self.slits.spat_id):\n if self.tilt_bpm[slit_idx]:\n continue\n #msgs.info('Computing tilts for slit {0}/{1}'.format(slit, self.slits.nslits-1))\n msgs.info('Computing tilts for slit {0}/{1}'.format(slit_idx, self.slits.nslits))\n # Identify lines for tracing tilts\n msgs.info('Finding lines for tilt analysis')\n self.lines_spec, self.lines_spat \\\n = self.find_lines(self.arccen[:,slit_idx], self.slitcen[:,slit_idx],\n slit_idx,\n bpm=self.arccen_bpm[:,slit_idx], debug=debug)\n\n if self.lines_spec is None:\n self.slits.mask[slit_idx] = self.slits.bitmask.turn_on(self.slits.mask[slit_idx], 'BADTILTCALIB')\n continue\n\n thismask = self.slitmask == slit_spat\n\n # Performs the initial tracing of the line centroids as a\n # function of spatial position resulting in 1D traces for\n # each line.\n msgs.info('Trace the tilts')\n self.trace_dict = self.trace_tilts(_mstilt, self.lines_spec, self.lines_spat,\n thismask, self.slitcen[:, slit_idx])\n\n # TODO: Show the traces before running the 2D fit\n\n if show:\n display.show_tilts(viewer, ch, self.trace_dict)\n\n self.spat_order[slit_idx] = self._parse_param(self.par, 'spat_order', slit_idx)\n self.spec_order[slit_idx] = self._parse_param(self.par, 'spec_order', slit_idx)\n # 2D model of the tilts, includes construction of QA\n # NOTE: This also fills in self.all_fit_dict and self.all_trace_dict\n coeff_out = self.fit_tilts(self.trace_dict, thismask, self.slitcen[:,slit_idx],\n self.spat_order[slit_idx], self.spec_order[slit_idx],\n slit_idx,\n doqa=doqa, show_QA=show)\n self.coeffs[:self.spec_order[slit_idx]+1,:self.spat_order[slit_idx]+1,slit_idx] = coeff_out\n\n # TODO: Need a way to assess the success of fit_tilts and\n # flag the slit if it fails\n\n # Tilts are created with the size of the original slitmask,\n # which corresonds to the same binning as the science\n # images, trace images, and pixelflats etc.\n self.tilts = 
tracewave.fit2tilts(self.slitmask_science.shape, coeff_out,\n                                               self.par['func2d'])\n            # Save to final image\n            thismask_science = self.slitmask_science == slit_spat\n            self.final_tilts[thismask_science] = self.tilts[thismask_science]\n\n        if debug:\n            # TODO: Add this to the show method?\n            vmin, vmax = visualization.ZScaleInterval().get_limits(_mstilt)\n            plt.imshow(_mstilt, origin='lower', interpolation='nearest', aspect='auto',\n                       vmin=vmin, vmax=vmax)\n            for slit_idx, slit_spat in enumerate(self.slits.spat_id):\n                spat = self.all_trace_dict[slit_idx]['tilts_spat']\n                spec = self.all_trace_dict[slit_idx]['tilts']\n                spec_fit = self.all_trace_dict[slit_idx]['tilts_fit']\n                in_fit = self.all_trace_dict[slit_idx]['tot_mask']\n                not_fit = np.invert(in_fit) & (spec > 0)\n                fit_rej = in_fit & np.invert(self.all_trace_dict[slit_idx]['fit_mask'])\n                fit_keep = in_fit & self.all_trace_dict[slit_idx]['fit_mask']\n                plt.scatter(spat[not_fit], spec[not_fit], color='C1', marker='.', s=30, lw=0)\n                plt.scatter(spat[fit_rej], spec[fit_rej], color='C3', marker='.', s=30, lw=0)\n                plt.scatter(spat[fit_keep], spec[fit_keep], color='k', marker='.', s=30, lw=0)\n                with_fit = np.invert(np.all(np.invert(fit_keep), axis=0))\n                for t in range(in_fit.shape[1]):\n                    if not with_fit[t]:\n                        continue\n                    l, r = np.nonzero(in_fit[:,t])[0][[0,-1]]\n                    plt.plot(spat[l:r+1,t], spec_fit[l:r+1,t], color='k')\n            plt.show()\n\n        # Record the Mask\n        bpmtilts = np.zeros_like(self.slits.mask, dtype=self.slits.bitmask.minimum_dtype())\n        for flag in ['BADTILTCALIB']:\n            bpm = self.slits.bitmask.flagged(self.slits.mask, flag)\n            if np.any(bpm):\n                bpmtilts[bpm] = self.slits.bitmask.turn_on(bpmtilts[bpm], flag)\n\n        # Build and return DataContainer\n        tilts_dict = {'coeffs':self.coeffs,\n                      'func2d':self.par['func2d'], 'nslit':self.slits.nslits,\n                      'spat_order':self.spat_order, 'spec_order':self.spec_order,\n                      'spat_id':self.slits.spat_id, 'bpmtilts': bpmtilts,\n                      'spat_flexure': self.spat_flexure, 'PYP_SPEC': self.spectrograph.name}\n        return WaveTilts(**tilts_dict)\n\n    def _parse_param(self, par, key, slit):\n        \"\"\"\n        Grab a parameter for a given slit\n\n        Args:\n            par (ParSet):\n            key (str):\n            slit (int):\n\n        Returns:\n            object: Value of the parameter\n\n        \"\"\"\n        param_in = par[key]\n        if isinstance(param_in, (float, int)):\n            param = param_in\n        elif isinstance(param_in, (list, np.ndarray)):\n            param = param_in[slit]\n        else:\n            raise ValueError('Invalid input for parameter {:s}'.format(key))\n        return param\n\n    def __repr__(self):\n        # Generate sets string\n        txt = '<{:s}: '.format(self.__class__.__name__)\n        if len(self.steps) > 0:\n            txt+= ' steps: ['\n            for step in self.steps:\n                txt += '{:s}, '.format(step)\n            txt = txt[:-2]+']'  # Trim the trailing comma\n        txt += '>'\n        return txt\n\n","sub_path":"pypeit/wavetilts.py","file_name":"wavetilts.py","file_ext":"py","file_size_in_byte":30904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"573076084","text":"\n\n\n#class header\nclass _GENUINE():\n\tdef __init__(self,): \n\t\tself.name = \"GENUINE\"\n\t\tself.definitions = [u'If something is genuine, it is real and exactly what it appears to be: ', u'If people or emotions are genuine, they are honest and sincere: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_genuine.py","file_name":"_genuine.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"354895366","text":"from rest.abstract.signal_handler import SignalHandler\n\n\n#\n# All connection managers such as TopicManager, PipeManager etc. are handlers\n# they subscribe to needed signals and handle those signals.\n#\nclass ConnectionMediator(object):\n\n    def __init__(self):\n        self._signal_handlers = {}\n\n    def add_handler(self, sig, handler):\n        if not issubclass(type(handler), SignalHandler):\n            raise TypeError(\"Handler object must be inherited from \" + SignalHandler.__name__)\n\n        handlers = self._signal_handlers.get(sig)\n\n        if not handlers:\n            handlers = []\n            self._signal_handlers[sig] = handlers\n\n        handlers.append(handler)\n\n    def signal(self, sig, msg):\n        handlers = self._signal_handlers.get(sig)\n\n        if handlers:\n            for handler in handlers:\n                handler.handle(sig, msg)\n\n\nclass Signals(object):\n    SIG_EVENT = 0\n    SIG_CLIENT_DELETED = 1\n    SIG_CLIENT_CREATED = 2\n    SIG_DISCONNECTED = 3\n    SIG_PARAMETER_SET = 4","sub_path":"rest/connections/connection_mediator.py","file_name":"connection_mediator.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"122057175","text":"h, w = map(int, input().split())\n\nwall = list(map(int, input().split()))\n\nstack = []\n\np = 0\nwater = 0\n\nfor p in range(w):\n    while stack and wall[stack[-1]] < wall[p]:\n        prev = stack.pop()\n\n        if not stack: break\n\n        height = min(wall[p], wall[stack[-1]]) - wall[prev]\n        distance = p - stack[-1] - 1\n\n        water += distance * height\n    \n    stack.append(p)\n\nprint(water)","sub_path":"backjoon/14719.py","file_name":"14719.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"151017614","text":"import os\nimport sys\nimport glog as log\nsys.path.insert(0, \"../\")\nimport config\nimport boto3\nfrom gpiozero import LED\nled = LED(17)\n\nCLIENT_WAIT_TIME=20\n\n# Create SQS client\nclient = boto3.client('sqs')\n\nwhile(True):\n    response = client.receive_message(\n        QueueUrl=config.sqs_url,\n        AttributeNames=[\n            'SentTimestamp'\n        ],\n        MaxNumberOfMessages=1,\n        MessageAttributeNames=[\n            'All'\n        ],\n        WaitTimeSeconds=CLIENT_WAIT_TIME\n    )\n    \n    if 'Messages' in response:\n        message = response['Messages'][0]\n        body = message['Body']\n        receipt_handle = message['ReceiptHandle']\n\n        print(body, end=\"\\n\\n\")\n        led.on()\n        client.delete_message(\n            QueueUrl=config.sqs_url,\n            ReceiptHandle=receipt_handle\n        )\n    else:\n        #print(response, end=\"\\n\\n\")\n        pass\n","sub_path":"client/client_lights.py","file_name":"client_lights.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"284219265","text":"import sys\nimport scipy.io\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.lines as mlines\nfrom mpl_toolkits.mplot3d import Axes3D \n\nfrom sklearn.datasets import load_digits\nfrom sklearn.manifold import SpectralEmbedding\n\nfrom matplotlib import rc\nrc('text', usetex=True)\nrc('font', family='serif', size=26)\n\n\nmat = scipy.io.loadmat('../data/raw/hc_13/T1rawpos.mat')\ndays = mat['rawpos']\n\nday = 21\nepoch = 1\ncellNo = \"T26_0\"\nstart = 790\nend = 1595\n\nd = 
days[0][day][0][epoch][0][0]['data']\nspikes = d[:, 0]\nxs = d[:, 1]\nys = d[:, 2]\n\nfct = 1\nrng = int((end-start)/fct)\nresX = np.zeros(rng)\nresY = np.zeros(rng)\n\nfor i in range(0, spikes.size):\n resX[int((int(spikes[i])-start)/fct)] = xs[i]\n resY[int((int(spikes[i])-start)/fct)] = ys[i]\n\ncMap = []\n\nfor i in range(0,rng):\n if not (resX[i]>240 or resX[i]<115 or resY[i]>170 or resY[i]<35):\n if (resX[i]<177 and resY[i]<102.5):\n cMap.append('r')\n elif (resX[i]<177 and resY[i]>102.5):\n cMap.append('b')\n elif (resX[i]>177 and resY[i]<102.5):\n cMap.append('g')\n elif (resX[i]>177 and resY[i]>102.5):\n cMap.append('k')\n else:\n cMap.append('k')\n\nprint(len(cMap))\n\n#X = np.genfromtxt(\"../data/processed/hc_13/T22_4.csv\", delimiter=',')[:, (1,2)]\nd1 = np.load(\"../distances/21/1/filtered_1s__distance_matrix_T22_4_1s_20ms.npy\")\nd2 = np.load(\"../distances/21/1/filtered_1s__distance_matrix_T26_0_1s_20ms.npy\")\nd3 = np.load(\"../distances/21/1/filtered_1s__distance_matrix_T27_6_1s_20ms.npy\")\n# d1 = d1/np.amax(d1)\n# d2 = d2/np.amax(d2)\n# d3 = d3/np.amax(d3)\n# d4 = d4/np.amax(d4)\n# d5 = d5/np.amax(d5)\n# d6 = d6/np.amax(d6)\n# d7 = d7/np.amax(d7)\n# d8 = d8/np.amax(d8)\n# d9 = d9/np.amax(d9)\n# d10 = d10/np.amax(d10)\n# d11 = d11/np.amax(d11)\n# d12 = d12/np.amax(d12)\ndM = np.sqrt(d1**2 + d2**2 + d3**2)\n\nprint(dM.shape)\n\ndef thresholdMatrix(sM, topN):\n n = sM.shape[0]\n m = sM.shape[1]\n result = np.zeros(shape=(n,m))\n\n columnSorted = np.sort(sM,axis=0)\n rowSorted = np.sort(sM,axis=1)\n\n rowTopN = np.zeros(n)\n columnTopN = np.zeros(n)\n for i in range(0, n):\n rowTopN[i] = rowSorted[i, :][n-topN]\n columnTopN[i] = columnSorted[:, i][n-topN]\n\n\n for i in range(0, n):\n for j in range(0, m):\n if (sM[i][j]>=rowTopN[i] or sM[i][j]>=columnTopN[j]):\n result[i][j] = sM[i][j]\n\n return result\n\nembedding = SpectralEmbedding(n_components=2, affinity=\"precomputed\", n_neighbors=0)\ncoords = embedding.fit( thresholdMatrix(1/(dM+0.1), 10) ).embedding_\nprint(coords.shape)\n\nfig = plt.figure(figsize=(9,9))\nax = plt.subplot(111)\n#ax.plot(coords[:, 0], coords[:, 1], 'o', label=\"Target neurons\", c=cMap)\nfor i in range(len(cMap)):\n ax.scatter(coords[i, 0]*100, coords[i, 1]*100, color=cMap[i])\nplt.title('Diffusion Map Dimensions')\nplt.xlabel('dimension1')\nplt.ylabel('dimension2')\nax.yaxis.set_label_coords(-0.08,0.5)\n#ax.legend(loc='upper left', bbox_to_anchor=(0.75, 1.075), shadow=True, ncol=1)\n#plt.savefig('../results/dm/21/1/test_dmlib.svg', format=\"svg\")\nline1 = mlines.Line2D(range(1), range(1), color=\"white\", marker='o',markersize=10, markerfacecolor=\"red\")\nline2 = mlines.Line2D(range(1), range(1), color=\"white\", marker='o',markersize=10,markerfacecolor=\"green\")\nline3 = mlines.Line2D(range(1), range(1), color=\"white\", marker='o',markersize=10, markerfacecolor=\"blue\")\nline4 = mlines.Line2D(range(1), range(1), color=\"white\", marker='o',markersize=10,markerfacecolor=\"gray\")\nplt.legend((line1,line2,line3,line4),('SW','NW', 'NE', 'SE'),numpoints=1,\n bbox_to_anchor=(0.8, 0.22), borderaxespad=0., prop={'size': 17})\n\nplt.savefig('../results/21/1/filtered_dm2d.png')\nplt.savefig('../results/21/1/filtered_dm2d.pdf')\n\nfig = plt.figure(figsize=(9,9))\nax = plt.subplot(111)\nax.plot(coords[:, 0][100:145], 'o-', label=\"Target neurons\")\nplt.title('DM lib Dimensions')\nplt.xlabel('dimension1')\nplt.ylabel('dimension2')\nax.legend(loc='upper left', bbox_to_anchor=(0.75, 1.075), shadow=True, 
ncol=1)\n#plt.savefig('../results/dm/21/1/test_dmlib_ev1.svg', format=\"svg\")\nplt.savefig('../results/21/1/filtered_dmev1.png')\n\nfig = plt.figure(figsize=(9,9))\nax = plt.subplot(111)\nax.plot(coords[:, 1][100:145], 'o-', label=\"Target neurons\")\nplt.title('DM lib Dimensions')\nplt.xlabel('dimension1')\nplt.ylabel('dimension2')\nax.legend(loc='upper left', bbox_to_anchor=(0.75, 1.075), shadow=True, ncol=1)\n#plt.savefig('../results/dm/21/1/test_dmlib_ev2.svg', format=\"svg\")\nplt.savefig('../results/21/1/filtered_dmev2.png')\n\n\nrng = dM.shape[0]\nfig = plt.figure()\nax = fig.gca(projection='3d')\nz = np.linspace(0, rng, rng)\n\nax.plot(coords[:, 0], coords[:, 1], z, 'o-', label='parametric curve', linewidth=0.6, markersize=1)# c = plt.cm.jet(z/max(z)))\nax.legend()\n\n#plt.show()\nplt.savefig('../results/21/1/filtered_dm3d.png')","sub_path":"core/diffusion_lib_filtered_1s.py","file_name":"diffusion_lib_filtered_1s.py","file_ext":"py","file_size_in_byte":4738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"641162658","text":"import logging\nimport os\n\nimport numpy as np\nimport torch\nfrom tensorboardX import SummaryWriter\n\nfrom unet3d.losses import GeneralizedDiceLoss\nfrom . import utils\n\n\nclass UNet3DTrainer:\n \"\"\"3D UNet trainer.\n\n Args:\n model (Unet3D): UNet 3D model to be trained\n optimizer (nn.optim.Optimizer): optimizer used for training\n loss_criterion (callable): loss function\n accuracy_criterion (callable): used to compute training/validation accuracy (such as Dice or Rand score)\n saving the best checkpoint is based on the result of this function on the validation set\n device (torch.device): device to train on\n loaders (dict): 'train' and 'val' loaders\n checkpoint_dir (string): dir for saving checkpoints and tensorboard logs\n max_num_epochs (int): maximum number of epochs\n max_num_iterations (int): maximum number of iterations\n max_patience (int): number of EPOCHS with no improvement\n after which the training will be stopped\n validate_after_iters (int): validate after that many iterations\n log_after_iters (int): number of iterations before logging to tensorboard\n validate_iters (int): number of validation iterations, if None validate\n on the whole validation set\n best_val_accuracy (float): best validation accuracy so far (higher better)\n num_iterations (int): useful when loading the model from the checkpoint\n num_epoch (int): useful when loading the model from the checkpoint\n \"\"\"\n\n def __init__(self, model, optimizer, loss_criterion, accuracy_criterion,\n device, loaders, checkpoint_dir,\n max_num_epochs=200, max_num_iterations=1e5, max_patience=20,\n validate_after_iters=100, log_after_iters=100,\n validate_iters=None, best_val_accuracy=float('-inf'),\n num_iterations=0, num_epoch=0, logger=None):\n if logger is None:\n self.logger = utils.get_logger('UNet3DTrainer', level=logging.DEBUG)\n else:\n self.logger = logger\n\n self.logger.info(f\"Sending the model to '{device}'\")\n self.model = model.to(device)\n self.logger.debug(model)\n\n self.optimizer = optimizer\n self.loss_criterion = loss_criterion\n self.accuracy_criterion = accuracy_criterion\n self.device = device\n self.loaders = loaders\n self.checkpoint_dir = checkpoint_dir\n self.max_num_epochs = max_num_epochs\n self.max_num_iterations = max_num_iterations\n self.validate_after_iters = validate_after_iters\n self.log_after_iters = log_after_iters\n self.validate_iters = validate_iters\n self.best_val_accuracy = 
best_val_accuracy\n self.writer = SummaryWriter(\n log_dir=os.path.join(checkpoint_dir, 'logs'))\n\n self.num_iterations = num_iterations\n self.num_epoch = num_epoch\n # used for early stopping\n self.max_patience = max_patience\n self.patience = max_patience\n\n @classmethod\n def from_checkpoint(cls, checkpoint_path, model, optimizer, loss_criterion, accuracy_criterion, loaders,\n logger=None):\n logger.info(f\"Loading checkpoint '{checkpoint_path}'...\")\n state = utils.load_checkpoint(checkpoint_path, model, optimizer)\n logger.info(\n f\"Checkpoint loaded. Epoch: {state['epoch']}. Best val accuracy: {state['best_val_accuracy']}. Num_iterations: {state['num_iterations']}\")\n checkpoint_dir = os.path.split(checkpoint_path)[0]\n return cls(model, optimizer, loss_criterion, accuracy_criterion, torch.device(state['device']), loaders,\n checkpoint_dir,\n best_val_accuracy=state['best_val_accuracy'],\n num_iterations=state['num_iterations'],\n num_epoch=state['epoch'],\n max_num_epochs=state['max_num_epochs'],\n max_num_iterations=state['max_num_iterations'],\n max_patience=state['max_patience'],\n validate_after_iters=state['validate_after_iters'],\n log_after_iters=state['log_after_iters'],\n validate_iters=state['validate_iters'],\n logger=logger)\n\n def fit(self):\n for _ in range(self.num_epoch, self.max_num_epochs):\n # train for one epoch\n best_model_found = self.train(self.loaders['train'])\n\n if best_model_found:\n self.patience = self.max_patience\n else:\n self.patience -= 1\n if self.patience <= 0:\n # early stop the training\n self.logger.info(\n f'Validation accuracy did not improve for the last {self.max_patience} epochs. Early stopping...')\n break\n # adjust learning rate when reaching half of the max_patience\n if self.patience == self.max_patience // 2:\n self._adjust_learning_rate()\n self.patience = self.max_patience\n\n self.num_epoch += 1\n\n def train(self, train_loader):\n \"\"\"Trains the model for 1 epoch.\n\n Args:\n train_loader (torch.utils.data.DataLoader): training data loader\n\n Returns:\n True if the best performing model was found within this epoch,\n False otherwise\n \"\"\"\n train_losses = utils.RunningAverage()\n train_accuracy = utils.RunningAverage()\n\n # sets the model in training mode\n self.model.train()\n\n # initialize the return value\n best_model_found = False\n\n for i, (input, target) in enumerate(train_loader):\n self.logger.info(\n f'Training iteration {self.num_iterations}. Batch {i}. Epoch [{self.num_epoch}/{self.max_num_epochs - 1}]')\n\n input, target = input.to(self.device), target.to(self.device)\n\n # forward pass\n output = self.model(input)\n\n # if the labels in the target are stored in the single channel (e.g. 
when CrossEntropyLoss is used)\n # put the in the separate channels for accuracy criterion computation and tensorboard logging\n if target.dim() == 4:\n expanded_target = self._expand_target(target, output.size()[1])\n else:\n expanded_target = target\n\n # compute the loss\n if isinstance(self.loss_criterion, GeneralizedDiceLoss):\n loss = self.loss_criterion(output, expanded_target)\n else:\n loss = self.loss_criterion(output, target)\n\n # compute the accuracy criterion\n accuracy = self.accuracy_criterion(output, expanded_target)\n\n train_losses.update(loss.item(), input.size(0))\n train_accuracy.update(accuracy.item(), input.size(0))\n\n # compute gradients and update parameters\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n self.num_iterations += 1\n\n if self.num_iterations % self.log_after_iters == 0:\n # log stats, params and images\n self.logger.info(\n f'Training stats. Loss: {train_losses.avg}. Accuracy: {train_accuracy.avg}')\n self._log_stats('train', train_losses.avg, train_accuracy.avg)\n self._log_params()\n self._log_images(input, expanded_target, output)\n\n if self.num_iterations % self.validate_after_iters == 0:\n # evaluate on validation set\n val_accuracy = self.validate(self.loaders['val'])\n\n # remember best validation metric\n is_best = self._is_best_val_accuracy(val_accuracy)\n # update the return value\n best_model_found |= is_best\n\n # save checkpoint\n self._save_checkpoint(is_best)\n\n if self.max_num_iterations < self.num_iterations:\n self.logger.info(\n f'Maximum number of iterations {self.max_num_iterations} exceeded. Finishing training...')\n break\n\n def validate(self, val_loader):\n self.logger.info('Validating...')\n\n val_losses = utils.RunningAverage()\n val_accuracy = utils.RunningAverage()\n\n self.model.eval()\n try:\n with torch.no_grad():\n for i, (input, target) in enumerate(val_loader):\n self.logger.info(f'Validation iteration {i}')\n input, target = input.to(self.device), target.to(\n self.device)\n\n # forward pass\n output = self.model(input)\n\n if target.dim() == 4:\n expanded_target = self._expand_target(target, output.size()[1])\n else:\n expanded_target = target\n\n # compute the loss\n if isinstance(self.loss_criterion, GeneralizedDiceLoss):\n loss = self.loss_criterion(output, expanded_target)\n else:\n loss = self.loss_criterion(output, target)\n\n accuracy = self.accuracy_criterion(output, expanded_target)\n\n val_losses.update(loss.item(), input.size(0))\n val_accuracy.update(accuracy.item(), input.size(0))\n\n if i == self.validate_iters:\n # stop validation\n break\n\n self._log_stats('val', val_losses.avg, val_accuracy.avg)\n self.logger.info(\n f'Validation finished. Loss: {val_losses.avg}. 
Accuracy: {val_accuracy.avg}')\n return val_accuracy.avg\n finally:\n self.model.train()\n\n def _adjust_learning_rate(self, decay_rate=0.75):\n \"\"\"Sets the learning rate to the initial LR decayed by 'decay_rate'\"\"\"\n\n def get_lr(optimizer):\n for param_group in optimizer.param_groups:\n return param_group['lr']\n\n old_lr = get_lr(self.optimizer)\n assert old_lr > 0\n new_lr = decay_rate * old_lr\n self.logger.info(f'Changing learning rate from {old_lr} to {new_lr}')\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = new_lr\n\n def _is_best_val_accuracy(self, val_accuracy):\n is_best = val_accuracy > self.best_val_accuracy\n if is_best:\n self.logger.info(\n f'Saving new best validation accuracy: {val_accuracy}')\n self.best_val_accuracy = max(val_accuracy, self.best_val_accuracy)\n return is_best\n\n def _save_checkpoint(self, is_best):\n utils.save_checkpoint({\n 'epoch': self.num_epoch + 1,\n 'num_iterations': self.num_iterations,\n 'model_state_dict': self.model.state_dict(),\n 'best_val_accuracy': self.best_val_accuracy,\n 'optimizer_state_dict': self.optimizer.state_dict(),\n 'device': str(self.device),\n 'max_num_epochs': self.max_num_epochs,\n 'max_num_iterations': self.max_num_iterations,\n 'validate_after_iters': self.validate_after_iters,\n 'log_after_iters': self.log_after_iters,\n 'validate_iters': self.validate_iters,\n 'max_patience': self.max_patience\n }, is_best, checkpoint_dir=self.checkpoint_dir,\n logger=self.logger)\n\n def _log_stats(self, phase, loss_avg, accuracy_avg):\n tag_value = {\n f'{phase}_loss_avg': loss_avg,\n f'{phase}_accuracy_avg': accuracy_avg\n }\n\n for tag, value in tag_value.items():\n self.writer.add_scalar(tag, value, self.num_iterations)\n\n def _log_params(self):\n self.logger.info('Logging model parameters and gradients')\n for name, value in self.model.named_parameters():\n self.writer.add_histogram(name, value.data.cpu().numpy(),\n self.num_iterations)\n self.writer.add_histogram(name + '/grad',\n value.grad.data.cpu().numpy(),\n self.num_iterations)\n\n def _log_images(self, input, target, prediction):\n sources = {\n 'inputs': input.data.cpu().numpy(),\n 'targets': target.data.cpu().numpy(),\n 'predictions': prediction.data.cpu().numpy()\n }\n for name, batch in sources.items():\n for tag, image in self._images_from_batch(name, batch):\n self.writer.add_image(tag, image, self.num_iterations)\n\n def _images_from_batch(self, name, batch):\n tag_template = '{}/batch_{}/channel_{}/slice_{}'\n\n slice_idx = batch.shape[2] // 2 # get the middle slice\n tagged_images = []\n for batch_idx in range(batch.shape[0]):\n for channel_idx in range(batch.shape[1]):\n tag = tag_template.format(name, batch_idx, channel_idx,\n slice_idx)\n img = batch[batch_idx, channel_idx, slice_idx, ...]\n tagged_images.append((tag, (self._normalize_img(img))))\n\n return tagged_images\n\n @staticmethod\n def _normalize_img(img):\n return (img - np.min(img)) / np.ptp(img)\n\n def _expand_target(self, input, C):\n \"\"\"\n Converts NxDxHxW label image to NxCxDxHxW, where each label is stored in a separate channel\n :param input: 4D input image (NxDxHxW)\n :param C: number of channels/labels\n :return: 5D output image (NxCxDxHxW)\n \"\"\"\n assert input.dim() == 4\n shape = input.size()\n shape = list(shape)\n shape.insert(1, C)\n shape = tuple(shape)\n\n result = torch.zeros(shape)\n # for each batch instance\n for i in range(input.size()[0]):\n # iterate over channel axis and create corresponding binary mask in the target\n for c in 
range(C):\n mask = result[i, c]\n mask[input[i] == c] = 1\n return result.to(self.device)\n","sub_path":"unet3d/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":14294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"222685626","text":"import time\n\ndelay = 0.1\n\ndef loadbar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='='):\n percent = ('{0:.' + str(decimals) + 'f}').format(100*(iteration/float(total)))\n filled = int(length*iteration//total)\n bar = fill*filled + '-'*(length - filled)\n\n print(f'\\r{prefix} [{bar}] {percent}% {suffix}', end='\\r')\n if iteration == total:\n print()\n\nitems = list(range(0, 50))\nl = len(items)\n\nloadbar(0, l, prefix='Progress:', suffix='Complete', length=l)\nfor i, item in enumerate(items):\n time.sleep(delay)\n loadbar(i + 1, l, prefix='Progress:', suffix='Complete', length=l)\n","sub_path":"loadingbar.py","file_name":"loadingbar.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"375467469","text":"import os\nimport sys\nimport torch\nimport pandas as pd\nfrom skimage import io, transform\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nfrom util import *\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\nimport xml.etree.ElementTree as ET\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\nclass IdCardDataset(Dataset):\n def __init__(self, image_dir, anno_dir, inp_dim, transform=None):\n self.image_dir = image_dir\n self.anno_dir = anno_dir\n self.transform = transform\n\n self.image_files = self.get_files(self.image_dir, inp_dim)\n self.anno_files = self.generate_anno_files(self.image_files, self.anno_dir)\n\n def get_files(self, dir, inp_dim):\n imlist = []\n for filename in os.listdir(dir):\n if os.path.isfile(filename) and '_Bd_' not in filename:\n imlist.append(os.path.join(dir, filename))\n\n loaded_ims = [cv2.imread(x) for x in imlist]\n im_batches = list(map(prep_image, loaded_ims, [inp_dim for x in range(len(imlist))]))\n\n return im_batches\n\n def generate_anno_files(self, image_files, anno_dir):\n anno_files = []\n\n for image_file in image_files:\n base = os.path.basename(image_file)\n anno_files.append(anno_dir + '/' + os.path.splitext(base)[0] + '.xml')\n\n return anno_files\n\n def read_anno(self, file):\n root = ET.parse(file).getroot()\n rect_list = []\n\n for elem in root:\n if elem.tag == 'object':\n for sub_elem in elem:\n if sub_elem.tag == 'bndbox':\n rect = []\n for sub_sub_elem in sub_elem:\n rect.append(int(sub_sub_elem.text))\n rect_list.append(rect)\n\n return rect_list\n\n\n def __len__(self):\n return len(self.image_files)\n\n def __getitem__(self, item):\n image_file = self.image_files[item]\n image = io.imread(image_file)\n\n anno_file = self.anno_files[item]\n anno = self.read_anno(anno_file)\n\n sample = {'image': image, 'anno': anno}\n\n if self.transform:\n sample = self.transform(sample)\n\n return sample\n\ndef create_dataloader(image_dir, anno_dir):\n dataset = IdCardDataset(image_dir, anno_dir)\n dataloader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=-1)\n\n return dataloader\n","sub_path":"dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"116214725","text":"# The isBadVersion API is 
already defined for you.\n# @param version, an integer\n# @return a bool\n# def isBadVersion(version):\n\nclass Solution:\n def firstBadVersion(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n low=0\n high=n\n while(high - low >5):\n mid=low+(high-low)/2\n flag=isBadVersion(mid)\n if flag==True:\n high=mid\n else:\n low=mid\n low=int(low)\n high=int(high)\n for i in range(low,high):\n if not isBadVersion(i) and isBadVersion(i+1):\n return i+1\n return 1\n\n#Note:\n#Runtime: 36 ms, faster than 60.25% of Python3 online submissions for First Bad Version.\n#Memory Usage: 13 MB, less than 6.19% of Python3 online submissions for First Bad Version.\n","sub_path":"278FirstBadVersion.py","file_name":"278FirstBadVersion.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"634694882","text":"from config import data_path\nimport os\n\n\n\n\nboard_data = os.path.join(data_path,'industry_stock.csv')\n\n# print(board_data)\n\n\n\n\n\ndef board_to_stock():\n board_dict = {}\n with open (board_data, 'r', encoding='utf-8') as file:\n for content in file:\n item = content.split(',')\n industry = item[6].strip('\"')\n if industry not in board_dict:\n board_dict[industry] = []\n board_dict[industry].append(item[1].strip('\"'))\n return board_dict\n\n\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n r = board_to_stock()\n print(r)","sub_path":"update_jobs/cal_boardIndex/board_stock.py","file_name":"board_stock.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"646232175","text":"import os\nimport boto3\nfrom urllib.parse import parse_qs\n\n\nclass Registration():\n\n payload = None\n\n def __init__(self, event):\n self.dynamodb = boto3.resource('dynamodb')\n self.table = self.dynamodb.Table(os.environ['ddb'])\n self.event = event\n\n def execute(self):\n self.buildDataStruct()\n self.table.put_item(Item=self.payload)\n return True\n\n def buildDataStruct(self):\n sms_data = parse_qs(self.event['body'])\n self.payload = {\n 'smssid': sms_data['SmsSid'][0],\n 'email': sms_data['Body'][0],\n 'from_number': sms_data['From'][0]\n }\n","sub_path":"src/common/registration.py","file_name":"registration.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"605206046","text":"import os\nimport csv\n\nbudget_csv = os.path.join(\"Resources\", \"budget_data.csv\")\n\n# Open and read csv\nwith open(budget_csv) as csvfile:\n csvreader = csv.reader(csvfile, delimiter=\",\")\n\n # Read the header row first (skip this part if there is no header)\n csv_header = next(csvreader)\n print(f\"Header: {csv_header}\")\n\n # Read through each row of data after the header\n monthCounter = 0\n netTotalPL = 0\n for row in csvreader:\n\n monthCounter += 1\n netTotalPL += int(row[1])\n \n \n\n\n print(monthCounter)\n print(netTotalPL)\n ","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"373326560","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport os\nimport requests\nimport re\n\nconfig_file_path = \"/etc/v2ray/config.json\"\n\narukas_user = [\n {\n \"email\": \"FOO@BAR.COM\",\n \"password\": \"PASSWORD\"\n }\n]\n\nnode_vmess = {\n \"address\": \"0.0.0.0\",\n \"port\": 0,\n \"users\": [\n {\n \"id\": 
\"67a7ac7f-3656-a5f3-3434-a3faaa744770\",\n \"alterId\": 1\n }\n ]\n}\n\nports_used = [\n {'number': 10010, 'protocol': 'udp'}\n]\n\nconfig = {\n \"log\": {\n \"loglevel\": \"warning\"\n },\n \"inbound\": {\n \"port\": 1080,\n \"listen\": \"127.0.0.1\",\n \"protocol\": \"socks\",\n \"settings\": {\n \"auth\": \"noauth\",\n \"udp\": False,\n \"ip\": \"127.0.0.1\"\n }\n },\n \"outbound\": {\n \"protocol\": \"vmess\",\n \"settings\": {\n \"vnext\": []\n }\n },\n \"outboundDetour\": [\n {\n \"protocol\": \"freedom\",\n \"settings\": {},\n \"tag\": \"direct\"\n }\n ],\n \"dns\": {\n \"servers\": [\n \"8.8.8.8\",\n \"8.8.4.4\",\n\n \"208.67.222.222\",\n \"208.67.220.220\",\n\n \"199.85.126.30\",\n \"199.85.127.30\",\n\n \"156.154.70.1\",\n \"156.154.71.1\",\n\n \"localhost\"\n ]\n },\n \"routing\": {\n \"strategy\": \"rules\",\n \"settings\": {\n \"domainStrategy\": \"IPIfNonMatch\",\n \"rules\": [\n {\n \"type\": \"field\",\n \"port\": \"1-52\",\n \"outboundTag\": \"direct\"\n },\n {\n \"type\": \"field\",\n \"port\": \"54-79\",\n \"outboundTag\": \"direct\"\n },\n {\n \"type\": \"field\",\n \"port\": \"81-442\",\n \"outboundTag\": \"direct\"\n },\n {\n \"type\": \"field\",\n \"port\": \"444-65535\",\n \"outboundTag\": \"direct\"\n },\n {\n \"type\": \"chinasites\",\n \"outboundTag\": \"direct\"\n },\n {\n \"type\": \"field\",\n \"ip\": [\n \"0.0.0.0/8\",\n \"10.0.0.0/8\",\n \"100.64.0.0/10\",\n \"127.0.0.0/8\",\n \"169.254.0.0/16\",\n \"172.16.0.0/12\",\n \"192.0.0.0/24\",\n \"192.0.2.0/24\",\n \"192.168.0.0/16\",\n \"198.18.0.0/15\",\n \"198.51.100.0/24\",\n \"203.0.113.0/24\",\n \"::1/128\",\n \"fc00::/7\",\n \"fe80::/10\"\n ],\n \"outboundTag\": \"direct\"\n },\n {\n \"type\": \"chinaip\",\n \"outboundTag\": \"direct\"\n }\n ]\n }\n },\n \"transport\": {\n \"kcpSettings\": {\n \"uplinkCapacity\": 10,\n \"downlinkCapacity\": 20\n }\n }\n}\n\n\ndef main():\n url_login = \"https://app.arukas.io/api/login\"\n url_container = \"https://app.arukas.io/api/containers\"\n\n for user_info in arukas_user:\n try:\n # 登录\n login_result = requests.post(url_login, data=user_info, timeout=5)\n assert 'OK' == login_result.json()['message']\n\n # 获取Container信息\n header = {\n 'Cookie': \"\"\n }\n for cookie in login_result.cookies.items():\n header['Cookie'] += cookie[0] + \"=\" + cookie[1] + \";\"\n container_result = requests.get(url_container, headers=header, timeout=5)\n assert 200 == container_result.status_code\n container_detail = container_result.json()\n\n # 解析得到节点信息\n for container in container_detail[\"data\"]:\n for port_aim in ports_used:\n try:\n ith = container[\"attributes\"][\"ports\"].index(port_aim)\n node_select = container[\"attributes\"][\"port_mappings\"][0][ith]\n\n ip = re.findall(r'\\d{1,3}-\\d{1,3}-\\d{1,3}-\\d{1,3}', node_select['host'])[0].replace('-', '.')\n port = node_select['service_port']\n\n node = node_vmess.copy()\n node[\"address\"] = ip\n node[\"port\"] = port\n\n config[\"outbound\"][\"settings\"][\"vnext\"].append(node)\n except:\n pass\n except Exception as e:\n print(e)\n\n # 写入配置文件\n with open(config_file_path, mode='w') as f:\n print(json.dumps(config, sort_keys=True, indent=4), file=f)\n\n # 重启V2Ray\n os.system(\"service v2ray restart\")\n\n\nif \"__main__\" == __name__:\n main()\n print(\"Done\")\n","sub_path":"tools/UpdateArukas.py","file_name":"UpdateArukas.py","file_ext":"py","file_size_in_byte":5064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"210314663","text":"# -*- coding: utf-8 -*-\n\nimport 
os\n\nfrom flask import Blueprint, render_template, send_from_directory, abort, request\nfrom flask import current_app as APP\nfrom flask.ext.login import login_required, current_user\n\nfrom .models import User\nfrom ..post.constants import POSTS_PER_PAGE\n\nuser = Blueprint('user', __name__, url_prefix='/user')\n\n\n@user.route('/')\n@login_required\ndef index():\n if not current_user.is_authenticated():\n abort(403)\n\n page = int(request.args.get('page', 1))\n posts = current_user.followed_posts().paginate(page, POSTS_PER_PAGE, False)\n\n# page = int(request.args.get('page', 1))\n# pagination = User.query.paginate(page=page, per_page=10)\n# return render_template('index.html', pagination=pagination)\n\n return render_template('user/index.html', user=current_user, posts=posts)\n\n\n@user.route('/<int:user_id>/profile')\ndef profile(user_id):\n user = User.get_by_id(user_id)\n return render_template('user/profile.html', user=user)\n\n\n@user.route('/<int:user_id>/avatar/<filename>')\n@login_required\ndef avatar(user_id, filename):\n dir_path = os.path.join(APP.config['UPLOAD_FOLDER'], 'user_%s' % user_id)\n return send_from_directory(dir_path, filename, as_attachment=True)\n","sub_path":"build/lib/vshare/user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"269275058","text":"from art import logo\n\ndef add(n1,n2):\n \"\"\"Adds n1 and n2 together\"\"\"\n return n1 + n2\n\ndef subtract (n1, n2):\n \"\"\"Subtracts n1 from n2\"\"\"\n return n1 - n2\n\ndef multiply (n1, n2):\n \"\"\"Yes we multiply\"\"\"\n return n1 * n2\n\ndef divide (n1, n2):\n \"\"\"Divides n1 from n2\"\"\"\n return n1 / n2\n\noperations = {\n \"+\": add,\n \"-\": subtract,\n \"*\": multiply,\n \"/\": divide\n} \n\ndef calculator():\n print(logo)\n num1 = float(input(\"What is your first number?: \"))\n for symbol in operations:\n print(symbol)\n should_continue = True\n\n while should_continue:\n operation_symbols = input(\"Pick an operation: \")\n num2 = float(input(\"What is your next number?: \"))\n calculation_function = operations[operation_symbols]\n answer = calculation_function(num1, num2)\n print(f\"{num1} {operation_symbols} {num2} = {answer}\")\n\n if input(f\"Type 'y' to continue calculating with {answer} or 'n' to start a new calculation: \") == \"y\":\n num1 = answer\n else:\n should_continue = False\n calculator()\ncalculator()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"187366618","text":"# plotUtils.py\n# ------------\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef plotAgreementRatios(ratio_dict, is_new_figure=False):\n indices = ratio_dict.keys()\n min_index = np.min(indices)\n max_index = np.max(indices)\n ratio_list = list()\n for i in range(min_index, max_index+1):\n if i in ratio_dict:\n ratio_list.append(ratio_dict[i])\n else:\n ratio_list.append(0)\n # return if the list is empty\n if len(ratio_list) == 0:\n return\n # plot the ratio list\n plt.ion()\n # it can be used to control whether to show all the lines in one figure\n if is_new_figure:\n plt.figure()\n plt.show()\n plt.plot(ratio_list)\n plt.xlabel('steps')\n plt.ylabel('policy agreement ratios')\n plt.draw()\n plt.pause(0.001)\n\n\ndef plotAveragePolicyAgreementRatios(ratios_list):\n # inner helper function\n def getPolicyAgreementRatios(ratio_dict, i_step):\n keys = ratio_dict.keys()\n 
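# Note: on Python 3, dict.keys() returns a view object that np.max cannot\n # reduce directly; materializing it keeps the extrema lookups below working\n # on both Python 2 and 3 (the keys here are plain integer step indices).\n keys = list(keys)\n 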
max_key = np.max(keys)\n if i_step in ratio_dict:\n return ratio_dict[i_step]\n else:\n # otherwise return the last value\n return ratio_dict[max_key]\n\n # find the smallest and largest index\n min_index = np.inf\n max_index = -np.inf\n for ratios in ratios_list:\n min_index = min(min_index, np.min(ratios.keys()))\n max_index = max(max_index, np.max(ratios.keys()))\n\n n_experiments = len(ratios_list)\n avg_agreement_ratios = dict()\n for i in range(min_index, max_index+1):\n sum_agreement_ratios = float(np.sum([getPolicyAgreementRatios(ratios, i) for ratios in ratios_list]))\n avg_agreement_ratios[i] = sum_agreement_ratios / float(n_experiments)\n\n # plot the ratios\n plotAgreementRatios(avg_agreement_ratios, True)\n # return the calculated avg ratios\n return avg_agreement_ratios\n\n\n\n\n\n","sub_path":"experiment-logs/deterministic-environment/38_agentQ_alpha0.5_epsilon1_policyConverge_synchInput_speed2.0/all_py_files_snapshot/BerkeleyGridWorld/plotUtils.py","file_name":"plotUtils.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"108470704","text":"import taichi as ti\nimport numpy as np\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\n\nti.init(arch=ti.gpu)\n\n@ti.data_oriented\nclass lbm_solver:\n def __init__(self,\n nx, # domain size\n ny,\n niu, # viscosity of fluid\n bc_type, # [left,top,right,bottom] boundary conditions: 0 -> Dirichlet ; 1 -> Neumann\n bc_value, # if bc_type = 0, we need to specify the velocity in bc_value\n cy = 0, # whether to place a cylindrical obstacle\n cy_para = [0.0, 0.0, 0.0], # location and radius of the cylinder\n steps = 60000): # total steps to run\n self.nx = nx # by convention, dx = dy = dt = 1.0 (lattice units)\n self.ny = ny\n self.niu = niu\n self.tau = 3.0 * niu + 0.5\n self.inv_tau = 1.0 / self.tau\n self.rho = ti.var(dt=ti.f32, shape=(nx, ny))\n self.vel = ti.Vector(2, dt=ti.f32, shape=(nx, ny))\n self.mask = ti.var(dt=ti.f32, shape=(nx, ny))\n self.display_var = ti.var(dt=ti.f32, shape=(nx, ny))\n self.f_old = ti.Vector(9, dt=ti.f32, shape=(nx, ny))\n self.f_new = ti.Vector(9, dt=ti.f32, shape=(nx, ny))\n self.w = ti.var(dt=ti.f32, shape=9)\n self.e = ti.var(dt=ti.i32, shape=(9, 2))\n self.bc_type = ti.var(dt=ti.i32, shape=4)\n self.bc_value = ti.var(dt=ti.f32, shape=(4, 2))\n self.cy = cy\n self.cy_para = ti.var(dt=ti.f32, shape=3)\n self.bc_type.from_numpy(np.array(bc_type, dtype=np.int32))\n self.bc_value.from_numpy(np.array(bc_value, dtype=np.float32))\n self.cy_para.from_numpy(np.array(cy_para, dtype=np.float32))\n self.steps = steps\n arr = np.array([ 4.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0, 1.0 / 9.0, 1.0 / 36.0,\n 1.0 / 36.0, 1.0 / 36.0, 1.0 / 36.0], dtype=np.float32)\n self.w.from_numpy(arr)\n arr = np.array([[0, 0], [1, 0], [0, 1], [-1, 0], [0, -1], [1, 1],\n [-1, 1], [-1, -1], [1, -1]], dtype=np.int32)\n self.e.from_numpy(arr)\n\n @ti.func # compute equilibrium distribution function\n def f_eq(self, i, j, k):\n eu = ti.cast(self.e[k, 0], ti.f32) * self.vel[i, j][0] + ti.cast(self.e[k, 1], \n ti.f32) * self.vel[i, j][1]\n uv = self.vel[i, j][0]**2.0 + self.vel[i, j][1]**2.0\n return self.w[k] * self.rho[i, j] * (1.0 + 3.0 * eu + 4.5 * eu**2 - 1.5 * uv)\n\n @ti.kernel\n def init(self):\n for i, j in self.rho:\n self.vel[i, j][0] = 0.0\n self.vel[i, j][1] = 0.0\n self.rho[i, j] = 1.0\n self.mask[i, j] = 0.0\n for k in ti.static(range(9)):\n self.f_new[i, j][k] = self.f_eq(i, j, k)\n self.f_old[i, j][k] = 
self.f_new[i, j][k]\n if(self.cy==1):\n if ((ti.cast(i, ti.f32) - self.cy_para[0])**2.0 + (ti.cast(j, ti.f32)\n - self.cy_para[1])**2.0 <= self.cy_para[2]**2.0):\n self.mask[i, j] = 1.0\n\n\n @ti.kernel\n def collide_and_stream(self): # lbm core equation\n for i, j in ti.ndrange((1, self.nx - 1), (1, self.ny - 1)):\n for k in ti.static(range(9)):\n ip = i - self.e[k, 0]\n jp = j - self.e[k, 1]\n self.f_new[i,j][k] = (1.0-self.inv_tau)*self.f_old[ip,jp][k] + \\\n self.f_eq(ip,jp,k)*self.inv_tau\n\n @ti.kernel\n def update_macro_var(self): # compute rho u v\n for i, j in ti.ndrange((1, self.nx - 1), (1, self.ny - 1)):\n self.rho[i, j] = 0.0\n self.vel[i, j][0] = 0.0\n self.vel[i, j][1] = 0.0\n for k in ti.static(range(9)):\n self.f_old[i, j][k] = self.f_new[i, j][k]\n self.rho[i, j] += self.f_new[i, j][k]\n self.vel[i, j][0] += (ti.cast(self.e[k, 0], ti.f32) *\n self.f_new[i, j][k])\n self.vel[i, j][1] += (ti.cast(self.e[k, 1], ti.f32) *\n self.f_new[i, j][k])\n self.vel[i, j][0] /= self.rho[i, j]\n self.vel[i, j][1] /= self.rho[i, j]\n\n @ti.kernel\n def apply_bc(self): # impose boundary conditions\n # left and right\n for j in ti.ndrange(1, self.ny - 1):\n # left: dr = 0; ibc = 0; jbc = j; inb = 1; jnb = j\n self.apply_bc_core(1, 0, 0, j, 1, j)\n\n # right: dr = 2; ibc = nx-1; jbc = j; inb = nx-2; jnb = j\n self.apply_bc_core(1, 2, self.nx - 1, j, self.nx - 2, j)\n\n # top and bottom\n for i in ti.ndrange(self.nx):\n # top: dr = 1; ibc = i; jbc = ny-1; inb = i; jnb = ny-2\n self.apply_bc_core(1, 1, i, self.ny - 1, i, self.ny - 2)\n\n # bottom: dr = 3; ibc = i; jbc = 0; inb = i; jnb = 1\n self.apply_bc_core(1, 3, i, 0, i, 1)\n\n # cylindrical obstacle\n # Note: for cuda backend, putting 'if statement' inside loops can be much faster!\n for i, j in ti.ndrange(self.nx, self.ny): \n if (self.cy == 1 and self.mask[i, j] == 1):\n self.vel[i, j][0] = 0.0 # velocity is zero at solid boundary \n self.vel[i, j][1] = 0.0\n inb = 0\n jnb = 0\n if (ti.cast(i,ti.f32) >= self.cy_para[0]):\n inb = i + 1\n else:\n inb = i - 1\n if (ti.cast(j,ti.f32) >= self.cy_para[1]):\n jnb = j + 1\n else:\n jnb = j - 1\n self.apply_bc_core(0, 0, i, j, inb, jnb)\n\n @ti.func\n def apply_bc_core(self, outer, dr, ibc, jbc, inb, jnb):\n if (outer == 1): # handle outer boundary\n if (self.bc_type[dr] == 0):\n self.vel[ibc, jbc][0] = self.bc_value[dr, 0]\n self.vel[ibc, jbc][1] = self.bc_value[dr, 1]\n elif (self.bc_type[dr] == 1):\n self.vel[ibc, jbc][0] = self.vel[inb, jnb][0]\n self.vel[ibc, jbc][1] = self.vel[inb, jnb][1]\n self.rho[ibc, jbc] = self.rho[inb, jnb]\n for k in ti.static(range(9)):\n self.f_old[ibc,jbc][k] = self.f_eq(ibc,jbc,k) - self.f_eq(inb,jnb,k) + \\\n self.f_old[inb,jnb][k]\n\n @ti.kernel\n def get_display_var(self, flg: ti.template()):\n if ti.static(flg == 0): # get velocity magnitude\n for i, j in ti.ndrange(self.nx, self.ny):\n self.display_var[i, j] = ti.sqrt(self.vel[i, j][0]**2.0 +\n self.vel[i, j][1]**2.0)\n elif ti.static(flg == 1): # get x-direction component only\n for i, j in ti.ndrange(self.nx, self.ny):\n self.display_var[i, j] = self.vel[i, j][0]\n\n def solve(self):\n gui = ti.GUI('lbm solver', (self.nx, self.ny))\n self.init()\n for i in range(self.steps):\n self.collide_and_stream()\n self.update_macro_var()\n self.apply_bc()\n self.get_display_var(0)\n img = cm.plasma(self.display_var.to_numpy() / 0.15)\n gui.set_image(img)\n gui.show()\n if (i % 1000 == 0):\n print('Step: {:}'.format(i))\n # ti.imwrite((img[:,:,0:3]*255).astype(np.uint8), 'fig/karman_'+str(i).zfill(6)+'.png')\n\n 
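# A minimal headless-usage sketch (assumptions: the constructor arguments\n # mirror the lid-driven cavity case in __main__ below, and no ti.GUI window\n # is opened; pass_to_py() then pulls the x-velocity field back into NumPy):\n #\n # lbm = lbm_solver(256, 256, 0.0255, [0, 0, 0, 0],\n # [[0.0, 0.0], [0.1, 0.0], [0.0, 0.0], [0.0, 0.0]])\n # lbm.init()\n # for _ in range(1000):\n # lbm.collide_and_stream()\n # lbm.update_macro_var()\n # lbm.apply_bc()\n # u_x = lbm.pass_to_py() # (nx, ny) array, ready for the Ghia et al. comparison\n 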
def pass_to_py(self):\n self.get_display_var(1)\n return self.display_var.to_numpy()\n\n\nif __name__ == '__main__':\n flow_case = 0\n if (flow_case == 0): # von Karman vortex street: Re = U*D/niu = 200\n lbm = lbm_solver(401, 101, 0.005, [0, 0, 1, 0],\n [[0.1, 0.0], [0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],\n 1,[80.0, 50.0, 10.0])\n lbm.solve()\n elif (flow_case == 1): # lid-driven cavity flow: Re = U*L/niu = 1000\n lbm = lbm_solver(256, 256, 0.0255, [0, 0, 0, 0],\n [[0.0, 0.0], [0.1, 0.0], [0.0, 0.0], [0.0, 0.0]])\n lbm.solve()\n\n # compare with literature results\n y_ref, u_ref = np.loadtxt('data/ghia1982.dat', unpack=True, skiprows=2, usecols=(0, 2))\n fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(4, 3), dpi=200)\n axes.plot(np.linspace(0, 1.0, 256), lbm.pass_to_py()[256 // 2, :] / 0.1, 'b-', label='LBM')\n axes.plot(y_ref, u_ref, 'rs', label='Ghia et al. 1982')\n axes.legend()\n axes.set_xlabel(r'Y')\n axes.set_ylabel(r'U')\n plt.tight_layout()\n plt.show()\n","sub_path":"melt/lbm.py","file_name":"lbm.py","file_ext":"py","file_size_in_byte":8530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"446357175","text":"from __future__ import print_function\nimport sys, os\nimport IPython as ip\nfrom cnn3d.dataset import grasping_9dir\nfrom cnn3d.baseline.pca_grasping import PCAGrasping\nfrom cnn3d.baseline.svm_grasping import SVMGrasping\nfrom cnn3d.baseline.rand_grasping import RANDGrasping\nfrom cnn3d.grasping import CNN3dGrasping\nfrom cnn3d import params\nimport numpy as np\nfrom cnn3d.utils.grid_manip import rotate_grid\nimport cnn3d.utils.common as common\nfrom keras.models import load_model\nimport time\nimport cnn3d.viz as viz\nimport matplotlib.pyplot as plt\n\n\n\nclass colors:\n ok = '\\033[92m'\n fail = '\\033[91m'\n close = '\\033[0m'\n\ncss = \"\"\"\nhtml { margin: 0 }\nbody {\n background:#fff;\n color:#000;\n font:75%/1.5em Helvetica, \"DejaVu Sans\", \"Liberation sans\", \"Bitstream Vera Sans\", sans-serif;\n position:relative;\n}\n/*dt { font-weight: bold; float: left; clear; left }*/\ndiv { padding: 10px; width: 80%; margin: auto }\nimg { border: 1px solid #eee }\ndl { margin:0 0 1.5em; }\ndt { font-weight:700; }\ndd { margin-left:1.5em; }\ntable {\n border-collapse:collapse;\n border-spacing:0;\n margin:0 0 1.5em;\n padding:0;\n}\ntd { padding:0.333em;\n vertical-align:middle;\n}\n}\"\"\"\n\nimport sys\nimport cStringIO\n\nnp.set_printoptions(precision=2)\nnp.set_printoptions(suppress=True)\n\nclass html_writer():\n def __init__(self, fname, imgfmt='pdf'):\n self._fname = fname\n self._imgfmt = imgfmt\n with open(self._fname, 'w') as f:\n f.write('')\n f.write('')\n\n def close(self):\n with open(self._fname, 'a') as f:\n f.write('')\n\n def fig2pdf(self, fig):\n sio = cStringIO.StringIO()\n # fig.savefig(sio, format=self._imgfmt, bbox_inches='tight')\n fig.savefig(sio, format=self._imgfmt)\n return sio.getvalue().encode(\"base64\").strip()\n\n def add(self, X, X_idx, p, wrist_idx, appr_idx, y_wrist=[], y_appr=[], X_orig=np.array([]), with_gt=False):\n\n f_gt = viz.show_vgrid(X, p_appr=y_appr, p_wrist=y_wrist)\n f_pred = viz.show_vgrid(X, p_appr=common.p2appr_p(p), p_wrist=common.p2wrist_p(p))\n if X_orig.size > 0:\n f_gt_orig = viz.show_vgrid(X_orig, p_appr=y_appr, p_wrist=y_wrist)\n\n with open(self._fname, 'a') as f:\n f.write('
<div>')\n f.write('<table>')\n f.write('<tr><td>id:{}'.format(X_idx))\n if X_orig.size > 0 and with_gt:\n if self._imgfmt == 'pdf':\n f.write('<embed src=\"data:application/%s;base64,%s\" width=\"400\" height=\"300\">' % (self._imgfmt, self.fig2pdf(f_gt_orig)))\n else:\n f.write('<img src=\"data:image/%s;base64,%s\">' % (self._imgfmt, self.fig2pdf(f_gt_orig)))\n\n if with_gt:\n if self._imgfmt == 'pdf':\n f.write('<embed src=\"data:application/%s;base64,%s\" width=\"400\" height=\"300\">' % (self._imgfmt, self.fig2pdf(f_gt)))\n else:\n f.write('<img src=\"data:image/%s;base64,%s\">' % (self._imgfmt, self.fig2pdf(f_gt)))\n # f.write('</td><td>')\n if self._imgfmt == 'pdf':\n f.write('<embed src=\"data:application/%s;base64,%s\" width=\"400\" height=\"300\">' % (self._imgfmt, self.fig2pdf(f_pred)))\n else:\n f.write('<img src=\"data:image/%s;base64,%s\">' % (self._imgfmt, self.fig2pdf(f_pred)))\n f.write('</td></tr></table>')\n f.write('<dl><dt>Approaching directions</dt>')\n f.write('<dd>ground truth:<br>{}</dd>'.format(y_appr))\n f.write('<dd>prediction:<br>{}</dd>'.format(common.p2appr_p(p)))\n f.write('<dd style=\"color:{}\">chosen approaching direction index:<br>{}</dd>'.format('green' if y_appr[appr_idx] else 'red', appr_idx))\n f.write('<dt>Wrist orientations</dt>')\n f.write('<dd>ground truth:<br>{}</dd>'.format(y_wrist))\n f.write('<dd>prediction:<br>{}</dd>'.format(common.p2wrist_p(p)))\n f.write('<dd style=\"color:{}\">chosen wrist orientation index:<br>{}</dd>'.format('green' if y_wrist[wrist_idx] else 'red', wrist_idx))\n f.write('</dl>')\n f.write('</div>
')\n plt.close(f_gt)\n plt.close(f_pred)\n if X_orig.size > 0:\n plt.close(f_gt_orig)\n\nclass GraspEvaluation():\n def __init__(self, grasping_methods, viz_fp=False, entire=True, viz=False, side_grasp=False, noise_voxels=0, occluded_slices=0, resolution=32):\n if noise_voxels > 0 or occluded_slices > 0:\n self.test_challenging = True\n else:\n self.test_challenging = False\n\n if self.test_challenging:\n self.X_train, self.y_train, self.X_test_orig, self.y_test, self.input_shape, self.nb_classes, self.X_test = grasping_9dir.load(numof_noisy_voxels=noise_voxels, numof_occluded_slices=occluded_slices, resolution=resolution)\n else:\n self.X_train, self.y_train, self.X_test, self.y_test, self.input_shape, self.nb_classes = grasping_9dir.load(numof_noisy_voxels=noise_voxels, numof_occluded_slices=occluded_slices, resolution=resolution)\n\n self.shape_competion = params.train['shape_completion']\n if self.shape_competion:\n print('loading learned shape completion model')\n self.model_sc = load_model(params.train['sc_model_fname'])\n print('converting partial grids to completed grids ')\n\n # self.X_train = self.shape_complete(self.X_train)\n self.X_test = self.shape_complete(self.X_test)\n\n self.grasping_methods = grasping_methods\n self._viz_fp = viz_fp\n self._entire = entire\n self._viz = viz\n self._side_grasp = side_grasp\n\n def shape_complete(self, X, binary_mode=True):\n # [-1, 1] to [0, 1]\n X += 1.0\n X /= 2.0\n X = self.model_sc.predict(X)\n if binary_mode:\n X = np.round(X)\n # [0, 1] to [-1, 1]\n X *= 2.0\n X -= 1.0\n\n return X\n\n def print_verbose(self, X, p, wrist_idx, appr_idx, y_wrist=[], y_appr=[]):\n print('')\n print('approaching direction')\n print('prediced:'); print(common.p2appr_p(p)); print(appr_idx)\n if len(y_appr) > 0:\n print('gt:'); print(y_appr); print(common.argnmax(y_appr))\n\n print('wrist orientation')\n print('prediced:'); print(common.p2wrist_p(p)); print(wrist_idx)\n if len(y_wrist) > 0:\n print('gt:'); print(y_wrist); print(common.argnmax(y_wrist))\n\n def evaluate(self):\n # self.X_test = self.X_test.astype('float32')\n N = self.X_test.shape[0]\n print(N, 'test samples')\n assert self.X_test.shape[0] == self.y_test.shape[0]\n\n if (self._viz_fp or self._viz) and self._entire:\n hw = True\n else:\n hw = False\n\n accuracy = []\n for gm, gm_name in self.grasping_methods:\n print(gm_name)\n correct_wrist = 0\n correct_appr = 0\n # correct_both = 0\n\n if hw:\n hwfn = 'viz_' + gm_name\n if self._viz_fp:\n hwfn += '_fp'\n elif self._viz:\n hwfn += '_all'\n hwfn += '.html'\n self._hw = html_writer(hwfn, imgfmt='pdf')\n\n for i in range(N) if self._entire else range(100):\n X = self.X_test[i, 0, :]\n p = gm.predict(X)\n wrist_idx, appr_idx = common.p2wrist_appr_indices(p)\n\n y = self.y_test[i, :]\n\n y_wrist, y_appr = common.p2wrist_p(y), common.p2appr_p(y)\n\n print('(' + (common.print_pos() if y_appr[appr_idx] == 1 else common.print_neg()) + (common.print_pos() if y_wrist[wrist_idx] == 1 else common.print_neg()) + ')', end='')\n\n if y_wrist[wrist_idx] == 1:\n correct_wrist += 1\n\n if y_appr[appr_idx] == 1:\n correct_appr += 1\n\n if self._viz:\n # self.print_verbose(X, p, wrist_idx, appr_idx, y_wrist, y_appr)\n if hw:\n if self.test_challenging:\n X_orig = self.X_test_orig[i, 0, :]\n self._hw.add(X, i, p, wrist_idx, appr_idx, y_wrist, y_appr, X_orig, with_gt=True if gm_name == 'cnn3d' else False)\n else:\n self._hw.add(X, i, p, wrist_idx, appr_idx, y_wrist, y_appr)\n elif (y_wrist[wrist_idx] != 1 or y_appr[appr_idx] != 1) and self._viz_fp:\n # 
self.print_verbose(X, p, wrist_idx, appr_idx, y_wrist, y_appr)\n if hw:\n if self.test_challenging:\n X_orig = self.X_test_orig[i, 0, :]\n self._hw.add(X, i, p, wrist_idx, appr_idx, y_wrist, y_appr, X_orig)\n else:\n self._hw.add(X, i, p, wrist_idx, appr_idx, y_wrist, y_appr)\n\n # if self._side_grasp:\n # # print('up direction')\n # g = X\n # p = gm.predict(g)\n # wrist_idx, appr_idx = common.p2wrist_appr_indices(p)\n # # self.print_verbose(g, p, wrist_idx, appr_idx)\n\n # # print('front direction')\n # g = rotate_grid(X, 0, 90, 0)\n # p = gm.predict(g)\n # wrist_idx, appr_idx = common.p2wrist_appr_indices(p)\n # # self.print_verbose(g, p, wrist_idx, appr_idx)\n \n # # print('left direction')\n # g = rotate_grid(X, 90, 0, 90)\n # p = gm.predict(g)\n # wrist_idx, appr_idx = common.p2wrist_appr_indices(p)\n # # self.print_verbose(g, p, wrist_idx, appr_idx)\n\n # # print('right direction')\n # g = rotate_grid(X, -90, 0, -90)\n # p = gm.predict(g)\n # wrist_idx, appr_idx = common.p2wrist_appr_indices(p)\n\n # # self.print_verbose(g, p, wrist_idx, appr_idx)\n\n if self._entire:\n # acc = float(correct_both)/N\n acc_wrist = float(correct_wrist)/N\n acc_appr = float(correct_appr)/N\n print('')\n # print('accuracy: ', acc)\n print('accuracy (wrist): ', acc_wrist)\n print('accuracy (appr): ', acc_appr)\n accuracy.append((acc_wrist, acc_appr))\n else:\n print('')\n\n if hw:\n self._hw.close()\n return accuracy\n\nimport argparse\nif __name__ == '__main__': \n parser = argparse.ArgumentParser()\n parser.add_argument('-m', '--method', type=str, required=True, help='grasping method (pca, svm, cnn3d, fcn, rand)')\n parser.add_argument('--entire', type=int, default=1)\n parser.add_argument('--viz', type=int, default=0)\n parser.add_argument('--viz_fp', type=int, default=0)\n # parser.add_argument('--num_pc', type=int, default=2, help='if pca is chosen')\n # parser.add_argument('--model_fname', type=str, default='/tmp/cnn3d_train/grasping_epoch150_train_noside_cnn3d.h5', help='required if cnn3d is chosen')\n parser.add_argument('--model_fname', type=str, default='/tmp/grasping_epoch150_train_cnn3d_side.h5', help='required if cnn3d is chosen')\n parser.add_argument('--model_fname_fcn', type=str, default='/tmp/grasping_epoch1000_train_fcn.h5', help='required if cnn3d is chosen')\n # parser.add_argument('--model_fname', type=str, default='/tmp/cnn3d_train/grasping_epoch30_train_noside_cnn3d.h5', help='required if cnn3d is chosen')\n parser.add_argument('--side_grasp', type=int, default=0, help='enable to test side grasping')\n parser.add_argument('--noise_voxels', type=int, default=0, help='number of noisy voxels in testing dataset')\n parser.add_argument('--occluded_slices', type=int, default=0, help='number of occluded (x-z) slices in testing dataset')\n parser.add_argument('--model_fname_svm', type=str, default='/tmp/svm_grasp_baseline_iter1000.pkl', help='required if svm is chosen')\n # parser.add_argument('--model_fname_svm', type=str, default='/tmp/svm_grasp_baseline_iter10.pkl', help='required if svm is chosen')\n # parser.add_argument('--model_fname_svm', type=str, default='/tmp/svm_grasp_baseline_centered_iter2000.pkl', help='required if svm is chosen')\n parser.add_argument('--batch', type=int, default=0)\n parser.add_argument('--resolution', type=int, default=32)\n args = parser.parse_args()\n\n print('grasping method %s is chosen.' 
% args.method)\n\n gms = []\n if args.method == 'pca':\n gm = PCAGrasping()\n gms.append((gm, 'pca'))\n elif args.method == 'svm':\n gm = SVMGrasping(args.model_fname_svm)\n gms.append((gm, 'svm'))\n elif args.method == 'cnn3d':\n gm = CNN3dGrasping(args.model_fname)\n gms.append((gm, 'cnn3d'))\n elif args.method == 'fcn':\n gm = CNN3dGrasping(args.model_fname_fcn)\n gms.append((gm, 'fcn'))\n elif args.method == 'rand':\n gm = RANDGrasping()\n gms.append((gm, 'rand'))\n elif args.method == 'all':\n gm = RANDGrasping()\n gms.append((gm, 'rand'))\n gm = PCAGrasping()\n gms.append((gm, 'pca'))\n gm = SVMGrasping(args.model_fname_svm)\n gms.append((gm, 'svm'))\n gm = CNN3dGrasping(args.model_fname_fcn)\n gms.append((gm, 'fcn'))\n gm = CNN3dGrasping(args.model_fname)\n gms.append((gm, 'cnn3d'))\n else:\n sys.exit('unknown grasping method: %s' % args.method)\n\n if not args.batch:\n # run only once\n ge = GraspEvaluation(gms, viz=args.viz, entire=args.entire, viz_fp=args.viz_fp, side_grasp=args.side_grasp, noise_voxels=args.noise_voxels, occluded_slices=args.occluded_slices, resolution=args.resolution)\n acc = ge.evaluate()\n print(acc)\n else:\n # run multiple times in a batch mode\n\n num_interests = 2 # approaching direction, wrist orientation\n\n # very quick evaluation (good for checking all methods are working)\n # num_trials = 1\n # range_nv = range(0, 1000+1, 1000)\n # range_os = range(0, 4+1, 4)\n\n # num_trials = 3\n # range_nv = range(0, 2000+1, 1000)\n # range_os = range(0, 8+1, 4)\n\n # num_trials = 10\n # range_nv = range(0, 10000+1, 2000)\n # range_os = range(0, 24+1, 8)\n\n\n # num_trials = 10\n # range_nv = range(0, 10000+1, 2000)\n # range_os = range(0, 24+1, 8)\n\n # num_trials = 10\n # range_nv = range(0, 10000+1, 1000)\n # range_os = range(0, 24+1, 4)\n\n # extensive evaluation (good for paper)\n num_trials = 30\n range_nv = range(0, 10000+1, 1000)\n range_os = range(0, 24+1, 4)\n\n num_methods = len(gms)\n acc_array = np.zeros((num_interests, len(range_nv), len(range_os), len(gms), num_trials), dtype=np.float32)\n for i in range(num_trials):\n for j, nv in enumerate(range_nv):\n for k, os in enumerate(range_os):\n print('[%d/%d][%d/%d][%d/%d]' % (i, num_trials, j, len(range_nv), k, len(range_os)))\n ge = GraspEvaluation(gms, noise_voxels=nv, occluded_slices=os)\n acc = ge.evaluate()\n for l in range(num_methods):\n acc_array[0, j, k, l, i] = acc[l][0] # wrist\n acc_array[1, j, k, l, i] = acc[l][1] # appr\n\n # ip.embed()\n\n np.savez('eval_grasping_' + str(time.time()) + '.npz', \n acc_array=acc_array,\n num_interests=num_interests, \n num_trials=num_trials, \n range_nv=range_nv, \n range_os=range_os, \n gms=gms)\n\n\n\n\n\n","sub_path":"eval_grasping.py","file_name":"eval_grasping.py","file_ext":"py","file_size_in_byte":16171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"567708604","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom __future__ import unicode_literals\nimport sys\n# import os\nimport platform\n\nfrom PyInstaller.utils.win32.versioninfo import (\n VarStruct, VarFileInfo, StringStruct, StringTable,\n StringFileInfo, FixedFileInfo, VSVersionInfo, SetVersion,\n)\nimport PyInstaller.__main__\n\narch = sys.argv[1] if len(sys.argv) > 1 else platform.architecture()[0][:2]\nassert arch in ('32', '64')\nprint('Building %sbit version' % arch)\n_x86 = '_x86' if arch == '32' else ''\n\nFILE_DESCRIPTION = 'Media Downloader%s' % (' (32 Bit)' if _x86 else '')\n\n# root_dir = 
os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\n# print('Changing working directory to %s' % root_dir)\n# os.chdir(root_dir)\n\nexec(compile(open('yt_dlp/version.py').read(), 'yt_dlp/version.py', 'exec'))\nVERSION = locals()['__version__']\n\nVERSION_LIST = VERSION.split('.')\nVERSION_LIST = list(map(int, VERSION_LIST)) + [0] * (4 - len(VERSION_LIST))\n\nprint('Version: %s%s' % (VERSION, _x86))\nprint('Remember to update the version using devscipts\\\\update-version.py')\n\nVERSION_FILE = VSVersionInfo(\n ffi=FixedFileInfo(\n filevers=VERSION_LIST,\n prodvers=VERSION_LIST,\n mask=0x3F,\n flags=0x0,\n OS=0x4,\n fileType=0x1,\n subtype=0x0,\n date=(0, 0),\n ),\n kids=[\n StringFileInfo([\n StringTable(\n '040904B0', [\n StringStruct('Comments', 'yt-dlp%s Command Line Interface.' % _x86),\n StringStruct('CompanyName', 'https://github.com/yt-dlp'),\n StringStruct('FileDescription', FILE_DESCRIPTION),\n StringStruct('FileVersion', VERSION),\n StringStruct('InternalName', 'yt-dlp%s' % _x86),\n StringStruct(\n 'LegalCopyright',\n 'pukkandan.ytdlp@gmail.com | UNLICENSE',\n ),\n StringStruct('OriginalFilename', 'yt-dlp%s.exe' % _x86),\n StringStruct('ProductName', 'yt-dlp%s' % _x86),\n StringStruct('ProductVersion', '%s%s' % (VERSION, _x86)),\n ])]),\n VarFileInfo([VarStruct('Translation', [0, 1200])])\n ]\n)\n\nPyInstaller.__main__.run([\n '--name=yt-dlp%s' % _x86,\n '--onefile',\n '--icon=devscripts/cloud.ico',\n '--exclude-module=youtube_dl',\n '--exclude-module=youtube_dlc',\n '--exclude-module=test',\n '--exclude-module=ytdlp_plugins',\n '--hidden-import=mutagen',\n '--hidden-import=Crypto',\n '--upx-exclude=vcruntime140.dll',\n 'yt_dlp/__main__.py',\n])\nSetVersion('dist/yt-dlp%s.exe' % _x86, VERSION_FILE)\n","sub_path":"pyinst.py","file_name":"pyinst.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"298292767","text":"\n\n\ndef minmax(noPalos, jugador, profundidad):\n ganadas = [0,0,0]\n \n #print(\"\\t\"*profundidad + \"Profundidad > \" + str(profundidad))\n #print(\"\\t\"*profundidad + 'Palos Restantes: ' + str(noPalos))\n #ALGUIEN GANA\n if noPalos >= 2 and noPalos <= 4:\n for i in range(2,5,+1):\n if noPalos == i:\n #print(\"\\t\"*profundidad + jugador + \" toma: !\"+str(abs(1-i))+\"\\n\")\n if jugador == \"PC\":\n return abs(1-i)\n else:\n return 0\n if noPalos <= 1:\n return 1\n for i in range(1,4,+1):\n #print(\"\\t\"*profundidad + jugador +\" toma: \"+ str(i)+\"\\n\")\n if jugador == 'PC':\n x = minmax(noPalos - i, 'Jugador', profundidad + 1)\n if x == 0:\n ganadas[i-1] += 1\n elif isinstance(x,list):\n for j in range(3):\n ganadas[i-1] += x[j]\n else:\n x = minmax(noPalos - i, 'PC', profundidad + 1)\n if x == 0:\n ganadas[i-1] += 1\n elif isinstance(x,list):\n for j in range(3):\n ganadas[i-1] += x[j]\n #print(\"---\"*10)\n return ganadas\n \n\n\n\ndef IniciarPartida():\n pc_incia = ''\n while True:\n print(\"\\n¿Quién inicia?\\n1 - Computadora\\n2 - Tu\")\n pc_incia = input()\n\n if pc_incia == '1':\n pc_incia = True\n break\n elif pc_incia == '2':\n pc_incia = False\n break\n\n palos_restantes = 0\n\n while True:\n print(\"\\nIngrese la cantidad de palos\")\n palos_restantes = int(input())\n if palos_restantes > 0:\n break\n \n while True:\n #print(\"Palos Restantes: \" + str(palos_restantes)+\"\\n\")\n if pc_incia:\n if palos_restantes < 20:\n jugada = minmax(palos_restantes, 'PC',0)\n if isinstance(jugada,list):\n palos_restantes -= jugada.index(min(jugada)) + 1\n 
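# minmax returns one score per candidate move (take 1, 2 or 3 sticks):\n # a count of the lines in that subtree that favor the opponent, so the\n # computer just played index(min(...)) + 1, the move leaving the opponent\n # the fewest favorable continuations.\n 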
print(\"PC ha tomado \" + str(jugada.index(min(jugada)) + 1) + \" palos\")\n else:\n palos_restantes -= jugada\n print(\"PC ha tomado \" + str(jugada) + \" palo(s)\")\n else:\n palos_restantes -= 3\n print(\"PC ha tomado 3 palo(s)\")\n print(\"Palos Restantes: \" + str(palos_restantes)+\"\\n\") \n else:\n while True:\n print(\"\\nTu turno, ¿Cuántos palos vas a tomar?\")\n jugada = int(input())\n if jugada < 1 or jugada > 3:\n print(\"\\nERROR > Ingresa un numero entre 1 y 3\")\n else:\n palos_restantes -= jugada\n print(\"Haz tomado \" + str(jugada) + \" palo(s)\")\n print(\"Palos Restantes: \" + str(palos_restantes)+\"\\n\")\n break\n \n if palos_restantes <= 0:\n if pc_incia:\n print(\"Ganaste!!\")\n else:\n print(\"Computadora Gana!!\")\n return\n \n pc_incia = not pc_incia\n\n\nIniciarPartida()\n\nprint('\\n\\nPresiona enter para salir...')\ninput()\n\n","sub_path":"min-max.pyw","file_name":"min-max.pyw","file_ext":"pyw","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"548359663","text":"from fastapi import HTTPException\n\n\nclass HttpTimeoutException(HTTPException):\n status_code = 504\n\n\nclass HttpAllResponsesFailedException(HTTPException):\n status_code = 512\n detail = 'All responses failed'\n\n def __init__(self):\n super(HttpAllResponsesFailedException, self).__init__(\n status_code=HttpAllResponsesFailedException.status_code,\n detail=HttpAllResponsesFailedException.detail)\n","sub_path":"src/test/service/error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"104416730","text":"from numpy import *\n\ndef j(A,b,guess = None,eps = None,lam = None,maxit = None):\n '''\n %% ***********************************************************************\n % FUNCTION Jacobi\n % Purpose: Performs the Jacobi iterative method of solution\n %\n % Function call: [x] = Jacobi(A,b,guess,eps,lam)\n %\n % Input: A = Input system of equations AX = B\n % b = solution of system of equations AX = B\n % guess = Initial guess. Will be set to all 0s if nothing is entered.\n % eps = tolerance of difference between. Will be set to 1e-6 if nothing is\n % entered\n % lam = relaxation constant. Will be set to 1 if nothing is entered.\n % maxit = maximum number of iterations allowed. 
Will be set to 10000 if\n % nothing is entered\n %\n % Outputs: x = solution of x's in the system of equations AX = B\n %\n % Adam Hollock\n % 26 January 2012\n %% ***********************************************************************\n '''\n if guess == None:\n guess = zeros(A.shape[0],1)\n if eps == None:\n eps = 1e-2\n if lam == None:\n lam = 1\n if maxit == None:\n maxit = 10000\n\n #Will catch the matrix if the number of variables don't match the number of\n #solutions or the size of the system in general\n if A.shape[1] != b.shape[0] or A.shape[0] != guess.shape[0]:\n error('Great Scott!!!')\n\n #initializes an error matrix, an x matrix, and a counter.\n errorValues = ones((guess.shape[0],1))\n x = guess\n count = 0\n\n #Begins the gauss seidel solver algorithm\n while (max(errorValues) > eps) and (maxit > count):\n #initializes a solution matrix.\n xnew = x\n for i in arange(1,guess.shape[0]):\n #Performs the main mathematical work of the gauss-seidel function.\n Ause = concatenate((A[i,:i-1],A[i,i+1:]),1)\n xuse = concatenate((xnew[:i-1],xnew[i+1:]))\n xnew[i] = (b[i]-Ause*xuse)/(A[i,i])\n #Applies the relaxation constant\n xnew[i] = lam*xnew[i] + (1-lam)*x[i]\n errorValues[i] = abs((xnew[i]-x[i])/xnew[i])*100\n x = xnew\n count = count + 1;\n return x","sub_path":"Week 3/Jacobi.py","file_name":"Jacobi.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"332912203","text":"# Copyright 2015-2016 F5 Networks Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom f5_image_prep.openstack.glance import GlanceLib\nfrom f5_image_prep.openstack.openstack import get_creds\n\nimport os\nimport pytest\nimport subprocess\nimport sys\n\n\nBIGIPFILE = 'BIGIP-11.6.0.0.0.401.qcow2'\nVEIS_SCRIPT = \\\n '/home/imageprep/f5-openstack-image-prep/f5_image_prep/ve_image_sync.py'\nos.environ['OS_TENANT_NAME'] = 'admin'\nos.environ['OS_USERNAME'] = 'admin'\nos.environ['OS_PASSWORD'] = 'changeme'\nos.environ['OS_AUTH_URL'] = 'http://10.190.4.147:5000/v2.0'\nTEST_IMG = None\n\n\n@pytest.fixture\ndef VEImageSync(request):\n from f5_image_prep.ve_image_sync import VEImageSync as veis\n\n def delete_image():\n GlanceLib(get_creds()).glance_client.images.delete(TEST_IMG.id)\n\n request.addfinalizer(delete_image)\n\n creds = get_creds()\n work_dir = sys.path[0]\n return veis(creds, BIGIPFILE, work_dir)\n\n\ndef test_image_sync(VEImageSync):\n global TEST_IMG\n TEST_IMG = VEImageSync.sync_image()\n imgs = GlanceLib(get_creds()).glance_client.images.list()\n assert TEST_IMG.id in [img.id for img in imgs]\n\n\ndef test_image_sync_command_line():\n output = subprocess.check_output(['python', VEIS_SCRIPT, '-i', BIGIPFILE])\n assert 'Patching image...' in output\n assert 'Uploading patched image to glance...' 
in output\n assert 'Image Model:' in output\n","sub_path":"f5_image_prep/test/functional/test_ve_image_sync.py","file_name":"test_ve_image_sync.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"58652240","text":"import utils.sqlitdb as sqlDb\nimport utils.database as fileDb\nfrom models.Book import Book\nuseSqliteDb = False\n\ndef set_data_store(choice):\n if str(choice) not in ['1','2']:\n return False, None\n if choice == '2':\n sqlDb.create_books_table()\n return choice, fileDb\n return choice ,sqlDb\n\ndef menu():\n return input(\n \"\"\"\n Welcome to Book Store!!!\n Please make a choice to continue:\n - 'a': Add a new book to store\n - 'l': List books in the book store\n - 'r': Mark book as read\n - 'd': Delete a particular book from book store\n - 'q': Quit\n Your Choice: \"\"\"\n )\n\ndef getBookNameAndAuthor():\n bookName = input('Enter Book name:')\n bookAuthor = input('Enter Book Author:')\n return (bookName, bookAuthor)\n\ndef main():\n [ADD, LIST, TOGGLE_READ, DELETE, QUIT] = ['a','l','r','d','q']\n chosenDb,db = set_data_store(input(\"\"\"\n Choose type of data store:\n 1. Sqlite Database\n 2. File Storage\n Press any other key to QUIT\n Choice:\"\"\"))\n if not chosenDb:\n return\n print(db)\n user_input = menu()\n while user_input!= QUIT:\n if user_input == ADD:\n book,author = getBookNameAndAuthor()\n db.add_book(Book(book,author))\n elif user_input == LIST:\n print(db.list_books())\n elif user_input == TOGGLE_READ:\n book,author = getBookNameAndAuthor()\n db.mark_read(book,author)\n elif user_input == DELETE:\n book,author = getBookNameAndAuthor()\n db.delete_book(book,author)\n user_input = menu()\n\n\n\nif __name__ == '__main__':\n main()","sub_path":"section-6/milestone project 2: Book Store/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"187366618","text":"import numpy as np\nimport os\nimport cv2\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom timeit import default_timer as timer\n\t\ndef lk_flow_fast(Old_Image, New_Image, window_size=9, debug_level=0):\n\tkernel_x = np.array([[-1., 1.], [-1., 1.]])\n\tkernel_y = np.array([[-1., -1.], [1., 1.]])\n\t\n\tOld_Image = Old_Image / 255. # normalize pixels\n\tNew_Image = New_Image / 255. 
# normalize pixels\n\tw = int(window_size/2)\n\tu=np.zeros(Old_Image.shape)\n\tv=np.zeros(Old_Image.shape)\n\t\n\tIx = cv2.filter2D(Old_Image,-1,kernel_x)\n\tIy = cv2.filter2D(Old_Image,-1,kernel_y)\n\tIt = New_Image - Old_Image\n\t\n\tif debug_level>0:\n\t\tplot_image(Ix, title=\"partial x derivative of old image\")\n\t\tplot_image(Iy, title=\"partial y derivative of old image\")\n\t\tplot_image(It, title=\"partial time derivative of images\")\n\t\t\n\tparamaters = np.zeros(Old_Image.shape+(5,))\n\tparamaters[..., 0] = Ix ** 2\n\tparamaters[..., 1] = Iy ** 2\n\tparamaters[..., 2] = Ix * Iy\n\tparamaters[..., 3] = Ix * It\n\tparamaters[..., 4] = Iy * It\n\tdel Ix, Iy, It\n\t\n\tparamaters_cumulative_sums = np.cumsum(np.cumsum(paramaters, axis=0),axis=1)\n\tdel paramaters\n\t\n\twindow_sums = (paramaters_cumulative_sums[2*w+1:, 2*w+1:] - \\\n\t\tparamaters_cumulative_sums[2*w+1:, :-1-2*w] - \\\n\t\tparamaters_cumulative_sums[:-1-2*w, 2*w+1:] + \\\n\t\tparamaters_cumulative_sums[:-1-2*w, :-1-2*w])\n\tdel paramaters_cumulative_sums\n\t\n\tdet = (window_sums[...,0]*window_sums[..., 1]-window_sums[..., 2]**2)\n\t\n\tdet = np.nan_to_num(det)\n\t#plot_image(det)\n\t#np.seterr(all='warn', invalid='print')\n\t\n\tflow_u = np.where(det != 0, \\\n\t\t(-window_sums[..., 1] * window_sums[..., 3] + \\\n\t\twindow_sums[..., 2] * window_sums[..., 4]) / det, \\\n\t\t0.0)\n\tflow_v = np.where(det != 0, \\\n\t\t(-window_sums[..., 0] * window_sums[..., 4] + \\\n\t\twindow_sums[..., 2] * window_sums[..., 3]) / det, \\\n\t\t0.0)\n\tdel det\n\t\n\tu = np.pad(flow_u, [(w, w+1), (w+1, w)], mode='constant', constant_values=0)\n\tv = np.pad(flow_v, [(w, w+1), (w+1, w)], mode='constant', constant_values=0)\n\tdel flow_u,flow_v\n\n\treturn (u,v)\n\t\ndef lk_flow_improved(Old_Image, New_Image, window_size, debug_level=0):\n\tkernel_x = np.array([[-1., 1.], [-1., 1.]])\n\tkernel_y = np.array([[-1., -1.], [1., 1.]])\n\t\n\tw = int(window_size/2)\n\tu=np.zeros(Old_Image.shape)\n\tv=np.zeros(Old_Image.shape)\n\t\n\tIx = np.array(cv2.filter2D(np.array(Old_Image, dtype='float'),-1,kernel_x), dtype='float')\n\tIy = np.array(cv2.filter2D(np.array(Old_Image, dtype='float'),-1,kernel_y), dtype='float')\n\tIt = np.array(New_Image, dtype='float') - np.array(Old_Image, dtype='float')\n\t\n\tif debug_level>0:\n\t\tplot_image(Ix, title=\"partial x derivative of old image\")\n\t\tplot_image(Iy, title=\"partial y derivative of old image\")\n\t\tplot_image(It, title=\"partial time derivative of images\")\n\t\t\n\tparamaters = np.zeros(Old_Image.shape+(5,))\n\tparamaters[..., 0] = cv2.GaussianBlur(np.array(Ix ** 2, dtype='float'),(window_size,window_size),0)\n\tparamaters[..., 1] = cv2.GaussianBlur(np.array(Iy ** 2, dtype='float'),(window_size,window_size),0)\n\tparamaters[..., 2] = cv2.GaussianBlur(np.array(Ix * Iy, dtype='float'),(window_size,window_size),0)\n\tparamaters[..., 3] = cv2.GaussianBlur(np.array(Ix * It, dtype='float'),(window_size,window_size),0)\n\tparamaters[..., 4] = cv2.GaussianBlur(np.array(Iy * It, dtype='float'),(window_size,window_size),0)\n\tdel Ix, Iy, It\n\n\t\n\ttemp = np.nan_to_num(np.array(paramaters[..., 0]*paramaters[..., 1]-paramaters[..., 2]*paramaters[..., 2]))\n\tATA = np.nan_to_num(np.array([[paramaters[..., 0],paramaters[..., 2]],[paramaters[..., 2],paramaters[..., 1]]]))\n\teigns = np.nan_to_num(np.linalg.eigvals(ATA.T).T)\n\t\n\tmineign = np.minimum(eigns[0,:,:],eigns[1,:,:]) \n\tmaxeign = np.maximum(eigns[0,:,:],eigns[1,:,:]) \n\t\n\t\n\tu = np.where( ( (mineign > 1e-2) & ( (maxeign / mineign) < 
255) ), \\\n\t\tparamaters[..., 4] * paramaters[..., 2] - paramaters[..., 3] * paramaters[..., 1], \\\n\t\t0.0)\n\tv = np.where( ( (mineign > 1e-2) & ( (maxeign / mineign) < 255) ), \\\n\t\t-paramaters[..., 3] * paramaters[..., 2] - paramaters[..., 4] * paramaters[..., 0], \\\n\t\t0.0)\n\t\n\tu = np.nan_to_num(u/temp)\n\tv = np.nan_to_num(v/temp)\n\t\n\treturn (u,v)\n\ndef generate_image_pyramid(image, minSize=(30,30)):\n\tcopyImage = image\n\tpyramids = []\n\twhile copyImage.shape[0] > minSize[1] and copyImage.shape[1] > minSize[0]:\n\t\tpyramids.insert(0, copyImage)\n\t\tcopyImage = cv2.pyrDown(copyImage)\n\treturn pyramids\n\t\ndef generate_window_size_pyramid(windowSize, depth, minSize=3):\n\tsizes = []\n\tfor _ in range(depth):\n\t\tif int(windowSize)%2 == 0:\n\t\t\twindowSize = int(windowSize)+1\n\t\tsizes.insert(0, windowSize)\n\t\twindowSize = int(windowSize/2)\n\t\tif windowSizeold_frame.shape[1]:\n\t\t\tshape = (int(1280),int(720))\n\t\telse:\n\t\t\tshape = (int(720),int(1280))\n\t\t#shape = old_frame.shape\n\t\t\n\t\t#scale old_frame down to desired shape\n\t\told_frame = cv2.resize(old_frame, (shape[1],shape[0]), interpolation = cv2.INTER_LINEAR)\n\t\t\n\t\t#matricies to hold image fill data\n\t\tfinal_image = np.zeros(shape, dtype=int)\n\t\tset_pixels = np.full(shape, 255.0, dtype=float)\n\t\t\n\t\tprevios_mags = np.zeros((5,shape[0],shape[1]))\n\t\tprevios_frames = np.zeros((3,shape[0],shape[1]))\n\t\t\n\t\tframe_counter = 0\n\t\tmax_set_val = -1.0\n\t\t\n\t\twhile(video.isOpened()):\n\t\t\tfor i in range(skipframes):\n\t\t\t\tret,frame = video.read()\n\t\t\t\tif frame is None:\n\t\t\t\t\tbreak\n\t\t\t\n\t\t\tif frame is not None:\n\t\t\t\tret, frame = video.read()\n\t\t\t\n\t\t\tif ( maxframes!=-1 and frame_counter == maxframes) or not video.isOpened() or frame is None:\n\t\t\t\tplot_image(final_image, title=\"Produced Image\", cmap='gray')\n\t\t\t\tplot_image(set_pixels, title=\"Cofidence\", cmap='gray')\n\t\t\t\treturn\n\t\t\t\t\n\t\t\tnew_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\t\t\tnew_frame = cv2.resize(new_frame, (shape[1],shape[0]), interpolation = cv2.INTER_LINEAR)\n\t\t\t\n\t\t\tprint(\"On flow \"+str(frame_counter))\n\t\t\t\n\t\t\tfeature_params = dict( maxCorners = 100,\n qualityLevel = 0.3,\n minDistance = 7,\n blockSize = 7 )\n\t\t\t\n\t\t\tu,v = coarse_to_fine_lk_flow(old_frame,new_frame,debug_level=debug_level-1,window_size=window_size)\n\t\t\tmag = np.arctan(np.sqrt(np.add(np.square(u),np.square(v)))/2)*2/np.pi\n\t\t\t\n\t\t\t\n\t\t\tif debug_level>0:\n\t\t\t\tplot_flow(old_frame,new_frame,(u,v))\n\t\t\t\t\n\t\t\tmags = cv2.GaussianBlur(mag,(25,25),0)\n\t\t\t\t\n\t\t\tprevios_mags[frame_counter%5] = mags\n\t\t\tprevios_frames[frame_counter%3] = old_frame\n\t\t\t\n\t\t\tif frame_counter>=4:\n\t\t\t\n\t\t\t\tmagn = np.zeros(shape)\n\t\t\t\tmagn += (1)*previos_mags[(frame_counter)%5]\n\t\t\t\tmagn += (2)*previos_mags[(frame_counter-1)%5]\n\t\t\t\tmagn += (3)*previos_mags[(frame_counter-2)%5]\n\t\t\t\tmagn += (2)*previos_mags[(frame_counter-3)%5]\n\t\t\t\tmagn += (1)*previos_mags[(frame_counter-4)%5]\n\t\t\t\tmagn /= 12\n\t\t\t\t\n\t\t\t\tfor i in range(shape[0]):\n\t\t\t\t\tfor j in range(shape[1]):\n\t\t\t\t\t\tif magn[i,j] 1000 and valor_serie <= 5000):\n return valor_serie * 1.10\n if(valor_serie > 5000):\n return valor_serie * 1.15\n\nciudad_calcular = ciudades_uno.map(calcular)\n\nresultado = ciudades_uno.where(ciudades_uno < 1000,\n ciudades_uno * 1.05)\n\nseries_numeros = pd.Series(['1.0', '2', -3])\nprint(pd.to_numeric(series_numeros, downcast 
= 'integer'))\n\nseries_numeros_err = pd.Series(['no tiene', '1.0', '2', -3])\nprint(pd.to_numeric(series_numeros_err))\nprint(pd.to_numeric(series_numeros_err, errors = 'ignore'))\nprint(pd.to_numeric(series_numeros_err, errors = 'coerce'))\n\n\n\n","sub_path":"03-Pandas/b_series.py","file_name":"b_series.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"232628464","text":"'''***************************************************************************\r\n*\r\n* FILE NAME:\r\n* bta_serv_nethawkconfiguration.py\r\n*\r\n* DESCRIPTION:\r\n* This module will create actual Nethawk configuration file based on details provided in setup configuration file and\r\n* logging configuration file.\r\n* \r\n* \r\n* REVISION HISTORY\r\n*\r\n* Date Author Reason\r\n* 21 Nov 2009 Manish Gupta First Draft\r\n*\r\n* Copyright 2009, ARICENT\r\n\r\n***************************************************************************'''\r\n\r\n# Import system modules\r\nimport sys\r\nimport os\r\nfrom string import *\r\nfrom xml.dom import minidom\r\n\r\n# Import user defined modules\r\nimport bta_util_constants as CONSTANTS\r\nfrom bta_serv_nethawkconfigcreator import clsNethawkConfiguration\r\n\r\n\r\n\r\n\r\n#File CONSTANTS\r\nMODULE_NAME = \"bta_serv_nethawkconfiguration\"\r\n\r\nNETHAWK = \"nethawk\"\r\nNETHAWK_CONFIG_FILE_NAME = \"NethawkConfiguration.xml\"\r\n\r\nCONST_STACK_OMUSIG = 2\r\nCONST_STACK_TRXSIG = 1\r\nCONST_STACK_PCU = 81\r\nCONST_STACK_TRAU_FR = 4\r\nCONST_STACK_TRAU_HR = 5\r\n\r\nCONST_PARAM_NO_SETTINGS = 0\r\nCONST_PARAM_DYNAMIC_ABIS_ON = 1\r\nCONST_PARAM_DYNAMIC_ABIS_OFF = 2\r\n\r\nCONST_BIT_RATE_8 = 3\r\nCONST_BIT_RATE_16 = 2\r\nCONST_BIT_RATE_32 = 1\r\nCONST_BIT_RATE_64 = 0\r\n\r\nBIT_RATE_8 = 8\r\nBIT_RATE_16 = 16\r\nBIT_RATE_32 = 32\r\nBIT_RATE_64 = 64\r\n\r\nSUB_TSL_0 = 0\r\nSUB_TSL_1 = 1\r\nSUB_TSL_2 = 2\r\nSUB_TSL_3 = 3\r\n\r\nYES = \"Y\"\r\nACTIVATED = 1\r\nACTIVATE = \"activate\"\r\nLINE = \"line\"\r\nTIMESLOT = \"timeslot\"\r\nRATE = \"rate_0_to_3\"\r\nSUB_CH = \"sub_ch_0_to_7\"\r\nSTACK_ID = \"stack_id\"\r\nPARAM = \"param_0_to_2\"\r\n\r\nTAG_OM = \"om\"\r\nTAG_TRXSIG = \"trxsig\"\r\nTAG_PCU = \"pcu\"\r\nTAG_TRAU = \"trau\"\r\nTAG_MESSAGE = \"messages\"\r\n\r\nSTACK_OM = 'O&M'\r\nSTACK_TRXSIG = 'TRXSIG'\r\nSTACK_PCU = 'PCU'\r\nSTACK_TRAU_FR = 'TRAU FR'\r\nSTACK_TRAU_HR = 'TRAU HR'\r\n\r\nATT_MESSAGES = \"messages\"\r\nATT_STATS = \"stats\"\r\n\r\nclass clsNethawkGCFCreator:\r\n '''\r\n Description : This class reads the command parameters and command xml.\r\n '''\r\n def __init__( self, setup_config, obj_log_config, dict_cmd_file ):\r\n '''\r\n * FUNCTION NAME:\r\n * __init__()\r\n *\r\n * DESCRIPTION:\r\n * Constructor Function of clsNethawkGCFCreator class.\r\n * \r\n *\r\n * INPUT:\r\n * setup_config - Object of setup configuration file\r\n * obj_log_config - Object of logging configuration file\r\n * dict_cmd_file - Dictionary which holds path of all XML files\r\n * \r\n * RETURNS:\r\n * Object of clsNethawkGCFCreator Class\r\n *\r\n * NOTES:\r\n * \r\n '''\r\n self.setup_config = setup_config\r\n self.obj_log_config = obj_log_config\r\n self.dict_cmd_file = dict_cmd_file\r\n self.obj_bsc_interface = obj_log_config.obj_logging_configuration\r\n self.obj_nethawk = None\r\n self.executable_dir = None\r\n \r\n self.initialize()\r\n\r\n def initialize( self ):\r\n \r\n '''\r\n * FUNCTION NAME:\r\n * initialize( self )\r\n *\r\n * DESCRIPTION:\r\n * Initializes class attributes\r\n *\r\n * 
INPUT:\r\n * None \r\n *\r\n * RETURNS:\r\n * None\r\n * NOTES:\r\n * \r\n ''' \r\n try:\r\n obj_bts_site = self.setup_config.dict_obj_bts_site_config[ self.setup_config.site_index ] \r\n self.executable_dir = self.setup_config.dict_file_path[CONSTANTS.FILE_TYPE_EXECUTABLES]\r\n \r\n if CONSTANTS.IF_GENERAL in obj_bts_site.dict_obj_interface:\r\n obj_gen_intf = obj_bts_site.dict_obj_interface[ CONSTANTS.IF_GENERAL ]\r\n \r\n if CONSTANTS.CMD_TYPE_NETHAWK in obj_gen_intf.dict_instrument_type:\r\n tool_id = obj_gen_intf.dict_instrument_type[ CONSTANTS.CMD_TYPE_NETHAWK ][0]\r\n self.obj_nethawk = obj_gen_intf.dict_obj_instruments[tool_id]\r\n else:\r\n raise Exception(\"Nethawk configuration not defined in setup config\")\r\n else:\r\n raise Exception(\"General IF configuration not defined in setup config\")\r\n except Exception as detail:\r\n raise Exception(MODULE_NAME + \".clsNethawkGCFCreator.startRecording, \" +\\\r\n str( detail ))\r\n \r\n def createConfigurationFile( self, gcf_file_path=CONSTANTS.EMPTY_STRING ):\r\n '''\r\n * FUNCTION NAME:\r\n * createConfigurationFile()\r\n *\r\n * DESCRIPTION:\r\n * Reads logging details from looging configuration file and create nethawk configuration file\r\n * by providing all parameters from logging configuration.\r\n *\r\n * INPUT:\r\n * \r\n * RETURNS:\r\n * nethawk gcf file \r\n *\r\n * NOTES:\r\n * \r\n '''\r\n try:\r\n nc = clsNethawkConfiguration(self.setup_config, self.dict_cmd_file, gcf_file_path)\r\n nc.readXMLFileParameters()\r\n \r\n line_no = 1\r\n data = {}\r\n data_list = []\r\n connections = 0\r\n \r\n for tag_msg, dict_abis_log_config in self.obj_bsc_interface.dict_abis.items():\r\n for tag_misc, tag_value in dict_abis_log_config.items():\r\n if ((tag_misc != ATT_MESSAGES) and (tag_misc != ATT_STATS)):\r\n data_item = {} # temp dictionary\r\n connections += 1\r\n \r\n bit_rate = int( self.obj_bsc_interface.dict_abis[tag_msg][tag_misc].bit_rate )\r\n pcm_tsl = int( self.obj_bsc_interface.dict_abis[tag_msg][tag_misc].ts_id )\r\n sub_tsl = int( self.obj_bsc_interface.dict_abis[tag_msg][tag_misc].sub_channel ) \r\n line = int( self.obj_bsc_interface.dict_abis[tag_msg][tag_misc].line )\r\n stack = self.obj_bsc_interface.dict_abis[tag_msg][tag_misc].stack \r\n\r\n rate_id = self.selectBitRate(bit_rate)\r\n stack_id = self.selectStackID ( tag_msg, stack )\r\n\r\n if (YES == str(dict_abis_log_config[ATT_MESSAGES]).capitalize()):\r\n data_list.append(data_item)\r\n # assigning values to dictionary items.\r\n data_item[ACTIVATE] = ACTIVATED\r\n data_item[LINE] = line\r\n data_item[TIMESLOT] = pcm_tsl\r\n data_item[RATE] = rate_id\r\n data_item[SUB_CH] = sub_tsl\r\n data_item[STACK_ID] = stack_id\r\n data_item[PARAM] = CONST_PARAM_NO_SETTINGS\r\n \r\n # PCU Prefilter \r\n \r\n data_item_pcu_prefilter ={}\r\n data_item_pcu_prefilter[\"dir\"]=0\r\n data_item_pcu_prefilter[\"filter_in\"]=0\r\n data_item_pcu_prefilter[\"mask_format\"]=0\r\n \r\n data_list_pcu_prefilter =[]\r\n data_list_pcu_prefilter.append(data_item_pcu_prefilter)\r\n \r\n # IP 1-5\r\n \r\n # First 4 IP contains 5 fields (Activate, Adapter, fa_flag, filter_file,address)\r\n # Last IP (i.e. 
IP5) contains only (fa_flag, address)\r\n data_item_ip = {}\r\n data_list_ip = []\r\n data_item_ip[\"activate\"]=0\r\n data_item_ip[\"adapter\"]=-1\r\n data_item_ip[\"fa_flag\"]=1\r\n data_item_ip[\"filter_file\"]=\"\"\r\n data_item_ip[\"address\"]=0\r\n \r\n data_list_ip.append(data_item_ip)\r\n data_list_ip.append(data_item_ip)\r\n data_list_ip.append(data_item_ip)\r\n data_list_ip.append(data_item_ip)\r\n \r\n data_item_ip5 = {}\r\n data_item_ip5[\"fa_flag\"]=2\r\n data_item_ip5[\"address\"]=0\r\n data_list_ip.append(data_item_ip5)\r\n \r\n # PCU Predefine 1-6\r\n \r\n data_item_pcu_predefine = {}\r\n data_list_pcu_predefine = []\r\n \r\n data_item_pcu_predefine[\"enabled\"]=0\r\n data_item_pcu_predefine[\"direction\"]=0\r\n \r\n # Added same data for multiple times for different Predefines 1-6\r\n data_list_pcu_predefine.append(data_item_pcu_predefine)\r\n data_list_pcu_predefine.append(data_item_pcu_predefine)\r\n data_list_pcu_predefine.append(data_item_pcu_predefine)\r\n data_list_pcu_predefine.append(data_item_pcu_predefine)\r\n data_list_pcu_predefine.append(data_item_pcu_predefine)\r\n data_list_pcu_predefine.append(data_item_pcu_predefine)\r\n \r\n # Local Adrressses\r\n # Data for MAC Addresses\r\n data_item_local_addr = {}\r\n data_list_local_addr = []\r\n \r\n data_item_local_addr[\"mac_address\"]=\"0.13.96.167.136.233\"\r\n \r\n # Stack\r\n #Data for Stack\r\n \r\n #Stack 0\r\n d_i5 = {}\r\n d_i5[\"level\"]=1\r\n d_i5[\"coding\"]=0\r\n d_i5[\"color\"]=2\r\n d_i5[\"stack_num\"]=0\r\n d_i5[\"layer_start_index\"]=0\r\n d_i5[\"layer_end_index\"]=3\r\n \r\n #Stack 1\r\n d_i6 = {}\r\n d_i6[\"level\"]=1\r\n d_i6[\"coding\"]=0\r\n d_i6[\"color\"]=2\r\n d_i6[\"stack_num\"]=1\r\n d_i6[\"layer_start_index\"]=0\r\n d_i6[\"layer_end_index\"]=8\r\n \r\n dl5 = []\r\n dl5.append(d_i5)\r\n dl5.append(d_i6)\r\n \r\n # Adding created data to settings dictionary\r\n data[\"connection_data_list\"]=data_list\r\n data[\"pcu_prefilter\"]=data_list_pcu_prefilter\r\n data[\"ip\"]=data_list_ip\r\n data[\"pcu_predefine\"]=data_list_pcu_predefine\r\n data[\"local_address\"]=data_list_local_addr\r\n data[\"stack\"]=dl5\r\n \r\n # API to process data and write to file\r\n config_file_name = nc.writeToFile(data)\r\n \r\n return config_file_name\r\n except Exception as detail:\r\n raise Exception(detail)\r\n \r\n def selectBitRate( self, bit_rate ):\r\n '''\r\n * FUNCTION NAME:\r\n * selectBitRate()\r\n *\r\n * DESCRIPTION:\r\n * Select correct bit rate (convert bit rate into nethawk specified bit rate)\r\n *\r\n * INPUT:\r\n * bit_rate - Bit rate which needs to be converted to rate_id\r\n *\r\n * RETURNS:\r\n * rate id , in case of error it raise the error \r\n *\r\n * NOTES:\r\n * \r\n ''' \r\n if (BIT_RATE_8 == bit_rate):\r\n rate_id = CONST_BIT_RATE_8\r\n elif (BIT_RATE_16 == bit_rate):\r\n rate_id = CONST_BIT_RATE_16\r\n elif (BIT_RATE_32 == bit_rate):\r\n rate_id = CONST_BIT_RATE_32 \r\n elif (BIT_RATE_64 == bit_rate):\r\n rate_id = CONST_BIT_RATE_64\r\n else:\r\n raise Exception(\"Invalid bit rate\")\r\n\r\n return rate_id\r\n\r\n def selectStackID ( self, tag_msg, stack ):\r\n '''\r\n * FUNCTION NAME:\r\n * selectBitRate()\r\n *\r\n * DESCRIPTION:\r\n * Select correct stack id on the basis of type of message (OM, TRXSIG, PCU) and stack provided by user (TRAU FR, TRAU HR)\r\n *\r\n * INPUT:\r\n * tag_msg - Type of Message ( O&M, TRXSIG, PCU)\r\n * stack - Stack provided by user (TRAU FR, TRAU HR)\r\n *\r\n * RETURNS:\r\n * stack id , in case of error it raise the error \r\n *\r\n * NOTES:\r\n * \r\n 
''' \r\n if (TAG_OM == tag_msg):\r\n stack_id = CONST_STACK_OMUSIG \r\n elif (TAG_TRXSIG == tag_msg):\r\n stack_id = CONST_STACK_TRXSIG \r\n elif (TAG_PCU == tag_msg):\r\n stack_id = CONST_STACK_PCU\r\n elif ((STACK_TRAU_FR == stack) and (TAG_TRAU == tag_msg)):\r\n stack_id = CONST_STACK_TRAU_FR\r\n elif ((STACK_TRAU_HR == stack) and (TAG_TRAU == tag_msg)):\r\n stack_id = CONST_STACK_TRAU_HR\r\n else:\r\n raise Exception(\"Invalid Stack ID\")\r\n\r\n return stack_id\r\n\r\n def startRecording( self, gcf_file, file_path ):\r\n \r\n '''\r\n * FUNCTION NAME:\r\n * startRecording( self, gcf_file, file_path )\r\n *\r\n * DESCRIPTION:\r\n * Starts the nethawk recording\r\n *\r\n * INPUT:\r\n * gcf_file - The nethawk configuration file\r\n * file_path - The grc file path\r\n *\r\n * RETURNS:\r\n * None\r\n * NOTES:\r\n * \r\n ''' \r\n nethawk_recording_interval = None\r\n try:\r\n grc_file_name = file_path + \"ScenarioLogs.grc\" \r\n \r\n self.launchNethawk()\r\n self.loadNethawkConfig( gcf_file )\r\n \r\n if self.obj_log_config.nethawk_recording_interval:\r\n nethawk_recording_interval = self.obj_log_config.nethawk_recording_interval\r\n elif self.obj_nethawk.recording_interval:\r\n nethawk_recording_interval = self.obj_nethawk.recording_interval\r\n else:\r\n nethawk_recording_interval = CONSTANTS.DEFAULT_INTERVAL_IN_MIN_NETHAWK_RECORDING\r\n \r\n args = [ CONSTANTS.NETHAWK_START_RECORDING, '\\\"' + grc_file_name + '\\\"', '\\\"' + self.executable_dir + '\\\"',\\\r\n str( nethawk_recording_interval ) ]\r\n os.spawnv( os.P_WAIT, self.executable_dir + CONSTANTS.NETHAWK_START_RECORDING, args ) \r\n except Exception as detail:\r\n raise Exception(MODULE_NAME + \".clsNethawkGCFCreator.startRecording, \" +\\\r\n str( detail ))\r\n \r\n def stopRecording( self ):\r\n \r\n '''\r\n * FUNCTION NAME:\r\n * stopRecording( self )\r\n *\r\n * DESCRIPTION:\r\n * Stops the nethawk recording\r\n *\r\n * INPUT:\r\n * None\r\n *\r\n * RETURNS:\r\n * None\r\n * NOTES:\r\n * \r\n ''' \r\n try:\r\n args = [ CONSTANTS.NETHAWK_STOP_RECORDING ]\r\n os.spawnv( os.P_WAIT, self.executable_dir + CONSTANTS.NETHAWK_STOP_RECORDING, args )\r\n\r\n self.terminateNethawk() \r\n except Exception as detail:\r\n raise Exception(MODULE_NAME + \".clsNethawkGCFCreator.stopRecording, \" +\\\r\n str( detail ))\r\n\r\n def terminateNethawk( self ):\r\n \r\n '''\r\n * FUNCTION NAME:\r\n * terminateNethawk( self )\r\n *\r\n * DESCRIPTION:\r\n * Terminates the nethawk executable\r\n *\r\n * INPUT:\r\n * None\r\n *\r\n * RETURNS:\r\n * None\r\n * NOTES:\r\n * \r\n '''\r\n try:\r\n args = [ CONSTANTS.NETHAWK_TERMINATE ]\r\n os.spawnv( os.P_WAIT, self.executable_dir + CONSTANTS.NETHAWK_TERMINATE, args ) \r\n except Exception as detail:\r\n raise Exception(MODULE_NAME + \".clsNethawkGCFCreator.terminateNethawk, \" +\\\r\n str( detail ))\r\n\r\n def launchNethawk( self ):\r\n \r\n '''\r\n * FUNCTION NAME:\r\n * launchNethawk( self )\r\n *\r\n * DESCRIPTION:\r\n * Launches the nethawk executable\r\n *\r\n * INPUT:\r\n * None\r\n *\r\n * RETURNS:\r\n * None\r\n * NOTES:\r\n * \r\n '''\r\n try:\r\n args = [ CONSTANTS.NETHAWK_LAUNCH, '\\\"'+ self.obj_nethawk.path + '\\\"','\\\"' + self.executable_dir + '\\\"']\r\n os.spawnv( os.P_WAIT, self.executable_dir + CONSTANTS.NETHAWK_LAUNCH, args ) \r\n except Exception as detail:\r\n raise Exception(MODULE_NAME + \".clsNethawkGCFCreator.launchNethawk, \" +\\\r\n str( detail ))\r\n\r\n def loadNethawkConfig( self, gcf_file ):\r\n \r\n '''\r\n * FUNCTION NAME:\r\n * loadNethawkConfig( self, gcf_file )\r\n 
*\r\n     * DESCRIPTION:\r\n     *     Loads the nethawk configuration file\r\n     *\r\n     * INPUT:\r\n     *     gcf_file - The nethawk configuration file\r\n     *\r\n     * RETURNS:\r\n     *     None\r\n     * NOTES:\r\n     *       \r\n        '''\r\n        try:\r\n            args = [ CONSTANTS.NETHAWK_LOAD_CONFIG, gcf_file ]  \r\n            os.spawnv( os.P_WAIT, self.executable_dir + CONSTANTS.NETHAWK_LOAD_CONFIG, args)   \r\n        except Exception as detail:\r\n            raise Exception(MODULE_NAME + \".clsNethawkGCFCreator.loadNethawkConfig, \" +\\\r\n                            str( detail ))\r\n\r\n\r\n","sub_path":"kk_ADTRAN/Execution_Framework1_build25/Execution_Framework1_build25/bta_serv_nethawkconfiguration.py","file_name":"bta_serv_nethawkconfiguration.py","file_ext":"py","file_size_in_byte":17201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"543392515","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import Twist\n\nkey_mapping = {\n    'w': [1, 0],   # avant\n    'a': [0, 1],   # gauche\n    's': [0, 0],   # stop\n    'd': [0, -1],  # droite\n    'x': [-1, 0]   # arriere\n}\n\n# Middle speed and rotations\nspeed = 0\nrot = 0\n\n\ndef keys_cb(msg, twist_pub):\n    global speed, rot\n    if len(msg.data) == 0 or msg.data not in key_mapping.keys():\n        return  # nothing to do, unknown key\n    vels = key_mapping[msg.data[0]]\n    if msg.data == 's':\n        # we reset the speed and rot\n        speed = 0\n        rot = 0\n    else:\n        speed += vels[0]\n        rot += vels[1]\n\n    t = Twist()\n    t.linear.x = speed\n    t.angular.z = rot\n    twist_pub.publish(t)\n\nif __name__ == '__main__':\n    rospy.init_node('keys_to_twist')\n    twist_pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)\n    rospy.Subscriber('keys', String, keys_cb, twist_pub)\n    rospy.spin()\n","sub_path":"brest2016_ws/src/teleop/src/keys_to_twist_morse.py","file_name":"keys_to_twist_morse.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"576524889","text":"#!/usr/bin/env python\n# coding: utf-8\n\n_B_MEAN_ = 103.939\n_G_MEAN_ = 116.779\n_R_MEAN_ = 123.68\n\nimg_height = 224\nimg_width = 224\n\nframe_num = 5\n\ntrain_steps = 10\ntrain_epoch = 10\nval_steps = 10\n\n\nvideo_batch_size = 2\n\nmodel_save_path = 'model-ckpt/'\n\ndata_path = 'dataset/'\n\ntest_data_path = 'video_test/'\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"286662909","text":"from adafruit_servokit import *\nimport time\nimport keyboard\n\nkit = ServoKit(channels=16)\nkit.servo[0].set_pulse_width_range(1000, 2200)\nkit.servo[1].set_pulse_width_range(1000, 2200)\nkit.servo[2].set_pulse_width_range(1000, 2200)\nkit.servo[3].set_pulse_width_range(1000, 2200)\n\n\nclass Movements:\n    def __init__(self):\n        self.maxValue0 = 163.9\n        self.minValue0 = 27.5\n        self.medValue0 = 95.5\n\n        self.maxValue1 = 163.5\n        self.minValue1 = 30\n        self.medValue1 = 98\n\n        self.maxValue2 = 163.5\n        self.minValue2 = 16.5\n        self.medValue2 = 101\n\n        self.maxValue3 = 163.5\n        self.minValue3 = 29\n        self.medValue3 = 95.5\n\n        kit.servo[0].angle = self.medValue0  # X AXIS MOVING\n        kit.servo[1].angle = self.medValue1  # Y AXIS MOVING\n        kit.servo[2].angle = 16.5  # THROTTLE\n        kit.servo[3].angle = self.medValue3  # TURNING\n\n\n    def altHold(self): \n        kit.servo[0].angle = self.medValue0\n        kit.servo[1].angle = self.medValue1\n        kit.servo[2].angle = self.medValue2\n        kit.servo[3].angle = self.medValue3\n\n    def setArmed(self, armed): # ARM DISARM
FUNCTION\n        if armed == True:\n            kit.servo[2].angle = self.minValue2\n            kit.servo[3].angle = self.maxValue3\n            time.sleep(5)\n            kit.servo[3].angle = self.medValue3\n            kit.servo[2].angle = self.medValue2\n            i=self.minValue2\n            while iself.minValue2:\n                kit.servo[2].angle = i\n                i-=0.5\n                time.sleep(0.04)","sub_path":"smoothlib.py","file_name":"smoothlib.py","file_ext":"py","file_size_in_byte":3824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"226022519","text":"# encoding: UTF-8\n\n# Autor: Jean Paul Esquivel Lobato A01376152\n# Descripción: Cálcula rendimiento de un auto.\n\n\n# Cálcular el rendimiento 1 del auto\n\ndef calcularRendimientoKmXLt(km,litros):\n    rendimiento = km/litros\n    return rendimiento\n\n# Operación de conversión a partir del rendimiento\n\ndef calcularMillasPorGalon(km,litros):\n    millas = km / 1.609344\n    galones = litros * .264172051\n    rendimiento2 = millas / galones\n    return rendimiento2\n\n# Función principal.\n\ndef main():\n    kilometrosRecor = int(input(\"Teclea el número de km recorridos: \"))\n    litrosRecor = int(input(\"Teclea el número de litros de gasolina usados: \"))\n    millasGalon = calcularMillasPorGalon (kilometrosRecor, litrosRecor)\n    kilomeLit = calcularRendimientoKmXLt (kilometrosRecor, litrosRecor)\n    print (\" \")\n    print (\"Si recorres %d kms con %.0f litros de gasolina, el rendimeinto es de:\" % (kilometrosRecor, litrosRecor))\n    print (\"%.2f\" % kilomeLit, (\"km/l\"))\n    print(\"%.2f\" % millasGalon, (\"mi/gal\"))\n    kmR = int(input(\"¿Cuántos kilómetros vas a recorrer? \"))\n    litrosNece = kmR / kilomeLit\n\n\n    print (\"Para recorrer %d km necesitas %.2f litros de gasolina\"\"\"% (kmR,litrosNece))\n\nmain()\n","sub_path":"Rendimiento de autos.py","file_name":"Rendimiento de autos.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"239934857","text":"import contextvars\nfrom unittest.mock import Mock\n\ntest_cv = contextvars.ContextVar(\"test-cv\")\n\n\nclass TestContextVars:\n    def test_can_set_cv_on_context(self):\n        outer_value = Mock(name=\"outer-value\")\n        inner_value = Mock(name=\"outer-value\")\n\n        def run_within_context():\n            test_cv.set(inner_value)\n            assert test_cv.get() == inner_value\n\n        token = test_cv.set(outer_value)\n        context = contextvars.copy_context()\n        try:\n            context.run(run_within_context)\n            assert test_cv.get() == outer_value\n        finally:\n            test_cv.reset(token)\n","sub_path":"tests/test_std.py","file_name":"test_std.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"546904996","text":"import pandas as pd\nimport numpy as np\nimport scipy.io\n\nTINY = np.finfo('f').tiny\nHUGE = np.finfo('f').max\n\ndef sigmoid(x):\n    return 1.
/ (1 + np.exp(-x))\n\ndef load_data():\n mat_data = scipy.io.loadmat('datasets/data.mat')['data']\n data = {}\n data['X_train'] = mat_data[0][0][0][0][0][0].T\n data['y_train'] = mat_data[0][0][0][0][0][1].T\n data['X_valid'] = mat_data[0][0][1][0][0][1].T\n data['y_valid'] = mat_data[0][0][1][0][0][0].T\n data['X_test'] = mat_data[0][0][2][0][0][0].T\n data['y_test'] = mat_data[0][0][2][0][0][1].T\n return data\n\nclass NeuralNet(object):\n def __init__(self, seed=0,\n r_mean=0, r_std=0.01,\n n_hidden=200):\n self.N_IN = 256\n self.N_HIDDEN = n_hidden\n self.N_OUT = 10\n\n np.random.seed(seed)\n\n self.w1 = np.random.normal(r_mean, r_std, (self.N_IN, self.N_HIDDEN))\n self.b1 = np.random.normal(r_mean, r_std, self.N_HIDDEN)\n self.w2 = np.random.normal(r_mean, r_std, (self.N_HIDDEN, self.N_OUT))\n self.b2 = np.random.normal(r_mean, r_std, self.N_OUT)\n\n self.input = np.zeros(self.N_IN)\n self.hidden = np.zeros(self.N_HIDDEN)\n self.output = np.zeros(self.N_OUT)\n\n self.w1_delta = np.zeros((self.N_IN, self.N_HIDDEN))\n self.b1_delta = np.zeros(self.N_HIDDEN)\n self.w2_delta = np.zeros((self.N_HIDDEN, self.N_OUT))\n self.b2_delta = np.zeros(self.N_OUT)\n\n def f_prop(self, X):\n n_samples = X.shape[0]\n self.input = X\n self.hidden = np.dot(self.input, self.w1) + self.b1\n self.hidden = sigmoid(self.hidden)\n self.output = np.dot(self.hidden, self.w2) + self.b2\n self.output = np.exp(self.output)\n self.output[np.isposinf(self.output)] = HUGE\n self.output = self.output / self.output.sum(axis=1).reshape((n_samples, 1))\n\n def loss(self, w_decay, y_true):\n classification_loss = -np.mean(np.sum(np.log(self.output) * y_true, axis=1))\n sum_weights = np.sum(self.w1**2) + np.sum(self.b1**2) + np.sum(self.w2**2) + np.sum(self.b2**2)\n sum_weights = np.sum(self.w1**2) + np.sum(self.w2**2)\n wd_loss = 1/2 * w_decay * sum_weights\n return classification_loss + wd_loss\n\n def b_prop(self, X, y, w_decay, learning_rate, momentum):\n n_samples = y.shape[0]\n\n self.f_prop(X)\n loss = self.loss(w_decay, y)\n\n loss_deriv_output_in = self.output - y\n loss_deriv_hidden_in = np.dot(loss_deriv_output_in, self.w2.T) * \\\n self.hidden * (1 - self.hidden)\n loss_deriv_w2 = np.dot(self.hidden.T, loss_deriv_output_in) + self.w2*w_decay\n loss_deriv_b2 = loss_deriv_output_in.sum(0) + self.b2*w_decay\n loss_deriv_w1 = np.dot(self.input.T, loss_deriv_hidden_in) + self.w1*w_decay\n loss_deriv_b1 = loss_deriv_hidden_in.sum(0) + self.b1*w_decay\n\n self.w2_delta = momentum*self.w2_delta + loss_deriv_w2/n_samples\n self.b2_delta = momentum*self.b2_delta + loss_deriv_b2/n_samples\n self.w1_delta = momentum*self.w1_delta + loss_deriv_w1/n_samples\n self.b1_delta = momentum*self.b1_delta + loss_deriv_b1/n_samples\n\n self.w2 -= learning_rate * self.w2_delta\n self.b2 -= learning_rate * self.b2_delta\n self.w1 -= learning_rate * self.w1_delta\n self.b1 -= learning_rate * self.b1_delta\n\n return loss\n\n def train(self, X_train, y_train, X_valid, y_valid,\n n_iter=100, w_decay=0,\n learning_rate=0.1, momentum=0.9,\n batchsize=100, do_early_stop=False,\n verbose=False):\n n_batches = (X_train.shape[0] // batchsize) + int(X_train.shape[0] % batchsize)\n\n if do_early_stop:\n best_valid_loss_so_far = np.inf\n\n for i_iter in range(n_iter):\n i = i_iter % n_batches\n X_batch = X_train[(batchsize*i):(batchsize*(i+1)), :]\n y_batch = y_train[(batchsize*i):(batchsize*(i+1))]\n batch_loss = self.b_prop(X_batch, y_batch, w_decay, learning_rate, momentum)\n if verbose:\n print('Iteration {}, batch loss: {:.5f}'.format(i_iter, 
batch_loss))\n if do_early_stop:\n self.f_prop(X_valid)\n if self.loss(w_decay, y_valid) < best_valid_loss_so_far:\n best_valid_loss_so_far = self.loss(w_decay, y_valid)\n\n self.f_prop(X_train)\n train_loss = self.loss(w_decay, y_train)\n self.f_prop(X_valid)\n valid_loss = self.loss(w_decay, y_valid)\n acc = np.mean(np.argmax(self.output, axis=1) == np.argmax(y_valid, axis=1))\n\n print('Train loss: {:.5f}; Valid loss: {:.5f}; accuracy: {:.3f}'.format(train_loss, valid_loss, acc))\n if do_early_stop:\n print('Best Valid loss: {:.5f}'.format(best_valid_loss_so_far))\n\nif __name__ == '__main__':\n data = load_data()\n\n # nn = NeuralNet(r_mean=0, r_std=0.1, n_hidden=10)\n # nn.train(X_train=data['X_train'], y_train=data['y_train'],\n # X_valid=data['X_valid'], y_valid=data['y_valid'],\n # w_decay=0, n_iter=70, learning_rate=0.005, momentum=0, batchsize=4)\n\n # for m in [0, 0.9]:\n # for lr in [0.002, 0.01, 0.05, 0.2, 1.0, 5.0, 20.0]:\n # nn = NeuralNet(r_mean=0, r_std=0.1, n_hidden=10)\n # print(m, lr, end='|')\n # nn.train(X_train=data['X_train'], y_train=data['y_train'],\n # X_valid=data['X_valid'], y_valid=data['y_valid'],\n # w_decay=0, n_iter=70, learning_rate=lr, momentum=m, batchsize=4)\n\n # nn = NeuralNet(r_mean=0, r_std=0.1, n_hidden=200)\n # nn.train(X_train=data['X_train'], y_train=data['y_train'],\n # X_valid=data['X_valid'], y_valid=data['y_valid'],\n # w_decay=0, n_iter=1000, learning_rate=0.35, momentum=0.9, batchsize=100)\n\n # nn = NeuralNet(r_mean=0, r_std=0.1, n_hidden=200)\n # nn.train(X_train=data['X_train'], y_train=data['y_train'],\n # X_valid=data['X_valid'], y_valid=data['y_valid'],\n # w_decay=0, n_iter=1000, learning_rate=0.35, momentum=0.9,\n # batchsize=100, do_early_stop=True, verbose=True)\n\n # for wd in [0, 0.0001, 0.001, 0.01, 1, 5]:\n # print(wd, end=' | ')\n # nn = NeuralNet(r_mean=0, r_std=0.1, n_hidden=200)\n # nn.train(X_train=data['X_train'], y_train=data['y_train'],\n # X_valid=data['X_valid'], y_valid=data['y_valid'],\n # w_decay=wd, n_iter=1000, learning_rate=0.35, momentum=0.9, batchsize=100)\n # nn.f_prop(data['X_valid'])\n # print(nn.loss(0, data['y_valid']))\n\n # for nh in [10, 30, 100, 130, 170]:\n # print(nh, end=' | ')\n # nn = NeuralNet(r_mean=0, r_std=0.1, n_hidden=nh)\n # nn.train(X_train=data['X_train'], y_train=data['y_train'],\n # X_valid=data['X_valid'], y_valid=data['y_valid'],\n # w_decay=0, n_iter=1000, learning_rate=0.35, momentum=0.9, batchsize=100)\n\n # for nh in [18, 37, 113, 189, 236]:\n # print(nh, end=' | ')\n # nn = NeuralNet(r_mean=0, r_std=0.1, n_hidden=nh)\n # nn.train(X_train=data['X_train'], y_train=data['y_train'],\n # X_valid=data['X_valid'], y_valid=data['y_valid'],\n # w_decay=0.01, n_iter=1000, learning_rate=0.35, momentum=0.9, batchsize=100)\n\n","sub_path":"week_9_optimization_and_generalization/week_9_assignment.py","file_name":"week_9_assignment.py","file_ext":"py","file_size_in_byte":7376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"585835983","text":"from django.http import Http404\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.utils import timezone\n\nfrom .models import BlogPost\n# from .forms import BlogPostForm\nfrom .forms import BlogPostModelForm\n\n# Create your views here.\ndef blog_post_detail(request, slug):\n # try:\n # obj = BlogPost.objects.get(id=post_id)\n # 
template_name = 'blog_post_detail.html'\n # context = {\"object\": obj}\n # return render(request, template_name, context)\n # except:\n # raise Http404\n\n # obj = get_object_or_404(BlogPost, id=post_id)\n obj = get_object_or_404(BlogPost, slug=slug)\n template_name = 'blog/detail.html'\n context = {\"object\": obj}\n return render(request, template_name, context)\n\n\ndef blog_post_list_view(request):\n # object_list = BlogPost.objects.all()\n qs = BlogPost.objects.published()\n if request.user.is_authenticated:\n my_qs = BlogPost.objects.filter(user=request.user)\n qs = (qs | my_qs).distinct()\n\n template_name = \"blog/list.html\"\n context = {\"object_list\": qs}\n return render(request, template_name, context)\n\n# @login_required(login_url='/login')\n@staff_member_required\ndef blog_post_create_view(request):\n template_name = \"blog/form.html\"\n\n form = BlogPostModelForm(request.POST or None, request.FILES or None)\n if form.is_valid():\n # print(form.cleaned_data)\n form.user = request.user\n form.save()\n form = BlogPostModelForm() # clean form\n\n context = {\"form\": form}\n return render(request, template_name, context)\n\n@staff_member_required\ndef blog_post_update_view(request, slug):\n template_name = \"blog/form.html\"\n\n obj = get_object_or_404(BlogPost, slug=slug)\n form = BlogPostModelForm(request.POST or None, instance=obj)\n if form.is_valid():\n print(form.cleaned_data)\n form.save()\n\n context = {\"form\": form, \"title\": \"Update blogpost\"}\n return render(request, template_name, context)\n\n@staff_member_required\ndef blog_post_delete_view(request, slug):\n template_name = \"blog/delete.html\"\n\n obj = get_object_or_404(BlogPost, slug=slug)\n if request.method == \"POST\":\n obj.delete()\n return redirect(\"/blog/\")\n\n context = {\"obj\": obj, \"title\": \"Delete blogpost\"}\n return render(request, template_name, context)\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"128997497","text":"import json\nimport os\n\nimport pandas as pd\nfrom celery import shared_task\n\nfrom chicken.utils.views.api import get_local_lottie_file_name\nfrom tacos import settings\n\n\n@shared_task\ndef clean_files(url, config_file_path):\n if not os.path.isfile(config_file_path):\n return\n\n with open(config_file_path, 'r') as f:\n df = pd.read_csv(f, index_col='url')\n # Remove local files\n try:\n lottie_files = json.loads(df.loc[url, 'lottie_files'])\n for filename in lottie_files.keys():\n os.remove(os.path.join(settings.LOCAL_FOLDER_LOTTIE_FILES, get_local_lottie_file_name(url, filename)))\n except TypeError:\n # No file saved\n pass\n # Remove row\n df = df.drop(url, axis=0)\n df.to_csv(config_file_path)\n","sub_path":"tacos/chicken/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"166915720","text":"import line, pygame, vector, math\nPOLYGON, CIRCLE = 0, 1\n\nclass Polygon:\n\tdef __init__(self, points, reference, color=(0,0,0)):\n\t\tself.absPts = []\n\t\tself.relPts = []\n\t\tself.ref = vector.Vector(reference[0], reference[1])\n\t\tself.Type = POLYGON\n\t\tself.color = color\n\t\tself.absPtsLock = False #this is to indicate to other threads that the polygons pointlist shouldn't be read right now\n\n\t\tfor point in 
points:\n\t\t\tself.absPts.append(vector.Vector(point[0],point[1]))\n\t\t\n\t\tfor point in self.absPts:\n\t\t\tself.relPts.append(point.subtract(self.ref))\n\t\t\n\t\tself.maxDist = 0.0\n\t\tfor point in self.relPts:\n\t\t\tif point.magnitude > self.maxDist:\n\t\t\t\tself.maxDist = point.magnitude\n\t\t\n\t\tself.xAxis = vector.Vector(1, 0)\n\t\tself.yAxis = vector.Vector(0, 1)\n\t\tself.update()\n\t\n\tdef update(self):\n\t\tself.updateAbsPts()\n\t\tself.updateSides()\n\t\n\tdef updateSides(self):\n\t\tself.sides = []\n\t\tfor i in range(1, len(self.absPts)):\n\t\t\tself.sides.append(line.Line(self.absPts[i-1], self.absPts[i]))\n\t\tself.sides.append(line.Line(self.absPts[0], self.absPts[len(self.absPts)-1]))\n\t\t\n\tdef updateAbsPts(self):\n\t\tself.absPtsLock = True\n\t\tself.absPts = []\n\t\tfor rp in self.relPts:\n\t\t\tself.absPts.append(self.xAxis.multiplyByScalar(rp.x).add(self.yAxis.multiplyByScalar(rp.y)).add(self.ref))\n\t\t\t#print self.xAxis.multiplyByScalar(rp.x).add(self.yAxis.multiplyByScalar(rp.y)).add(self.ref).toTuple()\n\t\tself.absPtsLock = False\n\t\n\tdef getPoints(self):\n\t\ttoReturn = []\n\t\tfor point in self.absPts:\n\t\t\ttoReturn.append(point.toTuple())\n\t\treturn toReturn\n\t\n\tdef move(self, vector):\n\t\tself.ref = self.ref.add(vector)\n\t\tself.update()\n\n\tdef moveAndCollide(self, vector, collidables, slide=True, origVec=None):\n\t\tif origVec == None:\n\t\t\torigVec = vector\n\t\tself.move(vector)\n\t\tfor c in collidables:\n\t\t\tisctn = self.detectCollision(c)\n\t\t\tif isctn is not None:\n\t\t\t\tself.move(vector.getNegative())\n\t\t\t\tif vector.magnitude > 1.0:\n\t\t\t\t\ttoReturn = self.moveAndCollide(vector.multiplyByScalar(0.5), collidables, slide, origVec)\n\t\t\t\telif slide == True:\n\t\t\t\t\tself.moveAndCollide(self.slide(origVec, c), collidables, False)\n\t\t\t\treturn True\n\t\n\tdef slide(self, origVec, collidable):\n\t\torigGoal = self.ref.add(origVec)\n\t\tl = line.Line(self.ref, origGoal)\n\t\tside = None\n\t\tisctn = None\n\t\tfor s in collidable.sides:\n\t\t\ti = l.getIsctn(s, False, True)\n\t\t\tif i is not None and (isctn == None or self.ref.distanceTo(i) < self.ref.distanceTo(isctn)):\n\t\t\t\tside = s\n\t\t\t\tisctn = i\n\t\t\n\t\tif side == None:\n\t\t\tfor point in self.absPts:\n\t\t\t\tgoal = point.add(origVec)\n\t\t\t\tl = line.Line(point, goal)\n\t\t\t\tfor s in collidable.sides:\n\t\t\t\t\ti = l.getIsctn(s, False, True)\n\t\t\t\t\tif i is not None and (isctn == None or self.ref.distanceTo(i) < self.ref.distanceTo(isctn)):\n\t\t\t\t\t\tside = s\n\t\t\t\t\t\tisctn = i\n\t\tif side is not None:\n\t\t\tsideDir = line.Line(vector.Vector(0.0,0.0), side.getUnitVector())\n\t\t\treturn sideDir.getPerpendicular(origVec).getIsctn(sideDir, False, False)\n\t\t#sideDir = line.Line(vector.Vector(0.0,0.0), side.getUnitVector())\n\t\t#return sideDir.getPerpendicular(origVec).getIsctn(sideDir, False, False)\n\t\t\t\t\t\n\tdef detectCollision(self, collidable):\n\t\tif collidable == self:\n\t\t\treturn None\n\t\tif collidable.Type == POLYGON:\n\t\t\tif self.ref.distanceTo(collidable.ref) > self.maxDist + collidable.maxDist:\n\t\t\t\treturn None\n\t\t\tfor ownSide in self.sides:\n\t\t\t\tfor otherSide in collidable.sides:\n\t\t\t\t\tisctn = ownSide.getIsctn(otherSide)\n\t\t\t\t\tif isctn is not None:\n\t\t\t\t\t\treturn isctn\n\t\tif collidable.Type == CIRCLE:\n\t\t\treturn collidable.detectCollision(self)\n\t\treturn None\n\n\tdef rotate(self, rotation):\n\t\tnewXaxisX = self.xAxis.x * math.cos(rotation) - self.xAxis.y * 
math.sin(rotation)\n\t\tnewXaxisY = self.xAxis.x * math.sin(rotation) + self.xAxis.y * math.cos(rotation)\n\t\tnewYaxisX = self.yAxis.x * math.cos(rotation) - self.yAxis.y * math.sin(rotation)\n\t\tnewYaxisY = self.yAxis.x * math.sin(rotation) + self.yAxis.y * math.cos(rotation)\n\t\tself.xAxis = vector.Vector(newXaxisX,newXaxisY)\n\t\tself.yAxis = vector.Vector(newYaxisX,newYaxisY)\n\t\tself.update()\n\t\n\tdef rotateAndCollide(self, rotation, collidables):\n\t\trotation = float(rotation)\n\t\tself.rotate(rotation)\n\t\tfor c in collidables:\n\t\t\t#isctn = self.detectCollision(c)\n\t\t\tfor p in c.absPts:\n\t\t\t\tif self.containsPoint(p): #for some reason the default detectCollision method doesn't seem to work on very small rotations\n\t\t\t\t\tself.rotate(-rotation)\n\t\t\t\t\tif rotation > 0.01:\n\t\t\t\t\t\tself.rotateAndCollide(rotation/2, collidables)\n\t\t\t\t\t\n\tdef containsPoint(self, point):\n\t\tfor p in self.absPts:\n\t\t\tl = line.Line(point, p)\n\t\t\tfor s in self.sides:\n\t\t\t\tif s.getIsctn(l) is not None and s.p1 is not p and s.p2 is not p:\n\t\t\t\t\treturn False\n\t\treturn True\n\n\tdef containsObject(self, obj):\n\t\tif obj.Type == POLYGON:\n\t\t\tfor pt in obj.absPts:\n\t\t\t\tif self.containsPoint(pt) == True:\n\t\t\t\t\treturn True\n\t\telse:\n\t\t\tif self.containsPoint(obj.center) == True:\n\t\t\t\treturn True\n\t\t#for both:\n\t\tif self.detectCollision(obj) is not None:\n\t\t\treturn True\n\t\treturn False\n\t\t\t\n\tdef draw(self, surface):\n\t\tpygame.draw.polygon(surface, self.color, self.getPoints())\n\t\t#pygame.draw.line(surface, (0,0,0), self.sides[0].p1.toTuple(), self.sides[0].p2.toTuple())\n\t\t#pygame.draw.line(surface, (255,0,0), self.sides[1].p1.toTuple(), self.sides[1].p2.toTuple())\n\t\t\n\t\t#pygame.draw.line(surface, (0,255,0), self.sides[2].p1.toTuple(), self.sides[2].p2.toTuple())\n\n\t\t#pygame.draw.line(surface, (0,0,255), self.sides[3].p1.toTuple(), self.sides[3].p2.toTuple())\n\t\t\n\t\t#print \"ZWART: \" + str(self.sides[0].getUnitVector().toTuple())\n\t\t#print \"ROOD: \" + str(self.sides[1].getUnitVector().toTuple())\n\t\t#print \"GROEN: \" + str(self.sides[2].getUnitVector().toTuple())\n\t\t#print \"BLAUW: \" + str(self.sides[3].getUnitVector().toTuple())\n\n\tdef getRotation(self):#Yes, I know.\n\t\txLine = line.Line(vector.Vector(0,0), vector.Vector(1,0))\n\t\tisctn = xLine.getPerpendicular(self.xAxis).getIsctn(xLine, False, False)\n\t\tif isctn == None:\n\t\t\treturn 0.0\n\t\tadjacentLeg = line.Line(vector.Vector(0,0),isctn)\n\t\tadjacentLegLen = adjacentLeg.getLength()\n\t\tif isctn.x < 0:\n\t\t\tadjacentLegLen = -adjacentLegLen\n\t\trotation = math.acos(adjacentLegLen)\n\t\tif self.xAxis.y < 0:\n\t\t\trotation = 2 * math.pi - rotation\n\t\treturn rotation\n\n\t\n\t\ndef getRectangle(center, width, height, rotation, color=(0,0,0)):#geen klassefunctie; wordt gebruikt om een rechthoekige Polygon snel en makkelijk te construeren\n\tl = ((width/2, height/2), (width/2, -height/2), (-width/2, -height/2), (-width/2, height/2))\n\tpoints = []\n\tfor element in l:\n\t\tpoints.append((center[0] + element[0], center[1] + element[1]))\n\tpolygon = Polygon(points, center, color)\n\tpolygon.rotate(rotation)\n\treturn polygon\n\n\n#print getRectangle((0,0), 30, 40, -0.7).getRotation()\n","sub_path":"polygon.py","file_name":"polygon.py","file_ext":"py","file_size_in_byte":6621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"178055228","text":"from argparse import ArgumentParser\nfrom 
typing import List\nimport random\nfrom datetime import datetime\n\n\nparser = ArgumentParser()\nparser.add_argument(\"--iterations\", default=10, type=int)\n\nVERBOSE = 1\n\n\nclass LoggableEntry:\n    _log = []\n\n    def log(self, message):\n        formatted = f\"[{datetime.utcnow()}] {self.__str__()}: {message}\"\n        self._log.append(formatted)\n        if VERBOSE:\n            print(formatted)\n\n\nclass Token(LoggableEntry):\n    def __init__(self, id):\n        self._log = []\n        super(LoggableEntry, self).__init__()\n        self.id = id\n\n    def __str__(self):\n        return f\"Token {self.id}\"\n\n\nclass Place(LoggableEntry):\n    def __init__(self, id, capacity: int, tokens: List[Token]):\n        self._log = []\n        super(LoggableEntry, self).__init__()\n        self.capacity = capacity\n        self.id = id\n        self.tokens = tokens\n        self.deleted_tokens = []\n\n    def has_token(self):\n        if len(self.tokens) > 0:\n            return True\n        return False\n\n    def has_space(self):\n        \"\"\"If there are less tokens than capacity\"\"\"\n        return len(self.tokens) < self.capacity\n\n    def __str__(self):\n        return f\"Place {self.id}\"\n\n    def add_token(self, token):\n        self.tokens.append(token)\n        self.log(f\"Added token {token}. I have {len(self.tokens)}\")\n\n    def pop_token(self):\n        token = self.tokens.pop()\n        token.log(\"Going through transition, getting removed\")\n        self.log(f\"Removed token {token}. I still have {len(self.tokens)} left\")\n\n\nclass Transition(LoggableEntry):\n    def __init__(self, id, inputs: List[Place], outputs: List[Place]):\n        super(LoggableEntry, self).__init__()\n        self._log = []\n        self.inputs = inputs\n        self.id = id\n        self.outputs = outputs\n        self.selected_output = self.outputs[0]\n\n    def __str__(self):\n        return f\"Transition {self.id}\"\n\n    def tick(self, i, deletions, creations):\n        if all(inp.has_token() for inp in self.inputs) and all(\n            o.has_space() for o in self.outputs\n        ):\n            for inp in self.inputs:\n                # inp.pop_token()\n                deletions.append(inp)\n            for outp in self.outputs:\n                new_token = Token(random.randint(0, 100000))\n                new_token.log(f\"Created and added to {outp}\")\n                self.log(f\"Added {new_token} to {outp}\")\n                # outp.add_token(new_token)\n                creations.append((outp, new_token))\n            \n            self.log(f\"Ran transition {str(self)}\")\n\n","sub_path":"petri/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"576987958","text":"from homework.homework_1108.TestHttpRequest import TestHttpRequest\nfrom homework.homework_1108.DoExcel import DoExcel\n\n\nclass running:\n    def __init__(self, excelname, sheetname):\n        self.excelname = excelname\n        self.sheetname = sheetname\n\n    def running(self):\n        excel_list = DoExcel(self.excelname, self.sheetname).read_excel()\n        for item in excel_list:\n            try:\n                res = TestHttpRequest(url=(item[\"host\"] + item[\"url\"]), data=eval(item[\"data\"]), method=item[\"method\"],\n                                      code=item[\"code\"]).HttpRequest()\n                assert str(item[\"code\"]) == res.json()[\"code\"]\n            except (AssertionError, Exception) as e:\n                print(type(item[\"code\"]), type(res.json()[\"code\"]))\n                print(\"测试未通过的用例id为:{id},描述为:{description}\".format(id=item[\"id\"], description=item[\"description\"]))\n                print(\"预期结果是:{anticipate},实际结果是:{real},接口返回的错误是:{msg}\".format(anticipate=item[\"code\"],\n                                                                      real=res.json()[\"code\"],\n\n                                                                      msg=res.json()[\"msg\"]))\n\n\nif __name__ == \"__main__\":\n    running(\"test_data/testCaseRead.xlsx\", \"testCaseRead\").running()\n","sub_path":"homework/homework_1108/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"247353142","text":"import C\r\ndef puntos(palabra):\r\n    contador = 0 \r\n    for i in palabra:\r\n        if(i == ':'):\r\n            contador = contador +1\r\n    if (contador>0):\r\n        return True\r\n    else:\r\n        return False\r\n\r\ndef carga_cola(c1,c2,palabra):\r\n    i = 1\r\n    \r\n    if palabra[0] != ':':\r\n        C.insertarcola(c1,palabra[0]) \r\n    while(palabra[i]!= \":\" and i may:\r\n        print(\"Segundoa palabra es mas grande que la primera\")\r\n    else:\r\n        print(\"La primera palabra es mas grande que la segunda\")\r\n    \r\ndef identicas(c1,c2):\r\n    con_igual = 0 \r\n    tam_ini = C.tamcola(c1)\r\n    while(not C.colavacia(c1)):\r\n        x = C.supresion(c1)\r\n        y = C.supresion(c2)\r\n        if (x == y):\r\n            con_igual = con_igual + 1\r\n    if con_igual == tam_ini:\r\n        return True\r\n    else:\r\n        return False\r\n    \r\nc1 = C.Cola()\r\nC.crearcola(c1)\r\n\r\nc2 = C.Cola()\r\nC.crearcola(c2)\r\n\r\n\r\n\r\npalabra = input(\"INGRESE 2 PALABRAS SEPARADAS POR \"\":\"\" PUNTOS\")\r\nif puntos(palabra):\r\n    carga_cola(c1,c2,palabra)\r\n    if tam_iguales(c1,c2):\r\n        if identicas(c1,c2):\r\n            print(\"Las palabras son identicas. Tienen los mismos caracteres\")\r\n        else:\r\n            print(\"Las palabras tienen el mismos tamaño, pero diferentes caracteres\")\r\n    else:\r\n        mayor(c1,c2)\r\n\r\nelse:\r\n    print(\"La palabra no tiene 2 puntos.\")\r\n\r\n    \r\n","sub_path":"punto5_2.py","file_name":"punto5_2.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"428139021","text":"import cv2\nfrom matplotlib import pyplot as plt\n\nimgRGB = cv2.imread('Resources/houseRGB.jpg')\n# 圖像是 200*150像素\ncolor = ('b', 'g', 'r')\nfor i, col in enumerate(color):\n    img_hist = cv2.calcHist([imgRGB], [i], None, [256], [0, 256])\n    plt.plot(img_hist, color=col)\n    plt.xlim([0, 256])\n\ncv2.imshow(\"RGB\", imgRGB)\nplt.show()\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"histogram testRGB.py","file_name":"histogram testRGB.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"529770653","text":"from flask import Flask\nfrom flask_restful import Resource, Api, request\nfrom selenium import webdriver\nimport json\nimport sys\nimport time\nimport os\n\n##Web Server Code\n\napp = Flask(__name__)\napi = Api(app)\n\nclass Server(Resource):\n    def get(self):\n        uri_param = request.args.get('uri')\n        #return (json.dumps(uri_to_imagelist(uri_param)))\n        #call_processingonblah()\n        return {'result':uri_to_imagelist(uri_param)}\n\napi.add_resource(Server, '/')\n\n\n##Backend Code\n\ndef uri_to_imagelist(uri):\n    final_image_list = []\n    uri_param = uri\n    driver = webdriver.PhantomJS('vendor/phantomjs/bin/phantomjs')\n    #sdfs\n    #driver = webdriver.Chrome('vendor/chromedriver/chromedriver')\n    driver.get(\"http://homes.com/\" + uri_param)\n    print(\"Opened the url\", file=sys.stderr)\n    time.sleep(3)\n    first_image_url = driver.find_element_by_class_name(\"img\").get_attribute(\"src\")\n    gallerySize = int((driver.find_element_by_xpath(\n        '//*[@id=\"main\"]/article/div[2]/div[1]/div[3]/div/div[1]/div[1]/div/div/div/div[1]/div/span[2]')).text)\n    for i in range(1, gallerySize + 1):\n        final_image_list.append(first_image_url.replace('_1', \"_\" + str(i)))\n    # print(first_image_url,
file=sys.stderr)\n    driver.quit()\n    return final_image_list\n\n\n\nif __name__ == '__main__':\n    # app.run(debug=True, host = os.getenv(\"IP\",\"0.0.0.0\"),port = int (os.getenv('PORT', 5000)))\n    app.run(port = int (os.getenv('PORT', 33507)))\n    #app.run(port=33507)","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"581640669","text":"from sqlite_utils import fetch_all\nfrom collections import Counter, defaultdict\nfrom PIL import Image\nimport numpy as np\nfrom wordcloud import WordCloud\nimport os\nimport spacy\n\nfrom nltk import word_tokenize, sent_tokenize\nfrom nltk import WordNetLemmatizer\nfrom nltk import wordnet as wn\nfrom nltk.collocations import FreqDist, ngrams\nfrom nltk.corpus import stopwords\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer as SIA\n\nfrom gensim.utils import simple_preprocess\nfrom gensim.corpora import Dictionary\nfrom gensim.models.ldamodel import LdaModel\n\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom sklearn.feature_extraction.text import CountVectorizer\n\nfrom Levenshtein import distance\nfrom autocorrect import spell\nfrom operator import itemgetter\nfrom nltk.corpus import words\n\n\nword_dict = words.words()  # 236.736 words\n\ndef calc_distances(word, dictionary=word_dict):\n    \"\"\"Takes a word and returns it's Levenshtein distances from all words in a dictionary.\"\"\"\n    return sorted([(distance(word, w), w) for w in dictionary], key=itemgetter(0))\n\ndef correct_spelling(word, method='autocorrect'):\n    \"\"\"\n    :param method: Method by which to correct spelling, autocorrect or levenshtein\n    :return: Correctly spelled word.\n    \"\"\"\n    if method == 'levenshtein':\n        return calc_distances(word)[0]\n    elif method == 'autocorrect':\n        return spell(word)\n    else:\n        raise NotImplementedError(\"Not a valid method choice.\")\n\n\nmachiavelli, montesquieu = fetch_all()\n\nstop_words = stopwords.words(\"english\")\n\n# use https://burner.bonanza.com/ if you need to create masks\ndef get_wordcloud(text, *, background_color=\"white\", mask_path=\"\", additional_stopwords=[],\n                  max_words=2000, save=True, ret=True, **kwargs):\n    \"\"\"\n    Utility that takes a text string as input and returns a WordCloud instance. Also saves the image.\n\n    :param text: Text string\n    :param background_color: String, default='white'\n    :param mask_path: String, image path\n    :param additional_stopwords: List of strings\n    :param max_words: Int, default=2000\n    :param save: Boolean, if you want to save the file.
New name: \"{filename}_wordcloud.{ext}\"\n :param ret: if you want to return the WordCloud instance\n :param kwargs: Additional keywords arguments to be passed when creating the WordCloud.\n \"\"\"\n\n stopwords_set = set(stop_words)\n if additional_stopwords:\n additional_stopwords = set(additional_stopwords)\n stopwords_set = stopwords_set.union(additional_stopwords)\n\n if mask_path:\n mask = np.array(Image.open(mask_path))\n\n wc = WordCloud(background_color=background_color, max_words=max_words,\n stopwords=stopwords_set, mask=mask, **kwargs)\n wc.generate(text)\n\n if save:\n filename, ext = os.path.splitext(\"path_to_file\")\n wc.to_file(\"{}_wordcloud.{}\".format(filename, ext))\n\n if ret:\n return wc\n\n\ndef get_polarity(sent):\n \"\"\"\n A helper function that, given a sentence, computes the polarity (negative, positive, or neutral sentiment).\n Based on nltk.sentiment.vader.SentimentIntensityAnalyzer.\n\n :param sent: A string.\n :return: A string with the most probable sentiment, and the dict with all values.\n\n Example:\n >>> get_polarity(\"I don't feel well.\")\n 'neu', {'neg': 0.476, 'neu': 0.524, 'pos': 0.0, 'compound': -0.2057}\n >>> get_polarity(\"The weather today is great!\")\n 'pos', {'neg': 0.0, 'neu': 0.477, 'pos': 0.523, 'compound': 0.6588}\n \"\"\"\n\n sia = SIA()\n scores = sia.polarity_scores(sent)\n\n return max(scores), scores\n\n\n# TODO: This whole class probably needs to be moved to another file\n# TODO: Refactor code such that each method can be called on arbitrary text, not just on self.attributes.\n# TODO: Store individual corpuses using inner classes (?), aka the Composition Design Pattern\nclass Corpus_:\n\n def __init__(self,):\n \"\"\"\n The Corpus_ class is to be initialized without arguments. The default use is intended to follow\n a certain pipeline: add corpuses (lists of sentences stored in a default_dict), use methods on\n some, all, or none of them (e.g. on other text), etc.\n \"\"\"\n\n self.corpus = defaultdict(list)\n self.tokenized = defaultdict(list)\n self.lemmatized = defaultdict(list)\n self.create_lemmatization_mappings()\n\n # a unified collection of all sentences/tokens/lemmas of all documents\n self.corpus_library = list()\n self.token_library = list()\n self.lemma_library = list()\n\n self.vectorizer = None\n self.feature_names = None\n self.topics = None\n self.topic_algorithm = None\n\n def add_corpus(self, corpus, corpus_name, also_process=True, raw=False, tokenized=False):\n \"\"\"\n Takes a text as input and adds it to the corpus. 
If also_process (true by default),\n text is preprocessed and tokenized and lemmatized copies are created.\n\n :param corpus: A list of strings (sentences or words)\n :param corpus_name: String, the name of the corpus.\n :param also_process: Boolean, default=True\n :param raw: Boolean, if true corpus is expected to be a string\n :param tokenized: Boolean, if true corpus is expected to be tokenized (list of lists with strings)\n :return:\n \"\"\"\n\n assert isinstance(corpus_name, str), \"Corpus name is not a string\"\n if tokenized:\n assert isinstance(corpus, list), \"Input `corpus` is not a list.\"\n assert (all(isinstance(sent, list) for sent in corpus)), \"Sentences are not lists.\"\n elif not raw:\n assert isinstance(corpus, list), \"Input `corpus` is not a list.\"\n assert (all(isinstance(sent, str) for sent in corpus)), \"Sentences are not strings.\"\n else:\n assert isinstance(corpus, str), \"Input `corpus` is not a string.\"\n corpus = sent_tokenize(corpus)\n\n self.corpus[corpus_name] = corpus\n self.corpus_library.extend(corpus)\n\n if also_process:\n if not tokenized:\n self.tokenize_corpus(corpus_name)\n self.lemmatize_corpus(corpus_name)\n self.create_lemmatization_mappings()\n\n def tokenize_corpus(self, corpus_name, remove_punctuation=True, lower=True, remove_stopwords=False, ret=False):\n \"\"\"\n Helper method to tokenize a certain corpus with various optional arguments. Tokenized text\n gets stored in the self.tokenized defaultdict and can be accessed with its key.\n\n :param corpus_name: A string pointing to a saved corpus.\n :param remove_stopwords: Remove common words. Using nltk's stopwords for English. Optional, default=False.\n :param remove_punctuation: Removes punctuation. Optional, default=True.\n :param lower: Lower the text. Optional, default=True.\n :param ret: Set to True if you need the method to return the tokens, default=False.\n :return: If ret=True, tokenized text, can also be accessed as an attribute.\n \"\"\"\n\n if lower and remove_punctuation:\n # simple_preprocess lowers, removes punctuation, and tokenizes, all-in-one and much faster\n tokens = [simple_preprocess(sent) for sent in self.corpus[corpus_name]]\n elif lower:\n tokens = [word_tokenize(sent.lower()) for sent in self.corpus[corpus_name]]\n else:\n tokens = [[word_tokenize(word) for word in sent] for sent in self.corpus[corpus_name]]\n\n if remove_stopwords:\n tokens = [[word for word in sent if word not in stop_words] for sent in tokens]\n\n self.tokenized[corpus_name] = tokens\n self.token_library.extend(tokens)\n\n if ret:\n return tokens\n\n def _synset_lemmatizer(self, word):\n \"\"\"\n Simplify words by taking their simpler or most common synsets. 
Words up to 3 letters do not get modified.\n\n Examples:\n in: \"hello\", out: \"hello\"\n in: \"distanced\", out: \"distance\"\n in: \"spaces\", out: \"space\"\n in: \"told\", out: \"tell\"\n It's not perfect:\n in: \"comprehend\", out: \"grok\"\n \"\"\"\n\n # don't modify small words\n if len(word) <= 3:\n return word\n\n try:\n # get synsets\n synsets_list = wn.wordnet.synsets(word)\n\n # clear synsets: get names as strings\n synsets_list = [w.name().split(\".\")[0] for w in synsets_list]\n\n word_counter = Counter(synsets_list)\n\n # if there are many words\n if len(word_counter) > 1:\n word_freq1, word_freq2 = word_counter.most_common(2) # each is a tuple: (\"word\", counts)\n\n # if they have the same frequencies: pick the shorter word, else pick the first\n if word_freq1[1] == word_freq2[1]:\n if len(word_freq1[0]) <= len(word_freq2[0]):\n return word_freq1[0]\n else:\n return word_freq2[0]\n else:\n return word_freq1[0]\n\n # if there is only one word\n else:\n return word_counter.most_common()[0][0]\n\n # if there are no synsets, return the word as it is\n except IndexError:\n return word\n\n def lemmatize_corpus(self, corpus_name, lemmatizer=\"synset\", ret=False):\n # TODO: add more lemmatizers\n \"\"\"\n Take a corpus's tokenized form and lemmatize each word based\n on selected lemmatizer. If ret is True, values are returned.\n\n :param tokens: Should be a list of lists with strings, e.g.: [[\"d\", \"w\"], [\"a\"]].\n :param lemmatizer: There are various lemmatizers available, passed as string arguments:\n - 'wordnet': Uses nltk.WordNetLemmatizer\n - 'synset': Uses a custom implementation. Takes a word and returns its most common (or simplest) synset\n - 'spacy': Uses spacy.load('en')\n :param ret: Set to True if you need the method to return the lemmatized tokens, default=False.\n :return: List of lists with lemmatized tokens, e.g.: [[\"d\", \"w\"], [\"a\"]].\n \"\"\"\n\n if lemmatizer == \"wordnet\":\n wnl = WordNetLemmatizer()\n _lemmatized = [[wnl.lemmatize(word) for word in sent] for sent in self.tokenized[corpus_name]]\n elif lemmatizer == \"spacy\":\n nlp = spacy.load('en', disable=['parser', 'ner'])\n allowed_postags = ['NOUN', 'ADJ', 'VERB', 'ADV']\n _lemmatized = [[token.lemma_ for token in nlp(\" \".join(sent)) if token.pos_ in allowed_postags]\n for sent in self.tokenized[corpus_name]]\n else:\n # If'synset'\n print(\"Using 'synset' lemmatizer.\")\n _lemmatized = [[self._synset_lemmatizer(word) for word in sent] for sent in self.tokenized[corpus_name]]\n\n self.lemmatized[corpus_name] = _lemmatized\n self.lemma_library.extend(_lemmatized)\n\n if ret:\n return _lemmatized\n\n def create_lemmatization_mappings(self, ret=False):\n \"\"\"\n If ret is True this method returns a defaultdict where the keys are the words in the\n lemmatized text and the values are lists of words from the original text that were\n transformed to said new word. 
If ret is False, then the `mappings` attribute gets updated.\n These mappings are joint for all corpuses.\n\n :return: If ret=True, returns a defaultdict of word mappings, e.g.: {\"new\":[\"old1\", \"old2\"]}.\n \"\"\"\n\n word_mappings = defaultdict(list)\n\n for corpus_name, corpus in self.corpus.items():\n for (old, new) in zip([w for sent in self.tokenized[corpus_name] for w in sent],\n [w for sent in self.lemmatized[corpus_name] for w in sent]):\n\n if old not in word_mappings[new]:\n word_mappings[new].append(old)\n\n self.mappings = word_mappings\n\n if ret:\n return word_mappings\n\n def model_topics(self, corpus_name=None, text=None, interpret=False,\n train_also_on_lemmatized=False, train_only_on_lemmatized=False,\n algorithm=\"lda_sklearn\", no_features=3000, no_topics=50, no_top_words=15):\n \"\"\"\n Take a list of sentences / documents and return a list of topics, and the created model.\n\n :param corpus_name:\n :param interpret: Whether to print human-readable results. One could also use self.topics.\n :param text: A list of strings / sentences / documents.\n :param train_also_on_lemmatized:\n :param train_only_on_lemmatized:\n :param algorithm:\n :param no_topics: How many topics do we want to keep track of, int, default=50.\n :param no_features: How many words we want to keep track of, int, default=3000.\n :param no_top_words: How many words to keep track of per topic, int, default=15.\n :param ret :return: A list of lists with keywords representing a topic, and the model instance.\n \"\"\"\n\n if text is not None:\n assert isinstance(text, list), \"Input `corpus` is not a list.\"\n\n # TODO: add other ways for topic modelling\n if algorithm == \"lda_sklearn\":\n\n if self.topic_algorithm != algorithm:\n\n self.topic_algorithm = algorithm\n\n self.vectorizer = CountVectorizer(max_df=0.95, min_df=2,\n max_features=no_features,\n stop_words='english')\n\n # Fit the model with all documents currently available, store sparse counts vector\n if train_also_on_lemmatized:\n extended_set = [\" \".join(sent) for sent in self.lemma_library]\n extended_set.extend(self.corpus_library)\n tf_counts = self.vectorizer.fit_transform(extended_set)\n elif train_only_on_lemmatized:\n tf_counts = self.vectorizer.fit_transform([\" \".join(sent) for sent in self.lemma_library])\n else:\n tf_counts = self.vectorizer.fit_transform(self.corpus_library)\n\n # Create LDA model and fit it\n self.topic_model = LatentDirichletAllocation(n_components=no_topics, max_iter=30,\n learning_method='batch', evaluate_every=128,\n perp_tol=1e-3, learning_offset=50.)\n\n self.topic_model.fit(tf_counts)\n\n self.feature_names = self.vectorizer.get_feature_names()\n\n self.topics = [[self.feature_names[i] for i in topic.argsort()[:-no_top_words - 1:-1]]\n for topic in self.topic_model.components_]\n\n if corpus_name is not None:\n tf_counts = self.vectorizer.transform(self.corpus[corpus_name])\n elif text is not None:\n tf_counts = self.vectorizer.transform(text)\n else:\n return\n\n topic_scores = self.topic_model.transform(tf_counts)\n\n elif algorithm == \"lda_gensim\":\n\n if self.topic_algorithm != \"lda_gensim\":\n self.topic_algorithm = algorithm\n if train_also_on_lemmatized:\n extended_set = list()\n extended_set.extend(self.lemma_library)\n extended_set.extend(self.token_library)\n\n self.feature_names = Dictionary(extended_set)\n bow = [self.feature_names.doc2bow(sent) for sent in extended_set]\n\n elif train_only_on_lemmatized:\n self.feature_names = Dictionary(self.lemma_library)\n bow = 
[self.feature_names.doc2bow(sent) for sent in self.lemma_library]\n\n else:\n self.feature_names = Dictionary(self.token_library)\n bow = [self.feature_names.doc2bow(sent) for sent in self.token_library]\n\n self.topic_model = LdaModel(corpus=bow, id2word=self.feature_names, num_topics=no_topics,\n chunksize=100, passes=10, alpha=\"auto\", per_word_topics=True)\n\n self.topics = self.topic_model.print_topics()\n\n if corpus_name is not None:\n bow = [self.feature_names.doc2bow(sent) for sent in self.lemmatized[corpus_name]]\n\n topic_scores = self.topic_model[bow]\n\n # TODO: bugfix this: 'TypeError: doc2bow expects an array of unicode tokens on input, not a single string'\n # elif text is not None:\n # # Text needs to be converted to tokens and then lemmas\n # text = [word_tokenize(sent) for sent in text]\n # text = [[self._synset_lemmatizer(word) for word in sent] for sent in text]\n # text = [\" \".join(word for sent in text for word in sent)]\n # bow = [self.feature_names.doc2bow(sent) for sent in text]\n #\n # topic_scores = self.topic_model[bow]\n\n else:\n print(\"Wrong algorithm.\")\n return\n\n # explicit None check: topic_scores may be a numpy array, whose truth value is ambiguous\n if topic_scores is not None:\n return topic_scores\n\n def statistics(self, ret=False):\n # TODO: add more\n # self.tokenized / self.lemmatized are dicts keyed by corpus name, so iterate their values\n token_frequency_dict = FreqDist([word for corpus in self.tokenized.values() for sent in corpus for word in sent])\n lemma_frequency_dict = FreqDist([word for corpus in self.lemmatized.values() for sent in corpus for word in sent])\n\n if ret:\n return token_frequency_dict, lemma_frequency_dict\n else:\n self.token_frequency_dict = token_frequency_dict\n self.lemma_frequency_dict = lemma_frequency_dict\n","sub_path":"feature_extractions_utils.py","file_name":"feature_extractions_utils.py","file_ext":"py","file_size_in_byte":17812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"231037319","text":"# This sample tests variadic TypeVar matching for unions.\n\n# pyright: reportMissingModuleSource=false\n\nfrom typing import List, Literal, TypeVar, Union\nfrom typing_extensions import TypeVarTuple, Unpack\n\n\n_T = TypeVar(\"_T\")\n_Xs = TypeVarTuple(\"_Xs\")\n_Ys = TypeVarTuple(\"_Ys\")\n\n\ndef func1(x: Union[Unpack[_Xs]]) -> Union[Unpack[_Xs]]:\n ...\n\n\ndef func2(x: Union[Unpack[_Xs], Unpack[_Ys]]) -> Union[Unpack[_Xs], Unpack[_Ys]]:\n ...\n\n\ndef func3(x: Union[int, Unpack[_Xs]]) -> Union[Unpack[_Xs]]:\n ...\n\n\ndef func4(x: Union[_T, Unpack[_Xs]]) -> Union[_T, Unpack[_Xs]]:\n ...\n\n\ndef func5(x: Union[Unpack[_Xs]], *args: Unpack[_Xs]) -> Union[Unpack[_Xs]]:\n ...\n\n\ndef func6(*args: Unpack[_Xs]) -> Union[Unpack[_Xs]]:\n ...\n\n\ndef func7(a: List[Union[Unpack[_Xs]]]) -> Union[Unpack[_Xs]]:\n ...\n\n\ndef test1(a: int, b: str, c: List[int], d: Union[complex, str]):\n v1_1 = func1(a)\n t_v1_1: Literal[\"int\"] = reveal_type(v1_1)\n\n v1_2 = func1(d)\n t_v1_2: Literal[\"complex | str\"] = reveal_type(v1_2)\n\n # ---------\n\n # This behavior isn't defined by PEP 646, but neither\n # did PEP 484 define the behavior for multiple (non-\n # variadic) TypeVar matching within a Union. 
So behavior\n # is likely to vary between type checkers here.\n v2_1 = func2(a)\n t_v2_1: Literal[\"int\"] = reveal_type(v2_1)\n\n v2_2 = func2(d)\n t_v2_2: Literal[\"str | complex\"] = reveal_type(v2_2)\n\n # ---------\n\n v3_1 = func3(a)\n t_v3_1: Literal[\"int\"] = reveal_type(v3_1)\n\n # This should generate an error\n v3_2 = func3(d)\n\n v3_3 = func3(b)\n t_v3_3: Literal[\"str\"] = reveal_type(v3_3)\n\n # ---------\n\n # This behavior isn't defined by PEP 646 or PEP 484.\n v4_1 = func4(a)\n t_v4_1: Literal[\"int\"] = reveal_type(v4_1)\n\n v4_2 = func4(d)\n t_v4_2: Literal[\"str | complex\"] = reveal_type(v4_2)\n\n # ---------\n\n # This should generate an error\n v5_1 = func5(a)\n\n v5_2 = func5(a, a)\n t_v5_2: Literal[\"int\"] = reveal_type(v5_2)\n\n # This should generate an error\n v5_3 = func5(a, b)\n\n # This should generate an error\n v5_4 = func5(a, b, c)\n\n # ---------\n\n v6_1 = func6(a)\n t_v6_1: Literal[\"int\"] = reveal_type(v6_1)\n\n v6_2 = func6(a, b)\n t_v6_2: Literal[\"int | str\"] = reveal_type(v6_2)\n\n v6_3 = func6(a, b, d)\n t_v6_3: Literal[\"int | str | complex\"] = reveal_type(v6_3)\n\n # ---------\n\n v7_1 = func7([a])\n t_v7_1: Literal[\"int\"] = reveal_type(v7_1)\n\n x: List[Union[int, str]] = [a, b]\n v7_2 = func7(x)\n t_v7_2: Literal[\"int | str\"] = reveal_type(v7_2)\n\n v7_3 = func7([a, b, d])\n t_v7_3: Literal[\"int | str | complex\"] = reveal_type(v7_3)\n","sub_path":"packages/pyright-internal/src/tests/samples/variadicTypeVar8.py","file_name":"variadicTypeVar8.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"564593818","text":"import json, traceback\n\ndef addCommu():\n try:\n with open(\"db.json\", 'r', -1, \"UTF-8\") as json_file:\n db_r = json.load(json_file)\n except:\n traceback.print_exc()\n print(\"에러: db.json을 제대로 삽입했는지 확인해주세요\")\n try:\n with open(\"add.json\", 'r', -1, \"UTF-8\") as json_file:\n add_r = json.load(json_file)\n except:\n traceback.print_exc()\n print(\"에러: add.json을 제대로 삽입했는지 확인해주세요\")\n for i in add_r:\n for j in i:\n if 'text' in j and 'text-patch' in j:\n if j['text'] in db_r:\n print(j['text'] + \" : \" +j['text-patch']+\" - 덮어씀\")\n else:\n print(j['text'] + \" : \" +j['text-patch']+\" - 추가\")\n db_r[j['text']] = j['text-patch']\n if 'select' in j and 'select-patch' in j:\n if j['select'] in db_r:\n print(j['select'] + \" : \" +j['select-patch']+\" - 덮어씀\")\n else:\n print(j['select'] + \" : \" +j['select-patch']+\" - 추가\")\n db_r[j['select']] = j['select-patch']\n\n # if 'text' in j and 'text-ko' in j:\n # if j['text'] in db_r:\n # print(j['text'] + \" : \" +j['text-ko']+\" - 덮어씀\")\n # else:\n # print(j['text'] + \" : \" +j['text-ko']+\" - 추가\")\n # db_r[j['text']] = j['text-ko']\n\n try:\n with open('db.json', 'w+', -1, \"UTF-8\") as w:\n w.write(json.dumps(db_r, ensure_ascii = False, sort_keys=True, indent=4))\n except:\n print(\"에러: 파일을 덮어씌우는데 실패했습니다\")\n\n print(\"\\nfinish\")\n \n \n\nif __name__ == '__main__':\n print(\"작업시작\")\n addCommu()","sub_path":"shinycolors-db/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"533325881","text":"# -*- coding: utf-8 -*-\n##############################################################################\n# For copyright and license notices, see __openerp__.py file in module root\n# 
directory\n##############################################################################\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import ValidationError\n\n\nclass account_check_action_wizard(models.TransientModel):\n _name = 'account.check.action.wizard'\n _description = 'Account Check Action Wizard'\n\n @api.model\n def _default_product(self):\n if self._context.get('default_partner_id'):\n partner_id = self.env['res.partner'].browse(self._context.get('default_partner_id'))\n if partner_id.supplier:\n return True\n return False\n\n @api.model\n def _default_partner_id(self):\n if self._context.get('default_partner_id', False):\n return self._context.get('default_partner_id')\n if self._context.get('default_check_ids', False):\n check_ids = self.env['account.check'].browse(self._context.get('default_check_ids'))\n return check_ids[0].operation_partner_id.id\n return self.env['res.partner']\n\n @api.model\n def _default_amount(self):\n if self._context.get('default_check_ids', False):\n check_ids = self.env['account.check'].browse(self._context.get('default_check_ids'))\n return sum(x.amount for x in self.check_ids)\n return 0.0\n\n date = fields.Date(\n default=fields.Date.context_today,\n required=True,\n )\n partner_id = fields.Many2one('res.partner', string='Partner', default=_default_partner_id)\n debit_note = fields.Boolean(string='Debit note', default=_default_product)\n journal_id = fields.Many2one('account.journal', string='Journal',\n domain=[('type', 'in', ['cash', 'bank'])])\n expense_check_account_id = fields.Many2one(\n 'account.account',\n 'Account Expense',\n domain=lambda self: [('user_type_id', '=', self.env.ref('account.data_account_type_expenses').id)]\n )\n amount = fields.Monetary(currency_field='company_currency_id', string='Expense amount', default=_default_amount)\n amount_total = fields.Monetary(currency_field='company_currency_id', string='Amount total', compute='_compute_calcular_amount_total')\n\n communication = fields.Char(string='Memo')\n\n company_id = fields.Many2one(related='journal_id.company_id', readonly=True, store=True)\n company_currency_id = fields.Many2one(related='company_id.currency_id', readonly=True)\n\n action_type = fields.Char(\n 'Action type passed on the context',\n required=True,\n )\n check_ids = fields.Many2many(\n 'account.check',\n string='Checks',\n copy=False\n )\n tax_ids = fields.Many2many('account.tax', 'account_check_action_tax', 'account_check_action_id', 'tax_id',\n string='Taxes',\n domain=[('type_tax_use', '=', 'purchase'), '|', ('active', '=', False),\n ('active', '=', True)])\n\n @api.one\n @api.depends('check_ids')\n def _compute_calcular_amount_total(self):\n if self.check_ids:\n self.amount_total = sum(x.amount for x in self.check_ids)\n else:\n self.amount_total = 0.0\n\n @api.multi\n def action_confirm(self):\n self.ensure_one()\n if self.action_type not in [\n 'use', 'claim', 'bank_debit', 'reject', 'negotiated', 'selled', 'customer_return', 'reject_holding', 'deposited', 'reject_holding']:\n raise ValidationError(_(\n 'Action %s not supported on checks') % self.action_type)\n check = self.env['account.check'].browse(\n self._context.get('active_id'))\n journal_type = ''\n if self.action_type in ['customer_return','claim']:\n journal_type = 'sale'\n if self.action_type == 'selled':\n #if self.amount <= 0.0:\n #raise ValidationError(_(\n #'Action %s not supported on checks') % self.action_type)\n if self.check_ids:\n return getattr(self.check_ids.with_context(action_date=self.date, 
partner=self.partner_id.id,\n expense_amount=self.amount, debit_note=self.debit_note,\n journal_type=journal_type,\n journal=self.journal_id.id,\n expense_account=self.expense_check_account_id.id,\n tax_ids=self.tax_ids.ids if self.tax_ids else False,\n check_ids=self.check_ids.ids), self.action_type)()\n else:\n return getattr(check.with_context(action_date=self.date, partner=self.partner_id.id,\n expense_amount=self.amount, debit_note=self.debit_note,\n journal_type=journal_type,\n journal=self.journal_id.id,\n expense_account=self.expense_check_account_id.id,\n tax_ids=self.tax_ids.ids if self.tax_ids else False), self.action_type)()\n if self.action_type == 'negotiated':\n if self.check_ids:\n return getattr(\n self.check_ids.with_context(action_date=self.date, partner=self.partner_id.id,\n journal_type=journal_type,\n tax_ids=self.tax_ids.ids if self.tax_ids else False), self.action_type)()\n else:\n return getattr(\n check.with_context(action_date=self.date, journal_type=journal_type, partner=self.partner_id.id), self.action_type)()\n if self.action_type == 'deposited':\n if self.check_ids:\n return getattr(\n self.check_ids.with_context(action_date=self.date, journal_type=journal_type, journal=self.journal_id.id, default_communication=self.communication), self.action_type)()\n else:\n return getattr(\n check.with_context(action_date=self.date, journal_type=journal_type, journal=self.journal_id.id, default_communication=self.communication), self.action_type)()\n\n if self.action_type in ['bank_debit', 'reject_holding']:\n if self.check_ids:\n return getattr(\n self.check_ids.with_context(action_date=self.date), self.action_type)()\n else:\n return getattr(\n check.with_context(action_date=self.date), self.action_type)()\n\n if self.partner_id:\n return getattr(\n check.with_context(action_date=self.date, partner=self.partner_id.id), self.action_type)()\n else:\n return getattr(\n check.with_context(action_date=self.date), self.action_type)()\n","sub_path":"l10n_ar_account_check/wizard/account_check_action_wizard.py","file_name":"account_check_action_wizard.py","file_ext":"py","file_size_in_byte":7245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"418402559","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 20 17:09:06 2020\n\n@author: demir\n\"\"\"\n#RANDOM SELECTION'DA ZEKA YOK ! SADECE RANDOM\nimport pandas as py\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nveriler = pd.read_csv(\"Ads_CTR_Optimisation.csv\")\n\"\"\"\n#RANDOM SELECTION\nimport random\ntoplam = 0\nsatirSayisi = len(veriler)\nsutunSayisi = len(veriler.columns)\nsecilenler = []\nfor n in range(0, satirSayisi):\n randRow = random.randrange(sutunSayisi)\n secilenler.append(randRow)\n odul = veriler.values[n, randRow] # verilerdeki n. 
satirdaki daha onceden sectigimiz randRow'a bakiyor, 1 ise odul.\n toplam += odul\n \nplt.hist(secilenler)\nplt.show()\n\"\"\"\nsatirSayisi = len(veriler) # 10 000 tiklama\nsutunSayisi = len(veriler.columns)\n#Ri(n)\noduller = [0] * sutunSayisi # 10 tane 0 iceren liste\n#Ni(n)\ntiklamalar = [0] * sutunSayisi\ntoplam = 0 # toplam odul\nsecilenler = []\nimport math\nfor n in range(0, satirSayisi):\n ad = 0 \n max_ucb = 0\n for i in range(0, sutunSayisi):\n if(tiklamalar[i] > 0): \n ortalama_odul = oduller[i] / tiklamalar[i]\n delta = math.sqrt(3/2 * math.log(n)/tiklamalar[i])\n ucb = ortalama_odul + delta\n else:\n ucb = satirSayisi*10 # sadece ucb'yi cok buyuk bir sey yapmaya calisiyoruz\n if max_ucb < ucb:\n max_ucb = ucb\n ad = i\n secilenler.append(ad)\n tiklamalar[ad] += 1\n odul = veriler.values[n, ad] # verilerdeki n. satirdaki daha onceden sectigimiz randRow'a bakiyor, 1 ise odul.\n oduller[ad] += odul\n toplam += odul\nprint(\"toplam odul\")\nprint(toplam)\n\nplt.hist(secilenler)\nplt.show()","sub_path":"Bölüm 27/randomSample.py","file_name":"randomSample.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"301733516","text":"def run(self, tmp=None, task_vars=None):\n if (task_vars is None):\n task_vars = dict()\n result = super(ActionModule, self).run(tmp, task_vars)\n if self._play_context.check_mode:\n result['skipped'] = True\n return result\n executable = self._task.args.get('executable')\n result.update(self._low_level_execute_command(self._task.args.get('_raw_params'), executable=executable))\n return result","sub_path":"Data Set/bug-fixing-5/67e6bd18e495b32d4748d5eee1c9b8939c1fb715--bug.py","file_name":"67e6bd18e495b32d4748d5eee1c9b8939c1fb715--bug.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"122722817","text":"#Moving circle\n#Austin Cefaratti 1/16/19\n#mover the circle to where the user clicks\n\nfrom graphics import*\n\ndef moveTo(shape, newCenter):\n win = GraphWin(\"circle moving\", 500,500)\n circle = Circle(Point(250,250), shape)\n circle.setFill(\"yellow\")\n circle.draw(win)\n circle2 = Circle(Point(0,0), 0)\n circle2.draw(win)\n for i in range(10):\n newCenter = win.getMouse()\n circle2.undraw()\n newX = newCenter.getX()\n newY = newCenter.getY()\n circle2 = Circle(Point(newX,newY), shape)\n circle2.setFill(\"yellow\")\n circle2.draw(win)\n circle.undraw()\n print(\"Click again to quit\")\n win.getMouse()\n win.close()\ndef main():\n shape = eval(input(\"Enter the radius of the circle you want: \"))\n print(\"Click in the box to move the circle.\")\n newCenter = 0\n moveTo(shape, newCenter)\n \n\n\n\n\nmain()\n","sub_path":"python-2.2/moving-circle.py","file_name":"moving-circle.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"574680475","text":"from flask import Flask\nfrom flask_restful import Resource, Api\nfrom flask.ext.mongoengine import MongoEngine\n\napp = Flask(__name__)\napi = Api(app)\n\n# Mongo Config\napp.config[\"MONGODB_SETTINGS\"] = {'DB': \"api_test_db\"}\napp.config[\"SECRET_KEY\"] = \"F15Htacos\"\napp.config['UPLOAD_FOLDER'] = \"/tmp/api_test\"\n\ndb = MongoEngine(app)\n\n#Blueprint Registration\nfrom api_test.views.api_v1.project import api_v1, api_v1_bp, API_VERSION_V1\n\napp.register_blueprint(\n api_v1_bp,\n 
url_prefix='{prefix}/v{version}'.format(\n prefix='/api',\n version=API_VERSION_V1))\n","sub_path":"api_test/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"640641438","text":"#coding:utf-8\nimport HTMLTestRunner\nimport unittest\nimport time\nclass Test(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n print(\"start!\")\n @classmethod\n def tearDownClass(cls):\n time.sleep(1)\n print(\"end!\")\n def test02(self):\n u'use the assertEqual assertion'\n a=1\n b=1\n self.assertEqual(a,b)\n print(a)\n\n def test03(self):\n u'use assertIn'\n a='hello'\n b='hello world'\n self.assertIn(a,b)\n print(\"running test case 03\")\n\n def test01(self):\n u'use assertTrue'\n a = True\n self.assertTrue(a)\n print(\"running test case 01\")\n\n def test04(self):\n u'assertion fails'\n a = \"I am testing\"\n b =5\n self.assertEqual(a,b,msg=\"failure reason: %s != %s\"%(a,b))\n print(\"add method\")\n\n\n\n# if __name__ == \"__main__\":\n# unittest.main()\n","sub_path":"test0915/case/blog/test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"141456436","text":"from sklearn.datasets import load_iris\r\niris = load_iris()\r\nprint(iris.data)\r\n\r\n#print the names of the 4 features.\r\nprint(iris.feature_names)\r\n\r\n#the integers represent the flower species.\r\n#0 = setosa / 1 = versicolor / 2 = virginica\r\nprint(iris.target)\r\n\r\n\r\n# the idea is to split the data into a train set used for training\r\n# and a test set used for validation.\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nX = iris.data\r\ny = iris.target\r\n\r\n#split at a (80:20) ratio.\r\nX_train,X_test,y_train,y_test = train_test_split(X, y, test_size = 0.2, random_state = 4)\r\n\r\n#shows how many rows and columns there are.\r\nprint(X_train.shape)\r\nprint(X_test.shape)\r\n\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn import metrics\r\n\r\nknn = KNeighborsClassifier(n_neighbors=5)\r\n#calling fit performs the training.\r\nknn.fit(X_train, y_train)\r\n#in knn, fit does not do much work\r\n\r\n#in KNN, predict does most of the work (distance measurement)\r\ny_pred = knn.predict(X_test)\r\nscores = metrics.accuracy_score(y_test, y_pred)\r\n#here y_test is the ground truth / y_pred is the prediction // the point is to compare these two.\r\n\r\nprint(scores)\r\n\r\n\r\nknn = KNeighborsClassifier(n_neighbors=5)\r\nknn.fit(X,y)\r\n\r\n#0 = setosa / 1 = versicolor / 2 = virginica\r\nclasses = {0:'setosa', 1:'versicolor', 2:'virginica'}\r\n#dictionary -> turns the numeric output into strings\r\n\r\n#let's present new, unseen data.\r\n#two data points are given.\r\nx_new = [[3, 4, 5, 2], [5, 4, 2, 2]]\r\n\r\n#ask for a prediction using predict.\r\ny_predict = knn.predict(x_new)\r\n\r\n#the predicted result for the first [0] and the second data point\r\nprint(classes[y_predict[0]])\r\nprint(classes[y_predict[1]])","sub_path":"code/KNN.py","file_name":"KNN.py","file_ext":"py","file_size_in_byte":1844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"38518770","text":"from django.shortcuts import render, HttpResponseRedirect\nfrom django.http import HttpResponse\nfrom julyapp.models import *\n# Create your views here.\ndef homepage (request):\n return render(request, 'index.html', locals())\ndef mainpage (request):\n return render(request, 'mainpage.html', locals())\ndef hello (request):\n return HttpResponse (\"Hello World\")\ndef hello1 (request):\n return HttpResponse (\"Hi Saran\")\ndef add (request):\n a=5\n b=10\n c=a+b\n return render(request, 'add.html', locals())\ndef name (request):\n\tif request.POST: \n\t\t#import ipdb;ipdb.set_trace()\n\t\tfirst_name=request.POST['firstname']\n\t\tlast_name=request.POST['lastname']\n\t\tsex=request.POST['sex']\n\t\tuploadfile=request.FILES['uploadfile']\n\t\tage=request.POST['age']\n\t\tdob=request.POST['dob']\n\t\temail1=request.POST['email']\n\t\ttry:\n\t\t\tmarriage=request.POST['marriage']\n\t\texcept Exception:\n\t\t\tmarriage=False\n\t\tdata=Contact(first_name=first_name,last_name=last_name,sex=sex,uploadfile=uploadfile,age=age,dob=dob,email1=email1,marriage=marriage,)\n\t\tdata.save()\n\tb=Contact.objects.all()\n\t #name=str(firstname)+str(lastname)\n\treturn render(request, 'name.html', locals())\n\ndef edit (request,id):\n\tb=Contact.objects.get(id=id)\n\tif request.POST:\n\t\tb.first_name=request.POST['firstname']\n\t\tb.last_name=request.POST['lastname']\n\t\tb.sex=request.POST['sex']\n\t\tb.uploadfile=request.FILES['uploadfile']\n\t\tb.age=request.POST['age']\n\t\tb.dob=request.POST['dob']\n\t\tb.email1=request.POST['email']\n\t\tb.marriage=request.POST['marriage']\n\t\tb.save()\n\t\treturn HttpResponseRedirect('/list')\n\treturn render(request, 'edit.html', locals())\n\ndef list (request):\n\tb=Contact.objects.all()\n\treturn render(request, 'list.html', locals())\n\ndef delete (request,id):\n\tb=Contact.objects.get(id=id)\n\tif request.POST:\n\t\tb.delete()\n\t\treturn HttpResponseRedirect('/list')\n\treturn render(request, 'delete.html', locals())\n\t\t\n\t#return render(request, 'edit.html', locals())\n\t#return HttpResponseRedirect('/list')\ndef dontdelete (request):\n\treturn HttpResponseRedirect('/list')\n\t#return render(request, 'list.html', locals())\t","sub_path":"site_media/media/files_upload/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"597130825","text":"import inspect\nimport torch\n\nfrom pathlib import Path\nfrom typing import Union\n\n\ndef get_local_time():\n import datetime\n import pytz\n timezone = pytz.timezone('Europe/Rome')\n return datetime.datetime.now(tz=timezone).strftime('%d-%m-%H-%M-%S')\n\n\ndef 
get_last_modified(dirname: Union[Path, str]) -> Union[str, Path]:\n dirname = Path(dirname) if isinstance(dirname, str) else dirname\n if len(list(dirname.iterdir())) == 0:\n return \"\"\n files = [(file, file.stat().st_mtime) for file in dirname.iterdir()]\n return sorted(files, key=lambda x: x[1])[-1][0]\n\n\nproject_dir = Path(inspect.getsourcefile(lambda: 0)).resolve().parent.parent\ndata_dir = project_dir / 'data'\n\nlocal_time = get_local_time()\nlog_dir = project_dir / 'runs'\nlog_dir.mkdir(exist_ok=True)\nlog_dir_local_time = log_dir / local_time\nlog_dir_last_modified = get_last_modified(log_dir)\ncheckpoints_dir = project_dir / 'checkpoints'\ncheckpoints_dir.mkdir(exist_ok=True)\ncheckpoints_dir_local_time = checkpoints_dir / f\"{local_time}.pt\"\ncheckpoints_dir_last_modified = get_last_modified(checkpoints_dir)\n\ninterpolations_dir = project_dir / 'interpolations'\ninterpolations_dir.mkdir(exist_ok=True)\n\ntorch.manual_seed(42)\n\nknobs = dict()\n# pick the device once; the unconditional cuda override that used to follow this\n# block defeated the CPU fallback, so the conditional is the single source of truth\nif torch.cuda.is_available():\n knobs['device'] = torch.device('cuda')\n # print('Running on GPU (device).')\nelse:\n knobs['device'] = torch.device('cpu')\n print('The program is running on CPU: CUDA not available.')\nknobs['num_epochs'] = 1000\nknobs['batch_size'] = 100\nknobs['lr_encoder'] = 5e-5 # 1e-3 -> 1e-4 -> 5e-5\nknobs['lr_decoder'] = 5e-5 # 1e-3 -> 1e-4 -> 5e-5\nknobs['lr_discriminator'] = knobs['lr_encoder']\nknobs['hidden_dim'] = 8\nknobs['lambda_reconstruction'] = 1.\nknobs['lambda_penalty'] = 1.\nknobs['wasserstein_penalty'] = knobs['lambda_penalty']\nknobs['lambda_fooling_term'] = 1.\nknobs['gamma'] = 0.2\nknobs['sigma'] = 1.\nknobs['max_norm_encoder'] = 5. # (average norm of gradients: 50)\nknobs['max_norm_decoder'] = 15. # (average norm of gradients: 150)\nknobs['max_norm_discriminator'] = knobs['max_norm_encoder']\nknobs['time_to_collect'] = 100\nknobs['clip_gradient'] = False\nknobs['fast_models'] = True\nknobs['resume'] = True\n\n\"\"\"\n----------------------------------------------------------------------------------------\nConfigurations\n----------------------------------------------------------------------------------------\nConfigurations #1\nModel: acwwai\nlambda_reconstruction: 0.05\nclip_gradient: False\n\n\nConfigurations #2\nmax_norm_*:\n----------------------------------------------------------------------------------------\n\"\"\"\n","sub_path":"mnist/src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"590696519","text":"from __future__ import unicode_literals\nimport logging\nimport pickle\nfrom StringIO import StringIO\nimport sys\n\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db import models as django_models\nfrom django.test.testcases import TransactionTestCase\nfrom django.utils.unittest.case import skipIf\n\nfrom .. 
import get_tenant_model\nfrom ..models import (db_schema_table, Tenant, TenantModel, TenantModelBase,\n TenantModelDescriptor, TenantSpecificModel)\nfrom ..utils import model_name\n\nfrom .managers import ManagerOtherSubclass, ManagerSubclass\nfrom .models import (AbstractTenantModel, NonTenantModel, RelatedSpecificModel,\n RelatedTenantModel, SpecificModel, SpecificModelProxy,\n SpecificModelProxySubclass, SpecificModelSubclass, TenantModelMixin)\nfrom .utils import logger, skipIfCustomTenant, TenancyTestCase\n\n\nclass TenantTest(TransactionTestCase):\n def assertSwapFailure(self, tenant_model, expected_message):\n with self.assertRaisesMessage(ImproperlyConfigured, expected_message):\n with self.settings(TENANCY_TENANT_MODEL=tenant_model):\n get_tenant_model()\n\n def test_invalid_tenant_user_model_format(self):\n stream = StringIO()\n handler = logging.StreamHandler(stream)\n logger.addHandler(handler)\n with self.settings(TENANCY_TENANT_MODEL='invalid'):\n pass\n logger.removeHandler(handler)\n stream.seek(0)\n self.assertIn(\n \"TENANCY_TENANT_MODEL must be of the form 'app_label.model_name'\",\n stream.read()\n )\n\n def test_swap_failures(self):\n \"\"\"\n Make sure tenant swap failures raise the correct exception\n \"\"\"\n self.assertSwapFailure(\n 'not.Installed',\n \"TENANCY_TENANT_MODEL refers to model 'not.Installed' that has not been installed\"\n )\n self.assertSwapFailure(\n 'contenttypes.ContentType',\n \"TENANCY_TENANT_MODEL refers to models 'contenttypes.ContentType' which is not a subclass of 'tenancy.AbstractTenant'\"\n )\n\n @skipIfCustomTenant\n def test_content_types_deleted(self):\n \"\"\"\n Make sure content types of tenant models are deleted upon their related\n tenant deletion.\n \"\"\"\n tenant = Tenant.objects.create(name='tenant')\n model = tenant.specificmodels.model\n content_type = ContentType.objects.get_for_model(model)\n tenant.delete()\n self.assertFalse(ContentType.objects.filter(pk=content_type.pk).exists())\n\n\nclass TenantModelsCacheTest(TenancyTestCase):\n def test_initialized_models(self):\n \"\"\"\n Make sure models are loaded upon model initialization.\n \"\"\"\n self.assertIn('models', self.tenant.__dict__)\n\n\nclass TenantModelBaseTest(TenancyTestCase):\n def test_simple_instancecheck(self):\n instance = self.tenant.specificmodels.create()\n self.assertIsInstance(instance, django_models.Model)\n self.assertIsInstance(instance, TenantModelMixin)\n self.assertIsInstance(instance, SpecificModel)\n self.assertNotIsInstance(instance, SpecificModelSubclass)\n self.assertNotIsInstance(instance, RelatedSpecificModel)\n self.assertNotIsInstance(instance, TenantModelBaseTest)\n\n def test_concrete_inheritance_instancecheck(self):\n instance = self.tenant.specific_models_subclasses.create()\n self.assertIsInstance(instance, django_models.Model)\n self.assertIsInstance(instance, TenantModelMixin)\n self.assertIsInstance(instance, SpecificModel)\n self.assertIsInstance(instance, SpecificModelSubclass)\n self.assertNotIsInstance(instance, RelatedSpecificModel)\n self.assertNotIsInstance(instance, TenantModelBaseTest)\n\n def test_proxy_inheritance_instancecheck(self):\n instance = self.tenant.specific_model_proxies.create()\n self.assertIsInstance(instance, django_models.Model)\n self.assertIsInstance(instance, TenantModelMixin)\n self.assertIsInstance(instance, SpecificModel)\n self.assertIsInstance(instance, SpecificModelProxy)\n self.assertNotIsInstance(instance, RelatedSpecificModel)\n self.assertNotIsInstance(instance, TenantModelBaseTest)\n\n def 
assertIsSubclass(self, cls, base):\n self.assertTrue(issubclass(cls, base))\n\n def assertIsNotSubclass(self, cls, base):\n self.assertFalse(issubclass(cls, base))\n\n def test_subclasscheck(self):\n self.assertIsSubclass(SpecificModel, TenantModelMixin)\n tenant_specific_model = self.tenant.specificmodels.model\n self.assertIsSubclass(tenant_specific_model, AbstractTenantModel)\n self.assertIsSubclass(tenant_specific_model, SpecificModel)\n self.assertIsSubclass(tenant_specific_model, django_models.Model)\n self.assertIsNotSubclass(tenant_specific_model, SpecificModelSubclass)\n self.assertIsNotSubclass(tenant_specific_model, RelatedSpecificModel)\n self.assertIsNotSubclass(tenant_specific_model, tuple)\n\n def test_concrete_inheritance_subclasscheck(self):\n tenant_specific_model = self.tenant.specificmodels.model\n tenant_specific_model_subclass = self.tenant.specific_models_subclasses.model\n self.assertIsSubclass(tenant_specific_model_subclass, SpecificModel)\n self.assertIsSubclass(tenant_specific_model_subclass, tenant_specific_model)\n\n def test_proxy_inheritance_subclasscheck(self):\n specific_model = self.tenant.specificmodels.model\n specific_model_proxy = SpecificModelProxy.for_tenant(self.tenant)\n self.assertIsSubclass(specific_model_proxy, SpecificModel)\n self.assertIsSubclass(specific_model_proxy, SpecificModelProxy)\n self.assertIsSubclass(specific_model_proxy, specific_model)\n specific_model_proxy_subclass = SpecificModelProxySubclass.for_tenant(self.tenant)\n self.assertIsSubclass(specific_model_proxy_subclass, SpecificModel)\n self.assertIsSubclass(specific_model_proxy_subclass, SpecificModelProxy)\n self.assertIsSubclass(specific_model_proxy_subclass, SpecificModelProxySubclass)\n self.assertIsSubclass(specific_model_proxy_subclass, specific_model)\n self.assertIsSubclass(specific_model_proxy_subclass, specific_model_proxy)\n\n def assertPickleEqual(self, obj):\n pickled = pickle.dumps(obj)\n self.assertEqual(pickle.loads(pickled), obj)\n\n @skipIf(sys.version_info < (2, 7),\n \"Model class can't be pickled on python < 2.7\")\n def test_pickling(self):\n self.assertPickleEqual(SpecificModel)\n self.assertPickleEqual(self.tenant.specificmodels.model)\n self.assertPickleEqual(self.tenant.specificmodels.model.__bases__[0])\n self.assertPickleEqual(self.tenant.specific_models_subclasses.model)\n self.assertPickleEqual(self.tenant.specific_models_subclasses.model.__bases__[0])\n\n def test_tenant_specific_model_dynamic_subclassing(self):\n \"\"\"\n Make sure tenant specific models can be dynamically subclassed.\n \"\"\"\n model = self.tenant.specificmodels.model\n model_subclass = type(\n str(\"%sDynamicSubclass\" % model.__name__),\n (model,),\n {'__module__': model.__module__}\n )\n self.assertEqual(model.tenant, model_subclass.tenant)\n self.assertIsSubclass(model_subclass, model)\n self.assertIsNotSubclass(model, model_subclass)\n\n def test_exceptions_subclassing(self):\n \"\"\"\n Make sure tenant specific models exceptions subclass their exposed\n model one.\n \"\"\"\n for model in TenantModelBase.references:\n tenant_model = model.for_tenant(self.tenant)\n try:\n tenant_model._default_manager.get()\n except Exception as e:\n self.assertIsInstance(e, model.DoesNotExist)\n for parent in model._meta.parents:\n self.assertIsInstance(e, parent.DoesNotExist)\n\n def test_parent_subclassing(self):\n \"\"\"\n Make sure references to the exposed model are all removed.\n \"\"\"\n tenant_specific_model = self.tenant.specificmodels.model\n self.assertNotIn(\n SpecificModel,\n 
tenant_specific_model._meta.parents\n )\n attr_name = \"%s_ptr\" % model_name(SpecificModel._meta)\n self.assertFalse(hasattr(tenant_specific_model, attr_name))\n\n def test_manager_assignment(self):\n \"\"\"\n Managers should be inherited correctly.\n \"\"\"\n # Concrete\n specific_model = SpecificModel.for_tenant(self.tenant)\n self.assertIsInstance(specific_model._default_manager, ManagerSubclass)\n self.assertIsInstance(specific_model.objects, ManagerSubclass)\n self.assertIsInstance(\n specific_model.custom_objects, ManagerOtherSubclass\n )\n # Proxy\n specific_model_proxy = SpecificModelProxy.for_tenant(self.tenant)\n self.assertIsInstance(\n specific_model_proxy._default_manager, ManagerOtherSubclass\n )\n self.assertIsInstance(\n specific_model_proxy.objects, ManagerOtherSubclass\n )\n self.assertIsInstance(\n specific_model_proxy.proxied_objects, ManagerSubclass\n )\n # Concrete subclass\n specific_model_subclass = SpecificModelSubclass.for_tenant(self.tenant)\n self.assertIsInstance(\n specific_model_subclass._default_manager, ManagerOtherSubclass\n )\n self.assertIsInstance(\n specific_model_subclass.objects, ManagerOtherSubclass\n )\n\n\nclass TenantModelDescriptorTest(TenancyTestCase):\n def test_class_accessing(self):\n \"\"\"\n Make sure the descriptor is available from the class.\n \"\"\"\n self.assertIsInstance(Tenant.specificmodels, TenantModelDescriptor)\n\n def test_related_name(self):\n \"\"\"\n Make sure the descriptor is correctly attached to the Tenant model\n when the related_name is specified or not.\n \"\"\"\n self.assertTrue(issubclass(\n self.tenant.specificmodels.model, SpecificModel)\n )\n self.assertTrue(issubclass(\n self.tenant.related_specific_models.model, RelatedSpecificModel)\n )\n\n def test_content_type_created(self):\n \"\"\"\n Make sure the content type associated with the returned model is\n always created.\n \"\"\"\n opts = self.tenant.specificmodels.model._meta\n self.assertTrue(\n ContentType.objects.filter(\n app_label=opts.app_label,\n model=model_name(opts)\n ).exists()\n )\n\n\nclass TenantModelTest(TenancyTestCase):\n def test_isolation_between_tenants(self):\n \"\"\"\n Make sure instances created in a tenant specific schema are not\n shared between tenants.\n \"\"\"\n self.tenant.related_specific_models.create()\n self.assertEqual(self.other_tenant.related_specific_models.count(), 0)\n self.other_tenant.related_specific_models.create()\n self.assertEqual(self.tenant.related_specific_models.count(), 1)\n\n def test_db_table(self):\n \"\"\"\n Make sure the `db_table` associated with tenant models is correctly\n prefixed based on the tenant and suffixed by the un-managed model's\n `db_table`.\n \"\"\"\n self.assertEqual(\n self.tenant.specificmodels.model._meta.db_table,\n db_schema_table(self.tenant, SpecificModel._meta.db_table)\n )\n self.assertEqual(\n self.tenant.specific_models_subclasses.model._meta.db_table,\n db_schema_table(self.tenant, SpecificModelSubclass._meta.db_table)\n )\n\n def test_field_names(self):\n \"\"\"\n Make sure tenant specific models' fields are the same as the one\n defined on the un-managed one.\n \"\"\"\n models = (\n SpecificModel,\n SpecificModelSubclass, # Test inheritance scenarios\n RelatedTenantModel, # And models with m2m fields\n )\n for tenant in Tenant.objects.all():\n for model in models:\n opts = model._meta\n tenant_model = model.for_tenant(tenant)\n tenant_opts = tenant_model._meta\n for field in (opts.local_fields + opts.many_to_many):\n tenant_field = tenant_opts.get_field(field.name)\n 
self.assertEqual(tenant_field.__class__, field.__class__)\n\n def test_foreign_key_between_tenant_models(self):\n \"\"\"\n Make sure foreign keys between TenantModels work correctly.\n \"\"\"\n for tenant in Tenant.objects.all():\n # Test object creation\n specific = tenant.specificmodels.create()\n related = tenant.related_tenant_models.create(fk=specific)\n # Test reverse related manager\n self.assertEqual(specific.fks.get(), related)\n # Test reverse filtering\n self.assertEqual(tenant.specificmodels.filter(fks=related).get(), specific)\n\n def test_m2m(self):\n \"\"\"\n Make sure m2m between TenantModels work correctly.\n \"\"\"\n for tenant in Tenant.objects.all():\n # Test object creation\n related = tenant.related_tenant_models.create()\n specific_model = related.m2m.create()\n # Test reverse related manager\n self.assertEqual(specific_model.m2ms.get(), related)\n # Test reverse filtering\n self.assertEqual(tenant.specificmodels.filter(m2ms=related).get(), specific_model)\n\n def test_m2m_with_through(self):\n for tenant in Tenant.objects.all():\n related = tenant.related_tenant_models.create()\n specific = tenant.specificmodels.create()\n tenant.m2m_specifics.create(\n related=related,\n specific=specific\n )\n self.assertEqual(related.m2m_through.get(), specific)\n self.assertEqual(specific.m2ms_through.get(), related)\n\n def test_m2m_to_non_tenant(self):\n \"\"\"\n Make sure m2m between TenantModels work correctly.\n \"\"\"\n for tenant in Tenant.objects.all():\n # Test object creation\n related = tenant.related_tenant_models.create()\n non_tenant = related.m2m_non_tenant.create()\n # Test reverse related manager\n reverse_descriptor_name = \"tenant_%s_relatedtenantmodels\" % tenant.name\n self.assertEqual(getattr(non_tenant, reverse_descriptor_name).get(), related)\n # Test reverse filtering\n self.assertEqual(NonTenantModel.objects.filter(\n **{reverse_descriptor_name:related}).get(), non_tenant)\n\n def test_not_managed_auto_intermediary_model(self):\n \"\"\"\n Make sure that exposed un-managed models with m2m relations have their\n intermediary models also un-managed.\n \"\"\"\n get_field = RelatedTenantModel._meta.get_field\n self.assertFalse(get_field('m2m').rel.through._meta.managed)\n self.assertFalse(get_field('m2m_to_undefined').rel.through._meta.managed)\n self.assertFalse(get_field('m2m_through').rel.through._meta.managed)\n self.assertFalse(get_field('m2m_recursive').rel.through._meta.managed)\n self.assertFalse(get_field('m2m_non_tenant').rel.through._meta.managed)\n\n def test_invalid_foreign_key_related_name(self):\n # Ensure `related_name` with no %(tenant)s format placeholder also\n # raises an improperly configured error.\n with self.assertRaisesMessage(ImproperlyConfigured,\n \"Since `InvalidRelatedName.fk` is originating from an instance \"\n \"of `TenantModelBase` and not pointing to one \"\n \"its `related_name` option must ends with a \"\n \"'+' or contain the '%(class)s' format \"\n \"placeholder.\"):\n class InvalidRelatedName(TenantModel):\n fk = django_models.ForeignKey(NonTenantModel, related_name='no-tenant')\n\n def test_invalid_m2m_through(self):\n with self.assertRaisesMessage(ImproperlyConfigured,\n \"Since `InvalidThrough.m2m` is originating from an instance of \"\n \"`TenantModelBase` its `through` option must also be pointing \"\n \"to one.\"):\n class InvalidThrough(TenantModel):\n m2m = django_models.ManyToManyField(NonTenantModel,\n through='InvalidIntermediary')\n class InvalidIntermediary(django_models.Model):\n pass\n\n def 
test_non_tenant_related_descriptor(self):\n \"\"\"\n Make sure related descriptors are correctly attached to non-tenant\n models and removed on tenant deletion.\n \"\"\"\n for tenant in Tenant.objects.all():\n attr = \"tenant_%s_specificmodels\" % tenant.name\n self.assertTrue(hasattr(NonTenantModel, attr))\n tenant.delete()\n self.assertFalse(hasattr(NonTenantModel, attr))\n\n def test_subclassing(self):\n \"\"\"\n Make sure tenant model subclasses share the same tenant.\n \"\"\"\n for tenant in Tenant.objects.all():\n parents = tenant.specific_models_subclasses.model._meta.parents\n for parent in parents:\n if issubclass(parent, TenantSpecificModel):\n self.assertEqual(parent.tenant, tenant)\n tenant.specific_models_subclasses.create()\n self.assertEqual(tenant.specificmodels.count(), 1)\n\n def test_signals(self):\n \"\"\"\n Make sure signals are correctly dispatched for tenant models\n \"\"\"\n for tenant in Tenant.objects.all():\n signal_model = tenant.signal_models.model\n instance = signal_model()\n instance.save()\n instance.delete()\n self.assertListEqual(\n signal_model.logs(),\n [\n django_models.signals.pre_init,\n django_models.signals.post_init,\n django_models.signals.pre_save,\n django_models.signals.post_save,\n django_models.signals.pre_delete,\n django_models.signals.post_delete\n ]\n )\n\n\nclass NonTenantModelTest(TransactionTestCase):\n def test_fk_to_tenant(self):\n \"\"\"\n Non-tenant models shouldn't be allowed to have a ForeignKey pointing\n to an instance of `TenantModelBase`.\n \"\"\"\n with self.assertRaisesMessage(ImproperlyConfigured,\n \"`NonTenantFkToTenant.fk`'s `to` option` can't point to an \"\n \"instance of `TenantModelBase` since it's not one itself.\"):\n class NonTenantFkToTenant(django_models.Model):\n fk = django_models.ForeignKey('UndeclaredSpecificModel')\n\n class UndeclaredSpecificModel(TenantModel):\n pass\n\n def test_m2m_to_tenant(self):\n \"\"\"\n Non-tenant models shouldn't be allowed to have ManyToManyField pointing\n to an instance of `TenantModelBase`.\n \"\"\"\n with self.assertRaisesMessage(ImproperlyConfigured,\n \"`NonTenantM2MToTenant.m2m`'s `to` option` can't point to an \"\n \"instance of `TenantModelBase` since it's not one itself.\"):\n class NonTenantM2MToTenant(django_models.Model):\n m2m = django_models.ManyToManyField(SpecificModel)\n","sub_path":"tenancy/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":19582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"200269191","text":"from flask import Flask, render_template\nimport datetime\nfrom app_secrets import app_secrets\n\nclient_id = app_secrets.get('client_id')\nclient_secret = app_secrets.get('client_secret')\n\napp = Flask(__name__)\n\n@app.route('/')\ndef root(name=None):\n now = datetime.datetime.now()\n return render_template('index.html', name='Hello', now=now)\n\n@app.route('/redirect')\ndef redirect():\n print('Redirect')\n return render_template('redirect.html')\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"553885410","text":"import os\nimport re\nimport pyseq\nimport sys\nimport pprint\nfrom PIL import Image\nfrom pathlib import Path\nimport fsutil\npath = (\"N:\\\\\")\nprint(path)\n#file = os.stat(\"Schilpad_retopo_V01_bak5.hip\")\n#print('Size of file is', file.st_size, 'bytes')\n\n\n\nmy_root_set = set()\n\nfor root, dirs, 
files in os.walk(path):\n for name in files:\n my_root_set.add(root)\n #print(root)\n\n#print(\"my root sets zijn : {}\".format(my_root_set))\n\ndef format_bytes(size):\n # 2**10 = 1024\n power = 2**10\n n = 0\n power_labels = {0 : '', 1: 'kilo', 2: 'mega', 3: 'giga', 4: 'tera'}\n while size > power:\n size /= power\n n += 1\n return size, power_labels[n]+'bytes'\n\ndef humanbytes(B):\n 'Return the given bytes as a human friendly KB, MB, GB, or TB string'\n B = float(B)\n KB = float(1024)\n MB = float(KB ** 2) # 1,048,576\n GB = float(KB ** 3) # 1,073,741,824\n TB = float(KB ** 4) # 1,099,511,627,776\n\n if B < KB:\n return '{0} {1}'.format(B,'Bytes' if B == 0 or B > 1 else 'Byte')\n elif KB <= B < MB:\n return '{0:.2f} KB'.format(B/KB)\n elif MB <= B < GB:\n return '{0:.2f} MB'.format(B/MB)\n elif GB <= B < TB:\n return '{0:.2f} GB'.format(B/GB)\n elif TB <= B:\n return '{0:.2f} TB'.format(B/TB)\n\ntests = [1, 1024, 500000, 1048576, 50000000, 1073741824, 5000000000, 1099511627776, 5000000000000]\n\n#for t in tests: print('{0} == {1}'.format(t,humanbytes(t)))\n\ndef print_lijstnamen(folder):\n \"\"\"print the names of all list sequences from a folder\"\"\"\n for lijstnaam in range(len(folder)):\n print(folder[lijstnaam])\n\ndef detect_sequences(pad): # takes a path and returns all sequences from that path.\n folder = pyseq.get_sequences(pad) # this is a path that contains multiple sequences\n #pp = pprint.PrettyPrinter(indent=4)\n #pp.pprint(folder)\n #print(str(pad) + \" bevat volgende aantal file sequenses: \" + str(len(folder))) # aantal folder in folder\n #print_lijstnamen(folder)\n #print(\"type folder is:\" + str(type(folder)))\n return folder\n\ndef get_list_file_size(folder, items):\n #my_sequence_dict = {\"eq\": {\"foldernaam\": \"\", \"sequencenaam\" : \"\",\"sequencesize\":\"\"}}\n my_sequence_dict = {\"sequence\":[\"\",\"\",\"\"]}\n\n for i, sequence in enumerate(folder):\n #print(\"naam folder is {}\".format(len(folder[i])))\n\n if len(folder[i]) == 1:\n pass\n else:\n #print(\"naam daadwerkelijke sequence is {}\".format(folder[i]))\n\n\n filesize_list = []\n for file in folder[i]:\n #print(file)\n #print(path + file)\n full_path = str(items) + \"\\\\\" + str(file)\n #print(str(path))\n #print(str(file))\n #print(full_path)\n #file_size=os.path.getsize(str(full_path))\n #file_size = os.stat(full_path)\n #print('Size of file is', file_size.st_size, 'bytes')\n #print(\"the size of \" + full_path)\n\n\n # my os.stat approach\n #current_file = os.stat(full_path)\n #print('Size of file is', os.stat(path).st_size, 'bytes')\n #filesize_list.append(os.stat(path).st_size)\n\n #my sys.getsizeof approach\n \"\"\" image_file = Image.open(full_path)\n print(\"File Size In Bytes:- \"+str(len(image_file.fp.read()))) \"\"\"\n\n\n maat = fsutil.get_file_size(full_path)\n #print(\"get file size is: \" + str(maat)) \n size_str = fsutil.get_file_size_formatted(full_path)\n #print(\"get file size formatted is: \" + str(size_str))\n filesize_list.append(maat)\n\n\n\n filesize_totaal = sum(filesize_list)\n #print(len(filesize_list))\n #print(filesize_totaal)\n #print(humanbytes(filesize_totaal))\n\n #my_sequence_dict [\"foldernaam\"] = folder\n\n my_sequence_dict[\"sequence\"][0] = str(items)\n my_sequence_dict[\"sequence\"][1] = str(folder[i])\n my_sequence_dict[\"sequence\"][2] = humanbytes(filesize_totaal)\n #print(my_sequence_dict)\n return my_sequence_dict\n\n\n\n\nfor its, items in enumerate(my_root_set):\n gevonden = detect_sequences(items)\n #print(\"gevonden: {}\".format(gevonden))\n 
#print(\"its: {}\".format(its))\n #print(\"items: {}\".format(items))\n #print(\"Formaat van dict is: {}\".format(len(gevonden)))\n print(get_list_file_size(gevonden, items))\n #print(\"gevonden\" + str(type(gevonden)))","sub_path":"System_crawl_sequence_size_v01.py","file_name":"System_crawl_sequence_size_v01.py","file_ext":"py","file_size_in_byte":4529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"520051680","text":"import unittest\nimport uuid\nfrom typing import Any, Dict, Optional, List, NoReturn\nfrom dataclasses import asdict\n\nfrom autowp.core.resource.entity import Server\nfrom autowp.core.resource.repository import ResourceRepository \nfrom autowp.core.resource.usecase.create import CreateNewResourceUseCase \nfrom autowp.core.resource.usecase.remove import RemoveResourceUseCase \nfrom autowp.core.resource.usecase.detail import DetailResourceUseCase \n\nclass FakeRepo(ResourceRepository):\n\tdef __init__(self, memory: Dict[str, Any]):\n\t\tself.memory = memory\n\n\tdef create(self, server: Server) -> Optional[Server]:\n\t\tmem_id = uuid.uuid4()\n\t\tself.memory[str(mem_id)] = server\n\n\t\treturn Server( \\\n\t\t\tprofile_id=server.profile_id, \n\t\t\thost=server.host, \n\t\t\tuser=server.user, \n\t\t\tsites=server.sites,\n\t\t\tprovider=server.provider,\n\t\t\tcustom_key=server.custom_key,\n\t\t\tlabels=server.labels,\n\t\t\tid=mem_id \\\n\t\t)\n\t\n\tdef get_list(self, options: Optional[Dict[str, Any]] = None) -> Optional[List[Server]]:\n\t\tif not self.memory:\n\t\t\treturn None\n\n\t\tservers = [server for id, server in self.memory.items()]\n\t\treturn servers\n\n\tdef findById(self, resource_id: str) -> Optional[Server]:\n\t\tif not self.memory:\n\t\t\treturn None\n\n\t\tif resource_id in self.memory:\n\t\t\treturn self.memory[resource_id]\n\t\t\n\t\treturn None\n\n\tdef findByField(self, field: str, value: Any) -> Optional[Server]:\n\t\tif not self.memory:\n\t\t\treturn None\n\n\t\tfor _, val in self.memory.items():\n\t\t\tdata = asdict(val)\n\t\t\tfor key, val2 in data.items():\n\t\t\t\tif key == field:\n\t\t\t\t\tif val2 == value:\n\t\t\t\t\t\treturn val \n\n\t\treturn None\n\t\n\tdef remove(self, resource_id: str) -> NoReturn:\n\t\tresource_id = str(resource_id)\n\t\tif resource_id in self.memory.keys():\n\t\t\tdel self.memory[resource_id]\n\nclass RemoveResourceTestCase(unittest.TestCase):\n\n\tdef test_remove_success(self):\n\t\tmemory = {}\n\t\trepo = FakeRepo(memory)\n\t\tusecase = CreateNewResourceUseCase(repo)\n\n\t\tserver = Server(profile_id='profile_id', host='host', user='user', sites=10)\n\t\tsaved = usecase.register(server)\n\t\tself.assertEqual('profile_id', saved.profile_id)\n\n\t\tremove_adapter = RemoveResourceUseCase(repo)\n\t\tremove_adapter.remove(saved.id)\n\n\t\tdetail_usecase = DetailResourceUseCase(repo)\n\t\tdetail = detail_usecase.detail(str(saved.id))\n\t\tself.assertIsNone(detail)\n","sub_path":"autowp/tests/test_core/test_resource/test_usecase/test_remove.py","file_name":"test_remove.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"400547442","text":"from odoo import models,api,fields\nfrom odoo.exceptions import Warning, ValidationError,UserError,except_orm\nfrom io import StringIO\nimport base64\nimport csv\nfrom datetime import datetime\nfrom odoo.tools.float_utils import float_round, float_compare\n\nclass import_product_package_info(models.TransientModel):\n _name = 
'import.product.package.info.ept'\n _description = 'import product package info'\n \n choose_file = fields.Binary('Choose File',filters='*.csv')\n file_name = fields.Char(\"File Name\")\n picking_id = fields.Many2one('stock.picking',string=\"Picking\")\n delimiter=fields.Selection([('semicolon','Semicolon')],\"Seperator\",default=\"semicolon\")\n \n \n @api.model\n def default_get(self,fields):\n context = dict(self._context or {})\n vals = super(import_product_package_info,self).default_get(fields)\n picking_id = context.get('active_id', [])\n vals['picking_id'] = picking_id\n return vals\n \n @api.one\n def get_file_name(self, name=datetime.strftime(datetime.now(),'%Y%m%d%H%M%S%f')):\n return '/tmp/product_package_%s_%s' %(self.env.uid,name)\n \n @api.one\n def read_file(self,file_name,file):\n imp_file = StringIO(base64.decodestring(file).decode('utf-8'))\n new_file_name = self.get_file_name(name=file_name)[0]\n file_write = open(new_file_name,'w')\n file_write.writelines(imp_file.getvalue())\n file_write.close()\n file_read = open(new_file_name, \"rU\")\n dialect = csv.Sniffer().sniff(file_read.readline())\n file_read.seek(0)\n if self.delimiter=='semicolon':\n reader = csv.DictReader(file_read,dialect=dialect,delimiter=';',quoting=csv.QUOTE_NONE)\n# elif self.delimiter=='colon':\n# reader = csv.DictReader(file_read,dialect=dialect,delimiter=',',quoting=csv.QUOTE_NONE)\n# else:\n# reader = csv.DictReader(file_read,dialect=dialect,delimiter='\\t',quoting=csv.QUOTE_NONE)\n return reader\n \n @api.one\n def validate_fields(self, fieldname):\n '''\n This import pattern requires few fields default, so check it first whether it's there or not.\n '''\n require_fields = ['default_code', 'quantity', 'package_ref','height','width','length','weight','package_type']\n missing = []\n for field in require_fields:\n if field not in fieldname:\n missing.append(field)\n #missing = list(set(require_fields) - set(fieldname))\n \n if len(missing) > 0:\n raise except_orm(('Incorrect format found..!'), ('Please provide all the required fields in file, missing fields => %s.' 
%(missing)))\n \n return True\n \n def fill_dictionary_from_file(self,reader):\n product_data = []\n for row in reader:\n vals = {\n 'default_code' : row.get('default_code'),\n 'quantity' : row.get('quantity'),\n 'package_ref' : row.get('package_ref'),\n 'height' : row.get('height'),\n 'width' : row.get('width'),\n 'length' : row.get('length'),\n 'weight' : row.get('weight'),\n 'package_type' : row.get('package_type')\n }\n product_data.append(vals)\n \n return product_data\n \n @api.multi\n def import_package_info(self):\n if self.file_name and self.file_name[-3:] != 'csv':\n raise Warning(\"You can only import CSV file\")\n product_packaging_obj = self.env['product.packaging']\n product_product_obj = self.env['product.product']\n stock_move_obj = self.env['stock.move']\n stock_move_line_obj = self.env['stock.move.line']\n stock_quant_package_obj = self.env['stock.quant.package']\n \n reader = self.read_file(self.file_name,self.choose_file)[0]\n fieldname = reader.fieldnames\n picking_id = self.picking_id\n if self.validate_fields(fieldname) :\n product_data = self.fill_dictionary_from_file(reader)\n for data in product_data:\n default_code = data.get('default_code')\n file_qty = data.get('quantity')\n package_ref = data.get('package_ref')\n height = data.get('height')\n width = data.get('width')\n length = data.get('length')\n weight = data.get('weight')\n package_type = data.get('package_type','')\n if package_type.lower() == 'pallet':\n package_type = 'pallet'\n elif package_type.lower() == 'carton':\n package_type = 'carton'\n product_package = product_packaging_obj.search([('height','=',float(height)),('width','=',float(width)),('length','=',float(length))])\n if not product_package:\n product_package = product_packaging_obj.create({\n 'name' : 'BOX %s x %s x %s'%(height,width,length),\n 'height' : float(height) ,\n 'width' : float(width),\n 'length' : float(length)\n })\n \n \n package = False\n if not package:\n package = stock_quant_package_obj.search([('amazon_carrier_code','=',package_ref)])\n if not package:\n package = stock_quant_package_obj.create({'amazon_carrier_code' : package_ref})\n package.write({'packaging_id' : product_package.id,\n 'amazon_package_weight' : float(weight),\n 'package_type' : package_type})\n product = product_product_obj.search([('default_code','=',default_code)])\n move_lines = stock_move_obj.search([('picking_id','=',picking_id.id),('product_id','=',product.id),('state','in',('confirmed','assigned','partially_available'))])\n if not move_lines :\n continue \n qty_left = float(file_qty)\n for move in move_lines:\n if qty_left > move.reserved_availability :\n raise Warning(\"File Qty Should be equal to Reserved qty\")\n if qty_left <= 0.0 :\n break\n move_line_remaning_qty = (move.product_uom_qty)-(sum(move.move_line_ids.mapped('qty_done')))\n stock_move_lines = move.move_line_ids.filtered(lambda o: o.qty_done <= 0 and not o.result_package_id)\n \n for stock_move_line in stock_move_lines:\n if stock_move_line.product_uom_qty<=qty_left:\n op_qty=stock_move_line.product_uom_qty\n else:\n op_qty=qty_left\n stock_move_line.write({'qty_done':op_qty})\n self._put_in_pack_ept(stock_move_line,package)\n qty_left=float_round(qty_left -op_qty,precision_rounding=stock_move_line.product_uom_id.rounding,rounding_method='UP')\n move_line_remaning_qty=move_line_remaning_qty-op_qty\n if qty_left<=0.0:\n break\n if qty_left>0.0 and move_line_remaning_qty>0.0:\n if move_line_remaning_qty<=qty_left:\n op_qty=move_line_remaning_qty\n else:\n op_qty=qty_left\n 
stock_move_line_obj.create(\n { \n 'product_id':move.product_id.id,\n 'product_uom_id':move.product_id.uom_id.id, \n 'picking_id':picking_id.id,\n 'qty_done':float(op_qty) or 0,\n 'ordered_qty':float(op_qty) or 0,\n 'result_package_id':package and package.id or False,\n 'location_id':picking_id.location_id.id, \n 'location_dest_id':picking_id.location_dest_id.id,\n 'move_id':move.id,\n })\n qty_left=float_round(qty_left -op_qty,precision_rounding=move.product_id.uom_id.rounding,rounding_method='UP')\n if qty_left<=0.0:\n break\n if qty_left>0.0:\n stock_move_line_obj.create(\n { \n 'product_id': move_lines[0].product_id.id,\n 'product_uom_id':move_lines[0].product_id.uom_id.id, \n 'picking_id':picking_id.id,\n 'ordered_qty':float(qty_left) or 0,\n 'qty_done':float(qty_left) or 0,\n 'result_package_id':package and package.id or False,\n 'location_id':picking_id.location_id.id, \n 'location_dest_id':picking_id.location_dest_id.id,\n 'move_id':move_lines[0].id,\n })\n \n self.picking_id.write({'is_package_info_imported' : True})\n return True\n \n \n \n def _put_in_pack_ept(self,operation,package):\n operation_ids = self.env['stock.move.line']\n if float_compare(operation.qty_done, operation.product_uom_qty, precision_rounding=operation.product_uom_id.rounding) >= 0:\n operation_ids |= operation\n else:\n quantity_left_todo = float_round(\n operation.product_uom_qty - operation.qty_done,\n precision_rounding=operation.product_uom_id.rounding,\n rounding_method='UP')\n new_operation = operation.copy(\n default={'product_uom_qty':0, 'qty_done': operation.qty_done})\n operation.write({'product_uom_qty': quantity_left_todo,'qty_done': 0.0})\n new_operation.write({'product_uom_qty':operation.qty_done})\n operation_ids |= new_operation\n package and operation_ids.write({'result_package_id': package.id})\n return True\n ","sub_path":"odoo_apps/amazon_vendor_central_ept/wizard/import_product_package_info_ept.py","file_name":"import_product_package_info_ept.py","file_ext":"py","file_size_in_byte":10654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"270265347","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib\n\n# data to plot\nn_groups = 11\n\n\nbld_old = (9.03, 6.61, 6.56, 14.21,\t6.95,\t11.08,\t4.31,\t2.83,\t9.11,\t7.64, 21.01)\nbld_new = (9.31, 6.53, 6.69, 14.48, 5.91, 10.18, 4.29, 2.01, 10.98, 6.94, 21.85)\n\n#plt.figure(1)\n#plt.figure(figsize=(10,5))\n\n# create plot\nfig, ax = plt.subplots()\nindex = np.arange(n_groups)\nbar_width = 0.25\nopacity = 0.8\n\nrects1 = plt.bar(index, bld_new, bar_width, alpha=opacity, color='teal', label='Bld 2')\n\nrects2 = plt.bar(index + bar_width, bld_old, bar_width, alpha=opacity, color='chocolate', label='Bld 1')\n\n#plt.xlabel('some build (26) vs. 
some build (81)')\nplt.ylabel('Time (Seconds)')\n#plt.title('Some Performance test with Load')\nplt.xticks(rotation=75)\n#plt.xticks(index + bar_width, ('A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K'))\nplt.legend()\n\nplt.tight_layout()\n\nrows = ["Build2", "Build1"]\ncolumns = ('A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K')\ncolor = ('teal', 'chocolate')\n\n#plt.figure(figsize=(10,5))\n\n#the_table = plt.table(cellText=[[1,2,3,4,5,6,7,8,9,10,11], [3,4,5,6,7,8,9,8,7,6,5]],\nthe_table = plt.table(cellText=[bld_new, bld_old],\n rowLabels=rows,\n rowColours=color,\n colLabels=columns,\n loc='bottom')\n\nthe_table.auto_set_font_size(False)\nthe_table.set_fontsize(9)\n\n#### plt.figure(figsize=(10,5))\n\n#plt.subplots(figsize=(10,5))\n\nplt.subplots_adjust(left=0.1, bottom=0.2, top=0.9)\n\nplt.ylabel("KPI (in seconds)")\n#plt.yticks(new_build, old_build)\nplt.xticks([])\n\nplt.figtext(0.5, 0.05,"01(2) vs. old build 1", wrap=True, horizontalalignment='center', fontsize=9)\n\nplt.title('Template: KPI Performance: with Load (seconds or bandwidth)')\n\n#### plt.figure(figsize=(10,5))\n\n#matplotlib.rc('figure', figsize=[10,5])\n\nplt.show()\n\n","sub_path":"chart_example/plotchart.py","file_name":"plotchart.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"425604166","text":"# -*- UTF-8 -*-\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\n\nfrom src.analyse_users import get_user_tags\n\n\ndef plot_tags_trend(csv_path, tag_list):\n df = pd.read_csv(csv_path)\n\n for tag_name in tag_list:\n tags_count_percentage = []\n row_tags_sum = df['row_tags_sum']\n for index, val in enumerate(df[tag_name]):\n # sum = sum + val\n tags_count_percentage.append(val / row_tags_sum[index])\n # period = pd.period_range(start="2008-08", end="2018-08", freq='M')\n # Set the display format of the time labels\n plt.plot(df['periods'], tags_count_percentage, marker="o", label=tag_name)\n\n plt.ylabel('% of Stack Overflow Tags that month')\n plt.xticks(rotation=90)\n plt.title("Tags Trends")\n # Show the legend\n plt.legend()\n plt.show()\n\n\ndef plot_user_tags_stacked(user_tags_dataframe):\n """\n For the five most frequent tags, plot their monthly share of the total tag count\n :param user_tags_dataframe:\n :return:\n """\n # Count the total number of occurrences of each tag\n every_tag_count = pd.Series()\n for col in user_tags_dataframe.columns.values:\n sum = 0\n for index in user_tags_dataframe.index.values:\n sum += user_tags_dataframe.loc[index, col]\n every_tag_count[col] = sum\n # print(every_tag_count)\n # Count the number of tags in each month\n every_month_tag_count = pd.Series()\n for index in user_tags_dataframe.index.values:\n sum = 0\n for col in user_tags_dataframe.columns.values:\n sum += user_tags_dataframe.loc[index, col]\n every_month_tag_count[index] = sum\n # print(every_month_tag_count)\n # Sort to find the top 5 tags\n every_tag_count = every_tag_count.sort_values(ascending=False)\n # print(every_tag_count)\n Y_df = pd.DataFrame(data=0, index=every_tag_count.index[:5], columns=user_tags_dataframe.index.values)\n\n\n for tag in Y_df.index.values:\n for period in Y_df.columns.values:\n Y_df.loc[tag, period] = user_tags_dataframe.loc[period, tag] / every_month_tag_count[period]\n\n # print(Y_df)\n\n fig, ax = plt.subplots()\n # X:N array Y: M*N 2d array\n ax.stackplot(user_tags_dataframe.index.values, Y_df.values,\n labels=Y_df.index.values)\n plt.ylabel("% of User's Tags that month")\n plt.xticks(rotation=90)\n plt.title("User's Top 5 Tag Trends")\n plt.legend()\n plt.show()\n\n\nif __name__ == '__main__':\n df = 
get_user_tags(29407)\n # print(df)\n plot_user_tags_stacked(df)\n\n","sub_path":"records/about Stack Overflow data/users trends/plot_trend.py","file_name":"plot_trend.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"19769390","text":"def binary_search(list, target):\n low_index = 0\n high_index = len(list) - 1\n\n while low_index <= high_index:\n mid_index = (low_index + high_index) // 2\n guess_item = list[mid_index]\n if guess_item == target:\n return guess_item\n if guess_item > target:\n high_index = mid_index - 1\n else:\n low_index = mid_index + 1\n return None\n\nmy_list = [1, 3, 5, 6, 8, 10]\n\nprint(binary_search(my_list, 3))\nprint(binary_search(my_list, -3))\n","sub_path":"GrokkingAlgorithms/01_binary_search/python/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"200847107","text":"'''\r\nCreated on 4 lip 2015\r\n\r\n@author: Sfgv\r\n'''\r\n\r\nimport numpy as np\r\nfrom Tools.constants import VARIABLE\r\nfrom HMM.hmm import pair_lse\r\nimport cv2\r\n\r\ndef sum_exp(l):\r\n s = -float('inf')\r\n for x in l:\r\n s = pair_lse(s, x)\r\n \r\n return s\r\n\r\ndef search_row(dctBlock_rgb, hmm_model_H0, hmm_model_H1, *hmm_model_H2):\r\n \r\n found = []\r\n \r\n def sprt(row, hmm__H0, hmm__H1):\r\n b = 20\r\n a = -5\r\n# print('sprt')\r\n S = 0\r\n M = 0\r\n restart = True\r\n a0_i_1 = 1\r\n a1_i_1 = 1\r\n for i in range(0,(len(row))):\r\n\r\n s0 = hmm__H0.sequential_forward( row[i], restart)\r\n s1 = hmm__H1.sequential_forward( row[i], restart)\r\n \r\n restart = False\r\n# print('s0',s0)\r\n# print('s1',s1)\r\n# print(sum_exp(s0), sum( numpy.exp(s0)))\r\n a0_i = sum_exp(s0) \r\n a1_i = sum_exp(s1) \r\n \r\n f0 = a0_i - a0_i_1 \r\n f1 = a1_i - a1_i_1\r\n \r\n p = f1-f0\r\n \r\n S += p\r\n print('i',i,S)\r\n if (S > b): \r\n M = i+1\r\n S = 0\r\n elif (S < a):\r\n return [M,i-M+1]\r\n \r\n a0_i_1 = a0_i\r\n a1_i_1 = a1_i\r\n \r\n return [M,i-M+1]\r\n \r\n def cusum(row):\r\n print('cusum')\r\n b = 10\r\n S = 0\r\n\r\n restart = True\r\n a0_i_1 = 0\r\n a1_i_1 = 0\r\n for i in range(0,(len(row))):\r\n# print('i',i)\r\n s0 = hmm_model_H0.sequential_forward( row[i], restart)\r\n s1 = hmm_model_H1.sequential_forward( row[i], restart)\r\n \r\n restart = False\r\n \r\n a0_i = sum_exp(s0) \r\n a1_i = sum_exp(s1) \r\n \r\n f0 = a0_i - a0_i_1 \r\n f1 = a1_i - a1_i_1\r\n \r\n p = f1 - f0\r\n \r\n S = max(0, S + p)\r\n if S > 0:\r\n print('cusum',S)\r\n if (S > b): \r\n return (i+1)\r\n \r\n a0_i_1 = a0_i\r\n a1_i_1 = a1_i\r\n \r\n return len(row)\r\n \r\n i = 0\r\n while i < len(dctBlock_rgb):\r\n# print('while', dctBlock_rgb[i:])\r\n N = cusum(dctBlock_rgb[i:])\r\n print('while',i,N)\r\n found += ([0] * N)\r\n \r\n i += N\r\n if i < len(dctBlock_rgb):\r\n M,N = sprt(dctBlock_rgb[i:], hmm_model_H0, hmm_model_H1)\r\n print('sprt',M,N)\r\n iM = i+M\r\n while i < iM:\r\n MA, NA = [M,0]\r\n \r\n for model in hmm_model_H2: \r\n MA_T, NA_T = sprt(dctBlock_rgb[i:iM], model, hmm_model_H1)\r\n if MA_T < MA:\r\n MA, NA = MA_T, NA_T\r\n elif MA_T == MA:\r\n MA, NA = MA_T, np.maximum(NA, NA_T)\r\n \r\n# MA, NA = [np.min(MA, MA_T), np.min(NA, NA_T)]\r\n found += ([1] * MA)\r\n found += ([0] * NA)\r\n i = i+MA+NA\r\n found += ([0] * N)\r\n \r\n i += N\r\n else:\r\n break\r\n\r\n return found\r\n\r\ndef zigzag(matrix):\r\n n = len(matrix)\r\n indexorder = sorted(((x,y) 
for x in range(n) for y in range(n)), key = lambda x: (x[0]+x[1], ((-x[1]) if (x[0]+x[1]) % 2 else x[1])) )\r\n return [matrix[index[0]][index[1]] for index in indexorder]\r\n\r\ndef find_object(img_matrix_ycbcr, img_matrix_o, hmm_model_H0, hmm_model_H1, *hmm_model_H2):\r\n \r\n p2dhmm_model_H0 = hmm_model_H0.model\r\n p2dhmm_model_H1 = hmm_model_H1.model\r\n \r\n window_size = hmm_model_H0.window\r\n \r\n coeff_n = VARIABLE['coefficients_number']\r\n \r\n blockH = VARIABLE['block_height']\r\n blockW = VARIABLE['block_width']\r\n\r\n imgH, imgW = np.array(np.shape(img_matrix_ycbcr)[:2])\r\n \r\n overlapH = int( blockH/2 )\r\n overlapW = int( blockW/2 )\r\n\r\n numBlockH = int( (imgH-blockH)/(blockH-overlapH) ) + 1\r\n numBlockW = int( (imgW-blockW)/(blockW-overlapW) ) + 1\r\n \r\n img_r_matrix = np.array(img_matrix_ycbcr[:,:,0], np.float32)\r\n img_g_matrix = np.array(img_matrix_ycbcr[:,:,1], np.float32)\r\n img_b_matrix = np.array(img_matrix_ycbcr[:,:,2], np.float32)\r\n\r\n data_bgr = [ ]\r\n for row in range(numBlockH):\r\n bgr = []\r\n for col in range(numBlockW):\r\n row_start, col_start = [ row*(blockW-overlapW), col*(blockH-overlapH)]\r\n row_end, col_end = [ row_start+blockW, col_start+blockH ] \r\n \r\n \r\n bgr.append( zigzag( cv2.dct(img_b_matrix[row_start:row_end, col_start:col_end]), coeff_n ) )\r\n bgr.append( zigzag( cv2.dct(img_g_matrix[row_start:row_end, col_start:col_end]), coeff_n ) )\r\n bgr.append( zigzag( cv2.dct(img_r_matrix[row_start:row_end, col_start:col_end]), coeff_n ) )\r\n \r\n data_bgr.append(bgr)\r\n \r\n data_bgr = np.reshape( np.array(data_bgr), (numBlockH, numBlockW, 3, coeff_n) )\r\n \r\n \r\n# print(np.sum(found))\r\n# print(found)\r\n\r\n# for row in range(len(found)):\r\n# for col in range(len(found[0])):\r\n# # print('rowcol',row,col)\r\n# if (found[row][col] == 1):\r\n# row_start, col_start = [ row*(blockW-overlapW), col*(blockH-overlapH)]\r\n# row_end, col_end = [ row_start+blockW, col_start+blockH ] \r\n\r\n# if (row > 0):\r\n# if (found[row-1][col] == 0):\r\n# \r\n# row_start = row_start + overlapW\r\n# \r\n# if (row < (len(found)-1)):\r\n# if (found[row+1][col] == 0):\r\n# row_end = row_end - overlapW \r\n# \r\n# if (col > 0):\r\n# if (found[row][col-1] == 0):\r\n# col_start = col_start + overlapH\r\n# \r\n# if (col < (len(found[0])-1)):\r\n# if(found[row][col+1] == 0):\r\n# col_end = col_end - overlapH \r\n\r\n\r\n# img[row_start:row_end,col_start:col_end] = [0,250,0]#get_rgb([hsv[0], int(hsv[1]/2), 100 ])\r\n# print('po')\r\n# print(img_matrix[row_start:row_end,col_start:col_end])\r\n# elif (horizontal[i][j] == 1):\r\n# matrix[i][j] = [255,0,0]\r\n# elif (vertical[i][j] == 1):\r\n# matrix[i][j] = [0,0,255]\r\n \r\n return(img)\r\n# print('horizontal')\r\n# horizontal = numpy.zeros( (len(matrix), len(matrix[0])) )\r\n# for i in range(0, len(matrix) ):\r\n# if i %100 == 0:\r\n# print(i)\r\n# # print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%')\r\n# horizontal[ i,: ] = search_row(matrix[ i,: ], hmm_model_H0, hmm_model_H1, hmm_model_H2)\r\n# \r\n# print(matrix[ 0,: ])\r\n# print('vertical')\r\n# print(matrix[ :,0 ])\r\n# vertical = numpy.zeros( (len(matrix), len(matrix[0])) )\r\n# for i in range(0,len(matrix[0])):\r\n# if i %100 == 0:\r\n# print(i)\r\n# \r\n# vertical[ :,i ] = numpy.transpose( search_row( matrix[ :,i ], hmm_model_H0, hmm_model_H1, hmm_model_H2) )\r\n# \r\n# \r\n# for i in range(0,len(matrix)):\r\n# for j in range(0,len(matrix[0])):\r\n# if (horizontal[i][j] == 1) and (vertical[i][j] == 1):\r\n# 
#hsv = get_hsv(matrix[i][j])\r\n# matrix[i][j] = [0,250,0]#get_rgb([hsv[0], int(hsv[1]/2), 100 ])\r\n# elif (horizontal[i][j] == 1):\r\n# matrix[i][j] = [255,0,0]\r\n# elif (vertical[i][j] == 1):\r\n# matrix[i][j] = [0,0,255]\r\n \r\n\r\n \r\n# def search_row(row, hmm_model_H0, hmm_model_H1, hmm_model_H2, is_trawa):\r\n# global row_temp\r\n# treshold_down = VARIABLE['density']\r\n# treshold_up = VARIABLE['treshold']\r\n# \r\n# S1 = 0\r\n# # S2 = 0\r\n# for i in range(2,(len(row)+1)):\r\n# # print(i,'--------------------')\r\n# s0 = hmm_model_H0.forward(row[:i])\r\n# s1 = hmm_model_H1.forward(row[:i])\r\n# \r\n# a0_i_1 = math.exp(s0[i-1,0]) + math.exp(s0[i-1,1])\r\n# a0_i = math.exp(s0[i,0]) + math.exp(s0[i,1])\r\n# \r\n# a1_i_1 = math.exp(s1[i-1,0]) + math.exp(s1[i-1,1])\r\n# a1_i = math.exp(s1[i,0]) + math.exp(s1[i,1])\r\n# \r\n# # a2_i_1 = math.exp(s2[i-1,0]) + math.exp(s2[i-1,1])\r\n# # a2_i = math.exp(s2[i,0]) + math.exp(s2[i,1])\r\n# \r\n# f0 = (a0_i/a0_i_1) \r\n# f1 = (a1_i/a1_i_1) \r\n# # f2 = (a2_i/a2_i_1) \r\n# \r\n# p1 = math.log(f1/f0)\r\n# # p2 = math.log(f2/f0)\r\n# \r\n# S1 += p1\r\n# # S2 += p2\r\n# # print(S1, S2)\r\n# if (S1 > treshold_up): \r\n# row_temp += ([0] * i)\r\n# search_row(row[i:], hmm_model_H0, hmm_model_H1, hmm_model_H2, False)\r\n# break\r\n# elif (S1 < treshold_down):\r\n# row_temp += ([1] * i)\r\n# \r\n# search_row(row[i:], hmm_model_H0, hmm_model_H1, hmm_model_H2, False)\r\n# break\r\n# elif i == len(row):\r\n# # print(S1, S2)\r\n# if abs(S1-treshold_down) < abs(S1-treshold_up) :\r\n# row_temp += ([1] * i)\r\n# else:\r\n# row_temp += ([0] * i)\r\n# \r\n# if (len(row) == 1):\r\n# p0 = hmm_model_H0.log_probability(row)\r\n# p1 = hmm_model_H1.log_probability(row)\r\n# if p0 > p1:\r\n# row_temp += [1]\r\n# else:\r\n# row_temp += [0]\r\n# \r\n# i=0\r\n# \r\n# if is_trawa:\r\n# # print('#############################################################################################koncowka')\r\n# # print(row_temp)\r\n# while i < len(row_temp):\r\n# if row_temp[i] == 1:\r\n# j = i\r\n# \r\n# while i < len(row_temp):\r\n# if row_temp[i] == 1:\r\n# i += 1\r\n# else:\r\n# break\r\n# \r\n# row_temp[j:i] = check_trawa( row[j:i], hmm_model_H0, hmm_model_H2 )\r\n# row_temp_trawa.clear()\r\n# \r\n# i += 1\r\n# \r\n# return(row_temp)\r\n# \r\n# def check_trawa( row, hmm_model_H0, hmm_model_H1 ):\r\n# global row_temp_trawa\r\n# treshold_down = VARIABLE['density']\r\n# treshold_up = VARIABLE['treshold']\r\n# # print('check trawa', row)\r\n# S1 = 0\r\n# # S2 = 0\r\n# for i in range(2,(len(row)+1)):\r\n# # print(i,'--------------------')\r\n# s0 = hmm_model_H0.forward(row[:i])\r\n# s1 = hmm_model_H1.forward(row[:i])\r\n# # s2 = hmm_model_H2.forward(row[:i])\r\n# # print(row[:i])\r\n# # print(s0)\r\n# # print(s1)\r\n# # print(s2)\r\n# # \r\n# a0_i_1 = math.exp(s0[i-1,0]) + math.exp(s0[i-1,1])\r\n# a0_i = math.exp(s0[i,0]) + math.exp(s0[i,1])\r\n# \r\n# a1_i_1 = math.exp(s1[i-1,0]) + math.exp(s1[i-1,1])\r\n# a1_i = math.exp(s1[i,0]) + math.exp(s1[i,1])\r\n# \r\n# # a2_i_1 = math.exp(s2[i-1,0]) + math.exp(s2[i-1,1])\r\n# # a2_i = math.exp(s2[i,0]) + math.exp(s2[i,1])\r\n# \r\n# f0 = (a0_i/a0_i_1) \r\n# f1 = (a1_i/a1_i_1) \r\n# # f2 = (a2_i/a2_i_1) \r\n# \r\n# p1 = math.log(f1/f0)\r\n# # p2 = math.log(f2/f0)\r\n# \r\n# S1 += p1\r\n# # S2 += p2\r\n# # print(S1)\r\n# if (S1 > treshold_up): \r\n# row_temp_trawa += ([0] * i)\r\n# check_trawa(row[i:], hmm_model_H0, hmm_model_H1)\r\n# break\r\n# elif (S1 < treshold_down):\r\n# row_temp_trawa += ([1] * i)\r\n# \r\n# check_trawa(row[i:], 
hmm_model_H0, hmm_model_H1)\r\n# break\r\n# elif i == len(row):\r\n# # print(S1, S2)\r\n# if abs(S1-treshold_down) < abs(S1-treshold_up) :\r\n# row_temp_trawa += ([1] * i)\r\n# else:\r\n# row_temp_trawa += ([0] * i)\r\n# \r\n# if (len(row) == 1):\r\n# # print('konscowka 1')\r\n# p0 = hmm_model_H0.log_probability(row)\r\n# p1 = hmm_model_H1.log_probability(row)\r\n# # print(p0, p1)\r\n# if p0 > p1:\r\n# row_temp_trawa += [1]\r\n# else:\r\n# row_temp_trawa += [0]\r\n# \r\n# return row_temp_trawa\r\n ","sub_path":"HMM/Engine/find.py","file_name":"find.py","file_ext":"py","file_size_in_byte":12619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"528021541","text":"# 编写登录接口\nimport json\ncount=0\nfor i in range(3):\n name=input('请输入用户名:').strip()\n pwd=input('请输入密码:').strip()\n if len(name)==0:\n print('用户名不能为空!')\n if len(name)==0:\n print('密码不能为空')\n fp=open('user.json','r')\n a=fp.read()\n fp.close()\n j=json.loads(a)\n if name == j['name']:\n if pwd==j['pwd']:\n print('登录成功')\n break\n else:\n count+=1\n print('密码错误!')\n if count==3:\n print('锁定用户!')\n else:\n print('用户名错误')\n\n","sub_path":"day1/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"236124904","text":"# Manages the DocBot Flask application\n\nfrom flask import Flask, render_template, url_for, request, redirect\nfrom datetime import datetime\nimport threading\nimport time\n\nimport chat\nimport autoopen\n\n# Create the flask object\napp = Flask(__name__, template_folder = \"templates\", static_folder = \"static\")\n\nconversations = []\nstatus = \"Online\"\n\n# Function to add a message to conversations\ndef add_msg (message, mtype) :\n\ttimemsg = datetime.now().strftime(\"%H:%M | %B %d\")\n\tresponse = [mtype, message, timemsg]\n\tconversations.append(response)\n\n# Function to initialize the conversation\ndef start_conversing() :\n\tadd_msg(\"Hello! 
I am DocBot, a digitally trained medical consultant\", 0)\n\tadd_msg(\"To finish this consultation, enter 'stop'\", 0)\n\n# Function to get a reply from the bot\ndef respond (message) :\n\ttime.sleep(1)\n\tglobal status\n\tstatus = \"Online\"\n\tanswer = chat.reply(message)\n\tadd_msg(answer, 0)\n\n# Binds the base URL to the 'consult' function\n@app.route('/', methods = ['POST', 'GET'])\ndef consult() :\n\tif request.method == 'POST' :\n\t\ttry :\n\t\t\tmessage = request.form['message']\n\t\t\tif message == \"\" :\n\t\t\t\tpass\n\t\t\telif message.lower() == \"stop\" :\n\t\t\t\ttime.sleep(1)\n\t\t\t\tconversations.clear()\n\t\t\t\tstart_conversing()\n\t\t\telse :\n\t\t\t\tadd_msg(message, 1)\n\t\t\t\tglobal status\n\t\t\t\tstatus = \"Typing ...\"\n\t\t\t\tt = threading.Thread(target = respond, args = (message, ))\n\t\t\t\tt.start()\n\t\t\treturn redirect('/#chat')\n\t\texcept :\n\t\t\treturn \"There was an error reaching DocBot\"\n\telse :\n\t\treturn render_template('consult.html', conv = conversations, status = status)\n\n# Main method\nif __name__ == \"__main__\" :\n\tstart_conversing()\n\ttimer = threading.Timer(2, autoopen.flaskopen)\n\ttimer.start() \n\tapp.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"512781942","text":"# Method for sorting the list\ndef Sorting(list):\n    NewList = list\n    # Sort the list ascending alphabetically \n    NewList.sort()\n    #NewList = sorted(list, key=len, reverse=False)\n    # Sort the list by length\n    NewList.sort(key=lambda item: (len(item), item))\n\n    # Output to a new text file\n    newTextFile = open('output.txt', 'r+')\n    # Add new lines after each element in the list\n    for x in NewList:\n        newTextFile.writelines(str(x) + \"\\n\")\n    newTextFile.close()\n\n    # Return our new sorted list\n    return NewList\n\n# Open the text file to be sorted\ntext_file = open('Sort Me.txt', 'r')\ntemp = text_file.read().splitlines()\n# Strip away any whitespaces\nreadList = [x.strip(' ') for x in temp]\ntext_file.close()\n\n# Print the old list to compare to the sorted list\n# Add new lines for clarity \nprint('\\n'+\"Old List\"+'\\n')\nprint(readList) \nprint('\\n')\nprint('\\n'+\"New Sorted List\"+'\\n')\nprint(Sorting(readList))\n# Erase output in output.txt so we can use it again\nopen('output.txt', 'w').close()","sub_path":"SortFunction.py","file_name":"SortFunction.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"100828446","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/protect/mutation_calling/radia.py\n# Compiled at: 2018-05-07 13:54:25\nfrom __future__ import print_function\nfrom collections import defaultdict\nfrom math import ceil\nfrom protect.common import docker_path, docker_call, export_results, get_files_from_filestore, untargz\nfrom protect.mutation_calling.common import sample_chromosomes, merge_perchrom_vcfs\nfrom toil.job import PromisedRequirement\nimport os, sys\n\ndef radia_disk(tumor_bam, normal_bam, rna_bam, fasta):\n    return int(ceil(tumor_bam.size) + ceil(normal_bam.size) + ceil(rna_bam.size) + 5 * ceil(fasta.size))\n\n\ndef run_radia_with_merge(job, rna_bam, tumor_bam, normal_bam, univ_options, radia_options):\n    \"\"\"\n    A wrapper for the entire RADIA 
sub-graph.\n\n :param dict rna_bam: Dict dicts of bam and bai for tumor RNA-Seq obtained by running STAR within\n ProTECT.\n :param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq\n :param dict normal_bam: Dict of bam and bai for normal DNA-Seq\n :param dict univ_options: Dict of universal options used by almost all tools\n :param dict radia_options: Options specific to RADIA\n :return: fsID to the merged RADIA calls\n :rtype: toil.fileStore.FileID\n \"\"\"\n spawn = job.wrapJobFn(run_radia, rna_bam['rna_genome'], tumor_bam, normal_bam, univ_options, radia_options, disk='100M', memory='100M').encapsulate()\n merge = job.wrapJobFn(merge_perchrom_vcfs, spawn.rv(), univ_options, disk='100M', memory='100M')\n job.addChild(spawn)\n spawn.addChild(merge)\n return merge.rv()\n\n\ndef run_radia(job, rna_bam, tumor_bam, normal_bam, univ_options, radia_options):\n \"\"\"\n Spawn a RADIA job for each chromosome on the input bam trios.\n\n :param dict rna_bam: Dict of bam and bai for tumor DNA-Seq. It can be one of two formats\n rna_bam: # Just the genomic bam and bai\n |- 'rna_genome_sorted.bam': fsID\n +- 'rna_genome_sorted.bam.bai': fsID\n OR\n rna_bam: # The output from run_star\n |- 'rna_transcriptome.bam': fsID\n |- 'rna_genome': # Only this part will be used\n |- 'rna_genome_sorted.bam': fsID\n +- 'rna_genome_sorted.bam.bai': fsID\n :param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq\n :param dict normal_bam: Dict of bam and bai for normal DNA-Seq\n :param dict univ_options: Dict of universal options used by almost all tools\n :param dict radia_options: Options specific to RADIA\n :return: Dict of results from running RADIA on every chromosome\n perchrom_radia:\n |- 'chr1': fsID\n |- 'chr2' fsID\n |\n |-...\n |\n +- 'chrM': fsID\n :rtype: dict\n \"\"\"\n if 'rna_genome' in rna_bam.keys():\n rna_bam = rna_bam['rna_genome']\n else:\n if set(rna_bam.keys()) == {'rna_genome_sorted.bam', 'rna_genome_sorted.bam.bai'}:\n pass\n else:\n raise RuntimeError('An improperly formatted dict was passed to rna_bam.')\n bams = {'tumor_rna': rna_bam['rna_genome_sorted.bam'], 'tumor_rnai': rna_bam['rna_genome_sorted.bam.bai'], \n 'tumor_dna': tumor_bam['tumor_dna_fix_pg_sorted.bam'], \n 'tumor_dnai': tumor_bam['tumor_dna_fix_pg_sorted.bam.bai'], \n 'normal_dna': normal_bam['normal_dna_fix_pg_sorted.bam'], \n 'normal_dnai': normal_bam['normal_dna_fix_pg_sorted.bam.bai']}\n if radia_options['chromosomes']:\n chromosomes = radia_options['chromosomes']\n else:\n chromosomes = sample_chromosomes(job, radia_options['genome_fai'])\n perchrom_radia = defaultdict()\n for chrom in chromosomes:\n radia = job.addChildJobFn(run_radia_perchrom, bams, univ_options, radia_options, chrom, memory='6G', disk=PromisedRequirement(radia_disk, tumor_bam['tumor_dna_fix_pg_sorted.bam'], normal_bam['normal_dna_fix_pg_sorted.bam'], rna_bam['rna_genome_sorted.bam'], radia_options['genome_fasta']))\n filter_radia = radia.addChildJobFn(run_filter_radia, bams, radia.rv(), univ_options, radia_options, chrom, memory='6G', disk=PromisedRequirement(radia_disk, tumor_bam['tumor_dna_fix_pg_sorted.bam'], normal_bam['normal_dna_fix_pg_sorted.bam'], rna_bam['rna_genome_sorted.bam'], radia_options['genome_fasta']))\n perchrom_radia[chrom] = filter_radia.rv()\n\n job.fileStore.logToMaster('Ran spawn_radia on %s successfully' % univ_options['patient'])\n return perchrom_radia\n\n\ndef run_radia_perchrom(job, bams, univ_options, radia_options, chrom):\n \"\"\"\n Run RADIA call on a single chromosome in the input bams.\n\n :param dict bams: 
Dict of bam and bai for tumor DNA-Seq, normal DNA-Seq and tumor RNA-Seq\n :param dict univ_options: Dict of universal options used by almost all tools\n :param dict radia_options: Options specific to RADIA\n :param str chrom: Chromosome to process\n :return: fsID for the chromsome vcf\n :rtype: toil.fileStore.FileID\n \"\"\"\n work_dir = os.getcwd()\n input_files = {'rna.bam': bams['tumor_rna'], \n 'rna.bam.bai': bams['tumor_rnai'], \n 'tumor.bam': bams['tumor_dna'], \n 'tumor.bam.bai': bams['tumor_dnai'], \n 'normal.bam': bams['normal_dna'], \n 'normal.bam.bai': bams['normal_dnai'], \n 'genome.fa.tar.gz': radia_options['genome_fasta'], \n 'genome.fa.fai.tar.gz': radia_options['genome_fai']}\n input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)\n for key in ('genome.fa', 'genome.fa.fai'):\n input_files[key] = untargz(input_files[(key + '.tar.gz')], work_dir)\n\n input_files = {key:docker_path(path) for key, path in input_files.items()}\n radia_output = ('').join([work_dir, '/radia_', chrom, '.vcf'])\n radia_log = ('').join([work_dir, '/radia_', chrom, '_radia.log'])\n parameters = [univ_options['patient'],\n chrom,\n '-n', input_files['normal.bam'],\n '-t', input_files['tumor.bam'],\n '-r', input_files['rna.bam'],\n ('').join(['--rnaTumorFasta=', input_files['genome.fa']]),\n '-f', input_files['genome.fa'],\n '-o', docker_path(radia_output),\n '-i', univ_options['ref'],\n '-m', input_files['genome.fa'],\n '-d', 'aarjunrao@soe.ucsc.edu',\n '-q', 'Illumina',\n '--disease', 'CANCER',\n '-l', 'INFO',\n '-g', docker_path(radia_log)]\n docker_call(tool='radia', tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub'], tool_version=radia_options['version'])\n output_file = job.fileStore.writeGlobalFile(radia_output)\n job.fileStore.logToMaster('Ran radia on %s:%s successfully' % (univ_options['patient'], chrom))\n return output_file\n\n\ndef run_filter_radia(job, bams, radia_file, univ_options, radia_options, chrom):\n \"\"\"\n Run filterradia on the RADIA output.\n\n :param dict bams: Dict of bam and bai for tumor DNA-Seq, normal DNA-Seq and tumor RNA-Seq\n :param toil.fileStore.FileID radia_file: The vcf from runnning RADIA\n :param dict univ_options: Dict of universal options used by almost all tools\n :param dict radia_options: Options specific to RADIA\n :param str chrom: Chromosome to process\n :return: fsID for the filtered chromsome vcf\n :rtype: toil.fileStore.FileID\n \"\"\"\n work_dir = os.getcwd()\n input_files = {'rna.bam': bams['tumor_rna'], \n 'rna.bam.bai': bams['tumor_rnai'], \n 'tumor.bam': bams['tumor_dna'], \n 'tumor.bam.bai': bams['tumor_dnai'], \n 'normal.bam': bams['normal_dna'], \n 'normal.bam.bai': bams['normal_dnai'], \n 'radia.vcf': radia_file, \n 'genome.fa.tar.gz': radia_options['genome_fasta'], \n 'genome.fa.fai.tar.gz': radia_options['genome_fai'], \n 'cosmic_beds': radia_options['cosmic_beds'], \n 'dbsnp_beds': radia_options['dbsnp_beds'], \n 'retrogene_beds': radia_options['retrogene_beds'], \n 'pseudogene_beds': radia_options['pseudogene_beds'], \n 'gencode_beds': radia_options['gencode_beds']}\n input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)\n for key in ('genome.fa', 'genome.fa.fai'):\n input_files[key] = untargz(input_files[(key + '.tar.gz')], work_dir)\n\n for key in ('cosmic_beds', 'dbsnp_beds', 'retrogene_beds', 'pseudogene_beds', 'gencode_beds'):\n input_files[key] = untargz(input_files[key], work_dir)\n\n input_files = {key:docker_path(path) for key, path in 
input_files.items()}\n filterradia_log = ('').join([work_dir, '/radia_filtered_', chrom, '_radia.log'])\n parameters = [univ_options['patient'],\n chrom.lstrip('chr'),\n input_files['radia.vcf'],\n '/data',\n '/home/radia/scripts',\n '-d', input_files['dbsnp_beds'],\n '-r', input_files['retrogene_beds'],\n '-p', input_files['pseudogene_beds'],\n '-c', input_files['cosmic_beds'],\n '-t', input_files['gencode_beds'],\n '--noSnpEff',\n '--noBlacklist',\n '--noTargets',\n '--noRnaBlacklist',\n '-f', input_files['genome.fa'],\n '--log=INFO',\n '-g', docker_path(filterradia_log)]\n docker_call(tool='filterradia', tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub'], tool_version=radia_options['version'])\n output_file = ('').join([work_dir, '/', chrom, '.vcf'])\n os.rename(('').join([work_dir, '/', univ_options['patient'], '_', chrom, '.vcf']), output_file)\n output_fsid = job.fileStore.writeGlobalFile(output_file)\n export_results(job, output_fsid, output_file, univ_options, subfolder='mutations/radia')\n job.fileStore.logToMaster('Ran filter-radia on %s:%s successfully' % (\n univ_options['patient'], chrom))\n return output_fsid\n\n\ndef process_radia_vcf(job, radia_vcf, work_dir, univ_options):\n \"\"\"\n Process the RADIA vcf to for passing calls and additionally sites having multiple alt alleles\n to pick out on the most likely ones.\n\n :param toil.fileStore.FileID radia_vcf: fsID for a RADIA generated chromosome vcf\n :param str work_dir: Working directory\n :param dict univ_options: Dict of universal options used by almost all tools\n :return: Path to the processed vcf\n :rtype: str\n \"\"\"\n radia_vcf = job.fileStore.readGlobalFile(radia_vcf)\n with open(radia_vcf, 'r') as (infile):\n with open(radia_vcf + 'radia_parsed.tmp', 'w') as (outfile):\n for line in infile:\n if line.startswith('#'):\n print(line.strip(), file=outfile)\n continue\n line = line.strip().split('\\t')\n if line[6] != 'PASS' or 'MT=GERM' in line[7]:\n continue\n if len(line[4]) == 1:\n print(('\\t').join(line), file=outfile)\n else:\n seq_field_indeces = [\n 9, 10]\n alleles = [line[3]] + line[4].split(',')\n normal_af = line[9].split(':')[6].split(',')\n tumor_ad = line[10].split(':')[5].split(',')\n tumor_af = line[10].split(':')[6].split(',')\n if len(line[11]) > 1:\n rna_ad = line[11].split(':')[5].split(',')\n rna_af = line[11].split(':')[6].split(',')\n seq_field_indeces += [11]\n else:\n rna_ad = rna_af = [0, 0, 0, 0]\n out_alleles = set([])\n out_af_ad_index = {0}\n for i in range(1, len(normal_af)):\n if (float(tumor_af[i]) >= 0.1 and int(tumor_ad[i]) >= 4 or float(rna_af[i]) >= 0.1 and int(rna_ad[i]) >= 4) and float(normal_af[i]) < 0.1:\n out_alleles.add(alleles[i])\n out_af_ad_index.add(i)\n\n if len(out_alleles) > 0:\n line[4] = (',').join(out_alleles)\n for seq_field_index in seq_field_indeces:\n deets = line[seq_field_index].split(':')\n for field_index in range(5, 9):\n field = deets[field_index].split(',')\n deets[field_index] = (',').join([ x for i, x in enumerate(field) if i in out_af_ad_index\n ])\n\n deets[1] = str(sum([ int(x) for x in deets[5].split(',') ]))\n gt_by_ad = set([ i for i, x in enumerate(deets[5].split(',')) if int(x) >= 4\n ])\n gt_by_af = set([ i for i, x in enumerate(deets[6].split(',')) if float(x) >= 0.1\n ])\n genotype = gt_by_ad.intersection(gt_by_af)\n if len(genotype) == 0:\n deets[0] = '0/0'\n elif len(genotype) == 1:\n deets[0] = ('/').join([ str(x) for x in genotype ] + [ str(x) for x in genotype ])\n elif len(genotype) == 2:\n deets[0] = 
('/').join([ str(x) for x in genotype ])\n else:\n print('ERROR : triple genotype detected', file=sys.stderr)\n print(line, file=sys.stdout)\n line[seq_field_index] = (':').join(deets)\n\n print(('\\t').join(line), file=outfile)\n\n return outfile.name","sub_path":"pycfiles/protect-2.6.1-py2.7/radia.py","file_name":"radia.py","file_ext":"py","file_size_in_byte":13767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"546586395","text":"\"\"\" PIAF\n Core containers for connections to the PI Asset Framework.\n\"\"\"\n# Copyright 2017 Hugo van den Berg, Stijn de Jong\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# pragma pylint: disable=unused-import, redefined-builtin\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom builtins import (\n ascii,\n bytes,\n chr,\n dict,\n filter,\n hex,\n input,\n int,\n list,\n map,\n next,\n object,\n oct,\n open,\n pow,\n range,\n round,\n str,\n super,\n zip,\n)\n\ntry:\n from __builtin__ import str as BuiltinStr\nexcept ImportError:\n BuiltinStr = str\n# pragma pylint: enable=unused-import, redefined-builtin\nfrom warnings import warn\n\nfrom PIconnect.AFSDK import AF\nfrom PIconnect.PIData import PISeries, PISeriesContainer\nfrom PIconnect._operators import add_operators, OPERATORS\n\n\nclass PIAFDatabase(object):\n \"\"\"PIAFDatabase\n\n Context manager for connections to the PI Asset Framework database.\n \"\"\"\n\n version = \"0.1.1\"\n\n servers = {\n s.Name: {\"server\": s, \"databases\": {d.Name: d for d in s.Databases}}\n for s in AF.PISystems()\n }\n if AF.PISystems().DefaultPISystem:\n default_server = servers[AF.PISystems().DefaultPISystem.Name]\n elif len(servers) > 0:\n default_server = servers[list(servers)[0]]\n else:\n default_server = None\n\n def __init__(self, server=None, database=None):\n self.server = None\n self.database = None\n self._initialise_server(server)\n self._initialise_database(database)\n\n def _initialise_server(self, server):\n if server and server not in self.servers:\n message = 'Server \"{server}\" not found, using the default server.'\n warn(message=message.format(server=server), category=UserWarning)\n server = self.servers.get(server, self.default_server)\n self.server = server[\"server\"]\n\n def _initialise_database(self, database):\n server = self.servers.get(self.server.Name)\n if not server[\"databases\"]:\n server[\"databases\"] = {x.Name: x for x in self.server.Databases}\n if database and database 
not in server[\"databases\"]:\n message = 'Database \"{database}\" not found, using the default database.'\n warn(message=message.format(database=database), category=UserWarning)\n default_db = self.server.Databases.DefaultDatabase\n self.database = server[\"databases\"].get(database, default_db)\n\n def __enter__(self):\n self.server.Connect()\n return self\n\n def __exit__(self, *args):\n pass\n # Disabled disconnecting because garbage collection sometimes impedes\n # connecting to another server later\n # self.server.Disconnect()\n\n def __repr__(self):\n return \"%s(\\\\\\\\%s\\\\%s)\" % (\n self.__class__.__name__,\n self.server_name,\n self.database_name,\n )\n\n @property\n def server_name(self):\n \"\"\"Return the name of the connected PI AF server.\"\"\"\n return self.server.Name\n\n @property\n def database_name(self):\n \"\"\"Return the name of the connected PI AF database.\"\"\"\n return self.database.Name\n\n @property\n def children(self):\n \"\"\"Return a dictionary of the direct child elements of the database.\"\"\"\n return {c.Name: PIAFElement(c) for c in self.database.Elements}\n\n def descendant(self, path):\n \"\"\"Return a descendant of the database from an exact path.\"\"\"\n return PIAFElement(self.database.Elements.get_Item(path))\n\n\nclass PIAFElement(object):\n \"\"\"Container for PI AF elements in the database.\"\"\"\n\n version = \"0.1.0\"\n\n def __init__(self, element):\n self.element = element\n\n def __repr__(self):\n return \"%s(%s)\" % (self.__class__.__name__, self.name)\n\n @property\n def name(self):\n \"\"\"Return the name of the current element.\"\"\"\n return self.element.Name\n\n @property\n def parent(self):\n \"\"\"Return the parent element of the current element, or None if it has none.\"\"\"\n if not self.element.Parent:\n return None\n return self.__class__(self.element.Parent)\n\n @property\n def children(self):\n \"\"\"Return a dictionary of the direct child elements of the current element.\"\"\"\n return {c.Name: self.__class__(c) for c in self.element.Elements}\n\n def descendant(self, path):\n \"\"\"Return a descendant of the current element from an exact path.\"\"\"\n return self.__class__(self.element.Elements.get_Item(path))\n\n @property\n def attributes(self):\n \"\"\"Return a dictionary of the attributes of the current element.\"\"\"\n return {a.Name: PIAFAttribute(self, a) for a in self.element.Attributes}\n\n\n@add_operators(\n operators=OPERATORS,\n members=[\"_current_value\", \"interpolated_values\"],\n newclassname=\"VirtualPIAFAttribute\",\n attributes=[\"element\", \"attribute\"],\n)\nclass PIAFAttribute(PISeriesContainer):\n \"\"\"Container for attributes of PI AF elements in the database.\"\"\"\n\n version = \"0.1.0\"\n\n def __init__(self, element, attribute):\n super().__init__()\n self.element = element\n self.attribute = attribute\n\n def __repr__(self):\n return \"%s(%s, %s; Current Value: %s %s)\" % (\n self.__class__.__name__,\n self.name,\n self.description,\n self.current_value,\n self.units_of_measurement,\n )\n\n @property\n def name(self):\n \"\"\"Return the name of the current attribute.\"\"\"\n return self.attribute.Name\n\n @property\n def parent(self):\n \"\"\"Return the parent attribute of the current attribute, or None if it has none.\"\"\"\n if not self.attribute.Parent:\n return None\n return self.__class__(self.element, self.attribute.Parent)\n\n @property\n def children(self):\n \"\"\"Return a dictionary of the direct child attributes of the current attribute.\"\"\"\n return {\n a.Name: 
self.__class__(self.element, a) for a in self.attribute.Attributes\n }\n\n @property\n def description(self):\n \"\"\"Return the description of the PI Point.\"\"\"\n return self.attribute.Description\n\n @property\n def last_update(self):\n \"\"\"Return the time at which the current_value was last updated.\"\"\"\n return PISeries.timestamp_to_index(self.attribute.GetValue().Timestamp.UtcTime)\n\n @property\n def units_of_measurement(self):\n \"\"\"Return the units of measurement in which values for this element are reported.\"\"\"\n return self.attribute.DefaultUOM\n\n def _current_value(self):\n return self.attribute.GetValue().Value\n\n def _recorded_values(self, time_range, boundary_type, filter_expression):\n include_filtered_values = False\n return self.attribute.Data.RecordedValues(\n time_range,\n boundary_type,\n self.attribute.DefaultUOM,\n filter_expression,\n include_filtered_values,\n )\n\n def _interpolated_values(self, time_range, interval, filter_expression):\n \"\"\"Internal function to actually query the pi point\"\"\"\n include_filtered_values = False\n return self.attribute.Data.InterpolatedValues(\n time_range,\n interval,\n self.attribute.DefaultUOM,\n filter_expression,\n include_filtered_values,\n )\n\n def _summary(self, time_range, summary_types, calculation_basis, time_type):\n return self.attribute.Data.Summary(\n time_range, summary_types, calculation_basis, time_type\n )\n\n def _summaries(\n self, time_range, interval, summary_types, calculation_basis, time_type\n ):\n return self.attribute.Data.Summaries(\n time_range, interval, summary_types, calculation_basis, time_type\n )\n\n def _filtered_summaries(\n self,\n time_range,\n interval,\n filter_expression,\n summary_types,\n calculation_basis,\n filter_evaluation,\n filter_interval,\n time_type,\n ):\n return self.attribute.Data.FilteredSummaries(\n time_range,\n interval,\n filter_expression,\n summary_types,\n calculation_basis,\n filter_evaluation,\n filter_interval,\n time_type,\n )\n","sub_path":"PIconnect/PIAF.py","file_name":"PIAF.py","file_ext":"py","file_size_in_byte":9418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"13754966","text":"import networkx as nx\nimport numpy as np\nimport ReadMultiplexNetwork as RMN\nimport pickle\nimport copy\nimport random\n\n\ndef merge_g(nx_graphs):\n    '''Build the merged graph structure over all layers'''\n    m_g = nx.Graph()\n    for g in nx_graphs:\n        m_g.add_nodes_from(g.nodes())\n        m_g.add_edges_from(g.edges())\n    return m_g\n\ndef get_selected_edges(pos_edge_list, neg_edge_list):\n    '''Edges and labels for training and testing'''\n    edges = pos_edge_list + neg_edge_list\n    labels = np.zeros(len(edges))\n    labels[:len(pos_edge_list)] = 1\n    return edges, labels\n\ndef generate_pos_neg_links(nx_graph, merge_network, test_para):\n    '''Generate positive and negative sample edges'''\n    Multi_Networks = copy.deepcopy(nx_graph)\n    # train_g = copy.deepcopy(merge_network)\n    selected_layer = random.randint(0, len(Multi_Networks) - 1)\n    train_g = copy.deepcopy(Multi_Networks[selected_layer])\n    Multi_Networks.remove(Multi_Networks[selected_layer])\n    # Get the edges present in the network\n    exit_edges = list(train_g.edges())\n    num_exit = len(exit_edges)\n\n    # Get the edges absent from the network\n    noexit_edges = list(nx.non_edges(train_g))\n    num_noexit = len(noexit_edges)\n\n    # Shuffle both lists\n    random.shuffle(exit_edges)\n    random.shuffle(noexit_edges)\n\n    # Sampling of positive edges\n    pos_edge_list = []\n    n_count = 0\n    edges = exit_edges\n    rnd = np.random.RandomState(seed=None)\n    rnd_inx = rnd.permutation(edges)  # generate indices from the random seed\n    for eii in rnd_inx:\n        edge = eii\n        # Delete this edge\n        data = train_g[edge[0]][edge[1]]\n        
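# Keep this edge's attribute dict so the edge can be restored with\n # train_g.add_edge(*edge, **data) if removing it disconnects the merged\n # graph or any individual layer.\n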
train_g.remove_edge(*edge)\n\n # Check whether the whole network stays connected after this edge is removed\n if nx.is_connected(train_g):\n flag = True\n for g in Multi_Networks:\n if edge in g.edges():\n gt = copy.deepcopy(g)\n gt.remove_edge(*edge)\n if nx.is_connected(gt) == False:\n del gt\n flag = False\n break\n if flag:\n for g in Multi_Networks:\n if edge in g.edges():\n g.remove_edge(*edge)\n pos_edge_list.append(tuple(edge))\n n_count += 1\n else:\n train_g.add_edge(*edge, **data)\n else:\n train_g.add_edge(*edge, **data)\n\n # Positively sampled edges\n if not len(pos_edge_list): # If the original graphs are all empty this is meaningless, so just pick a fixed share of edges at random\n pos_edge_list = exit_edges[:int(len(exit_edges)*test_para)]\n [g.remove_edge(*e) for g in Multi_Networks for e in pos_edge_list if e in g.edges()]\n [train_g.remove_edge(*e) for e in pos_edge_list]\n nneg = npos = len(pos_edge_list)\n else:\n # Determine the number of test edges\n if len(pos_edge_list) < num_noexit:\n npos = int(test_para * len(pos_edge_list)) # number of positive samples\n else:\n npos = int(test_para * num_noexit)\n nneg = npos # number of negative samples\n pos_edge_list = pos_edge_list[:nneg]\n\n # Negatively sampled edges\n neg_edge_list = noexit_edges[:nneg]\n # Test edge dataset and labels\n test_edges, labels = get_selected_edges(pos_edge_list, neg_edge_list)\n return Multi_Networks, train_g, pos_edge_list, neg_edge_list, test_edges, labels\n\ndef selected_pos_neg_links(nx_graph, test_para):\n '''\n nx_graph: the original networks\n test_para: the fraction of edges used for testing\n '''\n\n Multi_Networks = copy.deepcopy(nx_graph)\n # Select the network layer to operate on\n selected_layer = random.randint(0, len(Multi_Networks)-1)\n train_g = copy.deepcopy(Multi_Networks[selected_layer])\n Multi_Networks.remove(Multi_Networks[selected_layer])\n\n # Rebuild the merged graph from the remaining networks\n train_mg = merge_g(Multi_Networks)\n\n # Get the edges present in the network\n exit_edges = list(train_g.edges())\n num_exit = len(exit_edges)\n\n # Get the edges absent from the network\n noexit_edges = list(nx.non_edges(train_g))\n num_noexit = len(noexit_edges)\n\n # Shuffle both lists\n random.shuffle(exit_edges)\n random.shuffle(noexit_edges)\n\n # Decide the final number of test edges from the positive and negative counts\n if num_noexit < num_exit:\n selected_edges_number = int(num_noexit * test_para)\n else:\n selected_edges_number = int(num_exit * test_para)\n\n pos_edge_list = exit_edges[:selected_edges_number]\n neg_edge_list = noexit_edges[:selected_edges_number]\n\n # Normalize the edge test set\n test_posedges_list = list(set(exit_edges) - set(pos_edge_list))\n test_negedges_list = noexit_edges[selected_edges_number:selected_edges_number+len(test_posedges_list)]\n test_edges, labels = get_selected_edges(test_posedges_list, test_negedges_list)\n return Multi_Networks, train_mg, pos_edge_list, neg_edge_list, test_edges, labels\n\ndef each_selected_pos_neg_links(nx_graph, test_para):\n '''\n nx_graph: the original networks\n test_para: the fraction of edges used for testing\n '''\n\n Multi_Networks = copy.deepcopy(nx_graph)\n test_edges_list = []\n test_edges_labels = []\n # Select each network layer to operate on in turn\n for train_g in Multi_Networks:\n # Get the edges present in the network\n exit_edges = list(train_g.edges())\n num_exit = len(exit_edges)\n\n # Get the edges absent from the network\n noexit_edges = list(nx.non_edges(train_g))\n num_noexit = len(noexit_edges)\n\n # Decide the final number of test edges from the positive and negative counts\n if num_noexit < num_exit:\n selected_edges_number = int(num_noexit * test_para)\n else:\n selected_edges_number = int(num_exit * test_para)\n\n # Randomly select test edges\n random.shuffle(exit_edges)\n random.shuffle(noexit_edges)\n\n # Keep the same number of positive and negative edges\n pos_edge_list = exit_edges[:selected_edges_number]\n neg_edge_list = noexit_edges[:selected_edges_number]\n train_g.remove_edges_from(pos_edge_list)\n test_edges, labels = get_selected_edges(pos_edge_list, neg_edge_list)\n test_edges_list.append(test_edges)\n test_edges_labels.append(labels)\n\n # Rebuild the merged graph from the remaining networks\n train_mg = 
merge_g(Multi_Networks)\n\n # Normalized edge test set\n return Multi_Networks, train_mg, test_edges_list, test_edges_labels\n\nif __name__ == '__main__':\n\n # Load the dataset and renumber nodes to start from 0. The CKM dataset is missing the five nodes 154, 165, 195, 201 and 203, so those nodes have to be dropped when the networks are built\n nx_graphs = RMN.read_f('../pierreauger_multiplex.edges')\n merge_graph = merge_g(nx_graphs)\n\n # Load the community labels for use by other tasks\n name = 'pierreauger_information'\n ground_truth_norm = []\n\n if sorted(list(merge_graph.nodes()))[0] > 0:\n merge_graph = nx.relabel_nodes(merge_graph, lambda x: x-1)\n nx_graphs = [nx.relabel_nodes(nx_graphs[i], lambda x: x-1) for i in range(len(nx_graphs))]\n\n # Pad every layer with the full node set to form a standard multiplex network\n nx_graphs_Norm = []\n for g in nx_graphs:\n temp_g = nx.Graph()\n temp_g.add_nodes_from(merge_graph.nodes())\n temp_g.add_edges_from(g.edges())\n nx_graphs_Norm.append(temp_g)\n\n # Generate the positive and negative edges used for testing and training\n train_ng, train_mg, test_edges, labels = each_selected_pos_neg_links(nx_graphs_Norm, 0.2)\n # Save the processed data\n Dict_graph = {'merge_graph':merge_graph, 'nx_graph':nx_graphs_Norm, \"comm_label\":np.array(ground_truth_norm),'train_mg':train_mg, 'train_ng':train_ng, 'test_edges': test_edges, \"test_labels\": labels}\n pickle.dump(Dict_graph, open('./' + name+ '.pickle', '+wb'))\n\n\n","sub_path":"Code/Construct_Network.py","file_name":"Construct_Network.py","file_ext":"py","file_size_in_byte":7741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"556713666","text":"import threading\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport infinc\nimport time\nimport math\n\nclass SimpleClient :\n    def __init__(self, therm1, therm2) :\n        self.fig, self.ax = plt.subplots()\n        now = time.time()\n        self.lastTime = now\n        self.times = [time.strftime(\"%H:%M:%S\", time.localtime(now-i)) for i in range(30, 0, -1)]\n        self.infTemps = [0]*30\n        self.incTemps = [0]*30\n        self.infLn, = plt.plot(range(30), self.infTemps, label=\"Infant Temperature\")\n        self.incLn, = plt.plot(range(30), self.incTemps, label=\"Incubator Temperature\")\n        plt.xticks(range(30), self.times, rotation=45)\n        plt.ylim((20,50))\n        plt.legend(handles=[self.infLn, self.incLn])\n        self.infTherm = therm1\n        self.incTherm = therm2\n\n        self.ani = animation.FuncAnimation(self.fig, self.updateInfTemp, interval=500)\n        self.ani2 = animation.FuncAnimation(self.fig, self.updateIncTemp, interval=500)\n\n    def updateTime(self) :\n        now = time.time()\n        if math.floor(now) > math.floor(self.lastTime) :\n            t = time.strftime(\"%H:%M:%S\", time.localtime(now))\n            self.times.append(t)\n            #last 30 seconds of data\n            self.times = self.times[-30:]\n            self.lastTime = now\n            plt.xticks(range(30), self.times,rotation = 45)\n            plt.title(time.strftime(\"%A, %Y-%m-%d\", time.localtime(now)))\n\n\n    def updateInfTemp(self, frame) :\n        self.updateTime()\n        self.infTemps.append(self.infTherm.getTemperature()-273)\n        #self.infTemps.append(self.infTemps[-1] + 1)\n        self.infTemps = self.infTemps[-30:]\n        self.infLn.set_data(range(30), self.infTemps)\n        return self.infLn,\n\n    def updateIncTemp(self, frame) :\n        self.updateTime()\n        self.incTemps.append(self.incTherm.getTemperature()-273)\n        #self.incTemps.append(self.incTemps[-1] + 1)\n        self.incTemps = self.incTemps[-30:]\n        self.incLn.set_data(range(30), self.incTemps)\n        return self.incLn,\n\nUPDATE_PERIOD = .05 #in seconds\nSIMULATION_STEP = .1 #in seconds\n\n#create a new instance of IncubatorSimulator\nbob = infinc.Human(mass = 8, length = 1.68, temperature = 36 + 273)\nbobThermo = infinc.SmartThermometer(bob, UPDATE_PERIOD)\nbobThermo.start() 
#start the thread\n\ninc = infinc.Incubator(width = 1, depth=1, height = 1, temperature = 37 + 273, roomTemperature = 20 + 273)\nincThermo = infinc.SmartThermometer(inc, UPDATE_PERIOD)\nincThermo.start() #start the thread\n\nincHeater = infinc.SmartHeater(powerOutput = 1500, setTemperature = 45 + 273, thermometer = incThermo, updatePeriod = UPDATE_PERIOD)\ninc.setHeater(incHeater)\nincHeater.start() #start the thread\n\nsim = infinc.Simulator(infant = bob, incubator = inc, roomTemp = 20 + 273, timeStep = SIMULATION_STEP, sleepTime = SIMULATION_STEP / 10)\n\nsim.start()\n\nsc = SimpleClient(bobThermo, incThermo)\n\nplt.grid()\nplt.show()\n\n","sub_path":"SampleClient.py","file_name":"SampleClient.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"548906415","text":"'''\n从数组中移除指定数值,并返回新数组的长度\n需要常量空间,在python中不可使用range\n'''\nclass Solution(object):\n def removeElement(self, nums, val):\n \"\"\"\n :type nums: List[int]\n :type val: int\n :rtype: int\n \"\"\"\n i,j=0,0\n\n n=len(nums)\n while i 6:\n return\n\n pygame.mixer.Sound.play(self.sound_fire)\n\n self.game.current_scene.bullets.append(\n Bullet(\n self.game,\n self.position + (self.direction * self.size),\n self.direction))\n\n\n def handle_key_press(self, key_pressed):\n if key_pressed[pygame.K_RIGHT]:\n self.direction.rotate_ip(self.rotation_speed * self.game.dt)\n\n elif key_pressed[pygame.K_LEFT]:\n self.direction.rotate_ip(-self.rotation_speed * self.game.dt)\n\n self.thrusting = False\n if key_pressed[pygame.K_UP]:\n self.thrusting = True\n\n def update(self):\n\n if self.invincible:\n if time.time() - self.spawn_time > self.invincible_time:\n self.invincible = False\n\n # Move the player\n current_speed = self.velocity.length()\n if self.thrusting:\n acceleration = self.acceleration * (self.game.dt)\n self.velocity = self.velocity + (self.direction * acceleration)\n\n # Ensure the player doesn't go too fast\n if current_speed > self.maximum_speed:\n scale = self.maximum_speed / current_speed\n self.velocity = self.velocity * scale\n\n # If we're not thrusting, and we've almost come to a stop, stop!\n elif current_speed < 0.1:\n self.velocity = pygame.math.Vector2()\n\n # Otherwise, slowly decelerate\n elif current_speed > 0:\n deceleration = self.deceleration * (self.game.dt)\n self.velocity += (self.velocity.normalize() * -1) * deceleration\n\n self.position = self.position + (self.velocity * self.game.dt)\n self.position = self.game.wrap_position(self.position, self.size)\n\n # Generate coordinates to use to draw the player and detect collisions\n self.points = []\n for rotation in [0, -135, 135]:\n angle = pygame.math.Vector2(0, -1).angle_to(self.direction) - 90\n angle = (angle + rotation) % 360\n self.points.append((self.size, angle))\n\n def draw(self):\n\n colour = \"white\"\n paused = isinstance(self.game.current_scene, Pause)\n if self.invincible and not paused:\n t = round(time.time() - self.spawn_time, 1)\n t = t * 10 % 2\n if t == 0:\n colour = \"green\"\n # Draw our player\n for line in utils.points_to_lines(self.position, self.points):\n pygame.draw.line(self.game.display, colour, *line, 3)\n","sub_path":"asteroids/entities/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":4380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"158778869","text":"import json\nfrom urllib.parse import urljoin\nimport requests\nfrom lxml import etree\n\nbase_url = 
'https://www.qd8.com/changecity.php'  # links for each region\ncates = [\n    {\"cate_name\": \"公司注册\", 'cate_url': 'gongsizhuce/'},\n    {\"cate_name\": \"商标\", 'cate_url': 'shangbiao/'}\n]\n\n\ndef get_start_urls():\n    \"\"\"\n    Request the page listing all regions and collect every region's link\n    :return:\n    \"\"\"\n    # Request the page\n    result = requests.get(base_url)\n    # Convert to HTML\n    html = parse_response(result)\n    # Parse the page for every region name and link\n    all_cities = html.xpath('//dl[@id=\"clist\"]/div/dd/a')\n    for city in all_cities:\n        # City name\n        local = city.xpath('./text()')[0]\n        # City link\n        city_url = city.xpath('./@href')[0]\n        for cate in cates:\n            # Create a dict\n            dd = dict()\n            # Category name\n            cate_name = cate.get('cate_name')\n            # Category link\n            cate_url = cate.get('cate_url')\n            # Full URL\n            url = urljoin(city_url, cate_url)\n            # Add the values to the dict\n            dd['local'] = local\n            dd['cate_name'] = cate_name\n            dd['url'] = url\n            # Write to file\n            record_result(dd)\n\n\ndef record_result(data):\n    \"\"\"\n    Write the result to a file\n    :param data:\n    :return:\n    \"\"\"\n    path = r'G:\\工作\\APP\\wandoujia\\QiDian8_city.json'\n    with open(path, 'a+', encoding='utf-8') as f:\n        f.write(json.dumps(data, ensure_ascii=False) + \"\\n\")\n\n\ndef parse_response(res):\n    \"\"\"\n    Convert the fetched response to HTML\n    :param res: response (text)\n    :return: HTML\n    \"\"\"\n    try:\n        if res.status_code == 200:\n            # used for testing\n            print(\"查看获取的响应:\", res.text)\n            res.encoding = res.apparent_encoding\n            return etree.HTML(res.text, etree.HTMLParser())\n        else:\n            print(\"响应的状态码不是200\")\n            return False\n    except Exception as e:\n        print(e)\n\n\n# Test code\nif __name__ == \"__main__\":\n    get_start_urls()\n","sub_path":"wandoujia/utils/qidian8.py","file_name":"qidian8.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"625872232","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nimport sys\nimport json\nfrom datetime import datetime, time, timedelta\nimport re\n\ndef read_race_log(lines):\n    racers = {}\n    classifications = []\n    # processing log as list of lines(strings)\n    for line in lines:\n        # verify if log came as expected\n        # [TIMESTAMP, ID-PILOTO, LAP, LAPTIME, SPEED]\n        if len(line.split()) != 5:\n            raise NameError('LogFormat')\n        timestamp, racers_idname, lap, laptime, speed = line.split()\n        if len(racers_idname.split('-')) != 2:\n            raise NameError('LogFormat')\n        racers_name, racers_id = racers_idname.split('-')\n        current_lap = {\n            \"lap\": lap.strip(),\n            \"timestamp\": timestamp.strip(),\n            \"laptime\": laptime.strip(),\n            \"speed\": speed.strip()\n        }\n        if racers_id in racers.keys():\n            racers[racers_id][\"id\"] = racers_id.strip()\n            racers[racers_id][\"name\"] = racers_name.strip()\n            racers[racers_id][\"laps\"].append(current_lap)\n        else:\n            racers[racers_id] = {}\n            racers[racers_id][\"id\"] = racers_id.strip()\n            racers[racers_id][\"name\"] = racers_name.strip()\n            racers[racers_id][\"laps\"] = [current_lap]\n\n    classification = [racers[racer] for racer in racers.keys()]\n    return classification\n\ndef get_race_stats(classification):\n    race_duration = timedelta(seconds=0)\n    for racer in classification:\n        lastlap_timestamp = '99:99:99.999'\n        completed_laps = '0'\n        total_time = timedelta(seconds=0)\n        best_lap = timedelta(hours=999)\n        avg_speed = None\n        for lap in racer[\"laps\"]:\n            # get number of completed laps\n            if lap[\"lap\"] > completed_laps:\n                completed_laps = lap[\"lap\"]\n            # split laptime on ':' and '.' 
to create a timedelta for time manipulation\n minutes, seconds, microseconds = re.split(r'[:|\\.]', lap[\"laptime\"])\n laptime = timedelta(minutes = int(minutes), seconds = int(seconds),\n microseconds = int(microseconds)*1000)\n # get best lap\n if laptime < best_lap:\n best_lap = laptime\n # get racer total time\n aux_time = total_time + laptime\n total_time = aux_time\n # get last lap timestamp regestry\n if lap[\"lap\"] == '4':\n lastlap_timestamp = lap[\"timestamp\"]\n # get race duration based on the last lap of the last racer\n if total_time > race_duration:\n race_duration = total_time\n # get average speed\n speed = float(lap[\"speed\"].replace(',', '.'))\n avg_speed = ((avg_speed + speed) / 2) if avg_speed else speed\n racer[\"total_time\"] = total_time\n racer[\"avarege_speed\"] = avg_speed\n racer[\"lastlap_timestamp\"] = lastlap_timestamp\n racer[\"best_lap\"] = best_lap\n racer[\"completed_laps\"] = completed_laps\n return classification, race_duration\n\ndef get_classification(classification):\n # sort list of dictionary [https://wiki.python.org/moin/SortingListsOfDictionaries]\n sort_on = \"lastlap_timestamp\"\n decorated = [(dict_[sort_on], dict_) for dict_ in classification]\n decorated.sort()\n classification = [dict_ for (key, dict_) in decorated]\n return classification\n\ndef print_race_stats(classification, race_duration):\n position = 1\n print (\"Tempo de prova: {}\\n\".format(race_duration))\n print (\"{:<20}{:<20}{:<20}{:<20}{:<20}{:<20}{:<20} \".format(\"POSIÇÃO\", \"COD\",\n \"PILOTO\",\"VOLTAS COMPLETADAS\",\"TEMPO TOTAL\",\"VELOCIDADE MÉDIA\",\n \"MELHOR VOlTA\"))\n\n # get firt racer total time as reference time\n first_totaltime = classification[0][\"total_time\"]\n for racer in classification:\n # get difference time from first place\n first_diff = racer[\"total_time\"] - first_totaltime\n print (\"{:<20}{:<20}{:<20}{:<20}{:<20}{:<20}{:<20}{:<20} \".format(\n str(position) + \"º\", racer[\"id\"], racer[\"name\"],\n racer[\"completed_laps\"], racer[\"total_time\"],\n racer[\"avarege_speed\"], racer[\"best_lap\"], first_diff))\n position = position + 1\n\nif __name__ == '__main__':\n if (len(sys.argv) != 2):\n print (\"missing argument [race log]\")\n exit(2)\n\n race = open(sys.argv[1], 'r')\n # skipping header\n line = race.readline()\n\n lines = race.readlines()\n\n racers = read_race_log(lines)\n racers_stats, race_duration = get_race_stats(racers)\n\n classification = get_classification(racers_stats)\n\n print_race_stats(classification, race_duration)\n","sub_path":"corrida.py","file_name":"corrida.py","file_ext":"py","file_size_in_byte":4739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"485719111","text":"from BeautifulSoup import BeautifulSoup, Tag\nimport os\nimport xbmc\nimport util\n\ndef initDatabase():\n\taddon_work_folder = os.path.join(xbmc.translatePath( \"special://profile/addon_data/\" ), \"plugin.program.download-next-ep\")\n\tsoup = BeautifulSoup(\"\")\n\ttry:\n\t\tif not os.path.exists(addon_work_folder):\n\t\t\tos.makedirs(addon_work_folder)\n\t\tf = getDatabase(\"w\")\n\t\tf.write(soup.prettify())\n\t\tf.close()\n\t\t#util.msg(\"file write success\",\"wote file to:\\n\" +addon_work_folder )\n\texcept:\n\t\tutil.msg(localize(50000), localize(50003))\n\ndef addSerie(scraperid):\n\tmode = util.getMode()\n\txbmcid = mode.show.xbmcId\n\txbmcName = mode.show.titel\n\tf = getDatabase(\"r\")\n\tsoup = BeautifulSoup(f.read())\n\tf.close()\n\tserie = soup.findAll(scraperid = 
scraperid)\n\tif len(serie) == 0 :\n\t\tseries = soup.find(\"series\")\n\t\ttag = Tag(soup, \"serie\")\n\t\ttag.attrs.append(('xbmcid', xbmcid))\n\t\ttag.attrs.append(('scraperid', scraperid))\n\t\ttag.attrs.append(('xbmcname', xbmcName))\n\t\tseries.append(tag)\n\t\tf = getDatabase(\"w\")\n\t\tf.write(soup.prettify())\n\t\tf.close()\n\telif len(serie) == 1:\n\t\tserieTag = serie[0]\n\t\tsid = int(serieTag['xbmcid'])\n\t\txName = str(serieTag['xbmcname'])\n\t\tif xName != xbmcName or sid != xbmcid:\n\t\t\t#data is inconsistent\n\t\t\t#remove tag\n\t\t\tserieTag.extract()\n\t\t\tf = getDatabase(\"w\")\n\t\t\tf.write(soup.prettify())\n\t\t\tf.close()\n\t\t\t#add new tag\n\t\t\treturn addSerie(scraperid)\n\t\telse:\n\t\t\treturn\t\t#data is consistent no change is needed\n\telse:\n\t\t#more then one match for a serie\n\t\t#remove all matching tags\n\t\tfor item in serie:\n\t\t\titem.extract()\n\t\tf = getDatabase(\"w\")\n\t\tf.write(soup.prettify())\n\t\tf.close()\n\t\t#add new tag\n\t\treturn addSerie(scraperid)\n\t\t\n\t\t\ndef addEpisode(xbmcid, scraperid, snr,enr, title, airdate):\n\tf = getDatabase(\"r\")\n\tsoup = BeautifulSoup(f.read())\n\tf.close()\n\tserie = soup.find(scraperid = scraperid)\n\t#TODO check inconsistency\n\tif serie == None :\n\t\treturn False\n\tseason = serie.find(seasonnr = snr)\n\tif season == None:\n\t\ttag = Tag(soup, \"season\")\n\t\ttag.attrs.append(('seasonnr', snr))\n\t\tserie.append(tag)\n\t\tseason = serie.find(seasonnr = snr)\n\tif season == None:\n\t\tutil.msg(localize(50000), localize(50004))\n\t\treturn False\n\tepisode = season.find(episodenr = enr)\n\tif episode == None:\n\t\tepisodetag = Tag(soup, \"episode\")\n\t\tepisodetag.attrs.append(('episodenr', enr))\n\t\ttitletag = Tag(soup, \"title\")\n\t\ttitletag.insert(0,title)\n\t\tepisodetag.append(titletag)\n\t\tairdatetag = Tag(soup, \"airdate\")\n\t\tairdatetag.insert(0,airdate)\n\t\tepisodetag.append(airdatetag)\n\t\tseason.append(episodetag)\n\t\t\n\t\tf = getDatabase(\"w\")\n\t\tf.write(soup.prettify())\n\t\tf.close()\n\t#else:\n\t\t#check consistency\n\treturn True\n\t\ndef getDatabase(mode):\n\taddon_work_folder = os.path.join(xbmc.translatePath( \"special://profile/addon_data/\" ), \"plugin.program.download-next-ep\")\n\tf = open(addon_work_folder + os.sep + \"database.xml\", mode)\n\treturn f\n\n\ndef getScraperId(xbmcid):\n\tf = getDatabase(\"r\")\n\tsoup = BeautifulSoup(f.read())\n\tf.close()\n\tserie = soup.find(xbmcid = xbmcid)\n\tif serie == None:\n\t\treturn -1\n\treturn serie['scraperid']\n\t\ndef getEpisodes(scraperId):\n\tf = getDatabase(\"r\")\n\tsoup = BeautifulSoup(f.read())\n\tf.close()\n\tserie = soup.find(scraperid = scraperId)\n\tretlist = []\n\tseasons = serie.findAll('season')\n\tfor season in seasons:\n\t\tepisodes = season.findAll('episode')\n\t\tfor episode in episodes:\n\t\t\tepisodelist = [season['seasonnr'], episode['episodenr']]\n\t\t\tepisodelist.append(episode.find('title').getText())\n\t\t\tepisodelist.append(episode.find('airdate').getText())\n\t\t\tretlist.append(episodelist)\n\treturn retlist\n\t\t\n\t\ndef databaseExists():\n\taddon_work_folder = os.path.join(xbmc.translatePath( \"special://profile/addon_data/\" ), \"plugin.program.download-next-ep\")\n\tif not os.path.exists(addon_work_folder+ os.sep + \"database.xml\"):\n\t\treturn False\n\treturn 
True\n\t\n\n","sub_path":"resources/lib/databaseHandler.py","file_name":"databaseHandler.py","file_ext":"py","file_size_in_byte":3806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"233172946","text":"import signal\n\nimport encoder_mytrial\nimport time\nimport example_motor\nimport PID\nimport numpy as np\nimport math\nimport Lidar\nimport os\nimport RPi.GPIO as GPIO\nimport LocalizationClient as lc\n\ndef turn(angle):\n encoder.rightDistance = 0\n encoder.leftDistance = 0\n wheelBase = 21\n wheelRadius = 4\n print(\"rec dist : \" + str((math.pi * (angle * (math.pi / 180)) * wheelBase) / (2 * wheelRadius)))\n while (np.average([abs(encoder.leftDistance), abs(encoder.rightDistance)]) <\n ((math.pi * (angle * (math.pi / 180)) * wheelBase) / (2 * wheelRadius))):\n time.sleep(.05)\n measuredPhiDotLeft = -1 * encoder.getPhiDotLeft()\n measuredPhiDotRight = encoder.getPhiDotRight()\n\n PIDleft.control(-.8, measuredPhiDotLeft, motors.setPhiDotDesiredLeft)\n PIDRight.control(.8, measuredPhiDotRight, motors.setPhiDotDesiredRight)\n\n # motors.PID(1,measuredPhiDotLeft,1.5,motors.setPhiDotDesiredLeft)\n # motors.PID(1,measuredPhiDotRight, 1.5, motors.setPhiDotDesiredRight)\n print(\"Phi Dot Right is: \" + str(measuredPhiDotRight))\n print(\"Phi Dot Left is: \" + str(measuredPhiDotLeft))\n\n print(\"left Dist \" + str(encoder.leftDistance))\n print(\"right Dist \" + str(encoder.rightDistance))\n\n\ndef Forward(dist):\n encoder.rightDistance = 0\n encoder.leftDistance = 0\n while (np.average([abs(encoder.leftDistance), abs(encoder.rightDistance)]) < dist):\n time.sleep(.05)\n measuredPhiDotLeft = -1 * encoder.getPhiDotLeft()\n measuredPhiDotRight = encoder.getPhiDotRight()\n\n PIDleft.control(1.5, measuredPhiDotLeft, motors.setPhiDotDesiredLeft)\n PIDRight.control(1.5, measuredPhiDotRight, motors.setPhiDotDesiredRight)\n\n motors.PID(1, measuredPhiDotLeft, 1.5, motors.setPhiDotDesiredLeft)\n motors.PID(1, measuredPhiDotRight, 1.5, motors.setPhiDotDesiredRight)\n # print(\"Phi Dot Right is: \" + str(measuredPhiDotRight))\n # print(\"Phi Dot Left is: \" + str(measuredPhiDotLeft))\n\n print(\"left Dist \" + str(encoder.leftDistance))\n print(\"rightt Dist \" + str(encoder.rightDistance))\n\ndef testEncoder():\n lastX = 0\n lastY = 0\n lastTheta = 0\n\n\n\n while True:\n delx = encoder.x - lastX\n dely = encoder.y - lastY\n delTheta = encoder.theata - lastTheta\n\n print(\"del x is: \" + str(delx))\n print(\"del y is: \" + str(dely))\n print(\"del theta is: \" + str(np.degrees(delTheta)))\n\n lastX = encoder.x\n lastY = encoder.y\n lastTheta = encoder.theata\n\n time.sleep(10)\n\nif __name__ == \"__main__\":\n\n cell_resolution = 50\n\n x = 5 * cell_resolution\n y = 45 * cell_resolution\n th = -90 # must send over -th matlab is clockwise python is counter\n\n # Define Deltas (Not Constant in Real Life)\n dx = 0 * cell_resolution\n dy = 0 * cell_resolution\n dth = 0\n\n # Instantiate Particle Filter\n\n\n lo_c = lc.LocalizationClient()\n lo_c.sendData(np.array([float(x), float(y), float(th)]))\n\n try:\n encoder = encoder_mytrial.Encoder()\n encoder.start()\n\n motors = example_motor.Motor()\n\n PIDleft = PID.PID()\n PIDleft.Kd = 0\n PIDleft.Ki = .25\n PIDleft.Kp = .25\n\n PIDRight = PID.PID()\n PIDRight.Kd = 0\n PIDRight.Ki = .25\n PIDRight.Kp = .25\n\n lidar = Lidar.Lidar()\n lidar.start()\n\n measures = []\n for x in range(50):\n\n measures = lidar.measures\n measures = np.append([float(dx), float(dy), float(dth)], measures)\n\n 
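            # (Editor's note) Each packet sent to the localization server below is\n            # the 3-element odometry delta [dx, dy, dth] prepended to the raw lidar\n            # scan, e.g. np.append([0.0, 0.0, 0.0], scan) -> one flat float array\n            # (where `scan` stands for the lidar measurement list).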
lo_c.sendData(measures)\n\n            time.sleep(.2)\n\n\n\n\n\n\n\n    except KeyboardInterrupt:\n\n        GPIO.cleanup()\n        lo_c.close()\n        print(\"Killed\")\n        #os.killpg(1, signal.SIGTERM)\n        #exit(1)\n\n\n\n\n\n\n","sub_path":"Driver.py","file_name":"Driver.py","file_ext":"py","file_size_in_byte":3781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"534237393","text":"# -*- coding: utf-8 -*-\n# /usr/bin/env python2.7 + brainvisa compliant env\n#\n# This software and supporting documentation are distributed by\n# Institut Federatif de Recherche 49\n# CEA/NeuroSpin, Batiment 145,\n# 91191 Gif-sur-Yvette cedex\n# France\n#\n# This software is governed by the CeCILL license version 2 under\n# French law and abiding by the rules of distribution of free software.\n# You can use, modify and/or redistribute the software under the\n# terms of the CeCILL license version 2 as circulated by CEA, CNRS\n# and INRIA at the following URL \"http://www.cecill.info\".\n#\n# As a counterpart to the access to the source code and rights to copy,\n# modify and redistribute granted by the license, users are provided only\n# with a limited warranty and the software's author, the holder of the\n# economic rights, and the successive licensors have only limited\n# liability.\n#\n# In this respect, the user's attention is drawn to the risks associated\n# with loading, using, modifying and/or developing or reproducing the\n# software by the user in light of its specific status of free software,\n# that may mean that it is complicated to manipulate, and that also\n# therefore means that it is reserved for developers and experienced\n# professionals having in-depth computer knowledge. Users are therefore\n# encouraged to load and test the software's suitability as regards their\n# requirements in conditions enabling the security of their systems and/or\n# data to be ensured and, more generally, to use and operate it in the\n# same conditions as regards security.\n#\n# The fact that you are presently reading this means that you have had\n# knowledge of the CeCILL license version 2 and that you accept its terms.\n\n\"\"\"Creating pickle file from T1 MRI data\n\nThe aim of this script is to create a dataset of cropped skeletons from MRIs,\nsaved in a .pickle file.\nSeveral steps are required: normalization, cropping and .pickle generation\n\n  Typical usage\n  -------------\n  You can use this program by first entering the brainvisa environment\n  (here brainvisa 5.0.0 installed with singularity) and launching the script\n  from the terminal:\n  >>> bv bash\n  >>> python dataset_gen_pipe.py\n\n  Alternatively, you can launch the script in the interactive terminal ipython:\n  >>> %run dataset_gen_pipe.py\n\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\nimport os\nfrom os import listdir\nfrom os.path import join\n\nimport numpy as np\n\nimport six\n\nfrom deep_folding.anatomist_tools.utils.logs import LogJson\nfrom deep_folding.anatomist_tools.utils.load_bbox import compute_max_box\nfrom deep_folding.anatomist_tools.utils.resample import resample\nfrom deep_folding.anatomist_tools.utils.sulcus_side import complete_sulci_name\nfrom deep_folding.anatomist_tools.load_data import fetch_data\n\n_ALL_SUBJECTS = -1\n\n_SIDE_DEFAULT = 'L' # hemisphere 'L' or 'R'\n\n_INTERP_DEFAULT = 'nearest' # default interpolation for ApplyAimsTransform\n\n_RESAMPLING_DEFAULT = None # if None, resampling method is AimsApplyTransform\n\n_OUT_VOXEL_SIZE = (1, 
1, 1) # default output voxel size for Bastien's resampling\n\n# sulcus to encompass:\n# its name depends on the hemisphere side\n_SULCUS_DEFAULT = 'S.T.s.ter.asc.ant.'\n\n# Input directories\n# -----------------\n\n# Input directory containing the morphologist analysis of the HCP database\n_SRC_DIR_DEFAULT = '/neurospin/hcp'\n\n# Directory where subjects to be processed are stored.\n# Default is for HCP dataset\n_MORPHOLOGIST_DIR_DEFAULT = 'ANALYSIS/3T_morphologist'\n\n# Directory that contains the transformation file\n# from native to MNI through SPM\n# These files have been created with spm_skeleton\n_TRANSFORM_DIR_DEFAULT = '/neurospin/dico/data/deep_folding/data/transform'\n\n# Directory containing bounding box json files\n# default corresponds to bounding boxes computed for voxels of 1mm\n_BBOX_DIR_DEFAULT = '/neurospin/dico/data/deep_folding/data/bbox'\n\n# Output (target) directory\n# -------------------------\n_TGT_DIR_DEFAULT = '/neurospin/dico/data/deep_folding/test'\n\n\nclass DatasetCroppedSkeleton:\n    \"\"\"Generates cropped skeleton files and corresponding pickle file\n    \"\"\"\n\n    def __init__(self, src_dir=_SRC_DIR_DEFAULT,\n                 tgt_dir=_TGT_DIR_DEFAULT,\n                 transform_dir=_TRANSFORM_DIR_DEFAULT,\n                 bbox_dir=_BBOX_DIR_DEFAULT,\n                 morphologist_dir=_MORPHOLOGIST_DIR_DEFAULT,\n                 list_sulci=_SULCUS_DEFAULT,\n                 side=_SIDE_DEFAULT,\n                 interp=_INTERP_DEFAULT,\n                 resampling=_RESAMPLING_DEFAULT,\n                 out_voxel_size=_OUT_VOXEL_SIZE):\n        \"\"\"Inits with list of directories and list of sulci\n\n        Args:\n            src_dir: list of strings naming full-path source directories,\n            containing MRI images\n            tgt_dir: name of target (output) directory with full path\n            transform_dir: directory containing transformation files\n            (generated using transform.py)\n            bbox_dir: directory containing bbox json files\n            (generated using bounding_box.py)\n            list_sulci: list of sulcus names\n            side: hemisphere side (either L for left, or R for right hemisphere)\n            interp: string giving interpolation for AimsApplyTransform\n        \"\"\"\n\n        self.src_dir = src_dir\n        self.side = side\n        # Turns a single sulcus into a list of sulci\n        self.list_sulci = ([list_sulci] if isinstance(list_sulci, str)\n                           else list_sulci)\n        self.list_sulci = complete_sulci_name(self.list_sulci, self.side)\n\n        self.tgt_dir = tgt_dir\n        self.transform_dir = transform_dir\n        self.bbox_dir = bbox_dir\n        self.morphologist_dir = morphologist_dir\n        self.interp = interp\n        self.resampling = resampling\n        self.out_voxel_size = out_voxel_size\n\n        # Morphologist directory\n        self.morphologist_dir = join(self.src_dir, self.morphologist_dir)\n        # default acquisition subdirectory\n        self.acquisition_dir = \"%(subject)s/t1mri/default_acquisition\"\n        # (input) name of normalized SPM file\n        self.normalized_spm_file = \"normalized_SPM_%(subject)s.nii\"\n\n        # Directory where to store cropped files\n        self.cropped_dir = join(self.tgt_dir, self.side + 'crops')\n\n        # File names as a function of a dictionary with keys 'subject' and 'side'\n        # Files from morphologist pipeline\n        self.normalized_spm_file = 'normalized_SPM_%(subject)s.nii'\n        self.skeleton_file = 'default_analysis/segmentation/' \\\n                             '%(side)sskeleton_%(subject)s.nii.gz'\n\n        # File names as a function of a dictionary with keys 'subject' and 'side'\n        self.transform_file = 'natif_to_template_spm_%(subject)s.trm'\n        self.cropped_file = '%(subject)s_normalized.nii.gz'\n\n        # Initialization of bounding box coordinates\n        self.bbmin = np.zeros(3)\n        self.bbmax = np.zeros(3)\n\n        # Creates json log class\n        json_file = join(self.tgt_dir, self.side + 'dataset.json')
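        # (Editor's note) join() only adds separators between its arguments, so the\n        # concatenation above yields e.g. '<tgt_dir>/Ldataset.json' for side 'L':\n        # the side letter is glued directly onto 'dataset.json', with no separator.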
self.json = LogJson(json_file)\n\n def crop_one_file(self, subject_id):\n \"\"\"Crops one file\n\n Args:\n subject_id: string giving the subject ID\n \"\"\"\n\n # Identifies 'subject' in a mapping (for file and directory namings)\n subject = {'subject': subject_id, 'side': self.side}\n print(subject_id)\n\n # Names directory where subject analysis files are stored\n subject_dir = \\\n join(self.morphologist_dir, self.acquisition_dir % subject)\n\n # Transformation file name\n file_transform = join(self.transform_dir, self.transform_file % subject)\n\n # Normalized SPM file name\n file_SPM = join(subject_dir, self.normalized_spm_file % subject)\n\n # Skeleton file name\n file_skeleton = join(subject_dir, self.skeleton_file % subject)\n if os.path.exists(file_skeleton) and os.path.exists(file_transform):\n # Creates output (cropped) file name\n file_cropped = join(self.cropped_dir, self.cropped_file % subject)\n\n # Normalization and resampling of skeleton images\n if self.resampling:\n resample(file_skeleton,\n file_cropped,\n output_vs=self.out_voxel_size,\n transformation=file_transform)\n\n else :\n cmd_normalize = 'AimsApplyTransform' + \\\n ' -i ' + file_skeleton + \\\n ' -o ' + file_cropped + \\\n ' -m ' + file_transform + \\\n ' -r ' + file_SPM + \\\n ' -t ' + self.interp\n os.system(cmd_normalize)\n\n # Take the coordinates of the bounding box\n bbmin = self.bbmin\n bbmax = self.bbmax\n xmin, ymin, zmin = str(bbmin[0]), str(bbmin[1]), str(bbmin[2])\n xmax, ymax, zmax = str(bbmax[0]), str(bbmax[1]), str(bbmax[2])\n\n # Crop of the images based on bounding box\n cmd_bounding_box = ' -x ' + xmin + ' -y ' + ymin + ' -z ' + zmin + \\\n ' -X ' + xmax + ' -Y ' + ymax + ' -Z ' + zmax\n cmd_crop = 'AimsSubVolume' + \\\n ' -i ' + file_cropped + \\\n ' -o ' + file_cropped + cmd_bounding_box\n os.system(cmd_crop)\n\n def crop_files(self, number_subjects=_ALL_SUBJECTS):\n \"\"\"Crop nii files\n\n The programm loops over all subjects from the input (source) directory.\n\n Args:\n number_subjects: integer giving the number of subjects to analyze,\n by default it is set to _ALL_SUBJECTS (-1).\n \"\"\"\n\n if number_subjects:\n\n # subjects are detected as the directory names under src_dir\n list_all_subjects = [dI for dI in os.listdir(self.morphologist_dir)\\\n if os.path.isdir(os.path.join(self.morphologist_dir,dI))]\n\n # Gives the possibility to list only the first number_subjects\n list_subjects = (\n list_all_subjects\n if number_subjects == _ALL_SUBJECTS\n else list_all_subjects[:number_subjects])\n\n # Creates target and cropped directory\n if not os.path.exists(self.tgt_dir):\n os.makedirs(self.tgt_dir)\n if not os.path.exists(self.cropped_dir):\n os.makedirs(self.cropped_dir)\n\n # Writes number of subjects and directory names to json file\n dict_to_add = {'nb_subjects': len(list_subjects),\n 'src_dir': self.src_dir,\n 'transform_dir': self.transform_dir,\n 'bbox_dir': self.bbox_dir,\n 'side': self.side,\n 'interp': self.interp,\n 'list_sulci': self.list_sulci,\n 'bbmin': self.bbmin.tolist(),\n 'bbmax': self.bbmax.tolist(),\n 'tgt_dir': self.tgt_dir,\n 'cropped_dir': self.cropped_dir,\n 'resampling_type': 'AimsApplyTransform' if self.resampling is None else 'Bastien',\n 'out_voxel_size': self.out_voxel_size\n }\n self.json.update(dict_to_add=dict_to_add)\n\n for subject in list_subjects:\n self.crop_one_file(subject)\n\n def dataset_gen_pipe(self, number_subjects=_ALL_SUBJECTS):\n \"\"\"Main API to create pickle files\n\n The programm loops over all subjects from the input (source) 
directory.\n\n Args:\n number_subjects: integer giving the number of subjects to analyze,\n by default it is set to _ALL_SUBJECTS (-1).\n \"\"\"\n\n self.json.write_general_info()\n\n # Generate cropped files\n if number_subjects:\n self.bbmin, self.bbmax = compute_max_box(sulci_list=self.list_sulci,\n side=self.side,\n talairach_box=False,\n src_dir=self.bbox_dir)\n # Generate cropped files\n self.crop_files(number_subjects=number_subjects)\n # Creation of .pickle file for all subjects\n if number_subjects:\n fetch_data(cropped_dir=self.cropped_dir,\n tgt_dir=self.tgt_dir,\n side=self.side)\n\n\ndef parse_args(argv):\n \"\"\"Function parsing command-line arguments\n\n Args:\n argv: a list containing command line arguments\n\n Returns:\n params: dictionary with keys: src_dir, tgt_dir, nb_subjects, list_sulci\n \"\"\"\n\n # Parse command line arguments\n parser = argparse.ArgumentParser(\n prog='dataset_gen_pipe.py',\n description='Generates cropped and pickle files')\n parser.add_argument(\n \"-s\", \"--src_dir\", type=str, default=_SRC_DIR_DEFAULT,\n help='Source directory where the MRI data lies. '\n 'Default is : ' + _SRC_DIR_DEFAULT)\n parser.add_argument(\n \"-t\", \"--tgt_dir\", type=str, default=_TGT_DIR_DEFAULT,\n help='Target directory where to store the cropped and pickle files. '\n 'Default is : ' + _TGT_DIR_DEFAULT)\n parser.add_argument(\n \"-r\", \"--transform_dir\", type=str, default=_TRANSFORM_DIR_DEFAULT,\n help='Transform directory where transformation files from native '\n 'to Talairach files have been stored. '\n 'Default is : ' + _TRANSFORM_DIR_DEFAULT)\n parser.add_argument(\n \"-b\", \"--bbox_dir\", type=str, default=_BBOX_DIR_DEFAULT,\n help='Bounding box directory where json files containing '\n 'bounding box coordinates have been stored. '\n 'Default is : ' + _BBOX_DIR_DEFAULT)\n parser.add_argument(\n \"-m\", \"--morphologist_dir\", type=str, default=_MORPHOLOGIST_DIR_DEFAULT,\n help='Directory where subjects to be processed are stored')\n parser.add_argument(\n \"-u\", \"--sulcus\", type=str, default=_SULCUS_DEFAULT, nargs='+',\n help='Sulcus name around which we determine the bounding box. '\n 'If there are several sulci, add all sulci '\n 'one after the other. Example: -u sulcus_1 sulcus_2 '\n 'Default is : ' + _SULCUS_DEFAULT)\n parser.add_argument(\n \"-i\", \"--side\", type=str, default=_SIDE_DEFAULT,\n help='Hemisphere side (either L or R). Default is : ' + _SIDE_DEFAULT)\n parser.add_argument(\n \"-n\", \"--nb_subjects\", type=str, default=\"all\",\n help='Number of subjects to take into account, or \\'all\\'. '\n '0 subject is allowed, for debug purpose.'\n 'Default is : all')\n parser.add_argument(\n \"-e\", \"--interp\", type=str, default=_INTERP_DEFAULT,\n help=\"Same interpolation type as for AimsApplyTransform. \"\n \"Type of interpolation used for Volumes: \"\n \"n[earest], l[inear], q[uadratic], c[cubic], quartic, \"\n \"quintic, six[thorder], seven[thorder]. \"\n \"Modes may also be specified as order number: \"\n \"0=nearest, 1=linear...\")\n parser.add_argument(\n \"-p\", \"--resampling\", type=str, default=None,\n help='Method of resampling to perform. 
'\n 'Type of resampling: '\n 's[ulcus] for Bastien method'\n 'If None, AimsApplyTransform is used.'\n 'Default is : None')\n parser.add_argument(\n \"-v\", \"--out_voxel_size\", type=int, nargs='+', default=_OUT_VOXEL_SIZE,\n help='Voxel size of output images'\n 'Default is : 1 1 1')\n\n params = {}\n\n args = parser.parse_args(argv)\n params['src_dir'] = args.src_dir\n params['tgt_dir'] = args.tgt_dir\n params['bbox_dir'] = args.bbox_dir\n params['transform_dir'] = args.transform_dir\n params['list_sulci'] = args.sulcus # a list of sulci\n params['side'] = args.side\n params['interp'] = args.interp\n params['resampling'] = args.resampling\n params['out_voxel_size'] = tuple(args.out_voxel_size)\n params['morphologist_dir'] = args.morphologist_dir\n\n number_subjects = args.nb_subjects\n\n # Check if nb_subjects is either the string \"all\" or a positive integer\n try:\n if number_subjects == \"all\":\n number_subjects = _ALL_SUBJECTS\n else:\n number_subjects = int(number_subjects)\n if number_subjects < 0:\n raise ValueError\n except ValueError:\n raise ValueError(\n \"number_subjects must be either the string \\\"all\\\" or an integer\")\n params['nb_subjects'] = number_subjects\n\n return params\n\n\ndef dataset_gen_pipe(src_dir=_SRC_DIR_DEFAULT, tgt_dir=_TGT_DIR_DEFAULT,\n transform_dir=_TRANSFORM_DIR_DEFAULT,\n bbox_dir=_BBOX_DIR_DEFAULT,\n morphologist_dir=_MORPHOLOGIST_DIR_DEFAULT,\n side=_SIDE_DEFAULT, list_sulci=_SULCUS_DEFAULT,\n number_subjects=_ALL_SUBJECTS, interp=_INTERP_DEFAULT,\n resampling=_RESAMPLING_DEFAULT,\n out_voxel_size=_OUT_VOXEL_SIZE):\n \"\"\"Main program generating cropped files and corresponding pickle file\n \"\"\"\n\n dataset = DatasetCroppedSkeleton(src_dir=src_dir, tgt_dir=tgt_dir,\n transform_dir=transform_dir,\n bbox_dir=bbox_dir,\n morphologist_dir=morphologist_dir,\n side=side, list_sulci=list_sulci,\n interp=interp, resampling=resampling,\n out_voxel_size=out_voxel_size)\n dataset.dataset_gen_pipe(number_subjects=number_subjects)\n\n\ndef main(argv):\n \"\"\"Reads argument line and creates cropped files and pickle file\n\n Args:\n argv: a list containing command line arguments\n \"\"\"\n\n # This code permits to catch SystemExit with exit code 0\n # such as the one raised when \"--help\" is given as argument\n try:\n # Parsing arguments\n params = parse_args(argv)\n # Actual API\n dataset_gen_pipe(src_dir=params['src_dir'],\n tgt_dir=params['tgt_dir'],\n transform_dir=params['transform_dir'],\n bbox_dir=params['bbox_dir'],\n morphologist_dir=params['morphologist_dir'],\n side=params['side'],\n list_sulci=params['list_sulci'],\n interp=params['interp'],\n number_subjects=params['nb_subjects'],\n resampling=params['resampling'],\n out_voxel_size=params['out_voxel_size'])\n except SystemExit as exc:\n if exc.code != 0:\n six.reraise(*sys.exc_info())\n\n\n######################################################################\n# Main program\n######################################################################\n\nif __name__ == '__main__':\n # This permits to call main also from another python program\n # without having to make system calls\n main(argv=sys.argv[1:])\n","sub_path":"deep_folding/anatomist_tools/dataset_gen_pipe.py","file_name":"dataset_gen_pipe.py","file_ext":"py","file_size_in_byte":19465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"478160551","text":"\"\"\"Support for handling datapane script config\"\"\"\nimport dataclasses as dc\nimport json\nimport os\nimport re\nfrom pathlib 
import Path\nfrom typing import ClassVar, List, Optional\n\nimport dacite\nimport importlib_resources as ir\nimport jsonschema\nimport stringcase\nimport yaml\n\nfrom datapane.client import DPError\nfrom datapane.client.utils import MissingCloudPackagesError\nfrom datapane.common import SDict, SSDict, log, utf_read_text\n\n# app paths\nDATAPANE_YAML = Path(\"datapane.yaml\")\nPYPROJECT_TOML = Path(\"pyproject.toml\")\nDEFAULT_PY = Path(\"dp_script.py\")\nDEFAULT_IPYNB = Path(\"dp_script.ipynb\")\nre_check_name = re.compile(r\"^[\\S ]+$\")\n\n# TODO - look at other libs\n# - https://lidatong.github.io/dataclasses-json/ (or marshmallow)\n# - https://github.com/edaniszewski/bison\n# - https://pydantic-docs.helpmanual.io/\n\n# TODO\n# - add support to extract elements from inline and pyproject\n# - use datapane.py by default as script\n# - convert notebook (use 1st markdown as docstring?)\n\n\ndef generate_name(postfix: str) -> str:\n dir_name = stringcase.titlecase(os.path.basename(os.getcwd()))\n return f\"{dir_name} {postfix}\"\n\n\n# TODO(obsolete) - not really needed now, can remove in future\ndef validate_name(x: str):\n if re_check_name.match(x) is None:\n raise DPError(f\"'{x}' is not a valid service name\")\n\n\n@dc.dataclass\nclass DatapaneCfg:\n \"\"\"Wrapper around the datapane script config\"\"\"\n\n name: str = \"datapane\"\n # relative path to script\n script: Path = dc.field(default_factory=lambda: DEFAULT_IPYNB if DEFAULT_IPYNB.exists() else DEFAULT_PY)\n config: dc.InitVar[Path] = None\n proj_dir: ClassVar[Path] = None\n\n # run options\n container_image_name: str = \"\"\n # docker_image: Optional[str] = None\n parameters: List[SDict] = dc.field(default_factory=list)\n pre_commands: List[str] = dc.field(default_factory=list)\n post_commands: List[str] = dc.field(default_factory=list)\n\n # environment variables\n env: List[SSDict] = dc.field(default_factory=list)\n # metadata\n description: str = \"Datapane Script\"\n source_url: str = \"\"\n group: Optional[str] = None\n\n # build options\n include: List[str] = dc.field(default_factory=list)\n exclude: List[str] = dc.field(default_factory=list)\n requirements: List[str] = dc.field(default_factory=list)\n\n def __post_init__(self, config: Optional[Path]):\n validate_name(self.name)\n\n # TODO - support running config/script from another dir with abs paths\n # all paths are relative and we're running from the same dir\n if config:\n assert config.parent == self.script.parent == Path(\".\"), \"All files must be in the main project directory\"\n # we must be in the project dir for now\n # TODO - move this logic to create_initial\n self.proj_dir = self.script.resolve(strict=False).parent\n assert os.getcwd() == os.path.abspath(self.proj_dir), \"Please run from source directory\"\n\n # # config and script dir must be in same dir\n # if config:\n # assert config.resolve(strict=True).parent == self.proj_dir, \\\n # \"Config and Script directory must be in same directory\"\n\n # validate config\n if self.parameters:\n config_schema = json.loads(ir.read_text(\"datapane.resources\", \"script_parameter_def.schema.json\"))\n jsonschema.validate(self.parameters, config_schema)\n\n @classmethod\n def create_initial(cls, config_file: Path = None, script: Path = None, **kw) -> \"DatapaneCfg\":\n raw_config = {}\n\n if config_file:\n assert config_file.exists()\n else:\n config_file = DATAPANE_YAML\n\n if config_file.exists():\n # read config from the yaml file\n log.debug(f\"Reading datapane config file at {config_file}\")\n with 
config_file.open(\"r\") as f:\n raw_config = yaml.safe_load(f)\n elif PYPROJECT_TOML.exists():\n # TODO - implement pyproject parsing\n log.warning(\"pyproject.toml found but not currently supported - ignoring\")\n raw_config = {}\n elif script:\n # we don't have a default config - perhaps in the script file\n # TODO - try read config from source-code\n abs_script = config_file.parent / script\n if script.suffix == \".ipynb\":\n log.debug(\"Converting notebook\")\n mod_code = extract_py_notebook(abs_script)\n else:\n mod_code = utf_read_text(abs_script)\n log.debug(\"Reading config from python script/notebook\")\n log.debug(mod_code)\n\n # overwrite config with command-line options\n if script:\n raw_config.update(script=script)\n raw_config.update(kw)\n readme = config_file.parent / \"README.md\"\n if readme.exists():\n raw_config[\"description\"] = utf_read_text(readme)\n elif \"description\" not in raw_config:\n raw_config[\"description\"] = cls.description\n\n dp_cfg = dacite.from_dict(cls, data=raw_config, config=dacite.Config(cast=[Path]))\n return dp_cfg\n\n @classmethod\n def create(cls, **raw_config) -> \"DatapaneCfg\":\n return dacite.from_dict(cls, data=raw_config, config=dacite.Config(cast=[Path]))\n\n @staticmethod\n def exists() -> bool:\n check_files = [DATAPANE_YAML, PYPROJECT_TOML, DEFAULT_PY, DEFAULT_IPYNB]\n return any(x.exists() for x in check_files)\n\n def to_dict(self) -> SDict:\n d = dc.asdict(self)\n # TODO - make script a getter/setter\n d[\"script\"] = str(d[\"script\"]) # this is hacky - need a better DTO-conversion method\n build_fields = {\"include\", \"exclude\"}\n return {k: v for k, v in d.items() if k not in build_fields}\n\n\ndef extract_py_notebook(in_file: Path) -> str:\n \"\"\"Extract the python code from a given notebook\"\"\"\n try:\n import nbconvert\n import nbformat\n from traitlets.config import Config as TConfig\n except ImportError:\n raise MissingCloudPackagesError()\n\n # we use config for importing\n c = TConfig()\n # c.PythonExporter.preprocessors = []\n # load the notebook\n notebook: nbformat.NotebookNode = nbformat.read(str(in_file), as_version=nbformat.NO_CONVERT)\n # TODO - any preprocessing here\n # convert it\n conv = nbconvert.PythonExporter(config=c)\n (body, resources) = conv.from_notebook_node(notebook)\n # (body, resources) = conv.from_filename(str(in_file))\n # write the notebook\n # writer = nbconvert.writers.FilesWriter()\n # writer.write(body, resources, \"output_notebook\")\n\n # TODO - use nbconvert jinja support once v6 supports custom templates properly\n # postprocess body\n # we need to mock get_ipython as nbconvert doesn't comment it out\n header = \"\"\"# inject get_ipython mock for magic functions\nfrom unittest.mock import Mock\nget_ipython = Mock()\n\"\"\"\n\n script = header + body\n return script\n","sub_path":"src/datapane/client/scripts/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":6977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"142802382","text":"def getSequence(a, b, n, m):\n \n # if a[i] == b[j] then c[i][j] = c[i - 1][j - 1] + 1\n # else z[k] = max(c[i][j - 1], c[i - 1][j])\n \n c = [[0 for i in range(m + 1)] for j in range(n + 1)]\n c[0][0] = 0 if a[0] == b[0] else 0\n \n for i in range(0, n + 1):\n for j in range(0, m + 1):\n \n if i == 0 or j == 0:\n c[i][j] = 0\n elif a[i - 1] == b[j - 1]:\n c[i][j] = c[i - 1][j - 1] + 1\n else:\n c[i][j] = max(c[i][j - 1], c[i - 1][j])\n \n i = n\n j = m\n l = [0]*c[i][j]\n \n while 
i > 0 and j > 0:\n        if a[i - 1] == b[j - 1]:\n            l[c[i][j] - 1] = a[i - 1]\n            i -= 1\n            j -= 1\n        else:\n            if c[i - 1][j] > c[i][j - 1]:\n                i -= 1\n            else:\n                j -= 1\n    \n    return ' '.join(str(e) for e in l)\n\nif __name__ == '__main__':\n    f = open(\"./longest-common-subsequent.txt\")\n\n    vals = f.readline().rstrip('\n').split(' ')\n    n, m = [int(val) for val in vals]\n    \n    vals = f.readline().rstrip('\n').split(' ')\n    a = [int(val) for val in vals]\n    \n    vals = f.readline().rstrip('\n').split(' ')\n    b = [int(val) for val in vals]\n    \n    \n    print(getSequence(a, b, n, m))\n","sub_path":"hackerrank/lcs.py","file_name":"lcs.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"452826277","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 8 17:02:55 2016\n\n@author: Administrator\n\nclass\n\n\"\"\"\n\nclass Student(object):\n    '''This is a student class'''\n    \n    # By default every member is public; prefixing an attribute or method name with a double underscore '__' makes it private. Private members cannot be reached from outside the class, but they can be accessed through public methods.\n    # A single leading underscore '_' means: I can be accessed, but please treat me as a private variable and do not touch me casually.\n    city = 'shanghai'\n    # __init__ is the constructor\n    # every method of a Python class must declare a self parameter; it is not passed explicitly by the caller\n    def __init__(self, name, score):\n        'The constructor initialises the data'\n        self.name = name\n        self.__score = score\n    \n    def print_score(self):\n        print('%s-->%s : %s' % (self.city, self.name, self.__score))\n    \n    def get_grade(self):\n        if self.__score >= 90 :\n            return 'A'\n        elif self.__score >= 60:\n            return 'B'\n        else:\n            return 'C'\n    \nAmc = Student('Amc', 80)\nAmc.age = 22\nJoker = Student('Joker', 60)\n\nAmc.print_score() \nJoker.print_score()\nprint(Student.__doc__)\nprint(Amc.city)\nprint(Joker.get_grade())\nprint(Amc.age) # attributes can be bound to a single instance\n#print(Joker.score) # private attributes cannot be accessed outside the class\n#print(Joker.age) # 'Student' object has no attribute 'age'\n\n\nclass Fish(object):\n    def __init__(self):\n        self.name = 'amc'\n    def say_hello(self):\n        print('Hello %s' % self.name)\nclass Goldfish(Fish):\n    pass\n    \nclass Shark(Fish):\n    def __init__(self):\n        # to keep a subclass constructor with the same name from hiding the parent constructor, call the parent explicitly and pass self\n        #Fish.__init__(self)\n        # super() has the same effect without passing self; it is preferred because it gives lower coupling\n        # just call whichever parent method you do not want to be overridden\n        super().__init__()\n        self.color = 'white'\n    \ngoldfish = Goldfish()\ngoldfish.say_hello()\n\nshark = Shark()\nshark.say_hello()","sub_path":"class_demo.py","file_name":"class_demo.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"515076018","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 23 11:52:43 2018\n\n@author: bramv\n\"\"\"\nimport os\nimport numpy as np\nimport pyproj\nimport pickle\nimport matplotlib.pyplot as plt\n\n\n\n\nR_earth = 6370e3\n\n\nclass Reproject():\n    def __init__(self, proj_input, proj_output):\n        self.data_mapping_info_filename = './Exported_files/data_mapping_info_'+proj_input.srs+'_'+proj_output.srs+'.pkl'\n        os.makedirs(os.path.dirname(self.data_mapping_info_filename), exist_ok = True)\n        \n        self.data_mapping_info_parameters = ['input_metadata', 'output_metadata', 'output_ybounds', 'output_xbounds', 'output_dy', 'output_dx', 'output_shape','output_grid', 'inside_input_grid', 'input_rows', 'input_columns','inside_output_grid', 'output_rows', 'output_columns']\n        for j in self.data_mapping_info_parameters:\n            exec('self.' 
+ j + ' = {}')\n \n self.proj_output = proj_output\n self.proj_input = proj_input\n \n self.import_data_mapping_info()\n \n \n \n def reproject(self, input_data, input_metadata, output_metadata, method = '2-way'):\n \"\"\"\n Input:\n - data should be a 2D array that contains the input data.\n - input_metadata should be a dictionary of the form \n {'input_grid': [2D array with x coordinates of grid cells, 2D array with y coordinates of grid cells], \n 'dx': Grid cell width in x direction, 'dy': Grid cell width in y direction}.\n - output_metadata should be a dictionary in which either 'output_grid' or 'output_dx' and 'output_dy' are specified.\n If output_grid is specified, then it should have be of the form [2D array with x coordinates, 2D array with y coordinates].\n If output_dx and output_dy are specified, then these should be numbers that represent the grid cell width of the output grid in both the\n x and y direction.\n - method: Either '1-way' or '2-way'. If '1-way', then each grid cell in the output grid gets assigned the value of the nearest grid cell in the input grid.\n If '2-way', then in addition to the '1-way' step there is also a second step in which each input grid cell is mapped onto the nearest output grid cell, and \n the output grid cell gets assigned the average value of all input grid cells that are mapped onto it. \n \n Output:\n - Array with reprojected data\n - x boundaries of the output grid\n - y boundaries of the output grid\n \"\"\"\n self.input_data = input_data\n self.method = method\n \n #First check whether there is a need to update parameters that contain info about the data mappings\n input_metadata['nrows'] = self.input_data.shape[0]\n input_metadata['ncols'] = self.input_data.shape[1]\n proj_changed = (list(input_metadata) != list(self.input_metadata) or\\\n any([input_metadata[i] != self.input_metadata[i] if not isinstance(input_metadata[i] != self.input_metadata[i], np.ndarray) else\\\n (input_metadata[i] != self.input_metadata[i]).any() for i in input_metadata])) or\\\n (list(output_metadata) != list(self.output_metadata) or\\\n any([output_metadata[i] != self.output_metadata[i] if not isinstance(output_metadata[i] != self.output_metadata[i], np.ndarray) else\\\n (output_metadata[i] != self.output_metadata[i]).any() for i in output_metadata]))\n \n \n self.input_metadata = input_metadata.copy() \n self.output_metadata = output_metadata.copy()\n \n if proj_changed:\n self.input_grid = input_metadata['input_grid']\n self.inputgrid_onto_outputgrid = self.map_inputgrid_onto_outputgrid()\n \n \n if not 'output_grid' in self.output_metadata:\n self.output_dx, self.output_dy = self.input_metadata['output_dx'], self.input_metadata['output_dy']\n if proj_changed:\n self.output_xbounds, self.output_ybounds, self.output_shape = self.get_output_grid_info()\n self.output_grid = self.get_output_grid()\n else:\n self.output_grid = self.output_metadata['output_grid']\n self.output_shape = self.output_grid[0].shape\n self.output_dx, self.output_dy = self.output_grid[0][0, 1] - self.output_grid[0][0, 0], self.output_grid[1][0, 0] - self.output_grid[1][1, 0]\n self.output_xbounds = np.array([np.floor(self.output_grid[0].min() / self.output_dx) * self.output_dx, np.ceil(self.output_grid[0].max() / self.output_dx) * self.output_dx])\n self.output_ybounds = np.array([np.floor(self.output_grid[1].min() / self.output_dy) * self.output_dy, np.ceil(self.output_grid[1].max() / self.output_dy) * self.output_dy])\n \n \n output_data = np.zeros(self.output_shape, dtype = 
'float32')\n \n if proj_changed:\n #Obtain information about the required data mappings\n self.inside_output_grid, self.output_rows, self.output_columns = self.map_inputgridcells_onto_outputgridcells()\n self.inside_input_grid, self.input_rows, self.input_columns = self.map_outputgridcells_onto_inputgridcells()\n \n #Map the input data onto the output grid\n output_data = self.map_inputdata_onto_outputgrid()\n \n if proj_changed:\n self.save_data_mapping_info()\n \n return output_data, self.output_xbounds, self.output_ybounds\n \n \n \n def import_data_mapping_info(self):\n \"\"\"\n Import pickled parameters that contain info about the output grid and the data mappings\n \"\"\"\n try:\n with open(self.data_mapping_info_filename, 'rb') as f:\n data_mapping_info = pickle.load(f)\n for j in self.data_mapping_info_parameters:\n exec('self.'+j+' = data_mapping_info[j]')\n except Exception:\n self.input_metadata = {} #Indicate that the data mapping has to be performed\n \n def save_data_mapping_info(self):\n \"\"\"\n Save parameters that contain info about the output grid and the data mappings into a pickle file.\n \"\"\"\n data_mapping_info = {}\n for j in self.data_mapping_info_parameters:\n data_mapping_info[j] = eval('self.'+j) #Doing this with 1 line doesn't work\n with open(self.data_mapping_info_filename, 'wb') as f:\n pickle.dump(data_mapping_info, f)\n \n \n \n def map_inputgrid_onto_outputgrid(self):\n \"\"\"\n Maps the given input grid onto the output grid.\n \"\"\"\n return np.array(pyproj.transform(self.proj_input, self.proj_output, self.input_grid[0], self.input_grid[1]))\n\n \n def get_output_grid_info(self):\n \"\"\"\n Choose the boundaries such that the input data just fits in the rectangular domain.\n Also determines the required shape of the output grid.\n \"\"\" \n output_ybounds = np.array([np.floor(min([j[1].min() for j in self.inputgrid_onto_outputgrid.values()]) / self.output_dy) * self.output_dy, \n np.ceil(max([j[1].max() for j in self.inputgrid_onto_outputgrid.values()]) / self.output_dy) * self.output_dy])\n output_xbounds = np.array([np.floor(min([j[0].min() for j in self.inputgrid_onto_outputgrid.values()]) / self.output_dx) * self.output_dx, \n np.ceil(max([j[0].max() for j in self.inputgrid_onto_outputgrid.values()]) / self.output_dx) * self.output_dx])\n y_range, x_range = np.diff(output_ybounds)[0], np.diff(output_xbounds)[0]\n \n output_shape = np.array([y_range / self.output_dy, x_range / self.output_dx]).astype('int')\n return output_xbounds, output_ybounds, output_shape\n\n def get_output_grid(self):\n \"\"\"\n Returns the x and y coordinates of the grid centers of the output grid. 
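        As a worked sketch (editor's note, hypothetical numbers): with output_dx =\n        output_dy = 1 and both bounds equal to [0, 2], the centers are 0.5 and 1.5\n        along each axis, so np.meshgrid yields X = [[0.5, 1.5], [0.5, 1.5]] and\n        Y = [[0.5, 0.5], [1.5, 1.5]].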
In the returned array, the first element\n contains a 2D array with all x coordinates, and the second element a 2D array with all y coordinates.\n \"\"\"\n return np.array(np.meshgrid(0.5*self.output_dx + np.arange(self.output_xbounds[0], self.output_xbounds[1], self.output_dx), 0.5*self.output_dy + np.arange(self.output_ybounds[0], self.output_ybounds[1], self.output_dy)))\n \n def map_outputgrid_onto_inputgrid(self):\n \"\"\"\n Performs the inverse of the map given by map_inputgrid_onto_outputgrid.\n \"\"\"\n return np.array(pyproj.transform(self.proj_output, self.proj_input, self.output_grid[0], self.output_grid[1])) \n \n def map_inputgridcells_onto_outputgridcells(self):\n \"\"\"\n Maps input grid cells onto the output grid cells, and returns the corresponding row and column numbers.\n \"\"\"\n output_rows = np.floor((self.output_ybounds[1] - self.inputgrid_onto_outputgrid[1]) / self.output_dy).astype('int')\n output_columns = np.floor((self.inputgrid_onto_outputgrid[0] - self.output_xbounds[0]) / self.output_dx).astype('int')\n inside_output_grid = (output_rows >= 0) & (output_rows < self.output_shape[0]) &\\\n (output_columns >= 0) & (output_columns < self.output_shape[1])\n output_rows, output_columns = output_rows[inside_output_grid], output_columns[inside_output_grid]\n return inside_output_grid, output_rows, output_columns\n \n def map_outputgridcells_onto_inputgridcells(self):\n \"\"\"\n Maps output grid cells onto the input grid cells, and returns the corresponding row and column numbers. \n \"\"\"\n dx, dy = self.input_metadata['dx'], self.input_metadata['dy']\n \n outputgrid_onto_inputgrid = self.map_outputgrid_onto_inputgrid()\n \n input_rows = np.floor((self.input_grid[1].max() - outputgrid_onto_inputgrid[1]) / dy).astype('int')\n input_columns = np.floor((outputgrid_onto_inputgrid[0] - self.input_grid[0].min()) / dx).astype('int')\n \n #Inside input domain:\n inside_input_grid = (input_rows >= 0) & (input_rows < self.input_metadata['nrows']) &\\\n (input_columns >= 0) & (input_columns < self.input_metadata['ncols']) \n input_rows, input_columns = input_rows[inside_input_grid], input_columns[inside_input_grid]\n return inside_input_grid, input_rows, input_columns\n \n \n def map_inputdata_onto_outputgrid(self):\n \"\"\"\n Maps input data onto the output grid.\n\n Step 1 is to fill every grid cell in the output grid with the nearest input grid cell, if available. \n This uses the map from the output grid onto the input grid\n \"\"\"\n output_data = np.zeros(self.output_shape[0] * self.output_shape[1], dtype = 'float32')\n output_data[self.inside_input_grid.flatten()] = self.input_data[(tuple(self.input_rows), tuple(self.input_columns))].flatten()\n\n if self.method == '2-way':\n \"\"\"\n There are however grid cells in the output grid onto which the inverse map from the input grid onto the output grid maps\n multiple input grid cells. 
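            (For example, when the output grid is 4 times coarser than the input\n            grid in each direction, up to 16 input cell centers can fall inside a\n            single output cell.)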
\n            Step 2 is therefore to fill these grid cells in the output grid with the average data value of all input grid cells that get\n            mapped onto it.\n            \"\"\"\n            #output_data_indices contains the indices of the elements in output_data onto which the input data gets mapped\n            output_data_indices = (self.output_rows * self.output_shape[1] + self.output_columns).flatten()\n            output_data_indices_init = output_data_indices.copy()\n            \n            data_flat = self.input_data[self.inside_output_grid]\n            #data_flat_indices is used to identify which elements in data_flat have already been taken into account in the loop below\n            data_flat_indices = np.arange(data_flat.shape[0]).astype('int')\n            \n            output_data[output_data_indices] = 0.\n            counts = np.zeros(output_data.shape, dtype = 'float') #Number of input grid cells that get mapped onto a single output grid cell\n            data_nottreated = np.array([1]) #Initialization\n            while np.count_nonzero(data_nottreated) > 0:\n                output_data[output_data_indices] += data_flat\n                counts[output_data_indices] += 1\n                \n                indices_treated = -1 + np.zeros(output_data.shape, dtype = 'int')\n                indices_treated[output_data_indices] = data_flat_indices\n                data_nottreated = np.ones(data_flat.shape, dtype = 'bool')\n                data_nottreated[indices_treated[indices_treated != -1]] = 0\n                \n                output_data_indices = output_data_indices[data_nottreated]\n                data_flat = data_flat[data_nottreated]\n                data_flat_indices = data_flat_indices[:len(data_flat)]\n            output_data[output_data_indices_init] = output_data[output_data_indices_init] / counts[output_data_indices_init]\n        \n        return output_data.reshape(self.output_shape)","sub_path":"Satellite/reproject_data.py","file_name":"reproject_data.py","file_ext":"py","file_size_in_byte":13022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"369025359","text":"#!/usr/bin/env python\n# -- BEGIN LICENSE BLOCK ----------------------------------------------\n# Copyright (c) 2019, FZI Forschungszentrum Informatik\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted\n# provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions\n# and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of\n# conditions and the following disclaimer in the documentation and/or other materials provided\n# with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its contributors may be used to\n# endorse or promote products derived from this software without specific prior written\n# permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR\n# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND\n# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY\n# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# -- END LICENSE BLOCK ------------------------------------------------\n\n\nimport unittest\nimport io\nimport sys\nimport tempfile\n\nfrom catkin_doc.parsers.launchparser import LaunchParser\nimport catkin_doc.datastructures as ds\n\nif sys.version_info[0] == 3:\n    StringIO = io.StringIO\nelse:\n    StringIO = io.BytesIO\n\n\nclass TestLaunch(unittest.TestCase):\n    \"\"\"Test basic functionality of the launchfile parser module\"\"\"\n\n    def test_parsing(self):\n        source_code = r'''\n<launch>\n  <arg name=\"foo\" default=\"bar\" doc=\"foobar\"/>\n  <arg name=\"foo1\" default=\"bar\"/>\n  <arg name=\"foo2\" doc=\"foobar\"/>\n  <arg name=\"foo3\"/>\n</launch>\n'''\n\n        source_file = tempfile.NamedTemporaryFile()\n        source_file.write(source_code.encode(encoding=\"utf-8\", errors=\"strict\"))\n        source_file.seek(0)\n        parser = LaunchParser(source_file.name, \"/package/root\")\n\n        launchfile = parser.launchfile\n\n        self.assertEqual(len(launchfile.children[ds.KEYS[\"launch_argument\"]]), 4)\n\n        self.assertEqual(launchfile.children[ds.KEYS[\"launch_argument\"]][0].name, \"foo\")\n        self.assertEqual(launchfile.children[ds.KEYS[\"launch_argument\"]][0].default_value, \"bar\")\n        self.assertEqual(launchfile.children[ds.KEYS[\"launch_argument\"]][0].description, \"foobar\")\n\n        self.assertEqual(launchfile.children[ds.KEYS[\"launch_argument\"]][1].name, \"foo1\")\n        self.assertEqual(launchfile.children[ds.KEYS[\"launch_argument\"]][1].default_value, \"bar\")\n        self.assertEqual(launchfile.children[ds.KEYS[\"launch_argument\"]][1].description, None)\n\n        self.assertEqual(launchfile.children[ds.KEYS[\"launch_argument\"]][2].name, \"foo2\")\n        self.assertEqual(launchfile.children[ds.KEYS[\"launch_argument\"]][2].default_value, None)\n        self.assertEqual(launchfile.children[ds.KEYS[\"launch_argument\"]][2].description, \"foobar\")\n\n        self.assertEqual(launchfile.children[ds.KEYS[\"launch_argument\"]][3].name, \"foo3\")\n        self.assertEqual(launchfile.children[ds.KEYS[\"launch_argument\"]][3].default_value, None)\n        self.assertEqual(launchfile.children[ds.KEYS[\"launch_argument\"]][3].description, None)\n","sub_path":"test/test_launchfile.py","file_name":"test_launchfile.py","file_ext":"py","file_size_in_byte":3758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"326428144","text":"#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n\r\ni = 12\r\nstr = 'http://www.baidu.com/index={0}'.format(i)\r\nprint(str)\r\n\r\nstr = str + '/123/'\r\nprint(str)\r\n\r\nimport time\r\n\r\nprint(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())) + ' time')\r\n\r\n'''\r\nfrom fake_useragent import UserAgent\r\nua = UserAgent()\r\nprint(ua.random)\r\nprint(ua.random)\r\n'''\r\n\r\nfor i in range(6,31,6):\r\n    print(i)\r\n\r\nl1 = [1,2,3,4,5]\r\nl2 = ['a', 'b', 'c', 'd', 'e']\r\n\r\nfor l in l2:\r\n    if l == 'c':\r\n        print(l1[l2.index(l)])\r\nfor i in range(len(l2)):\r\n    if l2[i] == 'c':\r\n        print(l1[i])\r\n\r\n\r\nzipper = zip(l1, l2)\r\nprint(zipper)\r\nfor ite in zipper:\r\n    print(ite)\r\n\r\n    i1 = ite[1]\r\n    print(i1)\r\n\r\ndict1 = { 'abc': 1, 'bbb' : 1}\r\nif dict1.get('abc') == 1:\r\n    print('has abc')\r\n
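    # (Editor's note) dict.get returns None when the key is missing, so the\r\n    # dict1.get('mm') != 1 branch below also runs when 'mm' is absent:\r\n    # {'abc': 1}.get('mm') is None  ->  True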
print(dict1)\r\nif dict1.get('mm') != 1:\r\n print('has not mm')\r\n print(dict1)\r\n dict1['mm'] = 12\r\n print(dict1)\r\n\r\ndef toString(c):\r\n return c*2\r\nl3 = map(toString, l1)\r\nprint(list(l3))\r\n\r\nsx = 'hello'\r\nprint(len(sx))\r\nsx2 = ''\r\nprint(len(sx2))\r\n\r\nwith open('error.txt', 'a', encoding='utf-8') as f:\r\n f.write('hello')\r\n f.write('\\r\\n')","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"619571982","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\"\"\"\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\n\nimport logging\nfrom time import sleep\n\nimport zmq\nfrom zmq.utils.monitor import recv_monitor_message\n\nfrom ..router import Router\nfrom ..constants import MESSAGE_TYPE_REQUEST, MESSAGE_TYPE_REPLY, TRACE_FUNC, LOOP_CONTINUE, MESSAGE_TYPE_ROUTE, \\\n TRACE_NET, STABILIZE_TIMEOUT\nfrom ..concerns.manager import ConcernsManager\nfrom p2p0mq.peer_store import PeerStore\nfrom ..security import SecurityManager\nfrom ..utils.thread.koloopthread import KoLoopThread\nfrom .client import Sender\nfrom .server import Receiver\n\nlogger = logging.getLogger('p2p0mq.app')\n\n\nclass LocalPeer(PeerStore, SecurityManager,\n ConcernsManager, Router, KoLoopThread):\n \"\"\"\n Resource management class that exposes a single local peer.\n\n This class can be executed both as a thread and as an\n online worker if current thread has nothing to do.\n When used as a thread call `start()` and it will perform\n the necessary steps. When used online call `run()`.\n\n To see some other router-dealer patterns goto:\n - [http://hintjens.com/blog:42]\n\n Attributes:\n name (str):\n The name of the thread.\n config (dict):\n Configuration options loaded at startup.\n zmq_monitor (zmq.Poller):\n Brings together all monitoring sockets.\n zmq_context (zmq.Context):\n The zmq resource management structure.\n receiver (Receiver):\n The server of the local peer. It runs on a separate thread\n and has a distinct web address.\n sender (Sender):\n The client used by local peer. It runs on a separate thread\n and has a distinct web address.\n \"\"\"\n def __init__(self, config,\n sender_address='127.0.0.1', sender_port=8341,\n receiver_address='127.0.0.1', receiver_port=8342,\n zmq_context=None,\n zmq_monitor=False,\n *args, **kwargs):\n \"\"\"\n Constructor.\n\n Attributes:\n sender_address (str):\n The address where the sender should use.\n sender_port (int):\n The port where the sender should use.\n receiver_address (str):\n The address where the receiver should use.\n receiver_port (int):\n The port where the receiver should use.\n zmq_context (zmq.Context):\n The zmq resource management structure. If not provided the\n application uses the global context.\n zmq_monitor (bool):\n Should we monitor the sockets and log debug information\n about them? 
If `True` the constructor will create a poller in\n `zmq_monitor` attribute.\n \"\"\"\n super(LocalPeer, self).__init__(*args, **kwargs)\n self.name = 'p2p0mq.A.th' if self.uuid is None \\\n else ('%s-p2p0mq.A.th' % self.uuid[-4:-1].decode(\"utf-8\"))\n self.config = config\n self.zmq_monitor = zmq.Poller() if zmq_monitor else None\n self.zmq_context = zmq_context or zmq.Context.instance()\n self.receiver = Receiver(\n app=self, context=self.zmq_context,\n bind_address=receiver_address, bind_port=receiver_port)\n self.sender = Sender(\n app=self, context=self.zmq_context,\n bind_address=sender_address, bind_port=sender_port)\n logger.debug('application constructed')\n\n def create(self):\n \"\"\" Starts the local peer. \"\"\"\n logger.debug(\"The local peer %r is being created...\", self.uuid)\n self.start_db()\n self.prepare_cert_store(self.uuid)\n self.start_auth(self.zmq_context)\n self.start_concerns()\n self.receiver.start()\n self.sender.start()\n logger.debug(\"The local peer %r was created\", self.uuid)\n\n def terminate(self):\n \"\"\" Terminates the local peer.\n\n This method is not to be called directly by the user of the class.\n\n This method should be written defensively, as the environment\n might not be fully set (an exception in create() does not prevent\n this method from being executed).\n \"\"\"\n logger.debug(\"The local peer %r is being terminated...\", self.uuid)\n\n if self.receiver is not None:\n self.receiver.stop.set()\n\n if self.sender is not None:\n self.sender.stop.set()\n\n sleep(0.5)\n\n if self.receiver is not None:\n self.receiver.join()\n assert self.receiver.socket is None\n\n if self.sender is not None:\n self.sender.join()\n assert self.sender.socket is None\n\n self.terminate_concerns()\n self.terminate_auth()\n self.terminate_db()\n logger.debug(\"The local peers %r was terminated\", self.uuid)\n\n def execute(self):\n \"\"\" A single step in the local peer loop. 
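\n        One tick, in order (editor's summary of the method body): sync the peer\n        database, run the concern handlers, drain the receiver's REQUEST, REPLY\n        and ROUTE queues through process_requests / process_replies /\n        process_routes, and hand the resulting messages to the sender:\n\n            replies  = process_requests(typed_queues[MESSAGE_TYPE_REQUEST])\n            requests = process_replies(typed_queues[MESSAGE_TYPE_REPLY])\n            routed   = process_routes(typed_queues[MESSAGE_TYPE_ROUTE])\n            sender.enqueue_all(replies, requests, routed)\n        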
\"\"\"\n logger.log(TRACE_FUNC, \"Application %r starts execute()\", self.uuid)\n self.sync_database()\n\n self.execute_concerns()\n\n replies = self.process_requests(\n self.receiver.typed_queues[MESSAGE_TYPE_REQUEST])\n requests = self.process_replies(\n self.receiver.typed_queues[MESSAGE_TYPE_REPLY])\n routed = self.process_routes(\n self.receiver.typed_queues[MESSAGE_TYPE_ROUTE])\n\n self.sender.enqueue_all(replies, requests, routed)\n logger.log(TRACE_FUNC, \"Application %r ends execute\", self.uuid)\n\n self.monitor()\n return LOOP_CONTINUE\n\n def monitor(self):\n \"\"\" Called on each loop to do monitoring tasks.\"\"\"\n if not self.zmq_monitor:\n return\n\n if len(self.zmq_monitor.sockets) == 0:\n if self.receiver is not None and self.receiver.socket is not None:\n if self.sender is not None and self.sender.socket is not None:\n self.zmq_monitor.register(\n self.receiver.socket.get_monitor_socket())\n self.zmq_monitor.register(\n self.sender.socket.get_monitor_socket())\n event_map = {}\n setattr(self, 'event_map', event_map)\n # print(\"Event names:\")\n for name in dir(zmq):\n if name.startswith('EVENT_'):\n value = getattr(zmq, name)\n # print(\"%21s : %4i\" % (name, value))\n event_map[value] = name\n\n event_map = getattr(self, 'event_map')\n socket_rec = self.zmq_monitor.sockets[0]\n socket_se = self.zmq_monitor.sockets[1]\n socks = dict(self.zmq_monitor.poll(100))\n if socket_rec in socks and socks[socket_rec] & zmq.POLLIN:\n message = recv_monitor_message(socket_rec)\n message.update({'description': event_map[message['event']]})\n logger.log(TRACE_NET, \"RECEIVER: %r\", message)\n if socket_se in socks and socks[socket_se] & zmq.POLLIN:\n message = recv_monitor_message(socket_se)\n message.update({'description': event_map[message['event']]})\n logger.log(TRACE_NET, \"SENDER: %r\", message)\n\n def stable(self):\n \"\"\" Tell if the local peer is started. \"\"\"\n partial = (\n self.receiver is not None and\n self.sender is not None and\n self.run_loop_counter > 4 and\n self.receiver.run_loop_counter > 4 and\n self.sender.run_loop_counter > 4 and\n self.next_peer_db_sync_time > self.tick\n )\n if partial and not self.no_encryption:\n partial = self.auth_thread is not None\n return partial\n\n def wait_to_stabilize(self):\n \"\"\" Wait for the local peer to became stable. 
\"\"\"\n for i in range(STABILIZE_TIMEOUT*2):\n if self.stable():\n return True\n sleep(0.5)\n return False\n\n","sub_path":"p2p0mq/app/local_peer.py","file_name":"local_peer.py","file_ext":"py","file_size_in_byte":8034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"349213599","text":"import pandas as pd\nfrom numpy import mean\n\nimport numpy as np\nfrom sklearn.metrics import *\nfrom sklearn.model_selection import KFold, ShuffleSplit\nfrom sklearn import tree\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom scipy.stats import chisquare\nfrom sklearn.svm import SVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom xgboost import XGBClassifier\nfrom sklearn.model_selection import train_test_split\nimport time\n\n\ndef get_acc_auc_kfold(X,Y,model,k=3):\n accuracy = []\n auc = []\n #model = tree.DecisionTreeClassifier(max_depth=5)\n #model = RandomForestClassifier(max_depth=5)\n #model = LogisticRegression()\n #model = KNeighborsClassifier(n_neighbors=3)\n kf = KFold(n_splits=k)\n Yarray = np.ravel(Y)\n \n for train_index, test_index in kf.split(X):\n \n model.fit(X.iloc[train_index],Yarray[train_index])\n predict = model.predict(X.iloc[test_index])\n accuracy.append(accuracy_score(Yarray[test_index],predict))\n auc.append(roc_auc_score(Yarray[test_index],predict))\n \n acc_avg=mean(accuracy)\n auc_avg=mean(auc)\n \n return acc_avg, auc_avg\n\ndef get_acc_auc_randomisedCV(X,Y,model,iterNo=3,test_percent=0.35):\n\t#TODO: First get the train indices and test indices for each iteration\n\t#Then train the classifier accordingly\n\t#Report the mean accuracy and mean auc of all the iterations\n \n accuracy = []\n auc = []\n \n Yarray = np.ravel(Y)\n #model = KNeighborsClassifier(n_neighbors=3)\n rs = ShuffleSplit(n_splits=iterNo, test_size=test_percent)\n for train_index, test_index in rs.split(X):\n model.fit(X.iloc[train_index],Yarray[train_index])\n predict = model.predict(X.iloc[test_index])\n accuracy.append(accuracy_score(Yarray[test_index],predict))\n auc.append(roc_auc_score(Yarray[test_index],predict))\n #result = chisquare(predict,Yarray[test_index])\n \n acc_avg=mean(accuracy)\n auc_avg=mean(auc)\n \n \n return acc_avg, auc_avg\n\n\ndef get_acc_auc_test_train(X,Y,model,iterNo=8,test_percent=0.80):\n\t#TODO: First get the train indices and test indices for each iteration\n\t#Then train the classifier accordingly\n\t#Report the mean accuracy and mean auc of all the iterations\n \n Yarray = np.ravel(Y)\n \n X_train, X_test, Y_train, Y_test = train_test_split(X, Yarray, test_size=0.20, random_state = 0)\n model.fit(X_train,Y_train)\n prediction = model.predict(X_test)\n\n acc= accuracy_score(Y_test,prediction)\n auc= roc_auc_score(Y_test,prediction)\n \n \n acc= accuracy_score(Y_test,prediction)\n auc= roc_auc_score(Y_test,prediction)\n precision = precision_score(Y_test, prediction)\n recall = recall_score(Y_test,prediction)\n f1 = f1_score(Y_test,prediction)\n \n \n return acc, auc, precision, recall, f1\n\n\n\ndef seperate_variables(df):\n X = df.loc[:,'radius_mean':'fractal_dimension_worst']\n\n Y = df[['diagnosis']]\n \n return X,Y\n\ndef test_model(X,Y,model):\n start = time.time()\n acc_k, auc_k= get_acc_auc_kfold(X,Y,model)\n print((\"Average Accuracy in KFold CV: \"+str(acc_k)))\n print((\"Average AUC in KFold 
CV: \"+str(auc_k)))\n \n acc_r,auc_r = get_acc_auc_randomisedCV(X,Y,model)\n print((\"Average Accuracy in Randomised CV: \"+str(acc_r)))\n print((\"Average AUC in Randomised CV: \"+str(auc_r)))\n print('\\n')\n \n acc_l, auc_l, precision, recall, f1 = get_acc_auc_test_train(X, Y, model)\n print((\"Accuracy in Test-Train: \"+str(acc_l)))\n print((\"AUC in Test-Train\"+str(auc_l)))\n print((\"Precision in Test-Train\") +str(precision))\n print((\"Recall in Test-Train\") +str(recall))\n print((\"f1 in Test-Train\")+str(f1))\n end = time.time()\n print(\"Time to run tests: \" + str(end - start))\n \n return acc_k,auc_k, acc_r,auc_r, acc_l,auc_l\n \n\n\n\ndef main():\n df = pd.read_csv(\"./data/breast-cancer/data.csv\")\n \n df['diagnosis']=pd.Categorical(df['diagnosis'])\n df['diagnosis'] = df['diagnosis'].cat.codes\n \n \n \n normallized = df.loc[:,'diagnosis':]/df.loc[:,'diagnosis':].max()\n testResults = pd.DataFrame(columns = ['acc_kfolds','auc_kfolds','acc_rand','auc_rand','acc_test','auc_test'])\n \n X, Y = seperate_variables(normallized)\n print(\"percentage of positive diagnosis:\" + str(((Y.loc[Y['diagnosis']==1]).shape[0])/Y.shape[0]))\n \n #model = KNeighborsClassifier(n_neighbors=3)\n model = SVC(C = 8)\n print(\"Testing SVM rbf\")\n result = test_model(X,Y,model)\n testResults.loc['SVMrbf']=result\n print(\"\\n\")\n \n model = SVC(C = 15, kernel = 'linear')\n print(\"Testing SVM linear\")\n result = test_model(X,Y,model)\n testResults.loc['SVMlin']=result\n print(\"\\n\")\n \n model = AdaBoostClassifier(base_estimator=DecisionTreeClassifier(max_depth=5))\n print(\"AdaBoost Tree\")\n result = test_model(X,Y,model)\n testResults.loc['AdaBoost']=result\n print(\"\\n\")\n \n model = RandomForestClassifier(max_depth=5)\n print(\"Testing Random Forest Tree\")\n result = test_model(X,Y,model)\n testResults.loc['RandomForest']=result\n print(\"\\n\")\n \n model = XGBClassifier(learning_rate=0.01, n_estimators=25, max_depth=10,gamma=0.51)\n print(\"Testing XGB Tree\")\n result = test_model(X,Y,model)\n testResults.loc['XGBTree']=result\n print(\"\\n\")\n \n \n #model = MLPClassifier(solver = 'lbfgs',learning_rate='adaptive',alpha = .005)\n model = MLPClassifier(solver = 'lbfgs', learning_rate='adaptive')\n print(\"Testing Neural Network\")\n result = test_model(X,Y,model)\n testResults.loc['NeuralNW']=result\n print(\"\\n\")\n \n \n model = KNeighborsClassifier(n_neighbors=5)\n print(\"Testing K nearest Neighbors\")\n result = test_model(X,Y,model)\n testResults.loc['KNN']=result\n print(\"\\n\")\n \n model = LogisticRegression(C=50)\n print(\"Testing Logistic Regression\")\n result = test_model(X,Y,model)\n testResults.loc['LogReg']=result\n print(\"\\n\")\n \n testResults.plot.bar(ylim=(.80,1),figsize=(24,24),fontsize=26)\n \n \n \n \n \n \n \n \nif __name__ == \"__main__\":\n\tmain()\n ","sub_path":"hw1/MLHW1/breast_cancer.py","file_name":"breast_cancer.py","file_ext":"py","file_size_in_byte":6294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"398049494","text":"import requests as req\nfrom urllib.parse import urlencode\nfrom os.path import exists, isdir\nfrom os import makedirs\nimport simplejson as json\nfrom time import sleep\n\n\nmagnitudes = {\n # \"SO2\": 1,\n # \"NO\": 7,\n # \"NO2\": 8,\n # \"NOx\": 12,\n # \"O3\": 14,\n # \"CO\": 6,\n \"PM10\": 10\n}\n\nprovinces = {\n \"Barcelona\": 8\n}\n \n\ndef request (magnitude, province, year, month, day):\n url_template = 
\"https://analisi.transparenciacatalunya.cat/resource/uy6k-2s8r.geojson?\"\n query_params = {\n \"dia\": day,\n \"mes\": month,\n \"any\": year,\n \"magnitud\": magnitude,\n \"provincia\": province\n }\n \n try:\n res = req.get(url_template+urlencode(query_params))\n return res.json()\n except Exception as e:\n print(e)\n sleep(30)\n request(magnitude, province, year, month, day)\n \n \ndef write (magnitude, province, year, month, day, points):\n file_name = 'data'\n if not exists(file_name):\n makedirs(file_name)\n\n file_name = file_name + '/vector'\n if not exists(file_name):\n makedirs(file_name)\n \n file_name = file_name + '/' + str(magnitude)\n if not exists(file_name):\n makedirs(file_name)\n \n file_name = file_name + '/' + str(province)\n if not exists(file_name):\n makedirs(file_name)\n \n file_name = file_name + '/' + str(year) + '-' + str(month) + '-' + str(day)\n with open(file_name + '.geojson', 'w') as file:\n print('writing file ' + file_name + '.geojson')\n json.dump(points, file)\n \n \ndef run ():\n year = 2018\n months = [i+1 for i in range(12)]\n days = [i+1 for i in range(31)]\n \n for province in provinces.values():\n for magnitude in magnitudes.values():\n for month in months:\n for day in days:\n points = request(magnitude, province, year, month, day)\n write(magnitude, province, year, month, day, points)\n \n \nif __name__ == '__main__':\n run()\n\n","sub_path":"scripts/py/request_icqa.py","file_name":"request_icqa.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"416653173","text":"# 2018/02/08 修改表tmp_shool_major\ndef get_item(itemClass):\n item = itemClass()\n item[\"housing_type\"] = \"\"\n item[\"available_time\"] = \"\"\n item[\"house_name\"] = \"\"\n item[\"room_type\"] = \"\"\n item[\"car_spaces\"] = \"\"\n item[\"lease\"] = \"\"\n item[\"address\"] = \"\"\n item[\"detaile_address\"] = \"\"\n item[\"supporting_facilities\"] = \"\"\n item[\"price\"] = \"\"\n item[\"isRent\"] = \"1\"\n item[\"postal_code\"] = \"\"\n item[\"picture\"] = \"\"\n item[\"housing_introduce\"] = \"\"\n item[\"supplier_type\"] = \"\"\n item[\"supplier_name\"] = \"\"\n item[\"supplier_logo\"] = \"\"\n item[\"country\"] = \"\"\n item[\"city\"] = \"\"\n item[\"contact_name\"] = \"\"\n item[\"contact_phone\"] = \"\"\n item[\"contact_email\"] = \"\"\n item[\"url\"] = \"\"\n item[\"status\"] = \"2\"\n item[\"deposit\"] = \"\"\n item[\"area\"] = \"\"\n item[\"floor\"] = \"\"\n item[\"price_include\"] = \"\"\n return item\n","sub_path":"yyx_crawler/scrapymodule_Rent/scrapymodule_Rent/getItem.py","file_name":"getItem.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"144873534","text":"from flask import Flask, render_template\nimport paho.mqtt.client as mqtt\nfrom flask_socketio import SocketIO, emit\nimport datetime\nimport concurrent.futures\n#_____________1. init app_______________\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'secret!'\nsocketio = SocketIO(app)\nstudentnumber = 0\n\n#___________2. 
Routers________________\n\n#@app.route('/help')\n#def help():\n#    return render_template('help.html') \n\n\n\n@app.route('/')\ndef index():\n    print(\"here\")\n    now = datetime.datetime.now()\n    timeString = now.strftime(\"%Y-%m-%d %H:%M\")\n    templateData = {\n        'time':timeString\n    }\n    global studentnumber\n    print(studentnumber)\n    return render_template('index.html',students = studentnumber,**templateData )\n\n@socketio.on('my event', namespace='/test')\ndef test_message(message):\n    emit('response', {'studentnumber': message['studentnumber']})\n    \n@socketio.on('connect', namespace='/test')\ndef test_connect():\n    emit('my response', {'studentnumber': studentnumber})\n    \n@socketio.on('disconnect', namespace='/test')\ndef test_disconnect():\n    print('Client disconnected')\n    \ndef on_connect(client,userdata,flag,rc): \n    print(\"Connected with result code\"+str(rc))\n\n    client.subscribe(\"#\")\n\ndef on_message(client,userdata,msg):\n    global studentnumber\n    print(msg.topic + \" \\n \" + msg.payload.decode(\"utf-8\") + \"\\n\")\n    studentnumber=msg.payload.decode(\"utf-8\")\n    socketio.emit('response', {'studentnumber': studentnumber}, namespace='/test')\n\n\n@app.route('/get_data', methods=['GET'])\ndef get_data():\n    print(\"here\")\n    global studentnumber\n    print(studentnumber)\n    return jsonify(students = studentnumber)\n    \ndef mqttloop(client):\n    client.loop_forever()\n\n#__________3.start server_____________________\nif __name__== '__main__':\n    client = mqtt.Client()\n    client.on_connect = on_connect\n    client.on_message = on_message\n\n    client.connect(\"127.0.0.1\",1883,60)\n    executor = concurrent.futures.ThreadPoolExecutor(max_workers=3)\n    executor.submit(mqttloop, client)\n    print(\"Active!!!\")\n    socketio.run(app, debug=True, host='0.0.0.0',)\n    \n","sub_path":"project/website/myapp.py","file_name":"myapp.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"182674303","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nSpyder Editor\r\n\r\nThis is a temporary script file.\r\n\"\"\"\r\n\r\n\"\"\"\r\nThis py script will have the functionality to call vba script to clean up the sofi\r\nremit files and attempt to automate the entire process\r\nWithin the sofi macro it can create the 3-column dat file\r\n\"\"\"\r\n\r\nimport sys\r\nimport os\r\nimport xlwings as xw\r\n\r\n#########################################################################################################\r\n#                    Prompt console for user input. Follow print func below                            #\r\n#########################################################################################################\r\ndef user_input():\r\n    # Asking user for year and month input\r\n    # Return it as a 4-digit string of year and month, e.g. 1701\r\n\r\n    while True:\r\n        dist_date = input(\"Please enter the distribution month and year, (example 1701 for January 2017): \")\r\n\r\n        try:\r\n            if int(dist_date[-2:]) >= 1 and int(dist_date[-2:]) <= 12:\r\n                month = int(dist_date[-2:])\r\n                year = int(dist_date[0:2])\r\n\r\n                if month < 10:\r\n                    month = \"0{}\".format(str(month))\r\n                else:\r\n                    month = str(month)\r\n\r\n                dist_date = \"{}{}\".format(str(year), str(month))\r\n\r\n                break\r\n            else:\r\n                continue\r\n        except:\r\n            pass\r\n\r\n    # return dist_date as a string like 1801 for Jan. 
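Flask-SocketIO ships its own background-worker helper, which matches the threading model to the configured async mode; a sketch of running the MQTT loop with it instead of a ThreadPoolExecutor, assuming the same socketio and client objects as in the __main__ block above:

def start_mqtt_background(socketio, client):
    # start_background_task picks threads or greenlets to suit socketio's async mode.
    socketio.start_background_task(client.loop_forever)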
2018\r\n return dist_date\r\n\r\n\r\n#########################################################################################################\r\n# Calling scrub_sof()/SOFI button within the excel #\r\n# pulling numbers that will be input in hist-job row #\r\n# sofi_scrub() will call up the hist-job and paste the data #\r\n#########################################################################################################\r\ndef sofi_scrub():\r\n dist_date = user_input()\r\n # Wilmington Trust path sofi remit files path = 'd:/deals/wt/sofi_servicing_report/'\r\n svcr_path = 'd:/deals/wt/sofi_servicing_report/'\r\n\r\n # May not need this dictionary since there is a dictionary in sofi_scrub() vba\r\n sofi_deals = {\r\n 'SOFI2017-1': 'd:/deals/wt/2017/SOFI/SoFi-1/',\r\n 'SOFI2017-2': 'd:/deals/wt/2017/SOFI/SoFi-2/',\r\n 'SOFI2017-3': 'd:/deals/wt/2017/SOFI/SoFi-3/',\r\n 'SOFI2017-4': 'd:/deals/wt/2017/SOFI/SoFi-4/',\r\n 'SOFI2017-5': 'd:/deals/wt/2017/SOFI/SoFi-5/',\r\n 'SOFI2017-6': 'd:/deals/wt/2017/SOFI/SoFi-6/',\r\n 'SOFI2018-1': 'd:/deals/wt/2018/SOFI/SoFi-1/',\r\n }\r\n\r\n remit_files = os.listdir(\"{}{}/\".format(svcr_path, dist_date))\r\n # FileNotFoundError will pop up if not such dir exist\r\n\r\n # Remove any misc files within the servicing report files\r\n for file in remit_files:\r\n if 'sofi' not in file.lower():\r\n remit_files.pop(remit_files.index(file))\r\n\r\n\r\n # Create a macro that python can call when it opens up the hist-job\r\n wb = xw.Book('d:/execs/ETI SoFi.xlam')\r\n sofi = wb.macro('sofi_scrub')\r\n\r\n # Does not show on the screeen\r\n for file in remit_files:\r\n wb1 = xw.Book(\"{}{}/{}\".format(svcr_path, dist_date, file))\r\n sofi()\r\n wb1.save()\r\n wb1.close()\r\n\r\n wb.close()\r\n # find the deal hist-job and paste the data\r\n# hist_job = []\r\n# for key in sofi_deals.keys():\r\n# if key in file:\r\n# deal_path = sofi_deals[key]\r\n# files = os.listdir(deal_path)\r\n# for file in files:\r\n# if 'HIST-JOB' in file:\r\n# hist_job.append(file)\r\n#\r\n# if hist_job == None:\r\n# print('HIST-JOB cannot be found!!!')\r\n# continue\r\n\r\n# hist_job = \"{}{}\".format(deal_path, hist_job[0])\r\n# wb2 = xw.Book(hist_job)\r\n\r\n\r\n\r\n\r\n\r\n#########################################################################################################\r\n# main func #\r\n#########################################################################################################\r\nif __name__ == '__main__':\r\n sofi_scrub()\r\n sys.exit()\r\n","sub_path":"sofi_remit_scrub.py","file_name":"sofi_remit_scrub.py","file_ext":"py","file_size_in_byte":4477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"262224039","text":"#!/usr/bin/env python3\n\n\"\"\"\nThis is an extension to the argparse ecosystem\n\nPurpose is to enable the creation of CLI options that would behave like\naction=append, but with a check on choices\n\n(*) accumulative : *dest* holds a list of strings (might be extensible to support types)\n(*) restrictive : all elements in the list must be in *choices*\n\nIn practical terms, we want to specify one or several values for a parameter\nthat is itself constrained, like an antenna mask that must be among '1', '3' and '7'\n\nAs of this writing, it is possible to write a code that uses\naction=append, choices=['a', 'b', 'c'] and default=['a', 'b']\nbut then defaults are always returned...\n\"\"\"\n\nimport argparse\n\nclass ListOfChoices(argparse.Action):\n\n \"\"\"\n Use with e.g.\n 
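The slicing-and-int checks in user_input() amount to validating a %y%m date; a compact sketch using datetime.strptime, which raises ValueError for anything that is not a valid two-digit year and month:

from datetime import datetime

def parse_dist_date(raw):
    # '1801' -> datetime(2018, 1, 1); raises ValueError on bad input.
    return datetime.strptime(raw, '%y%m')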
parser.add_argument(choices=('1', '2', '3', '4'), default=['1', '2'],\n typeaction=ListOfChoices)\n \"\"\"\n\n def __init__(self, *args, **kwds):\n self.result = []\n super().__init__(*args, **kwds)\n\n def __call__(self, parser, namespace, value, option_string=None):\n self.result.append(value)\n setattr(namespace, self.dest, self.result)\n\n#################### unit test\nif __name__ == '__main__':\n def test1():\n \"\"\"\n ListOfChoices micro-test\n \"\"\"\n def new_parser():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"-a\", \"--antenna-mask\", default=['1', '1'],\n choices=['1', '3', '7', '11'],\n action=ListOfChoices,\n help=\"specify antenna mask for each node\")\n return parser\n\n print(new_parser().parse_args([]))\n print(new_parser().parse_args(['-a', '1']))\n print(new_parser().parse_args(['-a', '1', '-a', '3']))\n print(new_parser().parse_args(['-a', '1', '-a', '3', '-a', '11']))\n print(new_parser().parse_args())\n\n test1()\n","sub_path":"r2labProtocolEvaluation/listofchoices.py","file_name":"listofchoices.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"64534066","text":"# Original code from \"Reinforcement Learning in Motion\" by Phil Tabor.\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport gym\n\n# Gym - CartPole\n#\n# State space:\n# Cart position -> -2.4 < x < 2.4\n# Cart velocity -> -inf < dx/dt < +inf\n# Pole angle (rad) -> -0.73 < Θ < +0.73\n# Pole velocity at the tip -> -inf < dx/dt < +inf\n#\n# Action space:\n# Push left (0)\n# Push right (1)\n\n\ndef action_argmax(Q, state):\n \"\"\"Return the action with the maximum value for the current state.\n\n Arguments:\n Q {dict} -- state-action function array\n state {array} -- Current state\n\n Returns:\n int -- Action to take\n \"\"\"\n values = np.array([Q[state, a] for a in range(N_ACTIONS)])\n # Random tie breaking\n action = np.random.choice(np.where(values == values.max())[0])\n return action\n\n\n# Discretize the state spaces\npole_theta_space = np.linspace(-0.209, 0.209, 10)\npole_theta_vel_space = np.linspace(-4, 4, 10)\ncart_pos_space = np.linspace(-2.4, 2.4, 10)\ncart_vel_space = np.linspace(-4, 4, 10)\n\n\ndef get_state(observation):\n \"\"\"Return the state based on the observation from the env\n\n Arguments:\n observation {array} -- states from the env.\n\n Returns:\n array -- current state\n \"\"\"\n cart_x, cart_vel, pole_theta, pole_vel = observation\n cart_x = int(np.digitize(cart_x, cart_pos_space))\n cart_vel = int(np.digitize(cart_vel, cart_vel_space))\n pole_theta = int(np.digitize(pole_theta, pole_theta_space))\n pole_vel = int(np.digitize(pole_vel, pole_theta_vel_space))\n\n return (cart_x, cart_vel, pole_theta, pole_vel)\n\n\ndef plot_running_avg(total_rewards):\n \"\"\"Plot the running average of rewards\n\n Arguments:\n total_rewards {array} -- Array of reward for each episodes\n \"\"\"\n n = len(total_rewards)\n running_avg = np.empty(n)\n for t in range(n):\n running_avg[t] = np.mean(total_rewards[max(0, t-100):(t+1)])\n plt.plot(running_avg)\n plt.title(\"Running Average\")\n plt.show()\n\n\nif __name__ == '__main__':\n\n env = gym.make('CartPole-v0')\n ALPHA = 0.1\n GAMMA = 1.0\n EPS = 1.0\n N_ACTIONS = env.action_space.n\n\n # Construct the state space\n states = []\n for i in range(len(cart_pos_space) + 1):\n for j in range(len(cart_vel_space) + 1):\n for k in range(len(pole_theta_space) + 1):\n for l in 
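What the action buys you on the command line, assuming the ListOfChoices class above is in scope (mirroring the parser from its test1 micro-test):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("-a", "--antenna-mask", default=['1', '1'],
                    choices=['1', '3', '7', '11'],
                    action=ListOfChoices)

print(parser.parse_args(['-a', '3', '-a', '7']).antenna_mask)  # ['3', '7']
print(parser.parse_args([]).antenna_mask)  # defaults kept: ['1', '1']
# parser.parse_args(['-a', '2']) exits with "invalid choice: '2'"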
range(len(pole_theta_vel_space) + 1):\n states.append((i, j, k, l))\n\n # initialise Q(s,a) to 0\n Q = {}\n for state in states:\n for action in range(N_ACTIONS):\n Q[state, action] = 0\n\n number_games = 15000\n total_rewards = np.zeros(number_games)\n\n for i in range(number_games):\n\n if i % 5000 == 0:\n print('Starting game', i)\n\n observation = env.reset()\n state = get_state(observation)\n\n # e-greedy action selection\n rand = np.random.random()\n random_action = env.action_space.sample()\n action = action_argmax(Q, state) if rand < (1 - EPS) else random_action\n\n done = False\n ep_rewards = 0\n\n while not done:\n observation_, reward, done, info = env.step(action)\n ep_rewards += reward\n state_ = get_state(observation_)\n\n # e-greedy action selection\n rand = np.random.random()\n random_action = env.action_space.sample()\n action_ = action_argmax(\n Q, state_) if rand < (1 - EPS) else random_action\n Q[state, action] = Q[state, action] + ALPHA * (\n reward + GAMMA * Q[state_, action_] - Q[state, action])\n state, action = state_, action_\n\n # At the end of the episode decrease epsilon by a small amount such as\n # it converges to a greedy strategy halfway through the series of ep.\n if EPS - 2 / number_games > 0:\n EPS -= 2 / number_games\n else:\n EPS = 0\n\n total_rewards[i] = ep_rewards\n\n # Save the Q dictionnary to be played back.\n np.save('Q_values.npy', Q)\n\n # Print the running average.\n plot_running_avg(total_rewards)\n","sub_path":"01 - Algorithm implementations/Sarsa - CartPole/Sarsa_CartPole.py","file_name":"Sarsa_CartPole.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"473723147","text":"from bami.basalt.community import BasaltCommunity\n\nfrom gumby.experiment import experiment_callback\nfrom gumby.modules.community_experiment_module import IPv8OverlayExperimentModule\n\n\nclass BasaltModule(IPv8OverlayExperimentModule):\n \"\"\"\n This module contains code to manage experiments with the Basalt community.\n \"\"\"\n\n def __init__(self, experiment):\n super().__init__(experiment, BasaltCommunity)\n self.samples = []\n\n def on_peer_sampled(self, peer):\n self.samples.append(peer)\n\n def on_id_received(self):\n super().on_id_received()\n self.gumby_config.basalt.enabled = True\n\n @experiment_callback\n def register_sample_callback(self):\n self.overlay.sample_callback = self.on_peer_sampled\n\n @experiment_callback\n def write_basalt_stats(self):\n # Write view\n self._logger.info(\"Writing view\")\n with open(\"view.csv\", \"w\") as out_file:\n for peer in self.overlay.view:\n out_file.write(\"%s\\n\" % peer)\n\n # Write samples\n self._logger.info(\"Writing %d samples\", len(self.samples))\n with open(\"peer_samples.csv\", \"w\") as out_file:\n for peer in self.samples:\n peer_id = int(peer.address[1]) - 12000\n out_file.write(\"%d\\n\" % peer_id)\n","sub_path":"experiments/basalt/basalt_module.py","file_name":"basalt_module.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"538472730","text":"\"\"\"Plot the fraction of articles that are female, by birth year and first edit date.\"\"\"\nimport wikibios\nfrom matplotlib import pyplot\nfrom operator import itemgetter\n\nfigure = pyplot.figure()\n\nrows_by_birth_year = sorted(wikibios.rows, key=itemgetter('birth_year'))\nbirth_year_medians = []\nfraction_female_by_birth_year = []\nN = 1000\ni = 0\nwhile i + N 
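np.save on a plain dict, as at the end of the SARSA script above, stores a pickled 0-d object array, so reading Q_values.npy back needs allow_pickle and .item(); a sketch for playback:

import numpy as np

# Recover the {(state, action): value} dict written by np.save above.
Q = np.load('Q_values.npy', allow_pickle=True).item()

def greedy_action(Q, state, n_actions):
    # Stored-value argmax for playback (no exploration).
    return int(np.argmax([Q[state, a] for a in range(n_actions)]))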
<= len(rows_by_birth_year):\n\tchunk = rows_by_birth_year[i:i+N]\n\ti = i + N\n\n\tbirth_year_medians.append(chunk[N / 2]['birth_year'])\n\n\tcount_female = 0.0\n\tfor row in chunk:\n\t\tif row['gender'] == 'female':\n\t\t\tcount_female = count_female + 1\n\tfraction_female = count_female / N\n\tfraction_female_by_birth_year.append(fraction_female)\n\naxes1 = figure.add_subplot(2, 1, 1)\naxes1.plot(birth_year_medians, fraction_female_by_birth_year)\naxes1.set_xlabel('Birth Year')\naxes1.set_ylabel('Fraction Female')\n\nrows_by_firstedit = sorted(wikibios.rows, key=itemgetter('firstedit'))\nfraction_female_by_firstedit = []\nfirstedit_medians = []\ni = 0\nwhile i + N <= len(rows_by_birth_year):\n\tchunk = rows_by_firstedit[i:i+N]\n\ti = i + N\n\n\tfirstedit_medians.append(chunk[N / 2]['firstedit'])\n\n\tcount_female = 0.0\n\tfor row in chunk:\n\t\tif row['gender'] == 'female':\n\t\t\tcount_female = count_female + 1\n\tfraction_female = count_female / N\n\tfraction_female_by_firstedit.append(fraction_female)\n\naxes2 = figure.add_subplot(2, 1, 2)\naxes2.plot(firstedit_medians, fraction_female_by_firstedit)\naxes2.set_xlabel('Article Year')\naxes2.set_ylabel('Fraction Female')\n\nfigure.savefig('fractions.pdf')\n","sub_path":"wikibios/fractionplot.py","file_name":"fractionplot.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"117328501","text":"#!/usr/bin/python3\n\nfrom PyQt4 import QtGui, QtCore\nfrom PyQt4.QtCore import Qt\nimport sys, os\n\nclass ImageMenuBar(QtGui.QMenuBar):\n def __init__(self, imageWindow):\n QtGui.QMenuBar.__init__(self)\n self.view = imageWindow\n self.i = self.addMenu(\"Image\")\n self.i.addAction(\"Open\", self.open)\n self.i.addAction(\"Quit\", self.quit)\n self.t = self.addMenu(\"Transform\")\n self.ts = self.t.addMenu(\"Scale\")\n self.ats10 = self.ts.addAction(\"10%\", self.scale10)\n self.ats10.setDisabled(True)\n self.ats10.setCheckable(True)\n self.ats25 = self.ts.addAction(\"25%\", self.scale25)\n self.ats25.setDisabled(True)\n self.ats25.setCheckable(True)\n self.ats75 = self.ts.addAction(\"75%\", self.scale75)\n self.ats75.setDisabled(True)\n self.ats75.setCheckable(True)\n self.ats100 = self.ts.addAction(\"100%\", self.scale100)\n self.ats100.setDisabled(True)\n self.ats100.setCheckable(True)\n self.ats100.setChecked(True)\n self.ats250 = self.ts.addAction(\"250%\", self.scale250)\n self.ats250.setDisabled(True)\n self.ats250.setCheckable(True)\n self.ats300 = self.ts.addAction(\"300%\", self.scale300)\n self.ats300.setDisabled(True)\n self.ats300.setCheckable(True)\n self.tr = self.t.addAction(\"Rotate 180\\u00B0\", self.rotate180)\n self.at = self.t.addAction(\"Grayscale\", self.grayscale)\n self.at.setCheckable(True)\n self.at.setDisabled(True)\n\n def open(self):\n self.dir = \"{n}/Pictures\".format(n = os.environ['HOME'])\n self.fileName = QtGui.QFileDialog.getOpenFileName(self, \"Open File\", self.dir)\n self.view.load(self.fileName)\n self.at.setChecked(False)\n self.isGray = False\n self.at.setDisabled(False)\n self.ats10.setDisabled(False)\n self.ats25.setDisabled(False)\n self.ats75.setDisabled(False)\n self.ats100.setDisabled(False)\n self.ats250.setDisabled(False)\n self.ats300.setDisabled(False)\n self.ats10.setChecked(False)\n self.ats25.setChecked(False)\n self.ats75.setChecked(False)\n self.ats100.setChecked(True)\n self.ats250.setChecked(False)\n self.ats300.setChecked(False)\n\n def quit(self):\n sys.exit()\n\n def scale10(self):\n 
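The chunked-mean loops in fractionplot.py are a windowed average of a 0/1 gender flag; the same curves fall out of pandas in a few lines. A sketch, assuming wikibios.rows is a list of dicts with 'gender' and 'birth_year' keys as used above:

import numpy as np
import pandas as pd
import wikibios

df = pd.DataFrame(wikibios.rows).sort_values('birth_year')
df['is_female'] = (df['gender'] == 'female').astype(float)

# Non-overlapping 1000-row chunks, mirroring the manual loop.
labels = np.arange(len(df)) // 1000
fraction_female = df.groupby(labels)['is_female'].mean()
median_year = df.groupby(labels)['birth_year'].median()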
self.view.scale(0.1)\n self.ats25.setChecked(False)\n self.ats75.setChecked(False)\n self.ats100.setChecked(False)\n self.ats250.setChecked(False)\n self.ats300.setChecked(False)\n\n def scale25(self):\n self.view.scale(0.25)\n self.ats10.setChecked(False)\n self.ats75.setChecked(False)\n self.ats100.setChecked(False)\n self.ats250.setChecked(False)\n self.ats300.setChecked(False)\n\n def scale75(self):\n self.view.scale(0.75)\n self.ats10.setChecked(False)\n self.ats25.setChecked(False)\n self.ats100.setChecked(False)\n self.ats250.setChecked(False)\n self.ats300.setChecked(False)\n\n def scale100(self):\n self.view.scale(1.0)\n self.ats10.setChecked(False)\n self.ats25.setChecked(False)\n self.ats75.setChecked(False)\n self.ats250.setChecked(False)\n self.ats300.setChecked(False)\n\n def scale250(self):\n self.view.scale(2.5)\n self.ats10.setChecked(False)\n self.ats25.setChecked(False)\n self.ats75.setChecked(False)\n self.ats100.setChecked(False)\n self.ats300.setChecked(False)\n\n def scale300(self):\n self.view.scale(3.0)\n self.ats10.setChecked(False)\n self.ats25.setChecked(False)\n self.ats75.setChecked(False)\n self.ats100.setChecked(False)\n self.ats250.setChecked(False)\n\n def rotate180(self):\n self.view.rotate(180) \n\n def grayscale(self):\n if self.isGray == True:\n self.view.refresh()\n self.isGray = False\n else:\n self.view.grayscale()\n self.isGray = True\n\nclass ImageWindow(QtGui.QScrollArea):\n def __init__(self):\n QtGui.QScrollArea.__init__(self)\n self.image = ImageView()\n self.setWidgetResizable(True)\n self.isMovie = False\n\n def load(self, path):\n self.image = ImageView()\n if path[-3:] == 'gif':\n self.image = AnimatedView()\n self.isMovie = True\n self.image.load(path)\n self.setWidget(self.image)\n if path[-3:] == 'gif':\n self.image.animate()\n\n def scale(self, factor):\n self.image.scale(factor)\n\n def scaled(self, w, h):\n self.image.scaled(w, h)\n\n def grayscale(self):\n self.image.grayscale()\n\n def rotate(self, angle):\n self.image.rotate(angle)\n\n def refresh(self, original = False):\n self.image.refresh(original)\n\nclass AnimatedView(QtGui.QLabel):\n def __init__(self):\n QtGui.QLabel.__init__(self)\n self.movie = QtGui.QMovie()\n self.setAlignment(Qt.AlignCenter)\n\n def load(self, path):\n self.reader = QtGui.QImageReader(path)\n self.image = self.reader.read()\n while not self.image.isNull():\n self.mWidth = self.image.height()\n self.mHeight = self.image.width()\n self.image = self.reader.read()\n self.w = self.mWidth\n self.h = self.mHeight\n self.movie = QtGui.QMovie(path)\n self.setMovie(self.movie)\n self.setMinimumSize(self.w, self.h)\n\n def scale(self, factor):\n pass\n\n def greyscale(self):\n pass\n\n def refresh(self):\n pass\n\n def scaled(self, w, h):\n pass\n\n def rotate(self, angle):\n pass\n\n def animate(self):\n self.movie.start()\n\nclass ImageView(QtGui.QWidget):\n def __init__(self):\n QtGui.QWidget.__init__(self)\n self.master = QtGui.QPixmap()\n self.display = QtGui.QPixmap()\n self.painter = QtGui.QPainter()\n self.factor = 1.0 #scale\n self.isGray = False\n\n def paintEvent(self, event):\n super().paintEvent(event)\n self.painter.begin(self)\n self.painter.drawPixmap(0, 0, self.w, self.h, self.display)\n self.painter.end()\n\n def scale(self, factor):\n self.factor = factor\n self.w = self.mWidth * factor\n self.h = self.mHeight * factor\n if self.isGray == True:\n self.display = self.master.scaled(self.w, self.h, 0, 1)\n self.grayscale()\n else:\n self.display = self.master.scaled(self.w, self.h, 0, 1)\n 
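The six scaleNN slots in ImageMenuBar differ only in their factor and in which sibling actions they uncheck; they can be generated in a loop. A hedged sketch (build_scale_menu is a hypothetical helper, not part of the original; it relies on the same QMenu.addAction(text, callable) overload the class already uses):

from functools import partial

SCALE_FACTORS = {'10%': 0.1, '25%': 0.25, '75%': 0.75,
                 '100%': 1.0, '250%': 2.5, '300%': 3.0}

def build_scale_menu(menu, view):
    actions = {}

    def select(label):
        # Scale the view, then leave only the chosen action checked.
        view.scale(SCALE_FACTORS[label])
        for text, action in actions.items():
            action.setChecked(text == label)

    for label in SCALE_FACTORS:
        action = menu.addAction(label, partial(select, label))
        action.setCheckable(True)
        actions[label] = action
    return actions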
self.setMinimumSize(self.w, self.h)\n self.repaint()\n\n def scaled(self, w, h):\n self.display = self.master.scaled(w, h, 1, 1)\n self.setMinimumSize(w, h)\n self.repaint()\n\n def grayscale(self):\n self.i = self.display.toImage()\n for x in range(int(self.w)):\n for y in range(int(self.h)):\n self.pxl = self.i.pixel(x, y)\n self.gray = QtGui.qGray(self.pxl)\n self.i.setPixel(x, y, QtGui.qRgb(self.gray, self.gray, self.gray))\n self.display = self.display.fromImage(self.i)\n self.isGray = True\n self.repaint()\n\n def rotate(self, angle):\n self.angle = angle\n self.transform = QtGui.QTransform()\n self.transform.rotate(angle)\n self.display = self.display.transformed(self.transform, Qt.SmoothTransformation)\n self.isRotated = True\n self.repaint()\n\n def refresh(self, original = False):\n self.isGray = False\n self.display = self.master\n if original == False:\n self.scale(self.factor)\n self.repaint()\n\n def load(self, filename):\n self.master = QtGui.QPixmap(filename)\n self.mWidth = self.master.width()\n self.mHeight = self.master.height()\n self.w, self.h = self.mWidth, self.mHeight\n self.setMinimumSize(self.w, self.h)\n self.display = self.master.copy()\n self.repaint()\n\n\n\nif __name__ == '__main__':\n app = QtGui.QApplication(sys.argv)\n window = QtGui.QMainWindow()\n image = ImageWindow()\n window.setMenuBar(ImageMenuBar(image))\n window.setCentralWidget(image)\n window.resize(700, 500)\n window.show()\n app.exec_()\n","sub_path":"image-viewer.py","file_name":"image-viewer.py","file_ext":"py","file_size_in_byte":8259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"362852224","text":"import itertools\nimport sys\nimport fileinput\nfrom itertools import combinations\nimport random\n\ndef permute(n):\n\n import itertools\n\n return list(set(list(itertools.permutations(n))))\n\n\natom_type1 = \"Fe\"\natom_type2 = \"Pt\"\n\nn_atoms = 32\natom_positions = [x for x in range(1,n_atoms+1)]\n\nnum_random_picks = 1000\nstart_filtering = False\n\nfile_count = 0\ncritical_number = 0\n\nfor n_type1 in range(0,n_atoms+1):\n\n print(\"Number of \", atom_type1, \" atmos: \", n_type1)\n print(\"Critical_number: \", critical_number)\n\n if n_type1 >= n_atoms-critical_number:\n n_type2 = n_atoms - n_type1\n perm_list = list( combinations(atom_positions, n_type2) )\n print(\"Number of permutations: \", len(perm_list))\n for file in range(0, len(perm_list)):\n with open(\"i_lsms\") as main:\n with open('i_lsms_'+str(file_count), 'w') as new_main:\n input_data = main.read()\n count = 1\n for value in range(1,n_atoms+1): \n if value in perm_list[file]:\n atom_replacement = atom_type2\n else:\n atom_replacement = atom_type1\n\n input_data = input_data.replace('atom'+str(count)+'_type', atom_replacement)\n count += 1\n\n new_main.write(input_data)\n file_count = file_count + 1\n\n else:\n\n if start_filtering == False:\n perm_list = list( combinations(atom_positions, n_type1) )\n\n if len(perm_list)>num_random_picks and start_filtering==False:\n critical_number = n_type1\n start_filtering = True\n\n if start_filtering == False:\n print(\"Number of permutations: \", len(perm_list))\n else:\n print(\"Number of permutations: \", num_random_picks)\n\n if start_filtering == False:\n\n num_perms = len(perm_list)\n if num_perms == 0:\n num_perms = 1\n\n for file in range(0, num_perms):\n with open(\"i_lsms\") as main:\n with open('i_lsms_'+str(file_count), 'w') as new_main:\n input_data = main.read()\n count = 1\n for value in range(1,n_atoms+1): \n 
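The per-pixel setPixel loop in ImageView.grayscale runs in Python for every pixel; a common Qt 4 recipe converts in one native pass by mapping onto an indexed gray palette. A sketch under the assumption that QImage.convertToFormat's colour-table overload is available in PyQt4 (the result approximates qGray rather than matching it exactly):

from PyQt4 import QtGui

GRAY_TABLE = [QtGui.qRgb(i, i, i) for i in range(256)]

def to_grayscale(pixmap):
    # One native conversion instead of a per-pixel Python loop.
    image = pixmap.toImage().convertToFormat(QtGui.QImage.Format_Indexed8,
                                             GRAY_TABLE)
    return QtGui.QPixmap.fromImage(image)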
if value in perm_list[file]:\n atom_replacement = atom_type1\n else:\n atom_replacement = atom_type2\n\n input_data = input_data.replace('atom'+str(count)+'_type', atom_replacement)\n count += 1\n\n new_main.write(input_data)\n file_count = file_count + 1\n\n else:\n type1_positions = []\n for pick in range(1,num_random_picks+1):\n type1_positions.append( random.sample(range(1,n_atoms+1),n_type1) ) \n for file in range(0, len(type1_positions)):\n with open(\"i_lsms\") as main:\n with open('i_lsms_'+str(file_count), 'w') as new_main:\n input_data = main.read()\n count = 1\n for value in range(1,n_atoms+1): \n if value in type1_positions[file]:\n atom_replacement = atom_type1\n else:\n atom_replacement = atom_type2\n \n input_data = input_data.replace('atom'+str(count)+'_type', atom_replacement)\n count += 1\n\n new_main.write(input_data)\n file_count = file_count + 1\n\n","sub_path":"MC_NN_Massi/lsms_simulations/permutations_repetition.py","file_name":"permutations_repetition.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"64277076","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\nfrom dangdang.items import DangdangItem\n\n\nclass BookSpider(scrapy.Spider):\n name = 'book'\n allowed_domains = ['dangdang.com']\n start_urls = ['http://category.dangdang.com/cp01.00.00.00.00.00.html']\n\n def parse(self, response):\n goodslist = response.xpath('//ul[@class=\"filtrate_list\"]/li[1]').xpath('.//span')\n for goods in goodslist:\n try:\n category_big = goods.xpath('a/text()').extract()\n category_big_id = goods.xpath('a/@href').extract_first().split('.')[1]\n category_big_url = \"http://category.dangdang.com/pg1-cp01.{}.00.00.00.00.html\".format(str(category_big_id))\n\n yield scrapy.Request(url=category_big_url, callback=self.parse_second,\n meta={\"ID1\":category_big_id, \"ID2\":category_big})\n except Exception as e:\n raise e\n\n\n def parse_second(self, response):\n goodslist = response.xpath('//ul[@class=\"filtrate_list\"]/li[1]').xpath('.//span')\n for goods in goodslist:\n try:\n # category_small = goods.xpath('a/text()').pop().replace(' ', '')\n category_small = goods.xpath('a/text()').extract()\n # category_small_id = goods.xpath('a/@href').pop().split('.')[1]\n category_small_id = goods.xpath('a/@href').extract_first().split('.')[2]\n\n for i in range(1, 101):\n category_small_url = \"http://category.dangdang.com/pg{}-cp01.{}.{}.00.00.00.html\".format(str(i), str(response.meta[\"ID1\"]), str(category_small_id))\n yield scrapy.Request(url=category_small_url, callback=self.parse_detail,\n meta={\"ID1\":response.meta[\"ID1\"], \"ID2\":response.meta[\"ID2\"],\n \"ID3\":category_small_id, \"ID4\":category_small})\n except Exception as e:\n raise e\n\n def parse_detail(self, response):\n try:\n goodslist = response.xpath('//*[@class=\"bigimg\"]/li')\n for goods in goodslist:\n item = DangdangItem()\n try:\n comments = goods.xpath('.//p[@class=\"detail\"]/text()').extract()\n title = goods.xpath('.//p[@class=\"name\"]/a/text()').extract()\n time = goods.xpath('.//p[@class=\"search_book_author\"]/span[2]/text()').extract_first().replace(\" /\", '')\n press = goods.xpath('.//p[@class=\"search_book_author\"]/span[3]/a/text()').extract()\n price = goods.xpath('.//p[@class=\"price\"]//span[@class=\"search_now_price\"]/text()').extract()\n discount = goods.xpath('.//p[@class=\"price\"]//span[@class=\"search_discount\"]/text()').extract()\n\n item['comments'] = comments[0].encode('utf-8')\n 
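The script above decides between exhaustive enumeration and random sampling by materialising the combination list and checking its length; the threshold can be computed directly. A sketch using math.comb (Python 3.8+):

import math

def use_random_sampling(n_atoms, n_type1, num_random_picks):
    # C(n_atoms, n_type1) without building any permutation list.
    return math.comb(n_atoms, n_type1) > num_random_picks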
item['title'] = title[0].encode('utf-8')\n item['time'] = time.encode('utf-8')\n item['press'] = press[0].encode('utf-8')\n item['price'] = price[0].encode('utf-8')\n item['discount'] = discount[0].encode('utf-8')\n item['category1'] = response.meta[\"ID4\"][0].encode('utf-8')\n item['category2'] = response.meta[\"ID2\"][0].encode('utf-8')\n except Exception as e:\n raise e\n yield item\n except Exception as e:\n raise e\n","sub_path":"dangdang/dangdang/spiders/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"496028407","text":"#!/usr/bin/env python\n\nfrom distutils.command.build_ext import build_ext\ntry:\n from setuptools import setup, Extension\nexcept ImportError:\n from distutils.core import setup, Extension\n\nfrom ttyprompt import ttyprompt\n\n\n_classifiers = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: ISC License (ISCL)',\n 'Operating System :: OS Independent',\n 'Programming Language :: C',\n 'Programming Language :: Python',\n 'Topic :: Scientific/Engineering :: Mathematics',\n]\n\nwith open('README.rst', 'r') as rst_file:\n _long_description = rst_file.read()\n\n_setup_args = {\n 'author': ttyprompt.__author__,\n 'author_email': ttyprompt.__email__,\n 'classifiers': _classifiers,\n 'description': ttyprompt.__doc__,\n 'license': ttyprompt.__license__,\n 'long_description': _long_description,\n 'name': 'TTYprompt',\n 'url': 'https://bitbucket.org/eliteraspberries/ttyprompt',\n 'version': ttyprompt.__version__,\n}\n\n_requirements = [\n]\n\n_setup_args['install_requires'] = _requirements\n\ntty_module = Extension(\n 'ttyprompt.tty',\n sources=['getline.c', 'ttyprompt.c', 'ttyprompt/tty.c'],\n include_dirs=['.'],\n)\n\nextensions = [\n tty_module,\n]\n\ncython_sources = [\n 'ttyprompt/tty.pyx',\n]\n\n\nclass BuildExtCommand(build_ext):\n\n user_options = build_ext.user_options + [\n ('use-cython', None, 'compile Cython sources'),\n ]\n\n boolean_options = build_ext.boolean_options + [\n 'use-cython',\n ]\n\n def initialize_options(self):\n build_ext.initialize_options(self)\n self.use_cython = False\n\n def finalize_options(self):\n build_ext.finalize_options(self)\n\n def run(self):\n if self.use_cython:\n self.compile_cython()\n build_ext.run(self)\n\n def compile_cython(self):\n global cython_sources\n try:\n from Cython.Build import cythonize\n cythonize(cython_sources)\n except ImportError:\n pass\n\n\nif __name__ == '__main__':\n\n setup(\n packages=['ttyprompt'],\n ext_modules=extensions,\n cmdclass={\n 'build_ext': BuildExtCommand,\n },\n **_setup_args\n )\n","sub_path":"pypi_install_script/TTYprompt-0.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"12416742","text":"from django.contrib.contenttypes.models import ContentType\nfrom django.db.models.query_utils import Q\n\nimport django_filters\n\nfrom bluebottle.bluebottle_drf2.pagination import BluebottlePagination\nfrom bluebottle.utils.utils import get_client_ip\nfrom bluebottle.utils.views import (\n ListCreateAPIView, ListAPIView, RetrieveUpdateDestroyAPIView, OwnerListViewMixin\n)\nfrom bluebottle.utils.permissions import (\n OneOf, ResourcePermission, RelatedResourceOwnerPermission, ResourceOwnerPermission\n)\nfrom bluebottle.projects.models import Project\nfrom bluebottle.wallposts.permissions 
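parse_detail in book.py indexes extract() results with [0], which raises IndexError whenever a selector matches nothing; Scrapy's SelectorList.extract_first(default=...) is the usual guard. A sketch of the same lookups:

# Inside the parse_detail callback, per goods selector:
title = goods.xpath('.//p[@class="name"]/a/text()').extract_first(default='')
price = goods.xpath('.//p[@class="price"]//span[@class="search_now_price"]/text()').extract_first(default='')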
import RelatedManagementOrReadOnlyPermission\n\nfrom .models import TextWallpost, MediaWallpost, MediaWallpostPhoto, Wallpost, Reaction\nfrom .serializers import (TextWallpostSerializer, MediaWallpostSerializer,\n MediaWallpostPhotoSerializer, ReactionSerializer,\n WallpostSerializer)\nfrom .permissions import WallpostOwnerPermission\n\n\nclass WallpostFilter(django_filters.FilterSet):\n parent_type = django_filters.CharFilter(name=\"content_type__name\")\n parent_id = django_filters.NumberFilter(name=\"object_id\")\n\n class Meta:\n model = Wallpost\n fields = ['parent_type', 'parent_id']\n\n\nclass SetAuthorMixin(object):\n def perform_create(self, serializer):\n serializer.save(author=self.request.user, ip_address=get_client_ip(self.request))\n\n def perform_update(self, serializer):\n serializer.save(editor=self.request.user, ip_address=get_client_ip(self.request))\n\n\nclass FilterQSParams(object):\n\n def get_qs(self, qs):\n parent_id = self.request.query_params.get('parent_id', None)\n parent_type = self.request.query_params.get('parent_type', None)\n if parent_type == 'project':\n qs = qs.filter(conten_object__slug=parent_id)\n elif parent_id:\n qs = qs.filter(conten_object__id=parent_id)\n\n text = self.request.query_params.get('text', None)\n if text:\n qs = qs.filter(Q(title__icontains=text) |\n Q(description__icontains=text))\n\n status = self.request.query_params.get('status', None)\n if status:\n qs = qs.filter(status=status)\n return qs\n\n\nclass WallpostOwnerFilterMixin(object):\n def get_queryset(self):\n qs = super(WallpostOwnerFilterMixin, self).get_queryset()\n permission = '{}.api_read_{}'.format(\n self.model._meta.app_label, self.model._meta.model_name\n )\n\n if not self.request.user.has_perm(permission):\n user = self.request.user if self.request.user.is_authenticated else None\n qs = qs.filter(\n Q(project_wallposts__owner=user) |\n Q(task_wallposts__author=user) |\n Q(task_wallposts__project__owner=user) |\n Q(task_wallposts__project__promoter=user) |\n Q(fundraiser_wallposts__owner=user)\n )\n\n return qs\n\n\nclass WallpostList(WallpostOwnerFilterMixin, ListAPIView):\n queryset = Wallpost.objects.all()\n serializer_class = WallpostSerializer\n pagination_class = BluebottlePagination\n permission_classes = (\n OneOf(ResourcePermission, RelatedResourceOwnerPermission),\n RelatedManagementOrReadOnlyPermission\n )\n\n def get_queryset(self, queryset=queryset):\n queryset = super(WallpostList, self).get_queryset()\n\n # Some custom filtering projects slugs.\n parent_type = self.request.query_params.get('parent_type', None)\n parent_id = self.request.query_params.get('parent_id', None)\n if parent_type == 'project':\n content_type = ContentType.objects.get_for_model(Project)\n else:\n white_listed_apps = ['projects', 'tasks', 'fundraisers']\n content_type = ContentType.objects.filter(\n app_label__in=white_listed_apps).get(model=parent_type)\n queryset = queryset.filter(content_type=content_type)\n\n if parent_type == 'project' and parent_id:\n try:\n project = Project.objects.get(slug=parent_id)\n except Project.DoesNotExist:\n return Wallpost.objects.none()\n queryset = queryset.filter(object_id=project.id)\n else:\n queryset = queryset.filter(object_id=parent_id)\n\n queryset = queryset.order_by('-created')\n return queryset\n\n\nclass WallpostPagination(BluebottlePagination):\n page_size = 5\n\n\nclass TextWallpostList(WallpostOwnerFilterMixin, SetAuthorMixin, ListCreateAPIView, FilterQSParams):\n queryset = TextWallpost.objects.all()\n serializer_class = 
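WallpostFilter above uses django-filter's pre-2.0 name= keyword; on django-filter >= 2.0 the same filter is spelled with field_name=. A sketch of the updated class, assuming the same imports:

import django_filters

class WallpostFilter(django_filters.FilterSet):
    parent_type = django_filters.CharFilter(field_name="content_type__name")
    parent_id = django_filters.NumberFilter(field_name="object_id")

    class Meta:
        model = Wallpost
        fields = ['parent_type', 'parent_id']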
TextWallpostSerializer\n filter_class = WallpostFilter\n pagination_class = WallpostPagination\n\n permission_classes = (\n OneOf(ResourcePermission, RelatedResourceOwnerPermission),\n RelatedManagementOrReadOnlyPermission\n )\n\n def get_queryset(self, queryset=None):\n queryset = super(TextWallpostList, self).get_queryset()\n # Some custom filtering projects slugs.\n parent_type = self.request.query_params.get('parent_type', None)\n parent_id = self.request.query_params.get('parent_id', None)\n if parent_type == 'project' and parent_id:\n try:\n project = Project.objects.get(slug=parent_id)\n except Project.DoesNotExist:\n return Wallpost.objects.none()\n queryset = queryset.filter(object_id=project.id)\n queryset = queryset.order_by('-created')\n return queryset\n\n\nclass TextWallpostDetail(RetrieveUpdateDestroyAPIView, SetAuthorMixin):\n queryset = TextWallpost.objects.all()\n serializer_class = TextWallpostSerializer\n permission_classes = (OneOf(ResourcePermission, RelatedResourceOwnerPermission), )\n\n\nclass MediaWallpostList(TextWallpostList, SetAuthorMixin):\n queryset = MediaWallpost.objects.all()\n serializer_class = MediaWallpostSerializer\n filter_class = WallpostFilter\n pagination_class = WallpostPagination\n\n permission_classes = (\n OneOf(ResourcePermission, RelatedResourceOwnerPermission),\n RelatedManagementOrReadOnlyPermission\n )\n\n def perform_create(self, serializer):\n self.check_object_permissions(\n self.request,\n serializer.Meta.model(**serializer.validated_data)\n )\n return super(MediaWallpostList, self).perform_create(serializer)\n\n\nclass MediaWallpostDetail(TextWallpostDetail):\n queryset = MediaWallpost.objects.all()\n serializer_class = MediaWallpostSerializer\n\n\nclass WallpostDetail(RetrieveUpdateDestroyAPIView):\n queryset = Wallpost.objects.all()\n serializer_class = WallpostSerializer\n permission_classes = (\n OneOf(ResourcePermission, RelatedResourceOwnerPermission),\n WallpostOwnerPermission\n )\n\n\nclass MediaWallpostPhotoPagination(BluebottlePagination):\n page_size = 4\n\n\nclass MediaWallpostPhotoList(OwnerListViewMixin, SetAuthorMixin, ListCreateAPIView):\n queryset = MediaWallpostPhoto.objects.all()\n serializer_class = MediaWallpostPhotoSerializer\n pagination_class = MediaWallpostPhotoPagination\n permission_classes = (OneOf(ResourcePermission, ResourceOwnerPermission), )\n\n owner_filter_field = 'author'\n\n def create(self, request, *args, **kwargs): # FIXME\n \"\"\"\n Work around browser issues.\n\n Adding photos to a wallpost works correctly in Chrome. Firefox (at least\n FF 24) sends the ```mediawallpost``` value to Django with the value\n 'null', which is then interpreted as a string in Django. This is\n incorrect behaviour, as ```mediawallpost``` is a relation.\n\n Eventually, this leads to HTTP400 errors, effectively breaking photo\n uploads in FF.\n\n The quick fix is detecting this incorrect 'null' string in ```request.POST```\n and setting it to an empty string. 
```request.POST``` is mutable because\n of the multipart nature.\n\n NOTE: This is something that should be fixed in the Ember app or maybe even\n Ember itself.\n \"\"\"\n post = request.POST.get('mediawallpost', False)\n if post and post == u'null':\n request.POST['mediawallpost'] = u''\n return super(MediaWallpostPhotoList, self).create(request, *args, **kwargs)\n\n\nclass MediaWallpostPhotoDetail(RetrieveUpdateDestroyAPIView):\n queryset = MediaWallpostPhoto.objects.all()\n serializer_class = MediaWallpostPhotoSerializer\n\n permission_classes = (OneOf(ResourcePermission, ResourceOwnerPermission), )\n\n\nclass ReactionList(OwnerListViewMixin, SetAuthorMixin, ListCreateAPIView):\n queryset = Reaction.objects.all()\n serializer_class = ReactionSerializer\n\n permission_classes = (OneOf(ResourcePermission, ResourceOwnerPermission), )\n pagination_class = BluebottlePagination\n filter_fields = ('wallpost',)\n\n owner_filter_field = 'author'\n\n\nclass ReactionDetail(SetAuthorMixin, RetrieveUpdateDestroyAPIView):\n queryset = Reaction.objects.all()\n serializer_class = ReactionSerializer\n permission_classes = (OneOf(ResourcePermission, ResourceOwnerPermission), )\n","sub_path":"bluebottle/wallposts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"409252890","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 12 14:25:03 2019\n\n@author: student\n\"\"\"\nimport numpy as np\nimport pandas as pd\nmovie=pd.read_csv(\"movies.csv\")\n#print(movie['genres'])\nlst=['Action','Comedy','Romance','Thriller']\nlst2=[]\nmoviecolumns=movie[['genres','title']]\n#print(moviecolumns)\nstr1=movie.genres.astype(str)\na=movie.genres.str.split('|')#.str.get(1)\nu=0\nmovie['splitted']=a\nprint(movie['splitted'])\na=0\nb=0\nc=0\nd=0\nfor j in movie['splitted']:\n if lst[0] in j:\n a=a+1\n elif lst[1] in j:\n b=b+1\n elif lst[2] in j:\n c=c+1\n elif lst[3] in j:\n d=d+1\nprint(a,b,c,d)\n\ndict2={}\n\n \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#i=0\n#c=0\n#for j in movie['genres']:\n# if lst[c] in j:\n# print(j)\n# i=i+1 \n# if lst[c+1] in j:\n# print(j)\n# i=i+1\n \n# print(i)\n# c=c+1\n \n \n # if c > 4:\n# break\n\n#for i in genres:\n# if i[0]==\"Action|Comedy|Romance|Thriller\":\n# print(\"MASALA MOVIES\")\n# print(i[1]['title'])\n\n\n\n\n#\n#for j in movie['genres']:\n# j.split(\"\\\\|\")\n# print(j)\n","sub_path":"Bussiness Analytics/movieanalysis.py","file_name":"movieanalysis.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"41541385","text":"from datetime import datetime, timedelta\n\nwith open('input.txt', 'r') as f:\n data = f.read().split('\\n')\n\ndef rationalize_timestamps(diary):\n\tfor i, row in enumerate(diary):\n\t\tif row[0].hour > 1:\n\t\t\tdiary[i][0] += timedelta(days=1)\n\t\t\tdiary[i][0] = diary[i][0].replace(hour=0, minute=0)\n\t\tif row[0].hour == 0 and row[1][0] == '#':\n\t\t\tdiary[i][0] = diary[i][0].replace(hour=0, minute=0)\n\treturn diary\n\ndata = [[datetime.strptime(row[row.find(\"[\")+1:row.find(\"]\")], \"%Y-%m-%d %H:%M\"), row[row.find(\"]\")+8:]] for row in data]\ndata = sorted(rationalize_timestamps(data))\n\nschedules = {}\n\nfor row in data:\n\tif row[1][0] == '#':\n\t\tguard = row[1].split(' ')[0]\n\t\tif guard in schedules.keys():\n\t\t\tschedules[guard].append([0 for 
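The elif chain in movieanalysis.py tallies each movie under only the first matching genre; counting every listed genre at once is a one-liner with Series.explode (pandas >= 0.25). A sketch assuming the same movies.csv with a pipe-separated genres column:

import pandas as pd

movie = pd.read_csv("movies.csv")

# One row per (movie, genre), then occurrences of each genre.
genre_counts = movie['genres'].str.split('|').explode().value_counts()
print(genre_counts[['Action', 'Comedy', 'Romance', 'Thriller']])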
i in range(60)])\n\t\telse:\n\t\t\tschedules[guard] = [[0 for i in range(60)]]\n\telif row[1] == 'asleep':\n\t\tfor minute in range(row[0].minute, 60):\n\t\t\tschedules[guard][-1][minute] = 1\n\telif row[1] == 'up':\n\t\tfor minute in range(row[0].minute, 60):\n\t\t\tschedules[guard][-1][minute] = 0\n\nguard_stats = {}\n\nfor key, value in schedules.items():\n\tsumma = sum([sum(day) for day in value])\n\tcount_list = [0 for i in range(60)]\n\tfor day in value:\n\t\tfor minute, value in enumerate(day):\n\t\t\tcount_list[minute] += value\n\tguard_stats[key] = {'summa': None, 'type_value': None, 'schedule_sum': count_list}\n\tguard_stats[key]['summa'] = summa\n\tguard_stats[key]['type_value'] = count_list.index(max(count_list))\n\nmatch = ['noguard', 0, 0]\nprint(guard_stats)\nfor key, value in guard_stats.items():\n\tfor minute, sleep_sum in enumerate(guard_stats[key]['schedule_sum']):\n\t\tif sleep_sum > match[1]:\n\t\t\tmatch = [key, sleep_sum, minute]\nprint(match)\nprint(int(match[0][1:])*match[2])\n","sub_path":"4/4b.py","file_name":"4b.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"430121232","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Mar 22 15:41:08 2018\r\n\r\n@author: Bi Jianshui\r\n\r\nEmail: bijianshui@gokudata.com\r\n\"\"\"\r\n\r\nimport time\r\nimport emulator\r\nfrom strategy import Strategy\r\nimport itertools\r\nimport datetime\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n\r\n# def cal_ema(data, n):\r\n# ema = np.zeros(shape=len(data))\r\n# ema[0] = np.mean(data[0:n - 1])\r\n# for i in range(len(data) - 1):\r\n# ema[i + 1] = (data[i + 1] - ema[i]) * 2 / (n + 1) + ema[i]\r\n# return ema\r\n\r\nclass MyStrategy(Strategy):\r\n\r\n def initialize(self):\r\n self.name = '(MA_34-MA_12)MA_34'\r\n\r\n start_date = '2013-01-01'\r\n end_date = '2017-12-31'\r\n self.load_data('5MIN', start_date, end_date, '500')\r\n # self.load_data('5MIN', start_date, end_date, 'all')\r\n # self.load_data('5MIN', start_date, end_date, '300')\r\n # self.params = [20]\r\n\r\n N = [25]\r\n M = [1000]\r\n self.params = list(itertools.product(N, M))\r\n\r\n '''\r\n para1 = [20,40,60]\r\n para2 = [5,15]\r\n self.params = list(itertools.product(para1,para2))\r\n '''\r\n\r\n @staticmethod\r\n def handle_data(data, param):\r\n '''\r\n data['vwap_ma'] = data['vwap_adj'].rolling(window=param).apply(np.mean)\r\n data['close_ma'] = data['close_adj'].rolling(window=param).apply(np.mean)\r\n data['factor'] = data['vwap_ma'] / data['close_ma']\r\n '''\r\n\r\n open = data['open_adj']\r\n high = data['high_adj']\r\n low = data['low_adj']\r\n close = data['close_adj']\r\n vwap = data['vwap_adj']\r\n vol = data['volume']\r\n amount = data['amount']\r\n wb = data['WB']\r\n buy_bo_5 = data['buy_bo_5']\r\n sell_bo_5 = data['sell_bo_5']\r\n h_l = high - low\r\n close_tem = np.append(close[0], close)\r\n # 认为前一期收盘价格为close[0]\r\n # print 'type of close', close.dtype\r\n close_p = np.delete(close_tem, -1)\r\n # print 'type of close_p', close_p.dtype\r\n open_tem = np.append(open[0], open)\r\n open_p = np.delete(open_tem, -1)\r\n high_tem = np.append(high[0], high)\r\n high_p = np.delete(high_tem, -1)\r\n low_tem = np.append(low[0], low)\r\n low_p = np.delete(low_tem, -1)\r\n\r\n # average = (high + low + close) / 3\r\n # average_tem = np.append(average[0], average)\r\n # # 认为前一期收盘价格为close[0]\r\n # # print 'type of close', close.dtype\r\n # average_p = np.delete(average_tem, -1)\r\n # volume_index = 
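The final argmax in 4b.py reduces to counting (guard, minute) pairs; a compact equivalent using collections.Counter, assuming the schedules dict built above (keys like '#10'):

from collections import Counter

sleep_counts = Counter()
for guard, days in schedules.items():
    for day in days:
        for minute, asleep in enumerate(day):
            sleep_counts[(guard, minute)] += asleep

(guard, minute), _ = sleep_counts.most_common(1)[0]
print(int(guard[1:]) * minute)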
np.zeros(shape=average)\r\n #\r\n # for i in range(len(average_p)-1):\r\n # if average < average_p:\r\n # volume_index[i] = -1\r\n # else:\r\n # volume_index[i] = 1\r\n #\r\n # vol_weight = vol * volume_index\r\n MA_34 = close.rolling(window=34).apply(np.mean)\r\n MA_12 = close.rolling(window=12).apply(np.mean)\r\n\r\n # EMA_34 = close.rolling(window=34).apply(pd.DataFrame.ewm)\r\n # EMA_12 = close.rolling(window=12).apply(pd.DataFrame.ewm)\r\n # print('param:',param)\r\n # ts_short = param[0]\r\n # ts_long = param[1]\r\n # data['bo_5'] = data['sell_bo_5'] - data['buy_bo_5']\r\n # # bo_5是资金净流入\r\n # data['bo_5_sum'] = data['bo_5'].rolling(window=ts_short).apply(np.sum)\r\n # # data['bo_5_sum'] = data['bo5'].rolling(window=ts_short).apply(np.sum) # 60min\r\n # data['factor'] = data['bo_5_sum'].rolling(window=(ts_long + 1)).apply(\r\n # lambda x: x.argsort().argsort()[-1] * 1.0 / ts_long)\r\n # ma5 = open.rolling(window=5).apply(np.mean)\r\n # ma10 = open.rolling(window=10).apply(np.mean)\r\n\r\n # print(volume_index.shape)\r\n # print(EMA_34.shape)\r\n # print(difference.shape)\r\n data['factor'] = (MA_34 - MA_12) / MA_34\r\n print('factor:')\r\n print(data['factor'])\r\n return data[['ticker', 'time', 'factor']]\r\n\r\n\r\nif __name__ == '__main__':\r\n starttime = datetime.datetime.now()\r\n t = MyStrategy()\r\n e = emulator.Emulator(t)\r\n e.run(mode='parallel')\r\n #e.run()\r\n endtime = datetime.datetime.now()\r\n print (endtime - starttime)\r\n","sub_path":"(MA_34-MA_12)MA_34.py","file_name":"(MA_34-MA_12)MA_34.py","file_ext":"py","file_size_in_byte":4219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"314194366","text":"import re\nimport requests\nimport random\nimport json\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom bs4 import BeautifulSoup\nfrom requests import exceptions\n'''\n功能介绍:\n 爬取全国各个城市未来7天的天气状况\n获取城市的代码:\n http://weather.china.com.cn/domestic.html\n http://qq.ip138.com/weather/ 备用站点\n\n获取相应省市对应情况:\n http://api.weather.china.com.cn/weather/data/f城市代码.js\n\n获取未来7天的天气:\n http://www.weather.com.cn/weather/101200104.shtml\n http://www.weather.com.cn/weather/城市代码.shtml\n\n想法:\n 1.通过城市代码找出相对应的省市\n 2.将其分类,然后后通过城市代码获取天气\n'''\n\n\nclass ChinaWeatherSpider(object):\n # 初始化构造函数\n def __init__(self):\n # 城市代码url\n self.city_code_url = 'http://weather.china.com.cn/domestic.html'\n # self.weather_url = weather_url # 天气url\n # 请求头的设置\n self.headers_list = [\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.106 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.18 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:21.0) Gecko/20100101 Firefox/21.0'\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2486.0 Safari/537.36 Edge/13.10586'\n ]\n self.headers = {\n 'User-Agent': random.choice(self.headers_list),\n 'Connection': 'close'\n }\n\n # 获取各大城市的代码\n def get_city_code(self):\n req = requests.get(url=self.city_code_url, headers=self.headers)\n soup = BeautifulSoup(req.content, 'html.parser')\n city_code_list = [] # 添加城市代码列表\n # 获取城市代码\n for city_code in soup.find('div', id='nav1').find_all('a'):\n city_code_list.append(city_code.get('data-areaid').split(','))\n return city_code_list # 二纬列表\n\n # 获取每个省市区相关信息\n def divide_city_code(self):\n city_list = [] # 获取所有省市区的信息\n # 各大省的列表\n for list in self.get_city_code():\n # 市城市代码\n for 
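The commented-out cal_ema helper and the EMA_34/EMA_12 lines in the strategy sketch an exponential moving average; pandas has this built in. A sketch (span=n gives the same 2/(n+1) smoothing constant as cal_ema):

import pandas as pd

def ema(close, n):
    # alpha = 2 / (n + 1), matching the commented cal_ema recursion.
    return close.ewm(span=n, adjust=False).mean()

# e.g. data['factor'] = (ema(close, 34) - ema(close, 12)) / ema(close, 34)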
                dic = {}  # province/city/district plus timestamp\n                url = 'http://api.weather.china.com.cn/weather/data/f' + city_code + '.js'\n                req = requests.get(url=url, headers=self.headers)\n                p = re.compile(r'\{.*\}').search(req.text, re.M | re.I)\n                dict = json.loads(p.group())\n                if dict['city']['c7'] != \"\":\n                    dic['province'] = dict['city']['c7'] + '省'  # province\n                    dic['city'] = dict['city']['c5'] + '市'  # city\n                    dic['citycode'] = city_code  # city code\n                    dic['longitude'] = dict['city']['c14']  # longitude\n                    dic['latitude'] = dict['city']['c13']  # latitude\n                    dic['area'] = dict['city']['c3'] + '区'  # district\n                    dic['areacode'] = city_code  # district code\n                    dic['datetime'] = dict['forecast']['f0']  # timestamp\n                    city_list.append(dic)\n                    print(dic)\n        return city_list\n\n    # fetch the weather and return all weather data\n    def get_climate(self):\n        print('程序开始')\n        count = 0\n        weather_list = []  # weather for every city\n        time_count = 0\n        all_city = self.read_data('data/save_temporary.json')\n        # all_city = self.divide_city_code()\n\n        for list in all_city:  # iterate over all city records\n            try:\n                url = 'http://www.weather.com.cn/weather/' + list['areacode'] + '.shtml'\n                req = requests.get(url=url, headers=self.headers)\n                soup = BeautifulSoup(req.content, 'html.parser')\n                time_count = 0\n                # the seven days of weather info\n                for clim in soup.find(\n                        'ul', class_=\"t clearfix\").find_all(\n                        'li', class_=[\"skyid\", \"sky\"]):\n                    weather_map = {}  # one day's weather\n                    weather_map['area'] = list['area']  # district name\n                    weather_map['areacode'] = list['areacode']  # district code\n                    # parse the timestamp string into a datetime\n                    t = datetime.strptime(\n                        list['datetime'],\n                        '%Y%m%d%H%M') + timedelta(days=time_count)\n                    # format back to a string: the date of this forecast day\n                    weather_map['datetime'] = t.strftime('%Y%m%d%H%M')\n                    weather_map['climate'] = clim.find(\n                        'p', class_='wea').text  # weather description\n                    weather_map['temperature'] = clim.find(\n                        'p', class_='tem').text.strip('\\n')  # temperature\n                    weather_map['winddirection'] = [\n                        x.get('title')\n                        for x in clim.find('p', class_='win').find_all('span')\n                    ]  # wind direction\n                    weather_map['windcount'] = clim.find(\n                        'p', class_='win').i.text  # wind force\n                    # print(weather_map)\n                    weather_list.append(weather_map)\n                    time_count = time_count + 1\n            except exceptions.Timeout as e:\n                print('超时请求下一条链接', e)\n            # raised when the server hits its connection limit\n            except requests.exceptions.ConnectionError as r:\n                print('连接错误', r)\n\n            count = count + 1\n            print(count)\n        # persist the weather data\n        self.save_data(weather_list, 'data/climate.json')\n        return weather_list\n\n    '''\n    Original data:\n    [\n        {},\n        {},\n    ]\n    Rearranged data:\n    [{\n        \"湖北省\": {\n            \"武汉市\": [\n                {\n                    \"蔡甸区\": \"1\"\n                },\n                {\n                    \"新洲区\": \"1\"\n                }\n            ]\n        }\n    }]\n    {'province': '海南省', 'city': '东方市', 'citycode': '101310202', 'longitude': 19.06, 'latitude': 108.37, 'area': '东方区', 'areacode': '101310202', 'datetime': '201804271800'}\n    '''\n\n    # clean and regroup the province/city/district records\n    def clear_data(self):\n        all_city = self.divide_city_code()\n        # save a temporary snapshot\n        self.save_data(all_city, 'data/save_temporary.json')\n        # all_city = json.loads(self.read_data('lis.json'))\n        province = None  # name of the current province\n        city = None  # name of the current city\n\n        acount_list = []  # overall result list\n        province_map = {}  # map for one province\n        city_map = {}  # map of the cities within a province\n        city_list = []  # list of the districts within a city\n\n        provice_count = 1  # province counter\n        city_count = 1  # city counter\n        province_set = set()  # set used to detect a new province\n        city_set = set()  # set used to detect a new city\n\n        # append a sentinel row so the last group gets flushed\n        all_city.append({\n            'province': '#',\n            'city': '#',\n            'citycode': '#',\n            'longitude': '#',\n            'latitude': '#',\n            'area': '#',\n            'areacode': '#',\n            'datetime': '#'\n        })\n        # regroup as province -> city -> districts\n        for list in all_city:\n            # the set only grows when a new city appears\n            city_set.add(list['city'])\n            if city_count == len(city_set):\n                # from the second city onwards, flush the previous city's districts\n                if city_count >= 2:\n                    city_map[city] = city_list\n                    city_list = []\n                city_count = city_count + 1\n
                city = list['city']\n\n            # the set only grows when a new province appears\n            province_set.add(list['province'])\n            if provice_count == len(province_set):\n                # from the second province onwards, flush the previous province's cities\n                if provice_count >= 2:\n                    province_map[province] = city_map\n                    acount_list.append(province_map)\n                    province_map = {}\n                    city_map = {}\n                    print(acount_list)\n                provice_count = provice_count + 1\n                province = list['province']\n\n            # area_map holds this district's details\n            area_map = {}\n            area_map['area'] = list['area']\n            area_map['areacode'] = list['areacode']\n            area_map['longitude'] = list['longitude']\n            area_map['latitude'] = list['latitude']\n            city_list.append(area_map)\n\n        # save the regrouped data\n        self.save_data(acount_list, 'data/newData.json')\n        return acount_list\n\n    # save data as a json file\n    def save_data(self, all_city, file_path_name):\n        with open(file_path_name, 'wb') as f:\n            f.write(json.dumps(all_city, ensure_ascii=False).encode('utf-8'))\n\n    # read a json file and return the parsed dict\n    def read_data(self, file_path_name):\n        file = None\n        with open(file_path_name, 'rb') as f:\n            file = f.read().decode('utf-8')\n        return json.loads(file)\n\n\nif __name__ == '__main__':\n    ChinaWeatherSpider().clear_data()\n","sub_path":"Weather/ChinaWeather.py","file_name":"ChinaWeather.py","file_ext":"py","file_size_in_byte":9754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"336966488","text":"import os\nfrom discord.ext import commands\nfrom discord import Intents\nimport random\n\nfrom dotenv import dotenv_values\n\ntoken = dotenv_values(\".env\")[\"TOKEN\"]\n\n\nbot = commands.Bot(\n    command_prefix=\"!\",  # Change to desired prefix\n    case_insensitive=True,  # Commands aren't case-sensitive\n    intents=Intents.all()\n)\n\nbot.author_id = 369134955932155906 \n\n@bot.event\nasync def on_ready():  # When the bot is ready\n    print(\"I'm in\")\n    print(bot.user)  # Prints the bot's username and identifier\n\n@bot.command()\nasync def pong(ctx):\n    await ctx.send('pong')\n\n@bot.command()  # Sends back the name of the caller.\nasync def name(ctx) :\n    await ctx.send(ctx.author)\n\n@bot.command()  # Print each status section with its member list\nasync def count(ctx) :\n    await ctx.send(\"**+++Online Members+++**\")\n    for member in ctx.guild.members :\n        if (member.raw_status == \"online\") :\n            await ctx.send(member.name)\n    await ctx.send(\"**+++Offline Members+++**\")\n    for member in ctx.guild.members :\n        if (member.raw_status == \"offline\") :\n            await ctx.send(member.name)\n    await ctx.send(\"**+++Idle Members+++**\")\n    for member in ctx.guild.members :\n        if (member.raw_status == \"idle\") :\n            await ctx.send(member.name)\n    await ctx.send(\"**+++Do not disturb Members+++**\")\n    for member in ctx.guild.members :\n        if (member.raw_status == \"dnd\") :\n            await ctx.send(member.name)\n\n@bot.command()  # NOT WORKING\nasync def admin(ctx, message) :\n    #if (not(ctx.guild.get_member(message))) :\n    #    await ctx.send(\"Please give a valid username/mention.\")\n    #else :\n    #    await ctx.send(\"Reussite\")\n    if (ctx.message.raw_mentions) :\n        await ctx.send(\"Pipou\")\n    # Check the number of args. Fetch the user. Verify they exist. Create the role. Assign them the admin role.\n
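    # A rough sketch of that intended flow (an untested assumption, based on\n    # discord.py's Guild.create_role and Member.add_roles):\n    #     member = ctx.guild.get_member(ctx.message.raw_mentions[0])\n    #     if member is not None:\n    #         role = await ctx.guild.create_role(name=\"admin\")\n    #         await member.add_roles(role)\n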
\n@bot.command()  # Hardcoded random xkcd; random links from xkcd kept sending the same comic through the bot.\nasync def xkcd(ctx) :\n    url = \"https://xkcd.com/\" + str(random.randint(1,2521))\n    await ctx.send(url)\n\n@bot.command()  # Bans the first mentioned person.\nasync def ban(ctx, message) :\n    if (ctx.message.raw_mentions) :\n        id = ctx.message.raw_mentions[0]\n        await ctx.guild.ban(ctx.guild.get_member(id), reason=\"Bonne nuit mon petit !\")\n\n\nbot.run(token)  # Starts the bot","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"332856797","text":"import threading\nfrom neopixel import *\nimport time\nfrom flask import current_app\nfrom lamp_support import Grid, CANDLE_FLICKER\n\n\ndef updatePixels(start_is_on, start_rgb_color, start_brightness,\n                 end_is_on, end_rgb_color, end_brightness,\n                 effect, transition):\n    if hasattr(current_app, 'thr'):\n        current_app.thr.do_run = False\n        current_app.thr.join()\n\n    current_app.thr = threading.Thread(target=async_updatePixels,\n                                       args=[start_is_on, start_rgb_color,\n                                             start_brightness, end_is_on,\n                                             end_rgb_color, end_brightness,\n                                             effect, transition])\n    current_app.thr.start()\n\n\ndef async_updatePixels(start_is_on, start_rgb_color, start_brightness,\n                       end_is_on, end_rgb_color, end_brightness,\n                       effect, transition):\n\n    t = threading.currentThread()\n\n    LED_COUNT = 240  # Number of LED pixels.\n    LED_PIN = 18  # GPIO pin connected to the pixels (must support PWM!).\n    LED_FREQ_HZ = 800000  # LED signal frequency in hertz (usually 800khz)\n    LED_DMA = 5  # DMA channel to use for generating signal (try 5)\n    LED_BRIGHTNESS = 255  # Set to 0 for darkest and 255 for brightest\n    LED_INVERT = False  # True to invert the signal\n    strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA,\n                              LED_INVERT, LED_BRIGHTNESS)\n    strip.begin()\n\n    if effect == 'candle':\n        transition = 0\n\n    if transition == 0:\n        transition_steps = range(1, 2)\n        transition = 0.1\n    else:\n        transition_steps = range(0, transition * 10 + 1)\n\n    rgb_color_step = [0, 0, 0]\n    rgb_color_step[0] = float(end_rgb_color[0] - start_rgb_color[0])\\\n        / (transition * 10.0)\n    rgb_color_step[1] = float(end_rgb_color[1] - start_rgb_color[1])\\\n        / (transition * 10.0)\n    rgb_color_step[2] = float(end_rgb_color[2] - start_rgb_color[2])\\\n        / (transition * 10.0)\n    brightness_step = float(end_brightness - start_brightness)\\\n        / (transition * 10.0)\n\n    if end_is_on:\n        for step in transition_steps:\n            if getattr(t, \"do_run\", True) is False:\n                break\n\n            r = start_rgb_color[0] + round(step * rgb_color_step[0])\n            g = start_rgb_color[1] + round(step * rgb_color_step[1])\n            b = start_rgb_color[2] + round(step * rgb_color_step[2])\n            brightness = int(start_brightness + round(step * brightness_step))\n            largest_rgb_component = max(r, g, b)\n            r_ratio = float(r)/float(largest_rgb_component)\n            g_ratio = float(g)/float(largest_rgb_component)\n            b_ratio = float(b)/float(largest_rgb_component)\n            adjusted_r = int(r_ratio * float(brightness))\n            adjusted_g = int(g_ratio * float(brightness))\n            adjusted_b = int(b_ratio * float(brightness))\n\n            if effect == \"standard\":\n                for pixel in range(strip.numPixels()):\n                    strip.setPixelColor(pixel, Color(adjusted_g,\n                                                     adjusted_r,\n                                                     adjusted_b))\n            elif effect == \"shade\":\n                for pixel in range(strip.numPixels()):\n                    strip.setPixelColor(pixel, Color(0, 0, 0))\n                for double_row in range(0, 8):\n                    base = double_row * 27 +
10\n for offset in range(8):\n strip.setPixelColor(base + offset, Color(adjusted_g,\n adjusted_r,\n adjusted_b))\n base = double_row * 27 + 24\n for offset in range(8):\n strip.setPixelColor(base + offset, Color(adjusted_g,\n adjusted_r,\n adjusted_b))\n elif effect == \"ring\":\n for pixel in range(strip.numPixels()):\n strip.setPixelColor(pixel, Color(0, 0, 0))\n for pixel in range(0, 27):\n strip.setPixelColor(pixel, Color(adjusted_g,\n adjusted_r,\n adjusted_b))\n\n elif effect == \"sunrise\":\n grid = Grid(27, 17, 10)\n height = brightness - 106\n grid.clear()\n grid.set_super_row(0)\n grid.set_super_row(1)\n if height > 0:\n for y in range(height):\n flip_y = height - y\n for x in range(0, int(flip_y/2.2)):\n grid.set_pixel(x, y+20)\n grid.set_pixel(80-x, 169-y)\n array = grid.strip_array()\n pixel_number = 0\n for mult in array:\n strip.setPixelColor(pixel_number,\n Color(int(mult*adjusted_g),\n int(mult*adjusted_r),\n int(mult*adjusted_b)))\n pixel_number += 1\n elif effect == \"candle\":\n while True:\n for value in CANDLE_FLICKER:\n for pixel in range(0, 27):\n strip.setPixelColor(pixel,\n Color(int(adjusted_g*value*5),\n int(adjusted_r*value*5),\n int(adjusted_b*value*5)))\n strip.show()\n time.sleep(0.01)\n if getattr(t, \"do_run\", True) is False:\n break\n if getattr(t, \"do_run\", True) is False:\n break\n\n strip.show()\n time.sleep(0.1)\n\n else:\n for pixel in range(strip.numPixels()):\n strip.setPixelColor(pixel, Color(0, 0, 0))\n strip.show()\n","sub_path":"webapp/app/pixels.py","file_name":"pixels.py","file_ext":"py","file_size_in_byte":6370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"237784586","text":"import mock\nimport testify as T\n\nimport vimap.exception_handling\nimport vimap.pool\nimport vimap.worker_process\nimport vimap.testing\n\n\n@vimap.worker_process.worker\ndef worker_raise_exc_immediately(seq, init=0):\n raise ValueError(\"hello\")\n return seq\n\n\n@vimap.worker_process.worker\ndef worker_raise_exc_with_curleys(seq, init=0):\n for x in seq:\n if x >= 0:\n raise ValueError(\"{0} curley braces!\")\n yield\n\n\ndef serialize_error(error):\n return (type(error), str(error))\n\n\nclass ExceptionsTest(T.TestCase):\n @mock.patch.object(vimap.exception_handling, 'print_exception')\n def test_basic_exceptions(self, print_exc_mock):\n processes = vimap.pool.fork(worker_raise_exc_immediately.init_args(init=i)\n for i in [1, 1, 1])\n res = list(processes.imap(list(range(1, 10))).zip_in_out())\n T.assert_equal(res, [])\n T.assert_equal(processes.finished_workers, True)\n\n calls = print_exc_mock.call_args_list\n errors = [serialize_error(call[0][0]) for call in calls]\n T.assert_equal(errors, [serialize_error(ValueError(\"hello\"))] * 3)\n\n @mock.patch.object(vimap.exception_handling, 'print_exception')\n def test_exception_with_curleys(self, print_exc_mock):\n '''Dumb test ... I aim to write tests for most every bug that had existed,\n but this is kinda 1-off ... 
(.format() got a curley brace).\n '''\n processes = vimap.pool.fork(worker_raise_exc_with_curleys.init_args(init='{a}')\n for _ in [1, 1, 1])\n res = list(processes.imap(list(range(1, 10))).zip_in_out())\n T.assert_equal(res, [])\n\n calls = print_exc_mock.call_args_list\n errors = [serialize_error(call[0][0]) for call in calls]\n T.assert_equal(errors, [serialize_error(ValueError(\"{0} curley braces!\"))] * 3)\n\n @mock.patch.object(vimap.exception_handling, 'print_exception')\n def test_unconsumed_exceptions(self, print_exc_mock):\n processes = vimap.pool.fork(worker_raise_exc_immediately.init_args(init=i)\n for i in [1, 1, 1])\n del processes\n\n calls = print_exc_mock.call_args_list\n errors = [serialize_error(call[0][0]) for call in calls]\n T.assert_equal(errors, [serialize_error(ValueError(\"hello\"))] * 3)\n\n @mock.patch.object(vimap.exception_handling, 'print_exception')\n def test_a_few_error(self, print_exc_mock):\n processes = vimap.pool.fork((worker_raise_exc_with_curleys.init_args(init=i)\n for i in xrange(2)), in_queue_size_factor=2)\n processes.imap([1]).block_ignore_output()\n del processes\n\n calls = print_exc_mock.call_args_list\n errors = [serialize_error(call[0][0]) for call in calls]\n T.assert_equal(errors, [serialize_error(ValueError(\"{0} curley braces!\"))])\n\n @mock.patch.object(vimap.exception_handling, 'print_warning')\n @mock.patch.object(vimap.exception_handling, 'print_exception')\n def test_fail_after_a_while(self, print_exc_mock, print_warning_mock):\n processes = vimap.pool.fork((worker_raise_exc_with_curleys.init_args(init=i)\n for i in xrange(100)), in_queue_size_factor=2)\n processes.imap([-1] * 3000 + list(range(50)))\n del processes\n\n calls = print_exc_mock.call_args_list\n errors = [serialize_error(call[0][0]) for call in calls]\n T.assert_equal(errors, [serialize_error(ValueError(\"{0} curley braces!\"))] * 50)\n\n # NOTE: Sometimes, the weakref in the pool is deleted, so 'has_exceptions' is\n # not set, and the pool prints warnings we don't actually care about. 
Make\n        # sure that this is the only warning printed.\n        if print_warning_mock.call_args_list:\n            T.assert_equal(len(print_warning_mock.call_args_list), 1)\n            [warning] = print_warning_mock.call_args_list\n            T.assert_in('Pool disposed before input was consumed', warning[0][0])\n","sub_path":"tests/exceptions_test.py","file_name":"exceptions_test.py","file_ext":"py","file_size_in_byte":3919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} {"seq_id":"201881609","text":"import nltk\nimport docx\nimport os\nimport string\nimport math\nimport re\n\nfrom nltk.stem import PorterStemmer\nfrom nltk.corpus import stopwords\n\n\nspath = r\"G:\project\All Cancer Files\"\n\nvoca=[]\ntext=''\ntemp_text=''\nct=0\n\nfor i in os.listdir(spath) :\n    if i.endswith('.docx'):\n        if i[:2] !='~$':\n            doc = docx.Document(spath+'\\\\'+i)\n            for para in doc.paragraphs:\n                text = text+' '+para.text\n                temp_text = temp_text+' '+para.text\n            voca.append(temp_text)\n            temp_text=''\n\n#Classify Y\n\n\ndef find_y(voca):\n    c=len(voca)\n    y=[1 for i in range(0,c)]\n    for i in range(0,c) :\n        Word=nltk.word_tokenize(voca[i])\n        if 'II' in Word:\n            y[i]=2\n        if 'III' in Word:\n            y[i]=3\n        if 'IV' in Word:\n            y[i]=4\n        if 'V' in Word:\n            y[i]=5\n        if 'VI' in Word:\n            y[i]=6 \n        \n    return y\n\n# text is your corpus\n\ndef clean_text(text):\n    w_text = nltk.word_tokenize(text)\n    \n    punc_text=[]\n    for i in w_text:\n        if i not in string.punctuation:\n            punc_text.append(i) \n\n    # lower-case every token (rebuilding the list; reassigning the loop\n    # variable alone would have no effect)\n    punc_text = [i.lower() for i in punc_text]\n    \n    stop_words=set(stopwords.words('english'))\n\n    non_stop_text=[] \n    for i in punc_text:\n        if i not in stop_words:\n            non_stop_text.append(i)\n    \n    unique_text = list(set(non_stop_text))\n\n    return unique_text\n\ndef doc_words_cleaning(voca):  # cleans and preprocesses each document \n    doc_words = [] \n    for i in range(len(voca)):\n        doc_words.append(clean_text(voca[i]))\n    return doc_words\n\ndef bag_of_words(doc_words,doc_temp,vocabulary):  # boolean representation \n    bool_vec = []\n    temp_vec=[]\n    \n    for i in range(len(doc_words)):\n        for j in range(len(vocabulary)):\n            if vocabulary[j] in doc_words[i]:\n                temp_vec.append(1)\n            else:\n                temp_vec.append(0) \n        \n        bool_vec.append(temp_vec) \n        temp_vec=[] \n    \n    return bool_vec\n\ndef doc_term_freq(boo):\n    doc_freq=[]\n    for i in range(len(boo)):\n        summ=0\n        for j in range(len(boo[0])):\n            if boo[i][j] == 1:\n                summ=summ+1\n        doc_freq.append(summ)\n    return doc_freq\n\n# count of no of vocabulary words in each document\n\ndef count_voca_words(bove):\n    a=[]\n    for i in range(len(bove)):\n        a.append(bove[i].count(1)) \n    return a\n\n# term frequency [NAIVE] :- \n    \n    # This gives the raw count of each vocabulary word in every document.\n\ndef term_frequency_naive(doc_words,vocabulary): \n    tf=[]\n    temp=[]\n    for i in range(len(doc_words)):\n        for j in vocabulary:\n            temp.append(doc_words[i].count(j))\n        tf.append(list(zip(temp,vocabulary)))\n        temp=[]\n    \n    return tf \n\n# tf - idf term frequency \n\ndef tf(doc_words,vocabulary):\n    tf=[]\n    temp=[]\n    for i in range(len(doc_words)):\n        for j in vocabulary:\n            temp.append((doc_words[i].count(j))/len(doc_words[i]))\n        tf.append(temp)\n        temp=[]\n    return tf\n\ndef idf(doc_words,vocabulary):\n    doc_count = 0\n    idf=[]\n    for i in vocabulary:\n        for j in range(len(doc_words)):\n            if i in doc_words[j]:\n                doc_count = doc_count + 1\n        idf.append(math.log(len(doc_words)/(doc_count)))\n        doc_count = 0 \n    \n    return idf\n\ndef tf_idf_score_calc(tf,idf):\n    tf_idf = []\n    temp=[]\n    for i in range(len(tf)):\n        for j in range(len(idf)):\n            if tf[i][j] !=0 and idf[j] !=0:\n
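                # contribute tf * idf for this (document, term) pair; the guard is\n                # effectively redundant (the product is already 0 when either\n                # factor is 0) but it documents the intent\n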
temp.append(tf[i][j]*idf[j])\n else:\n temp.append(0)\n tf_idf.append(temp)\n temp=[]\n \n return tf_idf\n\n\ndef Roc_curve(y_test,y_score,n_classes):\n import numpy as np\n import pandas as pd\n import matplotlib.pyplot as plt\n from itertools import cycle\n from sklearn.metrics import roc_curve, auc\n from scipy import interp\n\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n\n l=len(set(y_score))\n\n for i in range(l):\n fpr[i], tpr[i], _ = roc_curve(np.array(pd.get_dummies(y_test))[:, i], np.array(pd.get_dummies(y_score))[:, i])\n roc_auc[i] = auc(fpr[i], tpr[i])\n\n\n all_fpr = np.unique(np.concatenate([fpr[i] for i in range(l)]))\n\n mean_tpr = np.zeros_like(all_fpr)\n for i in range(l):\n mean_tpr += interp(all_fpr, fpr[i], tpr[i])\n\n mean_tpr = mean_tpr/(n_classes-1)\n\n fpr[\"macro\"] = all_fpr\n tpr[\"macro\"] = mean_tpr\n roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])\n\n lw=2\n plt.figure(figsize=(8,5))\n plt.plot(fpr[\"macro\"], tpr[\"macro\"],\n label='macro-average ROC curve (area = {0:0.2f})'\n ''.format(roc_auc[\"macro\"]),\n color='green', linestyle=':', linewidth=4)\n\n colors = cycle(['chocolate', 'aqua', 'darkorange', 'cornflowerblue', 'cadetblue','burntsienna','cornflowerblue'])\n for i, color in zip(range(l), colors):\n plt.plot(fpr[i], tpr[i], color=color, lw=lw,label='ROC curve of class {0} (area = {1:0.2f})'''.format(i, roc_auc[i]))\n\n plt.plot([0, 1], [0, 1], 'k--',color='red', lw=lw)\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.annotate('Random Guess',(.5,.48),color='red')\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver Operating Characteristic for Logistic Regression')\n plt.legend(loc=\"lower right\")\n plt.show()\n\n \n \ndef Logistic(x,y):\n #splitting Training Data and Test Data\n from sklearn.model_selection import train_test_split\n x_train,x_test,y_train,y_test=train_test_split(x,y,random_state = 0)\n\n\n #Logistic Regression\n from sklearn import linear_model\n #from sklearn import metrics\n model=linear_model.LogisticRegression(multi_class='multinomial', solver='newton-cg')\n model=model.fit(x_train,y_train)\n result_log=model.predict(x_test)\n\n #Model report\n from sklearn.metrics import confusion_matrix\n from sklearn.metrics import accuracy_score\n from sklearn.metrics import classification_report\n\n results = confusion_matrix(y_test, result_log)\n print('Confusion Matrix :')\n print(results)\n\n print('Accuracy Score :',accuracy_score(y_test,result_log))\n\n print('Report :' )\n print (classification_report(y_test,result_log))\n\n Roc_curve(y_test,result_log,len(set(y)))\n\nvocabulary = clean_text(text)\n\ndoc_words = doc_words_cleaning(voca)\n\nbow = bag_of_words(doc_words,voca,vocabulary)\n\ny=find_y(voca)\n\ndoc_freq = count_voca_words(bow) \n\nterm_freq_naive = term_frequency_naive(doc_words,vocabulary)\n\nterm_frequency = tf(doc_words,vocabulary)\n\ninverse_doc_freq = idf(doc_words,vocabulary)\n\ntf_idf_score = tf_idf_score_calc(term_frequency,inverse_doc_freq)\n\nprint(\"\\nBag of Words:\")\nLogistic(bow,y)\n\n\nprint(\"\\nTerm Frequency:\")\nLogistic(term_frequency,y)\n\nprint(\"\\ntf_idf:\")\nLogistic(tf_idf_score,y)\n","sub_path":"LogisticRegression.py","file_name":"LogisticRegression.py","file_ext":"py","file_size_in_byte":6903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"257587019","text":"#!/usr/bin/env python\r\n# -*- encoding: utf-8 -*-\r\n'''\r\n@File : ui_zhlian.py \r\n@Desc : \r\n@Contact : 
thefreer@outlook.com\r\n@License : (C)Copyright 2018-2019, TheFreer.NET\r\n@WebSite : www.thefreer.net\r\n@Modify Time @Author @Version\r\n------------ ------- --------\r\n2019/3/5 13:37 thefreer 1.0 \r\n'''\r\nfrom PyQt5 import QtCore, QtWidgets\r\nfrom PyQt5.QtWidgets import QApplication, QMessageBox, QDesktopWidget, QLabel, QComboBox\r\nfrom PyQt5.QtGui import QIcon\r\nfrom ui.ui_spider import Ui_MainWindow\r\nimport sys\r\nfrom spider.thread_spider import spiderThread\r\nimport time\r\n\r\n\r\n# this UI needs improvement\r\nclass MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):\r\n\tdef __init__(self, parent=None):\r\n\t\tsuper(MainWindow, self).__init__(parent)\r\n\t\tself.setupUi(self)\r\n\t\tself.beginBtn.clicked.connect(self.spiderStart)\r\n\t\tself.endBtn.clicked.connect(self.spiderEnd)\r\n\t\tself.init_ui()\r\n\t\r\n\tdef init_ui(self):\r\n\t\tself.setWindowTitle(\"智联招聘爬虫工具\")\r\n\t\tself.setWindowIcon(QIcon('ui/icon.png'))\r\n\t\tself.statusBar().showMessage('Ready')\r\n\t\r\n\t# choices = ['1. 选择爬取的url', '收到的简历', '回收站']\r\n\t# self.comboBox.addItems(choices)\r\n\t\r\n\tdef spiderEnd(self):\r\n\t\tself.myThread.exit()\r\n\t\tself.textEdit.append(\"爬虫线程被手动终结\")\r\n\t\tself.beginBtn.setEnabled(True)\r\n\t\r\n\tdef spiderStart(self):\r\n\t\tself.beginBtn.setEnabled(False)\r\n\t\tcookie = self.cookieLine.text()\r\n\t\tif cookie == '' or cookie is None:\r\n\t\t\twith open('cookie.ini', 'r') as f:\r\n\t\t\t\tcookie = f.readlines()[0]\r\n\t\telse:\r\n\t\t\twith open('cookie.ini', 'w') as f:\r\n\t\t\t\tf.write(cookie)\r\n\t\tstartNum = 0  # assumed default: the UI has no input widget for the start index yet\r\n\t\tself.textEdit.append('你输入的cookie为:\\n%s\\n' % cookie)\r\n\t\tself.textEdit.append('起始数字为 :%s' % startNum)\r\n\t\tself.cookie = cookie\r\n\t\tself.startNum = startNum\r\n\t\tself.myThread = spiderThread(self.cookie, self.startNum)\r\n\t\t# 6. connect the thread's signal to the callback\r\n\t\tself.myThread.updata_date.connect(self.Display)\r\n\t\tself.myThread.start()\r\n\t\r\n\t# 7. the callback: display the output\r\n\tdef Display(self, data):\r\n\t\tif data == 'finish':\r\n\t\t\tself.myThread.exit()\r\n\t\t\tself.beginBtn.setEnabled(True)\r\n\t\telse:\r\n\t\t\tself.textEdit.append(data)\r\n\t\r\n\tdef center(self):\r\n\t\t\r\n\t\tqr = self.frameGeometry()\r\n\t\tcp = QDesktopWidget().availableGeometry().center()\r\n\t\tqr.moveCenter(cp)\r\n\t\tself.move(qr.topLeft())\r\n\t\r\n\tdef closeEvent(self, event):\r\n\t\treply = QMessageBox.question(self, 'Message', \"确定要退出吗?\", QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\r\n\t\t\r\n\t\tif reply == QMessageBox.Yes:\r\n\t\t\tevent.accept()\r\n\t\telse:\r\n\t\t\tevent.ignore()\r\n\r\n\r\nif __name__ == '__main__':\r\n\tapp = QApplication(sys.argv)\r\n\twin = MainWindow()\r\n\twin.show()\r\n\tsys.exit(app.exec_())\r\n","sub_path":"spider/zhilian/ui_zhlian.py","file_name":"ui_zhlian.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"220668954","text":"\"\"\"Letter guessing game\"\"\"\n# make a list of words\n# pick a random word\n# draw spaces\n# take guess\n# draw guessed letters and strikes\n# print out win/lose\nimport os\nimport random\nimport sys\n\nWORDS = [\n    'apple',\n    'kiwi',\n    'banana',\n    'kumquat',\n    'grapefruit',\n    'orange',\n    'lemon',\n    'melon'\n]\n\ndef clear():\n    \"\"\"Clear the screen\"\"\"\n    if os.name == 'nt':\n        os.system('cls')\n    else:\n        os.system('clear')\n\ndef draw(bad_guesses, good_guesses, secret):\n    \"\"\"draw out the words\"\"\"\n    clear()\n    print('Strikes: {}/7\\n'.format(len(bad_guesses)))\n
    print('')\n\n    for letter in bad_guesses:\n        print(letter, end='')\n    print('\\n\\n')\n\n    for letter in secret:\n        if letter in good_guesses:\n            print(letter, end='')\n        else:\n            print('_', end='')\n    print('')\n\ndef get_guess(bad_guesses, good_guesses):\n    \"\"\"gets the guess\"\"\"\n    while True:\n        guess = input('Guess a letter: ').lower()\n        if len(guess) != 1:\n            print(\"You can only guess a single letter!\")\n        elif guess in bad_guesses or guess in good_guesses:\n            print(\"You've already guessed that one!\")\n        elif not guess.isalpha():\n            print(\"You can only guess letters!\")\n        else:\n            return guess\n\ndef play():\n    \"\"\"play a single round\"\"\"\n    clear()\n    secret = random.choice(WORDS)\n    bad_guesses = []\n    good_guesses = []\n\n    while len(bad_guesses) < 7 and len(good_guesses) != len(set(secret)):\n        draw(bad_guesses, good_guesses, secret)\n        guess = get_guess(bad_guesses, good_guesses)\n\n        if guess in secret:\n            good_guesses.append(guess)\n            if len(good_guesses) == len(set(secret)):\n                print('You won! The word was {}'.format(secret))\n        else:\n            bad_guesses.append(guess)\n    if len(bad_guesses) >= 7:\n        print('You lost! The word was {}'.format(secret))\n\ndef main():\n    while True:\n        start = input(\"Press enter to start, or enter q to quit \")\n        if start.upper() == 'Q':\n            break\n        play()\n\nmain()\n","sub_path":"foundations/letter_guessing_game.py","file_name":"letter_guessing_game.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} {"seq_id":"54014237","text":"#!/usr/bin/python3\n\nfrom typing import List\nfrom pandas.core.frame import DataFrame #class\n\nfrom aggregation.aggrDHont import AggrDHont #class\nfrom aggregation.aggrDHontNegativeImplFeedback import AggrDHontNegativeImplFeedback #class\n\nimport pandas as pd\nfrom history.aHistory import AHistory #class\nfrom history.historyDF import HistoryDF #class\n\nfrom userBehaviourDescription.userBehaviourDescription import UserBehaviourDescription #class\nfrom userBehaviourDescription.userBehaviourDescription import observationalLinearProbabilityFnc #function\n\nfrom aggregation.toolsDHontNF.penalizationOfResultsByNegImpFeedback.penalUsingReduceRelevance import PenalizationOfResultsByNegImpFeedbackUsingReduceRelevance #class\n\n\ndef test01():\n    print(\"Test 01\")\n\n    # number of recommended items\n    N = 120\n\n    #a = observationalLinearProbabilityFnc(0.1, 0.9, 5)\n    #print(a)\n\n    uBehaviourDesc:UserBehaviourDescription = UserBehaviourDescription(observationalLinearProbabilityFnc, [0.1, 0.9])\n\n    # method results, items=[1,2,4,5,6,7,8,12,32,64,77]\n    methodsResultDict:dict[str,pd.Series] = {\n        \"metoda1\":pd.Series([0.2,0.1,0.3,0.3,0.1],[32,2,8,1,4],name=\"rating\"),\n        \"metoda2\":pd.Series([0.1,0.1,0.2,0.3,0.3],[1,5,32,6,7],name=\"rating\"),\n        \"metoda3\":pd.Series([0.3,0.1,0.2,0.3,0.1],[7,2,77,64,12],name=\"rating\")\n    }\n    #print(methodsResultDict)\n\n    # methods parameters\n    methodsParamsData:List[tuple] = [['metoda1',100], ['metoda2',80], ['metoda3',60]]\n    methodsParamsDF:DataFrame = pd.DataFrame(methodsParamsData, columns=[\"methodID\",\"votes\"])\n    methodsParamsDF.set_index(\"methodID\", inplace=True)\n    #print(methodsParamsDF)\n\n    userID:int = 0\n    itemID:int = 7\n\n    historyDF:AHistory = HistoryDF(\"test01\")\n    historyDF.insertRecommendation(userID, itemID, 1, 0.9, True)\n    historyDF.insertRecommendation(userID, itemID, 1, 0.9, True)\n    historyDF.insertRecommendation(userID, itemID, 1, 0.9, True)\n    historyDF.print()\n\n    ignoringValue:float = historyDF.getIgnoringValue(userID, itemID,
limit=3)\n print(\"IgnoringValue: \" + str(ignoringValue))\n\n aggr:AggrDHont = AggrDHontNegativeImplFeedback(historyDF, {AggrDHontNegativeImplFeedback.ARG_SELECTORFNC:(AggrDHontNegativeImplFeedback.selectorOfTheMostVotedItem,[]),\n AggrDHontNegativeImplFeedback.AGR_LENGTH_OF_HISTORY:10,\n AggrDHontNegativeImplFeedback.AGR_BORDER_NEGATIVE_FEEDBACK:1.0})\n\n itemIDs:List[tuple] = aggr.runWithResponsibility(methodsResultDict, methodsParamsDF, userID, N)\n print(itemIDs)\n\n\ndef test02():\n print(\"Test 02\")\n\n methodsResultDict:dict[str,pd.Series] = {\n \"metoda1\":pd.Series([0.2,0.1,0.3,0.3,0.1],[32,2,8,1,4],name=\"rating\"),\n \"metoda2\":pd.Series([0.1,0.1,0.2,0.3,0.3],[1,5,32,6,7],name=\"rating\"),\n \"metoda3\":pd.Series([0.3,0.1,0.2,0.3,0.1],[7,2,77,64,12],name=\"rating\")\n }\n print(methodsResultDict)\n print()\n\n userID:int = 0\n itemID:int = 1\n\n historyDF:AHistory = HistoryDF(\"test01\")\n historyDF.insertRecommendation(userID, itemID, 0, 0.9, False)\n historyDF.insertRecommendation(userID, itemID, 0, 0.9, False)\n historyDF.insertRecommendation(userID, itemID, 0, 0.9, False)\n #historyDF.print()\n\n ###################\n maxPenaltyValue:float = 1.2\n minPenaltyValue:float = 0.2\n lengthOfHistory:int = 5\n\n p = PenalizationOfResultsByNegImpFeedbackUsingReduceRelevance(historyDF, maxPenaltyValue, minPenaltyValue, lengthOfHistory)\n methodsResultDict:dict[str, pd.Series] = p.proportionalRelevanceReduction(methodsResultDict, userID)\n print(\"methodsResultDict\")\n print(methodsResultDict)\n\n\n ###################\n i:int = 2\n maxPenaltyValue:float = 1.2\n minPenaltyValue:float = 0.2\n lengthOfHistory:int = 5\n\n value:float = PenalizationOfResultsByNegImpFeedbackUsingReduceRelevance.getPenaltyLinear2(i, maxPenaltyValue, minPenaltyValue, lengthOfHistory)\n print(\"value: \" + str(value))\n\n\n ###################\n minTimeDiff:float = 1.0\n maxTimeDiff:float = 1.5\n minPenalty:float = 0.0\n maxPenalty:float = 1.0\n\n timeDiff:float = minTimeDiff + 0.25\n\n value:float = PenalizationOfResultsByNegImpFeedbackUsingReduceRelevance.getPenaltyLinear(timeDiff, minTimeDiff, maxTimeDiff, minPenalty, maxPenalty)\n print(\"value: \" + str(value))\n\n#test01()\ntest02()","sub_path":"src/tests/aggrDHontNegativeImplFeedback.py","file_name":"aggrDHontNegativeImplFeedback.py","file_ext":"py","file_size_in_byte":4435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"184002237","text":"metadata = \"\"\"\nsummary @ Mozilla Network Security Services\nhomepage @ http://www.mozilla.org/projects/security/pki/nss/\nlicense @ MPL GPL\nsrc_url @ ftp://ftp.mozilla.org/pub/security/nss/releases/NSS_3_13_3_RTM/src/nss-3.13.3.tar.gz\narch @ ~x86\n\"\"\"\n\ndepends = \"\"\"\nruntime @ dev-libs/nspr dev-db/sqlite sys-libs/zlib\nbuild @ dev-lang/perl\n\"\"\"\n\ndef prepare():\n cd(\"mozilla\")\n makedirs(\"mozilla/dist/pkgconfig\")\n copy(\"%s/nss.pc.in\" % filesdir, \"dist/pkgconfig/nss.pc.in\")\n copy(\"%s/nss-config.in\" % filesdir, \"dist/pkgconfig/nss-config.in\")\n # pathces from archlinux\n #patch(\"bug702090.patch\", level=2)\n #patch(\"add_spi+cacert_ca_certs.patch\", level=2)\n patch(\"ssl-renegotiate-transitional.patch\", level=2)\n patch(\"nss-no-rpath.patch\", level=2)\n sed(\"-e 's/\\$(MKSHLIB) -o/\\$(MKSHLIB) \\$(LDFLAGS) -o/g' -i security/coreconf/rules.mk\")\n\ndef build():\n cd(\"mozilla/security/nss/lib/ckfw/builtins\")\n make(\"generate\")\n \n cd(build_dir)\n\n export(\"BUILD_OPT\", \"1\")\n export(\"NSS_ENABLE_ECC\", \"1\")\n 
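    # build against the system SQLite (dev-db/sqlite from the depends block above) rather than the bundled copy\n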
export(\"NSS_USE_SYSTEM_SQLITE\", \"1\")\n export(\"OPT_FLAGS\",\"%s -g -fno-strict-aliasing\" % get_env(\"CFLAGS\"))\n \n export(\"PKG_CONFIG_ALLOW_SYSTEM_LIBS\", \"1\")\n export(\"PKG_CONFIG_ALLOW_SYSTEM_CFLAGS\", \"1\")\n export(\"NSPR_INCLUDE_DIR\", \"/usr/include/nspr\")\n export(\"XCFLAGS\", get_env(\"CFLAGS\"))\n\n make(\"-C mozilla/security/coreconf\", j=1)\n make(\"-C mozilla/security/dbm\", j=1)\n make(\"-C mozilla/security/nss\", j=1)\n\ndef install():\n cd(\"mozilla\")\n\n for binary in [\"certutil\", \"modutil\", \"pk12util\", \"signtool\", \"ssltap\"]:\n insinto(\"dist/Linux*/bin/%s\" % binary, \"/usr/bin\", sym=False)\n\n for lib in [\"*.a\",\"*.chk\",\"*.so\"]:\n insinto(\"dist/Linux*/lib/%s\" % lib, \"/usr/lib/nss\", sym=False)\n\n # Headers\n for header in [\"dist/private/nss/*.h\",\"dist/public/nss/*.h\"]:\n insinto(header, \"/usr/include/nss\", sym=False)\n\n # Drop executable bits from headers\n setmod(\"0644\", \"%s/usr/include/nss/*.h\" % install_dir)\n\n # Install nss-config and nss.pc\n insfile(\"dist/pkgconfig/nss.pc.in\", \"/usr/lib/pkgconfig/nss.pc\")\n insexe(\"dist/pkgconfig/nss-config.in\", \"/usr/bin/nss-config\")\n\n for lib in ('libssl3.so', 'libsmime3.so', 'libnssutil3.so', 'libnss3.so', \n 'libsoftokn3.so', 'libfreebl3.so', 'libnssckbi.so', 'libnssdbm3.so'):\n makesym(\"nss/%s\" % lib, \"/usr/lib/%s\" % lib)\n","sub_path":"dev-libs/nss/nss-3.13.3.py","file_name":"nss-3.13.3.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"29976998","text":"from __future__ import print_function\nimport pyorbit\nimport argparse\nimport os\nimport sys\n\nif __name__ == '__main__':\n\n print('PyORBIT v8.x')\n print()\n print('Python version in use:')\n print(sys.version)\n print()\n\n parser = argparse.ArgumentParser(prog='PyORBIT_run.py', description='PyORBIT runner')\n parser.add_argument('sampler', type=str, nargs=1, help='sampler (emcee or polychord)')\n parser.add_argument('config_file', type=str, nargs=1, help='config file')\n\n args = parser.parse_args()\n sampler = args.sampler[0]\n file_conf = args.config_file[0]\n\n config_in = pyorbit.yaml_parser(file_conf)\n\n sampler_keyword = {\n 'multinest':['multinest', 'MultiNest', 'multi'],\n 'polychord':['polychord', 'PolyChord', 'polychrod', 'poly'],\n 'emcee': ['emcee', 'MCMC', 'Emcee'],\n 'dynesty': ['dynesty', 'DyNesty', 'Dynesty', 'DYNESTY'],\n 'optimize': ['optimize', 'scipy', 'Optimize', 'OPTIMIZE'],\n }\n\n if sampler in sampler_keyword['emcee']:\n pyorbit.pyorbit_emcee(config_in)\n\n if sampler in sampler_keyword['multinest']:\n pyorbit.pyorbit_multinest(config_in)\n\n if sampler in sampler_keyword['polychord']:\n pyorbit.pyorbit_polychord(config_in)\n\n if sampler in sampler_keyword['dynesty']:\n pyorbit.pyorbit_dynesty(config_in)\n\n if sampler in sampler_keyword['optimize']:\n pyorbit.pyorbit_optimize(config_in)\n\n# This line was used to check if imprtation was working\n# else:\n# print 'I am being imported from another module'\n","sub_path":"PyORBIT_Run.py","file_name":"PyORBIT_Run.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"259657778","text":"'''\nCreated on Aug 13, 2012\n\n@author: teddydestodes, MrLoom\n'''\n\nimport socket, time, struct\nimport threading\nimport pickle\nimport spidev\nimport hashlib\n\nclass RefreshThread(threading.Thread):\n SPILock = threading.Lock()\n \n def __init__(self, 
length):\n threading.Thread.__init__(self)\n self.buffer = []\n self.length = length\n for i in range(0,length*3):\n self.buffer.append(0)\n self.spi = spidev.SpiDev(2,0)\n self.spi.max_speed_hz = 1000000\n \n def run(self):\n while True:\n self.flush_buffer()\n time.sleep(.5)\n \n def flush_buffer(self):\n RefreshThread.SPILock.acquire()\n self.spi.writebytes(self.buffer)\n time.sleep(0.0005)\n RefreshThread.SPILock.release()\n \n def set_color(self, color, pixel=None, flush=True):\n if pixel != None:\n self.buffer[pixel*3] = color[0]\n self.buffer[pixel*3+1] = color[1]\n self.buffer[pixel*3+2] = color[2]\n else:\n for p in range(0,self.length):\n self.buffer[p*3] = color[0]\n self.buffer[p*3+1] = color[1]\n self.buffer[p*3+2] = color[2]\n if flush:\n self.flush_buffer()\n\n def push_color(self, color):\n self.buffer.insert(0,color[2])\n self.buffer.insert(0,color[1])\n self.buffer.insert(0,color[0])\n self.buffer.pop()\n self.buffer.pop()\n self.buffer.pop()\n self.flush_buffer()\n \n def fade(self, color):\n steps = 50\n for s in range(0,steps):\n for p in range(0, self.length):\n self.set_color(self.avg_color(self.get_color(p), color, steps-s), p, flush=False)\n self.flush_buffer()\n self.set_color(color)\n \n\n def get_color(self, pixel):\n return [self.buffer[pixel*3],self.buffer[pixel*3+1],self.buffer[pixel*3+2]]\n\n def avg_color(self, color, color2, weight=1):\n return ((color[0]*weight+color2[0])/(1+weight),(color[1]*weight+color2[1])/(1+weight),(color[2]*weight+color2[2])/(1+weight))\n\nclass RecieveThread(threading.Thread):\n \n def __init__(self,address,port, refresh):\n threading.Thread.__init__(self)\n self.sock = socket.socket( socket.AF_INET,\n socket.SOCK_DGRAM )\n self.sock.bind( (address,port) )\n self.rthread = refresh\n \n def run(self):\n while True:\n data, addr = self.sock.recvfrom( 1024 ) # buffer size is 1024 bytes\n \n checksumData = data[:16]\n pickledData = data[16:]\n \n checksum = hashlib.sha256(pickledData + \"TESTSALT\").hexdigest()[:16]\n if checksum != checksumData:\n continue\n \n t = pickle.loads(pickledData) \n if t[0] == 0:\n self.command_off()\n if t[0] == 1:\n self.command_setcolor(t[1])\n if t[0] == 2:\n self.command_pushcolor(t[1])\n if t[0] == 3:\n self.command_fade(t[1])\n def command_off(self):\n self.rthread.set_color((0,0,0))\n def command_setcolor(self, param):\n self.rthread.set_color(param[0],param[1])\n def command_pushcolor(self, param):\n self.rthread.push_color(param[0])\n def command_fade(self, param):\n self.rthread.fade(param)\nclass LichtServer(object):\n '''\n classdocs\n '''\n\n\n def __init__(self,address,port, size):\n '''\n Constructor\n '''\n self.refresh_thread = RefreshThread(size)\n self.recieve_thread = RecieveThread(address,port, self.refresh_thread)\n self.recieve_thread.setDaemon(True)\n self.refresh_thread.setDaemon(True)\n self.recieve_thread.start() \n self.refresh_thread.start()\n \n \n \nserver = LichtServer('192.168.2.145',16321, 25)\n\nwhile True:\n time.sleep(100)","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"22873730","text":"import tensorflow as tf\nimport numpy as np\n\n\ndef parse_csv(st):\n #ary = st.decode(\"utf-8\").split(\",\")\n #st = tf.expand_dims(st, -1)\n ary = tf.decode_csv(st, record_defaults=[tf.zeros([1],dtype=tf.float32),tf.zeros([1],dtype=tf.float32)])\n return [{\"x\":ary[0]}, ary[1]]\n\n\ndataset = 
tf.data.Dataset.from_tensor_slices([\"sample2_sin.csv\"])\\\n .flat_map(lambda x : tf.data.TextLineDataset(x))\\\n .map(parse_csv)\n #.map(lambda ary,ary2 : tf.cast(ary, dtype=tf.float32))\n# .map(func_a) #\\\nprint (dataset.output_types)\nprint (dataset.output_shapes)\n\niterator = dataset.make_one_shot_iterator()\nnext_element = iterator.get_next()\nsess = tf.InteractiveSession()\nfor i in range(10):\n print(str(sess.run(next_element)))\n\n\n\n\n\n\n\ndef load_rawdata():\n source = []\n with open(\"sample2_sin.csv\", \"r\") as f:\n for line in f:\n source.append(line.split(\",\"))\n\n data = np.array(source, dtype=\"float\").transpose()\n feature = data[0]\n label = [x for x in data[1]]\n return [{\"x\":feature},label]\n\ndef train_input_fn(features, labels, batch_size):\n dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))\n print(dataset)\n return dataset\n #return (dataset.shuffle(5000).batch(batch_size).repeat().make_one_shot_iterator().get_next())\n\n#\n\"\"\"\nr = load_rawdata()\ndataset2 = train_input_fn(r[0],r[1], 100)\n\nnext_element = dataset2.make_one_shot_iterator().get_next()\nsess = tf.InteractiveSession()\nfor i in range(10):\n print(str(sess.run(next_element)))\n\n\"\"\"\n\n\n\n","sub_path":"ml/local/experiment/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"235682634","text":"from django.urls import reverse\nfrom django.test import TestCase\n\nfrom users.models import Account\nfrom ..models import Feed, FeedItem\nfrom ..views import FeedItemListView\n\n\nclass FeedCreateViewTestCase(TestCase):\n \"\"\"Ensure feeds can be created\n \"\"\"\n def setUp(self):\n super().setUp()\n self.title = 'Test Feed'\n self.link = 'https://www.nu.nl/rss/Algemeen'\n self.description = 'Test Description'\n self.url = reverse('feeds:create')\n self.email = 'johnny@doedoe.com'\n self.password = 'Secret123456'\n Account.objects.create_user(self.email, self.password)\n self.credentials = {'username': self.email, 'password': self.password}\n\n def tearDown(self):\n super().tearDown()\n Feed.objects.all().delete()\n Account.objects.all().delete()\n\n def _log_user_in(self):\n self.client.post(reverse('users:login'), self.credentials)\n\n def test_feed_url_cant_access(self):\n \"\"\"Ensure users can't access feed url if they're not logged\n \"\"\"\n response = self.client.get(self.url)\n\n self.assertEqual(response.status_code, 302)\n self.assertIn(reverse('users:login'), response.url)\n\n def test_feed_url_can_be_accessed(self):\n \"\"\"Ensure users can access feed url\n \"\"\"\n self._log_user_in()\n response = self.client.get(self.url)\n\n self.assertEqual(response.status_code, 200)\n\n def test_feed_cannot_be_created_without_link(self):\n \"\"\"Ensure feeds cannot be created without links\n \"\"\"\n self._log_user_in()\n data = {\n 'title': self.title,\n }\n\n response = self.client.post(self.url, data, follow=True)\n\n form = response.context_data.get('form')\n self.assertIn('link', form.errors.keys())\n\n def test_feed_can_be_created(self):\n \"\"\"Ensure users can create feeds through feed url\n \"\"\"\n self._log_user_in()\n data = {\n 'title': self.title,\n 'link': self.link,\n 'description': self.description\n }\n\n response = self.client.post(self.url, data, follow=True)\n\n self.assertEqual(response.status_code, 200)\n self.assertGreater(Feed.objects.count(), 0)\n\n\nclass FeedListViewTestCase(TestCase):\n \"\"\"Ensure feeds are 
listed\n \"\"\"\n\n def setUp(self):\n super().setUp()\n self.url = reverse('feeds:feeds')\n\n def tearDown(self):\n super().tearDown()\n Feed.objects.all().delete()\n\n def test_list_of_feeds_is_empty(self):\n \"\"\"Ensure when no feeds the returned list is empty\n \"\"\"\n response = self.client.get(self.url)\n object_list = response.context_data.get('object_list')\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(object_list.count(), 0)\n\n def test_feeds_are_listed(self):\n \"\"\"Ensure feeds are listed after being created\n \"\"\"\n Feed.objects.bulk_create([\n Feed(\n title='This is a test',\n link='https://www.nu.nl/rss/Algemeen'\n ),\n Feed(\n title='This is only a test',\n link='https://feeds.feedburner.com/tweakers/mixed'\n ),\n ])\n\n response = self.client.get(self.url)\n object_list = response.context_data.get('object_list')\n\n self.assertEqual(response.status_code, 200)\n self.assertIsNotNone(object_list)\n self.assertEqual(object_list.count(), 2)\n\n\nclass FeedItemListViewTestCase(TestCase):\n \"\"\"Ensure FeedItems are listed\n \"\"\"\n def setUp(self):\n super().setUp()\n self.feed = Feed.objects.create(\n title='This is a test',\n link='https://www.nu.nl/rss/Algemeen'\n )\n self.feed_item = FeedItem.objects.create(\n feed=self.feed, link='http://test.com'\n )\n\n def tearDown(self):\n super().tearDown()\n Feed.objects.all().delete()\n FeedItem.objects.all().delete()\n\n def test_get_queryset(self):\n \"\"\"Ensure get_queryset return the expected object\n \"\"\"\n self.assertIsNotNone(\n FeedItemListView(kwargs={'pk': self.feed_item.pk}).get_queryset()\n )\n self.assertEqual(FeedItem.objects.count(), 1)\n\n\nclass RunTaskRedirectViewTestCase(TestCase):\n \"\"\"Ensure task runs when called through url\n \"\"\"\n def setUp(self):\n super().setUp()\n self.feed = Feed.objects.create(\n title='This is a test', link='https://www.nu.nl/rss/Algemeen'\n )\n self.feed_item = FeedItem.objects.create(\n feed=self.feed, link='http://test.com'\n )\n\n def tearDown(self):\n super().tearDown()\n Feed.objects.all().delete()\n FeedItem.objects.all().delete()\n\n def test_view_can_be_accessed(self):\n \"\"\"Ensure view can be accessed through url with pk as arg\n \"\"\"\n response = self.client.get(\n reverse('feeds:run_task', args=[self.feed.pk]), follow=True\n )\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response.redirect_chain[0][0],\n reverse('feeds:feeds')\n )\n","sub_path":"feedscraper/feeds/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":5210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"60521881","text":"import matplotlib.pyplot as plt\nimport ES, optimFunctions\n\nN = input(\"nombre d'essais: \")\nN = int(N)\n\nresultats = []\n\nfor i in range(N):\n#evolutionaryOptimisation(nVar,minBound,maxBound,maxIt,popSize,nOffspring,tournSize,pm,rayon,costFunction,...\n bestFits = ES.evolutionaryOptimisation(1, -5, 5,\n maxIt = 20,\n popSize = 20,\n nOffspring = 20,\n tournSize = 2,\n pm = 0.1,\n rayon = 0.1,\n costFunction = optimFunctions.rastriginFunction, Print = False)\n\n resultats.append(bestFits[-1])\n\nmeanMean = 0\nmeanBest = 0\nmeanStd = 0\n\nfor i in resultats:\n print(\"best: \" , i[0] , \" mean: \" , i[1] , \" worst: \" , i[2] , \"std: \" , i[3])\n meanMean += i[1]\n meanBest += i[0]\n meanStd += i[3]\n\nmeanMean /= N\nmeanBest /= N\nmeanStd /= N\n\nprint(\"\\nmean des Means: \", meanMean, \"\\nmean des Bests: \", meanBest, \"\\nmean des Std: 
\", meanStd)\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"310391121","text":"from coco import CocoDataset\nimport matplotlib.pyplot as plt\nimport os\nimport pdb\nimport pickle\n\ndef analysis(dataset_info):\n count = []\n scales = []\n for element in dataset_info:\n count.append(element[0])\n scales.append(element[1])\n #pdb.set_trace()\n\ndef save_info(dataset_info, PATH, set_name):\n \n folder = 'fine-tune/analysis'\n set_name = set_name[:-4] + '.txt'\n fname = os.path.join(PATH, folder, set_name)\n \n print(\"Saving dataset info to {}\".format(fname))\n with open(fname, \"wb\") as fp:\n pickle.dump(dataset_info, fp)\n\ndef load_info(PATH, set_name):\n folder = 'fine-tune/analysis'\n set_name = set_name[:-4] + '.txt'\n fname = os.path.join(PATH, folder, set_name)\n\n print(\"Loading log file from {}\".format(fname))\n with open(fname, \"rb\") as fp:\n dataset_info = pickle.load(fp)\n \n return dataset_info\n\ndef do_analysis(dataset_info, PATH, set_name):\n count = []\n scales = []\n folder = 'fine-tune/analysis'\n plt_name = os.path.join(PATH, folder, set_name[:-4])\n for element in dataset_info:\n count.append(element[0])\n scales.append(element[1])\n scales_ = [val for scale in scales for val in scale]\n \n plt.subplot(2, 1, 1)\n plt.hist(count)\n plt.title(\"Number of parts per image\")\n plt.ylabel(\"Count\")\n \n plt.subplot(2, 1, 2)\n plt.hist(scales_)\n plt.title(\"Scales for each part\")\n plt.xlabel(\"Bins\")\n plt.ylabel(\"Count\")\n \n plt.tight_layout()\n plt.savefig(plt_name+'.png')\n\ndef main(PATH, set_name, extract, analysis):\n \n if extract:\n coco_ds = CocoDataset(PATH, set_name)\n dataset_info = coco_ds.process_images()\n save_info(dataset_info, PATH, set_name)\n \n if analysis:\n dataset_info = load_info(PATH, set_name)\n do_analysis(dataset_info, PATH, set_name)\n \n\nif __name__ == '__main__':\n PATH = '/home/cancam/workspace/gradcam_plus_plus-pytorch/data/coco'\n set_name = 'val2017'\n extract = True\n analysis = False\n main(PATH, set_name, extract, analysis)\n","sub_path":"coco_utils/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"327706745","text":"import pytest\nfrom pydantic import ValidationError\n\nfrom devmaua.src.enum.tipo_email import TipoEmail\nfrom devmaua.src.enum.tipo_telefone import TipoTelefone\nfrom devmaua.src.enum.tipo_endereco import TipoEndereco\n\nfrom devmaua.src.models.contato import Contato\nfrom devmaua.src.models.email import Email\nfrom devmaua.src.models.telefone import Telefone\nfrom devmaua.src.models.endereco import Endereco\n\n\nclass Test_Contato():\n \n def test_create_instance_model(self):\n email = Email(email='teste@teste.com',\n tipo=TipoEmail.PRIVADO,\n prioridade = 1)\n end = Endereco(logradouro='rua de tal',\n numero = 20,\n cep='00000-000',\n tipo = TipoEndereco.RESIDENCIAL)\n tel = Telefone(tipo = TipoTelefone.PRIVADO,\n numero = '99999-9999',\n ddd=11,\n prioridade = 3)\n contato = Contato(emails = [email],\n telefones = [tel],\n enderecos = [end])\n \n assert contato.emails[0].email == 'teste@teste.com'\n assert contato.emails[0].tipo == TipoEmail.PRIVADO\n assert contato.emails[0].prioridade == 1\n \n assert contato.telefones[0].tipo == TipoTelefone.PRIVADO\n assert contato.telefones[0].numero == '99999-9999'\n assert contato.telefones[0].ddd 
== 11\n assert contato.telefones[0]. prioridade == 3\n \n assert contato.enderecos[0].logradouro == 'rua de tal'\n assert contato.enderecos[0].numero == 20\n assert contato.enderecos[0].cep == '00000-000'\n assert contato.enderecos[0].tipo == TipoEndereco.RESIDENCIAL\n \n def test_validator_error_email(self):\n with pytest.raises(ValidationError) as error_info:\n end = Endereco(logradouro='rua de tal',\n numero = 20,\n cep='00000-000',\n tipo = TipoEndereco.RESIDENCIAL)\n tel = Telefone(tipo = TipoTelefone.PRIVADO,\n numero = '99999-9999',\n ddd=11,\n prioridade = 3)\n contato = Contato(emails = [],\n telefones = [tel],\n enderecos = [end])\n \n def test_validator_error_endereco(self):\n with pytest.raises(ValidationError) as error_info:\n email = Email(email='teste@teste.com',\n tipo=TipoEmail.PRIVADO,\n prioridade = 1)\n tel = Telefone(tipo = TipoTelefone.PRIVADO,\n numero = '99999-9999',\n ddd=11,\n prioridade = 3)\n contato = Contato(emails = [email],\n telefones = [tel],\n enderecos = [])\n \n def test_validator_error_telefone(self):\n with pytest.raises(ValidationError) as error_info:\n email = Email(email='teste@teste.com',\n tipo=TipoEmail.PRIVADO,\n prioridade = 1)\n end = Endereco(logradouro='rua de tal',\n numero = 20,\n cep='00000-000',\n tipo = TipoEndereco.RESIDENCIAL)\n contato = Contato(emails = [email],\n telefones = [],\n enderecos = [end])","sub_path":"devmaua/test/models/test_contato.py","file_name":"test_contato.py","file_ext":"py","file_size_in_byte":3485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"149090072","text":"\nimport string\nimport random\nimport requests\nimport json\nimport re\nfrom io import StringIO\nfrom datetime import timedelta\nfrom urllib.parse import parse_qsl\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.urls import reverse\nfrom django.forms.models import model_to_dict\nfrom django.contrib import auth\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom django.db import transaction\nfrom django.db.models import Q, Count\nfrom django.utils import timezone\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import permission_required\nfrom django.core.management.commands import dumpdata\nfrom django.http import HttpResponse, HttpResponseRedirect\n\nfrom my_oauth.models import Service, Token\nfrom true_coders.models import Coder\n\n\ndef generate_state(size=20, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef query(request, name):\n redirect_url = request.GET.get('next', None)\n if redirect_url:\n request.session['next'] = redirect_url\n\n service = get_object_or_404(Service, name=name)\n args = model_to_dict(service)\n args['redirect_uri'] = settings.HTTPS_HOST_ + reverse('auth:response', args=(name, ))\n args['state'] = generate_state()\n request.session['state'] = args['state']\n url = re.sub('[\\n\\r]', '', service.code_uri % args)\n return redirect(url)\n\n\n@login_required\ndef unlink(request, name):\n coder = request.user.coder\n if coder.token_set.count() < 2:\n messages.error(request, 'Not enough services')\n else:\n coder.token_set.filter(service__name=name).delete()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\ndef process_data(request, service, access_token, response):\n if response.status_code != requests.codes.ok:\n raise Exception('Response 
status code not equal ok.')\n data = json.loads(response.text)\n while isinstance(data, list) or isinstance(data, dict) and len(data) == 1:\n data = (data if isinstance(data, list) else list(data.values()))[0]\n\n data.update(access_token)\n\n for e in ('email', 'default_email', ):\n email = data.get(e, None)\n if email:\n break\n user_id = data.get(service.user_id_field, None)\n if not email or not user_id:\n raise Exception('Email or User ID not found.')\n token, created = Token.objects.get_or_create(\n service=service,\n user_id=user_id,\n )\n token.access_token = access_token\n token.data = json.loads(response.text)\n token.email = email\n token.save()\n\n request.session['token_id'] = token.id\n return redirect('auth:signup')\n\n\ndef process_access_token(request, service, response):\n if response.status_code != requests.codes.ok:\n raise Exception('Response status code not equal ok.')\n try:\n access_token = json.loads(response.text)\n except Exception:\n access_token = dict(parse_qsl(response.text))\n\n if service.data_header:\n args = model_to_dict(service)\n args.update(access_token)\n headers = json.loads(service.data_header % args)\n else:\n headers = None\n\n response = requests.get(service.data_uri % access_token, headers=headers)\n return process_data(request, service, access_token, response)\n\n\ndef response(request, name):\n service = get_object_or_404(Service, name=name)\n state = request.session.get(service.state_field, None)\n try:\n if state is None or state != request.GET.get('state'):\n raise KeyError('Not found state')\n del request.session['state']\n args = model_to_dict(service)\n args.update(dict(list(request.GET.items())))\n args['redirect_uri'] = settings.HTTPS_HOST_ + reverse('auth:response', args=(name, ))\n if 'code' not in args:\n raise ValueError('Not found code')\n\n if service.token_post:\n post = json.loads(service.token_post % args)\n response = requests.post(service.token_uri, data=post)\n else:\n url = re.sub('[\\n\\r]', '', service.token_uri % args)\n response = requests.get(url)\n return process_access_token(request, service, response)\n except Exception as e:\n messages.error(request, \"ERROR: {}\".format(str(e).strip(\"'\")))\n return signup(request)\n\n\ndef login(request):\n redirect_url = request.GET.get('next', 'clist:main')\n if request.user.is_authenticated:\n return redirect(redirect_url)\n\n services = Service.objects.annotate(n_tokens=Count('token')).order_by('-n_tokens')\n\n request.session['next'] = redirect_url\n return render(\n request,\n 'login.html',\n {'services': services},\n )\n\n\ndef signup(request, action=None):\n context = {}\n token_id = request.session.pop('token_id', None)\n if token_id:\n try:\n token = Token.objects.get(id=token_id)\n except Token.DoesNotExist:\n return signup(request)\n\n user = None\n coder = token.coder\n if coder:\n user = coder.user\n else:\n t = Token.objects.filter(email=token.email, coder__isnull=False).filter(~Q(id=token_id)).first()\n if t:\n user = t.coder.user\n token.coder = user.coder\n token.save()\n if user and user.is_active:\n user.backend = 'django.contrib.auth.backends.ModelBackend'\n auth.login(request, user)\n return signup(request)\n\n if request.user.is_authenticated:\n token.coder = request.user.coder\n token.save()\n return signup(request)\n\n request.session['token_id'] = token_id\n\n q_token = Q(email=token.email)\n\n if request.POST and 'signup' in request.POST:\n username = request.POST.get('username', None)\n if not username:\n context['error'] = 'Username can not be 
empty.'\n elif len(username) > 30:\n context['error'] = '30 characters or fewer.'\n elif not re.match(r'^[\\-A-Za-z0-9_@\\+\\.]{1,30}$', username):\n context['error'] = 'Username may contain alphanumeric, _, @, +, . and - characters.'\n elif User.objects.filter(username=username).exists():\n q_token = q_token | Q(coder__user__username=username)\n context['error'] = 'User already exist.'\n else:\n with transaction.atomic():\n user = User.objects.create_user(username, token.email)\n token.coder = Coder.objects.create(user=user)\n token.save()\n return signup(request)\n\n tokens = Token.objects.filter(q_token).filter(~Q(id=token_id))\n context['tokens'] = tokens\n\n if tokens.count():\n if token.n_viewed_tokens >= settings.LIMIT_N_TOKENS_VIEW:\n now = timezone.now()\n if token.tokens_view_time is None:\n token.tokens_view_time = now\n if token.tokens_view_time + timedelta(hours=settings.LIMIT_TOKENS_VIEW_WAIT_IN_HOURS) < now:\n token.n_viewed_tokens = 0\n token.tokens_view_time = None\n else:\n context['limit_tokens_view'] = True\n token.n_viewed_tokens += 1\n token.save()\n\n context['token'] = token\n else:\n if request.user.is_authenticated:\n return redirect(request.session.pop('next', 'clist:main'))\n return redirect('auth:login')\n\n return render(\n request,\n 'signup.html',\n context,\n )\n\n\ndef logout(request):\n if request.user.is_authenticated:\n auth.logout(request)\n return redirect(\"/\")\n\n\n@permission_required('my_oauth.view_services_dump_data')\ndef services_dumpdata(request):\n out = StringIO()\n dumpdata.Command(stdout=out).run_from_argv([\n 'manage.py',\n 'dumpdata',\n 'my_oauth.service',\n '--format', 'json'\n ])\n services = json.loads(out.getvalue())\n for service in services:\n service['fields']['secret'] = None\n service['fields']['app_id'] = None\n return HttpResponse(json.dumps(services), content_type=\"application/json\")\n","sub_path":"my_oauth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"638472204","text":"\"\"\"\nCopyright 2020 The OneFlow Authors. 
All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nfrom typing import Optional, Sequence\n\nimport oneflow as flow\nfrom oneflow.nn.module import Module\n\n\nclass TensorBufferToTensor(Module):\n def __init__(self, dtype, instance_shape):\n super().__init__()\n self._op = (\n flow.builtin_op(\"tensor_buffer_to_tensor\")\n .Input(\"in\")\n .Output(\"out\")\n .Attr(\"dtype\", dtype)\n .Attr(\"instance_shape\", instance_shape)\n .Build()\n )\n\n def forward(self, input):\n return self._op(input)[0]\n\n\ndef tensor_buffer_to_tensor_op(x, dtype: flow.dtype, instance_shape: Sequence[int]):\n \"\"\"This operator converts the Tensor's type from TensorBuffer to original type.\n Some operator's output data type is `TensorBuffer`, you can use this operator to convert back\n to `Tensor`.\n\n Refer to `Concept Explanation `_\n for more about TensorBuffer.\n\n Args:\n x (oneflow.Tensor): The input Tensor.\n dtype (flow.dtype): The data dtype.\n instance_shape (Sequence[int]): The shape of each TensorBuffer instance.\n\n Returns:\n oneflow.Tensor: The result Tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n >>> x = np.random.randn(4, 16, 64, 64).astype(np.float32)\n >>> x = flow.Tensor(x)\n >>> x = flow.tensor_to_tensor_buffer(x, instance_dims=2)\n >>> output = flow.tensor_buffer_to_tensor(x, instance_shape=(64, 64), dtype=flow.float)\n >>> output.shape\n oneflow.Size([4, 16, 64, 64])\n\n \"\"\"\n return TensorBufferToTensor(dtype=dtype, instance_shape=instance_shape)(x)\n\n\nclass TensorToTensorBuffer(Module):\n def __init__(self, instance_dims):\n super().__init__()\n self._op = (\n flow.builtin_op(\"tensor_to_tensor_buffer\")\n .Input(\"in\")\n .Output(\"out\")\n .Attr(\"instance_dims\", instance_dims)\n .Build()\n )\n\n def forward(self, input):\n return self._op(input)[0]\n\n\ndef tensor_to_tensor_buffer(x, instance_dims: int):\n \"\"\"This operator converts the Tensor's type to TensorBuffer.\n\n Refer to `Concept Explanation `_\n for more about TensorBuffer.\n\n Args:\n x (oneflow.Tensor): The input Tensor.\n instance_dims (int): The dimensions of dynamic tensor instance.\n\n Returns:\n oneflow.Tensor: The result Tensor.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n >>> x = np.random.randn(4, 16, 64, 64).astype(np.float32)\n >>> x = flow.Tensor(x)\n >>> x = flow.tensor_to_tensor_buffer(x, instance_dims=2)\n >>> output = flow.tensor_buffer_to_tensor(x, instance_shape=(64, 64), dtype=flow.float)\n >>> output.shape\n oneflow.Size([4, 16, 64, 64])\n \n \"\"\"\n return TensorToTensorBuffer(instance_dims=instance_dims)(x)\n\n\nclass GenTensorBuffer(Module):\n def __init__(self, shape, shape_list, value_list, data_type, dynamic_out):\n super().__init__()\n self._op = (\n flow.builtin_op(\"gen_tensor_buffer\")\n .Output(\"out\")\n .Attr(\"shape\", shape)\n .Attr(\"shape_list\", shape_list)\n .Attr(\"value_list\", value_list)\n .Attr(\"data_type\", data_type)\n .Attr(\"dynamic_out\", dynamic_out)\n .Build()\n )\n\n def forward(self):\n return self._op()[0]\n\n\ndef gen_tensor_buffer(\n shape: Sequence[int],\n shape_list: Sequence[Sequence[int]],\n value_list: Sequence[float],\n data_type: Optional[flow.dtype] = flow.float32,\n dynamic_out: Optional[bool] = False,\n):\n return GenTensorBuffer(shape, shape_list, value_list, data_type, dynamic_out)()\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod(raise_on_error=True)\n","sub_path":"python/oneflow/nn/modules/tensor_buffer.py","file_name":"tensor_buffer.py","file_ext":"py","file_size_in_byte":4599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"471509073","text":"from random import randint\ndef randlist(r,usedlist,done):\n\tsum = 0\n\talpha = \"abcdefghijklmnopqrstuvwkyzABCDEFGHIJKLMNOPQRSTUVWKYZ0123456789'`[]\\\\;,.!@#$%^&*()_+{}|:\\\"<>?\"\n\tusedlist[r] = 1\n\tc = alpha[r]\n\t\n\tfor i in range (len(usedlist)):\n\t\tsum = sum + usedlist[i]\n\t#print (c,usedlist,\" sum \",sum)\n\tif sum == 94:\n\t\tdone = True\n\treturn c,usedlist,done\n\t\ndef main():\n\tused = [0]*94\n\tdone = False\n\twhile done == False:\n\t\tr = randint(0,63)\n\t\tc,used,done = randlist(r,used,done)\n\t\tprint(c,end=\"\")\nmain()\n\t\t\n\t\t\n\t\t\n","sub_path":"random_list.py","file_name":"random_list.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"389217657","text":"from collections import defaultdict\n\nclass Solution:\n def findMinHeightTrees(self, n, edges):\n \"\"\"\n :type n: int\n :type edges: List[List[int]]\n :rtype: List[int]\n \"\"\"\n \n nodes = n\n if nodes == 1:\n return [0]\n \n dToN = defaultdict(set)\n es = defaultdict(set)\n nToD = defaultdict(lambda : 0)\n\n for e in edges:\n es[e[0]].add(e[1])\n es[e[1]].add(e[0])\n nToD[e[0]] += 1 \n nToD[e[1]] += 1\n\n for node in nToD:\n dToN[nToD[node]].add(node)\n\n '''\n keep removing leafs until the remaining tree has\n either 1 node or two nodes\n\n make sure leafs are removed in one time step i.e. 
during a removal \n iteration we don't want to considered a newly created leaf in the same iteration\n\n '''\n while nodes > 2:\n # leafs \n leafs = list(dToN[1])\n for n in leafs:\n for m in es[n]:\n d = nToD[m]\n dToN[d].remove(m)\n dToN[d-1].add(m)\n nToD[m] = d-1\n es[m].remove(n)\n\n del nToD[n]\n dToN[1].remove(n) \n nodes -= 1\n\n return list(nToD.keys())\n\nif __name__ == '__main__':\n c = Solution()\n print(c.findMinHeightTrees(6, [[0, 3], [1, 3], [2, 3], [4, 3], [5, 4]] ))\n","sub_path":"Algos2/Algos2/LC/310.py","file_name":"310.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"256665103","text":"import numpy as np\r\nimport pandas as pd\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.metrics.pairwise import linear_kernel\r\nfrom ast import literal_eval\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\n\r\n\r\nfilmes = pd.read_csv(\"movies_metadata.csv\")\r\ncreditos = pd.read_csv(\"credits.csv\")\r\npalavraschaves = pd.read_csv(\"keywords.csv\")\r\n\r\nfilmes = filmes[[\"id\", \"title\", \"genres\",\"vote_average\", \"vote_count\"]]\r\n\r\n\r\n\r\n#Função de limpar os ID's\r\ndef limpa_id(x):\r\n try:\r\n return int(x)\r\n\r\n except:\r\n return np.nan\r\n\r\nfilmes['id'] = filmes['id'].apply(limpa_id)\r\nfilmes = filmes[filmes['id'].notnull()]\r\n\r\nfilmes['id'] = filmes['id'].astype('int')\r\npalavraschaves['id'] = palavraschaves['id']\r\ncreditos['id'] = creditos['id'].astype('int')\r\n\r\n\r\n\r\n# Juntando através do id\r\nfilmes1 = filmes.merge(creditos, on=\"id\")\r\nfilmes2 = filmes1.merge(palavraschaves, on=\"id\")\r\n\r\n\r\n\r\n# Aplicando literal_eval para fazer string -> objetos.\r\nfilmes2['genres'] = filmes2['genres'].apply(literal_eval)\r\nfilmes2['cast'] = filmes2['cast'].apply(literal_eval)\r\nfilmes2['crew'] = filmes2['crew'].apply(literal_eval)\r\nfilmes2['keywords'] = filmes2['keywords'].apply(literal_eval)\r\n\r\n\r\n\r\n# Pegando o nome dos diretores associados\r\nfilmes2['crew'] = filmes2['crew'].apply(lambda x: [i['name'].lower() for i in x if i['job'] == 'Director'])\r\n\r\n\r\n\r\ndf = filmes2\r\n\r\n\r\n\r\n#Aplicando filtro de filmes mais relevantes\r\nvote = df[df[\"vote_count\"].notnull()][\"vote_count\"].astype('float')\r\navg = df[df[\"vote_average\"].notnull()][\"vote_average\"].astype('float')\r\nC = avg.mean()\r\nm = vote.quantile(0.60)\r\nqualified = df[(df['vote_count'] >= m) & (df['vote_count'].notnull()) & (df['vote_average'].notnull())]\r\n\r\ndef imdb_qualified(x):\r\n v = x[\"vote_count\"]\r\n R = x[\"vote_average\"]\r\n return (v/(v+m) * R) + (m/(m+v) * C)\r\n\r\nqualified['wr'] = qualified.apply(imdb_qualified, axis = 1)\r\ndf = qualified[[\"id\", \"title\", \"genres\", \"cast\", \"crew\", \"keywords\"]]\r\n\r\n\r\n\r\n# Colocando tudo em letra minúscula\r\ndf[\"genres\"] = df[\"genres\"].apply(lambda x: [i[\"name\"].lower() for i in x])\r\ndf[\"cast\"] = df[\"cast\"].apply(lambda x: [i[\"name\"].lower() for i in x])\r\ndf[\"keywords\"] = df[\"keywords\"].apply(lambda x: [i[\"name\"].lower() for i in x])\r\n\r\n\r\n\r\n# Pegando até 3 caracteristicas de cada filme\r\n# Se pegarmos muitas caracteristicas pode aumentar demais a complexidade do algortimo\r\ndf[\"genres\"] = df[\"genres\"].apply(lambda x: x[:3] if len(x)>3 else 
x)\r\ndf[\"cast\"] = df[\"cast\"].apply(lambda x: x[:3] if len(x)>3 else x)\r\ndf[\"keywords\"] = df[\"keywords\"].apply(lambda x: x[:3] if len(x)>3 else x)\r\n\r\n\r\n\r\n#Removendo os espaços\r\ndf[\"cast\"] = df[\"cast\"].apply(lambda x: [i.replace(\" \",\"\") for i in x])\r\ndf[\"crew\"] = df[\"crew\"].apply(lambda x: [i.replace(\" \",\"\") for i in x])\r\ndf[\"keywords\"] = df[\"keywords\"].apply(lambda x: [i.replace(\" \",\"\") for i in x])\r\ndf[\"genres\"] = df[\"genres\"].apply(lambda x: [i.replace(\" \",\"\") for i in x])\r\n\r\n\r\n\r\n#Removendo filmes com nomes repetidos\r\ndf = df.drop_duplicates(subset='title', keep='first')\r\n\r\n\r\n\r\n#Transformando tudo em uma coluna só\r\ndf[\"metadata\"] = df.apply(lambda x : \" \".join(x[\"genres\"]) + \" \" + \" \".join(x[\"cast\"]) + \" \" + \" \".join(x[\"crew\"]) + \" \" + \" \".join(x[\"keywords\"]), axis = 1)\r\ndf_metadata = df.iloc[:10000, 6]\r\n\r\n\r\n\r\n#Mapeando a função\r\nmapear = pd.Series(df_metadata.index, index = df.iloc[:10000, 1])\r\n\r\n\r\n\r\n# Funcao de recomendacao de acordo com nossos metadados\r\ndef sistema(filmes_input):\r\n '''list -> list'''\r\n # \"Somando os filmes\"\r\n # indice depende do tamanho do dataframe\r\n indice = 15468\r\n df_metadata[indice] = ' '\r\n for filme in filmes_input:\r\n df_metadata[indice] += ' ' + df_metadata[mapear[filme]]\r\n\r\n # Usando a similaridade de cosseno\r\n cv = CountVectorizer(stop_words='english')\r\n contador_matrix = cv.fit_transform(df_metadata)\r\n cosseno_sim_matrix = cosine_similarity(contador_matrix)\r\n\r\n # Resetando a soma de filmes\r\n df_metadata[indice] = ' '\r\n\r\n # Obtendo os valores similares\r\n score = list(enumerate(cosseno_sim_matrix[10000]))\r\n score = sorted(score, key=lambda x: x[1], reverse=True)\r\n\r\n # Amostra de 10 filmes\r\n n_filmes = len(filmes_input)\r\n score = score[(n_filmes + 1):(n_filmes + 11)]\r\n indices = [i[0] for i in score]\r\n\r\n # Transformando de pd.Series pra lista\r\n lista_recomendada = mapear.iloc[indices].index.to_list()\r\n\r\n return (lista_recomendada)","sub_path":"Projeto_parte1.py","file_name":"Projeto_parte1.py","file_ext":"py","file_size_in_byte":4602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"31741592","text":"from django.shortcuts import render, redirect\nfrom apps.uploadfile.form import UploadForm\nfrom apps.uploadfile.models import Fileupload\n\n\ndef upload_file(request):\n if request.method == 'POST':\n form = UploadForm(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n return redirect(\"upload_file\")\n else:\n form = UploadForm()\n\n return render(request, 'upload.html', {'form': form})\n\n","sub_path":"Django_Celery/apps/uploadfile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"487276416","text":"import configparser\n\nconfig = configparser.ConfigParser(allow_no_value=True)\n\nconfig.read('host.ini')\nprint(config.sections())\nwindows=[(key,config['windows'][key]) for key in config['windows']]\nprint(windows)\nlinux=[(key,config['linux'][key]) for key in config['linux']]\nprint(linux)\n","sub_path":"mypywinrm/configParser_host.py","file_name":"configParser_host.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"409696448","text":"class Man:\n def __init__(self, name):\n self.name = 
name\n        print(\"Initialized!: \" + name)\n\n    def hello(self):\n        print(\"Hello \" + self.name + \"!\")\n\n    def goodbye(self):\n        print(\"Good-bye \" + self.name + \"!\")\n\n\nm1 = Man(\"David\")\nm2 = Man(\"Foo\")\nm3 = Man(\"Bar\")\nm1.hello()\nm1.goodbye()\nm2.hello()\nm2.goodbye()\nm3.hello()\nm3.goodbye()","sub_path":"01_hellopy/man.py","file_name":"man.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"41844064","text":"import json\nfrom flask_cors import CORS\nfrom flask import Flask, request, render_template\nfrom sqlalchemy.exc import IntegrityError\n\nfrom models import db, Logs\n\n''' Begin boilerplate code '''\ndef create_app():\n    app = Flask(__name__, static_url_path='')\n    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'\n    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\n    app.config['SECRET_KEY'] = \"MYSECRET\"\n    CORS(app)\n    db.init_app(app)\n    return app\n\napp = create_app()\n\napp.app_context().push()\n\n''' End Boilerplate Code '''\n\n@app.route('/')\ndef index():\n    return render_template('home.html')\n\n@app.route('/app')\ndef client_app():\n    return app.send_static_file('app.html')\n\n@app.route('/data', methods=['GET'])\ndef getData():\n    token = request.args.get('token')\n    ress = 'Hello token='+token if token else \"Hello\"\n    return ress\n\n@app.route('/data', methods=['POST'])\ndef addData():\n    data = request.json\n    res = 'Hello data='+json.dumps(data) if data else \"Hello\"\n    return res, 201\n\n@app.route('/data/<id>', methods=['DELETE'])\ndef removeData(id):\n    res = 'id '+id+' Deleted!'\n    return res, 204\n\n@app.route('/data/<id>', methods=['PUT'])\ndef updateData(id):\n    data = request.json\n    res = 'id '+id\n    res += ' Hello data='+json.dumps(data) if data else \"Hello\"\n    return res, 201\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', port=8080, debug=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"366828650","text":"# LargeSmall.py - This program calculates the largest and smallest of three integer values. \n# Declare and initialize variables here\nfirstNumber = -50\nsecondNumber = 53\nthirdNumber = 78\n\na = -50\nb = 53\nc = 78\n\nlargest = 0 \nsmallest = 0 \n\n# Write assignment, if, or if else statements here as appropriate\n\n# if thirdNumber > secondNumber:\n#     largest = thirdNumber\n# elif thirdNumber > firstNumber:\n#     largest = thirdNumber\n\n# Working code\n\nif c >= a and c >= b:\n    largest = c\nelif b >= a:\n    largest = b\nelse:\n    largest = a\n\nif a <= b and a <= c:\n    smallest = a\nelif b <= c:\n    smallest = b\nelse:\n    smallest = c\n\n\n\n# Output largest and smallest number. \nprint(\"The largest value is \" + str(largest))\nprint(\"The smallest value is \" + str(smallest))","sub_path":"Ch4/ceng_chap4_elif_if_.py","file_name":"ceng_chap4_elif_if_.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"593712556","text":"import sys\nimport random\nimport time\nimport pylab\n\nglobal N \nN = 15\n# A utility function to check if a queen can \n# be placed on board[row][col]. 
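(A square is safe when no queen\n# already placed in the columns to its left shares its row or either\n# of its two left-going diagonals.) 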
Note that this \n# function is called when \"col\" queens are \n# already placed in columns from 0 to col -1. \n# So we need to check only left side for \n# attacking queens \ndef isSafe(board, row, col): \n\n\t# Check this row on left side \n\tfor i in range(col): \n\t\tif board[row][i] == 1: \n\t\t\treturn False\n\n\t# Check upper diagonal on left side \n\tfor i, j in zip(range(row, -1, -1), \n\t\t\t\t\trange(col, -1, -1)): \n\t\tif board[i][j] == 1: \n\t\t\treturn False\n\n\t# Check lower diagonal on left side \n\tfor i, j in zip(range(row, N, 1), \n\t\t\t\t\trange(col, -1, -1)): \n\t\tif board[i][j] == 1: \n\t\t\treturn False\n\treturn True\n\ndef solveNQUtil(board, col): \n\t\n\t# base case: If all queens are placed \n\t# then return true \n\tif col >= N: \n\t\treturn True\n\n\t# Consider this column and try placing \n\t# this queen in all rows one by one \n\tfor i in range(N): \n\n\t\tif isSafe(board, i, col): \n\t\t\t\n\t\t\t# Place this queen in board[i][col] \n\t\t\tboard[i][col] = 1\n\n\t\t\t# recur to place rest of the queens \n\t\t\tif solveNQUtil(board, col + 1) == True: \n\t\t\t\treturn True\n\n\t\t\t# If placing queen in board[i][col \n\t\t\t# doesn't lead to a solution, then \n\t\t\t# queen from board[i][col] \n\t\t\tboard[i][col] = 0\n\n\t# if the queen can not be placed in any row in \n\t# this colum col then return false \n\treturn False\n\nx = []\ny = []\nfor i in range(4,15):\n\tN = i\n\tx.append(i)\n\tboard = [[0]*i for _ in range(i)]\n\tstart= time.time()\n\tsolveNQUtil(board, 0)\n\tstop= time.time()\n\ty.append(stop-start)\n\npylab.plot(x,y,'ro-')\npylab.title(\"N Queens Algorithim\")\npylab.xlabel('Input size')\npylab.ylabel('Execution time (Seconds)')\npylab.show()","sub_path":"bestone/Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"65926238","text":"\"\"\"\nbenchmark_mjpeg.py\nEthan Toly\nSept 2018\nThis program is made to test mjpeg encoding for frame frame \nacquisition from the PiCamera.\n\"\"\"\n\nimport picamera\nimport time\nimport io\nimport _thread\nimport sys\nimport os\n\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nfrom scipy.ndimage.filters import gaussian_filter\n\ndef plot_time_differences(time_differences):\n plt.plot(time_differences)\n plt.ylabel(\"Microseconds\")\n plt.xlabel(\"Frame number\")\n plt.show()\n\ndef write_to_disk(buff):\n byte.write(buff)\n byte.flush()\n os.fsync(byte.fileno())\n return\n\ndef abs_difference(img1, img2):\n a = img1 - img2\n b = img2 - img1\n c = img1>img2\n return a*c + b*(~c)\n\n\nclass Tracking:\n\n def __init__(self):\n self.last_frame = None\n\n \n def sequential_frame_subtraction(self, frame):\n #converts from jpeg to rgb\n frame = plt.imread(io.BytesIO(frame), format='jpeg')\n #convert to greyscale\n frame = np.dot(frame[...,:3], [.3, .6, .1])\n #blur image\n frame = gaussian_filter(frame, sigma=6)\n \n if self.last_frame is None:\n self.last_frame = frame\n\n #find absolute difference\n tmp = abs_difference(frame, self.last_frame)\n self.last_frame = frame\n frame = tmp\n \n #plt.imshow(frame)\n #plt.show()\n trackingfile.write(frame)\n return\n \n\nclass Processor:\n def __init__(self):\n self.last_ts = 0;\n self.frame_num = 0;\n self.buff = b\"\"\n \n def write(self, f):\n if cam.frame.complete:\n self.frame_num+=1\n ts = cam.frame.timestamp\n tsfile.write(str(ts) + '\\n')\n \n \"\"\"\n if ts != None:\n if (ts - self.last_ts) > 
int((1/framerate)*1000000*1.5):\n print(\"hang\")\n self.last_ts = ts;\n \"\"\"\n #if self.frame_num % framerate == 0 and trk:\n # _thread.start_new_thread(track.sequential_frame_subtraction, (f, ))\n \n video.write(f)\n \"\"\"\n if self.frame_num % 200 == 0:\n self.buff+=f\n _thread.start_new_thread(write_to_disk,(self.buff,))\n self.buff = b\"\"\n else:\n self.buff+=f\n \"\"\"\n\nframerates = [30, 60, 100]\nresolutions = [(960, 540)]\n\nfor framerate in framerates:\n for resolution in resolutions:\n processor = Processor()\n track = Tracking()\n\n seconds = 60 * 60\n\n (w, h) = resolution\n\n video = open(\"/media/pi/Ethan Black/video.avi\", \"wb\")\n tsfile = open(\"/media/pi/Ethan Black/ts\" + str(framerate) +\"mjpeg\" + str(resolution) + \".txt\", \"w\")\n\n print(\"Recording...\")\n\n with picamera.PiCamera() as cam:\n cam.resolution = (w, h)\n cam.framerate = framerate\n #cam.start_preview()\n start = time.time()\n cam.start_recording(output=processor, format='mjpeg', quality=25)\n cam.wait_recording(seconds)\n cam.stop_recording()\n\n print(\"Done recording\")\n print(\"Actual recording time\", time.time() - start)\n #byte.write(processor.buff)\n tsfile.close()\n video.close()\n","sub_path":"evaluation/benchmark_mjpeg.py","file_name":"benchmark_mjpeg.py","file_ext":"py","file_size_in_byte":3215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"332266261","text":"import urllib.parse\nfrom configparser import ConfigParser\nfrom random import randint\n\nimport requests\nimport rule34\n\nimport controller\nfrom wrappers import cmd\nfrom .basemodule import BaseModule\n\n\nclass Lewd(BaseModule):\n\n def __init__(self, _config: ConfigParser):\n self.config = _config\n\n self.headers = {\"User-Agent\": self.config.get(\"esix\", \"agent\", raw=True)}\n self.name = self.config.get(\"esix\", \"name\", raw=True)\n self.key = self.config.get(\"esix\", \"key\", raw=True)\n self.url = self.config.get(\"esix\", \"url\", raw=True)\n\n self.rule34 = rule34.Sync()\n\n def process(self, cont: 'controller.Controller', source: str, code: str, send_to: str, line: list[str]):\n pass\n\n # -- Commands -- #\n\n @cmd()\n def lewd(self, cont: 'controller.Controller', source: str, code: str, send_to: str, params: list[str]):\n\n spec = False\n\n if len(params) > 0:\n\n if params[0].lower() == \"e6\" or params[0].lower() == \"34\":\n joined = \" \".join(params[1:])\n spec = True\n else:\n joined = \" \".join(params[0:])\n\n if spec and len(params) == 1:\n cont.chat(send_to, \"Please specify some more tags, or use an asterisk for a wildcard.\")\n\n search = urllib.parse.quote_plus(joined)\n\n if params[0] == \"e6\":\n data = self.search_esix(search)\n elif params[0] == \"34\":\n data = self.search_rule(search)\n else:\n data = self.search_esix(search)\n if data[0] == 0:\n data = self.search_rule(search)\n\n if data[0] == 0:\n data[1] = \"No images found on e621 or r34!\"\n\n cont.chat(send_to, data[1])\n\n else:\n cont.chat(send_to, \"Please specify some more tags, or use an asterisk for a wildcard.\")\n\n # -- Helpers -- #\n\n def search_esix(self, search):\n ret = [0, \"No posts found on e621!\"]\n\n esix_url = self.url.format(search)\n esix_get = requests.get(esix_url, headers=self.headers, auth=(self.name, self.key))\n\n try:\n posts = esix_get.json()[\"posts\"]\n ret[0] = len(posts)\n\n if ret[0] > 0:\n ret[1] = posts[randint(0, len(posts) - 1)][\"file\"][\"url\"]\n except ValueError:\n ret[1] = \"Error connecting to e621.\"\n\n return ret\n\n def 
search_rule(self, search):\n ret = [0, \"No posts found on rule34!\"]\n\n # noinspection PyBroadException\n try:\n posts = self.rule34.getImages(search)\n ret[0] = len(posts)\n\n if ret[0] > 0:\n ret[1] = posts[randint(0, len(posts) - 1)].file_url\n except Exception:\n ret[1] = \"Error connected to rule34.\"\n\n return ret\n","sub_path":"modules/lewd.py","file_name":"lewd.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"566452811","text":"#!/usr/bin/env python\n\n#****************************************************************************\n# printdialogs.py, provides a print preview and print settings dialogs\n#\n# Copyright (C) 2006, Douglas W. Bell\n#\n# This is free software; you can redistribute it and/or modify it under the\n# terms of the GNU General Public License, either Version 2 or any later\n# version. This program is distributed in the hope that it will be useful,\n# but WITTHOUT ANY WARRANTY. See the included LICENSE file for details.\n#*****************************************************************************\n\nimport re\nimport copy\nfrom PyQt4 import QtCore, QtGui\nimport configdialog\nimport nodeformat\nimport optiondefaults\nimport globalref\n\nstdWinFlags = QtCore.Qt.Dialog | QtCore.Qt.WindowTitleHint | \\\n QtCore.Qt.WindowSystemMenuHint\n\n\nclass PrintPrevDlg(QtGui.QDialog):\n \"\"\"Provides a generic print preview with page controls\"\"\"\n def __init__(self, printData, numPages, paperRect, pageCmd, parent=None):\n QtGui.QDialog.__init__(self, parent)\n self.setWindowFlags(stdWinFlags)\n self.setWindowTitle(_('Print Preview'))\n\n self.printData = printData\n self.curPage = 1\n self.minPage = 1\n self.maxPage = numPages\n topLayout = QtGui.QVBoxLayout(self)\n ctrlLayout = QtGui.QHBoxLayout()\n topLayout.addLayout(ctrlLayout)\n\n self.prevButton = QtGui.QPushButton(_('P&rev. 
Page'))\n ctrlLayout.addWidget(self.prevButton)\n self.connect(self.prevButton, QtCore.SIGNAL('clicked()'),\n self.prevPage)\n\n self.nextButton = QtGui.QPushButton(_('&Next Page'))\n ctrlLayout.addWidget(self.nextButton)\n self.connect(self.nextButton, QtCore.SIGNAL('clicked()'),\n self.nextPage)\n\n self.statusLabel = QtGui.QLabel('')\n ctrlLayout.addWidget(self.statusLabel, 1)\n self.statusLabel.setAlignment(QtCore.Qt.AlignCenter)\n self.statusLabel.setFrameStyle(QtGui.QFrame.Panel |\n QtGui.QFrame.Sunken)\n self.statusLabel.setMargin(2)\n\n previewButton = QtGui.QPushButton(_('Print Option&s...'))\n ctrlLayout.addWidget(previewButton)\n self.connect(previewButton, QtCore.SIGNAL('clicked()'),\n self.showOptions)\n\n printButton = QtGui.QPushButton(_('&Print...'))\n ctrlLayout.addWidget(printButton)\n self.connect(printButton, QtCore.SIGNAL('clicked()'), self.accept)\n\n cancelButton = QtGui.QPushButton(_('&Close'))\n ctrlLayout.addWidget(cancelButton)\n self.connect(cancelButton, QtCore.SIGNAL('clicked()'), self.reject)\n\n self.preview = PrintPrev(paperRect, pageCmd)\n topLayout.addWidget(self.preview, 1)\n\n self.updatePageNum()\n\n def updatePageNum(self):\n \"\"\"Enable/disable prev & next buttons, update status label \n and preview\"\"\"\n self.prevButton.setEnabled(self.curPage > self.minPage)\n self.nextButton.setEnabled(self.curPage < self.maxPage)\n self.statusLabel.setText(_('Page %(current)i of %(max)i') %\n {'current':self.curPage, 'max':self.maxPage})\n self.preview.setPageNum(self.curPage)\n\n def prevPage(self):\n \"\"\"Go to previous page\"\"\"\n if self.curPage > self.minPage:\n self.curPage -= 1\n self.updatePageNum()\n\n def nextPage(self):\n \"\"\"Go to next page\"\"\"\n if self.curPage < self.maxPage:\n self.curPage += 1\n self.updatePageNum()\n\n def showOptions(self):\n \"\"\"Show a modal options dialog\"\"\"\n dlg = PrintOptionsDialog(self.printData, False, self)\n self.setUpdatesEnabled(False) # halt repaint until settings consistent\n if dlg.exec_() == QtGui.QDialog.Accepted:\n QtGui.QApplication.setOverrideCursor(QtCore.Qt.WaitCursor)\n self.printData.setPrintContent()\n self.maxPage = int(globalref.docRef.fileInfoItem.data[nodeformat.\n FileInfoFormat.\n numPagesFieldName])\n if self.curPage > self.maxPage:\n self.curPage = self.maxPage\n self.preview.paperRect = self.printData.printer.paperRect()\n self.updatePageNum()\n QtGui.QApplication.restoreOverrideCursor()\n self.setUpdatesEnabled(True)\n\n\nclass PrintPrev(QtGui.QWidget):\n \"\"\"Provides a widget for the paper\"\"\"\n def __init__(self, paperRect, pageCmd, parent=None):\n QtGui.QWidget.__init__(self, parent)\n self.setSizePolicy(QtGui.QSizePolicy.Expanding,\n QtGui.QSizePolicy.Expanding)\n\n self.paperRect = paperRect\n self.pageCmd = pageCmd\n self.pageNum = 0\n\n def sizeHint(self):\n \"\"\"Return preferred size\"\"\"\n return QtCore.QSize(250, 450)\n\n def setPageNum(self, pageNum):\n \"\"\"Set new page number and update\"\"\"\n self.pageNum = pageNum\n self.update()\n\n def paintEvent(self, event):\n \"\"\"Paint the current page\"\"\"\n paint = QtGui.QPainter(self)\n viewRect = paint.viewport()\n paperViewSize = self.paperRect.size() # used for aspect ratio only\n paperViewSize.scale(viewRect.size(), QtCore.Qt.KeepAspectRatio)\n leftMargin = (viewRect.width() - paperViewSize.width()) // 2\n topMargin = (viewRect.height() - paperViewSize.height()) // 2\n paperViewRect = QtCore.QRect(leftMargin, topMargin,\n paperViewSize.width(),\n paperViewSize.height())\n paint.setWindow(self.paperRect)\n 
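# QPainter maps the logical window (paper coordinates) onto this\n        # widget-space viewport, scaling the page drawing to fit the screen.\n        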
paint.setViewport(paperViewRect)\n paint.fillRect(self.paperRect, QtGui.QBrush(QtCore.Qt.white))\n self.pageCmd(self.pageNum, paint)\n\n\nclass PrintOptionsDialog(QtGui.QDialog):\n \"\"\"Base dialog for print configuration\"\"\"\n def __init__(self, printData, showExtraButtons=True, parent=None):\n QtGui.QDialog.__init__(self, parent)\n self.setWindowFlags(stdWinFlags)\n self.setWindowTitle(_('Print Options'))\n self.printData = printData\n\n topLayout = QtGui.QVBoxLayout(self)\n self.setLayout(topLayout)\n\n tabs = QtGui.QTabWidget()\n topLayout.addWidget(tabs)\n generalPage = GeneralPage()\n tabs.addTab(generalPage, _('&General Options'))\n pageSetupPage = PageSetupPage(self.printData.printer)\n tabs.addTab(pageSetupPage, _('&Page Setup'))\n fontPage = FontPage(self.printData)\n tabs.addTab(fontPage, _('&Font Selection'))\n headerPage = HeaderPage()\n tabs.addTab(headerPage, _('&Header/Footer'))\n self.tabPages = [generalPage, pageSetupPage, fontPage, headerPage]\n\n ctrlLayout = QtGui.QHBoxLayout()\n topLayout.addLayout(ctrlLayout)\n ctrlLayout.addStretch(0)\n if showExtraButtons:\n previewButton = QtGui.QPushButton(_('Print Pre&view...'))\n ctrlLayout.addWidget(previewButton)\n self.connect(previewButton, QtCore.SIGNAL('clicked()'),\n self.preview)\n printButton = QtGui.QPushButton(_('P&rint...'))\n ctrlLayout.addWidget(printButton)\n self.connect(printButton, QtCore.SIGNAL('clicked()'),\n self.quickPrint)\n okButton = QtGui.QPushButton(_('&OK'))\n ctrlLayout.addWidget(okButton)\n self.connect(okButton, QtCore.SIGNAL('clicked()'), self,\n QtCore.SLOT('accept()'))\n cancelButton = QtGui.QPushButton(_('&Cancel'))\n ctrlLayout.addWidget(cancelButton)\n self.connect(cancelButton, QtCore.SIGNAL('clicked()'), self,\n QtCore.SLOT('reject()'))\n\n def quickPrint(self):\n \"\"\"Accept this dialog and go to print dialog\"\"\"\n self.accept()\n self.printData.filePrint()\n\n def preview(self):\n \"\"\"Accept this dialog and go to print preview dialog\"\"\"\n self.accept()\n self.printData.filePrintPreview()\n\n def accept(self):\n \"\"\"Store results before closing dialog\"\"\"\n for page in self.tabPages:\n page.saveChanges()\n globalref.options.writeChanges()\n QtGui.QDialog.accept(self)\n\nclass GeneralPage(QtGui.QWidget):\n \"\"\"Misc print option dialog page\"\"\"\n printWhat = ['tree', 'branch', 'node']\n def __init__(self, parent=None):\n QtGui.QWidget.__init__(self, parent)\n topLayout = QtGui.QGridLayout(self)\n self.setLayout(topLayout)\n\n whatGroupBox = QtGui.QGroupBox(_('What to print'))\n topLayout.addWidget(whatGroupBox, 0, 0)\n whatLayout = QtGui.QVBoxLayout(whatGroupBox)\n self.whatButtons = QtGui.QButtonGroup(self)\n treeButton = QtGui.QRadioButton(_('&Entire tree'))\n self.whatButtons.addButton(treeButton,\n GeneralPage.printWhat.index('tree'))\n whatLayout.addWidget(treeButton)\n branchButton = QtGui.QRadioButton(_('Selected &branches'))\n self.whatButtons.addButton(branchButton,\n GeneralPage.printWhat.index('branch'))\n whatLayout.addWidget(branchButton)\n nodeButton = QtGui.QRadioButton(_('Selected &nodes'))\n self.whatButtons.addButton(nodeButton,\n GeneralPage.printWhat.index('node'))\n whatLayout.addWidget(nodeButton)\n setting = globalref.options.strData('PrintWhat')\n try:\n self.whatButtons.button(GeneralPage.printWhat.\n index(setting)).setChecked(True)\n except ValueError:\n self.whatButtons.button(0).setChecked(True)\n self.connect(self.whatButtons, QtCore.SIGNAL('buttonClicked(int)'),\n self.updateCmdAvail)\n\n optionBox = QtGui.QGroupBox(_('Features'))\n 
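# the feature check boxes below are seeded from the saved options\n        # and written back in saveChanges()\n        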
topLayout.addWidget(optionBox, 0, 1)\n optionLayout = QtGui.QVBoxLayout(optionBox)\n self.linesButton = QtGui.QCheckBox(_('Draw &lines to children'))\n optionLayout.addWidget(self.linesButton)\n self.linesButton.setChecked(globalref.options.boolData('PrintLines'))\n self.rootButton = QtGui.QCheckBox(_('&Include root node'))\n optionLayout.addWidget(self.rootButton)\n self.rootButton.setChecked(globalref.options.boolData('PrintRoot'))\n self.openOnlyButton = QtGui.QCheckBox(_('Only open no&de children'))\n optionLayout.addWidget(self.openOnlyButton)\n self.openOnlyButton.setChecked(globalref.options.\n boolData('PrintOpenOnly'))\n self.widowButton = QtGui.QCheckBox(_('&Keep first child with parent'))\n optionLayout.addWidget(self.widowButton)\n self.widowButton.setChecked(globalref.options.\n boolData('PrintKeepFirstChild'))\n topLayout.setRowStretch(1, 1)\n\n def updateCmdAvail(self):\n \"\"\"Update options available\"\"\"\n if GeneralPage.printWhat[self.whatButtons.checkedId()] == 'node':\n self.rootButton.setChecked(False)\n self.rootButton.setEnabled(False)\n self.openOnlyButton.setChecked(False)\n self.openOnlyButton.setEnabled(False)\n else:\n self.rootButton.setEnabled(True)\n self.openOnlyButton.setEnabled(True)\n\n def saveChanges(self):\n \"\"\"Update option data with current dialog settings\"\"\"\n globalref.options.changeData('PrintWhat',\n GeneralPage.printWhat[self.whatButtons.\n checkedId()], True)\n globalref.options.changeData('PrintLines',\n self.linesButton.isChecked() and\n 'yes' or 'no', True)\n globalref.options.changeData('PrintRoot',\n self.rootButton.isChecked() and\n 'yes' or 'no', True)\n globalref.options.changeData('PrintOpenOnly',\n self.openOnlyButton.isChecked() and\n 'yes' or 'no', True)\n globalref.options.changeData('PrintKeepFirstChild',\n self.widowButton.isChecked() and\n 'yes' or 'no', True)\n\n\nclass PageSetupPage(QtGui.QWidget):\n \"\"\"Page setup print option dialog page\"\"\"\n pageSizes = [u'Letter', u'Legal', u'Tabloid', u'A3', u'A4', u'A5',\n u'Comm10E', u'C5E', u'DLE']\n pageSizeDescr = [_('Letter (8.5 x 11 in.)'), _('Legal (8.5 x 14 in.)'),\n _('Tabloid (11 x 17 in.)'), _('A3 (279 x 420 mm)'),\n _('A4 (210 x 297 mm)'), _('A5 (148 x 210 mm)'),\n _('#10 Envelope (4.125 x 9.5 in.)'),\n _('C5 Envelope (163 x 229 mm)'),\n _('DL Envelope (110 x 22 mm)')]\n units = [u'inch', u'centimeter', u'millimeter']\n unitNames = [_('inches'), _('centimeters'), _('millimeters')]\n unitValues = {'inch': 1.0, 'centimeter': 2.54, 'millimeter': 25.4}\n def __init__(self, printer, parent=None):\n QtGui.QWidget.__init__(self, parent)\n self.printer = printer\n topLayout = QtGui.QVBoxLayout(self)\n self.setLayout(topLayout)\n horizLayout = QtGui.QHBoxLayout()\n topLayout.addLayout(horizLayout)\n\n leftLayout = QtGui.QVBoxLayout()\n horizLayout.addLayout(leftLayout)\n\n paperGroup = QtGui.QGroupBox(_('Paper &Size'))\n leftLayout.addWidget(paperGroup)\n paperLayout = QtGui.QVBoxLayout(paperGroup)\n self.paperBox = QtGui.QComboBox()\n paperLayout.addWidget(self.paperBox)\n self.paperBox.addItems(PageSetupPage.pageSizeDescr)\n sizeList = [getattr(QtGui.QPrinter, name) for name in\n PageSetupPage.pageSizes]\n try:\n sizeNum = sizeList.index(self.printer.pageSize())\n except ValueError:\n sizeNum = 0\n self.paperBox.setCurrentIndex(sizeNum)\n\n orientGroup = QtGui.QGroupBox(_('Orientation'))\n leftLayout.addWidget(orientGroup)\n orientLayout = QtGui.QVBoxLayout(orientGroup)\n self.portraitButton = QtGui.QRadioButton(_('&Portrait'))\n 
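# kept as an attribute so saveChanges() can query the checked state\n        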
orientLayout.addWidget(self.portraitButton)\n self.landscapeButton = QtGui.QRadioButton(_('&Landscape'))\n orientLayout.addWidget(self.landscapeButton)\n if self.printer.orientation() == QtGui.QPrinter.Landscape:\n self.landscapeButton.setChecked(True)\n else:\n self.portraitButton.setChecked(True)\n\n unitsGroup = QtGui.QGroupBox(_('&Units'))\n leftLayout.addWidget(unitsGroup)\n unitsLayout = QtGui.QVBoxLayout(unitsGroup)\n self.unitsBox = QtGui.QComboBox()\n unitsLayout.addWidget(self.unitsBox)\n self.unitsBox.addItems(PageSetupPage.unitNames)\n self.currentUnit = globalref.options.strData('PrintUnits', False)\n try:\n unitNum = PageSetupPage.units.index(self.currentUnit)\n except ValueError:\n self.currentUnit = u'inch'\n unitNum = 0\n self.unitsBox.setCurrentIndex(unitNum)\n self.connect(self.unitsBox, QtCore.SIGNAL('currentIndexChanged(int)'),\n self.changeUnits)\n\n rightLayout = QtGui.QVBoxLayout()\n horizLayout.addLayout(rightLayout)\n\n columnGroup = QtGui.QGroupBox(_('Columns'))\n rightLayout.addWidget(columnGroup)\n columnLayout = QtGui.QGridLayout(columnGroup)\n numLabel = QtGui.QLabel(_('&Number of columns'))\n columnLayout.addWidget(numLabel, 0, 0)\n self.columnSpin = QtGui.QSpinBox()\n columnLayout.addWidget(self.columnSpin, 0, 1)\n numLabel.setBuddy(self.columnSpin)\n self.columnSpin.setMinimum(1)\n self.columnSpin.setMaximum(optiondefaults.maxNumCol)\n self.columnSpin.setValue(globalref.options.intData('PrintNumCols', 1,\n optiondefaults.maxNumCol))\n\n self.spaceLabel = QtGui.QLabel()\n columnLayout.addWidget(self.spaceLabel, 1, 0)\n self.columnSpaceSpin = QtGui.QDoubleSpinBox()\n columnLayout.addWidget(self.columnSpaceSpin, 1, 1)\n self.spaceLabel.setBuddy(self.columnSpaceSpin)\n self.columnSpaceSpin.setMinimum(0.0)\n self.columnSpace = globalref.options.numData('PrintColSpace', 0.0,\n optiondefaults.\n maxPrintMargin)\n\n offsetGroup = QtGui.QGroupBox(_('Offsets'))\n rightLayout.addWidget(offsetGroup)\n offsetLayout = QtGui.QGridLayout(offsetGroup)\n self.indentLabel = QtGui.QLabel()\n offsetLayout.addWidget(self.indentLabel, 0, 0)\n self.indentSpin = QtGui.QDoubleSpinBox()\n offsetLayout.addWidget(self.indentSpin, 0, 1)\n self.indentLabel.setBuddy(self.indentSpin)\n self.indentSpin.setMinimum(0.0)\n self.indent = globalref.options.numData('PrintIndentOffset', 0.0,\n optiondefaults.maxPrintIndent)\n\n self.horizLabel = QtGui.QLabel()\n offsetLayout.addWidget(self.horizLabel, 1, 0)\n self.horizMarginSpin = QtGui.QDoubleSpinBox()\n offsetLayout.addWidget(self.horizMarginSpin, 1, 1)\n self.horizLabel.setBuddy(self.horizMarginSpin)\n self.horizMargin = globalref.options.numData('HorizMargin',\n optiondefaults.minPrintMargin,\n optiondefaults.maxPrintMargin)\n\n self.vertLabel = QtGui.QLabel()\n offsetLayout.addWidget(self.vertLabel, 2, 0)\n self.vertMarginSpin = QtGui.QDoubleSpinBox()\n offsetLayout.addWidget(self.vertMarginSpin, 2, 1)\n self.vertLabel.setBuddy(self.vertMarginSpin)\n self.vertMargin = globalref.options.numData('VertMargin',\n optiondefaults.minPrintMargin,\n optiondefaults.maxPrintMargin)\n self.writeFloatValues()\n topLayout.addStretch()\n\n def saveChanges(self):\n \"\"\"Update option data with current dialog settings\"\"\"\n pageSizeName = PageSetupPage.pageSizes[self.paperBox.currentIndex()]\n pageSize = getattr(QtGui.QPrinter, pageSizeName)\n self.printer.setPageSize(pageSize)\n globalref.options.changeData('PrintPageSize', pageSizeName, True)\n\n if self.portraitButton.isChecked():\n self.printer.setOrientation(QtGui.QPrinter.Portrait)\n 
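# persist the choice so the next session restores it\n            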
globalref.options.changeData('PrintLandscape', 'no', True)\n else:\n self.printer.setOrientation(QtGui.QPrinter.Landscape)\n globalref.options.changeData('PrintLandscape', 'yes', True)\n\n globalref.options.changeData('PrintUnits', self.currentUnit, True)\n globalref.options.changeData('PrintNumCols',\n repr(self.columnSpin.value()), True)\n self.readFloatValues()\n globalref.options.changeData('PrintColSpace', repr(self.columnSpace),\n True)\n globalref.options.changeData('PrintIndentOffset', repr(self.indent),\n True)\n globalref.options.changeData('HorizMargin', repr(self.horizMargin),\n True)\n globalref.options.changeData('VertMargin', repr(self.vertMargin), True)\n\n def writeFloatValues(self):\n \"\"\"Convert and write float values to the spin boxes\"\"\"\n factor = PageSetupPage.unitValues[self.currentUnit]\n stepSize = int(factor * 2) / 10.0\n decimals = 2\n if self.currentUnit == 'millimeter':\n decimals = 1\n unitText = PageSetupPage.unitNames[PageSetupPage.units.\n index(self.currentUnit)]\n\n self.columnSpaceSpin.setMaximum(optiondefaults.maxPrintMargin * factor)\n self.columnSpaceSpin.setSingleStep(stepSize)\n self.columnSpaceSpin.setDecimals(decimals)\n self.columnSpaceSpin.setValue(self.columnSpace * factor)\n self.spaceLabel.setText(_('Space &between columns (%s)') % unitText)\n\n self.indentSpin.setMaximum(optiondefaults.maxPrintIndent * factor)\n self.indentSpin.setSingleStep(stepSize)\n self.indentSpin.setDecimals(decimals)\n self.indentSpin.setValue(self.indent * factor)\n self.indentLabel.setText(_('Child &indent offset (%s)') % unitText)\n\n self.horizMarginSpin.setMinimum(optiondefaults.minPrintMargin * factor)\n self.horizMarginSpin.setMaximum(optiondefaults.maxPrintMargin * factor)\n self.horizMarginSpin.setSingleStep(stepSize)\n self.horizMarginSpin.setDecimals(decimals)\n self.horizMarginSpin.setValue(self.horizMargin * factor)\n self.horizLabel.setText(_('Horizontal page &margins (%s)') % unitText)\n\n self.vertMarginSpin.setMinimum(optiondefaults.minPrintMargin * factor)\n self.vertMarginSpin.setMaximum(optiondefaults.maxPrintMargin * factor)\n self.vertMarginSpin.setSingleStep(stepSize)\n self.vertMarginSpin.setDecimals(decimals)\n self.vertMarginSpin.setValue(self.vertMargin * factor)\n self.vertLabel.setText(_('Vertical page m&argins (%s)') % unitText)\n\n def readFloatValues(self):\n \"\"\"Read and convert float values from the spin boxes\"\"\"\n factor = PageSetupPage.unitValues[self.currentUnit]\n self.columnSpace = self.columnSpaceSpin.value() / factor\n self.indent = self.indentSpin.value() / factor\n self.horizMargin = self.horizMarginSpin.value() / factor\n self.vertMargin = self.vertMarginSpin.value() / factor\n\n def changeUnits(self, unitNum):\n \"\"\"Change the current unit based on a signal\"\"\"\n self.readFloatValues()\n self.currentUnit = PageSetupPage.units[unitNum]\n self.writeFloatValues()\n\n\nclass SmallListWidget(QtGui.QListWidget):\n \"\"\"ListWidget with a smaller size hint\"\"\"\n def __init__(self, parent=None):\n QtGui.QListWidget.__init__(self, parent)\n\n def sizeHint(self):\n \"\"\"Return smaller width\"\"\"\n return QtCore.QSize(100, 80)\n\n\nclass FontPage(QtGui.QWidget):\n \"\"\"Font selection print option dialog page\"\"\"\n def __init__(self, printData, parent=None):\n QtGui.QWidget.__init__(self, parent)\n self.printData = printData\n self.outputFont = self.printData.getOutputFont()\n self.currentFont = self.printData.getOptionPrintFont()\n if not self.currentFont:\n self.currentFont = self.outputFont\n\n topLayout = 
QtGui.QVBoxLayout(self)\n self.setLayout(topLayout)\n defaultBox = QtGui.QGroupBox(_('Default Font'))\n topLayout.addWidget(defaultBox)\n defaultLayout = QtGui.QVBoxLayout(defaultBox)\n self.outputCheck = QtGui.QCheckBox(_('Use &Data Output font'))\n defaultLayout.addWidget(self.outputCheck)\n self.outputCheck.setChecked(globalref.options.\n boolData('PrintUseOutputFont'))\n self.connect(self.outputCheck, QtCore.SIGNAL('clicked(bool)'),\n self.setFontSelectAvail)\n\n self.fontBox = QtGui.QGroupBox(_('Select Font'))\n topLayout.addWidget(self.fontBox)\n fontLayout = QtGui.QGridLayout(self.fontBox)\n spacing = fontLayout.spacing()\n fontLayout.setSpacing(0)\n\n label = QtGui.QLabel(_('&Font'))\n fontLayout.addWidget(label, 0, 0)\n label.setIndent(2)\n self.familyEdit = QtGui.QLineEdit()\n fontLayout.addWidget(self.familyEdit, 1, 0)\n self.familyEdit.setReadOnly(True)\n self.familyList = SmallListWidget()\n fontLayout.addWidget(self.familyList, 2, 0)\n label.setBuddy(self.familyList)\n self.familyEdit.setFocusProxy(self.familyList)\n fontLayout.setColumnMinimumWidth(1, spacing)\n families = [unicode(fam) for fam in QtGui.QFontDatabase().families()]\n families.sort(lambda x,y: cmp(x.lower(), y.lower()))\n self.familyList.addItems(families)\n self.connect(self.familyList,\n QtCore.SIGNAL('currentItemChanged(QListWidgetItem*, '\\\n 'QListWidgetItem*)'), self.updateFamily)\n\n label = QtGui.QLabel(_('Font st&yle'))\n fontLayout.addWidget(label, 0, 2)\n label.setIndent(2)\n self.styleEdit = QtGui.QLineEdit()\n fontLayout.addWidget(self.styleEdit, 1, 2)\n self.styleEdit.setReadOnly(True)\n self.styleList = SmallListWidget()\n fontLayout.addWidget(self.styleList, 2, 2)\n label.setBuddy(self.styleList)\n self.styleEdit.setFocusProxy(self.styleList)\n fontLayout.setColumnMinimumWidth(3, spacing)\n self.connect(self.styleList,\n QtCore.SIGNAL('currentItemChanged(QListWidgetItem*, '\\\n 'QListWidgetItem*)'), self.updateStyle)\n\n label = QtGui.QLabel(_('&Size'))\n fontLayout.addWidget(label, 0, 4)\n label.setIndent(2)\n self.sizeEdit = QtGui.QLineEdit()\n fontLayout.addWidget(self.sizeEdit, 1, 4)\n self.sizeEdit.setFocusPolicy(QtCore.Qt.ClickFocus)\n validator = QtGui.QIntValidator(1, 512, self)\n self.sizeEdit.setValidator(validator)\n self.sizeList = SmallListWidget()\n fontLayout.addWidget(self.sizeList, 2, 4)\n label.setBuddy(self.sizeList)\n self.connect(self.sizeList,\n QtCore.SIGNAL('currentItemChanged(QListWidgetItem*, '\\\n 'QListWidgetItem*)'), self.updateSize)\n\n fontLayout.setColumnStretch(0, 38)\n fontLayout.setColumnStretch(2, 24)\n fontLayout.setColumnStretch(4, 10)\n\n sampleBox = QtGui.QGroupBox(_('Sample'))\n topLayout.addWidget(sampleBox)\n sampleLayout = QtGui.QVBoxLayout(sampleBox)\n self.sampleEdit = QtGui.QLineEdit()\n sampleLayout.addWidget(self.sampleEdit)\n self.sampleEdit.setAlignment(QtCore.Qt.AlignCenter)\n self.sampleEdit.setText('AaBbCcDdEeFfGg...TtUuVvWvXxYyZz')\n self.sampleEdit.setFixedHeight(self.sampleEdit.sizeHint().height() * 2)\n\n self.setFontSelectAvail()\n\n def setFontSelectAvail(self):\n \"\"\"Disable font selection if default is checked\"\"\"\n if self.outputCheck.isChecked():\n font = self.readFont()\n if font:\n self.currentFont = font\n self.setFont(self.outputFont)\n self.fontBox.setEnabled(False)\n else:\n self.setFont(self.currentFont)\n self.fontBox.setEnabled(True)\n\n def setFont(self, font):\n \"\"\"Set the font selector to the given font\"\"\"\n fontInfo = QtGui.QFontInfo(font)\n family = fontInfo.family()\n matches = 
self.familyList.findItems(family, QtCore.Qt.MatchExactly)\n if matches:\n self.familyList.setCurrentItem(matches[0])\n self.familyList.scrollToItem(matches[0],\n QtGui.QAbstractItemView.PositionAtTop)\n style = QtGui.QFontDatabase().styleString(fontInfo)\n matches = self.styleList.findItems(style, QtCore.Qt.MatchExactly)\n if matches:\n self.styleList.setCurrentItem(matches[0])\n self.styleList.scrollToItem(matches[0])\n size = repr(fontInfo.pointSize())\n matches = self.sizeList.findItems(size, QtCore.Qt.MatchExactly)\n if matches:\n self.sizeList.setCurrentItem(matches[0])\n self.sizeList.scrollToItem(matches[0])\n\n def updateFamily(self, currentItem, previousItem):\n \"\"\"Update the family edit box and adjust the style and size options\"\"\"\n family = unicode(currentItem.text())\n self.familyEdit.setText(family)\n if self.familyEdit.hasFocus():\n self.familyEdit.selectAll()\n prevStyle = unicode(self.styleEdit.text())\n prevSize = unicode(self.sizeEdit.text())\n fontDb = QtGui.QFontDatabase()\n styles = [unicode(style) for style in fontDb.styles(family)]\n self.styleList.clear()\n self.styleList.addItems(styles)\n if prevStyle:\n try:\n num = styles.index(prevStyle)\n except ValueError:\n num = 0\n self.styleList.setCurrentRow(num)\n self.styleList.scrollToItem(self.styleList.currentItem())\n sizes = [repr(size) for size in fontDb.pointSizes(family)]\n self.sizeList.clear()\n self.sizeList.addItems(sizes)\n if prevSize:\n try:\n num = sizes.index(prevSize)\n except ValueError:\n num = 0\n self.sizeList.setCurrentRow(num)\n self.sizeList.scrollToItem(self.sizeList.currentItem())\n self.updateSample()\n\n def updateStyle(self, currentItem, previousItem):\n \"\"\"Update the style edit box\"\"\"\n if currentItem:\n style = unicode(currentItem.text())\n self.styleEdit.setText(style)\n if self.styleEdit.hasFocus():\n self.styleEdit.selectAll()\n self.updateSample()\n\n def updateSize(self, currentItem, previousItem):\n \"\"\"Update the size edit box\"\"\"\n if currentItem:\n size = unicode(currentItem.text())\n self.sizeEdit.setText(size)\n if self.sizeEdit.hasFocus():\n self.sizeEdit.selectAll()\n self.updateSample()\n\n def updateSample(self):\n \"\"\"Update the font sample edit font\"\"\"\n font = self.readFont()\n if font:\n self.sampleEdit.setFont(font)\n\n def readFont(self):\n \"\"\"Return the selected font or None\"\"\"\n family = unicode(self.familyEdit.text())\n style = unicode(self.styleEdit.text())\n size = unicode(self.sizeEdit.text())\n if family and style and size:\n return QtGui.QFontDatabase().font(family, style, int(size))\n return None\n\n def saveChanges(self):\n \"\"\"Update option data with current dialog settings\"\"\"\n if self.outputCheck.isChecked():\n globalref.options.changeData('PrintUseOutputFont', 'yes', True)\n self.printData.printFont = self.outputFont\n else:\n globalref.options.changeData('PrintUseOutputFont', 'no', True)\n font = self.readFont()\n if font:\n self.currentFont = font\n self.printData.setOptionPrintFont(self.currentFont)\n self.printData.printFont = self.currentFont\n\n\nclass FieldListWidget(QtGui.QTreeWidget):\n \"\"\"TreeWidget with a smaller size hint\"\"\"\n def __init__(self, parent=None):\n QtGui.QTreeWidget.__init__(self, parent)\n self.setRootIsDecorated(False)\n self.setColumnCount(2)\n self.setHeaderLabels([_('Name'), _('Type')])\n\n def sizeHint(self):\n \"\"\"Return smaller width\"\"\"\n return QtCore.QSize(150, 60)\n\n\nclass HeaderPage(QtGui.QWidget):\n \"\"\"Header/footer print option dialog page\"\"\"\n names = [_('&Header 
Left'), _('Header C&enter'), _('He&ader Right'),\n _('Footer &Left'), _('Footer Ce&nter'), _('Footer R&ight')]\n fieldPattern = re.compile('{\\*.*?\\*}')\n def __init__(self, parent=None):\n QtGui.QWidget.__init__(self, parent)\n self.fileInfoFormat = copy.deepcopy(globalref.docRef.fileInfoFormat)\n self.fileInfoFormatModified = False\n\n topLayout = QtGui.QGridLayout(self)\n self.setLayout(topLayout)\n fieldBox = QtGui.QGroupBox(_('Fiel&ds'))\n topLayout.addWidget(fieldBox, 0, 0, 3, 1)\n fieldLayout = QtGui.QVBoxLayout(fieldBox)\n self.fieldListWidget = FieldListWidget()\n fieldLayout.addWidget(self.fieldListWidget)\n fieldFormatButton = QtGui.QPushButton(_('Field Forma&t'))\n fieldLayout.addWidget(fieldFormatButton)\n self.connect(fieldFormatButton, QtCore.SIGNAL('clicked()'),\n self.fieldFormat)\n\n self.addFieldButton = QtGui.QPushButton('>>')\n topLayout.addWidget(self.addFieldButton, 0, 1)\n self.addFieldButton.setMaximumWidth(self.addFieldButton.sizeHint().\n height())\n self.connect(self.addFieldButton, QtCore.SIGNAL('clicked()'),\n self.addField)\n self.delFieldButton = QtGui.QPushButton('<<')\n topLayout.addWidget(self.delFieldButton, 1, 1)\n self.delFieldButton.setMaximumWidth(self.delFieldButton.sizeHint().\n height())\n self.connect(self.delFieldButton, QtCore.SIGNAL('clicked()'),\n self.delField)\n\n headerFooterBox = QtGui.QGroupBox(_('Header and Footer'))\n topLayout.addWidget(headerFooterBox, 0, 2, 2, 1)\n headerFooterLayout = QtGui.QGridLayout(headerFooterBox)\n spacing = headerFooterLayout.spacing()\n headerFooterLayout.setSpacing(0)\n self.textEdits = []\n for num, name in enumerate(HeaderPage.names):\n if num < 3:\n row = 1\n col = num * 2\n else:\n row = 4\n col = (num - 3) * 2\n label = QtGui.QLabel(name)\n headerFooterLayout.addWidget(label, row - 1, col)\n label.setIndent(2)\n lineEdit = configdialog.TitleEdit()\n headerFooterLayout.addWidget(lineEdit, row, col)\n label.setBuddy(lineEdit)\n self.textEdits.append(lineEdit)\n self.connect(lineEdit,\n QtCore.SIGNAL('cursorPositionChanged(int, int)'),\n self.setButtonAvail)\n self.connect(lineEdit, QtCore.SIGNAL('focusIn'),\n self.setCurrentEditor)\n headerFooterLayout.setColumnMinimumWidth(1, spacing)\n headerFooterLayout.setColumnMinimumWidth(3, spacing)\n headerFooterLayout.setRowMinimumHeight(2, spacing)\n self.loadFields()\n self.loadText()\n self.focusedEditor = self.textEdits[0]\n self.setButtonAvail()\n\n def setButtonAvail(self):\n \"\"\"Update button availability\"\"\"\n currentFieldPos = self.currentFieldPos()\n self.addFieldButton.setEnabled(currentFieldPos == ())\n self.delFieldButton.setEnabled(len(currentFieldPos) > 1)\n\n def setCurrentEditor(self, sender):\n \"\"\"Set focusedEditor based on editor focus change signal\"\"\"\n self.focusedEditor = sender\n self.setButtonAvail()\n\n def loadFields(self, selNum=0):\n \"\"\"Load list with field names\"\"\"\n self.fieldListWidget.clear()\n for field in self.fileInfoFormat.fieldList:\n QtGui.QTreeWidgetItem(self.fieldListWidget,\n [field.name, _(field.typeName)])\n self.fieldListWidget.setItemSelected(self.fieldListWidget.\n topLevelItem(selNum), True)\n\n def loadText(self):\n \"\"\"Load text into editors\"\"\"\n lines = self.fileInfoFormat.getLines()\n lines.extend([''] * (6 - len(lines)))\n for editor, line in zip(self.textEdits, lines):\n editor.blockSignals(True)\n editor.setText(line)\n editor.blockSignals(False)\n\n def addField(self):\n \"\"\"Add selected field to active header\"\"\"\n fieldName = 
unicode(self.fieldListWidget.selectedItems()[0].text(0))\n text = u'{*!%s*}' % fieldName\n editor = self.focusedEditor\n editor.insert(text)\n editor.setFocus()\n\n def delField(self):\n \"\"\"Remove field at cursor from active header\"\"\"\n start, end = self.currentFieldPos()\n editor = self.focusedEditor\n editor.setSelection(start, end - start)\n editor.insert('')\n editor.setFocus()\n\n def currentFieldPos(self):\n \"\"\"Return tuple of start, end for field at cursorPos in focusedEditor\n or (None,) if selection overlaps a field end,\n or empty tuple if not found\"\"\"\n textLine = unicode(self.focusedEditor.text())\n cursorPos = self.focusedEditor.cursorPosition()\n anchorPos = self.focusedEditor.selectionStart()\n if anchorPos < 0:\n anchorPos = cursorPos\n elif anchorPos == cursorPos: # backward selection\n cursorPos += len(unicode(self.focusedEditor.selectedText()))\n for match in HeaderPage.fieldPattern.finditer(textLine):\n cursorIn = match.start() < cursorPos < match.end()\n anchorIn = match.start() < anchorPos < match.end()\n if cursorIn and anchorIn:\n return (match.start(), match.end())\n if cursorIn or anchorIn:\n return (None,)\n return ()\n\n def fieldFormat(self):\n \"\"\"Show the dialog to change field formats\"\"\"\n fieldName = unicode(self.fieldListWidget.selectedItems()[0].text(0))\n dlg = HeaderFieldFormatDialog(fieldName, self.fileInfoFormat, self)\n if dlg.exec_() == QtGui.QDialog.Accepted:\n if dlg.modified:\n self.fileInfoFormatModified = True\n\n def saveChanges(self):\n \"\"\"Update option data with current dialog settings\"\"\"\n newLines = [unicode(editor.text()) for editor in self.textEdits]\n prevLines = self.fileInfoFormat.getLines()\n prevLines.extend([''] * (6 - len(prevLines)))\n self.fileInfoFormat.lineList = []\n for num, (newLine, prevLine) in enumerate(zip(newLines, prevLines)):\n if newLine:\n self.fileInfoFormat.insertLine(newLine, num)\n if newLine != prevLine:\n self.fileInfoFormatModified = True\n if self.fileInfoFormatModified:\n globalref.docRef.undoStore.addFormatUndo(globalref.docRef.\n treeFormats,\n globalref.docRef.\n fileInfoFormat,\n {}, {})\n globalref.docRef.treeFormats[self.fileInfoFormat.name] = \\\n self.fileInfoFormat\n globalref.docRef.fileInfoFormat = self.fileInfoFormat\n globalref.docRef.treeFormats.updateAllLineFields()\n globalref.docRef.modified = True\n globalref.updateViewMenuStat()\n self.fileInfoFormatModified = False\n\n\nclass HeaderFieldFormatDialog(QtGui.QDialog):\n \"\"\"Dialog to modify file info field formats used in headers and footers\"\"\"\n def __init__(self, fieldName, fileInfoFormat, parent=None):\n QtGui.QDialog.__init__(self, parent)\n self.field = fileInfoFormat.findField(fieldName)\n self.fileInfoFormat = fileInfoFormat\n self.modified = False\n\n self.setWindowFlags(stdWinFlags)\n self.setWindowTitle(_('Field Format for \"%s\"') % fieldName)\n topLayout = QtGui.QVBoxLayout(self)\n self.setLayout(topLayout)\n horizLayout = QtGui.QHBoxLayout()\n topLayout.addLayout(horizLayout)\n\n extraBox = QtGui.QGroupBox(_('Extra Text'))\n horizLayout.addWidget(extraBox)\n extraLayout = QtGui.QVBoxLayout(extraBox)\n spacing = extraLayout.spacing()\n extraLayout.setSpacing(0)\n prefixLabel = QtGui.QLabel(_('&Prefix'))\n extraLayout.addWidget(prefixLabel)\n self.prefixEdit = QtGui.QLineEdit()\n extraLayout.addWidget(self.prefixEdit)\n prefixLabel.setBuddy(self.prefixEdit)\n extraLayout.addSpacing(spacing)\n extraLayout.addStretch(1)\n suffixLabel = QtGui.QLabel(_('Suffi&x'))\n extraLayout.addWidget(suffixLabel)\n 
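# the suffix text is appended verbatim after the formatted field value\n        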
self.suffixEdit = QtGui.QLineEdit()\n        extraLayout.addWidget(self.suffixEdit)\n        suffixLabel.setBuddy(self.suffixEdit)\n\n        rightLayout = QtGui.QVBoxLayout()\n        horizLayout.addLayout(rightLayout)\n        self.formatBox = QtGui.QGroupBox(_('O&utput Format'))\n        rightLayout.addWidget(self.formatBox)\n        formatLayout = QtGui.QHBoxLayout(self.formatBox)\n        self.formatEdit = QtGui.QLineEdit()\n        formatLayout.addWidget(self.formatEdit)\n        self.helpButton = QtGui.QPushButton(_('Format &Help'))\n        formatLayout.addWidget(self.helpButton)\n        self.connect(self.helpButton, QtCore.SIGNAL('clicked()'),\n                     self.formatHelp)\n\n        self.handleBox = QtGui.QGroupBox(_('Content Text Handling'))\n        rightLayout.addWidget(self.handleBox)\n        handleLayout = QtGui.QVBoxLayout(self.handleBox)\n        self.htmlButton = QtGui.QRadioButton(_('Allow HT&ML rich text'))\n        handleLayout.addWidget(self.htmlButton)\n        self.plainButton = QtGui.QRadioButton(_('Plai&n text with '\\\n                                                'line breaks'))\n        handleLayout.addWidget(self.plainButton)\n\n        ctrlLayout = QtGui.QHBoxLayout()\n        topLayout.addLayout(ctrlLayout)\n        ctrlLayout.addStretch(0)\n        okButton = QtGui.QPushButton(_('&OK'))\n        ctrlLayout.addWidget(okButton)\n        self.connect(okButton, QtCore.SIGNAL('clicked()'), self,\n                     QtCore.SLOT('accept()'))\n        cancelButton = QtGui.QPushButton(_('&Cancel'))\n        ctrlLayout.addWidget(cancelButton)\n        self.connect(cancelButton, QtCore.SIGNAL('clicked()'), self,\n                     QtCore.SLOT('reject()'))\n\n        self.prefixEdit.setText(self.field.prefix)\n        self.suffixEdit.setText(self.field.suffix)\n        self.formatEdit.setText(self.field.format)\n        self.htmlButton.setChecked(self.field.html)\n        self.plainButton.setChecked(not self.field.html)\n\n        self.formatBox.setEnabled(self.field.defaultFormat != '')\n        self.handleBox.setEnabled(self.field.htmlOption)\n\n    def formatHelp(self):\n        \"\"\"Provide format help menu based on button signal\"\"\"\n        menu = QtGui.QMenu(self)\n        self.formatDict = {}\n        for item in self.field.formatMenuList:\n            if item:\n                descr, key = item\n                self.formatDict[descr] = key\n                menu.addAction(descr)\n            else:\n                menu.addSeparator()\n        menu.popup(self.helpButton.\n                   mapToGlobal(QtCore.QPoint(0, self.helpButton.height())))\n        self.connect(menu, QtCore.SIGNAL('triggered(QAction*)'),\n                     self.insertFormat)\n\n    def insertFormat(self, action):\n        \"\"\"Insert format text from id into edit box\"\"\"\n        self.formatEdit.insert(self.formatDict[unicode(action.text())])\n\n    def accept(self):\n        \"\"\"Set changes after OK is hit\"\"\"\n        prefix = unicode(self.prefixEdit.text())\n        if self.field.prefix != prefix:\n            self.field.prefix = prefix\n            self.modified = True\n        suffix = unicode(self.suffixEdit.text())\n        if self.field.suffix != suffix:\n            self.field.suffix = suffix\n            self.modified = True\n        format = unicode(self.formatEdit.text())\n        if self.field.format != format:\n            self.field.format = format\n            self.modified = True\n        if self.field.htmlOption:\n            html = self.htmlButton.isChecked()\n            if self.field.html != html:\n                self.field.html = html\n                self.modified = True\n        QtGui.QDialog.accept(self)\n","sub_path":"source/printdialogs.py","file_name":"printdialogs.py","file_ext":"py","file_size_in_byte":43526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"628733678","text":"# Bitmasking + backtracking\n# In the bitmasking technique, the visit status of the i-th node is added to the visited state with cur_visited = cur_visited|(1 << i)\n    if (velo_x == 0 and velo_y > 0):\n        return(math.pi/2)\n    elif (velo_x == 0 and velo_y < 0):\n        return(3*math.pi/2)\n    elif (velo_x > 0 and velo_y == 0):\n        return(0)\n    elif (velo_x < 0 and velo_y == 0):\n        return(math.pi)\n    elif (velo_x>0 and velo_y > 0):\n        return math.atan(velo_y/velo_x)\n    elif (velo_x<0 and velo_y > 0):\n        return (math.atan(velo_y/velo_x) + math.pi)\n    elif (velo_x<0 and velo_y < 0):\n        return (math.atan(velo_y/velo_x) + math.pi)\n    elif (velo_x>0 and velo_y < 0):\n        return (math.atan(velo_y/velo_x))\n    #no movement base case\n    else:\n        if (obj == None):\n            return 0\n        return (obj.shape.body.angle)\n","sub_path":"physicalobject.py","file_name":"physicalobject.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"221542663","text":"from flask import Flask, render_template, jsonify, request\napp = Flask(__name__)\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n    user = {'nickname': 'Miguel'} # fake user\n    return render_template(\"index.html\",\n                           title='Home',\n                           user=user)\n\n@app.route('/extract')\ndef extract():\n    # take an article, return the abstract and the array of words to highlight\n    article = request.args[\"article\"]\n    abstract1 = str(article)\n    abstract2 = str(article)\n    print(\"abstract1\", abstract1)\n    print(\"abstract2\", abstract2)\n    highlight_words = [\"1\"]\n\n    # denotes words 1-3 and words 5-5; both ends of each range are inclusive\n    return jsonify({\"abstract1\": abstract1, \"abstract2\": abstract2, \"highlight_words\": highlight_words})\n\n\nif __name__ == '__main__':\n    app.run(debug=True, host=\"0.0.0.0\", port=80)\n","sub_path":"home_work_show.py","file_name":"home_work_show.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"500368506","text":"class Solution:\n    def reverseList(self, head):\n        \"\"\"\n        :type head: ListNode\n        :rtype: ListNode\n        \"\"\"\n        if head is None or head.next is None: # handling null or single-element list\n            return head\n        \n        ptr1 = head\n        ptr2 = head.next # ptr1 follows ptr2\n        \n        head.next = None # reverse next\n        \n        while ptr2 is not None:\n            ptr3, ptr2.next = ptr2.next, ptr1 # reverse next\n            ptr1, ptr2 = ptr2, ptr3 # step to right\n        \n        return ptr1\n","sub_path":"Solutions/Linked List/#206. Reverse Linked List.py","file_name":"#206. 
Reverse Linked List.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"177415111","text":"import argparse\r\nimport open3d as o3d\r\n\r\nif __name__ == \"__main__\":\r\n\r\n    parser = argparse.ArgumentParser()\r\n    parser.add_argument('-cloud', dest='cloud_path', help='Point Cloud file path', type=str)\r\n    parser.add_argument('-label', dest='label_path', help='Point Cloud label file path', type=str)\r\n    args = parser.parse_args()\r\n\r\n    point_cloud_path = args.cloud_path\r\n    label_path = args.label_path\r\n\r\n    # read point cloud\r\n    pcd1 = o3d.io.read_point_cloud(point_cloud_path)\r\n    # read label\r\n    labels0 = open(label_path).read().split(\"\\n\")\r\n    len1 = len(labels0)\r\n    color0 = []\r\n\r\n    # Set the color of the point\r\n    for i in range(len1):\r\n        if labels0[i] == '0':\r\n            color0.append([0.0, 0.0, 0.0])\r\n        elif labels0[i] == '1':\r\n            color0.append([0.259, 0.522, 0.957])\r\n        elif labels0[i] == '2':\r\n            color0.append([0.859, 0.267, 0.216])\r\n        elif labels0[i] == '3':\r\n            color0.append([0.959, 0.651, 0.0])\r\n        elif labels0[i] == '4':\r\n            color0.append([0.059, 0.616, 0.345])\r\n        elif labels0[i] == '5':\r\n            color0.append([0.4, 0.0, 0.8])\r\n        elif labels0[i] == '6':\r\n            color0.append([0.0, 1.0, 0.8])\r\n        elif labels0[i] != '':\r\n            color0.append([1.0, 1.0, 1.0])\r\n    pcd1.colors = o3d.utility.Vector3dVector(color0)\r\n    # Point cloud visualization\r\n    o3d.visualization.draw_geometries([pcd1])\r\n","sub_path":"viz/Pointcloud-Visualization-With-Open3D.py","file_name":"Pointcloud-Visualization-With-Open3D.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"353437113","text":"#! 
/usr/bin/python\n\nfrom random import sample\nfrom random import randint\n\nedges = 1000\nnodes = 16384\n\nprint(\"node_id,adj_list\")\n\nline = \"\"\nfor i in range(0,nodes):\t\t\t\t\t\t# range is [a,b)\n\tnum_edges = randint(1,edges)\n\tline = str(i) + \",\"\n\tadj = sample(range(0,nodes),num_edges)\t\t# sample chooses without replacement\t\t\t\t\n\tfor j in adj:\n\t\t# don't add itself to adj list\n\t\tif j != i:\n\t\t\tline += str(j) + \" \"\n\t\telse:\n\t\t\t# try one more time to generate a unique neighbor\n\t\t\t# randint is inclusive on both ends, so cap at nodes-1 to keep ids in range\n\t\t\ts = randint(0,nodes-1)\n\t\t\tif not (s in adj) and s != i:\n\t\t\t\tline += str(s) + \" \"\n\n\tprint(line)","sub_path":"gen_random_graph.py","file_name":"gen_random_graph.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"165892680","text":"import numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom model import RCGAN\nimport os\nimport argparse\n\nFILE_NAME = 'inputs/sin_wave.npz'\n#FILE_NAME = 'inputs/mnist1.npz'\nSEED = 12345\n\ndef seq_mnist_normalize(data):\n    \"\"\"\n    Normalize for rot_mnist\n    \"\"\"\n    def MaxMinNorm(data):\n        return ( ( (data - data.min()) / (data.max() - data.min()) ) * 2 - 1 ).tolist()\n\n    ret_rot_data = []\n    for seq in data:\n        ret_rot_data.append( \n            list(\n                map(\n                    lambda seq_data: MaxMinNorm(seq_data), \n                    seq\n                )\n            )\n        )\n\n    return np.array(ret_rot_data)\n\ndef main():\n    \n    parser = argparse.ArgumentParser()\n    parser.add_argument('-inputs', default=FILE_NAME)\n    args = parser.parse_args()\n    \n    # load data\n    ndarr = np.load(args.inputs)\n    X_train, X_eval, y_train, y_eval = train_test_split(ndarr['x'],\n                                                        ndarr['y'],\n                                                        test_size=0.1,\n                                                        random_state=SEED)\n\n    assert X_train.ndim == 3, 'x is expected to have 3 dims, but got {}'.format(\n        X_train.ndim)\n\n    if args.inputs == 'inputs/mnist1.npz':\n        X_train = seq_mnist_normalize(X_train) \n        X_eval = seq_mnist_normalize(X_eval)\n    \n    print('train x shape:', X_train.shape)\n\n    # hyper parameter for training\n    args = {}\n    args['seq_length'] = X_train.shape[1]\n    args['input_dim'] = X_train.shape[2]\n    args['latent_dim'] = 500\n    args['hidden_dim'] = 500\n    args['embed_dim'] = 10\n    args['n_epochs'] = 100\n    args['batch_size'] = 64\n    args['num_classes'] = len(np.unique(y_train))\n    args['save_model'] = True\n    args['instance_noise'] = False\n    args['oneside_smooth'] = True\n    args['label_flipping'] = 0.05\n    args['dp_sgd'] = True\n    args['sigma'] = 0.1\n    args['l2norm_bound'] = 0.1\n    args['learning_rate'] = 0.1\n    args['total_examples'] = X_train.shape[0]\n    \n    if not os.path.isdir('models') and args['save_model']:\n        os.mkdir('models')\n        print('make directory for save models')\n\n    rcgan = RCGAN(**args)\n\n    rcgan.train(args['n_epochs'],\n                X_train,\n                y_train,\n                X_eval,\n                y_eval)\n\n\nif __name__ == '__main__':\n    # choose GPU device\n    os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n    main()\n","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"418938668","text":"\"\"\" type implementation \"\"\"\nimport sys, logging\nimport sqlalchemy as sqla\nfrom zope.interface import implementer\nfrom pyramid.compat import string_types\nfrom pyramid.threadlocal import get_current_registry\n\nimport ptah\nfrom ptah import config\nfrom ptah.cms.node import Session\nfrom ptah.cms.content import Content\nfrom ptah.cms.container import BaseContainer\nfrom ptah.cms.security import 
build_class_actions\nfrom ptah.cms.interfaces import Forbidden, ContentSchema, ITypeInformation\n\nlog = logging.getLogger('ptah.cms')\n\nTYPES_DIR_ID = 'ptah.cms:type'\n\n\n@ptah.resolver('cms-type')\ndef typeInfoResolver(uri):\n \"\"\"Type resolver\n\n :Parameters:\n - type scheme, e.g. blob-sql\n :Returns:\n - :py:class:`ptah.cms.TypeInformation`\n \"\"\"\n return config.get_cfg_storage(TYPES_DIR_ID).get(uri)\n\n\ndef get_type(uri):\n \"\"\"\n :param uri: string identifier for TypeInformation, e.g. `cms-type:sqlblob`\n\n :Returns:\n - :py:class:`ptah.cms.TypeInformation`\n\n \"\"\"\n return config.get_cfg_storage(TYPES_DIR_ID).get(uri)\n\n\ndef get_types():\n \"\"\"\n :Returns:\n - mapping of all registered identifier and TypeInformation\n \"\"\"\n return config.get_cfg_storage(TYPES_DIR_ID)\n\n\n@implementer(ITypeInformation)\nclass TypeInformation(object):\n \"\"\" Type information \"\"\"\n\n fieldset = None\n description = ''\n permission = ptah.NOT_ALLOWED\n\n addview = None # addview action, path relative to current container\n filter_content_types = False\n allowed_content_types = ()\n global_allow = True\n\n def __init__(self, cls, name, title, fieldset, **kw):\n self.__dict__.update(kw)\n\n self.__uri__ = 'cms-type:%s'%name\n\n self.cls = cls\n self.name = name\n self.title = title\n self.fieldset = fieldset\n\n def create(self, **data):\n content = self.cls(**data)\n get_current_registry().notify(ptah.events.ContentCreatedEvent(content))\n return content\n\n def is_allowed(self, container):\n if not isinstance(container, BaseContainer):\n return False\n\n if self.permission:\n return ptah.check_permission(self.permission, container)\n return True\n\n def check_context(self, container):\n if not self.is_allowed(container):\n raise Forbidden()\n\n def list_types(self, container):\n if container.__type__ is not self or \\\n not isinstance(container, BaseContainer):\n return ()\n\n types = []\n all_types = config.get_cfg_storage(TYPES_DIR_ID)\n\n if self.filter_content_types:\n allowed_types = self.allowed_content_types\n if callable(allowed_types):\n allowed_types = allowed_types(container)\n\n for tinfo in allowed_types:\n if isinstance(tinfo, string_types):\n tinfo = all_types.get('cms-type:%s'%tinfo)\n\n if tinfo and tinfo.is_allowed(container):\n types.append(tinfo)\n else:\n for tinfo in all_types.values():\n if tinfo.global_allow and tinfo.is_allowed(container):\n types.append(tinfo)\n\n return types\n\n\ndef Type(name, title=None, fieldset=None, **kw):\n \"\"\" Declare new type. This function has to be called within a content\n class declaration.\n\n .. 
code-block:: python\n\n        class MyContent(ptah.cms.Content):\n\n            __type__ = Type('My content')\n\n    \"\"\"\n    info = config.DirectiveInfo(allowed_scope=('class',))\n\n    fs = ContentSchema if fieldset is None else fieldset\n\n    if title is None:\n        title = name.capitalize()\n\n    typeinfo = TypeInformation(None, name, title, fs, **kw)\n\n    f_locals = sys._getframe(1).f_locals\n    if '__mapper_args__' not in f_locals:\n        f_locals['__mapper_args__'] = {'polymorphic_identity': typeinfo.__uri__}\n    if '__id__' not in f_locals and '__tablename__' in f_locals:\n        f_locals['__id__'] = sqla.Column(\n            'id', sqla.Integer,\n            sqla.ForeignKey('ptah_content.id'), primary_key=True)\n    if '__uri_factory__' not in f_locals:\n        f_locals['__uri_factory__'] = ptah.UriFactory('cms-%s'%name)\n\n    def resolve_content(uri):\n        return typeinfo.cls.__uri_sql_get__.first(uri=uri)\n\n    resolve_content.__doc__ = 'CMS Content resolver for %s type'%title\n\n    ptah.register_uri_resolver('cms-%s'%name, resolve_content, depth=2)\n\n    # config action and introspection info\n    discr = (TYPES_DIR_ID, name)\n    intr = config.Introspectable(TYPES_DIR_ID, discr, name, TYPES_DIR_ID)\n    intr['name'] = name\n    intr['type'] = typeinfo\n    intr['codeinfo'] = info.codeinfo\n\n    info.attach(\n        config.ClassAction(\n            register_type_impl, (typeinfo, name, fieldset), kw,\n            discriminator=discr, introspectables=(intr,))\n        )\n    return typeinfo\n\n\nexcludeNames = ('expires', 'contributors', 'creators', 'view', 'subjects',\n                'publisher', 'effective', 'created', 'modified')\ndef names_filter(name, fieldNames=None):\n    if fieldNames is not None and name in fieldNames:\n        return True\n\n    if name in excludeNames:\n        return False\n\n    return not name.startswith('_')\n\n\ndef register_type_impl(\n    config, cls, tinfo, name, fieldset,\n    permission = ptah.NOT_ALLOWED, fieldNames=None, **kw):\n\n    # generate schema\n    if fieldset is None:\n        fieldset = ptah.generate_fieldset(\n            cls, fieldNames=fieldNames, namesFilter=names_filter)\n        log.info(\"Generating fieldset for %s content type.\", cls)\n\n    if 'global_allow' not in kw and not issubclass(cls, Content):\n        kw['global_allow'] = False\n\n    tinfo.__dict__.update(kw)\n\n    if fieldset is not None:\n        tinfo.fieldset = fieldset\n\n    tinfo.cls = cls\n    tinfo.permission = permission\n\n    config.get_cfg_storage(TYPES_DIR_ID)[tinfo.__uri__] = tinfo\n\n    # sql query for content resolver\n    cls.__uri_sql_get__ = ptah.QueryFreezer(\n        lambda: Session.query(cls) \\\n        .filter(cls.__uri__ == sqla.sql.bindparam('uri')))\n\n    # build cms actions\n    build_class_actions(cls)\n","sub_path":"ptah/cms/tinfo.py","file_name":"tinfo.py","file_ext":"py","file_size_in_byte":6108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"383355297","text":"#coding=utf-8\nimport pytest\n\n\n# test \"in\" (substring membership)\ndef test_in():\n    a = \"hello\"\n    b = \"he\"\n    assert b in a\n\n\n# test \"not in\" (substring non-membership)\ndef test_not_in():\n    a = \"hello\"\n    b = \"hi\"\n    assert b not in a\n\nif __name__ == '__main__':\n    pytest.main(\"test_assert2.py\")\n","sub_path":"TestFrameworks/pytest/test_assert2.py","file_name":"test_assert2.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"124521009","text":"#\n# Hello World client in Python\n# Connects REQ socket to tcp://localhost:5555\n# Sends \"Hello\" to server, expects \"World\" back\n#\n\nimport sys\n\nimport zmq\n\ncontext = zmq.Context()\n\n# Socket to talk to server\nprint(\"Connecting to hello world server…\")\nsocket = 
context.socket(zmq.REQ)\nsocket.connect(\"tcp://localhost:5555\")\n\n# Do 10 requests, waiting each time for a response\n\n#for request in range(10):\n# print(\"Sending request %s …\" % request)\n# socket.send(b\"Hello\")#\nwhile True:\n data = input('input your data:')\n if data == 'q':\n sys.exit()\n \n print(data)\n socket.send(data.encode('utf-8'))\n # Get the reply.\n response = socket.recv()\n print(response)\n","sub_path":"py_zmq_client.py","file_name":"py_zmq_client.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"380838978","text":"# Definition for binary tree with next pointer.\nclass TreeLinkNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n self.next = None\n\nclass Solution(object):\n def connect(self, root):\n \"\"\"\n :type root: TreeLinkNode\n :rtype: nothing\n \"\"\"\n if root==None:\n return\n nextlevel=root.left\n c=root\n while not nextlevel==None:\n while not c==None:\n c.left.next=c.right\n if not c.next==None:\n c.right.next=c.next.left\n c=c.next\n c=nextlevel\n nextlevel=c.left\n\n\na=TreeLinkNode(1)\nb=TreeLinkNode(2)\nc=TreeLinkNode(3)\na.left=b\na.right=c\nd=TreeLinkNode(4)\ne=TreeLinkNode(5)\nf=TreeLinkNode(6)\ng=TreeLinkNode(7)\nb.left=d\nb.right=e\nc.left=f\nc.right=g\ns=Solution()\ns.connect(a)\nprint(\"1\")","sub_path":"116connect.py","file_name":"116connect.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"236947830","text":"from odoo import api, fields, models, _\n\n\nclass assign_followers_settings(models.Model):\n _name = 'assign.followers.settings'\n _description = 'Settings for Assign and Remove followers to a Record'\n\n name = fields.Char('Name', size=128)\n model_id = fields.Many2one('ir.model', 'Model')\n ref_ir_act_window = fields.Many2one('ir.actions.act_window', 'Sidebar action', readonly=True,\n help=\"Sidebar action to make this template available on records \"\n \"of the related document model\")\n ref_ir_value = fields.Many2one('ir.model.data', 'Sidebar Button', readonly=True,\n help=\"Sidebar button to open the sidebar action\")\n \n @api.multi\n def create_action(self):\n action_obj = self.env['ir.actions.act_window'].sudo()\n \n for template in self:\n src_obj = template.model_id.model\n model_data_id = self.env['ir.model.data'].get_object_reference('assign_followers', 'view_assign_followers')[1] \n button_name = _('Assign/Unassign Followers')\n \n# create follower action\n action_id = action_obj.create({\n 'name': button_name,\n 'type': 'ir.actions.act_window',\n 'res_model': 'assign.followers',\n 'src_model': src_obj,\n 'view_type': 'form',\n 'context': \"{}\",\n 'view_mode':'form,tree',\n 'view_id': model_data_id,\n 'target': 'new',\n 'binding_model_id': template.model_id.id,\n 'auto_refresh':1,\n \n })\n \n template.write({\n 'ref_ir_act_window': action_id.id,\n })\n return True\n \n @api.multi\n def unlink(self):\n # action template remove \n for template in self:\n if template.ref_ir_act_window:\n template.ref_ir_act_window.unlink()\n return super(assign_followers_settings, self).unlink()\n \nclass assign_followers(models.Model):\n _name = 'assign.followers'\n _description = 'Assign and Unassign Followers to Record'\n \n record_followers_ids = fields.Many2many('res.partner', 'record_followers_rel', 'record_id', 'partner_id', 'Followers')\n \n @api.multi\n def assign_followers(self):\n context 
= self._context\n if context is None:\n context = {}\n if context.get('active_model'):\n # Current model name\n model_obj = self.env[context['active_model']]\n model_follower_obj = self.env['mail.followers']\n followers_ids = self.record_followers_ids.ids\n if context.get('active_ids'): \n # get values from current active_ids\n for value in model_obj.search([('id', 'in', context['active_ids'])]):\n existing_followers_id = followers_to_assign = []\n existing_followers_id = [val.partner_id.id for val in value.message_follower_ids]\n # check existing message followers and assigned followers\n followers_to_assign = list(set(followers_ids) - set(existing_followers_id))\n for val_loop in followers_to_assign:\n model_follower_obj.create({'partner_id': val_loop, 'res_model': context['active_model'], 'res_id': value.id}) \n return True\n \n @api.multi\n def update_followers(self):\n context = self._context\n if context is None:\n context = {}\n if context.get('active_model'):\n model_obj = self.env[context['active_model']]\n model_follower_obj = self.env['mail.followers']\n active_model_id = model_obj.search([('id', 'in', self._context.get('active_ids'))])\n followers_ids = [val.id for val in self.record_followers_ids]\n for line in active_model_id: \n # check existing message followers and record_followers_ids\n followers_to_unassign = list(set(followers_ids))\n for val_loop in followers_to_unassign:\n model_follower_obj.search([('partner_id','=', val_loop),('res_model','=', self._context.get('active_model')),( 'res_id','=', line.id)]).unlink()\n \n return True\n \n \n \n \n","sub_path":"assign_followers/models/assign_followers.py","file_name":"assign_followers.py","file_ext":"py","file_size_in_byte":4517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"477659937","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 19 13:05:23 2019\n\n@author: TempestGuerra\n\"\"\"\n\nimport numpy as np\nimport math as mt\nfrom HerfunChebNodesWeights import chebpolym, cheblb\n\ndef computeChebyshevDerivativeMatrix(DIMS):\n \n # Get data from DIMS\n ZH = DIMS[2]\n NZ = DIMS[4]\n \n # Initialize grid and make column vector\n xi, wcp = cheblb(NZ)\n \n # Get the Chebyshev transformation matrix\n CTD = chebpolym(NZ-1, -xi)\n \n # Make a diagonal matrix of weights\n W = np.diag(wcp)\n \n # Compute scaling for the forward transform\n S = np.eye(NZ)\n \n for ii in range(NZ - 1):\n temp = W.dot(CTD[:,ii])\n temp = ((CTD[:,ii]).T).dot(temp)\n S[ii,ii] = temp ** (-1)\n\n S[NZ-1,NZ-1] = 1.0 / mt.pi\n \n # Compute the spectral derivative coefficients\n SDIFF = np.zeros((NZ,NZ))\n SDIFF[NZ-2,NZ-1] = 2.0 * NZ\n \n for ii in reversed(range(NZ - 2)):\n A = 2.0 * (ii + 1)\n B = 1.0\n if ii > 0:\n c = 1.0\n else:\n c = 2.0\n \n SDIFF[ii,:] = B / c * SDIFF[ii+2,:]\n SDIFF[ii,ii+1] = A / c\n \n # Chebyshev spectral transform in matrix form\n temp = CTD.dot(W)\n STR_C = S.dot(temp);\n # Chebyshev spatial derivative based on spectral differentiation\n # Domain scale factor included here\n temp = (CTD).dot(SDIFF)\n DDM = - (2.0 / ZH) * temp.dot(STR_C);\n \n return DDM, STR_C","sub_path":"computeChebyshevDerivativeMatrix.py","file_name":"computeChebyshevDerivativeMatrix.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"129745773","text":"from datetime import date\nfrom shutil import copy\nimport os\n\ndate_backup = 
date.today()\nprint(date_backup)\n\nstr_date_backup = str(date_backup).replace('-','.')\nprint(str_date_backup)\n\n# path_input = r'/Users/robindias/Files/Robin Dias - Blue.docx'\npath_input = input(r'Please specify the path of the file to be backed up?')\nfile_name = os.path.basename(path_input)\n# the backup folder path already ends with a slash, so no extra separator is needed\npath_output = r'/Users/robindias/Backup_files/' + str_date_backup + ' - ' + file_name\nprint(path_output)\n\ncopy(path_input,path_output)\nprint(\"File was successfully saved\")","sub_path":"auto_backup.py","file_name":"auto_backup.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"110131027","text":"\"\"\"Everything related to building logic for the spires goes here\"\"\"\r\nfrom sc2.constants import SPIRE\r\n\r\n\r\nclass BuildSpire:\r\n    \"\"\"Untested\"\"\"\r\n\r\n    def __init__(self, ai):\r\n        self.ai = ai\r\n\r\n    async def should_handle(self, iteration):\r\n        \"\"\"Build the spire if only floating buildings left\"\"\"\r\n        return (\r\n            not self.ai.spires\r\n            and self.ai.can_afford(SPIRE)\r\n            and self.ai.floating_buildings_bm\r\n            and not self.ai.already_pending(SPIRE)\r\n            and (self.ai.lairs or self.ai.hives)\r\n        )\r\n\r\n    async def handle(self, iteration):\r\n        \"\"\" Put the spire near the pool\"\"\"\r\n        await self.ai.build(SPIRE, near=self.ai.pools.first)\r\n        return True\r\n","sub_path":"actions/build/spire.py","file_name":"spire.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"350625178","text":"# first attempt\n\nimport math\n\ndef roots(a, b, c):\n    if b * b - 4 * a * c < 0:\n        print(\"Solution has no real roots.\")\n    elif b * b - 4 * a * c == 0:\n        x = -b / ( 2 * a)\n        print(\"x =\", x)\n    else:\n        x1 = (-b + math.sqrt(b * b - 4 * a * c)) / (2 * a)\n        x2 = (-b - math.sqrt(b * b - 4 * a * c)) / (2 * a)\n        print(\"x1 =\", x1)\n        print(\"x2 =\", x2)","sub_path":"tutorials/05/decomposition/discriminant.py","file_name":"discriminant.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"552430795","text":"import os\nimport cv2\ndef Rename(src,first_name,file_format):\n    list_file=os.listdir(src)\n\n    for i in range(len(list_file)):\n        file_dst=src+'/'+first_name + str(i)+file_format\n        file_src=src+'/'+list_file[i]\n        os.rename(file_src,file_dst)\n        print(file_src)\n\ndef RemoveFile(src):\n    list_file=os.listdir(src)\n    for i in range(len(list_file)):\n        file_src=src+'/'+list_file[i]\n        img=cv2.imread(file_src)\n        print(type(img))\n        # delete files that OpenCV could not read as images\n        if str(type(img)) != \"<class 'numpy.ndarray'>\":\n            os.remove(file_src)\n            continue\n\n\nroot=\"/home/vuonghn/Downloads/ALL/all\"\n\nlist_folder=os.listdir(root)\n\nfor i in range(len(list_folder)):\n    folder=root+'/'+list_folder[i]\n    # Rename(folder,\"img_\",\".jpg\")\n    RemoveFile(folder)\n","sub_path":"Rename.py","file_name":"Rename.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"617186011","text":"import re, urllib.request, os, time\nopener =urllib.request.build_opener()\nheaders = (\"User-Agent\", \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36\")\nopener.addheaders=[headers]\n\ntime.sleep(10)\n\n# site home page\nurlhead =\"http://www.ylzzd.com/xgmn/\"\n\n# helper to create a folder\ndef dirMaker(dir):\n    dirs = os.listdir()\n    if dir in dirs:\n        return True\n    else:\n        
os.mkdir(dir)\n        return False\n\n# save into this folder under the current path\ndirMaker('girls')\n\nos.chdir('girls')\n\n# download helper\ndef crawler(url,path):\n    # opener.open()\n    urllib.request.urlretrieve(url,path)\n\n# pages 91-115 contain images\nfor i in range (91,116):\n    # print(i)\n    # global urlhead\n\n    # build the list of page URLs\n    urllist =[]\n    urlreal = urlhead +str(i)+\".html\"\n    urllist.append(urlreal)\n    for j in range(2,20):\n        urlrealinside = urlhead +str(i)+ \"_\"+str(j)+\".html\"\n\n        urllist.append(urlrealinside)\n\n    # download the images on each page\n    for suburl in urllist:\n\n        try:\n\n            # fetch the page\n            # mainpage = urllib.request.urlopen(\"http://www.ylzzd.com/xgmn/110.html\").read().decode('utf-8',\"ignore\")\n            mainpage = urllib.request.urlopen(suburl).read().decode('utf-8',\"ignore\")\n\n            print(len(mainpage))\n\n            # full-size image markup; match the links\n            picPat = 'img class=\"petImg\" src=\"(.*?.jpg)\"'\n            # \n            urlhome = \"http://www.ylzzd.com\"\n\n            # match the image URLs\n            pics = re.findall(picPat,mainpage)\n\n            print(pics)\n            # print(len(pics))\n\n            for i in range(0, len(pics)):\n                pics[i] = urlhome + pics[i]\n                pathlocal = pics[i][-12:]\n\n                crawler(pics[i],pathlocal)\n        except:\n            # print(\"link does not exist\")\n            break\n","sub_path":"urlLib/meinv2.py","file_name":"meinv2.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"552631477","text":"import logging\nimport re\n\nimport requests\nfrom dynaconf import settings\nfrom flask import Flask, request\n\n\napp = Flask(__name__)\n\nRE_PATTERN_IN = r\"\"\"[v|V]aga$|[f|F]ree[la|lancer]|[j|J]ob$|[w|W]ork$|[d|D]ev|[d|D]eveloper$|[d|D]esenvolvedor$\n|[j|J]unior$|[p|P]leno$|[s|S]enior$\"\"\"\n\nRE_PATTERN_OUT = r\"\"\"[f|F]ake|[p|P]resencial|[r|R]emoto\"\"\"\n\nlogging.basicConfig(format=\"%(asctime)s - %(message)s\", level=logging.INFO)\n\n\n@app.route(\"/event/\", methods=[\"POST\"])\ndef reply_message():\n\n    # Set Up\n    url = f\"https://api.telegram.org/bot{settings.API_TOKEN}/sendMessage\"  # noqa\n    msg_telegram = request.get_json()\n    msg = msg_telegram.get(\"message\", False)\n\n    if msg and not msg.get(\"animation\", False) and not msg.get(\"photo\", False):\n        if re.search(RE_PATTERN_OUT, msg[\"text\"]):\n            return {\"message\": \"out\"}\n        if re.search(RE_PATTERN_IN, msg[\"text\"]):\n            params = {\n                \"chat_id\": msg[\"chat\"][\"id\"],\n                \"text\": \"A vaga oferecida aceita trabalho remoto? 
\\n# Msg enviada pelo ࿇RɨʋɛʀBօȶ࿇!\",\n \"reply_to_message_id\": msg[\"message_id\"],\n }\n\n requests.post(url, params=params)\n\n return {\"message\": \"complet job!\"}\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"177132946","text":"from django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\n\nfrom music.views import SongViewSet, AlbumViewSet, ArtistViewSet\n\nrouter = DefaultRouter()\nrouter.register('songs', SongViewSet)\nrouter.register('albums', AlbumViewSet)\nrouter.register('artist', ArtistViewSet)\n\n\nurlpatterns = [\n path('', include(router.urls)),\n]","sub_path":"music/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"541349481","text":"import json\nimport logging\nimport os\nimport requests\nimport time\nfrom kafka import KafkaConsumer\n\ndef parse_message_google_review(message):\n rec = {}\n try:\n row = message.value\n rec = {\n 'app': row['app'], 'translated_review': row['translated_review'], 'sentiment': row['sentiment'],\n 'sentiment_polarity': row['sentiment_polarity'], 'sentiment_subjectivity': row['sentiment_subjectivity']\n }\n except Exception as ex:\n logging.error('Exception while parsing')\n logging.error(str(ex))\n finally:\n return rec\n\n\nif __name__ == '__main__':\n logging.basicConfig(filename='clientstreamingestapp.log', filemode='w', level=logging.DEBUG,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n print('Running Consumer..')\n topic_name_google = 'google'\n\n while True:\n # Process google topic in kafka\n try:\n consumer = KafkaConsumer(topic_name_google, bootstrap_servers=[os.environ['KAFKA_URL_PORT']],\n api_version=(0, 10), auto_offset_reset='latest', group_id='my-group',\n enable_auto_commit=True, consumer_timeout_ms=1000,\n value_deserializer=lambda x: json.loads(x.decode('utf-8')))\n except Exception as ex:\n logging.error(ex)\n print(consumer)\n for msg in consumer:\n print(msg)\n data = parse_message_google_review(msg)\n try:\n logging.info(\"data is {0}\".format(json.dumps(data)))\n # Send request to API to create a new entry in db\n r = requests.post(url=os.environ['URL'] + \"/google/review/\", json=json.dumps(data))\n logging.info(\"Response r is {0}\".format(r))\n except Exception as ex:\n logging.error(ex)\n consumer.close()\n\n # Sleep 5 minutes before starting check new files again\n time.sleep(60 * 5)\n\n\n","sub_path":"code/databroker/clientstreamingestapp-google.py","file_name":"clientstreamingestapp-google.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"145023870","text":"import bs4\nimport requests\nresponse = requests.get('http://www.senate.gov/legislative/LIS/roll_call_lists/vote_menu_114_1.xml')\nsoup = bs4.BeautifulSoup(response.text)\nvotes=0\n\n\nfor link in soup.select('votes vote'):\n if link.yeas is not None and link.nays is not None:\n if int(link.nays.text)-int(link.yeas.text) < 5:\n votes=votes+1\n\nprint(votes)\n","sub_path":"search-script-scrape/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"448972934","text":"'''\nCreated on Jan 30, 2016\n\n@author: f\n'''\nimport random as r\ndef 
main():\n    print(\"SHITTY SENTENCE GENERATOR\")\n    print(\"SHITTY FORMULA: SUBJECT+VERB+INDIRECT_OBJECT+DIRECT_OBJECT\\n\")\n    \n    inNouns = open(\"dictionaries/nouns\",\"r\")\n    nouns=\"\"\n    inVerbs = open(\"dictionaries/verbs\",\"r\")\n    verbs=\"\"\n    inIndirect = open(\"dictionaries/indirectObjects\",\"r\")\n    indirect=\"\"\n    for str1 in inNouns:\n        nouns+=str1[:-1]+\", \"\n    for str2 in inVerbs:\n        verbs+=str2[:-1]+\", \"\n    for str3 in inIndirect:\n        indirect+=str3[:-1]+\", \"\n    \n    nouns=nouns.split(\", \")\n    verbs=verbs.split(\", \")\n    indirect=indirect.split(\", \")\n    print(nouns[r.randrange(0,len(nouns))], verbs[r.randrange(0,len(verbs))], indirect[r.randrange(0,len(indirect))],nouns[r.randrange(0,len(nouns))])\n    \nmain()","sub_path":"src/randomSentenceGenerator/RandomSentenceGenerator.py","file_name":"RandomSentenceGenerator.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"602440540","text":"import numpy as np\nfrom sklearn import datasets\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\n\n\n## load data\niris = datasets.load_iris()\nX = iris['data']\ny = iris['target']\n\n## init weight \nW_raw = np.random.rand(4,2)\nW = W_raw/np.linalg.norm(W_raw, axis=0) ## normalize\n\n\n## define Oja update function\ndef Oja_update(x, W):\n    y = np.dot(x, W)\n    delta_W = 0.01*np.dot((x - np.dot(W,y)).reshape(4,1), y.reshape(1,2))\n    W += delta_W\n    W /= np.linalg.norm(W, axis=0) ## normalize in place so the caller's W is updated\n\n## train and plot result over time\nfig, axis = plt.subplots(3, 3)\nfig.set_size_inches(15,10)\nfor count in range(18):\n    if count % 2 == 0:\n        data_PCA = np.dot(X, W)\n        c = count//2\n        axis[c // 3, c % 3].plot(data_PCA[:50,0], data_PCA[:50,1], 'ro')\n        axis[c // 3, c % 3].plot(data_PCA[50:100,0], data_PCA[50:100,1], 'bo')\n        axis[c // 3, c % 3].plot(data_PCA[100:150,0], data_PCA[100:150,1], 'go')\n    for i in range(150):\n        Oja_update(X[i], W)\n\n## compare with original PCA of sklearn.\nt = PCA(n_components=2).fit_transform(X)\nplt.plot(t[:50,0], t[:50,1], 'ro')\nplt.plot(t[50:100,0], t[50:100,1], 'bo')\nplt.plot(t[100:150,0], t[100:150,1], 'go')\nplt.show()\n","sub_path":"Oja_PCA.py","file_name":"Oja_PCA.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"411621746","text":"import time\n\n# create a list of 10,000,000 elements\n# (adjust to the speed of your machines)\ntaille = 10000000\nprint(\"Creating a list with %d elements\" % (taille))\ntoto = list(range(taille))\n# the variable 'a' accesses one element of the list\n# method 1\nstart = time.time()\nfor i in range(len(toto)):\n    a = toto[i]\nprint(\"method 1 (for in range): %.1f seconds\" % (time.time() - start))\n\n# method 2\nstart = time.time()\nfor ele in toto:\n    a = ele\nprint(\"method 2 (for in): %.1f seconds\" % (time.time() - start))\n\n# method 3\nstart = time.time()\nfor idx, ele in enumerate(toto):\n    a = ele\nprint(\"method 3 (for in enumerate): %.1f seconds\" % (time.time() - start))\n","sub_path":"Chap_4_Instruction_Controle/Chap_4_Loops_boucles_speed.py","file_name":"Chap_4_Loops_boucles_speed.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"199834874","text":"import random\r\n\r\npodl = [\"Троллейбус\", \"Альбом\", \"Автобус\", \"Стол\", \"Апрель\", \"Адрес\", \"Лопата\", \"Лагерь\", \"Стул\", 
\"Вода\"]\r\ndopl = [\"хочет\", \"любит\", \"ненавидет\", \"нравится\", \"меняет\", \"становится\", \"идёт\", \"бежит\"]\r\nzcaz = [\"прыгать\", \"смотреть\", \"играть\", \"кушать\", \"говорть\", \"сидеть\", \"бежать\", \"кататься\", \"убирать\", \"копать\", \"пить\", \"читать\"]\r\n\r\na = random.choice(podl)\r\nb = random.choice(dopl)\r\nc = random.choice(zcaz)\r\nprint(a, b, c)\r\n\r\n# print(word)","sub_path":"aaaa/pytonchik/l07/randomword.py","file_name":"randomword.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"385993725","text":"from pathlib import Path\nfrom typing import Dict, List\nfrom injecta.config.ConfigPathsResolver import ConfigPathsResolver\nfrom injecta.config.ConfigLoaderAndMerger import ConfigLoaderAndMerger\nfrom injecta.config.ConfigReaderInterface import ConfigReaderInterface\n\nclass YamlConfigReader(ConfigReaderInterface):\n\n def __init__(self):\n self.__configPathsResolver = ConfigPathsResolver()\n self.__configLoaderAndMerger = ConfigLoaderAndMerger()\n\n def read(self, configPath: str):\n resolvedPaths = self.__configPathsResolver.resolve(Path(configPath), Path(configPath).parent)\n resolvedPathsByLevels = self.__toListByLevels(resolvedPaths)\n resolvedPathsByLevels.reverse()\n flattenedPaths = [item for sublist in resolvedPathsByLevels for item in sublist]\n\n return self.__configLoaderAndMerger.loadAndMerge(flattenedPaths)\n\n def __toListByLevels(self, resolvedPaths: List[Dict]):\n configPathsByLevels = dict()\n\n for resolvedPath in resolvedPaths:\n if resolvedPath['level'] not in configPathsByLevels:\n configPathsByLevels[resolvedPath['level']] = []\n\n configPathsByLevels[resolvedPath['level']].append(resolvedPath['path'])\n\n return list(configPathsByLevels.values())\n","sub_path":"src/injecta/config/YamlConfigReader.py","file_name":"YamlConfigReader.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"503901925","text":"################################################################################\n# Module: archetypal.template\n# Description:\n# License: MIT, see full license in LICENSE.txt\n# Web: https://github.com/samuelduchesne/archetypal\n################################################################################\n\nimport collections\nimport hashlib\n\nimport numpy as np\nimport pandas as pd\nfrom deprecation import deprecated\nfrom eppy.bunch_subclass import EpBunch\n\nimport archetypal\nfrom archetypal import Schedule, log\nfrom archetypal.template import UmiBase, UniqueName\n\n\nclass UmiSchedule(Schedule, UmiBase):\n \"\"\"Class that handles Schedules as\"\"\"\n\n def __init__(self, *args, quantity=None, **kwargs):\n \"\"\"\n Args:\n *args:\n **kwargs:\n \"\"\"\n super(UmiSchedule, self).__init__(**kwargs)\n self.quantity = quantity\n\n @classmethod\n def constant_schedule(\n cls, hourly_value=1, Name=\"AlwaysOn\", Type=\"Fraction\", idf=None, **kwargs\n ):\n \"\"\"\n Args:\n hourly_value:\n Name:\n idf:\n **kwargs:\n \"\"\"\n return super(UmiSchedule, cls).constant_schedule(\n hourly_value=hourly_value, Name=Name, Type=Type, idf=idf, **kwargs\n )\n\n @classmethod\n def from_values(cls, Name, Values, idf, Type=\"Fraction\", **kwargs):\n \"\"\"\n Args:\n Name:\n Values:\n idf:\n Type:\n **kwargs:\n \"\"\"\n return super(UmiSchedule, cls).from_values(\n Name=Name, Values=Values, Type=Type, idf=idf, **kwargs\n )\n\n @classmethod\n def 
from_yearschedule(cls, year_sched, idf=None):\n \"\"\"\n Args:\n year_sched:\n idf:\n \"\"\"\n if isinstance(year_sched, YearSchedule):\n return cls.from_values(\n Name=year_sched.Name,\n Values=year_sched.all_values,\n Type=year_sched.Type,\n idf=idf,\n )\n\n def __add__(self, other):\n return UmiSchedule.combine(self, other)\n\n def __repr__(self):\n name = self.Name\n resample = self.series.resample(\"D\")\n min = resample.min().mean()\n mean = resample.mean().mean()\n max = resample.max().mean()\n return (\n name\n + \": \"\n + \"mean daily min:{:.2f} mean:{:.2f} max:{:.2f} \".format(min, mean, max)\n + (f\"quantity {self.quantity}\" if self.quantity is not None else \"\")\n )\n\n def __str__(self):\n return repr(self)\n\n def __hash__(self):\n return hash((self.__class__.__name__, getattr(self, \"Name\", None)))\n\n def __eq__(self, other):\n if not isinstance(other, UmiSchedule):\n return False\n else:\n return all(\n [\n # self.Name == other.Name,\n self.strict == other.strict,\n self.schType == other.schType,\n self.Type == other.Type,\n self.quantity == other.quantity,\n np.array_equal(self.all_values, other.all_values),\n ]\n )\n\n def combine(self, other, weights=None, quantity=None):\n \"\"\"Combine two UmiSchedule objects together.\n\n Args:\n other (UmiSchedule): The other Schedule object to combine with.\n weights (list, dict or string): Attribute of self and other containing the\n weight factor. If a list is passed, it must have len = 2; the first\n element is applied to self and the second element is applied to other.\n If a dict is passed, the self.Name and other.Name are the keys. If a\n str is passed, the\n quantity (list or dict): Scalar value that will be multiplied by self before\n the averaging occurs. This ensures that the resulting schedule\n returns the correct integrated value. If a dict is passed, keys are\n schedules Names and values are quantities.\n\n Returns:\n (UmiSchedule): the combined UmiSchedule object.\n\n Raises:\n TypeError: if Quantity is not of type list, tuple, dict or a callable.\n \"\"\"\n # Check if other is None. Simply return self\n if not other:\n return self\n\n if not self:\n return other\n\n if not isinstance(other, UmiSchedule):\n msg = \"Cannot combine %s with %s\" % (\n self.__class__.__name__,\n other.__class__.__name__,\n )\n raise NotImplementedError(msg)\n\n # check if the schedule is the same\n if self == other:\n if self.quantity and other.quantity:\n self.quantity += other.quantity\n return self\n # check if self is only zeros. Should not affect other.\n if not np.any(self.all_values):\n return other\n # check if other is only zeros. Should not affect self.\n if not np.any(other.all_values):\n return self\n\n if not weights:\n log(\n 'using 1 as weighting factor in \"{}\" '\n \"combine.\".format(self.__class__.__name__)\n )\n weights = [1, 1]\n elif isinstance(weights, str):\n # get the attribute from self and other\n weights = [getattr(self, weights), getattr(other, weights)]\n elif isinstance(weights, (list, tuple)):\n # check if length is 2.\n l = len(weights)\n if l != 2:\n raise ValueError(\n \"USing a list or tuple, the weights attribute must \"\n \"have a length of 2. 
A length of {}\".format(l)\n )\n elif isinstance(weights, dict):\n weights = [weights[self.Name], weights[other.Name]]\n\n if quantity is None:\n new_values = np.average(\n [self.all_values, other.all_values], axis=0, weights=weights\n )\n elif isinstance(quantity, dict):\n # Multiplying the schedule values by the quantity for both self and other\n # and then using a weighted average. Finally, new values are normalized.\n new_values = np.average(\n [\n self.all_values * quantity[self.Name],\n other.all_values * quantity[other.Name],\n ],\n axis=0,\n weights=weights,\n )\n new_values /= quantity[self.Name] + quantity[other.Name]\n elif callable(quantity):\n new_values = np.average(\n np.stack((self.all_values, other.all_values), axis=1),\n axis=1,\n weights=[\n quantity(self.predecessors.data),\n quantity(other.predecessors.data),\n ],\n )\n elif isinstance(quantity, (list, tuple)):\n # Multiplying the schedule values by the quantity for both self and other\n # and then using a weighted average. Finally, new values are normalized.\n new_values = np.average(\n [self.all_values * quantity[0], other.all_values * quantity[1]],\n axis=0,\n weights=weights,\n )\n new_values /= sum(quantity)\n else:\n raise TypeError(\"Quantity is not of type list, tuple, dict or a callable\")\n\n # the new object's name\n meta = self._get_predecessors_meta(other)\n\n # Overriding meta Name\n hasher = hashlib.md5()\n hasher.update(new_values)\n meta[\"Name\"] = f\"Combined_UmiSchedule_{hasher.hexdigest()}\"\n quantity = np.nansum(\n [self.quantity or float(\"nan\"), other.quantity or float(\"nan\")]\n )\n new_obj = UmiSchedule.from_values(\n Values=new_values, Type=\"Fraction\", quantity=quantity, idf=self.idf, **meta\n )\n new_obj.predecessors.update(self.predecessors + other.predecessors)\n new_obj.weights = sum(weights)\n return new_obj\n\n def develop(self):\n year, weeks, days = self.to_year_week_day()\n lines = [\"- {}\".format(obj) for obj in self.predecessors]\n\n newdays = []\n for day in days:\n newdays.append(\n DaySchedule.from_epbunch(\n day,\n Comments=\"Year Week Day schedules created from: \\n{}\".format(\n \"\\n\".join(lines)\n ),\n allow_duplicates=True,\n )\n )\n Parts = []\n weeks = {schd.Name: schd for schd in weeks}\n\n def chunks(lst, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(lst), n):\n yield lst[i : i + n]\n\n for fields in chunks(year.fieldvalues[3:], 5):\n weekname, from_month, from_day, to_month, to_day = fields\n Parts.append(\n YearSchedulePart(\n FromMonth=from_month,\n ToMonth=to_month,\n FromDay=from_day,\n ToDay=to_day,\n Schedule=WeekSchedule.from_epbunch(\n weeks[weekname],\n Comments=\"Year Week Day schedules created from:\\n{}\".format(\n \"\\n\".join(lines)\n ),\n ),\n )\n )\n\n _from = \"\\n\".join(lines)\n self.__class__ = YearSchedule\n self.Comments = f\"Year Week Day schedules created from: \\n{_from}\"\n self.epbunch = year\n self.Type = \"Fraction\"\n self.Parts = Parts\n return self\n\n def to_json(self):\n \"\"\"UmiSchedule does not implement the to_json method because it is not\n used when generating the json file. 
Only Year-Week- and DaySchedule\n classes are used\n \"\"\"\n\n return self.to_dict()\n\n def to_dict(self):\n self.validate() # Validate object before trying to get json format\n self.develop() # Develop into Year-, Week- and DaySchedules\n return {\"$ref\": str(self.id)}\n\n def validate(self):\n \"\"\"Validates UmiObjects and fills in missing values\"\"\"\n return self\n\n def mapping(self):\n self.validate()\n\n return dict(\n Category=self.schType,\n Type=self.Type,\n Comments=self.Comments,\n DataSource=self.DataSource,\n Name=self.Name,\n )\n\n def get_ref(self, ref):\n \"\"\"Gets item matching ref id\n\n Args:\n ref:\n \"\"\"\n return next(\n iter(\n [\n value\n for value in UmiSchedule.CREATED_OBJECTS\n if value.id == ref[\"$ref\"]\n ]\n ),\n None,\n )\n\n\nclass YearSchedulePart:\n \"\"\"Helper Class for YearSchedules that are defined using FromDay FromMonth\n ToDay ToMonth attributes.\n \"\"\"\n\n def __init__(\n self,\n FromDay=None,\n FromMonth=None,\n ToDay=None,\n ToMonth=None,\n Schedule=None,\n **kwargs,\n ):\n \"\"\"\n Args:\n FromDay (int): This numeric field is the starting day for the\n schedule time period.\n FromMonth (int): This numeric field is the starting month for the\n schedule time period.\n ToDay (int): This numeric field is the ending day for the schedule\n time period.\n ToMonth (int): This numeric field is the ending month for the\n schedule time period.\n Schedule (UmiSchedule): The associated UmiSchedule related to this\n object.\n kwargs (dict): Other Keyword arguments.\n \"\"\"\n self.FromDay = FromDay\n self.FromMonth = FromMonth\n self.ToDay = ToDay\n self.ToMonth = ToMonth\n self.Schedule = Schedule\n\n def __eq__(self, other):\n if not isinstance(other, YearSchedulePart):\n return False\n else:\n return all(\n [\n self.FromDay == other.FromDay,\n self.FromMonth == other.FromMonth,\n self.ToDay == other.ToDay,\n self.ToMonth == other.ToMonth,\n self.Schedule == other.Schedule,\n ]\n )\n\n @classmethod\n @deprecated(\n deprecated_in=\"1.3.1\",\n removed_in=\"1.5\",\n current_version=archetypal.__version__,\n details=\"Use from_dict function instead\",\n )\n def from_json(cls, *args, **kwargs):\n\n \"\"\"\n Args:\n all_objects:\n *args:\n **kwargs:\n \"\"\"\n return cls.from_dict(*args, **kwargs)\n\n @classmethod\n def from_dict(cls, Schedule, **kwargs):\n \"\"\"\n Args:\n all_objects:\n *args:\n **kwargs:\n \"\"\"\n ref = UmiBase.get_classref(Schedule)\n ysp = cls(Schedule=ref, **kwargs)\n\n return ysp\n\n def to_dict(self):\n return collections.OrderedDict(\n FromDay=self.FromDay,\n FromMonth=self.FromMonth,\n ToDay=self.ToDay,\n ToMonth=self.ToMonth,\n Schedule={\"$ref\": str(self.Schedule.id)},\n )\n\n def __str__(self):\n return str(self.to_dict())\n\n def mapping(self):\n return dict(\n FromDay=self.FromDay,\n FromMonth=self.FromMonth,\n ToDay=self.ToDay,\n ToMonth=self.ToMonth,\n Schedule=self.Schedule,\n )\n\n def get_unique(self):\n return self\n\n\nclass DaySchedule(UmiSchedule):\n \"\"\"Superclass of UmiSchedule that handles daily schedules.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initialize a DaySchedule object with parameters:\n\n Args:\n **kwargs: Keywords passed to the :class:`UmiSchedule` constructor.\n \"\"\"\n super(DaySchedule, self).__init__(**kwargs)\n\n @classmethod\n def from_epbunch(cls, epbunch, **kwargs):\n \"\"\"Create a DaySchedule from a :class:`~eppy.bunch_subclass.EpBunch`\n object\n\n Args:\n epbunch (EpBunch): The EpBunch object to construct a DaySchedule\n from.\n **kwargs: Keywords passed to the 
:class:`UmiSchedule` constructor.\n See :class:`UmiSchedule` for more details.\n \"\"\"\n\n sched = cls(\n idf=epbunch.theidf,\n Name=epbunch.Name,\n epbunch=epbunch,\n schType=epbunch.key,\n **kwargs,\n )\n\n return sched\n\n @classmethod\n def from_values(cls, Name, Values, idf, Type=\"Fraction\", **kwargs):\n \"\"\"Create a DaySchedule from an array of size (24,)\n\n Args:\n Name:\n Values (array-like): A list of values of length 24.\n idf (IDF): The idf model.\n Type:\n **kwargs: Keywords passed to the :class:`UmiSchedule` constructor.\n See :class:`UmiSchedule` for more details.\n \"\"\"\n return cls(Name=Name, Values=np.array(Values), Type=Type, idf=idf, **kwargs)\n\n @classmethod\n @deprecated(\n deprecated_in=\"1.3.1\",\n removed_in=\"1.5\",\n current_version=archetypal.__version__,\n details=\"Use from_dict function instead\",\n )\n def from_json(cls, Type, **kwargs):\n\n \"\"\"\n Args:\n Type:\n **kwargs:\n \"\"\"\n return cls.from_dict(Type, **kwargs)\n\n @classmethod\n def from_dict(cls, Name, Values, Type, **kwargs):\n \"\"\"Create a DaySchedule from a Umi Template json file.\n\n Args:\n Type (str): The schedule type limits name.\n **kwargs:\n \"\"\"\n sched = cls.from_values(Name=Name, Values=Values, Type=Type, **kwargs)\n\n return sched\n\n def to_json(self):\n \"\"\"Returns a dict-like representation of the schedule.\n\n Returns:\n dict: The dict-like representation of the schedule\n \"\"\"\n\n data_dict = collections.OrderedDict()\n\n data_dict[\"$id\"] = str(self.id)\n data_dict[\"Category\"] = \"Day\"\n data_dict[\"Type\"] = self.Type\n data_dict[\"Values\"] = self.all_values.round(3).tolist()\n data_dict[\"Comments\"] = self.Comments\n data_dict[\"DataSource\"] = self.DataSource\n data_dict[\"Name\"] = UniqueName(self.Name)\n\n return data_dict\n\n def mapping(self):\n return dict(\n Category=self.schType,\n Type=self.Type,\n Values=self.all_values.round(3).tolist(),\n Comments=self.Comments,\n DataSource=self.DataSource,\n Name=self.Name,\n )\n\n @property\n def all_values(self) -> np.ndarray:\n if self._values is None:\n self._values = self.get_schedule_values(self.epbunch)\n if isinstance(self._values, list):\n self._values = np.array(self._values)\n return self._values\n\n def to_dict(self):\n \"\"\"returns umi template repr\"\"\"\n return {\"$ref\": str(self.id)}\n\n\nclass WeekSchedule(UmiSchedule):\n \"\"\"Superclass of UmiSchedule that handles weekly schedules.\"\"\"\n\n def __init__(self, Days=None, **kwargs):\n \"\"\"Initialize a WeekSchedule object with parameters:\n\n Args:\n Days (list of DaySchedule): list of :class:`DaySchedule`.\n **kwargs:\n \"\"\"\n super(WeekSchedule, self).__init__(**kwargs)\n self.Days = Days\n\n def __eq__(self, other):\n if not isinstance(other, WeekSchedule):\n return False\n else:\n return all(\n [\n # self.Name == other.Name,\n self.Type == other.Type,\n self.Days == other.Days,\n ]\n )\n\n @classmethod\n def from_epbunch(cls, epbunch, **kwargs):\n \"\"\"\n Args:\n epbunch:\n **kwargs:\n \"\"\"\n Days = WeekSchedule.get_days(epbunch)\n sched = cls(\n idf=epbunch.theidf,\n Name=epbunch.Name,\n schType=epbunch.key,\n Days=Days,\n **kwargs,\n )\n\n return sched\n\n @classmethod\n @deprecated(\n deprecated_in=\"1.3.1\",\n removed_in=\"1.5\",\n current_version=archetypal.__version__,\n details=\"Use from_dict function instead\",\n )\n def from_json(cls, **kwargs):\n\n \"\"\"\n Args:\n **kwargs:\n \"\"\"\n return cls.from_dict(**kwargs)\n\n @classmethod\n def from_dict(cls, Type, **kwargs):\n \"\"\"\n Args:\n **kwargs:\n \"\"\"\n 
refs = kwargs.pop(\"Days\")\n Days = [UmiBase.get_classref(ref) for ref in refs]\n wc = cls(Type=Type, Days=Days, **kwargs)\n return wc\n\n def to_json(self):\n \"\"\"Returns a dict-like representation of the schedule.\n\n Returns:\n dict: The dict-like representation of the schedule\n \"\"\"\n data_dict = collections.OrderedDict()\n\n data_dict[\"$id\"] = str(self.id)\n data_dict[\"Category\"] = \"Week\"\n data_dict[\"Days\"] = [day.to_dict() for day in self.Days]\n data_dict[\"Type\"] = self.Type\n data_dict[\"Comments\"] = self.Comments\n data_dict[\"DataSource\"] = self.DataSource\n data_dict[\"Name\"] = UniqueName(self.Name)\n\n return data_dict\n\n def mapping(self):\n return dict(\n Category=self.schType,\n Days=self.Days,\n Type=self.Type,\n Comments=self.Comments,\n DataSource=self.DataSource,\n Name=self.Name,\n )\n\n @classmethod\n def get_days(cls, epbunch):\n \"\"\"\n Args:\n epbunch (EpBunch):\n \"\"\"\n blocks = []\n dayname = [\n \"Monday\",\n \"Tuesday\",\n \"Wednesday\",\n \"Thursday\",\n \"Friday\",\n \"Saturday\",\n \"Sunday\",\n ]\n for day in dayname:\n week_day_schedule_name = epbunch[\"{}_ScheduleDay_Name\".format(day)]\n blocks.append(\n next(\n (\n x\n for x in UmiBase.CREATED_OBJECTS\n if x.Name == week_day_schedule_name\n and type(x).__name__ == \"DaySchedule\"\n ),\n None,\n )\n )\n\n return blocks\n\n @property\n def all_values(self) -> np.ndarray:\n if self._values is None:\n self._values = np.concatenate([day.all_values for day in self.Days])\n return self._values\n\n def to_dict(self):\n \"\"\"returns umi template repr\"\"\"\n return {\"$ref\": str(self.id)}\n\n\nclass YearSchedule(UmiSchedule):\n \"\"\"Superclass of UmiSchedule that handles yearly schedules.\"\"\"\n\n def __init__(self, Name, Type=\"Fraction\", Parts=None, **kwargs):\n \"\"\"Initialize a YearSchedule object with parameters:\n\n Args:\n Name:\n Type:\n Parts (list of YearSchedulePart): The YearScheduleParts.\n **kwargs:\n \"\"\"\n self.epbunch = kwargs.get(\"epbunch\", None)\n if Parts is None:\n self.Parts = self.get_parts(self.epbunch)\n else:\n self.Parts = Parts\n super(YearSchedule, self).__init__(\n Name=Name, Type=Type, schType=\"Schedule:Year\", **kwargs\n )\n\n def __eq__(self, other):\n if not isinstance(other, YearSchedule):\n return False\n else:\n return all([self.Type == other.Type, self.Parts == other.Parts])\n\n def __hash__(self):\n return super(YearSchedule, self).__hash__()\n\n @classmethod\n def from_parts(cls, *args, Parts, **kwargs):\n \"\"\"\n Args:\n *args:\n Parts (list of YearSchedulePart):\n **kwargs:\n \"\"\"\n ysp = cls(*args, Parts=Parts, **kwargs)\n ysp._values = ysp.all_values\n\n return ysp\n\n @property\n def all_values(self) -> np.ndarray:\n if self._values is None:\n index = pd.date_range(start=self.startDate, freq=\"1H\", periods=8760)\n series = pd.Series(index=index)\n for part in self.Parts:\n start = \"{}-{}-{}\".format(self.year, part.FromMonth, part.FromDay)\n end = \"{}-{}-{}\".format(self.year, part.ToMonth, part.ToDay)\n try: # Get week values from all_values of Days that are DaySchedule object\n one_week = np.array(\n [\n item\n for sublist in part.Schedule.Days\n for item in sublist.all_values\n ]\n )\n except: # Days are not DaySchedule object\n try: # Days is a list of 7 dicts (7 days in a week)\n # Dicts are the id of Days ({\"$ref\": id})\n day_values = [self.get_ref(day) for day in part.Schedule.Days]\n values = []\n for i in range(0, 7): # There is 7 days a week\n values = values + day_values[i].all_values.tolist()\n one_week = 
np.array(values)\n except:\n msg = (\n 'Days are not a DaySchedule or dictionaries in the form \"{'\n '$ref: id}\" '\n )\n raise NotImplementedError(msg)\n\n all_weeks = np.resize(one_week, len(series.loc[start:end]))\n series.loc[start:end] = all_weeks\n self._values = series.values\n return self._values\n\n @classmethod\n @deprecated(\n deprecated_in=\"1.3.1\",\n removed_in=\"1.5\",\n current_version=archetypal.__version__,\n details=\"Use from_dict function instead\",\n )\n def from_json(cls, **kwargs):\n\n \"\"\"\n Args:\n **kwargs:\n \"\"\"\n return cls.from_dict(**kwargs)\n\n @classmethod\n def from_dict(cls, Type, **kwargs):\n \"\"\"\n Args:\n **kwargs:\n \"\"\"\n Parts = [\n YearSchedulePart.from_dict(**part) for part in kwargs.pop(\"Parts\", None)\n ]\n ys = cls(Type=Type, Parts=Parts, **kwargs)\n ys.schType = \"Schedule:Year\"\n return ys\n\n def to_json(self):\n \"\"\"Returns a dict-like representation of the schedule.\n\n Returns:\n dict: The dict-like representation of the schedule\n \"\"\"\n data_dict = collections.OrderedDict()\n\n data_dict[\"$id\"] = str(self.id)\n data_dict[\"Category\"] = \"Year\"\n data_dict[\"Parts\"] = [part.to_dict() for part in self.Parts]\n data_dict[\"Type\"] = self.Type\n data_dict[\"Comments\"] = self.Comments\n data_dict[\"DataSource\"] = self.DataSource\n data_dict[\"Name\"] = UniqueName(self.Name)\n\n return data_dict\n\n def mapping(self):\n self.validate()\n\n return dict(\n Category=self.schType,\n Parts=self.Parts,\n Type=self.Type,\n Comments=self.Comments,\n DataSource=self.DataSource,\n Name=self.Name,\n )\n\n def get_parts(self, epbunch):\n \"\"\"\n Args:\n epbunch (EpBunch):\n \"\"\"\n parts = []\n for i in range(int(len(epbunch.fieldvalues[3:]) / 5)):\n week_day_schedule_name = epbunch[\"ScheduleWeek_Name_{}\".format(i + 1)]\n\n FromMonth = epbunch[\"Start_Month_{}\".format(i + 1)]\n ToMonth = epbunch[\"End_Month_{}\".format(i + 1)]\n FromDay = epbunch[\"Start_Day_{}\".format(i + 1)]\n ToDay = epbunch[\"End_Day_{}\".format(i + 1)]\n parts.append(\n YearSchedulePart(\n FromDay,\n FromMonth,\n ToDay,\n ToMonth,\n next(\n (\n x\n for x in self.CREATED_OBJECTS\n if x.Name == week_day_schedule_name\n and type(x).__name__ == \"WeekSchedule\"\n )\n ),\n )\n )\n return parts\n\n def to_dict(self):\n \"\"\"returns umi template repr\"\"\"\n return {\"$ref\": str(self.id)}\n","sub_path":"archetypal/template/schedule.py","file_name":"schedule.py","file_ext":"py","file_size_in_byte":26595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"286697171","text":"import os\nimport hashlib\n\n\ndef printer(duplicates):\n \"\"\"\n Function to print duplicates\n\n :param duplicates: duplicates from function check_for_duplicates\n :return: None\n \"\"\"\n\n if duplicates:\n print('There is duplicate:')\n for key, values in duplicates.items():\n print('-------------------\\n')\n print('This files duplicate each other')\n print('------------------\\n')\n print('\\nand\\n'.join(values))\n else:\n print('There is not duplicates here')\n\n\ndef chunk_reader(f_obj, chunk_size: int = 1024):\n \"\"\"\n Generator which read a file in bytes\n :param f_obj: file which will be readed in chunk of bytes\n :param chunk_size: atomic size\n :return: chunk of file\n \"\"\"\n while True:\n chunk = f_obj.read(chunk_size)\n if not chunk:\n return\n yield chunk\n\n\ndef get_hash(filename, hash_func=hashlib.md5):\n \"\"\"\n This function get hash\n\n :param filename: name of file\n :param hash_func: hash function\n :return: 
hex digest of the file\n    \"\"\"\n\n    hashobj = hash_func()\n    file_object = open(filename, 'rb')\n    for chunk in chunk_reader(file_object):\n        hashobj.update(chunk)\n    hashed = hashobj.hexdigest()\n    file_object.close()\n    return hashed\n\n\ndef check_for_duplicates(path):\n    \"\"\"\n    Creates the dict of duplicated files\n\n    :param path: path to the directory to scan\n    :return: dictionary which contains all duplicated files\n    \"\"\"\n\n    if not os.path.exists(path):\n        raise ValueError(\"Directory does not exist\")\n\n    hashes_size = {}\n    for dir_path, dir_names, file_names in os.walk(path):\n        for filename in file_names:\n            full_path = os.path.join(dir_path, filename)\n            if hashes_size.get(os.path.getsize(full_path)):\n                hashes_size[os.path.getsize(full_path)].append(full_path)\n            else:\n                hashes_size[os.path.getsize(full_path)] = []\n                hashes_size[os.path.getsize(full_path)].append(full_path)\n\n    result_size = list(filter(lambda entry: len(entry) > 1, hashes_size.values()))\n\n    hashes = {}\n    for files in result_size:\n        for filename in files:\n            if hashes.get(get_hash(filename)):\n                hashes[get_hash(filename)].append(filename)\n            else:\n                hashes[get_hash(filename)] = []\n                hashes[get_hash(filename)].append(filename)\n    return dict(filter(lambda entry: len(entry[1]) > 1,\n                       hashes.items()))\n\nif __name__ == '__main__':  # pragma: no cover\n    pass\n","sub_path":"supertool_with_gui/src/supertool/simfiles_finder.py","file_name":"simfiles_finder.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"628566470","text":"# -*- coding:utf-8 -*-\nfrom unittest import TestSuite,makeSuite\nimport HTMLTestRunner,time\nfrom testCase import getDetailByIdTest,createDevTest\n\na=TestSuite()\na.addTest(makeSuite(getDetailByIdTest))\na.addTest(makeSuite(createDevTest))\ntime=time.strftime(\"%y%m%d%H%M%S\")\nb=open(\"./result%s\"%time+\".html\",\"wb\")\nc=HTMLTestRunner.HTMLTestRunner(stream=b,title=u\"API Test Report\")\nc.run(a)\nb.close()\n","sub_path":"testAPI/untitled9/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"597882505","text":"\r\n\r\nfrom nltk.tokenize import sent_tokenize\r\nfrom nltk.tokenize import word_tokenize\r\nimport re\r\nimport regex\r\nfrom collections import namedtuple\r\n\r\n#from utils.mimic import DEIDENT_PATTERN_NEW\r\n\r\n#DEIDENT_PATTERN_NEW = r'_\\*\\*.+?\\*\\*_'\r\nDEIDENT_PATTERN_NEW = r'_\\*\\*.*?\\*\\*_'\r\n\r\n\r\nTok = namedtuple('Tok', 'token start end')\r\n\r\nNUMBER_PATTERNS = '^((\\d+)|(\\d+[\\.\\,]\\d+)|(one)|(two)|(three)|(four)|(five)|(six)|(seven)|(eight)|(nine)|(ten))$'\r\nNUMBER_TOKEN = ''\r\nNLTK_OPENING_QUOTE = r'``'\r\nNLTK_CLOSING_QUOTE = \"''\"\r\nDOUBLE_QUOTE = '\"'\r\n\r\nABBREV_PAT = '((?<={0}[a-zA-Z0-9]{{1,3}}){0})|({0}(?=[a-zA-Z0-9]{{1,3}}{0}))'\r\n\r\nNUM_LIST = '(?<=^[ \\t]*[\\da-zA-Z]{{1,2}}){0}'\r\n#NUM_LIST = '(?<=[\\d]{{1,2}}){0}(?=[ \\t])'\r\n\r\n\r\n\r\ndef pre_norm(doc, norm_pat, norm_tok, norm_tok_pad=True):\r\n    '''\r\n    Preprocess text to keep pattern-matched strings together\r\n    '''\r\n\r\n    # Make sure keep token not in text\r\n    assert not bool(re.search(norm_tok, doc)), \"Found keep token in text\"\r\n\r\n    # Find strings, including white space, that should be kept together\r\n    norm_matches = list(re.finditer(norm_pat, doc))\r\n\r\n    # Replace keep matches with placeholder\r\n    if norm_tok_pad:\r\n        norm_tok = ''.join([' ', norm_tok, ' '])\r\n    doc, norm_count = re.subn(norm_pat, norm_tok, 
doc)\r\n\r\n # Check replacement count\r\n assert norm_count == len(norm_matches), \"Keep token sub error\"\r\n \r\n return (doc, norm_matches)\r\n\r\ndef post_norm(doc, norm_matches, norm_tok):\r\n '''\r\n Postprocess text to replace keep together placeholder with\r\n original text\r\n '''\r\n \r\n # Get first match\r\n if len(norm_matches) > 0:\r\n m = norm_matches.pop(0)\r\n else:\r\n m = None\r\n\r\n # Iterate over sentences\r\n for i, sent in enumerate(doc):\r\n \r\n # Iterate over tokens and sentence\r\n for j, tok in enumerate(sent):\r\n \r\n # Current token contains keep token\r\n if norm_tok in tok:\r\n \r\n # Replace keep token\r\n doc[i][j] = tok.replace(norm_tok, m.group(0))\r\n\r\n # Get next match\r\n if len(norm_matches) > 0:\r\n m = norm_matches.pop(0)\r\n else:\r\n m = None\r\n \r\n return doc\r\n\r\n\r\ndef nltk_quote_correction(doc, tokenized):\r\n\r\n '''\r\n Correct NLTK substitutions for double quotes\r\n NLTK substitutes '``' for opening double quote\r\n NLTK substitutes \"''\" for closing double quote\r\n \r\n (r'``', '\"'), # NLTK quotes\r\n (\"''\", '\"'), # NLTK quotes\r\n '''\r\n \r\n # Document as text without white space\r\n doc_wo_ws = \"\".join(doc.split())\r\n \r\n # Iterate over sentences\r\n for i, sent in enumerate(tokenized):\r\n \r\n # Iterate over tokens and sentence\r\n for j, tok in enumerate(sent):\r\n\r\n # Character count\r\n char_count = len(tok)\r\n \r\n # Token characters do not match next characters in string\r\n if not doc_wo_ws[0:char_count] == tok:\r\n \r\n # Next character in string is a double quote\r\n next_is_quote = doc_wo_ws[0] == DOUBLE_QUOTE\r\n \r\n # Is token NLTK opening quote OR closing quote\r\n if (tok in [NLTK_OPENING_QUOTE, NLTK_CLOSING_QUOTE]) \\\r\n and next_is_quote:\r\n \r\n # Correct nltk substitution\r\n tokenized[i][j] = DOUBLE_QUOTE\r\n \r\n # Adjust character count\r\n char_count = len(DOUBLE_QUOTE)\r\n \r\n # Must be an error \r\n else:\r\n msg = '''Token = {}\r\n \\nText = {}\\n\r\n \\nFull text =\\n{}'''.format(tok, doc_wo_ws, doc)\r\n raise ValueError(msg)\r\n \r\n # Delete found characters from text\r\n doc_wo_ws = doc_wo_ws[char_count:]\r\n\r\n return tokenized\r\n\r\ndef tokenize_doc(doc, \\\r\n abbrev = ABBREV_PAT,\r\n dummy = '__DUMMY__',\r\n num_list = NUM_LIST,\r\n parse_sentences = True,\r\n remove_blank = False,\r\n norm_pat = None,\r\n norm_tok = '__KEEP__',\r\n norm_tok_pad = True):\r\n '''\r\n Parse document into sentences and tokenize, using NLTK\r\n NOTE: preprocessing and postprocessing used to avoid splitting\r\n leading token in lists and abbreviations with periods\r\n \r\n Args:\r\n doc = document as string\r\n return_ind: \r\n if False, returns doc as list of \r\n sentences where sentences are lists of tokens\r\n if True, returns doc as list of list of\r\n namedtuple that includes the token and start \r\n and stop indices\r\n '''\r\n\r\n # Get document without white space for check at end\r\n doc_wo_ws = \"\".join(doc.split())\r\n\r\n # Replace keep together text with placeholder \r\n if norm_pat is not None:\r\n doc, norm_matches = \\\r\n pre_norm(doc, norm_pat, norm_tok, norm_tok_pad) \r\n\r\n # Make sure dummy not in text\r\n assert not bool(re.search(dummy, doc)), \"Found dummy token\" \r\n \r\n # Preprocessing for sentence boundary detection and tokenization\r\n sent_preprocess = [\r\n (num_list.format('\\.'), dummy), # Numbered lists\r\n (abbrev.format('\\.'), dummy), # Abbreviations\r\n ('\\/', ' / '), # Slashes\r\n ('\\-', ' - '), # Hyphens\r\n ]\r\n\r\n # Post processing for sentence 
boundary detection and tokenization\r\n token_postprocess = [\r\n (dummy, '.'), # Any dummy substitution, \r\n # including numbered lists and abbreviations\r\n ]\r\n\r\n # Split on lines\r\n tokenized = doc.split('\\n')\r\n\r\n # Preprocess line before sentence boundary detection\r\n for pattern, repl in sent_preprocess:\r\n tokenized = [regex.sub(pattern, repl, line) for line in tokenized]\r\n\r\n # Parse into sentences\r\n if parse_sentences:\r\n tokenized = [sent for line in tokenized \\\r\n for sent in sent_tokenize(line)]\r\n # Tokenize sentences\r\n tokenized = [word_tokenize(sent) for sent in tokenized]\r\n\r\n # Remove blank sentences\r\n if remove_blank:\r\n tokenized = [sent for sent in tokenized if len(sent) > 0]\r\n\r\n # Postprocess tokens\r\n for pattern, repl in token_postprocess:\r\n tokenized = [\" \".join(sent) for sent in tokenized]\r\n tokenized = [regex.sub(pattern, repl, sent) for sent in tokenized]\r\n tokenized = [sent.split() for sent in tokenized]\r\n\r\n # Correct NLTK quotes change\r\n tokenized = nltk_quote_correction(doc, tokenized)\r\n\r\n # Replace keep together placeholder with original text\r\n if norm_pat is not None:\r\n tokenized = post_norm(tokenized, norm_matches, norm_tok) \r\n\r\n # Check characters before and after tokenization\r\n tok_wo_ws = \"\".join([tok for sent in tokenized for tok in sent])\r\n tok_wo_ws = \"\".join(tok_wo_ws.split())\r\n assert doc_wo_ws == tok_wo_ws, \"tokenization error\"\r\n\r\n return tokenized\r\n\r\n\r\n\r\n\r\ndef find_text_subset(full, partial):\r\n \r\n \r\n \r\n # Get text without white space\r\n full_no_ws = \"\".join(full.split())\r\n partial_no_ws = \"\".join(partial.split())\r\n \r\n # Find partial in full\r\n start_no_ws = full_no_ws.find(partial_no_ws)\r\n end_no_ws = start_no_ws + len(partial_no_ws)\r\n assert start_no_ws >= 0, \"Could not find match\" \r\n\r\n i_no_ws = 0\r\n start_full = -1\r\n end_full = -1\r\n for i_full, char in enumerate(full):\r\n\r\n # Character is not whitespace\r\n if not char.isspace():\r\n \r\n assert full_no_ws[i_no_ws] == full[i_full], \\\r\n '''character mismatch {} {}'''.format( \\\r\n full_no_ws[i_no_ws], full[i_full])\r\n \r\n # Found index of beginning of overlap\r\n if i_no_ws == start_no_ws:\r\n start_full = i_full\r\n\r\n # Found index of end of overlap\r\n if i_no_ws == end_no_ws:\r\n end_full = i_full\r\n \r\n # Increment non-white space count \r\n i_no_ws += 1\r\n\r\n assert start_full >= 0, \"could not find start\" \r\n assert end_full >= 0, \"could not find end\"\r\n\r\n return (start_full, end_full)\r\n\r\n\r\ndef get_char_indices(text, tokenized, subs=None):\r\n \r\n '''\r\n Get token character indices\r\n '''\r\n \r\n if subs is not None:\r\n subs = subs[:]\r\n \r\n # Initialize stop index (index of last character found)\r\n idx_end = 0\r\n \r\n # Loop on tokenized document\r\n doc_indices = []\r\n \r\n # Loop on sentences in document\r\n for sent in tokenized:\r\n \r\n # Initialize new sentence\r\n sent_indices = []\r\n \r\n # Loop on tokens and sentence\r\n for tok in sent:\r\n\r\n tok_tmp = tok\r\n\r\n if (subs is not None) and \\\r\n re.search(DEIDENT_PATTERN_NEW, tok):\r\n\r\n orig, new_ = subs.pop(0)\r\n\r\n # Make sure substituted value matches current token\r\n assert new_ in tok, '''string mismatch:\r\n tok = {}\r\n new_ = {}'''.format(tok, new_)\r\n \r\n tok_tmp = tok_tmp.replace(new_, orig, 1)\r\n\r\n \r\n idx_start = text.index(tok_tmp, idx_end)\r\n skipped = text[idx_end:idx_start]\r\n assert (len(skipped) == 0) or (skipped.isspace()), '''\r\n 
Tried to skip: \"{}\"'''.format(skipped)\r\n idx_end = idx_start + len(tok_tmp)\r\n \r\n \r\n \r\n # Package as named tuple and depend\r\n sent_indices.append((idx_start, idx_end))\r\n \r\n doc_indices.append(sent_indices) \r\n\r\n if subs is not None:\r\n assert(len(subs)) == 0, \"substitutions remaining\" \r\n \r\n return doc_indices\r\n\r\ndef map_num_token(tok, num_token=NUMBER_TOKEN):\r\n '''\r\n Map numbers to special number token\r\n '''\r\n return re.sub(NUMBER_PATTERNS, num_token, tok)\r\n \r\n\r\ndef map_num_line(line, num_token=NUMBER_TOKEN):\r\n '''\r\n Map sequence of tokens to number token\r\n '''\r\n return [map_num_token(tok, num_token) for tok in line]\r\n \r\n\r\ndef map_num_doc(doc, num_token=NUMBER_TOKEN):\r\n '''\r\n Map sequence of sequence (document) of tokens to number token\r\n '''\r\n return [map_num_line(line, num_token) for line in doc]\r\n\r\n\r\n\r\n","sub_path":"code/utils/tokenization_nltk.py","file_name":"tokenization_nltk.py","file_ext":"py","file_size_in_byte":10614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"53022261","text":"import os, re, shutil\n\ndef download(filePath, outPut):\n listOfShows = []\n for root, dirs, files in os.walk(filePath):\n for file in files:\n if file.endswith('.avi') or file.endswith('.mp4')or file.endswith('.mkv'):\n if tvShow(file) and not 'SAMPLE' in file.upper():\n #file = file.replace(' - ', '.').replace('-','.').replace(' ', '.').replace('_','.').replace('[', '.')\n temp = re.sub('[^A-Za-z0-9\\']+', '.', file)\n seasonNumb = tvShow(temp)\n #print(file)\n \n #print(seasonNumb)\n for i in seasonNumb:\n if i.startswith('S'):\n x = i[1:].split('E')\n x = x[0]\n elif 'X' in i:\n x = i.split('X')\n x = x[0]\n elif '.' in i:\n x = i.split('.')\n x = x[0]\n else:\n x = i[:-2]\n name = temp.upper().split(i)[0]\n name = name.replace('.', ' ')\n listOfShows.append(name.title())\n newpath = outPut+'\\\\'+name.title()\n seaspath = newpath[:-1]+'\\\\Season '+x\n if not os.path.exists(seaspath):\n os.makedirs(seaspath)\n shutil.copy(os.path.abspath(os.path.join(root, file)), os.path.join(seaspath, file))\n \n \n #print(name.title())5\n \n \n\n \ndef tvShow(file):\n return re.findall(r'EPISODE [0-9]+|S[0-9]+E[0-9]+|[0-9]+X[0-9]+|[0-9]{1,2,3}.[0-9]{1,2,3}|\\b[0-9]{3}\\b', file.upper())\n\ndownload(r'C:\\Users\\PC\\Documents\\GitHub\\Skoli\\önn3\\Python\\downloads', r'C:\\Users\\PC\\Desktop\\TV Shows')\n","sub_path":"önn3/Python/Download.py","file_name":"Download.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"268674869","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2021 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom .adaptor import FRAMEWORKS\nfrom .objective import OBJECTIVES\nfrom .conf.config import Conf\nfrom .utils import logger\nfrom .utils.create_obj_from_config import create_eval_func, create_dataloader\nfrom 
.conf.dotdict import deep_get, deep_set\nfrom .model import BaseModel as LpotModel\n\nclass Benchmark(object):\n \"\"\"Benchmark class can be used to evaluate the model performance, with the objective\n setting, user can get the data of what they configured in yaml\n\n Args:\n conf_fname (string): The path to the YAML configuration file containing accuracy goal,\n tuning objective and preferred calibration & quantization tuning space etc.\n\n \"\"\"\n\n def __init__(self, conf_fname):\n self.conf = Conf(conf_fname)\n self.framework = self.conf.usr_cfg.model.framework.lower()\n self._model = None\n self._b_dataloader = None\n\n def __call__(self):\n cfg = self.conf.usr_cfg\n framework_specific_info = {'device': cfg.device, \\\n 'approach': cfg.quantization.approach, \\\n 'random_seed': cfg.tuning.random_seed}\n framework = cfg.model.framework.lower()\n if framework == 'tensorflow':\n framework_specific_info.update({\"inputs\": cfg.model.inputs, \\\n \"outputs\": cfg.model.outputs, \\\n \"recipes\": cfg.model.recipes, \\\n 'workspace_path': cfg.tuning.workspace.path})\n if framework == 'mxnet':\n framework_specific_info.update({\"b_dataloader\": self._b_dataloader})\n if 'onnxrt' in framework.lower():\n framework_specific_info.update({\"backend\": framework.lower().split('_')[-1], \\\n 'workspace_path': cfg.tuning.workspace.path})\n if framework == 'pytorch':\n framework_specific_info.update({\"q_dataloader\": None,\n \"benchmark\": True})\n if framework == 'pytorch_ipex':\n framework_specific_info.update({\"workspace_path\": cfg.tuning.workspace.path,\n \"q_dataloader\": None,\n \"benchmark\": True})\n\n assert isinstance(self._model, LpotModel), 'need set lpot Model for quantization....'\n\n adaptor = FRAMEWORKS[framework](framework_specific_info)\n\n assert cfg.evaluation is not None, 'benchmark need evaluation filed not be None'\n results = {}\n for mode in cfg.evaluation.keys():\n iteration = -1 if deep_get(cfg, 'evaluation.{}.iteration'.format(mode)) is None \\\n else deep_get(cfg, 'evaluation.{}.iteration'.format(mode))\n metric = deep_get(cfg, 'evaluation.{}.metric'.format(mode))\n b_postprocess_cfg = deep_get(cfg, 'evaluation.{}.postprocess'.format(mode))\n\n if self._b_dataloader is None:\n assert deep_get(cfg, 'evaluation.{}.dataloader'.format(mode)) is not None, \\\n 'dataloader field of yaml file is missing'\n\n b_dataloader_cfg = deep_get(cfg, 'evaluation.{}.dataloader'.format(mode))\n self._b_dataloader = create_dataloader(self.framework, b_dataloader_cfg)\n b_func = create_eval_func(self.framework, \\\n self._b_dataloader, \\\n adaptor, \\\n metric, \\\n b_postprocess_cfg,\n iteration=iteration)\n else:\n b_func = create_eval_func(self.framework, \\\n self._b_dataloader, \\\n adaptor, \\\n metric, \\\n b_postprocess_cfg,\n iteration=iteration)\n\n objective = cfg.tuning.objective.lower()\n self.objective = OBJECTIVES[objective](cfg.tuning.accuracy_criterion, \\\n is_measure=True)\n\n val = self.objective.evaluate(b_func, self._model)\n logger.info('{} mode benchmark done!'.format(mode))\n # measurer contain info not only performance(eg, memory, model_size)\n # also measurer have result list among steps\n acc, _ = val\n batch_size = self._b_dataloader.batch_size\n warmup = 0 if deep_get(cfg, 'evaluation.{}.warmup'.format(mode)) is None \\\n else deep_get(cfg, 'evaluation.{}.warmup'.format(mode))\n\n assert len(self.objective.measurer.result_list()) > warmup, \\\n 'itreation should larger than warmup'\n\n results[mode] = acc, batch_size, \\\n 
self.objective.measurer.result_list()[warmup:]\n\n return results\n\n @property\n def b_dataloader(self):\n return self._b_dataloader\n\n @b_dataloader.setter\n def b_dataloader(self, dataloader):\n \"\"\"Set Data loader for benchmark, It is iterable and the batched data \n should consists of a tuple like (input, label) or yield (input, _), \n when b_dataloader is set, user can configure postprocess(optional) and metric \n in yaml file or set postprocess and metric cls for evaluation.\n Or just get performance without label in dataloader and configure postprocess/metric.\n\n Args:\n dataloader(generator): user are supported to set a user defined dataloader\n which meet the requirements that can yield tuple of\n (input, label)/(input, _) batched data.\n Another good practice is to use lpot.common.DataLoader\n to initialize a lpot dataloader object.\n Notice lpot.common.DataLoader is just a wrapper of the\n information needed to build a dataloader, it can't yield\n batched data and only in this setter method \n a 'real' eval_dataloader will be created, \n the reason is we have to know the framework info\n and only after the Quantization object created then\n framework infomation can be known. Future we will support\n creating iterable dataloader from lpot.common.DataLoader\n\n \"\"\"\n from .common import _generate_common_dataloader\n self._b_dataloader = _generate_common_dataloader(dataloader, self.framework)\n\n @property\n def model(self):\n return self._model\n\n @model.setter\n def model(self, user_model):\n \"\"\"Set the user model and dispatch to framework specific internal model object\n\n Args:\n user_model: user are supported to set model from original framework model format\n (eg, tensorflow frozen_pb or path to a saved model), but not recommended.\n Best practice is to set from a initialized lpot.common.Model.\n If tensorflow model is used, model's inputs/outputs will be auto inferenced,\n but sometimes auto inferenced inputs/outputs will not meet your requests,\n set them manually in config yaml file. Another corner case is slim model \n of tensorflow, be careful of the name of model configured in yaml file,\n make sure the name is in supported slim model list.\n \n \"\"\"\n from .common import Model as LpotModel\n from .model import MODELS\n if not isinstance(user_model, LpotModel):\n logger.warning('force convert user raw model to lpot model, \\\n better initialize lpot.common.Model and set....')\n user_model = LpotModel(user_model)\n\n framework_model_info = {}\n cfg = self.conf.usr_cfg\n if self.framework == 'tensorflow':\n framework_model_info.update(\n {'name': cfg.model.name,\n 'input_tensor_names': cfg.model.inputs,\n 'output_tensor_names': cfg.model.outputs,\n 'workspace_path': cfg.tuning.workspace.path})\n\n self._model = MODELS[self.framework](\\\n user_model.root, framework_model_info, **user_model.kwargs)\n\n @property\n def metric(self):\n logger.warning('metric not support getter....')\n return None\n\n @metric.setter\n def metric(self, user_metric):\n \"\"\"Set metric class and lpot will initialize this class when evaluation\n lpot have many built-in metrics, but user can set specific metric through\n this api. 
The metric class should take the outputs of the model or \n postprocess(if have) as inputs, lpot built-in metric always take \n (predictions, labels) as inputs for update,\n and user_metric.metric_cls should be sub_class of lpot.metric.BaseMetric.\n\n Args:\n user_metric(lpot.common.Metric): user_metric should be object initialized from\n lpot.common.Metric, in this method the \n user_metric.metric_cls will be registered to\n specific frameworks and initialized.\n \n \"\"\"\n from .common import Metric as LpotMetric\n assert isinstance(user_metric, LpotMetric), \\\n 'please initialize a lpot.common.Metric and set....'\n\n metric_cfg = {user_metric.name : {**user_metric.kwargs}}\n if deep_get(self.conf.usr_cfg, \"evaluation.accuracy.metric\"):\n logger.warning('already set metric in yaml file, will override it...')\n deep_set(self.conf.usr_cfg, \"evaluation.accuracy.metric\", metric_cfg)\n from .conf.dotdict import DotDict\n self.conf.usr_cfg = DotDict(self.conf.usr_cfg)\n from .metric import METRICS\n metrics = METRICS(self.framework)\n metrics.register(user_metric.name, user_metric.metric_cls)\n\n @property\n def postprocess(self, user_postprocess):\n logger.warning('postprocess not support getter....')\n return None\n\n @postprocess.setter\n def postprocess(self, user_postprocess):\n \"\"\"Set postprocess class and lpot will initialize this class when evaluation. \n The postprocess class should take the outputs of the model as inputs, and\n output (predictions, labels) as inputs for metric update.\n user_postprocess.postprocess_cls should be sub_class of lpot.data.BaseTransform.\n\n Args:\n user_postprocess(lpot.common.Postprocess): \n user_postprocess should be object initialized from lpot.common.Postprocess,\n in this method the user_postprocess.postprocess_cls will be \n registered to specific frameworks and initialized.\n\n \"\"\"\n from .common import Postprocess as LpotPostprocess\n assert isinstance(user_postprocess, LpotPostprocess), \\\n 'please initialize a lpot.common.Postprocess and set....'\n postprocess_cfg = {user_postprocess.name : {**user_postprocess.kwargs}}\n if deep_get(self.conf.usr_cfg, \"evaluation.accuracy.postprocess\"):\n logger.warning('already set postprocess in yaml file, will override it...')\n deep_set(self.conf.usr_cfg, \"evaluation.accuracy.postprocess.transform\", postprocess_cfg)\n from .data import TRANSFORMS\n postprocesses = TRANSFORMS(self.framework, 'postprocess')\n postprocesses.register(user_postprocess.name, user_postprocess.postprocess_cls)\n logger.info(\"{} registered to postprocess\".format(user_postprocess.name))\n\n","sub_path":"lpot/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":12770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"352340128","text":"# Author : Trikarsa Tirtadwipa Manunggal\n\nimport json as JSON\nimport sqlite3\nimport time\nfrom pathlib import Path\n\nfrom sanic import Sanic\nfrom sanic.response import json, text\n\n\ndef json_factory(cursor, row):\n '''\n Return sql rows in dict (json) format\n '''\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d\n\n\ndef create_table(sqlite_filename=\"user_review.sqlite\", database_filename=\"database.db\"):\n '''\n Create table if not exists\n '''\n USER_TABLE = Path(sqlite_filename).read_text()\n\n db = sqlite3.connect(database_filename)\n db.row_factory = json_factory\n cursor = db.cursor()\n\n cursor.execute(USER_TABLE)\n db.commit()\n\n return 
db\n\n\ndef param2query(param):\n '''\n Turn html query string to sql query\n '''\n\n equal_field = [\"id\", \"order_id\", \"product_id\", \"user_id\", \"rating\", \"created_at\", \"updated_at\"]\n logic_field = [\"id\", \"rating\", \"created_at\", \"updated_at\"]\n count = 0\n query_condition = \"\"\n query_value = ()\n\n if len(param) != 0:\n for key, value in param.items():\n if query_condition != \"\":\n query_condition += \" AND \"\n\n if key in equal_field:\n query_condition += key + \" = \" + \"?\"\n query_value += (str(value),)\n count += 1\n elif key.endswith(\"_lt\") and key[:-3] in logic_field:\n query_condition += key[:-3] + \" < \" + \"?\"\n query_value += (str(value),)\n count += 1\n elif key.endswith(\"_gt\") and key[:-3] in logic_field:\n query_condition += key[:-3] + \" > \" + \"?\"\n query_value += (str(value),)\n count += 1\n\n return query_condition, query_value\n\n\ndef get_handler(request, db):\n '''\n Handle get request\n '''\n try:\n cursor = db.cursor()\n\n param = request.raw_args\n query = \"SELECT * FROM user_review \"\n\n query_condition, query_value = param2query(param)\n if query_condition != \"\":\n query += \" WHERE \" + query_condition + \";\"\n cursor.execute(query, query_value)\n else:\n cursor.execute(query)\n\n data = []\n for row in cursor.fetchall():\n data.append(row)\n\n retval = {\n 'status': 200,\n 'data': data,\n 'size': len(data),\n 'message': 'GET request is success'\n }\n\n\n except:\n retval = {\n 'status': 400,\n 'message': 'GET request is failed'\n }\n\n return retval\n\n\ndef post_handler(request, db):\n '''\n Handle post request\n '''\n try:\n cursor = db.cursor()\n\n body = request.body.decode('utf-8')\n data = JSON.loads(body)\n\n field_name = \"\"\n field_value = ()\n\n for key, value in data.items():\n field_name += key + ','\n field_value += (value,)\n\n current_timestamp = int(round(time.time() * 1000))\n field_name += \"created_at\" + ','\n field_value += (current_timestamp,)\n field_name += \"updated_at\" + ','\n field_value += (current_timestamp,)\n\n query = \"INSERT INTO user_review(%s) VALUES(%s);\" % (field_name[:-1], ('?,' * len(field_value))[:-1])\n\n cursor.execute(query, field_value)\n db.commit()\n\n retval = {\n 'status': 200,\n 'data': [data],\n 'size': len([data]),\n 'message': 'POST request is success'\n }\n\n except:\n retval = {\n 'status': 400,\n 'message': 'POST request is failed'\n }\n\n return retval\n\n\ndef put_handler(request, db):\n '''\n Handle put request\n '''\n try:\n cursor = db.cursor()\n\n body = request.body.decode('utf-8')\n data = JSON.loads(body)\n\n param = request.raw_args\n query_condition, query_value = param2query(param)\n\n field = \"\"\n\n for key, value in data.items():\n field += key + \"='\" + str(value) + \"',\"\n\n current_timestamp = int(round(time.time() * 1000))\n field += \"updated_at\" + \"='\" + str(current_timestamp) + \"',\"\n\n query = \"UPDATE user_review SET %s WHERE %s;\" % (field[:-1], query_condition)\n\n print(query)\n\n if query_condition != \"\":\n cursor.execute(query, query_value)\n db.commit()\n else :\n raise ValueError(\"No constraint found\")\n\n retval = {\n 'status': 200,\n 'data': data,\n 'size': len(data),\n 'message': 'PUT request is success'\n }\n\n except:\n retval = {\n 'status': 400,\n 'message': 'PUT request is failed'\n }\n\n return retval\n\n\ndef delete_handler(request, db):\n '''\n Handle delete request\n '''\n try:\n cursor = db.cursor()\n\n param = request.raw_args\n query_condition, query_value = param2query(param)\n\n query = \"SELECT * FROM 
user_review WHERE \" + query_condition + \";\"\n cursor.execute(query, query_value)\n\n counter = 0\n data = []\n for row in cursor.fetchall():\n counter += 1\n data.append(row)\n\n query = \"DELETE FROM user_review WHERE %s;\" % (query_condition)\n\n if query_condition != \"\":\n cursor.execute(query, query_value)\n db.commit()\n else :\n raise ValueError(\"No constraint found\")\n\n retval = {\n 'status': 200,\n 'data': data,\n 'size': len(data),\n 'message': 'DELETE request is success'\n }\n\n except:\n retval = {\n 'status': 400,\n 'message': 'DELETE request is failed'\n }\n\n return retval\n\n\ndef crud_handler(request, db):\n if request.method == 'GET':\n return get_handler(request, db)\n elif request.method == 'POST':\n return post_handler(request, db)\n elif request.method == 'PUT':\n return put_handler(request, db)\n elif request.method == 'DELETE':\n return delete_handler(request, db)\n\n\n#################################################################\n\napp = Sanic()\ndb = create_table()\n\n@app.route('/')\nasync def test(request):\n response = crud_handler(request, db)\n return text(\"Welcome to simple REST.\\nPlease head to /user_review.\\nAuthor : Tirtadwipa Manunggal\")\n\n@app.route('/user_review', methods=['GET', 'POST', 'PUT', 'DELETE'])\nasync def test(request):\n response = crud_handler(request, db)\n return json(response, status=response['status'])\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8000)\n","sub_path":"basic_coding/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":6636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"257077229","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Philipp Temminghoff\n\"\"\"\n\nfrom prettyqt import gui\n\n\nclass NotZeroValidator(gui.Validator):\n\n def __getstate__(self):\n return dict()\n\n def __setstate__(self, state):\n self.__init__()\n\n def validate(self, text, pos=0):\n if text == \"0\":\n return (self.Intermediate, text, pos)\n return (self.Acceptable, text, pos)\n\n\nif __name__ == \"__main__\":\n from prettyqt import widgets\n val = NotZeroValidator()\n app = widgets.app()\n widget = widgets.LineEdit(\"This is a test\")\n widget.setValidator(val)\n widget.show()\n app.exec_()\n","sub_path":"prettyqt/custom_validators/notzerovalidator.py","file_name":"notzerovalidator.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"6375464","text":"import os, sys\nproject_root = os.path.join(os.path.expanduser('~'), 'Dev/NetModules')\nsys.path.append(project_root)\nimport io\nimport requests\nimport matplotlib as mpl\n\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport progressbar\nimport prop_eval_utils\n#\n# prediction_file = '/home/zwei/Dev/NetModules/ActionLocalizationDevs/PropEval/pkl_files/TURN-C3D-16_thumos14.pkl'\n# ground_truth_file = '/home/zwei/Dev/NetModules/ActionLocalizationDevs/PropEval/thumos14_test_groundtruth.csv'\n\n\n\ndef pkl_seconds2dataframe(frm_nums):\n data_frame = []\n # movie_fps = pickle.load(open(\"./movie_fps.pkl\"))\n # pkl_dir = \"./pkl_files/\"\n dt_results = pickle.load(open(prediction_file))\n pbar = progressbar.ProgressBar(max_value=len(dt_results))\n for i, _key in enumerate(dt_results):\n pbar.update(i)\n # fps = movie_fps[_key]\n frm_num = frm_nums[_key]\n for line in dt_results[_key]:\n start = int(line[0] * 30)\n end = int(line[1] * 30)\n 
score = float(line[2])\n data_frame.append([end, start, score, frm_num, _key])\n return data_frame\n\ndef pkl_frame2dataframe(frm_nums):\n data_frame = []\n # movie_fps = pickle.load(open(\"./movie_fps.pkl\"))\n # pkl_dir = \"./pkl_files/\"\n dt_results = pickle.load(open(prediction_file))\n pbar = progressbar.ProgressBar(max_value=len(dt_results))\n for i, _key in enumerate(dt_results):\n pbar.update(i)\n # fps = movie_fps[_key]\n frm_num = frm_nums[_key]\n for line in dt_results[_key]:\n start = int(line[0])\n end = int(line[1])\n score = float(line[2])\n data_frame.append([end, start, score, frm_num, _key])\n return data_frame\n\nfor file_idx in range(1, 300, 10):\n save_name = 'lstm2heads_{:04d}'.format(file_idx)\n framebased = True\n\n prediction_file = '/home/zwei/Dev/NetModules/ActionLocalizationDevs/PropEval/pkl_files/{:s}_thumos14.pkl'.format(save_name)\n ground_truth_file = '/home/zwei/Dev/NetModules/ActionLocalizationDevs/PropEval/thumos14_test_groundtruth.csv'\n frm_nums = pickle.load(open(\"./frm_num.pkl\"))\n if framebased:\n rows = pkl_frame2dataframe(frm_nums)\n\n else:\n rows = pkl_seconds2dataframe(frm_nums)\n\n daps_results = pd.DataFrame(rows, columns=['f-end', 'f-init', 'score', 'video-frames', 'video-name'])\n\n # Retrieves and loads Thumos14 test set ground-truth.\n # ground_truth_url = ('https://gist.githubusercontent.com/cabaf/'\n # 'ed34a35ee4443b435c36de42c4547bd7/raw/'\n # '952f17b9cdc6aa4e6d696315ba75091224f5de97/'\n # 'thumos14_test_groundtruth.csv')\n # s = requests.get(ground_truth_url).content\n ground_truth = pd.read_csv(ground_truth_file, sep=' ')\n # Computes average recall vs average number of proposals.\n average_recall, average_nr_proposals = prop_eval_utils.average_recall_vs_nr_proposals(daps_results,\n ground_truth)\n\n # Computes average recall vs proposal frequency.\n average_recall_freq, freqs = prop_eval_utils.average_recall_vs_freq(daps_results, ground_truth, frm_nums)\n\n # Computes recall for different tiou thresholds at a fixed average number of proposals.\n recall, tiou_thresholds = prop_eval_utils.recall_vs_tiou_thresholds(daps_results, ground_truth,\n nr_proposals=1000)\n\n recall_freq, tiou_thresholds_freq = prop_eval_utils.recall_freq_vs_tiou_thresholds(daps_results, ground_truth, frm_nums)\n\n\n\n avg_prop_pnt_file = \"./ref_pnt_pairs/{:s}_avg_prop_pnt_pairs.npy\"\n np.save(avg_prop_pnt_file.format(save_name), np.array([average_nr_proposals, average_recall]))\n\n freq_pnt_file = \"./ref_pnt_pairs/{:s}_freq_pnt_pairs.npy\"\n np.save(freq_pnt_file.format(save_name), np.array([freqs, average_recall_freq]))\n\n recall1000_pnt_file = \"./ref_pnt_pairs/{:s}_recall_pnt_pairs.npy\"\n\n np.save(recall1000_pnt_file.format(save_name), np.array([tiou_thresholds, recall]))\n\n recall_freq_pnt_file = \"./ref_pnt_pairs/{:s}_recall_freq_pnt_pairs.npy\"\n np.save(recall_freq_pnt_file.format(save_name), np.array([tiou_thresholds_freq, recall_freq]))\n\n\n","sub_path":"Devs_ActionProp/PropEval/prop_eval_save2file_batch.py","file_name":"prop_eval_save2file_batch.py","file_ext":"py","file_size_in_byte":4310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"294457442","text":"#! 
/usr/local/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nimport warnings\nfrom datetime import datetime\n\n\n\nsys.path.append(\"../\")\nfrom utils.conf import wd, wd_cur, ai, ai_cur, ai_en\nfrom utils.etl import ETL\n\nwarnings.filterwarnings('ignore')\nos.environ['NLS_LANG'] = 'SIMPLIFIED CHINESE_CHINA.UTF8'\n# For a full extraction, just change the parameter values below\ntable_name = 'iadvisor_advisor'\nfile_name = 'iadvisor_advisor'\n\n\n# Fetch the data-mapping SQL\nsrc_sql = \"SELECT src_sql, data_source,columns_list,unique_key FROM iadvisor.etl_src_tgt_rule WHERE tgt_table = '%s' AND is_use = 1\" % table_name\nai_cur.execute(src_sql)\nrows = ai_cur.fetchall()\nsql = rows[0][0]\ncolumns = rows[0][2]\nunique_key = rows[0][3]\nprint(sql)\ntable_name = 'iadvisor.iadvisor_advisor'\netl = ETL(src_cur=wd_cur,\n          src_conn=wd,\n          tgt_cur=ai_cur,\n          tgt_conn=ai,\n          sql=sql,\n          table_name=table_name,\n          columns=columns,\n          unique_key=unique_key)\n\n# Dump the data from the source\netl.dump_data(file_name)\n\n# Import the data into the target\netl.import_data(file_name)\n\n\nwd_cur.close()\nai_cur.close()\nwd.close()\nai.close()\n","sub_path":"iadvisor/iadvisor_advisor.py","file_name":"iadvisor_advisor.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"227675373","text":"from itertools import groupby\nfrom dateutil.relativedelta import relativedelta\nfrom math import floor\nfrom rest.models import PhoneBill, CallTariff\nfrom django.utils import dateparse, timezone\nfrom datetime import datetime\nfrom decimal import *\ngetcontext().prec = 2\n\n# Main service functionalities to be called by the views\n\ndef calculate_bills(records):\n    '''\n    Gets the call records and transforms them into bills.\n    This returns a list of PhoneBill objects.\n    '''\n    # Group our records by call_id. 
This makes it so the start and end\n # records are joined inside one key\n calls = groupby(records, lambda call: call.call_id)\n bills = []\n # Iterating over the grouped call records\n for _, call_records in calls:\n # Initialize variables to avoid scope conflicts\n start = \"\"\n end = \"\"\n destination = \"\"\n # Since the records are grouped by call_id, we have both the\n # start and end records inside the set\n # We check the type and extract the necessary data from each\n # of them\n for call_record in call_records:\n if call_record.type == \"S\":\n start = call_record.timestamp\n destination = call_record.destination\n else:\n end = call_record.timestamp\n # If that record already exists, get it from the database\n # This is cacheable, and it might be a good idea to do so\n try:\n bill = PhoneBill.objects.get(\n destination=destination,\n start_timestamp=start\n )\n # If it doesn't, it needs to be calculated and created\n except PhoneBill.DoesNotExist:\n # Get the latest tariffs for that time period\n try:\n tariff = CallTariff.objects.filter(\n valid_after__lte=start\n ).order_by('-valid_after')[0]\n # If for some reason they aren't there, initialize the\n # database with a tariff set (in this case, the one in\n # the spec)\n except IndexError:\n tariff = CallTariff(\n valid_after=timezone.now().replace(year=1900),\n base_tariff=Decimal('0.36'),\n minute_charge=Decimal('0.09'),\n discount_charge=Decimal('0.00')\n )\n tariff.save()\n # Calculate the charge for this particular call\n charge = calculate_pricing(start, end, tariff)\n # Calculate the duration of this call\n duration = calculate_time_delta(start, end)\n # Create the PhoneBill object\n bill = PhoneBill(\n destination=destination,\n start_timestamp=start,\n call_duration=duration.total_seconds(),\n charge=charge\n )\n bill.save()\n # Add the bill to the list\n bills.append(bill)\n return bills\n\n# Functions to calculate the pricing of a given call\n# They are separated in case of a pricing calculation change,\n# e.g. 
making the discount tariff a percentage of the full tariff\n\ndef calculate_basic_tariff(start, end, call_tariff):\n '''\n Function to calculate the usual tariff between two time periods\n '''\n # Get the time delta between the start and end of the call\n delta = end - start\n # We only count full minutes\n call_minutes = floor(delta.seconds/60)\n tariff = call_minutes * call_tariff.minute_charge\n return tariff\n\n\ndef calculate_discount_tariff(start, end, call_tariff):\n '''\n Function to calculate a discounted tariff between two time periods\n '''\n # Get the time delta between the start and end of the call\n delta = end - start\n # We only count full minutes\n call_minutes = floor(delta.seconds/60)\n tariff = call_minutes * call_tariff.discount_charge\n return tariff\n\n# Functions to calculate the whole bill of a record\n\ndef calculate_pricing(start, end, call_tariff):\n '''\n This function calculates the full price of a given call from\n timestamp start to timestamp end, given a set of call tariffs\n '''\n # Safety measure for scope problems\n tariff = 0\n # Get the measurement of time between the timestamps\n delta = calculate_time_delta(start, end)\n current = start\n # While we still have time to process...\n while delta_hours(delta) >= 0:\n # Get if we are inside the discount period and how much time\n # is left until the next period change\n is_discount_period, to_break = calculate_period(current)\n # If the next break period comes before the call ends...\n if delta > to_break:\n # ...check if it's the discount period and add the tariff\n # from the current time until the period flip\n if is_discount_period:\n tariff += calculate_discount_tariff(\n current,\n (current+to_break),\n call_tariff\n )\n else:\n tariff += calculate_basic_tariff(\n current,\n (current+to_break),\n call_tariff\n )\n # Take off the time we just processed from the delta\n delta -= to_break\n # and add the time passed to our 'current' timestamp\n current += to_break\n # If we don't cross any periods until the call ends, then it's\n # easy happy dreamland\n else:\n # Check if we're in a discount period and add the tariff\n if is_discount_period:\n tariff += calculate_discount_tariff(current, end, call_tariff)\n else:\n tariff += calculate_basic_tariff(current, end, call_tariff)\n # If we did not cross a billing period then we're done by here\n break\n # Add up the base tariff to the minute charges\n tariff += call_tariff.base_tariff\n return tariff\n\n# Helper functions\n\ndef calculate_period(timestamp):\n '''\n Calculates the rates and time until the next change in tariff rate\n from a given timestamp\n '''\n # If the time period is between the two discount periods\n if (timestamp.hour >= 6) and (timestamp.hour < 22):\n # No discount for you :(\n is_discount_period = False\n # The next discount period starts at 22:00:00\n # so we calculate the delta between the timestamp and that\n # start timestamp\n to_break = (\n timestamp.replace(hour=22, minute=0, second=0)\n - timestamp\n )\n # If the time period is inside one of the discount periods\n elif timestamp.hour < 6:\n # We get the discount!\n is_discount_period = True\n # The normal period starts at 06:00:00\n # so we calculate the delta between the timestamp and that\n # start timestamp\n to_break = (\n timestamp.replace(hour=6, minute=0, second=0)\n - timestamp\n )\n elif timestamp.hour >= 22:\n # We also get the discount!\n is_discount_period = True\n # There is a day flip until the next normal period, so we\n # need to add a day to the current 
date...\n next_day = timestamp + relativedelta(days=1)\n # and then get the delta to the next 06:00:00 timestamp\n to_break = (\n next_day.replace(hour=6, minute=0, second=0)\n - timestamp\n )\n return is_discount_period, to_break\n\n\ndef calculate_time_delta(start, end):\n '''\n Helper function that calculates a time delta between two time\n periods. Not strictly necessary, just for added readability\n '''\n return end - start\n\n\ndef delta_hours(delta):\n '''\n Helper function that calulates a given time delta in hours.\n Also not strictly necessary, but helps understanding\n '''\n return floor(delta.seconds/3600)\n\n\ndef get_last_month():\n '''\n Gets the time period referring the last month\n '''\n reference_start = timezone.now().replace(\n day=1,\n hour=0,\n minute=0,\n second=0,\n microsecond=0\n )+relativedelta(months=-1)\n reference_end = timezone.now().replace(\n day=1,\n hour=23,\n minute=59,\n second=59,\n microsecond=0\n )+relativedelta(days=-1)\n return reference_start, reference_end\n\ndef get_monthly_period(date):\n '''\n Gets the period referring to a month in which a date is contained\n '''\n reference_start = date.replace(\n day=1,\n hour=0,\n minute=0,\n second=0,\n microsecond=0\n )\n reference_end = date.replace(\n day=1,\n hour=23,\n minute=59,\n second=59,\n microsecond=0\n )+relativedelta(months=1, days=-1)\n return reference_start, reference_end\n\n\ndef parse_month_year(date_string):\n '''\n Usual datetime parsing routine. Tries parsing date and datetime,\n then moves on to the weirder formats.\n '''\n date = dateparse.parse_date(date_string)\n if date is not None:\n return get_monthly_period(date)\n date = dateparse.parse_datetime(date_string)\n if date is not None:\n return get_monthly_period(date)\n return parse_obscure_formats(date_string)\n\n\ndef parse_obscure_formats(date_string):\n '''\n This function tries to parse the more unusual date formats,\n like just the month, or string-based month representations\n '''\n # Try to parse dates in which only the month is passed\n # This assumes that the year is the current one\n try:\n # Try the number format first...\n date = datetime.strptime(date_string, \"%m\")\n date = date.replace(year=timezone.now().year)\n return get_monthly_period(date)\n except ValueError:\n date = None\n try:\n # ... 
then the shorthand letter format\n date = datetime.strptime(date_string, \"%b\")\n date = date.replace(year=timezone.now().year)\n return get_monthly_period(date)\n except ValueError:\n date = None\n # Try to parse dates in the \"06-1994\" format\n try:\n date = datetime.strptime(date_string, \"%m-%Y\")\n return get_monthly_period(date)\n except ValueError:\n date = None\n # Try to parse dates in the \"Jun-1994\" format\n try:\n date = datetime.strptime(date_string, \"%b-%Y\")\n return get_monthly_period(date)\n except ValueError:\n date = None\n # Try to parse dates in the \"1994-Jun\" format\n try:\n date = datetime.strptime(date_string, \"%Y-%b\")\n return get_monthly_period(date)\n except ValueError:\n date = None\n # Try to parse dates in the \"Jun1994\" format\n try:\n date = datetime.strptime(date_string, \"%b%Y\")\n return get_monthly_period(date)\n except ValueError:\n date = None\n # Couldn't parse the string\n if date is None:\n raise ValueError\n","sub_path":"olistphone/rest/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":10912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"388299824","text":"# -*-coding=utf-8-*-\n# @Time : 2018/8/16 13:35\n# @File : hjl_mine.py\nimport requests\nimport time\nfrom lxml import etree\n\n\ndef get_proxy(retry=5):\n proxyurl = 'http://{}:8081/dynamicIp/common/getDynamicIp.do'.format()\n for i in range(1, retry + 1):\n try:\n r = requests.get(proxyurl, timeout=10)\n except Exception as e:\n print(e)\n print('Failed to get proxy ip, retry ' + str(i))\n time.sleep(1)\n else:\n js = r.json()\n proxy_server = {'http': 'http://{}:{}'.format(js.get('ip'), js.get('port'))}\n # proxyServer = 'http://{0}:{1}'.format(js.get('ip'), js.get('port'))\n return proxy_server\n return None\n\n\nsession = requests.Session()\npage = 3\nheaders = {'User-Agent': 'Fox 4.3'}\nproxy = get_proxy()\nhome = 'http://www.hljcredit.gov.cn/WebCreditQueryService.do?gssearch&type=sxbzxrqg&detail=true&sxbzxrmc=&proselect=&cityselect=&disselect=&curPageNO={}'.format(\n page)\n# s = session.get(url=home,headers=headers)\ns = session.get(url=home, headers=headers, proxies=proxy)\nprint(s.status_code)\n# print(s.text)\ntree = etree.HTML(s.text)\nfor url in tree.xpath('//table[@class=\"list_2_tab\"]/tr/td/a/@href'):\n # next_url = tree.xpath('//table[@class=\"list_2_tab\"]/tr[2]/td[2]/a/@href')[0]\n details = 'http://www.hljcredit.gov.cn/' + url\n # proxy=get_proxy()\n\n s2 = session.get(details, headers=headers, proxies=proxy)\n # s2=session.get(details,headers=headers)\n # print(s2.text)\n detail_tree = etree.HTML(s2.text)\n print(detail_tree.xpath('//table[@class=\"for_letter\"]/tr[2]/td[2]/text()')[0])\n","sub_path":"hjl_mine.py","file_name":"hjl_mine.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"550138960","text":"def jacobi(a, n):\n if n <= 0:\n raise ValueError(\"'n' must be a positive integer.\")\n if n % 2 == 0:\n raise ValueError(\"'n' must be odd.\")\n a %= n\n result = 1\n while a != 0:\n while a % 2 == 0:\n a /= 2\n n_mod_8 = n % 8\n if n_mod_8 in (3, 5):\n result = -result\n a, n = n, a\n if a % 4 == 3 and n % 4 == 3:\n result = -result\n a %= n\n if n == 1:\n return result\n else:\n return 0\n\ndef bookJacobi(a, n):\n result = 1\n while True:\n a %= n\n if a == 0:\n if n == 1:\n return result\n else:\n return 0\n h = 0\n a_pr = a\n while a_pr % 2 == 0:\n a_pr //= 2\n h += 1\n if h % 2 == 
1 and (n % 8 != 1 and n % 8 != 7):\n            result = -result\n        if a_pr % 4 != 1 and n % 4 != 1:\n            result = -result\n        a, n = n, a_pr\nif __name__=='__main__':\n    a = int(input('enter a: '))\n    n = int(input('enter n: '))\n    print('jacobi', jacobi(a, n))\n    print(bookJacobi(a, n))","sub_path":"L8.py","file_name":"L8.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"245031421","text":"# -*- coding: utf-8 -*-\n\"\"\"\nFile Name: test_cmns.py\nAuthor: WillDX\nmail: daixiang1992@gmail.com\nCreated Time: Wed Feb 15 14:31:20 2017\n\"\"\"\n\nfrom godbox.options import define, options\nfrom godbox.utils.cmns import muti_insert_mns, muti_receive_mns, receive_mns\n\nmsgs = [\"[1]\", \"[2]\", \"[3]\", \"[4]\"]\nqn = \"cmns-example-01\"\n\n\ndef func(qn):\n    recv_msg = receive_mns(qn)\n    print(type(recv_msg.message_body))\n    return recv_msg.message_body\n\n\nif __name__ == \"__main__\":\n    # Multithreaded producing and multithreaded consuming cannot run at the same time\n    # Produce with multiple threads\n    muti_insert_mns(msgs, qn)\n    # Consume with multiple threads\n    res = muti_receive_mns(func, len(msgs), qn)\n","sub_path":"tests/test_cmns.py","file_name":"test_cmns.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"64858882","text":"import sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.signal import get_window\n#toolsdir = '/Users/tometz/Documents/MachineLearningStuff/SignalProcessingForMusicApplications/sms-tools/software/models/'\ntoolsdir = '/Users/tometz/Documents/Audio/TomWav/_analysis/processor/'\nsys.path.insert(0, toolsdir)\nimport stft as STFT\nimport hprModel as HPR\nimport sineModel as SM\nimport harmonicModel as HM\n\n# ////////////////////////////////////////////////////////////\n# processor //////////////////////////////////////////////////\n# ////////////////////////////////////////////////////////////\n\n# this function analyses a chunk of audio using a specified model\n# can display a graph of the model\n# returns the raw results\n\ndef processor(fs, xchunk, analysis, plot):\n\n    # TheSTFT ////////////////////////////\n    if analysis == 'TheSTFT':\n        \"\"\"\n    \tAnalysis of a sound using the short-time Fourier transform\n    \tx: input array sound, w: analysis window, N: FFT size, H: hop size\n    \treturns xmX, xpX: magnitude and phase spectra\n    \t\"\"\"\n        window = 'hanning'\n        M = 2048 # window size 512, 1024, 2048, 4096\n        N = 2048 # fft size\n        H = 512 # hop\n        w = get_window(window, M)\n        mX, pX = STFT.stftAnal(xchunk, w, N, H)\n\n        #plot\n        if plot is True:\n            y = STFT.stftSynth(mX, pX, M, H)\n\n            plt.figure(figsize=(12, 9)) # create figure to plot\n            maxplotfreq = 10000.0 # frequency range to plot\n\n            # input sound\n            plt.subplot(4,1,1)\n            plt.plot(np.arange(xchunk.size)/float(fs), xchunk)\n            plt.axis([0, xchunk.size/float(fs), min(xchunk), max(xchunk)])\n            plt.ylabel('amplitude')\n            plt.xlabel('time (sec)')\n            plt.title('input sound: x')\n\n            # magnitude spectrogram\n            plt.subplot(4,1,2)\n            numFrames = int(mX[:,0].size)\n            frmTime = H*np.arange(numFrames)/float(fs)\n            binFreq = fs*np.arange(N*maxplotfreq/fs)/N\n            plt.pcolormesh(frmTime, binFreq, 
np.transpose(np.diff(pX[:,:N*maxplotfreq/fs+1],axis=1)))\n plt.xlabel('time (sec)')\n plt.ylabel('frequency (Hz)')\n plt.title('phase spectrogram (derivative)')\n plt.autoscale(tight=True)\n\n # output sound\n plt.subplot(4,1,4)\n plt.plot(np.arange(y.size)/float(fs), y)\n plt.axis([0, y.size/float(fs), min(y), max(y)])\n plt.ylabel('amplitude')\n plt.xlabel('time (sec)')\n #plt.title('output sound: y')\n '''\n\n plt.tight_layout()\n plt.show()\n\n return [mX.tolist()] #pX.tolist()\n\n # TheSM //////////////////////////////\n if analysis == 'TheSM':\n \"\"\"\n \tAnalysis of a sound using the sinusoidal model with sine tracking\n \tx: input array sound, w: analysis window, N: size of complex spectrum, H: hop-size, t: threshold in negative dB\n \tmaxnSines: maximum number of sines per frame, minSineDur: minimum duration of sines in seconds\n \tfreqDevOffset: minimum frequency deviation at 0Hz, freqDevSlope: slope increase of minimum frequency deviation\n \treturns xtfreq, xtmag, xtphase: frequencies, magnitudes and phases of sinusoidal tracks\n \t\"\"\"\n window='hamming'\n M=2001\n N=2048\n t=-80\n minSineDur=0.02\n maxnSines=150 #150\n freqDevOffset=10\n freqDevSlope=0.001\n Ns = 512\n H = 128\n w = get_window(window, M)\n tfreq, tmag, tphase = SM.sineModelAnal(xchunk, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)\n\n if plot is True:\n y = SM.sineModelSynth(tfreq, tmag, tphase, Ns, H, fs)\n\n # create figure to show plots\n plt.figure(figsize=(12, 9))\n\n # frequency range to plot\n maxplotfreq = 5000.0\n\n # plot the input sound\n plt.subplot(3,1,1)\n plt.plot(np.arange(xchunk.size)/float(fs), xchunk)\n plt.axis([0, xchunk.size/float(fs), min(xchunk), max(xchunk)])\n plt.ylabel('amplitude')\n plt.xlabel('time (sec)')\n plt.title('input sound: x')\n\n # plot the sinusoidal frequencies\n plt.subplot(3,1,2)\n if (tfreq.shape[1] > 0):\n numFrames = tfreq.shape[0]\n frmTime = H*np.arange(numFrames)/float(fs)\n tfreq[tfreq<=0] = np.nan\n plt.plot(frmTime, tfreq)\n plt.axis([0, xchunk.size/float(fs), 0, maxplotfreq])\n plt.title('frequencies of sinusoidal tracks')\n\n # plot the output sound\n plt.subplot(3,1,3)\n plt.plot(np.arange(y.size)/float(fs), y)\n plt.axis([0, y.size/float(fs), min(y), max(y)])\n plt.ylabel('amplitude')\n plt.xlabel('time (sec)')\n plt.title('output sound: y')\n\n plt.tight_layout()\n plt.show()\n\n return [tfreq.tolist(), tmag.tolist(), tphase.tolist()]\n\n # TheF0 //////////////////////////////\n if analysis == 'TheF0':\n \"\"\"\n \tFundamental frequency detection of a sound using twm algorithm\n \tx: input sound; fs: sampling rate; w: analysis window;\n \tN: FFT size; t: threshold in negative dB,\n \tminf0: minimum f0 frequency in Hz, maxf0: maximim f0 frequency in Hz,\n \tf0et: error threshold in the f0 detection (ex: 5),\n \treturns f0: fundamental frequency\n \t\"\"\"\n M=1024\n N=1024\n H = 128\n t=-90\n minf0=130\n maxf0=5000\n f0et=7\n window='blackman'\n w = get_window(window, M)\n f0 = HM.f0Detection(xchunk, fs, w, N, H, t, minf0, maxf0, f0et)\n\n if plot is True:\n #mXr, pXr = STFT.stftAnal(xchunk, w, N, H)\n\n plt.figure(figsize=(12, 9)) # create figure to plot\n maxplotfreq = 4000.0 # frequency range to plot\n\n # input sound\n '''plt.subplot(4,1,1)\n plt.plot(np.arange(xchunk.size)/float(fs), xchunk)\n plt.axis([0, xchunk.size/float(fs), min(xchunk), max(xchunk)])\n plt.ylabel('amplitude')\n plt.xlabel('time (sec)')\n plt.title('input sound: x')'''\n\n # plot the magnitude spectrogram of residual\n '''plt.subplot(3,1,2)\n maxplotbin 
= int(N*maxplotfreq/fs)\n            numFrames = int(mXr[:,0].size)\n            frmTime = H*np.arange(numFrames)/float(fs)\n            binFreq = np.arange(maxplotbin+1)*float(fs)/N\n            plt.pcolormesh(frmTime, binFreq, np.transpose(mXr[:,:maxplotbin+1]))\n            plt.autoscale(tight=True)'''\n\n            # plot the f0 frequencies\n            #plt.subplot(3,1,2)\n            if (f0.size > 0):\n                numFrames = f0.size\n                frmTime = H*np.arange(numFrames)/float(fs)\n                plt.plot(frmTime, f0)\n                plt.axis([0, xchunk.size/float(fs), 0, maxplotfreq])\n                plt.title('frequencies of f0')\n\n            plt.tight_layout()\n            plt.show()\n\n\n        return [f0.tolist()]\n\n\n    # TheHM //////////////////////////////\n    if analysis == 'TheHM':\n        \"\"\"\n    \tAnalysis of a sound using the sinusoidal harmonic model\n    \tx: input sound; fs: sampling rate, w: analysis window; N: FFT size (minimum 512); t: threshold in negative dB,\n    \tnH: maximum number of harmonics; minf0: minimum f0 frequency in Hz,\n    \tmaxf0: maximum f0 frequency in Hz; f0et: error threshold in the f0 detection (ex: 5),\n    \tharmDevSlope: slope of harmonic deviation; minSineDur: minimum length of harmonics\n    \treturns xhfreq, xhmag, xhphase: harmonic frequencies, magnitudes and phases\n    \t\"\"\"\n        window='blackman'\n        M=2048 #1201, 4096\n        N=2048 #2048, 4096\n        t=-90 #-90\n        minSineDur=0.1 #0.1\n        nH=10 #100\n        minf0=30 #130\n        maxf0=3000 #300\n        f0et=7 #7\n        harmDevSlope=0.01 #0.01\n        H = 128 #128\n        w = get_window(window, M)\n        hfreq, hmag, hphase = HM.harmonicModelAnal(xchunk, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope, minSineDur)\n\n        if plot is True:\n            Ns = 512 #512\n            y = SM.sineModelSynth(hfreq, hmag, hphase, Ns, H, fs)\n\n            # create figure to show plots\n            plt.figure(figsize=(12, 9))\n\n            # frequency range to plot\n            maxplotfreq = 2000.0\n\n            # plot the input sound\n            plt.subplot(3,1,1)\n            plt.plot(np.arange(xchunk.size)/float(fs), xchunk)\n            plt.axis([0, xchunk.size/float(fs), min(xchunk), max(xchunk)])\n            plt.ylabel('amplitude')\n            plt.xlabel('time (sec)')\n            plt.title('input sound: x')\n\n            # plot the harmonic frequencies\n            plt.subplot(3,1,2)\n            if (hfreq.shape[1] > 0):\n                numFrames = hfreq.shape[0]\n                frmTime = H*np.arange(numFrames)/float(fs)\n                hfreq[hfreq<=0] = np.nan\n                plt.plot(frmTime, hfreq)\n                plt.axis([0, xchunk.size/float(fs), 0, maxplotfreq])\n                plt.title('frequencies of harmonic tracks')\n\n            # plot the output sound\n            plt.subplot(3,1,3)\n            plt.plot(np.arange(y.size)/float(fs), y)\n            plt.axis([0, y.size/float(fs), min(y), max(y)])\n            plt.ylabel('amplitude')\n            plt.xlabel('time (sec)')\n            plt.title('output sound: y')\n\n            plt.tight_layout()\n            plt.show()\n\n        return [hfreq.tolist(), hmag.tolist(), hphase.tolist()]\n\n    # TheHPR /////////////////////////////\n    if analysis == 'TheHPR':\n        \"\"\"Analysis of a sound using the harmonic plus residual model\n    \tx: input sound, fs: sampling rate, w: analysis window; N: FFT size, t: threshold in negative dB,\n    \tminSineDur: minimum duration of sinusoidal tracks\n    \tnH: maximum number of harmonics; minf0: minimum fundamental frequency in sound\n    \tmaxf0: maximum fundamental frequency in sound; f0et: maximum error accepted in f0 detection algorithm\n    \tharmDevSlope: allowed deviation of harmonic tracks, higher harmonics have higher allowed deviation\n    \treturns hfreq, hmag, hphase: harmonic frequencies, magnitude and phases; xr: residual signal\n    \t\"\"\"\n        window='blackman'\n        M=2048 #601\n        N=2048\n        t=-100\n        minSineDur=0.1\n        nH=48 #100\n        minf0=30 #350\n        maxf0=700\n        f0et=5\n        harmDevSlope=0.01\n        Ns = 512 # needed by hprModelSynth in the plot branch below\n        H = 128 #128\n        w = get_window(window, M)\n        hfreq, hmag, hphase, xr = HPR.hprModelAnal(xchunk, fs, w, N,
H, t, minSineDur, nH, minf0, maxf0, f0et, harmDevSlope)\n\n M = 1024\n N = 1024\n H = 512\n w = get_window(window, M)\n mXr, pXr = STFT.stftAnal(xr, w, N, H)\n\n if plot is True:\n M = 2048\n N = 2048\n H = 128\n w = get_window(window, M)\n mXr, pXr = STFT.stftAnal(xr, w, N, H)\n y, yh = HPR.hprModelSynth(hfreq, hmag, hphase, xr, Ns, H, fs)\n\n # create figure to plot\n plt.figure(figsize=(12, 9))\n\n # frequency range to plot\n maxplotfreq = 5000.0\n\n # plot the input sound\n plt.subplot(3,1,1)\n plt.plot(np.arange(xchunk.size)/float(fs), xchunk)\n plt.axis([0, xchunk.size/float(fs), min(xchunk), max(xchunk)])\n plt.ylabel('amplitude')\n plt.xlabel('time (sec)')\n plt.title('input sound: x')\n\n # plot the magnitude spectrogram of residual\n plt.subplot(3,1,2)\n maxplotbin = int(N*maxplotfreq/fs)\n numFrames = int(mXr[:,0].size)\n frmTime = H*np.arange(numFrames)/float(fs)\n binFreq = np.arange(maxplotbin+1)*float(fs)/N\n plt.pcolormesh(frmTime, binFreq, np.transpose(mXr[:,:maxplotbin+1]))\n plt.autoscale(tight=True)\n\n # plot harmonic frequencies on residual spectrogram\n if (hfreq.shape[1] > 0):\n harms = hfreq*np.less(hfreq,maxplotfreq)\n harms[harms==0] = np.nan\n numFrames = int(harms[:,0].size)\n frmTime = H*np.arange(numFrames)/float(fs)\n plt.plot(frmTime, harms, color='k', ms=3, alpha=1)\n plt.xlabel('time(s)')\n plt.ylabel('frequency(Hz)')\n plt.autoscale(tight=True)\n plt.title('harmonics + residual spectrogram')\n\n # plot the output sound\n plt.subplot(3,1,3)\n plt.plot(np.arange(y.size)/float(fs), y)\n plt.axis([0, y.size/float(fs), min(y), max(y)])\n plt.ylabel('amplitude')\n plt.xlabel('time (sec)')\n plt.title('output sound: y')\n\n plt.tight_layout()\n plt.show()\n\n return [hfreq.tolist(), hmag.tolist(), hphase.tolist(), mXr.tolist(), pXr.tolist()]\n","sub_path":"processor/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":13419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"444366913","text":"# Import Pandas\nimport pandas as pd\n\n# Load your CSV File\ndf = pd.read_csv(\"feeCharges.csv\")\n\n# Set your XLSX file writer\nxlsxWriter = pd.ExcelWriter('feeCharges.xlsx', engine='xlsxwriter')\n\n# Write the Pandas Dataframe/s to a XLSX file\ndf.to_excel(xlsxWriter, 'Dataset 1')\n\n# Write out the file with Python\nxlsxWriter.save()\n","sub_path":"csv2xls.py","file_name":"csv2xls.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"100607813","text":"\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2018 Zuzeng Lin\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\nimport math\nimport numpy as np\nimport instruments\nimport pickle\n\n\ndef create_loop(host=\"Memory\", prefix=\"tuningrec\"):\n    # control loop setup\n    powermeter = instruments.powermeter(host)\n    oko = instruments.Mirror(host, None, \"oko\")\n    # alpao = instruments.Mirror(host, None, \"alpao\")\n    thorlabs = instruments.Mirror(host, None, \"thorlabs\")\n    router = instruments.Router([oko, thorlabs])\n    feedback = instruments.Feedback(powermeter, router, prefix + \"_raw.pkl\")\n\n    oko_znk = instruments.ZNKAdapter(oko)\n    # alpao = instruments.ZNKAdapter(alpao)\n    thorlabs_znk = instruments.ZNKAdapter(thorlabs)\n    router_znk = instruments.Router([ thorlabs_znk], False)\n    feedback_znk = instruments.Feedback(powermeter, router_znk, prefix + \"_znk.pkl\")\n    return feedback, feedback_znk\n\n\ndef load_experiment_record(filename=\"train_dataset.pkl\", sample_rate=1, trunc=None):\n    # fetch file from disk\n    power = []\n    x = []\n    experiment_record = open(filename, \"rb\")\n    i = 0\n    while True:\n        try:\n            ret = pickle.load(experiment_record)\n            i += 1\n            if trunc is not None:\n                if i > trunc:\n                    break\n            if (i % sample_rate == 0):\n                x.append(ret[0])\n                power.append(ret[1])\n        except EOFError:\n            break\n    experiment_record.close()\n    return x, power\n","sub_path":"feedback.py","file_name":"feedback.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"20538490","text":"from tkinter import *\ndef Hello(event):\n    print (\"Yet another hello world\")\n    label1 = Label(text=\"Hello Python\", fg=\"#eee\", bg=\"#333\")\n    label1.pack()\nroot = Tk()\nbtn = Button(root, \n            text=\"Press\", \n            width=30,height=5, \n            bg=\"white\",fg=\"black\") \nbtn.bind(\"<Button-1>\", Hello) \nbtn.pack() \nroot.mainloop()\n\n\n","sub_path":"main23.py","file_name":"main23.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"590571770","text":"#!/usr/bin/python3\n\nimport argparse\nimport http.server\nimport os\nimport socket\nimport socketserver\nimport tarfile\nimport tempfile\n\n\nclass WebServer(socketserver.TCPServer):\n\tallow_reuse_address = True\n\n\tdef server_bind(self) -> None:\n\t\t\"\"\"Bind the server socket to an address.\"\"\"\n\n\t\tsuper().server_bind()\n\t\tself.hostname, self.port = self.socket.getsockname()\n\n\tdef run(self) -> None:\n\t\ttry:\n\t\t\tself.serve_forever()\n\t\texcept KeyboardInterrupt:\n\t\t\tprint(\"\\r\")\n\t\tfinally:\n\t\t\tself.server_close()\n\n\nif __name__ == \"__main__\":\n\n\tparser = argparse.ArgumentParser(description=\"Web server for testing purposes only.\")\n\tparser.add_argument(\"-u\",\n\t\t\"--hostname\",\n\t\tdest=\"hostname\",\n\t\tdefault=\"0.0.0.0\",\n\t\ttype=str,\n\t\thelp=\"The hostname to be used.\")\n\tparser.add_argument(\"-p\", \"--port\", dest=\"port\", default=0, type=int, help=\"Port to be used.\")\n\tparser.add_argument(\"-r\",\n\t\t\"--root\",\n\t\tdest=\"root\",\n\t\tdefault=None,\n\t\ttype=str,\n\t\thelp=\"Prefix path where the files to serve are located.\")\n\tparser.add_argument(\"path\",\n\t\tdefault=\".\",\n\t\tnargs='?',\n\t\thelp=\"Serve files from this specific directory or archive, by default the current directory will be used.\")
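\t# Example invocations (hypothetical paths, shown for illustration only):\n\t#   ./web_server.py --port 8080 ./site         -> serve a directory\n\t#   ./web_server.py ./release.tar.gz            -> extract the archive to a temp dir and serve it\n\t#   ./web_server.py -r docs ./release.tar.gz    -> serve the 'docs' folder inside the archive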
\n\n\targs = parser.parse_args()\n\n\ttempPath = None\n\ttry:\n\n\t\t# If archive is set, serve files from this specific archive\n\t\tif os.path.isfile(args.path):\n\t\t\ttempPath = tempfile.TemporaryDirectory()\n\t\t\tpackage = tarfile.open(args.path)\n\t\t\ttry:\n\t\t\t\tprint(f\"Extracting content of '{args.path}' to temporary directory '{tempPath.name}'.\")\n\t\t\t\tpackage.extractall(tempPath.name)\n\t\t\tfinally:\n\t\t\t\tpackage.close()\n\t\t\tos.chdir(tempPath.name)\n\n\t\t# If special path, serve it\n\t\telif os.path.isdir(args.path):\n\t\t\tos.chdir(args.path)\n\n\t\t# If the root is somewhere else\n\t\tif args.root:\n\t\t\tos.chdir(args.root)\n\n\t\thandler = http.server.SimpleHTTPRequestHandler\n\t\tserver = WebServer((args.hostname, args.port), handler)\n\t\tprint(f\"Web server ready, serving '{args.path}' at 'http://{server.hostname}:{server.port}'.\")\n\t\tserver.run()\n\n\t# Cleanup RAII style\n\tfinally:\n\t\tif tempPath:\n\t\t\tprint(f\"Cleaning up temporary directory '{tempPath.name}'.\")\n\t\t\ttempPath.cleanup()\n","sub_path":"tools/scripts/web_server.py","file_name":"web_server.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"294914873","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 11 21:59:43 2019\n\n@author: bensr\n\"\"\"\n\nfrom run import get_model\nimport logging as log\nimport models\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport json\nimport itertools\nimport pickle as pkl\nimport random\nimport ruamel.yaml as yaml\nimport sys\nimport csv\nstimfn = './stims/chaotic_2.csv'\nstim = np.genfromtxt(stimfn, dtype=np.float32) \nplt.subplots_adjust(hspace=0.3)\ntimes = [0.025*i for i in range(len(stim))]\ncreate_pdfs = True\ndef get_ml_results(short_name,pnames):\n    #data_loc = '/project/m2043/ML4neuron2b/' +short_name +'/cellSpike.sum_pred.yaml\n    data_loc = '/project/m2043/ML4neuron2b/' +short_name +'/cellSpike.sum_pred.yaml'\n    with open(data_loc, 'r') as f:\n        ml_preds = yaml.safe_load(f)\n    all_preds = ml_preds['lossAudit']\n    \n    return all_preds\n    \ndef shorten_param_names(rawMeta):\n    mapD={'_apical':'_api', '_axonal':'_axn','_somatic':'_som','_dend':'_den'}\n    inpL=rawMeta\n    outL=[]\n    print('M: shorten_param_names(), len=',len(inpL))\n    for x in inpL:\n        #print('0x=',x)\n        for k in mapD:\n            x=x.replace(k,mapD[k])\n        x=x.replace('_','.')\n        #print('1x=',x)\n        outL.append(x)\n    return outL\n\ndef get_rec_sec(def_volts,adjusted_param):\n    probes = list(def_volts.keys())\n    rec_sec=adjusted_param\n    if 'soma' in adjusted_param:\n        rec_sec = probes[0]\n    if 'apic' in adjusted_param or 'dend' in adjusted_param:\n        res = [i for i in probes if 'apic' in i or 'dend' in i]\n        rec_sec = res[2] \n    if 'axon' in adjusted_param:\n        res = [i for i in probes if 'axon' in i]\n        rec_sec = res[2] \n    dot_ind = rec_sec.find('.')+1\n    return rec_sec[dot_ind:],rec_sec[:dot_ind] \n\n \ndef check_param_sensitivity(all_volts,adjusted_param,files_loc):\n    print(adjusted_param)\n    def_rec_sec,prefix = get_rec_sec(all_volts[0],adjusted_param)\n    #in probe the first will always be the soma then axon[0] (AIS) then a sec that has mid (0.5) distance\n    #ax1.plot(times,def_volts[:-1],'black')\n    volt_debug = []\n    cum_sum_errs = []\n    if(create_pdfs):\n        fig, (ax1,ax2,ax3)= plt.subplots(3,figsize=(15,15))\n        fig.suptitle(adjusted_param)\n        plt.subplots_adjust(hspace=0.3)\n    \n    for i in range(int(len(all_volts)/2)):\n        \n        volts1 = all_volts[2*i]\n        volts2 = all_volts[2*i + 1]\n        
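        # all_volts stores the simulation runs in consecutive pairs, so\n        # all_volts[2*i] and all_volts[2*i + 1] are the two traces compared for\n        # one sample of this parameter. Their cumulative sums of |V| (scaled by\n        # the 0.025 sampling step) are differenced below; the final value of\n        # that curve is what test_sensitivity() keeps as the sample's ECD.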
curr_rec_sec1,prefix1 = get_rec_sec(volts1,adjusted_param)\n curr_rec_sec2,prefix2 = get_rec_sec(volts2,adjusted_param)\n if (curr_rec_sec1 != curr_rec_sec2):\n print(\"curr_rec_sec is \" + curr_rec_sec1 + 'and curr_rec_sec2 is' + curr_rec_sec2 )\n volts_to_plot1 = volts1.get(prefix1 +def_rec_sec)\n volts_to_plot2 = volts2.get(prefix2 +def_rec_sec)\n volt_debug.append(volts_to_plot1)\n volt_debug.append(volts_to_plot2)\n curr_cum_sum1= np.cumsum(np.abs(volts_to_plot1))*0.025\n curr_cum_sum2= np.cumsum(np.abs(volts_to_plot2))*0.025\n cum_sum_err = curr_cum_sum1 - curr_cum_sum2\n cum_sum_errs.append(cum_sum_err)\n if(create_pdfs):\n err = volts_to_plot1 - volts_to_plot2\n ax1.plot(times,volts_to_plot1[:-1])\n ax1.plot(times,volts_to_plot2[:-1])\n ax2.plot(times,err[:-1])\n ax3.plot(times,cum_sum_err[:-1])\n if(create_pdfs): \n ax1.title.set_text('Volts')\n ax1.set_ylim(-200,+200)\n ax2.title.set_text('error')\n ax3.title.set_text('cum_sum_error')\n fig_name = adjusted_param +'.pdf'\n fig.savefig(files_loc + fig_name)\n volt_debug = np.array(volt_debug)\n return cum_sum_errs\n\n\n\ndef test_sensitivity(files_loc,my_model):\n old_param_names = my_model.PARAM_NAMES\n param_names = shorten_param_names(old_param_names)\n all_ECDS ={}\n all_fns = os.listdir(files_loc)\n for i in range(len(param_names)):\n adjusted_param = old_param_names[i]\n adjusted_param_new_name = param_names[i]\n param_files = [files_loc + fn for fn in all_fns if adjusted_param in fn]\n param_files = [ fn for fn in param_files if '.pkl' in fn]\n all_volts = []\n for fn in param_files:\n with open(fn, 'rb') as f:\n curr_volts = pkl.load(f)\n all_volts = all_volts + curr_volts\n if len(all_volts)>0:\n curr_errs = check_param_sensitivity(all_volts,adjusted_param,files_loc)\n curr_ECDs = [ls[-1] for ls in curr_errs]\n all_ECDS[adjusted_param_new_name]=curr_ECDs\n pkl_fn=files_loc + my_model.m_type + my_model.e_type + 'ECDs.pkl'\n with open(pkl_fn, 'wb') as output:\n pkl.dump(all_ECDS,output)\n pkl.dump(param_names,output)\n return all_ECDS\n\n\ndef analyze_ecds(ECDS,def_vals,files_loc,ml_results):\n ymx = 1000\n threshold = 100\n param_names = list(ECDS.keys())\n pnames_soma = []\n pnames_axon = []\n pnames_dend = []\n means_axon = []\n STDs_axon = []\n means_soma = []\n STDs_soma = []\n means_dend = []\n STDs_dend = []\n ml_STDs_soma = []\n ml_STDs_axon = []\n ml_STDs_dend = []\n ml_STDs_raw_soma = []\n ml_STDs_raw_axon = []\n ml_STDs_raw_dend = []\n nsamples = 0\n params_sensitivity_dict = {}\n param_inds = range(len(param_names))\n for i in param_inds:\n if def_vals[i] <= 0:\n continue\n curr_ecds = ECDS[param_names[i]]\n nsamples = len(curr_ecds)\n curr_mean = np.mean(curr_ecds)\n curr_std = np.std(curr_ecds)\n if ml_results is not None:\n for l in ml_results:\n if str(l[0])== param_names[i]:\n ml_std = np.sqrt((1-(l[2]/0.6)**2))\n ml_std_raw = l[2]\n break\n params_sensitivity_dict[param_names[i]] = [curr_mean,curr_std,ml_std,ml_std_raw]\n else:\n ml_std = []\n ml_std_raw=[]\n params_sensitivity_dict[param_names[i]] = [curr_mean,curr_std]\n if 'som' in param_names[i]:\n pnames_soma.append(param_names[i])\n means_soma.append(curr_mean)\n STDs_soma.append(curr_std)\n ml_STDs_soma.append(ml_std)\n ml_STDs_raw_soma.append(ml_std_raw)\n \n if 'api' in param_names[i] or 'den' in param_names[i]:\n pnames_dend.append(param_names[i])\n means_dend.append(curr_mean)\n STDs_dend.append(curr_std)\n ml_STDs_dend.append(ml_std)\n ml_STDs_raw_dend.append(ml_std_raw)\n if 'axn' in param_names[i]:\n pnames_axon.append(param_names[i])\n 
means_axon.append(curr_mean)\n STDs_axon.append(curr_std)\n ml_STDs_axon.append(ml_std)\n ml_STDs_raw_axon.append(ml_std_raw)\n pkl_fn=files_loc + sys.argv[1] + sys.argv[2] +'mean_std_sensitivity' + '.pkl'\n with open(pkl_fn, 'wb') as output:\n pkl.dump(params_sensitivity_dict,output)\n fig, ((ax_soma,ax_dend),( ax_axon,ax4))= plt.subplots(2,2,figsize=(15,15))\n fig.suptitle('Sensitivity analysis mean/rms ' + sys.argv[1] + sys.argv[2])\n \n ax_axon.title.set_text('Axonal del_ecds')\n means_axon = np.array(means_axon)\n STDs_axon = np.array(STDs_axon)\n yaxis_axon = np.divide(means_axon,STDs_axon)\n #yaxis_axon = np.clip(yaxis_axon,0,1)\n ax_axon.plot(range(len(pnames_axon)),yaxis_axon,'o')\n ax_axon.set_xticks(range(len(pnames_axon)))\n ax_axon.set_xticklabels(pnames_axon,rotation=45)\n ax_axon.grid()\n #ax_axon.set_ylim([0,1])\n ax_axon.set_ylabel('avr/std')\n \n \n ax_soma.title.set_text('somaal del_ecds')\n means_soma = np.array(means_soma)\n STDs_soma = np.array(STDs_soma)\n yaxis_soma = np.divide(means_soma,STDs_soma)\n #yaxis_soma = np.clip(yaxis_soma,0,1)\n ax_soma.plot(range(len(pnames_soma)),yaxis_soma,'o')\n ax_soma.set_xticks(range(len(pnames_soma)))\n ax_soma.set_xticklabels(pnames_soma)\n ax_soma.grid()\n #ax_soma.set_ylim([0,1])\n ax_soma.set_ylabel('avr/std')\n \n ax_dend.title.set_text('dendal del_ecds')\n means_dend = np.array(means_dend)\n STDs_dend = np.array(STDs_dend)\n yaxis_dend = np.divide(means_dend,STDs_dend)\n #yaxis_dend = np.clip(yaxis_dend,0,1)\n ax_dend.plot(range(len(pnames_dend)),yaxis_dend,'o')\n ax_dend.set_xticks(range(len(pnames_dend)))\n ax_dend.set_xticklabels(pnames_dend)\n ax_dend.grid()\n #ax_dend.set_ylim([0,1])\n ax_dend.set_ylabel('avr/std')\n \n \n fig_name = sys.argv[1] + sys.argv[2] + str(nsamples) + 'Analysis_sampling_size.pdf'\n fig.savefig(files_loc + fig_name)\n \n fig1, ax = plt.subplots(1,figsize=(15,15))\n fig1.suptitle('Sensitivity analysis RMS ' + sys.argv[1] + sys.argv[2])\n all_STDs = np.concatenate((STDs_axon,STDs_soma,STDs_dend),axis=None) \n all_STDs = np.clip(all_STDs,0,ymx)\n \n all_pnames = pnames_axon + pnames_soma + pnames_dend\n all_ml_STDs = np.concatenate((ml_STDs_axon,ml_STDs_soma,ml_STDs_dend),axis=None)\n all_ml_STDs = np.array(all_ml_STDs)*ymx\n \n \n ax.title.set_text('Parameters RMS clipped at ' + str(ymx))\n ax.plot(range(len(all_pnames)),all_STDs,'o')\n ax.plot(range(len(all_pnames)),all_ml_STDs,'x',color='red')\n ax.set_xticks(range(len(all_pnames)))\n ax.set_xticklabels(all_pnames,rotation=45)\n ax.axhline(threshold,color='red')\n ax.axvline(len(pnames_axon) + 0.5)\n ax.axvline(len(pnames_axon) +len(pnames_soma) + 0.5)\n ax.grid()\n \n fig_name1 = sys.argv[1] + sys.argv[2] + str(nsamples) + 'Analysis_sensitivity_threshold.pdf'\n fig1.savefig(files_loc + fig_name1)\n #plt.show()\n all_means = np.concatenate((means_axon,means_soma,means_dend),axis=None)\n raw_ml_stds = np.concatenate((ml_STDs_raw_axon,ml_STDs_raw_soma,ml_STDs_raw_dend),axis=None)\n excl_header = ['param_name', 'Mean ECD','STD ECD','Adj ML STD','Raw ML STD']\n# params_sensitivity_dict_csv = {}\n# params_sensitivity_dict_csv['param_name'] = all_pnames\n# params_sensitivity_dict_csv['Mean_ECD'] = all_means\n# params_sensitivity_dict_csv['STD_ECD'] = all_STDs\n# params_sensitivity_dict_csv['Adj_ML_STD'] = raw_ml_stds\n \n excl_fn=files_loc + 'sensitivity' + sys.argv[1] + sys.argv[2] + '.csv'\n with open(excl_fn, 'w',newline='') as out_file:\n writer = csv.writer(out_file)\n writer.writerow(excl_header)\n for (pname,mean,std,ml_std_raw,ml_std) in 
zip(all_pnames,all_means,all_STDs,all_ml_STDs,raw_ml_stds):\n writer.writerow([pname,mean,std,ml_std_raw,ml_std])\n \n return params_sensitivity_dict\n\ndef analyze_ecds_no_ML(ECDS,def_vals,files_loc):\n ymx = 1000\n threshold = 100\n param_names = list(ECDS.keys())\n pnames_soma = []\n pnames_axon = []\n pnames_dend = []\n means_axon = []\n STDs_axon = []\n means_soma = []\n STDs_soma = []\n means_dend = []\n STDs_dend = []\n nsamples = 0\n params_sensitivity_dict = {}\n param_inds = range(len(param_names))\n for i in param_inds:\n if def_vals[i] <= 0:\n continue\n curr_ecds = ECDS[param_names[i]]\n nsamples = len(curr_ecds)\n curr_mean = np.mean(curr_ecds)\n curr_std = np.std(curr_ecds)\n params_sensitivity_dict[param_names[i]] = [curr_mean,curr_std]\n if 'som' in param_names[i]:\n pnames_soma.append(param_names[i])\n means_soma.append(curr_mean)\n STDs_soma.append(curr_std)\n \n if 'api' in param_names[i] or 'den' in param_names[i]:\n pnames_dend.append(param_names[i])\n means_dend.append(curr_mean)\n STDs_dend.append(curr_std)\n if 'axn' in param_names[i]:\n pnames_axon.append(param_names[i])\n means_axon.append(curr_mean)\n STDs_axon.append(curr_std)\n pkl_fn=files_loc + sys.argv[1] + sys.argv[2] +'mean_std_sensitivity' + '.pkl'\n with open(pkl_fn, 'wb') as output:\n pkl.dump(params_sensitivity_dict,output)\n fig, ((ax_soma,ax_dend),( ax_axon,ax4))= plt.subplots(2,2,figsize=(15,15))\n fig.suptitle('Sensitivity analysis mean/rms ' + sys.argv[1] + sys.argv[2])\n \n ax_axon.title.set_text('Axonal del_ecds')\n means_axon = np.array(means_axon)\n STDs_axon = np.array(STDs_axon)\n yaxis_axon = np.divide(means_axon,STDs_axon)\n #yaxis_axon = np.clip(yaxis_axon,0,1)\n ax_axon.plot(range(len(pnames_axon)),yaxis_axon,'o')\n ax_axon.set_xticks(range(len(pnames_axon)))\n ax_axon.set_xticklabels(pnames_axon,rotation=45)\n ax_axon.grid()\n #ax_axon.set_ylim([0,1])\n ax_axon.set_ylabel('avr/std')\n \n \n ax_soma.title.set_text('somaal del_ecds')\n means_soma = np.array(means_soma)\n STDs_soma = np.array(STDs_soma)\n yaxis_soma = np.divide(means_soma,STDs_soma)\n #yaxis_soma = np.clip(yaxis_soma,0,1)\n ax_soma.plot(range(len(pnames_soma)),yaxis_soma,'o')\n ax_soma.set_xticks(range(len(pnames_soma)))\n ax_soma.set_xticklabels(pnames_soma)\n ax_soma.grid()\n #ax_soma.set_ylim([0,1])\n ax_soma.set_ylabel('avr/std')\n \n ax_dend.title.set_text('dendal del_ecds')\n means_dend = np.array(means_dend)\n STDs_dend = np.array(STDs_dend)\n yaxis_dend = np.divide(means_dend,STDs_dend)\n #yaxis_dend = np.clip(yaxis_dend,0,1)\n ax_dend.plot(range(len(pnames_dend)),yaxis_dend,'o')\n ax_dend.set_xticks(range(len(pnames_dend)))\n ax_dend.set_xticklabels(pnames_dend)\n ax_dend.grid()\n #ax_dend.set_ylim([0,1])\n ax_dend.set_ylabel('avr/std')\n \n \n fig_name = sys.argv[1] + sys.argv[2] + str(nsamples) + 'Analysis_sampling_size.pdf'\n fig.savefig(files_loc + fig_name)\n \n fig1, ax = plt.subplots(1,figsize=(15,15))\n fig1.suptitle('Sensitivity analysis RMS ' + sys.argv[1] + sys.argv[2])\n all_STDs = np.concatenate((STDs_axon,STDs_soma,STDs_dend),axis=None) \n all_STDs = np.clip(all_STDs,0,ymx)\n \n all_pnames = pnames_axon + pnames_soma + pnames_dend\n \n \n ax.title.set_text('Parameters RMS clipped at ' + str(ymx))\n ax.plot(range(len(all_pnames)),all_STDs,'o')\n ax.set_xticks(range(len(all_pnames)))\n ax.set_xticklabels(all_pnames,rotation=45)\n ax.axhline(threshold,color='red')\n ax.axvline(len(pnames_axon) + 0.5)\n ax.axvline(len(pnames_axon) +len(pnames_soma) + 0.5)\n ax.grid()\n \n fig_name1 = sys.argv[1] + 
sys.argv[2] + str(nsamples) + 'Analysis_sensitivity_threshold.pdf'\n fig1.savefig(files_loc + fig_name1)\n #plt.show()\n all_means = np.concatenate((means_axon,means_soma,means_dend),axis=None)\n excl_header = ['param_name', 'Mean ECD','STD ECD']\n# params_sensitivity_dict_csv = {}\n# params_sensitivity_dict_csv['param_name'] = all_pnames\n# params_sensitivity_dict_csv['Mean_ECD'] = all_means\n# params_sensitivity_dict_csv['STD_ECD'] = all_STDs\n# params_sensitivity_dict_csv['Adj_ML_STD'] = raw_ml_stds\n \n excl_fn=files_loc + 'sensitivity' + sys.argv[1] + sys.argv[2] + '.csv'\n with open(excl_fn, 'w',newline='') as out_file:\n writer = csv.writer(out_file)\n writer.writerow(excl_header)\n for (pname,mean,std,) in zip(all_pnames,all_means,all_STDs):\n writer.writerow([pname,mean,std])\n \n return params_sensitivity_dict\n\ndef main():\n short_name = None\n m_type = sys.argv[1]\n e_type = sys.argv[2]\n try:\n short_name = sys.argv[3]\n except:\n print('no short name')\n short_name = None \n files_loc = './output/' + m_type + '_' + e_type + '/'\n my_model = get_model('BBP',log,m_type=m_type,e_type=e_type,cell_i=0)\n def_vals = my_model.DEFAULT_PARAMS\n \n ECDS = test_sensitivity(files_loc,my_model)\n #if short_name is not None:\n if short_name is not None:\n ml_results = get_ml_results(short_name,list(ECDS.keys())) \n analyze_ecds(ECDS,def_vals,files_loc,ml_results)\n else:\n analyze_ecds_no_ML(ECDS,def_vals,files_loc)\n \n \nmain()\n","sub_path":"anaylze_sensitivity_cori.py","file_name":"anaylze_sensitivity_cori.py","file_ext":"py","file_size_in_byte":15866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"66864352","text":"#!/usr/bin/env python\n\nimport sys\nimport argparse\nfrom collections import defaultdict\n\nparser = argparse.ArgumentParser(prog='convert-to-biallelic.py', description='cat | python convert-to-biallelic.py ')\nparser.add_argument('vcf', metavar='VCF', help='original VCF containing REF/ALT of each Variant ID.')\nargs = parser.parse_args()\n\n# chromosome -> ID -> [start, REF, ALT] per chromosome\nchrom_to_variants = defaultdict(lambda: defaultdict(list))\n\n# read the biallelic VCF containing REF/ALT for all variant IDs and store them\nfor line in open(args.vcf, 'r'):\n\tif line.startswith('#'):\n\t\tcontinue\n\tfields = line.split()\n\tinfo_field = { i.split('=')[0] : i.split('=')[1] for i in fields[7].split(';') if \"=\" in i}\n\tassert 'ID' in info_field\n\tids = info_field['ID'].split(',')\n\tassert len(ids) == 1\n\tchrom_to_variants[fields[0]][ids[0]] = [fields[1], fields[3], fields[4]]\n\nfor line in sys.stdin:\n\tif line.startswith('#'):\n\t\t# header line\n\t\tif any([i in line for i in ['INFO= 7\n\t# parse the INFO field\n\tinfo_field = { i.split('=')[0] : i.split('=')[1] for i in fields[7].split(';') if \"=\" in i}\n\tassert 'ID' in info_field\n\t# determine ID string belonging to each allele (keep empty string for REF, as it does not have an ID)\n\tallele_to_ids = [''] + info_field['ID'].split(',')\n\tinfo_ids = info_field['ID'].split(',')\n\t# allow bi-allelic records with unknown IDs (that are not in annotation VCF)\n\tif (len(info_ids) == 1) and any([x not in chrom_to_variants[fields[0]] for x in info_ids[0].split(':')]):\n\t\t# unknown ID, leave record as is\n\t\tprint(line[:-1]) # not working because they have several some have several ids.\n\t\tcontinue\n\t# collect all variant IDs in this region\n\tids = set([])\n\tfor i in info_field['ID'].split(','):\n\t\tfor j in 
i.split(':'):\n\t\t\ttry:\n\t\t\t\tids.add((j,int(chrom_to_variants[fields[0]][j][0])))\n\t\t\texcept:\n\t\t\t\tcontinue # so far, it's not working\n\t# sort the ids by the starting coordinate (to ensure the VCF is sorted)\n\tids = list(ids)\n\tids.sort(key=lambda x : x[1])\n\t# create a single, biallelic VCF record for each ID\n\tfor (var_id, coord) in ids:\n\t\tvcf_line = fields[:9]\n\t\t# set start coordinate\n\t\tvcf_line[1] = str(coord)\n\t\t# also add ID to ID column of the VCF\n\t\tvcf_line[2] = var_id\n\t\t# set REF\n\t\tvcf_line[3] = chrom_to_variants[fields[0]][var_id][1]\n\t\t# set ALT\n\t\tvcf_line[4] = chrom_to_variants[fields[0]][var_id][2]\n\t\t# set INFO\n\t\tvcf_line[7] = 'ID=' + var_id\n\t\t# also add other INFO fields (except ID which was replaced)\n\t\tfor k,v in info_field.items():\n\t\t\tif k == 'ID':\n\t\t\t\tcontinue\n\t\t\tif k in ['MA', 'UK']:\n\t\t\t\tvalues = ';' + k + '=' + v\n\t\t\t\tvcf_line[7] = vcf_line[7] + values\n\t\t# keep only GT and GQ\n\t\tvcf_line[8] = 'GT'\n\t\tif 'GQ' in fields[8]:\n\t\t\tvcf_line[8] += ':GQ'\n\t\t# determine the genotype of each sample\n\t\tfor sample_field in fields[9:]:\n\t\t\t# determine position of GT and GQ from FORMAT\n\t\t\tassert 'GT' in fields[8]\n\t\t\tformat_field = fields[8].split(':')\n\t\t\tindex_of_gt = format_field.index('GT')\n\t\t\tgenotype = sample_field.split(':')\n\t\t\tbiallelic_genotype = []\n\t\t\tfor allele in genotype[index_of_gt].replace('|', '/').split('/'):\n\t\t\t\tif allele == '.':\n\t\t\t\t\t# missing allele\n\t\t\t\t\tbiallelic_genotype.append('.')\n\t\t\t\telse:\n\t\t\t\t\tif var_id in allele_to_ids[int(allele)].split(':'):\n\t\t\t\t\t\tbiallelic_genotype.append('1')\n\t\t\t\t\telse:\n\t\t\t\t\t\tbiallelic_genotype.append('0')\n\t\t\tif 'GQ' in fields[8]:\n\t\t\t\tindex_of_gq = format_field.index('GQ')\n\t\t\t\tvcf_line.append('/'.join(biallelic_genotype) + ':' + genotype[index_of_gq])\n\t\t\telse:\n\t\t\t\tvcf_line.append('/'.join(biallelic_genotype))\n\t\tprint('\\t'.join(vcf_line))\n","sub_path":"evaluation_pangenie/scripts/convert-to-biallelic.py","file_name":"convert-to-biallelic.py","file_ext":"py","file_size_in_byte":3777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"528615638","text":"# coding: UTF-8\nimport time\n\nimport train\nfrom sklearn.externals import joblib\nimport os.path\nimport sys\nimport horsetable as ht\nfrom datetime import datetime\n\nif __name__ == '__main__':\n    args = sys.argv\n\n    if len(args) < 4:\n        print('Please provide the required arguments.')\n        sys.exit()\n\n    print('Arguments: ' + ', '.join(args[1:]))\n\n    ymd = args[1]\n    clf_kind = args[2]\n    pkl_file_prefix = args[3]\n\n    start = time.time()\n\n    t_util = train.TrainUtil(clf_kind)\n\n    clf_pkl = train.PKL_FILE_DIR + pkl_file_prefix + train.CLF_PKL_FILE_NAME\n    sc_pkl = train.PKL_FILE_DIR + pkl_file_prefix + train.SC_PKL_FILE_NAME\n    if os.path.isfile(clf_pkl) and os.path.isfile(sc_pkl):\n        t_util.clf = joblib.load(clf_pkl)\n        t_util.sc = joblib.load(sc_pkl)\n    else:\n        print('The pkl files do not exist.')\n        sys.exit()\n\n    ht_util = ht.HorseTableUtil()\n    test_race_keys = ht_util.get_race_keys_except_debut_after_ymd(ymd)\n    if len(test_race_keys) == 0:\n        print('There are no target races for the given date.')\n        sys.exit()\n\n    t_util.print_expected_nagashi_return(test_race_keys)\n\n    elapsed_time = time.time() - start\n    print('elapsed_time:{0}'.format(elapsed_time) + '[sec]')\n    print(datetime.now().strftime(\"%Y/%m/%d
 %H:%M:%S\"))\n","sub_path":"predict_after_ymd_expected_nagashi.py","file_name":"predict_after_ymd_expected_nagashi.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"346293029","text":"#!/usr/bin/env python\n\n# Description: \n#\n# Author: OU Yuyuan \n# Created: 2014-11-16 06:59:29 BJT\n# Last Change: 2014-12-09 15:53:02 BJT\n\nimport os\nimport sys\n\ndef runCmd(cmd):\n    print(cmd)\n    stat = os.system(cmd)\n    if stat != 0:\n        print(\"Error happened when running: \"+cmd)\n        sys.exit()\n\nclass Soda:\n    def __init__(self,datDir):\n        if os.path.isdir(datDir):\n            self.datDir = datDir\n        else:\n            print(datDir+\" doesn't exist! Stop.\")\n            sys.exit()\n        imgDir = \"/home/ou/archive/drawing/soda/\"\n        if not os.path.isdir(imgDir):\n            runCmd(\"mkdir \"+imgDir)\n        self.imgDir = imgDir\n\n    def run(self, scriptname):\n        ext = scriptname.split('.')[-1]\n        if ext == 'ncl':\n            calculator = 'nclrun '\n        elif ext == 'jnl':\n            calculator = 'pyferret -nojnl -script '\n        else:\n            print('Unknown script extension: ' + ext)\n            sys.exit()\n        runCmd(calculator+scriptname+' '+self.datDir+' '+self.imgDir)\n\nsoda = Soda(\"/home/ou/archive/data/soda/post/\")\n#soda.run(\"ohc_global.ncl\")\n\nsoda_pcom = Soda(\"/home/ou/archive/data/soda/pcom_grid/post/\")\nsoda_pcom.run(\"ohc_global.ncl\")\n#soda_pcom.run(\"ohc_global_rm_anthropogenic.ncl\")\n","sub_path":"draw/soda/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"275476611","text":"# ensure our signals get loaded at django bootstrap time\nfrom . import signals\nfrom casexml.apps.case.models import CommCareCase\nfrom custom.succeed.reports import VISIT_SCHEDULE, LAST_INTERACTION_LIST, PM3\nimport fluff\nfrom custom.utils.utils import flat_field\nfrom fluff.filters import CustomFilter\n\n\nLAST_VISIT = {\n    'visit_name': 'last',\n    'days': -1\n}\n\n\ndef get_randomization_date(case):\n    return case['randomization_date']\n\n\ndef get_next_visit(case):\n    actions = list(case['actions'])\n    next_visit = VISIT_SCHEDULE[0]\n    for visit_key, visit in enumerate(VISIT_SCHEDULE):\n        is_ignored = case.get_case_property(visit['ignored_field'])\n        completed = case.get_case_property(visit['completion_field'])\n        if completed != '' and is_ignored is not None and is_ignored.lower() == 'yes':\n            try:\n                next_visit = VISIT_SCHEDULE[visit_key + 1]\n            except IndexError:\n                next_visit = LAST_VISIT\n        else:\n            for key, action in enumerate(actions):\n                if visit['xmlns'] == action['xform_xmlns']:\n                    try:\n                        next_visit = VISIT_SCHEDULE[visit_key + 1]\n                        del actions[key]\n                        break\n                    except IndexError:\n                        next_visit = LAST_VISIT\n    return next_visit\n\n\ndef visit_name(case):\n    next_visit = get_next_visit(case)\n    return next_visit['visit_name']\n\n\ndef visit_days(case):\n    next_visit = get_next_visit(case)\n    return next_visit['days']\n\n\ndef is_active(case):\n    active = 'True'\n    for action in case['actions']:\n        if PM3 == action['xform_xmlns']:\n            active = 'False'\n            break\n    return active\n\n\ndef last_interaction(case):\n    last_inter = None\n    for action in case['actions']:\n        if action['xform_xmlns'] in LAST_INTERACTION_LIST:\n            last_inter = action\n    return last_inter['date']\n\n\ndef get_property(case, property):\n    try:\n        category = case[property]\n    except AttributeError:\n        category = ''\n    return category\n\n\nclass RandomizationDate(fluff.Calculator):\n\n    @fluff.date_emitter\n    def date(self, case):\n        yield {\n            'date':
get_randomization_date(case),\n 'value': 1\n }\n\n\nclass UCLAPatientFluff(fluff.IndicatorDocument):\n\n document_class = CommCareCase\n domains = ('succeed',)\n document_filter = CustomFilter(lambda c: c.type == 'participant')\n deleted_types = ('CommCareCase-Deleted', )\n\n group_by = ('domain', )\n\n name = flat_field(lambda case: case.full_name)\n mrn = flat_field(lambda case: case['mrn'])\n\n owner_id = flat_field(lambda case: case.owner_id)\n user_id = flat_field(lambda case: case.user_id)\n\n bp_category = flat_field(lambda case: get_property(case, 'BP_category'))\n care_site = flat_field(lambda case: get_property(case, 'care_site_display').lower())\n is_active = flat_field(lambda case: is_active(case))\n visit_name = flat_field(lambda case: visit_name(case))\n visit_days = flat_field(lambda case: visit_days(case))\n last_interaction = flat_field(lambda case: last_interaction(case))\n\n emitter = RandomizationDate()\n\n\nUCLAPatientFluffPillow = UCLAPatientFluff.pillow()\n","sub_path":"custom/succeed/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"553456653","text":"'''\nSelectOneFromGroup\n============\n\nUser can select one button from the given ToggleButtons\n'''\n\nfrom kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.uix.boxlayout import BoxLayout\n\nBuilder.load_string(\"\"\"\n \n:\n orientation: 'vertical'\n ToggleButton:\n text: 'Cat'\n group: 'pet'\n ToggleButton:\n text: 'Dog'\n group: 'pet'\n ToggleButton:\n text: 'Snake'\n group: 'pet'\n\n\"\"\")\n\nclass Test(BoxLayout):\n pass\n\n\nclass SelectOneFromGroup(App):\n def build(self):\n return Test()\n\nif __name__ == '__main__':\n SelectOneFromGroup().run()","sub_path":"ToggleButton/SelectOneFromGroup.py","file_name":"SelectOneFromGroup.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"407329442","text":"import distutils.core\nimport os\n\n\n# Recursively collect all files in a given directory.\ndef rcollect(path):\n if not os.path.exists(path):\n raise IOError(\"%s does not exist\" % (path))\n return sum(map(lambda x: map(lambda y: x[0] + \"/\" + y, x[2]),\n os.walk(path)),\n [])\n\n\ndef copy_with_dir(files, base):\n return [(base + \"/\" + os.path.dirname(f), [f]) for f in files]\n\n# Build up a list of extra files to install.\n#\n# Include the example configuration files.\ndata_files_list = [(\"share/tangelo/conf\", [\"conf/tangelo.conf.global\",\n \"conf/tangelo.conf.local\"]),\n (\"share/tangelo\", [\"images/tangelo.ico\"])]\n\n# Include the website base files.\ndata_files_list += copy_with_dir(rcollect(\"web\"), \"share/tangelo\")\n\n# Create the package.\ndistutils.core.setup(name=\"tangelo\",\n version=\"0.5-dev1\",\n author=\"Kitware, Inc.\",\n author_email=\"tangelo-users@public.kitware.com\",\n url=\"http://kitware.github.io/tangelo\",\n packages=[\"tangelo\",\n \"tangelo.autobahn\",\n \"tangelo.ws4py\",\n \"tangelo.ws4py.server\"],\n scripts=[\"bin/tangelo\",\n \"bin/tangelo-passwd\",\n \"bin/vtkweb-launcher.py\"],\n data_files=data_files_list,\n description=\"Tangelo Web Framework\",\n long_description=\"Tangelo is a Python-based web \" +\n \"server framework bundled with clientside tools \" +\n \"to help you supercharge your web applications \" +\n \"with the power of Python\",\n license=\"Apache License, Version 2.0\",\n platforms=[\"Linux\", \"OS X\", 
\"Windows\"],\n install_requires=[\"cherrypy >= 3.2\",\n \"Twisted >= 13.2\"])\n","sub_path":"tangelo/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"147705880","text":"import os\nimport pandas\nimport numpy as np\nimport cv2\nimport json\nfrom scipy.spatial.transform import Rotation as R\nimport panutils\n\n\ndef GetNormal(v1, v2, v3):\n a = v1 - v2\n b = v1 - v3\n\n return np.cross(a, b)\n\n\ndef GetAvg(v1, v2, v3):\n a = (v1 + v2 + v3) / 3\n\n return a\n\n\ndef load_json(file_name):\n with open(file_name) as cfile:\n calib = json.load(cfile)\n return calib\n\n\ndef scale_translate_cameras(calib, cam_pos_x, cam_pos_z):\n cameras = {(cam['panel'], cam['node']): cam for cam in calib['cameras']}\n\n for k, cam in cameras.items():\n cam['K'] = np.matrix(cam['K'])\n cam['distCoef'] = np.array(cam['distCoef'])\n cam['R'] = np.matrix(cam['R'])\n cam['t'] = np.array(cam['t'])\n\n camera_positions = []\n\n for cam in sel_cams:\n current_cam = cameras[0, cam]\n cc = (-current_cam['R'].transpose() * current_cam['t'])\n pos_x = cc[0][0, 0]\n pos_y = cc[1][0, 0]\n pos_z = cc[2][0, 0]\n camera_positions.append([[pos_x], [pos_y], [pos_z]])\n\n camera_positions = np.array(camera_positions).T.reshape(3, 4)\n\n cam0_x = camera_positions[0][0]\n cam0_z = camera_positions[2][0]\n\n camera_positions[0] = camera_positions[0] - cam0_x\n camera_positions[2] = camera_positions[2] - cam0_z\n\n camera_positions = camera_positions * scale_multiplier\n\n offset_x = cam_pos_x - camera_positions[0][0]\n offset_z = cam_pos_z - camera_positions[2][0]\n\n camera_positions[0] = camera_positions[0] + offset_x\n camera_positions[2] = camera_positions[2] + offset_z\n\n return camera_positions, (cam0_x, cam0_z)\n\n\ndef draw_cameras(camera_positions, img, sel_cams):\n\n cam_positions = {}\n\n for i in range(len(camera_positions[1])):\n cam_pos_x = int(camera_positions[0][i])\n cam_pos_z = int(camera_positions[2][i])\n\n camNo = sel_cams[i]\n\n cv2.circle(img, (cam_pos_x, cam_pos_z), 10, (0, 0, 255), -1)\n cv2.putText(img, 'Cam_' + str(camNo), (cam_pos_x - 45, cam_pos_z + 35), font, 0.75, (255, 255, 255), 2,\n cv2.LINE_AA)\n\n cam_positions[camNo] = [cam_pos_x, cam_pos_z]\n\n return cam_positions\n\n\ndef draw_ground_truth(img, init_cam_x, init_cam_z, cam_pos_x, cam_pos_z, calib):\n try:\n gt_pose_file = os.path.join(cwd, \"hdFace3d/faceRecon3D_hd\" + frameNo + \".json\")\n with open(gt_pose_file) as dfile:\n fframe = json.load(dfile)\n except IOError as e:\n print('Error reading {0}\\n'.format(gt_pose_file) + e.strerror)\n i = 0\n cameras = {(cam['panel'], cam['node']): cam for cam in calib['cameras']}\n cam = cameras[0, 0]\n for face in fframe['people']:\n face3d = np.array(face['face70']['landmarks']).reshape((-1, 3)).transpose()\n face2d = panutils.projectPoints(face3d, cam['K'], cam['R'], cam['t'], cam['distCoef'])\n face2d = face2d[0:2, :]\n _, rvec, tvec = cv2.solvePnP(face3d.T, face2d.T, cameraMatrix=cam['K'], distCoeffs=cam['distCoef'])\n color = cv2.cvtColor(np.uint8([[[(130 // len(fframe['people']) * i) + 10, 255, 255]]]), cv2.COLOR_HSV2BGR)\n color = (int(color[0, 0, 0]), int(color[0, 0, 1]), int(color[0, 0, 2]))\n\n [a, b, c] = face3d\n [start_x, _, start_z] = [np.average(a), np.average(b), np.average(c)]\n start_x = int((start_x - init_cam_x) * scale_multiplier)\n start_z = int((start_z - init_cam_z) * scale_multiplier)\n start_x += cam_pos_x\n start_z += cam_pos_z\n top_left = 
(int(start_x - 6), int(start_z - 6))\n bottom_right = (int(start_x + 6), int(start_z + 6))\n cv2.rectangle(img, top_left, bottom_right, color, -1)\n arrow_vec = np.array([0, 0, 1])*30\n r = R.from_rotvec(rvec.T)\n arrow_vec = r.apply(arrow_vec)\n\n [end_x, _, end_z] = [start_x, 0, start_z] + arrow_vec[0]\n cv2.arrowedLine(img, (int(start_x), int(start_z)), (int(end_x), int(end_z)), color)\n i += 1\n points = []\n '''\n for point in face_points[0]:\n point_x = face3d[0, point]\n point_y = face3d[1, point]\n point_z = face3d[2, point]\n\n cam_rel_x = (face3d[0, point] - init_cam_x) * scale_multiplier\n cam_rel_z = (face3d[2, point] - init_cam_z) * scale_multiplier\n\n cam_rel_x += cam_pos_x\n cam_rel_z += cam_pos_z\n\n cv2.circle(img, (int(cam_rel_x), int(cam_rel_z)), 2, color, -1)\n points.append([point_x, point_y, point_z])\n\n points = np.array(points)\n c, normal = fitPlaneLTSQ(points)\n [a, b, c] = face3d[:, face_points[0]]\n [start_x, _, start_z] = [np.average(a), np.average(b), np.average(c)]\n n = normal / np.linalg.norm(normal) * 30\n start_x = int((start_x - init_cam_x) * scale_multiplier)\n start_z = int((start_z - init_cam_z) * scale_multiplier)\n start_x += cam_pos_x\n start_z += cam_pos_z\n x = int((n[0] - init_cam_x) * scale_multiplier)\n z = int((n[2] - init_cam_z) * scale_multiplier)\n x += cam_pos_x\n z += cam_pos_z\n cv2.arrowedLine(img, (start_x, start_z), (x, z), (127, 255, 0))\n \n for tri in face_tri:\n [a, b, c] = face3d[:, tri].T\n n = GetNormal(a, b, c)\n n = n / np.linalg.norm(n) * 30\n\n [start_x, _, start_z] = GetAvg(a, b, c)\n start_x = int((start_x - init_cam_x) * scale_multiplier)\n start_z = int((start_z - init_cam_z) * scale_multiplier)\n start_x += cam_pos_x\n start_z += cam_pos_z\n\n x = int((n[0] - init_cam_x) * scale_multiplier)\n z = int((n[2] - init_cam_z) * scale_multiplier)\n x += cam_pos_x\n z += cam_pos_z\n\n cv2.line(img, (start_x, start_z), (x, z), (127, 127, 127))\n '''\n return None\n\n\ndef fitPlaneLTSQ(XYZ):\n (rows, cols) = XYZ.shape\n G = np.ones((rows, 3))\n G[:, 0] = XYZ[:, 0] # X\n G[:, 1] = XYZ[:, 1] # Y\n Z = XYZ[:, 2]\n (a, b, c), resid, rank, s = np.linalg.lstsq(G, Z)\n normal = (a, b, -1)\n nn = np.linalg.norm(normal)\n normal = normal / nn\n return (c, normal)\n\n\ncwd = os.getcwd()\n\nsel_cams = [0, 8, 15, 23]\nframes = [\"00001147\", \"00001613\", \"00002319\", \"00003476\", \"00003961\", \"00004905\", \"00005777\", \"00006078\", \"00006328\",\n \"00006577\"]\nframeNo = frames[5]\n\nface_edges = np.array([[0,1],[1,2],[2,3],[3,4],[4,5],[5,6],[6,7],[7,8],[8,9],[9,10],[11,12],[12,13],[14,15],[15,16], #outline (ignored)\n [17,18],[18,19],[19,20],[20,21], #right eyebrow\n [22,23],[23,24],[24,25],[25,26], #left eyebrow\n [27,28],[28,29],[29,30], #nose upper part\n [31,32],[32,33],[33,34],[34,35], #nose lower part\n [36,37],[37,38],[38,39],[39,40],[40,41],[41,36], #right eye\n [42,43],[43,44],[44,45],[45,46],[46,47],[47,42], #left eye\n [48,49],[49,50],[50,51],[51,52],[52,53],[53,54],[54,55],[55,56],[56,57],[57,58],[58,59],[59,48], #Lip outline\n [60,61],[61,62],[62,63],[63,64],[64,65],[65,66],[66,67],[67,60] #Lip inner line \n ])\n\nface_edges = np.array([[36, 37], [37, 38], [38, 39], [39, 40], [40, 41], [41, 36],\n [42, 43], [43, 44], [44, 45], [45, 46], [46, 47], [47, 42]])\nface_tri = np.array(\n [[36, 37, 38], [37, 38, 39], [38, 39, 40], [39, 40, 41], [42, 43, 44], [43, 44, 45], [44, 45, 46], [45, 46, 47]])\nface_points = np.array([range(36,42), range(42,48)])\n\ncam_pos_x = 200\ncam_pos_z = 400\nscale_multiplier = 1.3\n\ncalib = 
load_json(\"calibration_160906_pizza1.json\")\n\nimg = np.zeros((850, 960, 3), np.uint8)\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\n\ncamera_positions, (init_cam_x, init_cam_z) = scale_translate_cameras(calib, cam_pos_x, cam_pos_z)\n\n\ncamNo = '{0:02d}'.format(sel_cams[0])\n\ncam_positions = draw_cameras(camera_positions, img, sel_cams)\n\nsaveFile = os.path.join(cwd, \"TopDown/Frame_\" + frameNo + \".jpg\")\n\ncv2.arrowedLine(img, (20, 20), (50, 20), (255, 0, 0), 2)\ncv2.putText(img, \"X\", (55, 25), font, 0.5, (255, 255, 255), 2)\n\ncv2.arrowedLine(img, (20, 20), (20, 50), (0, 255, 0), 2)\ncv2.putText(img, \"Z\", (15, 65), font, 0.5, (255, 255, 255), 2)\n\n#draw_ground_truth(img, init_cam_x, init_cam_z, cam_pos_x, cam_pos_z, calib)\n\n#cv2.line(img, (940, 20), (940, 470), (255, 255, 255), 2)\n#cv2.line(img, (930, 470), (950, 470), (255, 255, 255), 2)\n#cv2.line(img, (930, 20), (950, 20), (255, 255, 255), 2)\n#cv2.putText(img, str(max_z) + \"mm\", (850, 225), font, 0.5, (255, 255, 255), 2)\n\nfor camNo in sel_cams:\n [cam_x, cam_z] = cam_positions[camNo]\n camNo = '{0:02d}'.format(camNo)\n\n OpenFace_File = \"hd_00_\"+camNo+\"/processed/\" + frameNo + \".csv\"\n df = pandas.read_csv(OpenFace_File)\n\n poses = df.values.tolist()\n\n people_pose = np.zeros((len(poses), 2))\n people_rot = np.zeros((len(poses), 3))\n people_conf = np.zeros(len(poses))\n\n max_z = 0\n\n # ---- Draw predicted people ----\n\n for i, person in enumerate(poses):\n people_pose[i, 0] = person[2]\n people_pose[i, 1] = person[4]\n people_conf[i] = person[1]\n if people_pose[i, 1] > max_z:\n max_z = people_pose[i, 1]\n people_rot[i, 0] = person[5]\n people_rot[i, 1] = person[6]\n people_rot[i, 2] = person[7]\n\n people_pose = np.divide(people_pose, max_z)\n people_pose = np.multiply(people_pose, 450)\n\n for i in range(len(poses)):\n\n x = int(people_pose[i, 0])\n z = int(people_pose[i, 1])\n rot_x = people_rot[i, 0]\n rot_y = people_rot[i, 1]\n rot_z = people_rot[i, 2]\n\n x = (+x + cam_pos_z)\n z = (cam_pos_x + z)\n\n color = cv2.cvtColor(np.uint8([[[(130 // len(poses) * i) + 10, 255, 255]]]), cv2.COLOR_HSV2BGR)\n color = (int(color[0, 0, 0]), int(color[0, 0, 1]), int(color[0, 0, 2]))\n if people_conf[i] < 0.5:\n color = (127, 127, 127)\n cv2.circle(img, (z, x), 5, color, -1)\n cv2.putText(img, \"P\" + str(i), (z - 10, x + 20), font, 0.5, color, 2)\n\n r = R.from_rotvec((0, rot_y, 0))\n P1 = np.array((+1, 0, 0))\n\n P2 = r.apply(P1)\n\n print(P2)\n\n P2 = np.multiply(P2, 50)\n P2 = P2.astype(int)\n start_point = (z, x)\n end_point = (z - P2[0], x + P2[2])\n cv2.arrowedLine(img, start_point, end_point, color, 2)\n\n cv2.imwrite(saveFile, img)\n cv2.imshow(\"Frame\", img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n","sub_path":"python/Top_Down_main.py","file_name":"Top_Down_main.py","file_ext":"py","file_size_in_byte":10448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"307151172","text":"from __future__ import print_function\n\nimport numpy as np\nimport pandas\nfrom scipy.io import loadmat\n\nstim_names = np.array([\n 'Phi',\n 'Phi',\n 'RPhi',\n 'RPhi',\n 'Glider Random',\n 'Glider Random',\n 'Glider 1P',\n 'Glider 1P',\n 'Glider 1N',\n 'Glider 1N',\n 'Glider 2P',\n 'Glider 2P',\n 'Glider 2N',\n 'Glider 2N',\n 'Glider 3P',\n 'Glider 3P',\n 'Glider 3N',\n 'Glider 3N',\n])\n\nstim_dirs = np.tile(['PD', 'ND'], 9)\n\nif __name__ == \"__main__\":\n\n print(\"Processing...\")\n\n data = loadmat('out.mat')['stim_avgs'].flatten()\n lengths = [x.shape[0] for x in data]\n 
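# The per-stimulus averages are ragged (trials can differ in sample count),\n    # so every trace is truncated to the shortest one before being stacked\n    # into a single rectangular (stimulus, fly, time) array below.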
min_length = np.min(lengths)\n\n buffer = []\n\n for idx in range(len(data)):\n buffer.append(data[idx][:min_length, :])\n\n data_array = np.array(buffer).swapaxes(1, 2)\n print(data_array.shape)\n np.save('raw.npy', data_array)\n\n print(\"Generating data frame...\")\n\n # Info #1: Stimulus type\n _types = np.tile(stim_names, (data_array.shape[2], data_array.shape[1], 1)).swapaxes(0, 2)\n\n # Info #1.5: Stimulus direction\n _dirs = np.tile(stim_dirs, (data_array.shape[2], data_array.shape[1], 1)).swapaxes(0, 2)\n\n # Info #2: Fly ID\n base = np.arange(1, data_array.shape[1]+1)\n _flyid = np.tile(base, (data_array.shape[2], data_array.shape[0], 1)).swapaxes(0, 2).swapaxes(0, 1)\n\n # Info #3: Time\n base = np.linspace(0., 3., num=data_array.shape[2])\n _time = np.tile(base, (data_array.shape[1], data_array.shape[0], 1)).swapaxes(0, 1)\n\n coll = {\n 'Stimulus': _types.flatten(),\n 'Direction': _dirs.flatten(),\n 'Fly': _flyid.flatten(),\n 'Time': _time.flatten(),\n 'Voltage': data_array.flatten(),\n }\n df = pandas.DataFrame(coll).set_index(['Stimulus', 'Direction', 'Fly', 'Time'])\n\n df.to_pickle('./processed.data')","sub_path":"figures/assets/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"450219665","text":"#!/usr/bin/env python\n# -*- coding utf8 -*-\n\nimport os\nimport getpass\nimport subprocess\nimport tkinter as tk\nfrom tkinter import *\nfrom PIL import ImageTk\n\nroot = tk.Tk()\n\nuserprofile = str(os.environ['USERPROFILE'])\n\ndef log_use(program_used):# not currently in use\n \"\"\"Check which programs are being used the most - or even at all\"\"\"\n try:\n os.popen(\"python \" + userprofile + \"/Documents/python/gui_interface/log_uses.py \" + program_used)\n except:\n pass\n\n\n## These are the defs for menubar tabs ##\ndef make_shortcut(Var):\n os.system(\"start \" + userprofile +\"/Documents/python/InstallnRemove/make_desktop_shortcut.vbs\")\n print(Var)\n\ndef install_depends(Var):\n os.system(\"start \" + userprofile +\"/Documents/python/InstallnRemove/install_depend.bat\" )\n print(Var)\n\ndef playsnake(Var):\n os.popen(\"pythonw \" + userprofile +\"/Documents/python/pygame/snake.py\" )\n print(Var)\n\ndef microsoftRewards(Var):\n os.system(\"python \" + userprofile +\"/Documents/python/extra/microsoft_rewards.py\" )\n print(Var)\n\nclass Application(tk.Frame):\n \"\"\"Main Loop for the button creation and commands\"\"\"\n def __init__(self, master=None):\n root.title('Welcome back ' + str(getpass.getuser()).capitalize() + '!')\n root.iconbitmap(r''+ userprofile +'/Documents/python/icons/serpent_icon.ico')\n\n # File Menu Bar Tab\n menubar = Menu(root)\n filemenu = Menu(menubar, tearoff=0)\n filemenu.add_command(label='Desktop Shortcut', command=lambda: make_shortcut(\"Making Shortcut\"))\n filemenu.add_command(label='Install Dependencies', command=lambda: install_depends(\"Installing Modules\"))\n #filemenu.add_command(label='Update', command=lambda: update_user_code(\"Updating Code\"))\n filemenu.add_command(label=\"Exit\", command=root.destroy)\n menubar.add_cascade(label=\"File\", menu=filemenu)\n\n # Powershell Menu Bar Tab\n #filemenu2 = Menu(menubar, tearoff=0)\n #menubar.add_cascade(label=\"Powershell\", menu=filemenu2)\n\n # Extras Menu Bar Tab\n filemenu3 = Menu(menubar, tearoff=0)\n filemenu3.add_command(label='Snake', command=lambda: playsnake(\"Running Game!\"))\n filemenu3.add_command(label='Microsoft Rewards', 
command=lambda: microsoftRewards(\"Launching Rewards!\"))\n menubar.add_cascade(label=\"Extras\", menu=filemenu3)\n \n root.config(menu=menubar)\n super().__init__(master)\n self.pack()\n self.weather_grab_button()\n self.pinger_button()\n self.tstation_button()\n self.port_scanner_button()\n\n\n## These create the buttons ##\n def weather_grab_button(self):\n \"\"\"Weather Grabber launch button\"\"\"\n image = ImageTk.PhotoImage(file=userprofile + \"/Documents/python/icons/weather.png\")\n b = tk.Button(self, text=\"Current Weather\", image=image, compound=\"top\")\n b.config(image=image)\n b.image = image\n b[\"command\"] = self.weather_grab_launch\n b.pack(padx=3, pady=10, side='left')\n\n def pinger_button(self):\n \"\"\"Multiple pinger\"\"\"\n b = tk.Button(root)\n image = ImageTk.PhotoImage(file=userprofile + \"/Documents/python/icons/pinger.png\")\n b = tk.Button(self, text=\"Multiple Pinger\", image=image, compound=\"top\")\n b.config(image=image)\n b.image = image\n b[\"command\"] = self.multiplepinger_launch\n b.pack(padx=3, pady=10, side='left')\n\n def tstation_button(self):\n \"\"\"Translation Station\"\"\"\n b = tk.Button(root)\n image = ImageTk.PhotoImage(file=userprofile + \"/Documents/python/icons/tstation_icon.png \")\n b = tk.Button(self, text=\"Translation Station\", image=image, compound=\"top\")\n b.config(image=image)\n b.image = image\n b[\"command\"] = self.tstation_launch\n b.pack(padx=3, pady=10, side='left')\n\n def port_scanner_button(self):\n \"\"\"Port Scanner\"\"\"\n b = tk.Button(root)\n image = ImageTk.PhotoImage(file=userprofile + \"/Documents/python/icons/ninja_scan.png\")\n b = tk.Button(self, text=\"Port Scanner\", image=image, compound=\"top\")\n b.config(image=image)\n b.image = image\n b[\"command\"] = self.port_scanner_launch\n b.pack(padx=3, pady=10, side='left')\n\n## These launch the button programs ##\n def weather_grab_launch(self):\n \"\"\"Weather Grabber Launcher\"\"\"\n os.popen(\"python \" + userprofile + \"/Documents/python/weather/guiWeather.py\")\n\n def multiplepinger_launch(self):\n \"\"\"Multiple Pinger Launcher\"\"\"\n os.system(\"start \" + userprofile + \"/Documents/python/gui_interface/bat_launchers/pinglaunch.bat\")\n \n def tstation_launch(self):\n \"\"\"Translation Station Launcher\"\"\"\n os.system(\"start \" + userprofile + \"/Documents/python/gui_interface/bat_launchers/launch_tstation.bat\")\n\n def port_scanner_launch(self):\n \"\"\"Port Scanner Launcher\"\"\"\n os.popen(\"start \" + userprofile + \"/Documents/python/gui_interface/bat_launchers/portScanner.bat\")\n##########################################################\n\napp = Application(master=root)\napp.mainloop()","sub_path":"gui_interface/program_launcher.py","file_name":"program_launcher.py","file_ext":"py","file_size_in_byte":5118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"426861141","text":"\"\"\"\nThis file implements Numpy framework of the\nsimulator. 
Its main use is in conjunction with the :py:mod:`optimizer`\nmodule, and example programs are listed in the :py:mod:`simulator` module.\n\"\"\"\n\nimport numpy as np\nimport copy\nimport qtree.operators as ops\nimport qtree.optimizer as opt\nimport qtree.utils as utils\n\n\ndef get_np_buckets(buckets, data_dict):\n    \"\"\"\n    Takes buckets and returns their Numpy counterparts.\n\n    Parameters\n    ----------\n    buckets : list of list\n          buckets as returned by :py:meth:`circ2buckets`\n          and :py:meth:`reorder_buckets`.\n    data_dict : dict\n          dictionary containing values for the placeholder Tensors\n    Returns\n    -------\n    np_buckets : list of lists\n       Buckets having Numpy tensors in place of gate labels\n    \"\"\"\n    # import pdb\n    # pdb.set_trace()\n\n    # Create numpy buckets\n    np_buckets = []\n    for bucket in buckets:\n        np_bucket = []\n        for tensor in bucket:\n            # sort tensor dimensions\n            transpose_order = np.argsort(list(map(int, tensor.indices)))\n            data = data_dict[tensor.data_key]\n\n            new_tensor = tensor.copy(\n                indices=(tensor.indices[pp] for pp in transpose_order),\n                data=np.transpose(data.copy(), transpose_order))\n\n            np_bucket.append(new_tensor)\n        np_buckets.append(np_bucket)\n\n    return np_buckets\n\n\ndef slice_np_buckets(np_buckets, slice_dict):\n    \"\"\"\n    Takes slices of the tensors in Numpy buckets\n    over the variables in idx_parallel.\n\n    Parameters\n    ----------\n    np_buckets : list of lists\n          Buckets containing Numpy tensors\n    slice_dict : dict\n          Current subtensor along the sliced variables\n          in the form {variable: slice}\n    Returns\n    -------\n    sliced_buckets : list of lists\n          buckets with sliced tensors\n    \"\"\"\n    # import pdb\n    # pdb.set_trace()\n\n    # Create sliced np buckets from the unordered buckets\n    sliced_buckets = []\n    for bucket in np_buckets:\n        sliced_bucket = []\n        for tensor in bucket:\n            slice_bounds = []\n            for idx in tensor.indices:\n                try:\n                    slice_bounds.append(slice_dict[idx])\n                except KeyError:\n                    slice_bounds.append(slice(None))\n            sliced_bucket.append(\n                tensor.copy(data=tensor.data[tuple(slice_bounds)])\n            )\n        sliced_buckets.append(sliced_bucket)\n\n    return sliced_buckets\n\n\ndef get_sliced_np_buckets(buckets, data_dict, slice_dict):\n    \"\"\"\n    Takes placeholder buckets and populates them with\n    actual sliced values.
This function is a sum of\n :func:`get_np_buckets` and :func:`slice_np_buckets`\n\n Parameters\n ----------\n buckets : list of list\n buckets as returned by :py:meth:`circ2buckets`\n and :py:meth:`reorder_buckets`.\n data_dict : dict\n dictionary containing values for the placeholder Tensors\n slice_dict : dict\n Current subtensor along the sliced variables\n in the form {variable: slice}\n Returns\n -------\n sliced_buckets : list of lists\n buckets with sliced Numpy tensors\n \"\"\"\n # import pdb\n # pdb.set_trace()\n\n # Create np buckets from buckets\n sliced_buckets = []\n for bucket in buckets:\n sliced_bucket = []\n for tensor in bucket:\n # get data\n # sort tensor dimensions\n transpose_order = np.argsort(list(map(int, tensor.indices)))\n data = np.transpose(data_dict[tensor.data_key],\n transpose_order)\n # transpose indices\n indices_sorted = [tensor.indices[pp]\n for pp in transpose_order]\n\n # slice data\n slice_bounds = []\n for idx in indices_sorted:\n try:\n slice_bounds.append(slice_dict[idx])\n except KeyError:\n slice_bounds.append(slice(None))\n\n data = data[tuple(slice_bounds)]\n\n # update indices\n indices_sliced = [idx.copy(size=size) for idx, size in\n zip(indices_sorted, data.shape)]\n\n sliced_bucket.append(\n tensor.copy(indices=indices_sliced, data=data))\n sliced_buckets.append(sliced_bucket)\n\n return sliced_buckets\n\n\ndef process_bucket_np(bucket, no_sum=False):\n \"\"\"\n Process bucket in the bucket elimination algorithm.\n We multiply all tensors in the bucket and sum over the\n variable which the bucket corresponds to. This way the\n variable of the bucket is removed from the expression.\n\n Parameters\n ----------\n bucket : list\n List containing tuples of tensors (gates) with their indices.\n\n no_sum : bool\n If no summation should be done over the buckets's variable\n\n Returns\n -------\n tensor : optimizer.Tensor\n wrapper tensor object holding the result\n \"\"\"\n result_indices = bucket[0].indices\n result_data = bucket[0].data\n\n for tensor in bucket[1:]:\n expr = utils.get_einsum_expr(\n list(map(int, result_indices)), list(map(int, tensor.indices))\n )\n\n result_data = np.einsum(expr, result_data, tensor.data)\n\n # Merge and sort indices and shapes\n result_indices = tuple(sorted(\n set(result_indices + tensor.indices),\n key=int)\n )\n\n if len(result_indices) > 0:\n if not no_sum: # trim first index\n first_index, *result_indices = result_indices\n else:\n first_index, *_ = result_indices\n tag = first_index.identity\n else:\n tag = 'f'\n result_indices = []\n\n # reduce\n if no_sum:\n result = opt.Tensor(f'E{tag}', result_indices,\n data=result_data)\n else:\n result = opt.Tensor(f'E{tag}', result_indices,\n data=np.sum(result_data, axis=0))\n return result\n","sub_path":"qtree/np_framework.py","file_name":"np_framework.py","file_ext":"py","file_size_in_byte":6074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"177735593","text":"import re\n\ndef create_main_csv():\n output_file = open(file = './line_numbers/output_file.csv', mode='w', encoding='utf8') \n output_file.write('ELEMENT,DESCRIPTION,x_1,y_1,z_1,d_1,j_1,x_2,y_2,z_2,d_2,j_2,c_x,c_y,c_z,b_x,b_y,b_z,b_d,angle\\n')\n\n with open(file = './line_numbers/zvcbzbc.pcf', mode='r', encoding='utf8') as f:\n\n element = ''\n description = ''\n x_1, y_1, z_1, d_1, j_1 = (\"\", \"\", \"\", \"\", \"\")\n x_2, y_2, z_2, d_2, j_2 = (\"\", \"\", \"\", \"\", \"\")\n b_x, b_y, b_z, b_d, b_d = (\"\", \"\", \"\", \"\", \"\")\n c_x, 
c_y, c_z = (\"\", \"\", \"\") \n angle = \"\" \n\n \n\n\n element_expression = re.compile('^(ELBOW|PIPE|TEE|FLANGE|GASKET|OLET)\\n')\n no_element_expression = re.compile('^(WELD|BOLT)\\n')\n node_expression = re.compile('^\\s+(CENTRE|END|BRANCH1)-POINT\\s+(\\d+).\\d+\\s+(\\d+).\\d+\\s+(\\d+).\\d+\\s{0,}(\\d{0,}.\\d{0,})\\s{0,}(\\w{0,})\\n') \n description_expression = re.compile('^\\s+ITEM-DESCRIPTION\\s+([\\w\\s,.#\"\\/]+)\\n') \n angle_expression = re.compile('^\\s+ANGLE\\s+(\\d+)\\n')\n\n first_coincidence = False\n founded = False\n end_point_counter = 1\n\n\n for line in f:\n #Si se encuentra con la palabra MATERIALS se corta el programa\n if line == 'MATERIALS\\n': \n output_file.write(f'{element},\\\"{description}\\\",{x_1},{y_1},{z_1},{d_1},{j_1},{x_2},{y_2},{z_2},{d_2},{j_2},{c_x},{c_y},{c_z},{b_x},{b_y},{b_z},{b_d},{b_j},{angle}\\n')\n output_file.close() \n f.close()\n break \n\n if first_coincidence and element_expression.match(line):\n output_file.write(f'{element},\\\"{description}\\\",{x_1},{y_1},{z_1},{d_1},{j_1},{x_2},{y_2},{z_2},{d_2},{j_2},{c_x},{c_y},{c_z},{b_x},{b_y},{b_z},{b_d},{b_j},{angle}\\n') \n founded = False\n\n if element_expression.match(line) and first_coincidence == False:\n first_coincidence = True \n\n\n #Si first_coincidence es false aún no se hace nada\n if not first_coincidence:\n continue \n \n if element_expression.match(line): \n element = element_expression.search(line).group(1)\n founded = True\n\n x_1, y_1, z_1, d_1, j_1 = (\"\", \"\", \"\", \"\", \"\")\n x_2, y_2, z_2, d_2, j_2 = (\"\", \"\", \"\", \"\", \"\")\n b_x, b_y, b_z, b_d, b_j = (\"\", \"\", \"\", \"\", \"\")\n c_x, c_y, c_z = (\"\", \"\", \"\")\n angle = \"\"\n end_point_counter = 1 \n elif no_element_expression.match(line): \n founded = False\n elif description_expression.match(line) and founded: \n description = description_expression.search(line).group(1) \n description = description.replace('\"', ' in')\n elif node_expression.match(line) and founded: \n node_type = node_expression.search(line).group(1)\n x = node_expression.search(line).group(2)\n y = node_expression.search(line).group(3)\n z = node_expression.search(line).group(4)\n d = node_expression.search(line).group(5)\n j = node_expression.search(line).group(6) \n\n if node_type == 'END':\n if end_point_counter == 1:\n x_1, y_1, z_1, d_1, j_1 = (x, y, z, d, j) \n end_point_counter +=1\n elif end_point_counter ==2:\n x_2, y_2, z_2, d_2, j_2 = (x, y, z, d, j)\n elif node_type == 'CENTRE':\n c_x, c_y, c_z = (x, y, z)\n elif node_type == 'BRANCH1':\n b_x, b_y, b_z, b_d, b_j = (x, y, z, d, j)\n elif angle_expression.match(line) and founded: \n angle = int(angle_expression.search(line).group(1))/100 ","sub_path":"create_main_csv/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"152403531","text":"# UserAgent for web browser\nuAgent = b\"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36\"\n\n\n# Web Page reloading interval in miliseconds\nreloadInterval = 5 * 1000\nliveReloadInterval = 1 * 1000\n\n# redis setup\nrpi = 1000 # redis pop interval\nrserver = 'localhost' # redis server\nrchannel = 'redis_channel'\n\n# setup dynamic logging\nlog_file_loc = '/var/log/sbp/'\n","sub_path":"cfg.py","file_name":"cfg.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} 
+{"seq_id":"610591665","text":"import requests, json\n\n\n\ndef getCustomerDetails():\n\n #Header\n\n serviceName = 'getCustomerDetails'\n\n userID = 'bobsmith1'\n\n PIN = '123456'\n\n OTP = '123456'\n\n \n\n headerObj = {\n\n 'Header': {\n\n 'serviceName': serviceName,\n\n 'userID': userID,\n\n 'PIN': PIN,\n\n 'OTP': OTP\n\n }\n\n }\n\n final_url=\"{0}?Header={1}\".format(\"http://tbankonline.com/SMUtBank_API/Gateway\",json.dumps(headerObj))\n\n response = requests.post(final_url)\n\n serviceRespHeader = response.json()['Content']['ServiceResponse']['ServiceRespHeader']\n\n errorCode = serviceRespHeader['GlobalErrorID']\n\n \n\n if errorCode == '010000':\n\n CDMCustomer = response.json()['Content']['ServiceResponse']['CDMCustomer']\n return CDMCustomer['cellphone']['phoneNumber'], CDMCustomer['profile']['email'], CDMCustomer['profile']['bankID']\n elif errorCode == '010041':\n\n print(\"OTP has expired.\\nYou will receiving a SMS\")\n\n else:\n\n print(serviceRespHeader['ErrorText'])\n\n","sub_path":"getcustomerdetails.py","file_name":"getcustomerdetails.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"517070307","text":"import os as OperatingSystem\nimport time as TimeFunctions\n\n\ndef mountnetworkdrive(mountpoint, networkpath, username, password):\n\tif concatenatepaths(\" \", \" \") == \" / \":\n\t\toutcome = OperatingSystem.system('sudo mount -v -t cifs -o username='+username+',password='+password+',uid=pi,gid=pi,noexec,vers=2.0 '+networkpath+' '+mountpoint)\n\telse:\n\t\tif username != \"\":\n\t\t\toutcome = OperatingSystem.system('NET USE '+mountpoint+' '+networkpath+' '+password+' '+'/USER:'+username+' /PERSISTENT:NO')\n\t\telse:\n\t\t\toutcome = OperatingSystem.system('NET USE '+mountpoint+' '+networkpath)\n\treturn outcome\n\n\n\n\ndef unmountnetworkdrive(mountpoint):\n\tif concatenatepaths(\" \", \" \") == \" / \":\n\t\toutcome = OperatingSystem.system('sudo umount -v '+mountpoint)\n\telse:\n\t\toutcome = OperatingSystem.system('NET USE '+mountpoint+' /DELETE')\n\treturn outcome\n\n# ---------------------------------------------\n# Reads a file from disk and returns a list,\n# where each list item is a line in the file\n# ---------------------------------------------\n\ndef readfromdisk(filename):\n\tnewfilelist = []\n\n\ttry:\n\t\t# Open the file for the duration of this process\n\t\twith open(filename, 'r') as fileobject:\n\n\t\t\t# Loop over all lines in the file\n\t\t\tfor fileline in fileobject.readlines():\n\n\t\t\t\t# Only process if the line isn't blank\n\t\t\t\tif fileline != \"\":\n\t\t\t\t\tnewfilelist.append(fileline.rstrip('\\r\\n'))\n\n\texcept:\n\t\t# Print an error if the file cannot be read\n\t\tprint(\"Cannot read file - \" + filename)\n\n\n\treturn newfilelist\n\n\n\n# ---------------------------------------------\n# Returns a list of strings, extracted from a\n# single string of tab separated substrings\n# ---------------------------------------------\n\ndef extracttabulateddata(fileline):\n\tsplitdata = fileline.split(\"\\t\")\n\treturn splitdata\n\n\n\n# ---------------------------------------------\n# Returns a list of strings, extracted from a\n# single string of comma-space separated substrings\n# ---------------------------------------------\n\ndef extractcommadata(fileline):\n\tsplitdata = fileline.split(\", \")\n\treturn splitdata\n\n\n\n# ---------------------------------------------\n# Returns a list of two strings, extracted from a\n# single string 
of space-equals-space separated substrings\n# ---------------------------------------------\n\ndef extractdatapair(dataitem):\n\tsplitdata = dataitem.split(\" = \")\n\treturn splitdata[0], splitdata[1]\n\n\n\n# ---------------------------------------------\n# Returns a list items found in the specified\n# folderpath, with File/Folder/Unknown designations\n# ---------------------------------------------\n\ndef getfolderlisting(folderpath):\n\toutcome = {}\n\n\ttry:\n\t\tlisting = OperatingSystem.listdir(folderpath)\n\n\t\tfor listitem in listing:\n\t\t\tfullitempath = OperatingSystem.path.join(folderpath, listitem)\n\t\t\tif OperatingSystem.path.isfile(fullitempath) == True:\n\t\t\t\titemtype = \"File\"\n\t\t\telif OperatingSystem.path.isdir(fullitempath) == True:\n\t\t\t\titemtype = \"Folder\"\n\t\t\telse:\n\t\t\t\titemtype = \"Unknown\"\n\t\t\toutcome[listitem] = itemtype\n\n\texcept:\n\t\tprint(\"Cannot access folder - \" + folderpath)\n\n\treturn outcome\n\n\n\n# ---------------------------------------------\n# Returns a path based on a root and a subitem\n# ---------------------------------------------\n\ndef concatenatepaths(path1, path2):\n\n\tsuffix = path2\n\tif path2 != \"\":\n\t\tif path2[0:1] == \"/\":\n\t\t\tsuffix = path2[1:]\n\tif path1 == \"\":\n\t\tprefix = \" \"\n\t\toutcome = OperatingSystem.path.join(prefix, suffix)\n\t\toutcome = outcome[1:]\n\telse:\n\t\tprefix = path1\n\t\tif len(path1) == 2:\n\t\t\tif path1[1:2] == \":\":\n\t\t\t\tprefix = path1 + \"\\\\\"\n\t\toutcome = OperatingSystem.path.join(prefix, suffix)\n\n\treturn outcome\n\n\n\n# ---------------------------------------------\n# Returns whether a path (file or folder) exists\n# ---------------------------------------------\n\ndef doesexist(fullpath):\n\treturn OperatingSystem.path.exists(fullpath)\n\n\n\n# ---------------------------------------------\n# Returns a file's extension\n# ---------------------------------------------\n\ndef getextension(filename):\n\tif \".\" in filename:\n\t\tfilenamesplit = filename.split(\".\")\n\t\toutcome = filenamesplit[len(filenamesplit) - 1]\n\telse:\n\t\toutcome = \"\"\n\n\treturn outcome\n\n\n\n# ---------------------------------------------\n# Returns a file's name\n# ---------------------------------------------\n\ndef getname(filename):\n\textension = getextension(filename)\n\n\tif extension == \"\":\n\t\tif filename[-1:] == \".\":\n\t\t\toutcome = filename[:-1]\n\t\telse:\n\t\t\toutcome = filename\n\telse:\n\t\textensionlength = 0 - len(extension) - 1\n\t\toutcome = filename[:extensionlength]\n\n\treturn outcome\n\n\n\n# ---------------------------------------------\n# Writes a file to disk from a list\n# ---------------------------------------------\n\ndef writetodisk(filename, outputlist, appendwritemode):\n\n\tif appendwritemode == \"Overwrite\":\n\t\tmodeflag = 'w'\n\telif appendwritemode == \"Append\":\n\t\tmodeflag = 'a'\n\telse:\n\t\tassert 1 == 0, (\"Unknown file write output mode: \" + appendwritemode)\n\n\tnewlist = []\n\tfor originalitem in outputlist:\n\t\tnewlist.append(originalitem)\n\t\tnewlist.append(\"\\n\")\n\n\ttry:\n\t\t# Open the file for the duration of this process\n\t\twith open(filename, modeflag) as targetfile:\n\n\t\t\t# Print out all items in list\n\t\t\ttargetfile.writelines(newlist)\n\n\texcept:\n\t\t# Print an error if the file cannot be written\n\t\tprint(\"Cannot write file - \" + filename)\n\n\n\n# ---------------------------------------------\n# Returns a path based on a list of subfolders\n# 
---------------------------------------------\n\ndef createpathfromlist(pathlist):\n\n\toutcome = \"\"\n\tfor pathitem in pathlist:\n\t\toutcome = concatenatepaths(outcome, pathitem)\n\n\treturn outcome\n\n\n\n# ---------------------------------------------\n# Copies a file\n# ---------------------------------------------\n\ndef copyfile(source, target):\n\n\tif concatenatepaths(\" \", \" \") == \" / \":\n\t\toutcome = OperatingSystem.system('cp -v \"' + source + '\" \"' + target + '\"')\n\telse:\n\t\toutcome = OperatingSystem.system('copy \"' + source + '\" \"' + target + '\"')\n\n\treturn outcome\n\n\n\n# ---------------------------------------------\n# Returns filesize\n# ---------------------------------------------\n\ndef getsize(fullpath):\n\treturn OperatingSystem.path.getsize(fullpath)\n\n\n\n# ---------------------------------------------\n# Returns file modified datetime\n# ---------------------------------------------\n\ndef getmodifytimedate(fullpath):\n\tdt = TimeFunctions.localtime(OperatingSystem.path.getmtime(fullpath))\n\toutcome = {\"Year\": dt.tm_year, \"Month\": dt.tm_mon, \"Day\": dt.tm_mday, \"Hour\": dt.tm_hour, \"Minute\": dt.tm_min,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"Second\": dt.tm_sec}\n\n\treturn outcome\n\n\n\n","sub_path":"codebase/common_components/filesystem_framework/filesystem_module.py","file_name":"filesystem_module.py","file_ext":"py","file_size_in_byte":6519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"34709446","text":"# coding: utf-8\n\n\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom zuora_client.models.breakdown_detail import BreakdownDetail # noqa: F401,E501\n\n\nclass CreditMemoItemBreakdown(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'amount': 'float',\n 'apply_to_charge_number': 'str',\n 'breakdown_details': 'list[BreakdownDetail]',\n 'charge_number': 'str',\n 'credit_memo_item_id': 'str',\n 'end_date': 'date',\n 'is_negative_price': 'bool',\n 'start_date': 'date',\n 'subscription_number': 'str'\n }\n\n attribute_map = {\n 'amount': 'amount',\n 'apply_to_charge_number': 'applyToChargeNumber',\n 'breakdown_details': 'breakdownDetails',\n 'charge_number': 'chargeNumber',\n 'credit_memo_item_id': 'creditMemoItemId',\n 'end_date': 'endDate',\n 'is_negative_price': 'isNegativePrice',\n 'start_date': 'startDate',\n 'subscription_number': 'subscriptionNumber'\n }\n\n def __init__(self, amount=None, apply_to_charge_number=None, breakdown_details=None, charge_number=None, credit_memo_item_id=None, end_date=None, is_negative_price=None, start_date=None, subscription_number=None): # noqa: E501\n \"\"\"CreditMemoItemBreakdown - a model defined in Swagger\"\"\" # noqa: E501\n\n self._amount = None\n self._apply_to_charge_number = None\n self._breakdown_details = None\n self._charge_number = None\n self._credit_memo_item_id = None\n self._end_date = None\n self._is_negative_price = None\n self._start_date = None\n self._subscription_number = None\n self.discriminator = None\n\n if amount is not None:\n self.amount = amount\n if apply_to_charge_number is not None:\n self.apply_to_charge_number = apply_to_charge_number\n if breakdown_details is not 
None:\n self.breakdown_details = breakdown_details\n if charge_number is not None:\n self.charge_number = charge_number\n if credit_memo_item_id is not None:\n self.credit_memo_item_id = credit_memo_item_id\n if end_date is not None:\n self.end_date = end_date\n if is_negative_price is not None:\n self.is_negative_price = is_negative_price\n if start_date is not None:\n self.start_date = start_date\n if subscription_number is not None:\n self.subscription_number = subscription_number\n\n @property\n def amount(self):\n \"\"\"Gets the amount of this CreditMemoItemBreakdown. # noqa: E501\n\n\n :return: The amount of this CreditMemoItemBreakdown. # noqa: E501\n :rtype: float\n \"\"\"\n return self._amount\n\n @amount.setter\n def amount(self, amount):\n \"\"\"Sets the amount of this CreditMemoItemBreakdown.\n\n\n :param amount: The amount of this CreditMemoItemBreakdown. # noqa: E501\n :type: float\n \"\"\"\n\n self._amount = amount\n\n @property\n def apply_to_charge_number(self):\n \"\"\"Gets the apply_to_charge_number of this CreditMemoItemBreakdown. # noqa: E501\n\n Available only when the credit memo item represents a discount invoice item. # noqa: E501\n\n :return: The apply_to_charge_number of this CreditMemoItemBreakdown. # noqa: E501\n :rtype: str\n \"\"\"\n return self._apply_to_charge_number\n\n @apply_to_charge_number.setter\n def apply_to_charge_number(self, apply_to_charge_number):\n \"\"\"Sets the apply_to_charge_number of this CreditMemoItemBreakdown.\n\n Available only when the credit memo item represents a discount invoice item. # noqa: E501\n\n :param apply_to_charge_number: The apply_to_charge_number of this CreditMemoItemBreakdown. # noqa: E501\n :type: str\n \"\"\"\n\n self._apply_to_charge_number = apply_to_charge_number\n\n @property\n def breakdown_details(self):\n \"\"\"Gets the breakdown_details of this CreditMemoItemBreakdown. # noqa: E501\n\n\n :return: The breakdown_details of this CreditMemoItemBreakdown. # noqa: E501\n :rtype: list[BreakdownDetail]\n \"\"\"\n return self._breakdown_details\n\n @breakdown_details.setter\n def breakdown_details(self, breakdown_details):\n \"\"\"Sets the breakdown_details of this CreditMemoItemBreakdown.\n\n\n :param breakdown_details: The breakdown_details of this CreditMemoItemBreakdown. # noqa: E501\n :type: list[BreakdownDetail]\n \"\"\"\n\n self._breakdown_details = breakdown_details\n\n @property\n def charge_number(self):\n \"\"\"Gets the charge_number of this CreditMemoItemBreakdown. # noqa: E501\n\n\n :return: The charge_number of this CreditMemoItemBreakdown. # noqa: E501\n :rtype: str\n \"\"\"\n return self._charge_number\n\n @charge_number.setter\n def charge_number(self, charge_number):\n \"\"\"Sets the charge_number of this CreditMemoItemBreakdown.\n\n\n :param charge_number: The charge_number of this CreditMemoItemBreakdown. # noqa: E501\n :type: str\n \"\"\"\n\n self._charge_number = charge_number\n\n @property\n def credit_memo_item_id(self):\n \"\"\"Gets the credit_memo_item_id of this CreditMemoItemBreakdown. # noqa: E501\n\n\n :return: The credit_memo_item_id of this CreditMemoItemBreakdown. # noqa: E501\n :rtype: str\n \"\"\"\n return self._credit_memo_item_id\n\n @credit_memo_item_id.setter\n def credit_memo_item_id(self, credit_memo_item_id):\n \"\"\"Sets the credit_memo_item_id of this CreditMemoItemBreakdown.\n\n\n :param credit_memo_item_id: The credit_memo_item_id of this CreditMemoItemBreakdown. 
# noqa: E501\n :type: str\n \"\"\"\n\n self._credit_memo_item_id = credit_memo_item_id\n\n @property\n def end_date(self):\n \"\"\"Gets the end_date of this CreditMemoItemBreakdown. # noqa: E501\n\n\n :return: The end_date of this CreditMemoItemBreakdown. # noqa: E501\n :rtype: date\n \"\"\"\n return self._end_date\n\n @end_date.setter\n def end_date(self, end_date):\n \"\"\"Sets the end_date of this CreditMemoItemBreakdown.\n\n\n :param end_date: The end_date of this CreditMemoItemBreakdown. # noqa: E501\n :type: date\n \"\"\"\n\n self._end_date = end_date\n\n @property\n def is_negative_price(self):\n \"\"\"Gets the is_negative_price of this CreditMemoItemBreakdown. # noqa: E501\n\n Whether the amount of the product rate plan charge, which the credit memo is created from, is negative. # noqa: E501\n\n :return: The is_negative_price of this CreditMemoItemBreakdown. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._is_negative_price\n\n @is_negative_price.setter\n def is_negative_price(self, is_negative_price):\n \"\"\"Sets the is_negative_price of this CreditMemoItemBreakdown.\n\n Whether the amount of the product rate plan charge, which the credit memo is created from, is negative. # noqa: E501\n\n :param is_negative_price: The is_negative_price of this CreditMemoItemBreakdown. # noqa: E501\n :type: bool\n \"\"\"\n\n self._is_negative_price = is_negative_price\n\n @property\n def start_date(self):\n \"\"\"Gets the start_date of this CreditMemoItemBreakdown. # noqa: E501\n\n\n :return: The start_date of this CreditMemoItemBreakdown. # noqa: E501\n :rtype: date\n \"\"\"\n return self._start_date\n\n @start_date.setter\n def start_date(self, start_date):\n \"\"\"Sets the start_date of this CreditMemoItemBreakdown.\n\n\n :param start_date: The start_date of this CreditMemoItemBreakdown. # noqa: E501\n :type: date\n \"\"\"\n\n self._start_date = start_date\n\n @property\n def subscription_number(self):\n \"\"\"Gets the subscription_number of this CreditMemoItemBreakdown. # noqa: E501\n\n\n :return: The subscription_number of this CreditMemoItemBreakdown. # noqa: E501\n :rtype: str\n \"\"\"\n return self._subscription_number\n\n @subscription_number.setter\n def subscription_number(self, subscription_number):\n \"\"\"Sets the subscription_number of this CreditMemoItemBreakdown.\n\n\n :param subscription_number: The subscription_number of this CreditMemoItemBreakdown. 
# noqa: E501\n :type: str\n \"\"\"\n\n self._subscription_number = subscription_number\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(CreditMemoItemBreakdown, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, CreditMemoItemBreakdown):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"zuora_client/models/credit_memo_item_breakdown.py","file_name":"credit_memo_item_breakdown.py","file_ext":"py","file_size_in_byte":10319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"621546088","text":"from taggit.forms import TagWidget, TagField\n\nclass AutoCompleteTagWidget(TagWidget):\n def __init__(self, endpoint, *args, **kwargs):\n super(AutoCompleteTagWidget, self).__init__(*args,**kwargs)\n self.attrs['class'] = 'tags_autocomplete_widget'\n self.attrs['data-source'] = endpoint\n\n class Media:\n js = ('djangogenerics/js/tags_autocomplete_widget.js',)\n\n\nclass AutoCompleteTagField(TagField):\n widget = AutoCompleteTagWidget\n\n def __init__(self, endpoint, *args, **kwargs):\n widget = kwargs.get(\"widget\", False)\n if not widget or not isinstance(widget, AutoCompleteTagWidget):\n kwargs[\"widget\"] = AutoCompleteTagWidget(endpoint=endpoint)\n super(AutoCompleteTagField, self).__init__(*args, **kwargs)\n","sub_path":"djangogenerics/taggit/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"320594346","text":"class prime:\n\n def __init__(self):\n self.list = [] \n\n def prime(self,r1,r2):\n lis= [] #list to store the prime number the range r1 to r2\n for i in range(r1,r2+1):\n if prime.prime_check(self,i):\n lis.append(i)\n self.list.append(lis)\n return lis\n\n def Print(self,itr): #printing the 2d array\n for i in range(itr):\n print(self.list[i])\n \n def prime_check(self,num):\n if num < 2: # if the num is < 2 its not a prime number\n return False\n if num == 2: # if the given num is 2 return true since 2 is a prime number\n return True\n count = 0 #to count the divisors\n for i in range(2,(num//2)+1):\n if num%i == 0:\n count +=1\n return False\n if count == 0:\n return True\n\n def anagram(self,num):\n temp = 0\n while num >= 10:\n temp = (temp*10) + (num%10)\n num = num//10\n temp = (temp*10) + (num%10)\n return temp\n\n\npr = prime()\n\nnum = []\nfor r in range(10,1001):\n if(pr.prime_check(r)): #checking if number is prime\n ana = pr.anagram(r) #reversing the number\n if(pr.prime_check(ana)): 
#checking reversed string is prime\n num.append(r) #adding number in list if true\nprint(num)\n","sub_path":"Week2/new/anagramInPrime.py","file_name":"anagramInPrime.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"481714324","text":"from impactModel.FileManager import FileManager\nimport numpy as np\n\nclass StackData(object):\n '''\n Class compiling the data on one ticker into two arrays.\n One numpy array, which rows are trades and columns are [DATE, TICKER, TIMESTAMP, PRICE, SIZE]\n A second one, which rows are quotes and columns are [DATE, TICKER, TIMESTAMP, BIDPRICE, BIDSIZE, ASKPRICE, ASKSIZE]\n '''\n\n def __init__(self, baseDir, startDate, endDate, ticker ):\n '''\n Uses the FileManager class to get the dates, and then loops through them to build the arrays\n via one call to the methods addQuotes and addTrades.\n '''\n self._fm = FileManager( baseDir )\n \n # Retrieve list of trading days for trades & quotes files\n self._datesT = np.sort(self._fm.getTradeDates(startDate, endDate)) # 20070620, 20070621, ...\n self._datesQ = np.sort(self._fm.getQuoteDates(startDate, endDate))\n\n # Ticker searched\n self._ticker = ticker\n\n # Find sizes to allocate the right space\n lengthQ = 0\n for date in self._datesQ:\n try:\n lengthQ += self._fm.getQuotesFile(date, self._ticker).getN()\n except:\n continue\n \n lengthT = 0\n for date in self._datesT:\n try:\n lengthT += self._fm.getTradesFile(date, self._ticker).getN()\n except:\n continue\n \n \n # Stacked data for this ticker\n self._stackedQuotes = np.empty((lengthQ,7), dtype=object)\n self._stackedTrades = np.empty((lengthT,5), dtype=object)\n \n # Quotes\n def addQuotes(self):\n l = 0\n for date in self._datesQ:\n try:\n quoteFile = self._fm.getQuotesFile(date, self._ticker)\n except:\n continue\n # Append the day data [DATE, TICKER, TIMESTAMP, BIDPRICE, BIDSIZE, ASKPRICE, ASKSIZE]\n length = quoteFile.getN()\n for i in range(0, length):\n self._stackedQuotes[l + i] = np.array([date, self._ticker, int(quoteFile.getTimestamp(i)), float(quoteFile.getBidPrice(i)), float(quoteFile.getBidSize(i)), float(quoteFile.getAskPrice(i)), float(quoteFile.getAskSize(i))])\n l += length\n \n # Trades\n def addTrades(self):\n l = 0\n for date in self._datesT:\n try:\n tradeFile = self._fm.getTradesFile(date, self._ticker)\n except:\n continue\n # Append the day data [DATE, TICKER, TIMESTAMP, PRICE, SIZE]\n length = tradeFile.getN()\n for i in range(0, length):\n self._stackedTrades[l + i] = np.array([date, self._ticker, int(tradeFile.getTimestamp(i)), float(tradeFile.getPrice(i)), float(tradeFile.getSize(i))])\n l += length\n \n # Returns the array of stacked adjusted trades \n def getStackedTrades(self):\n return self._stackedTrades\n \n # Returns the array of stacked adjusted quotes\n def getStackedQuotes(self): \n return self._stackedQuotes\n","sub_path":"adjustAndClean/StackData.py","file_name":"StackData.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"65549693","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('yunda_admin', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='depositentry',\n name='created_at',\n field=models.DateTimeField(default=datetime.datetime(2015, 3, 
30, 20, 55, 28, 513769)),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='depositwithdraw',\n name='created_at',\n field=models.DateTimeField(default=datetime.datetime(2015, 3, 30, 20, 55, 28, 514797)),\n preserve_default=True,\n ),\n ]\n","sub_path":"yunda_admin/migrations/0002_auto_20150330_2055.py","file_name":"0002_auto_20150330_2055.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"55851977","text":"import glob\nfrom PIL import Image\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\ndir = \"dataset_B_Eye_Images/\"\ncategories = [\"closedRightEyes\", \"openRightEyes\"]\nX = []\nY = []\n\nfor idx, cat in enumerate(categories):\n imageDir = dir + cat\n files = glob.glob(imageDir + \"/*.jpg\")\n for i, f in enumerate(files):\n img = Image.open(f)\n data = np.asarray(img)\n data = data.reshape((24, 24, 1))\n X.append(data)\n Y.append(idx)\nX = np.array(X)\nY = np.array(Y)\n\nxTrain, xTest, yTrain, yTest = train_test_split(X, Y)\nnp.save('dataset/xTrain.npy', xTrain)\nnp.save('dataset/yTrain.npy', yTrain)\nnp.save('dataset/xTest.npy', xTest)\nnp.save('dataset/yTest.npy', yTest)","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"406908431","text":"#!/usr/bin/env python3\n\"\"\"web iface to news stream\"\"\"\nimport argparse\nimport logging\nimport logging.config\nimport signal\n\nfrom core.utils import load_config\n\nAPP_NAME = 'streamserver'\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--config', default='config.yml')\n args = parser.parse_args()\n return args\n\n\ndef on_sigterm(signum, frame):\n \"\"\"gracefully exit on docker stop (SIGTERM)\"\"\"\n raise KeyboardInterrupt()\n\n\ndef main():\n signal.signal(signal.SIGTERM, on_sigterm)\n\n args = parse_args()\n config = load_config(args.config)\n logging.config.dictConfig(config.logging.as_dict())\n config = getattr(config, APP_NAME)\n\n log = logging.getLogger(APP_NAME)\n log.info('starting ...')\n log.info('app params: %s', config)\n\n import streamserver\n try:\n streamserver.run(config)\n except KeyboardInterrupt:\n log.info('app stopped')\n except Exception as ex:\n log.exception('app error: %s', ex)\n raise SystemExit(1)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"streamserver/run-streamserver.py","file_name":"run-streamserver.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"373017061","text":"class Solution:\n def complexNumberMultiply(self, a: str, b: str) -> str:\n \n def splitCompNum(num) :\n real, imag = num.split('+')\n imag , _ = imag.split('i')\n return int(real) , int(imag)\n \n real1 , imag1 = splitCompNum(a)\n real2 , imag2 = splitCompNum(b)\n \n real = real1*real2 - imag1*imag2\n imag = real1*imag2 + real2*imag1\n return \"{}+{}i\".format(real , imag)\n","sub_path":"Leet Code/Practice/#537. Complex Number Multiplication/#2. Split.py","file_name":"#2. 
Split.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"224730321","text":"'''\r\ndumps:将字典转换为JSON字符串\r\ndefault关键字参数指定一个回调函数,该回调函数会接收一个类实例\r\n回调函数需要返回一个字典,最后,dumps函数会将这个字典转换为JSON字符串\r\nobject -> dict -> JSON\r\n'''\r\nimport json\r\nclass Product:\r\n def __init__(self,name,price,count):\r\n self.name = name\r\n self.price = price\r\n self.count = count\r\ndef product2Dict(obj):\r\n return {\r\n 'name':obj.name,\r\n 'price':obj.price,\r\n 'count':obj.count\r\n }\r\nproduct = Product('特斯拉',1000000,20)\r\njsonStr = json.dumps(product, default = product2Dict,ensure_ascii=False)\r\nprint(jsonStr)\r\n","sub_path":"Python学习基础知识/高级python篇/第14章:数据存储/将对象转换为JSON字符串.py","file_name":"将对象转换为JSON字符串.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"274882679","text":"import sys\nsys.stdin = open('bj15653.txt','r')\n\nfrom collections import deque\ndx=[1,0,-1,0]\ndy=[0,1,0,-1]\ndef Go(y,x):\n t=0\n while Arr[y][x]!='#':\n y+=dy[d]\n x+=dx[d]\n t+=1\n if Arr[y][x]=='O':return y,x,t\n return y-dy[d],x-dx[d],t\nN,M=map(int,input().split())\nArr=[input()for _ in range(N)]\nMap=[[[[0]*M for _ in range(N)]for __ in range(M)]for ___ in range(N)]\nfor i in range(N):\n for j in range(M):\n if Arr[i][j]=='R':R=(i,j)\n elif Arr[i][j]=='B':B=(i,j)\n elif Arr[i][j]=='O':O=(i,j)\nQue=deque([(R,B,1)])\nMap[R[0]][R[1]][B[0]][B[1]]=1\nCheck=-1\nwhile Que and Check==-1:\n (ry,rx),(by,bx),t=Que.popleft()\n for d in range(4):\n rY,rX,rt=Go(ry,rx)\n bY,bX,bt=Go(by,bx)\n if (bY,bX)==O:continue\n if (rY,rX)==(bY,bX):\n if rt>bt:\n rY-=dy[d]\n rX-=dx[d]\n else:\n bY-=dy[d]\n bX-=dx[d]\n if (rY,rX)==O:Check=t;break\n if not Map[rY][rX][bY][bX]:\n Map[rY][rX][bY][bX]=1\n Que.append(((rY,rX),(bY,bX),t+1))\nprint(Check)\n\n","sub_path":"algoritm/20상반기 코딩테스트/구슬 탈출 4/bj15653.py","file_name":"bj15653.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"563090357","text":"import unittest\nimport subprocess\nimport socket\nfrom time import sleep\n\np_kwargs = dict(\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n cwd=\"/home/dparker/pr1/echo/\"\n)\n\nclass TestEcho(unittest.TestCase):\n\n def test_it_compiles(self):\n with subprocess.Popen([\"make\"], **p_kwargs) as p:\n self.assertEqual(p.wait(), 0)\n \n def test_server_starts(self):\n with subprocess.Popen([\"./echoserver\"], **p_kwargs) as p:\n self.assertEqual(p.returncode, None)\n p.kill()\n self.assertNotEqual(p.wait(1), None)\n\n def test_server_can_restart(self):\n def start_stop():\n with subprocess.Popen([\"./echoserver\"], **p_kwargs) as p:\n p.kill()\n return p.wait(1)\n \n for _ in range(3):\n self.assertEqual(start_stop(), -9)\n \n def test_server_echoes(self):\n message = b\"Foo Bar!!!\"\n with subprocess.Popen([\"./echoserver\", \"-p\", \"2345\"], **p_kwargs) as p:\n sleep(0.1) # Wait for startup\n sock = socket.create_connection((\"localhost\", 2345))\n sock.send(message)\n resp = sock.recv(16)\n sock.close()\n p.kill()\n \n self.assertEqual(resp, message)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"gios-cs6200/tests/pr1/test_echo.py","file_name":"test_echo.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"215844938","text":"from django.http import 
HttpResponseRedirect, HttpResponse, Http404\nfrom django.template import RequestContext, loader\nfrom django.shortcuts import get_object_or_404, render\nfrom django.core.urlresolvers import reverse\nfrom django.utils import timezone\n\nfrom shipping.models import *\n\n\n\n\n\n\n\ndef home(request):\n return render(request, 'shipping/home.html')\n\ndef driver(request):\n return render(request, 'shipping/driver.html')\n\ndef about(request):\n return render(request, 'shipping/about.html')\n\n\n\ndef availablecontracts(request):\n contracts = Contract.objects.all()\n contracts = [p for p in contracts if p.takentime == None and p.deliverystatus.confirmedtime == None]\n drivers = Driver.objects.all()\n return render(request, 'shipping/availablecontracts.html', {'contracts': contracts, 'drivers': drivers})\n\ndef takecontracts(request):\n if request.POST['driver'] == \"\" :\n return HttpResponseRedirect(reverse('shipping:availablecontracts'))\n else :\n ids = request.POST.getlist('check')\n for i in ids :\n c = Contract.objects.get(id=i)\n c.takentime = timezone.now()\n c.driver = Driver.objects.get(id=request.POST['driver'])\n c.save()\n return HttpResponseRedirect(reverse('shipping:drivercontracts'))\n\ndef updatedelivery(request):\n dropped = request.POST.getlist('dropped')\n for d in dropped :\n d = Contract.objects.get(id=d).deliverystatus\n d.droptime = timezone.now()\n d.save()\n picked = request.POST.getlist('picked')\n for p in picked :\n d = Contract.objects.get(id=p).deliverystatus\n d.picktime = timezone.now()\n d.save()\n return HttpResponseRedirect(reverse('shipping:drivercontracts'))\n\ndef drivercontracts(request):\n contracts = Contract.objects.all()\n contracts = [p for p in contracts if p.takentime != None]\n return render(request, 'shipping/drivercontracts.html', {'contracts': contracts})\n\ndef userpage(request):\n return render(request, 'shipping/userpage.html')\n\n\ndef newcontract(request):\n customers = Customer.objects.all()\n return render(request, 'shipping/newcontract.html', {'customers': customers})\n\ndef opencontracts(request):\n contracts = Contract.objects.all()\n contracts = [p for p in contracts if p.deliverystatus.confirmedtime == None and p.signedtime == None]\n return render(request, 'shipping/opencontracts.html', {'contracts': contracts})\n \ndef signedcontracts(request):\n contracts = Contract.objects.all()\n contracts = [p for p in contracts if p.signedtime != None and p.deliverystatus.confirmedtime == None and p.payedtime != None]\n return render(request, 'shipping/signedcontracts.html', {'contracts': contracts})\n \ndef closedcontracts(request):\n closedcontracts = Contract.objects.all()\n closedcontracts = [p for p in closedcontracts if p.deliverystatus.confirmedtime != None and p.settledtime != None]\n return render(request, 'shipping/closedcontracts.html', {'closedcontracts': closedcontracts}) \n \ndef newpackage(request):\n contracts = Contract.objects.order_by('-openedtime')\n return render(request, 'shipping/newpackage.html', {'contracts': contracts})\n\ndef packages(request):\n packages = Package.objects.all()\n return render(request, 'shipping/packages.html', {'packages': packages})\n\n\ndef users(request): \n users = Customer.objects.all()\n return render(request, 'shipping/users.html', {'users': users})\n\n\ndef newuser(request):\n return render(request, 'shipping/newuser.html')\n\ndef drivers(request): \n drivers = Driver.objects.all()\n return render(request, 'shipping/drivers.html', {'drivers': drivers})\n\ndef newdriver(request):\n return 
render(request, 'shipping/newdriver.html')\n\n\n\n\ndef createcontract(request):\n    if request.POST['email_seller'] != \"\" and request.POST['email_buyer'] != \"\" and request.POST['account'] != \"\" and request.POST['routing'] != \"\" and request.POST['adress_pick'] != \"\" and request.POST['adress_deliver'] != \"\" :\n        deliverystatus = Deliverystatus()\n        deliverystatus.save()\n        contract = Contract(deliverystatus=deliverystatus, seller=request.POST['email_seller'], buyer=request.POST['email_buyer'], banknumber=request.POST['account'], bankrouting=request.POST['routing'], pickupaddress=request.POST['adress_pick'], dropofadress=request.POST['adress_deliver'])\n        contract.save()\n        return HttpResponseRedirect(reverse('shipping:opencontracts'))\n    else :\n        return HttpResponseRedirect(reverse('shipping:newcontract'))\n\ndef createpackage(request):\n    if request.POST['price'] != \"\" and request.POST['contract'] != \"\" and request.POST['description'] != \"\" and request.POST['height'] != \"\" and request.POST['width'] != \"\" and request.POST['length'] != \"\" :\n        c = Contract.objects.get(id=request.POST['contract'])\n        package = Package(price=request.POST['price'], contract=c, contentdescription=request.POST['description'], height=request.POST['height'], width=request.POST['width'], length=request.POST['length'], weight=request.POST['weight'])\n        package.save()\n        \n        packages = Package.objects.filter(contract=c)\n        totalvolume = 0\n        for p in packages :\n            totalvolume += p.height*p.length*p.width\n        c.deliveryprice = totalvolume\n        c.save()\n        return HttpResponseRedirect(reverse('shipping:packages'))\n    else :\n        return HttpResponseRedirect(reverse('shipping:newpackage'))\n\ndef createuser(request):\n    if request.POST['email'] != \"\" :\n        c = Customer(email=request.POST['email'])\n        c.save()\n        return HttpResponseRedirect(reverse('shipping:users'))\n    else :\n        return HttpResponseRedirect(reverse('shipping:newuser'))\n\ndef createdriver(request):\n    if request.POST['name'] != \"\" and request.POST['account'] != \"\" and request.POST['routing'] != \"\" :\n        d = Driver(name=request.POST['name'], account=request.POST['account'], routing=request.POST['routing'])\n        d.save()\n        return HttpResponseRedirect(reverse('shipping:drivers'))\n    else :\n        return HttpResponseRedirect(reverse('shipping:newdriver'))\n\n\n\ndef confirmdelivery(request):\n    ids = request.POST.getlist('confirm')\n    for i in ids :\n        d = Contract.objects.get(id=i).deliverystatus\n        d.confirmedtime = timezone.now()\n        d.save()\n        #Money sent\n        c = Contract.objects.get(id=i)\n        c.settledtime = timezone.now()\n        c.save()\n    return HttpResponseRedirect(reverse('shipping:closedcontracts'))\n\n\n\ndef signcontracts(request):\n    ids = request.POST.getlist('sign')\n    for i in ids :\n        c = Contract.objects.get(id=i)\n        c.signedtime = timezone.now()\n        c.save()\n    return HttpResponseRedirect(reverse('shipping:signedcontracts'))","sub_path":"D2Dshipping/shipping/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"443040825","text":"import datetime\n\nimport boto3\nimport googlemaps\nfrom boto.dynamodb.condition import CONTAINS\nfrom boto3.dynamodb.conditions import Key\nfrom config import Config\nfrom boto.s3.connection import S3Connection\nimport os.path\nfrom boto.s3.key 
import Key as keys\n\ndynamodb = boto3.resource('dynamodb',\n aws_access_key_id=Config.AWS_KEY,\n aws_secret_access_key=Config.AWS_SECRET_KEY,\n region_name=Config.REGION)\n\nconn = S3Connection(aws_access_key_id=Config.AWS_KEY, aws_secret_access_key=Config.AWS_SECRET_KEY)\nconn1 = boto3.client('s3', aws_access_key_id=Config.AWS_KEY, aws_secret_access_key=Config.AWS_SECRET_KEY)\n\ntable = dynamodb.Table('userinfo')\ntable_detail = dynamodb.Table('userdetails')\n\ntable_spots = dynamodb.Table('favoritespots')\ntable_time = dynamodb.Table('time_stamps')\ntable_traceroute = dynamodb.Table('traceroute')\ntable_routedetails = dynamodb.Table('route_details')\n\n\ndef insert_into_db(user):\n try:\n table.put_item(Item={\n 'email_address': user.emailAddress,\n 'full_name': user.fullName,\n 'password': user.password,\n 'flag': False,\n 'common_stops': False,\n })\n except Exception as E:\n return False\n return True\n\n\ndef insert_into_userdetails(userInfo, select2, select3):\n print(userInfo.user_name)\n\n selected3 = \", \".join(map(str, select3))\n selected2 = \", \".join(map(str, select2))\n\n try:\n table_detail.put_item(\n Item={\n 'user_name': userInfo.user_name,\n 'business_name': userInfo.business_name,\n 'home_lat': userInfo.home_lat,\n 'home_lng': userInfo.home_lng,\n 'work_lat': userInfo.work_lat,\n 'work_lng': userInfo.work_lng,\n 'commute_type': selected3,\n 'inter_stop': userInfo.inter_stop,\n 'commute_change': userInfo.commute_change,\n 'transport_mode': selected2,\n 'car_year': userInfo.car_years,\n 'car_make': userInfo.car_makes,\n 'car_model': userInfo.car_models,\n 'upgrade_vehicle': userInfo.upgrade_vehicle,\n 'parkpay': userInfo.parkpay\n }\n )\n except Exception as E:\n return False\n return True\n\n\ndef insert_into_userdetails_update(userInfo, select2, select3):\n print(userInfo.user_name)\n print(\"inside usedetails update\")\n selected3 = \", \".join(map(str, select3))\n selected2 = \", \".join(map(str, select2))\n response = table_detail.update_item(\n Key={'user_name': userInfo.user_name},\n UpdateExpression=\"set business_name= :a, home_lat= :b,home_lng= :c,work_lat= :d,work_lng= :e,commute_type= :f,inter_stop= :g,commute_change= :h,transport_mode= :i,car_year= :j,car_make= :k,car_model= :l,upgrade_vehicle= :m,parkpay= :n\",\n ExpressionAttributeValues={\n ':a': userInfo.business_name,\n ':b': userInfo.home_lat,\n ':c': userInfo.home_lng,\n ':d': userInfo.work_lat,\n ':e': userInfo.work_lng,\n ':f': selected3,\n ':g': userInfo.inter_stop,\n ':h': userInfo.commute_change,\n ':i': selected2,\n ':j': userInfo.car_years,\n ':k': userInfo.car_makes,\n ':l': userInfo.car_models,\n ':m': userInfo.upgrade_vehicle,\n ':n': userInfo.parkpay\n },\n ReturnValues=\"UPDATED_NEW\"\n )\n return True\n\n\ndef insert_into_favorite_spots_updateh(lat, lng, user_name, name):\n print(lat, lng, user_name.user_name, name)\n response = table_spots.update_item(\n Key={'user_name': user_name.user_name, 'spot_name': name},\n UpdateExpression=\"set geo_lat= :b, geo_lng= :c\",\n ExpressionAttributeValues={\n ':b': lat,\n ':c': lng\n },\n ReturnValues=\"UPDATED_NEW\"\n )\n return True\n\ndef insert_into_time_stamp(user_name, select3, select2, select1, select):\n\n response = table_time.update_item(\n Key={'user_name': user_name},\n UpdateExpression=\"set april=:a, last_date_april= :d, last_date_may= :c, may =:b\",\n ExpressionAttributeValues={\n ':a': select3,\n ':d': select2,\n ':c': select1,\n ':b': select\n },\n ReturnValues=\"UPDATED_NEW\"\n )\n return True\n\n\ndef 
insert_into_favorite_spots_updatew(lat, lng, user_name, name):\n print(lat, lng, user_name.user_name, name)\n response = table_spots.update_item(\n Key={'user_name': user_name.user_name, 'spot_name': name},\n UpdateExpression=\"set geo_lat= :b, geo_lng= :c\",\n ExpressionAttributeValues={\n ':b': lat,\n ':c': lng\n },\n ReturnValues=\"UPDATED_NEW\"\n )\n return True\n\n\ndef insert_into_route_details1_update(lat1, lng1, user_name, name1, lat2, lng2, name2, lat3, lng3, name3):\n print(lat1, lng1, user_name, name1, lat2, lng2, name2, lat3, lng3, name3)\n\n response = table_routedetails.update_item(\n Key={'user_name': user_name.user_name},\n UpdateExpression=\"set inter_stop1= :a, inter1_lat= :b,inter1_lng= :c,inter_stop2= :d,inter2_lat= :e,inter2_lng= :f,inter_stop3= :g,inter3_lat= :h,inter3_lng= :i\",\n ExpressionAttributeValues={\n ':a': name1,\n ':b': lat1,\n ':c': lng1,\n ':d': name2,\n ':e': lat2,\n ':f': lng2,\n ':g': name3,\n ':h': lat3,\n ':i': lng3,\n\n },\n ReturnValues=\"UPDATED_NEW\"\n )\n\n return True\n\n\ndef insert_into_favorite_spots(lat, lng, user_name, name):\n print(lat, lng, user_name, name)\n try:\n\n table_spots.put_item(\n Item={\n 'user_name': user_name,\n 'spot_name': name,\n 'geo_lat': lat,\n 'get_lng': lng\n }\n )\n except Exception as E:\n print(E)\n return False\n return True\n\n\ndef insert_into_table_time(user_name, last_date, time):\n print(last_date)\n try:\n\n table_time.put_item(\n Item={\n 'user_name': user_name,\n 'april': time,\n 'last_date_april': last_date,\n\n }\n )\n except Exception as E:\n print(E)\n return False\n return True\n\n\ndef insert_into_table_time1(user_name, last_date, time):\n\n response = table_time.update_item(\n Key={'user_name': user_name},\n UpdateExpression=\"set may= :b, last_date_may= :c\",\n ExpressionAttributeValues={\n ':b': time,\n ':c': last_date\n },\n ReturnValues=\"UPDATED_NEW\"\n )\n return True\n\n\ndef insert_into_route_details1(lat1, lng1, user_name, name1, lat2, lng2, name2, lat3, lng3, name3):\n print(lat1, lng1, user_name, name1, lat2, lng2, name2, lat3, lng3, name3)\n try:\n\n table_routedetails.put_item(\n Item={\n 'user_name': user_name,\n 'inter_stop1': name1,\n 'inter1_lat': lat1,\n 'inter1_lng': lng1,\n 'inter_stop2': name2,\n 'inter2_lat': lat2,\n 'inter2_lng': lng2,\n 'inter_stop3': name3,\n 'inter3_lat': lat3,\n 'inter3_lng': lng3,\n\n }\n )\n except Exception as E:\n print(E)\n return False\n return True\n\n\ndef update_into_userinfo(emailAddress):\n response = table.update_item(\n Key={'email_address': emailAddress},\n UpdateExpression=\"set flag = :r\",\n ExpressionAttributeValues={\n ':r': \"true\"\n },\n ReturnValues=\"UPDATED_NEW\"\n )\n\n return True\n\n\ndef update_into_userinfo_pwd(emailAddress, pwd):\n response = table.update_item(\n Key={'email_address': emailAddress},\n UpdateExpression=\"set password = :r\",\n ExpressionAttributeValues={\n ':r': pwd\n },\n ReturnValues=\"UPDATED_NEW\"\n )\n return True\n\n\ndef update_into_userinfo1(emailAddress):\n response = table.update_item(\n Key={'email_address': emailAddress},\n UpdateExpression=\"set common_stops = :r\",\n ExpressionAttributeValues={\n ':r': \"true\"\n },\n ReturnValues=\"UPDATED_NEW\"\n )\n\n return True\n\n\ndef check_record_present(emailAddress):\n response = table.get_item(\n Key={\n 'email_address': emailAddress,\n })\n if response is not None and 'Item' in response:\n return True\n return False\n\n\ndef get_fav_spot_record(user_name):\n response = table_spots.query(\n KeyConditionExpression=Key('user_name').eq(user_name)\n 
)\n print(response)\n if response is not None and 'Items' in response:\n return response['Items']\n return []\n\n\ndef get_businessName(user_name):\n response = table_detail.query(\n KeyConditionExpression=Key('user_name').eq(user_name)\n\n )\n print(response)\n if response is not None and 'Items' in response:\n return response['Items']\n return None\n\ndef get_times(user_name):\n response = table_time.query(\n KeyConditionExpression=Key('user_name').eq(user_name)\n\n )\n print(response)\n if response is not None and 'Items' in response:\n return response['Items']\n return None\n\n\n\ndef gettimestamp(user_name):\n response = table_traceroute.query(\n KeyConditionExpression=Key('user_name').eq(user_name)\n )\n\n if response is not None and 'Items' in response:\n return response['Items']\n return None\n\n\ndef get_bucket_time(user, select):\n try:\n bucket_list = []\n\n bucket = conn.get_bucket('bcosapp')\n # paths= os.path.join(select+'/').replace(\"\\\\\",\"/\")\n\n for key in bucket.list(prefix=select, delimiter='/'):\n bucket_list.append(key.last_modified)\n except Exception as E:\n return False\n return bucket_list\n\ndef get_bucket_time1(select1, select):\n try:\n bucket_list = []\n flag=0\n bucket = conn.get_bucket('bcosapp')\n # paths= os.path.join(select+'/').replace(\"\\\\\",\"/\")\n\n for key in bucket.list(prefix=select1):\n if(select in key.last_modified):\n bucket_list.append(key.last_modified)\n else:\n flag = 1\n except Exception as E:\n return False\n return bucket_list\n\n\ndef get_bucket_name(user, select):\n try:\n bucket_list = []\n\n bucket = conn.get_bucket('bcosapp')\n # paths= os.path.join(select+'/').replace(\"\\\\\",\"/\")\n\n for key in bucket.list(prefix=select, delimiter='/'):\n bucket_list.append(key.name)\n\n except Exception as E:\n return False\n return bucket_list\n\ndef get_bucket_name1(select1, select):\n try:\n bucket_list = []\n flag = 0\n bucket = conn.get_bucket('bcosapp')\n # paths= os.path.join(select+'/').replace(\"\\\\\",\"/\")\n\n for key in bucket.list(prefix=select1):\n if(select in key.last_modified):\n bucket_list.append(key.name)\n else:\n flag = 1\n except Exception as E:\n return False\n return bucket_list\n\n\ndef get_data(times):\n try:\n bucket_list = []\n bucket = conn.get_bucket('bcosapp')\n # paths= os.path.join(select+'/').replace(\"\\\\\",\"/\")\n\n k = keys(bucket)\n k.key = times\n\n testfile = k.get_contents_as_string().decode('utf-8')\n\n except Exception as E:\n return False\n return testfile\n\n\ndef getFiletime(dtms):\n seconds, micros = divmod(dtms, 1000000)\n days, seconds = divmod(seconds, 86400)\n return datetime.datetime(1601, 1, 1) + datetime.timedelta(days, seconds, micros)\n\n\ndef get_record(emailAddress):\n response = table.get_item(\n Key={\n 'email_address': emailAddress,\n })\n if response is not None and 'Item' in response:\n return response['Item']\n return None\n\n\ndef check_login_user(emailAddress, password):\n if emailAddress is None or password is None:\n return False\n response = table.query(KeyConditionExpression=(Key('email_address').eq(emailAddress)))\n if response is not None and len(response['Items']) == 0:\n return False\n if response is not None and 'Items' in response and response['Items'][0]['password'] == password:\n return True\n return False\n\n\ndef get_address(lat, lng):\n try:\n gmaps = googlemaps.Client(key='AIzaSyCgn7CkhyDvoLxTXx_iyVle4W_gPCYVD_Q')\n location = gmaps.reverse_geocode((lat, lng))\n except Exception as E:\n return None\n return location[0]['formatted_address']\n\n\ndef 
main():\n print(get_address(\"29.7604267\", \"29.7604267\"))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"app/infra/infrastructure.py","file_name":"infrastructure.py","file_ext":"py","file_size_in_byte":12792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"170474022","text":"from types import List\n\nclass Solution:\n def wordBreak(self, s: str, wordDict):\n def backtrack(res, ss):\n if ss == \"\":\n return res\n for w in wordDict:\n if ss.startswith(w):\n tt = ss[len(w) - 1 :]\n backtrack()\n \n return 0\n return 0\n\n def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:\n def hasEdge(word1: str, word2: str)->bool:\n cnt = 0\n for i in range(len(word1)):\n if word1[i] != word2[i]:\n cnt += 1\n return cnt == 1\n \n res = []\n visited = []\n def search(start, visited, path, res):\n pass \n \n\n return res\n\n\nsol = Solution()\nr = sol.wordBreak(\"catsanddog\", [\"cat\",\"cats\",\"and\",\"sand\",\"dog\"])\nprint(r)\n\n","sub_path":"wordbreak.py","file_name":"wordbreak.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"162986206","text":"import numpy as np\n\"\"\"\nSeveral methods to perform pre-translation\nAbstracted into this file for ease in applying the heuristics, and potentially increase readability.\n\ninput to heuristic: a tokenized line ( a list of tokens), 'alignment-values' for all words in the line\n\noutput from heuristic: a list with the replacement heuristic applied to each word\n\nGenerating a dictionary at every call is probably expensive, but God no go shame us /\\. Mek I finish project first\n\"\"\"\ndef soft_max (x): \n return np.exp(x) /np.sum(np.exp(x), axis = 0)\n\n\"\"\"\ndef heuristic_1 (align_dict, word_list, most_common_words, unique_words ) : \n for (idx, word) in enumerate(word_list):\n\n alignment_softs = align_dict[word]['soft']\n alignment_words = align_dict[word]['words']\n \n if (word in most_common_words and word in unique_words) or (word in most_common_words and most_common_words.index(word) < 75) :\n choice = [x for x in range(0, len(alignment_words))]\n chosen = np.random.choice(choice, min(2, len(choice)), alignment_softs).tolist()\n\n rep = \"\"\n rep = [(rep + alignment_words[x] + \" \") for x in chosen]\n rep = \"\".join(p for p in rep)\n rep = rep.strip()\n word_list[idx] = rep \n\n return word_list\n\"\"\"\ndef heuristic_2 (align_dict, word_list, most_common_words, unique_words, lookup_range = 1 ): \n\n\n return_list = []\n\n for (idx, word) in enumerate(word_list):\n\n if word not in align_dict.keys(): \n return_list.append(word)\n continue \n\n align_words = align_dict[word]['words']\n align_probs = [float(x) for x in align_dict[word]['probs']]\n #print(align_probs)\n\n # implement edge case where look_left or look_right is not possible\n check_range = [x for x in range (-lookup_range, lookup_range + 1)]\n replace_probs = np.zeros((2 * lookup_range + 1, len(align_words)))\n replace_probs[lookup_range] = align_probs\n\n for i in check_range: \n\n try: \n check_idx = idx + i\n word_list[check_idx] = word_list[check_idx]\n except: \n continue\n\n if word_list[check_idx] in align_dict.keys():\n check_key = word_list[check_idx]\n check_words = align_dict[check_key]['words']\n check_probs = align_dict[check_key]['probs']\n\n for (word_prob, word) in (zip(check_words, check_probs)): \n if word in align_words: \n\n align_word_idx = 
+{"seq_id":"162986206","text":"import numpy as np\n\"\"\"\nSeveral methods to perform pre-translation\nAbstracted into this file for ease in applying the heuristics, and potentially increase readability.\n\ninput to heuristic: a tokenized line ( a list of tokens), 'alignment-values' for all words in the line\n\noutput from heuristic: a list with the replacement heuristic applied to each word\n\nGenerating a dictionary at every call is probably expensive, but God no go shame us /\\. Mek I finish project first\n\"\"\"\ndef soft_max (x): \n    return np.exp(x) /np.sum(np.exp(x), axis = 0)\n\n\"\"\"\ndef heuristic_1 (align_dict, word_list, most_common_words, unique_words ) : \n    for (idx, word) in enumerate(word_list):\n\n        alignment_softs = align_dict[word]['soft']\n        alignment_words = align_dict[word]['words']\n        \n        if (word in most_common_words and word in unique_words) or (word in most_common_words and most_common_words.index(word) < 75) :\n            choice = [x for x in range(0, len(alignment_words))]\n            chosen = np.random.choice(choice, min(2, len(choice)), alignment_softs).tolist()\n\n            rep = \"\"\n            rep = [(rep + alignment_words[x] + \" \") for x in chosen]\n            rep = \"\".join(p for p in rep)\n            rep = rep.strip()\n            word_list[idx] = rep  \n\n    return word_list\n\"\"\"\ndef heuristic_2 (align_dict, word_list, most_common_words, unique_words, lookup_range = 1 ): \n\n\n    return_list = []\n\n    for (idx, word) in enumerate(word_list):\n\n        if word not in align_dict.keys(): \n            return_list.append(word)\n            continue \n\n        align_words = align_dict[word]['words']\n        align_probs = [float(x) for x in align_dict[word]['probs']]\n        #print(align_probs)\n\n        # implement edge case where look_left or look_right is not possible\n        check_range = [x for x in range (-lookup_range, lookup_range + 1)]\n        replace_probs = np.zeros((2 * lookup_range + 1, len(align_words)))\n        replace_probs[lookup_range] = align_probs\n\n        for i in check_range: \n\n            check_idx = idx + i\n            # guard both ends explicitly: a bare try/except does not catch\n            # negative indices, which silently wrap around to the end of word_list\n            if check_idx < 0 or check_idx >= len(word_list):\n                continue\n\n            if word_list[check_idx] in align_dict.keys():\n                check_key = word_list[check_idx]\n                check_words = align_dict[check_key]['words']\n                check_probs = align_dict[check_key]['probs']\n\n                # zip yields (word, probability) pairs, in that order\n                for (word, word_prob) in (zip(check_words, check_probs)): \n                    if word in align_words: \n\n                        align_word_idx = align_words.index(word)\n\n                        replace_probs [ (lookup_range + i ), align_word_idx ] = word_prob\n\n        # triangular weights sized to the 2 * lookup_range + 1 window; a fixed\n        # [[1], [2], [4], [2], [1]] only fits lookup_range == 2\n        weights = np.array([[2 ** (lookup_range - abs(i))] for i in check_range])\n        #weights = np.array([[0.25],[0.5],[1],[2],[1],[0.5],[0.25]])\n        decision_probs = soft_max ( np.sum( replace_probs * weights , axis = 0) ).tolist()\n        #print(decision_probs)\n\n        sort_index = np.argsort(decision_probs)[::-1]\n        max_prob = max(decision_probs)\n        rep_token = ''\n\n        for sort_idx in sort_index: \n            if decision_probs[sort_idx] >= (max_prob/1.2): \n                rep_token = rep_token + \" \" + align_words[sort_idx]\n            else: \n                break\n        \n        rep_token = rep_token.strip()\n        #max_index = decision_probs.index(max(decision_probs))\n        #rep_token = align_words[max_index]\n        #decision_probs[max_index] = 0\n\n        #max_index = decision_probs.index(max(decision_probs))\n\n        #rep_token = align_words[max_index]\n        #decision_probs(max_index) = 0\n\n        return_list.append(rep_token)\n\n    return return_list\n\n\n# calculating probabilities: soft_max (2*prob(word_list) + prob(word_list +- 1) + 0.5*prob(word_list +-2))\n# decision factor: If prob(word) > prob(highest_word)/decision_factor: include word\n# if word is in 2 left of list or 2 right of list, don't include word, include second word\n# \n\nif __name__ == \"__main__\":\n    pass\n\n\"\"\"\nRobot Programmable, multifunctional manipulator designed to move materials, parts, tools, etc through variable programmed motions to perform\na variety of tasks \n\nWhat makes a Machine a Robot\n\n    Programmability is the defining feature of a robot\n\n    Capacity to perform a variety of tasks (depending on how it's been programmed)\n\n    Ability to move in a variety of ways to carry out its task\n\nClassification of Robots\n** according to Japanese Industrial Robots Association **\n\n\n    1. Manual Robots: | Multiple degrees of freedom, but all actions are performed under direct control of a human operator. Lack Autonomy\n    2. Fixed sequence robots: | Repeat actions (the actions are in a fixed sequence). No need for operator control. Have Autonomy\n    3. Variable sequence robots:| Sequence of actions can be reprogrammed easily. Can be easily programmed to perform new tasks\n    4. Numerical robots: | Controlled via numerical information\n    5. Playback robots.\n    6. 
Intelligent Robots\n\n\n\"\"\"\n","sub_path":"pretranslation/replacement_heuristics.py","file_name":"replacement_heuristics.py","file_ext":"py","file_size_in_byte":5093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"328629980","text":"# coding=utf-8\n#!/usr/bin/env python\n\nfrom scapy.all import *\n\nsrcmac = '00:00:de:ad:be:ef'\ndstmac = 'ff:ff:ff:ff:ff:ff'\nbssid = '00:11:22:33:44:55'\n\n# short preamble, not wpa/wep, short timeslot\nbeacon = Dot11Beacon(cap=0x2104)\nssid = Dot11Elt(ID=\"SSID\",info=\"MikeHasASmallAntena\")\nrates = Dot11Elt(ID=\"Rates\",info=\"\\x82\\x84\\x8b\\x96\\x24\\x30\\x48\\x6c\")\ndsset = Dot11Elt(ID=\"DSset\",info=\"\\x03\")\ntim = Dot11Elt(ID=\"TIM\",info=\"\\x00\\x01\\x00\\x00\") #no buffered traffic\n\npkt = RadioTap()/Dot11(type=0,subtype=8,addr1=dstmac)","sub_path":"v2/beacon-802.py","file_name":"beacon-802.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"113603754","text":"# python3.5+\nimport subprocess\nimport json\nimport re\n\ndef find_matches(sentence):\n content = '{\\\"text\\\":\\\"%s\\\"}' % (sentence)\n uri = 'https://ncr.ccm.sickkids.ca/curr/annotate/'\n\n command_list = ['curl', '-i', '-H', 'Content-Type: application/json', '-X', 'POST', '-d', content, uri]\n result = subprocess.run(command_list, stdout=subprocess.PIPE) \n\n return result.stdout\n\ndef parse_terms(matches, sentence):\n terms = []\n matches_string = str(matches).split('\\\\n')[-2]\n print(matches_string)\n matches_string = re.sub(r'(? Optional[str]:\n if os.environ.get(\"MOCK_GEDCOM_UPLOAD_RESULT\") is not None:\n return os.environ.get(\"MOCK_GEDCOM_UPLOAD_RESULT\")\n\n try:\n blob_service_client = BlobServiceClient.from_connection_string(AZURE_CONNECTION_STRING)\n blob: BlobClient = blob_service_client \\\n .get_container_client(AZURE_CONTAINER_NAME) \\\n .upload_blob(BlobProperties(name=file_name, blob_type=\"BlockBlob\"), data)\n sas_token = generate_blob_sas(\n blob_service_client.account_name,\n AZURE_CONTAINER_NAME,\n file_name,\n account_key=blob_service_client.credential.account_key,\n permission=BlobSasPermissions(read=True),\n expiry=datetime.utcnow() + timedelta(hours=1))\n\n return blob.url + \"?\" + sas_token\n except Exception as e:\n logger.exception(\"Failed to upload to Azure storage: {}\".format(e))\n return None\n","sub_path":"src/storage/azure_client.py","file_name":"azure_client.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"500198212","text":"import pygame\r\nfrom animations import Animations\r\nfrom settings import Settings\r\nfrom health_bars import HealthBar\r\nfrom energy_ball import EnergyBall\r\n\r\n\r\nclass StickMan(pygame.sprite.Sprite):\r\n \"\"\"Overall class to handle the behaviour and assets of\r\n a stickman character\r\n \"\"\"\r\n\r\n def __init__(self, game):\r\n pygame.sprite.Sprite.__init__(self)\r\n # Instances\r\n self.animation = Animations()\r\n self.settings = Settings()\r\n self.health_bar = HealthBar(game, self)\r\n self.game = game\r\n\r\n # Bools\r\n self.standing = True\r\n self.facing_right = True\r\n self.facing_left = False\r\n self.walking = False\r\n self.jumping = False\r\n self.falling = True\r\n self.crouching = False\r\n self.punching = False\r\n self.is_hit = False\r\n self.kicking = False\r\n self.doing_energy_ball = False\r\n\r\n # Character\r\n self.image = 
self.animation.idle_ani[self.animation.current_sprite]\r\n self.rect = self.image.get_rect()\r\n self.rect.y = 100\r\n self.health = 100\r\n self.energy = 100\r\n\r\n self.vel = 0\r\n\r\n # Health\r\n self.health = 100\r\n\r\n self.energy_balls = pygame.sprite.Group()\r\n\r\n def walk(self, direction):\r\n \"\"\"A function to handle the stickman's walking behaviour.\"\"\"\r\n if not self.doing_energy_ball:\r\n self.rect.x += self.settings.walk_speed * direction\r\n # Handle animation\r\n if not self.jumping and not self.falling and not self.punching and not self.kicking and not\\\r\n self.doing_energy_ball:\r\n self.animate(self.animation.walking_ani)\r\n\r\n def idle(self):\r\n if self.standing and not self.punching and not self.kicking and not self.doing_energy_ball:\r\n self.animate(self.animation.idle_ani)\r\n\r\n def jump(self):\r\n \"\"\"Handle the jump behaviour.\"\"\"\r\n self.standing = False\r\n self.punching = False\r\n self.jumping = True\r\n self.falling = False\r\n self.current_y = float(self.rect.y)\r\n self.vel = -50\r\n\r\n def _handle_jumping(self):\r\n if self.falling:\r\n # Draw animation\r\n if self.facing_right:\r\n self.image = self.animation.jumping_ani\r\n elif self.facing_left:\r\n self.image = pygame.transform.flip(self.animation.jumping_ani, True, False)\r\n self.image.set_colorkey((255, 255, 255))\r\n\r\n self.vel = 1.1\r\n self.rect.y += self.settings.gravity * self.vel\r\n elif self.jumping:\r\n if self.rect.y > self.current_y - self.settings.jump_limit:\r\n # Draw animation\r\n if self.facing_right:\r\n self.image = self.animation.jumping_ani\r\n elif self.facing_left:\r\n self.image = pygame.transform.flip(self.animation.jumping_ani, True, False)\r\n self.image.set_colorkey((255, 255, 255))\r\n\r\n self.vel -= 1\r\n self.rect.y += self.vel\r\n\r\n else:\r\n self.falling = True\r\n self.jumping = False\r\n\r\n def check_key_downs(self):\r\n # Key Downs\r\n keys = pygame.key.get_pressed()\r\n if keys[pygame.K_d] and self.rect.x < self.settings.screen_width - self.rect.width - 30:\r\n self.facing_right = True\r\n self.facing_left = False\r\n if not self.jumping and not self.falling:\r\n self.standing = False\r\n self.walking = True\r\n self.walk(1)\r\n elif keys[pygame.K_a] and self.rect.x > 0:\r\n self.facing_left = True\r\n self.facing_right = False\r\n if not self.jumping and not self.falling:\r\n self.standing = False\r\n self.walking = True\r\n self.walk(-1)\r\n elif keys[pygame.K_s] and not self.jumping and not self.falling and not self.punching and not self.kicking\\\r\n and not self.doing_energy_ball:\r\n self.standing = False\r\n self.walking = False\r\n self.crouch()\r\n\r\n def check_key_ups(self, event):\r\n # Key UPS\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_d:\r\n if not self.jumping and not self.falling:\r\n self.standing = True\r\n self.walking = False\r\n elif event.key == pygame.K_a:\r\n if not self.jumping and not self.falling:\r\n self.standing = True\r\n self.walking = False\r\n elif event.key == pygame.K_s:\r\n self.standing = True\r\n self.crouching = False\r\n\r\n def crouch(self):\r\n self.rect.y = 384 # 360\r\n self.crouching = True\r\n if self.facing_right:\r\n self.image = self.animation.crouch_ani\r\n elif self.facing_left:\r\n self.image = pygame.transform.flip(self.animation.crouch_ani, True, False)\r\n self.image.set_colorkey((255, 255, 255))\r\n\r\n def punch(self):\r\n if self.health >= 0 and not self.crouching and not self.kicking and not self.doing_energy_ball:\r\n self.animation.current_sprite = 0\r\n 
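# Added note (editorial, not in the original file): resetting current_sprite to 0 restarts the punch animation at its first frame; animate_punch() below then advances one frame per update() until the swing completes.\r\n            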
self.standing = False\r\n self.punching = True\r\n\r\n def kick(self):\r\n if self.health >= 0 and not self.crouching and not self.punching and not self.doing_energy_ball:\r\n self.animation.current_sprite = 0\r\n self.standing = False\r\n self.kicking = True\r\n\r\n def animate_kick(self):\r\n self.animation.current_sprite += 1\r\n if self.animation.current_sprite == len(self.animation.kick_ani):\r\n self.animation.current_sprite = 0\r\n self.kicking = False\r\n self.standing = True\r\n if self.facing_left:\r\n self.rect.x += self.animation.kick_rect.width - self.animation.idle_rect.w - 50\r\n if self.facing_right:\r\n self.image = self.animation.kick_ani[self.animation.current_sprite]\r\n elif self.facing_left:\r\n self.image = pygame.transform.flip(self.animation.kick_ani[self.animation.current_sprite],\r\n True, False)\r\n if self.animation.current_sprite == 5:\r\n self.rect.x -= self.animation.kick_rect.width - self.animation.idle_rect.w - 50\r\n self.image.set_colorkey((255, 255, 255))\r\n\r\n def animate_punch(self):\r\n self.animation.current_sprite += 1\r\n if self.animation.current_sprite == len(self.animation.punch_ani):\r\n self.animation.current_sprite = 0\r\n self.punching = False\r\n self.standing = True\r\n if self.facing_left:\r\n self.rect.x += self.animation.punch_rect.width - self.animation.idle_rect.w\r\n if self.facing_right:\r\n self.image = self.animation.punch_ani[self.animation.current_sprite]\r\n elif self.facing_left:\r\n self.image = pygame.transform.flip(self.animation.punch_ani[self.animation.current_sprite],\r\n True, False)\r\n if self.animation.current_sprite == 3:\r\n self.rect.x -= self.animation.punch_rect.width - self.animation.idle_rect.w\r\n self.image.set_colorkey((255, 255, 255))\r\n\r\n def energy_ball(self):\r\n if self.health >= 0 and not self.kicking and not self.punching and not self.crouching and not self.energy_balls:\r\n self.animation.current_sprite = 0\r\n self.doing_energy_ball = True\r\n self.standing = False\r\n\r\n def animate_energy_ball(self):\r\n self.animation.current_sprite += 1\r\n if self.animation.current_sprite == len(self.animation.energy_ball_ani):\r\n self.doing_energy_ball = False\r\n self.animation.current_sprite = 0\r\n self.standing = True\r\n if self.facing_right:\r\n self.image = self.animation.energy_ball_ani[self.animation.current_sprite]\r\n elif self.facing_left:\r\n self.image = pygame.transform.flip(self.animation.energy_ball_ani[self.animation.current_sprite],\r\n True, False)\r\n #if self.animation.current_sprite in range(5, 22):\r\n # self.rect.width = self.animation.energy_ball_rect.width - 70\r\n # self.rect.x -= 2.5\r\n if self.animation.current_sprite == 8:\r\n self.current_energy_ball = EnergyBall(self, self.game)\r\n self.energy_balls.add(self.current_energy_ball)\r\n self.image.set_colorkey((255, 255, 255))\r\n\r\n def animate(self, ani_list):\r\n self.animation.current_sprite += 1\r\n if self.animation.current_sprite >= len(ani_list):\r\n self.animation.current_sprite = 0\r\n self.punching = False\r\n if self.facing_right:\r\n self.image = ani_list[self.animation.current_sprite]\r\n elif self.facing_left:\r\n self.image = pygame.transform.flip(ani_list[self.animation.current_sprite],\r\n True, False)\r\n self.image.set_colorkey((255, 255, 255))\r\n\r\n def _check_collisions(self, game):\r\n \"\"\"Handle collisions between sprites\"\"\"\r\n ground_hit_1 = pygame.sprite.spritecollide(self, game.platforms, False)\r\n if ground_hit_1:\r\n self.rect.bottom = ground_hit_1[0].rect.top + 3\r\n self.vel = 
0\r\n            self.falling = False\r\n            self.standing = True\r\n        player_col = pygame.sprite.collide_mask(self, game.player2)\r\n        if player_col and self.punching and self.animation.current_sprite == 3:\r\n            print(\"COLLISION!!\")\r\n            game.player2.health -= self.settings.punch_damage\r\n        elif player_col and self.kicking and self.animation.current_sprite == 5:\r\n            print(\"COLLISION!!\")\r\n            game.player2.health -= self.settings.kick_damage\r\n        energy_coll = pygame.sprite.spritecollide(game.player2, self.energy_balls, True)\r\n        if energy_coll:\r\n            game.player2.health -= self.settings.energy_ball_damage\r\n\r\n    def update_health(self):\r\n        self.health_bar.blitme()\r\n\r\n    def update(self, game):\r\n        # self.rect.height = self.image.get_rect().height\r\n        self.rect.width = self.image.get_rect().width\r\n        self.mask = pygame.mask.from_surface(self.image)\r\n        self._handle_jumping()\r\n        self._check_collisions(game)\r\n        self.check_key_downs()\r\n        if self.punching:\r\n            self.animate_punch()\r\n        if self.kicking:\r\n            self.animate_kick()\r\n        if self.doing_energy_ball:\r\n            self.animate_energy_ball()\r\n        if self.energy_balls:\r\n            self.current_energy_ball.update_me()\r\n        if self.health <= 0:\r\n            self.kill()\r\n        self.idle()\r\n","sub_path":"Stickman fight/stickman.py","file_name":"stickman.py","file_ext":"py","file_size_in_byte":10909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"320906011","text":"\nimport numpy as np\n\nentradas = np.array([[0,0], [0, 1], [1, 0], [1, 1]])\nsaidas = np.array([0,0,0,1])\n\npesos = np.array([0.0, 0.0])\ntaxaAprendizagem = 0.1\n\n# Network activation function | StepFunction\ndef stepFunction(soma):\n    if (soma >= 1):\n        return 1\n    return 0\n\ndef calculaSaida(registro):\n    saida = registro.dot(pesos)\n    return(stepFunction(saida))\n\n\ndef treinar():\n    erroTotal = 1\n    while erroTotal != 0:\n        erroTotal = 0\n        for i in range(len(saidas)):\n            \n            saidaCalculada = calculaSaida(np.asarray(entradas[i]))\n            erro = saidas[i] - saidaCalculada\n            # accumulate the absolute error so +1 and -1 errors cannot cancel out\n            erroTotal += abs(erro)\n            for j in range(len(pesos)):\n                pesos[j] = pesos[j] + (taxaAprendizagem * entradas[i][j] * erro)\n                print(f'Updated weights: {pesos[j]}')\n        \n        print(f'Total errors: {erroTotal}')\n\ntreinar()\n","sub_path":"03-Perceptron-ajuste-pesos.py","file_name":"03-Perceptron-ajuste-pesos.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
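# Illustrative aside (editorial, not a dataset record): for the AND data in the
# record above, starting from pesos = [0.0, 0.0] with learning rate 0.1 the loop
# settles at pesos = [0.5, 0.5]: 0.5 + 0.5 >= 1 fires only for the input [1, 1].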
+{"seq_id":"311330368","text":"\"\"\"\n19.\tWrite a program that asks n people for their age; at the end the program\n    must check whether the class's average age falls between 0 and 25, 26 and 60,\n    or above 60, and then say whether the class is young, adult or elderly,\n    according to the computed average.\n\"\"\"\nsoma = 0\nturma = int(input(\"Enter how many people will be registered: \"))\nwhile turma <= 0:\n    print(\"At least one member must be entered.\")\n    turma = int(input(\"Enter how many people will be registered: \"))\nfor c in range(turma):\n    aluno = int(input(f\"Enter the age of student {c +1}: \"))\n    while aluno < 0:\n        print(\"Error... negative values cannot be entered.\")\n        aluno = int(input(f\"Enter the age of student {c +1}: \"))\n    soma += aluno\nmedia = soma / turma\nprint(\"==\"*20)\nprint(f\"The average age of a class of {turma} is {media}\")\nif 0 <= media <= 25:\n    print(\"The class is young.\")\nelif 26 <= media <= 60:\n    print(\"The class is adult.\")\nelse:\n    print(\"The class is elderly.\")\n\n","sub_path":"Lista03/ex019.py","file_name":"ex019.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"520970883","text":"from keras.callbacks import TensorBoard\nfrom keras import backend\nfrom model import relu_model, tanh_model, relu_with_scaled_sigmoid_model\nfrom test import evaluate_model\nfrom save_load import save, get_last_file_number\nfrom preprocess import get_test_train_data\nfrom utils import print_model_summary\nimport tensorflow as tf\nimport sys, os\n\nif __name__ == '__main__':\n\n    # # Print output to file\n    # outfolder = 'exp_' + '{0:03d}'.format(get_last_file_number(prefix='exp_', suffix='') + 1); os.makedirs(outfolder)\n    # outfile = outfolder + '/' + 'train_' + '{0:03d}'.format(get_last_file_number(path=outfolder) + 1) + '.log'\n    # print('Printing to logfile at', outfile)\n    # sys.stdout = open(outfile, 'w+')\n\n    # Add title to logfile for identification\n    # if len(sys.argv)>1: print('Title:',sys.argv[1],'\\n\\n')\n\n    # Get test and train data\n    file = os.environ['DATA_DIR']+ \"/dataset_mini.pz\"\n    x_train, x_test, y_train, y_test = get_test_train_data(file, 2000, tanh=False)\n    # x_train, x_test, y_train, y_test = get_test_train_data(file, 1000, tanh=False)\n\n    # a single learning rate shared across the sweep; i indexes models, j indexes drop_rates\n    learning_rate = 0.0005\n    models = ['tanh', 'relu_with_scaled_sigmoid']\n    drop_rates = [0.1, 0.2, 0.3, 0.4]\n\n    for i in range(len(models)):\n        for j in range(len(drop_rates)):\n\n            # Print output to file\n            outfolder = 'exp_' + '{0:03d}'.format(get_last_file_number(prefix='exp_', suffix='') + 1); os.makedirs(outfolder)\n            outfile = outfolder + '/' + 'train_' + '{0:03d}'.format(get_last_file_number(path=outfolder) + 1) + '.log'\n            print('Printing to logfile at', outfile)\n            sys.stdout = open(outfile, 'w+')\n\n            print('Title:', '{}_adam_{}_dropout_rate_{}'.format(models[i], learning_rate, drop_rates[j]),'\\n\\n')\n\n\n            if models[i] == 'relu': model = relu_model(learning_rate=learning_rate)\n            elif models[i] == 'relu_with_scaled_sigmoid': model = relu_with_scaled_sigmoid_model(learning_rate=learning_rate, drop_rate=drop_rates[j])\n            elif models[i] == 'tanh': \n                model = tanh_model(learning_rate=learning_rate, drop_rate=drop_rates[j])\n                x_train = (x_train - 0.5) * 2 \n                x_test = (x_test - 0.5) * 2\n\n            # Print model summary\n            print_model_summary(model)\n\n            # Train model\n            tbCallBack = TensorBoard(log_dir=outfolder, histogram_freq=0, write_graph=True, write_images=True)\n            model.fit(x_train, y_train, validation_split=0.2, epochs=25, batch_size=250, callbacks=[tbCallBack])\n            save(model, path=outfolder)\n\n            # Evaluate model\n            scores = evaluate_model(model, x_test, y_test)\n\n            # Print scores\n            print('\\n\\n')\n            print(\"Loss: \", backend.get_value(scores[0]))\n            print(\"Accuracy: \", backend.get_value(scores[1])*100, \"%\")\n\n\n    # # Create model and print to log file\n    # model = tanh_model()\n    # # model = relu_model()\n    # print_model_summary(model)\n\n    # # Train model\n    # tbCallBack = TensorBoard(log_dir=outfolder, histogram_freq=0, write_graph=True, write_images=True)\n    # model.fit(x_train, y_train, validation_split=0.2, epochs=10, batch_size=250, callbacks=[tbCallBack])\n    
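# Editorial aside (not part of the original script): the nested i/j loops above are a plain grid search over models x drop_rates; itertools.product(models, drop_rates) would express the same sweep in a single loop.\n    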
# save(model, path=outfolder)\n\n    # # Evaluate model\n    # scores = evaluate_model(model, x_test, y_test)\n\n    # # Print scores\n    # print('\\n\\n')\n    # print(\"Loss: \", backend.get_value(scores[0]))\n    # print(\"Accuracy: \", backend.get_value(scores[1])*100, \"%\")\n","sub_path":"experiments/comparisons/dropout/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"238632634","text":"\"\"\"\n@author:Liushihao\n@time:2020/3/18:15:34\n@email:Liushihao_1224@163.com\n@describe: prints all three-digit narcissistic (Armstrong) numbers\n\"\"\"\ncount = 0\nfor i in range(100,1000):\n    a = i%10\n    b = i//10%10\n    c = i//100\n    if a**3+b**3+c**3==i:\n        print(i)\n        count+=1\n\nprint(\"There are %d narcissistic numbers in total\" % count)","sub_path":"chapter3/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"281934037","text":"import logging\nimport regex\nimport pickle\nimport bz2\nimport os\nfrom tqdm import tqdm\nfrom time import perf_counter\nfrom datetime import datetime\nfrom math import log\nfrom functools import wraps\n\nfrom . types import RegexMatch\nfrom . nb import NB\nfrom . rule import rules, _regex\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass TimeoutError(Exception):\n    pass\n\n\ndef _timeout(timeout):\n    start_time = perf_counter()\n\n    def _tt():\n        if timeout == 0:\n            return\n        if perf_counter() - start_time > timeout:\n            raise TimeoutError()\n    return _tt\n\n\ndef _timeit(f):\n    \"\"\"timeit wrapper, use as `timeit(f)(args)`\n\n    Will return a tuple (f(args), t) where t is the time in seconds the function call\n    took to run.\n\n    \"\"\"\n    @wraps(f)\n    def _wrapper(*args, **kwargs):\n        start_time = perf_counter()\n        res = f(*args, **kwargs)\n        return res, perf_counter() - start_time\n    return _wrapper\n\n\nclass StackElement:\n    '''A partial parse result with\n\n    * prod: the current partial production\n    * rules: the sequence of regular expressions and rules used/applied to produce prod\n    * score: the score assigned to this production\n    '''\n    @classmethod\n    def from_regex_matches(cls, regex_matches, txt_len):\n        '''Create new initial stack element based on a production that has not\n        yet been touched, i.e. 
it is only a sequence of matching\n regular expressions\n '''\n se = StackElement()\n se.prod = regex_matches\n se.rules = tuple(r.id for r in regex_matches)\n se.txt_len = txt_len\n se.max_covered_chars = se.prod[-1].mend - se.prod[0].mstart\n se.len_score = log(se.max_covered_chars/se.txt_len)\n se.update_score()\n\n logger.debug('='*80)\n logger.debug('-> checking rule applicability')\n # Reducing rules to only those applicable has no effect for\n # small stacks, but on larger there is a 10-20% speed\n # improvement\n se.applicable_rules, _ts = _timeit(se._filter_rules)(rules)\n logger.debug('of {} total rules {} are applicable in {}'.format(\n len(rules), len(se.applicable_rules), se.prod))\n logger.debug('time in _filter_rules: {:.0f}ms'.format(1000*_ts))\n logger.debug('='*80)\n\n return se\n\n def _filter_rules(self, rules):\n \"\"\"find all rules that can be applied to the current prod sequence\"\"\"\n def _hasNext(it):\n try:\n next(it)\n return True\n except StopIteration as e:\n return False\n\n return {rule_name: r for rule_name, r in rules.items()\n if _hasNext(_seq_match(self.prod, r[1]))}\n\n @classmethod\n def from_rule_match(cls, se_old, rule_name, match, prod):\n se = StackElement()\n se.prod = se_old.prod[:match[0]] + (prod,) + se_old.prod[match[1]:]\n se.rules = se_old.rules + (rule_name,)\n # Refiltering does not give a speedup - actually rather 10%\n # speed loss se.applicable_rules =\n #\n # se._filter_rules(se_old.applicable_rules)\n se.applicable_rules = se_old.applicable_rules\n se.txt_len = se_old.txt_len\n se.max_covered_chars = se.prod[-1].mend - se.prod[0].mstart\n se.len_score = log(se.max_covered_chars/se.txt_len)\n se.update_score()\n return se\n\n def update_score(self):\n if _nb.hasModel:\n self.score = _nb.apply(self.rules) + self.len_score\n else:\n self.score = 0.0\n\n def apply_rule(self, ts, rule, rule_name, match):\n '''Check whether the production in rule can be applied to this stack\n element. 
If yes, return a copy where this update is\n incorporated in the production, the record of applied rules\n and the score.\n '''\n # prod, prod_name, start, end):\n prod = rule[0](ts, *self.prod[match[0]:match[1]])\n if prod is not None:\n return StackElement.from_rule_match(self, rule_name, match, prod)\n else:\n return\n\n def __lt__(self, other):\n '''Sort stack elements by (a) the length of text they can\n (potentially) cover and (b) the score assigned to the\n production.\n\n a < b <=> a.max_covered_chars < b.max_covered_chars or\n (a.max_covered_chars <= b.max_covered_chars and a.score < b.score)\n '''\n return ((self.max_covered_chars < other.max_covered_chars) or\n (self.max_covered_chars == other.max_covered_chars and\n self.score < other.score))\n\n\nclass CTParse:\n def __init__(self, resolution, production, score):\n self.resolution = resolution\n self.production = production\n self.score = score\n\n def __repr__(self):\n return 'CTParse({}, {}, {})'.format(\n self.resolution, self.production, self.score)\n\n def __str__(self):\n return '{} s={:.3f} p={}'.format(self.resolution,\n self.score,\n self.production)\n\n\ndef _ctparse(txt, ts=None, timeout=0, relative_match_len=0, max_stack_depth=0):\n def get_score(seq, len_match):\n if _nb.hasModel:\n return _nb.apply(seq) + log(len_match/len(txt))\n else:\n return 0.0\n\n t_fun = _timeout(timeout)\n\n try:\n if ts is None:\n ts = datetime.now()\n logger.debug('='*80)\n logger.debug('-> matching regular expressions')\n p, _tp = _timeit(_match_regex)(txt)\n logger.debug('time in _match_regex: {:.0f}ms'.format(1000*_tp))\n\n logger.debug('='*80)\n logger.debug('-> building initial stack')\n stack, _ts = _timeit(_regex_stack)(txt, p, t_fun)\n logger.debug('time in _regex_stack: {:.0f}ms'.format(1000*_ts))\n # add empty production path + counter of contained regex\n stack = [StackElement.from_regex_matches(s, len(txt)) for s in stack]\n logger.debug('initial stack length: {}'.format(len(stack)))\n # sort stack by length of covered string and - if that is equal - score\n # --> last element is longest coverage and highest scored\n stack.sort()\n # only keep initial stack elements that cover at least\n # relative_match_len characters of what the highest\n # scored/covering stack element does cover\n stack = [s for s in stack\n if s.max_covered_chars >= stack[-1].max_covered_chars * relative_match_len]\n logger.debug('stack length after relative match length: {}'.format(len(stack)))\n # limit depth of stack\n stack = stack[-max_stack_depth:]\n logger.debug('stack length after max stack depth limit: {}'.format(len(stack)))\n\n # track what has been added to the stack and do not add again\n # if the score is not better\n stack_prod = {}\n # track what has been emitted and do not emit agin\n parse_prod = {}\n while stack:\n t_fun()\n s = stack.pop()\n logger.debug('-'*80)\n logger.debug('producing on {}, score={:.2f}'.format(s.prod, s.score))\n new_stack = []\n for r_name, r in s.applicable_rules.items():\n for r_match in _match_rule(s.prod, r[1]):\n # apply production part of rule\n new_s = s.apply_rule(ts, r, r_name, r_match)\n if new_s and stack_prod.get(new_s.prod, new_s.score - 1) < new_s.score:\n # either new_s.prod has never been produced\n # before or the score of new_s is higher than\n # a previous identical production\n new_stack.append(new_s)\n logger.debug(' {} -> {}, score={:.2f}'.format(\n r_name, new_s.prod, new_s.score))\n stack_prod[new_s.prod] = new_s.score\n if not new_stack:\n logger.debug('~'*80)\n logger.debug('no rules 
applicable: emitting')\n # no new productions were generated from this stack element.\n # emit all (probably partial) production\n for x in s.prod:\n if not isinstance(x, RegexMatch):\n # update score to be only relative to the text\n # match by the actual production, not the\n # initial sequence of regular expression\n # matches\n score_x = get_score(s.rules, len(x))\n # only emit productions not emitted before or\n # productions emitted before but scored higher\n if parse_prod.get(x, score_x - 1) < score_x:\n parse_prod[x] = score_x\n logger.debug(' => {}, score={:.2f}, '.format(\n x.__repr__(), score_x))\n yield CTParse(x, s.rules, score_x)\n else:\n # new productions generated, put on stack and sort\n # stack by highst score\n stack.extend(new_stack)\n stack.sort()\n stack = stack[-max_stack_depth:]\n logger.debug('added {} new stack elements, depth after trunc: {}'.format(\n len(new_stack), len(stack)))\n except TimeoutError as e:\n logger.debug('Timeout on \"{}\"'.format(txt))\n return\n\n\n_model_file = os.path.join(os.path.dirname(__file__), 'models', 'model.pbz')\nif os.path.exists(_model_file):\n logger.info('Loading model from {}'.format(_model_file))\n _nb = pickle.load(bz2.open(_model_file, 'rb'))\nelse:\n logger.warning('No model found, initializing empty model')\n _nb = NB()\n\n\n# replace all comma, semicolon, whitespace, invisible control, opening and closing brackets\n_repl1 = regex.compile(r'[,;\\pZ\\pC\\p{Ps}\\p{Pe}]+', regex.VERSION1)\n_repl2 = regex.compile(r'(\\p{Pd}|[\\u2010-\\u2015]|\\u2043)+', regex.VERSION1)\n\n\ndef _preprocess_string(txt):\n return _repl2.sub('-', _repl1.sub(' ', txt, concurrent=True).strip()).strip()\n\n\ndef ctparse(txt, ts=None, timeout=1.0, debug=False, relative_match_len=1.0, max_stack_depth=10):\n '''Parse a string *txt* into a time expression\n\n :param ts: reference time\n :type ts: datetime.datetime\n :param timeout: timeout for parsing in seconds; timeout=0\n indicates no timeout\n :type timeout: int\n :param debug: if True do return iterator over all resolution, else\n return highest scoring one (default=False)\n :type debug: bool\n :param relative_match_len: relative minimum share of\n characters an initial regex match sequence must\n cover compared to the longest such sequence found\n to be considered for productions (default=1.0)\n :type relative_match_len: float\n :param max_stack_depth: limit the maximal number of highest scored candidate productions\n considered for future productions (default=10); set to 0 to not\n limit\n :type max_stack_depth: int\n\n :returns: Time or Interval\n '''\n parsed = _ctparse(_preprocess_string(txt), ts, timeout=timeout,\n relative_match_len=relative_match_len, max_stack_depth=max_stack_depth)\n if debug:\n return parsed\n else:\n parsed = [p for p in parsed]\n if not parsed or (len(parsed) == 1 and not parsed[0]):\n logger.warning('Failed to produce result for \"{}\"'.format(txt))\n return\n parsed.sort(key=lambda p: p.score)\n return parsed[-1]\n\n\ndef _match_rule(seq, rule):\n if not seq:\n return\n if not rule:\n return\n i_r = 0\n i_s = 0\n r_len = len(rule)\n s_len = len(seq)\n while i_s < s_len:\n if rule[0](seq[i_s]):\n i_start = i_s + 1\n i_r = 1\n while i_start < s_len and i_r < r_len and rule[i_r](seq[i_start]):\n i_r += 1\n i_start += 1\n if i_r == r_len:\n yield i_s, i_start\n i_s += 1\n\n\ndef _seq_match(seq, pat, offset=0):\n # :param seq: a list of intermediate productions, either of type\n # RegexMatch or some other Artifact\n #\n # :param pat: a list of rule patterns to be 
matched, i.e. either a\n # RegexMatch or a callable\n #\n # Determine whether the pattern pat matches the sequence seq and\n # return a list of lists, where each sub-list contains those\n # indices where the RegexMatch objects in pat are located in seq.\n #\n # A pattern pat only matches seq, iff each RegexMatch in pat is in\n # seq in the same order and iff between two RegexMatches aligned\n # to seq there is at least one additional element in seq. Reason:\n #\n # * Rule patterns never have two consequitive RegexMatch objects.\n #\n # * Hence there must be some predicate/dimension between two\n # * RegexMatch objects.\n #\n # * For the whole pat to match there must then be at least one\n # element in seq that can product this intermediate bit\n #\n # If pat does not start with a RegexMatch then there must be at\n # least one element in seq before the first RegexMatch in pat that\n # is alignes on seq. Likewise, if pat does not end with a\n # RegexMatch, then there must be at least one additional element\n # in seq to match the last non-RegexMatch element in pat.\n #\n # STRONG ASSUMPTIONS ON ARGUMENTS: seq and pat do not contain\n # consequiteve elements which are both of type RegexMatch! Callers\n # obligation to ensure this!\n\n if not pat:\n # if pat is empty yield the empty match\n yield []\n elif not seq or not pat:\n # if either seq or pat is empty there will be no match\n return\n elif pat[-1].__name__ != '_regex_match':\n # there must be at least one additional element in seq at the\n # end\n yield from _seq_match(seq[:-1], pat[:-1], offset)\n elif len(pat) > len(seq):\n # if pat is longer than seq it cannot match\n return\n else:\n p1 = pat[0]\n # if p1 is not a RegexMatch, then continue on next pat and\n # advance sequence by one\n if p1.__name__ != '_regex_match':\n yield from _seq_match(seq[1:], pat[1:], offset+1)\n else:\n # Get number of RegexMatch in p\n n_regex = sum(1 for p in pat if p.__name__ == '_regex_match')\n # For each occurance of RegexMatch pat[0] in seq\n for iseq, s in enumerate(seq):\n # apply _regex_match check\n if p1(s):\n # for each match of pat[1:] in seq[iseq+1:], yield a result\n for subm in _seq_match(seq[iseq+1:], pat[1:], offset+iseq+1):\n if len(subm) == n_regex - 1:\n # only yield if all subsequent RegexMatch\n # have been aligned!\n yield [iseq+offset] + subm\n\n\ndef _match_regex(txt):\n \"\"\"Match all known regex in txt and return a list of RegxMatch objects\n sorted by the start of the match. Overlapping matches of the same\n expression are returned as well.\n\n :param txt: the text to match against\n :return: a list of RegexMatch objects ordered my Regex.mstart\n\n \"\"\"\n matches = {RegexMatch(name, m)\n for name, re in _regex.items()\n for m in re.finditer(txt, overlapped=False, concurrent=True)}\n for m in matches:\n logger.debug('regex: {}'.format(m.__repr__()))\n return sorted(matches, key=lambda x: (x.mstart, x.mend))\n\n\ndef _regex_stack(txt, regex_matches, t_fun=lambda: None):\n \"\"\"assumes that regex_matches are sorted by increasing start index\n\n Algo: somewhere on paper, but in a nutshell:\n * stack empty\n\n * add all sequences of one expression to the stack, excluding\n expressions which can be reached from \"earlier\" expressison\n (i.e. there is no gap between them):\n\n - say A and B have no gap inbetween and all sequences starting\n at A have already been produced. These be definition (which?\n :-) include as sub-sequences all sequences starting at B. 
Any\n other sequences starting at B directly will not add valid\n variations, as each of them could be prefixed with a sequence\n starting at A\n\n * while the stack is not empty:\n\n * get top sequence s from stack\n\n * generate all possible continuations for this sequence,\n i.e. sequences where expression can be appended to the last\n element s[-1] in s and put these extended sequences on the stack\n\n * if no new productions could be generated for s, this is one\n result sequence.\n \"\"\"\n prods = []\n n_rm = len(regex_matches)\n # Calculate the upper triangle of an n_rm x n_rm matrix M where\n # M[i, j] == 1 (for i avoid use of numpy here; since we need column sums below,\n # --> the representation of M is columns major, i.e. M[i] is the i-th\n # --> column; M[i, j] then basically becomes M[j][i]\n M = [[0 for _ in range(n_rm)] for _ in range(n_rm)]\n\n _separator_regex = regex.compile(r'\\s*', regex.VERSION1)\n\n def get_m_dist(m1, m2):\n # 1 if there is no relevant gap between m1 and m2, 0 otherwise\n # assumes that m1 and m2 are sorted be their start index\n if m2.mstart < m1.mend:\n return 0 # Overlap\n gap_match = _separator_regex.fullmatch(\n txt[m1.mend:m2.mstart])\n if gap_match:\n return 1 # No Gap\n else:\n return 0 # Gap\n\n for i in range(n_rm):\n for j in range(i+1, n_rm):\n M[j][i] = get_m_dist(regex_matches[i], regex_matches[j])\n\n stack = [(i,) for i in reversed(range(n_rm)) if sum(M[i]) == 0]\n while stack:\n t_fun()\n s = stack.pop()\n i = s[-1]\n new_prod = False\n for j in range(i+1, n_rm):\n if M[j][i] == 1:\n stack.append(s + (j,))\n new_prod = True\n if not new_prod:\n prod = tuple(regex_matches[i] for i in s)\n logger.debug('regex stack {}'.format(prod))\n prods.append(prod)\n return prods\n\n\ndef run_corpus(corpus):\n \"\"\"Load the corpus (currently hard coded), run it through ctparse with\n no timeout and no limit on the stack depth.\n\n The corpus passes if ctparse generates the desired solution for\n each test at least once. Otherwise it fails.\n\n While testing this, a labeled data set (X, y) is generated based\n on *all* productions. 
Given a final production p, based on initial\n regular expression matches r_0, ..., r_n, which are then\n subsequently transformed using production rules p_0, ..., p_m,\n will result in the samples\n\n [r_0, ..., r_n, p_0, 'step_0']\n [r_0, ..., r_n, p_0, p_1, 'step_1']\n ...\n [r_0, ..., r_n, p_0, ..., p_m, 'step_m']\n\n All samples from one production are given the same label: 1 iff\n the final production was correct, -1 otherwise.\n\n \"\"\"\n model_old = _nb._model\n _nb._model = None\n at_least_one_failed = False\n # pos_parses: number of parses that are correct\n # neg_parses: number of parses that are wrong\n # pos_first_parses: number of first parses generated that are correct\n # pos_best_scored: number of correct parses that have the best score\n pos_parses = neg_parses = pos_first_parses = pos_best_scored = 0\n total_tests = 0\n Xs = []\n ys = []\n for target, ts, tests in tqdm(corpus):\n ts = datetime.strptime(ts, '%Y-%m-%dT%H:%M')\n all_tests_pass = True\n for test in tests:\n one_prod_passes = False\n first_prod = True\n y_score = []\n for prod in _ctparse(_preprocess_string(test), ts, relative_match_len=1.0):\n y = prod.resolution.nb_str() == target\n # Build data set, one sample for each applied rule in\n # the sequence of rules applied in this production\n # *after* the matched regular expressions\n X_prod, y_prod = _nb.map_prod(prod.production, y)\n Xs.extend(X_prod)\n ys.extend(y_prod)\n one_prod_passes |= y\n pos_parses += int(y)\n neg_parses += int(not y)\n pos_first_parses += int(y and first_prod)\n first_prod = False\n y_score.append((prod.score, y))\n if not one_prod_passes:\n logger.warning('failure: target \"{}\" never produced in \"{}\"'.format(target, test))\n pos_best_scored += int(max(y_score, key=lambda x: x[0])[1])\n total_tests += len(tests)\n all_tests_pass &= one_prod_passes\n if not all_tests_pass:\n logger.warning('failure: \"{}\" not always produced'.format(target))\n at_least_one_failed = True\n logger.info('run {} tests on {} targets with a total of '\n '{} positive and {} negative parses (={})'.format(\n total_tests, len(corpus), pos_parses, neg_parses,\n pos_parses+neg_parses))\n logger.info('share of correct parses in all parses: {:.2%}'.format(\n pos_parses/(pos_parses + neg_parses)))\n logger.info('share of correct parses being produced first: {:.2%}'.format(\n pos_first_parses/(pos_parses + neg_parses)))\n logger.info('share of correct parses being scored highest: {:.2%}'.format(\n pos_best_scored/total_tests))\n if at_least_one_failed:\n raise Exception('ctparse corpus has errors')\n _nb._model = model_old\n return Xs, ys\n\n\n#\n# Not unittested - would take very long time to run these\n#\n\ndef build_model(X, y, save=False): # pragma: no cover\n nb = NB()\n nb.fit(X, y)\n if save:\n pickle.dump(nb, bz2.open(_model_file, 'wb'))\n return nb\n\n\ndef regenerate_model(): # pragma: no cover\n from . time.corpus import corpus as corpus_time\n from . 
time.auto_corpus import corpus as auto_corpus\n global _nb\n logger.info('Regenerating model')\n _nb = NB()\n X, y = run_corpus(corpus_time + auto_corpus)\n logger.info('Got {} training samples'.format(len(y)))\n build_model(X, y, save=True)\n","sub_path":"ctparse/ctparse.py","file_name":"ctparse.py","file_ext":"py","file_size_in_byte":22593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"405099228","text":"from cicsa_ranking.models import Event, EventActivity, EventTeam, MemberGroup, School, Team\nfrom .AbstractCustomClass import AbstractCustomClass\nfrom panel.component.CustomElements import DBMap, SearchElement\nfrom misc.CustomFunctions import MiscFunctions, RequestFunctions, LogFunctions\n\n\nclass EventTeamView(AbstractCustomClass):\n def __init__(self, request):\n self.base_class = EventTeam\n self.assoc_class_event_activity = EventActivity\n self.assoc_class_event = Event\n self.assoc_class_school = School\n self.assoc_class_team = Team\n self.assoc_class_member_group = MemberGroup\n self.search_name = ['event_team_event_member_group']\n self.validation_table = {\n 'base_table_invalid': {'_state'},\n 'base_form_invalid': {'_state', 'id', 'event_team_member_group_id'},\n }\n super().__init__(request, self.base_class, self.validation_table)\n\n# View Process Functions\n\n def abstractFormProcess(self, action, **kwargs):\n try:\n post_dict = dict(self.request.POST)\n dispatcher = super().populateDispatcher()\n\n if dispatcher.get(action):\n event_team_id = kwargs.pop('id', None)\n event_team = self.useAPI(self.base_class).editSelf(id=event_team_id)\n else:\n event_team = self.base_class()\n\n event_team.event_team_id = RequestFunctions.getSingleRequestObj(post_dict, 'event_team_id')\n event_team.event_team_event_activity_id = RequestFunctions.getSingleRequestObj(\n post_dict, 'event_team_event_activity_id')\n event_team.event_team_member_group_id = [RequestFunctions.getSingleRequestObj(post_dict, name + \"_result\")\n for name in self.search_name][0]\n\n if not action == 'delete':\n event_team.save()\n\n LogFunctions.generateLog(\n self.request, 'admin', LogFunctions.makeLogQuery(\n self.base_class, action.title(), id=event_team.id))\n\n if action == 'delete':\n event_team.delete()\n except Exception:\n print({\"Error\": \"Cannot Process \" + action.title() + \" Request.\"})\n\n# View Generating Functions\n\n # Form Generating Functions\n def getFieldData(self, **kwargs):\n action = kwargs.pop('action')\n element_id = kwargs.pop('element_id')\n field_data_dispatcher = self.populateDispatcher()\n if field_data_dispatcher.get(action):\n field_data = MiscFunctions.filterDict(\n self.useAPI(self.base_class).getSelf(id=element_id).__dict__.items(),\n self.validation_table['base_form_invalid']\n )\n return field_data\n return None\n\n def getChoiceData(self):\n return None\n\n def getDBMap(self, data):\n db_map = dict()\n event_activity = self.useAPI(self.assoc_class_event_activity).getSelf(id=data['event_team_event_activity_id'])\n event_parent = self.useAPI(self.assoc_class_event).getSelf(id=event_activity.event_activity_event_parent)\n db_map['event_team_event_activity_id'] = event_parent.event_name + ' - ' + event_activity.event_activity_name if event_parent is not None else 'Link Broken'\n db_map['event_team_id'] = DBMap().getMap(self.assoc_class_team, data['event_team_id'], 'team_name')\n db_map['event_team_member_group_id'] = (\n lambda x: 'Unlinked' if x is None else\n (\n self.useAPI(\n self.assoc_class_member_group\n 
).getSelf(\n id=data['event_team_member_group_id'])\n ).member_group_name\n )(data['event_team_member_group_id'])\n return db_map\n\n def getMultiChoiceData(self):\n return None\n\n def getSearchElement(self, **kwargs):\n def getSearchDefault(id):\n element_id = kwargs['element_id'] if 'element_id' in kwargs else None\n if element_id:\n event_team = self.useAPI(self.base_class).getSelf(id=element_id)\n if event_team.event_team_member_group_id is not None:\n member_group = self.useAPI(\n self.assoc_class_member_group\n ).getSelf(id=event_team.event_team_member_group_id)\n return member_group.id, member_group.member_group_name\n return None, None\n return [\n SearchElement(self.search_name[i], 'Event Team Event Member Group', 'MemberGroup', None,\n 'member_group_name', None, getSearchDefault(i)) for i in range(len(self.search_name))\n ]\n\n # Table Generating Functions\n def getTableSpecificHeader(self):\n return [field.name for field in self.base_class._meta.get_fields()\n if field.name not in self.validation_table['base_table_invalid']]\n\n def getTableRowContent(self, content):\n field_data = MiscFunctions.filterDict(self.useAPI(self.base_class).getSelf(id=content.id).__dict__.items(),\n self.validation_table['base_table_invalid'])\n field_data = self.updateDBMapAsValue(field_data, self.getDBMap(field_data))\n field_data = MiscFunctions.grabValueAsList(field_data)\n return field_data\n","sub_path":"panel/config/team/management_data/CustomPages/EventTeam.py","file_name":"EventTeam.py","file_ext":"py","file_size_in_byte":5387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"640128868","text":"#\n# MLPerf inference; medical imaging; preprocessing\n#\n# Copyright (c) 2019 cTuning foundation.\n# Copyright (c) 2021 OctoML, Inc.\n#\n# Developers:\n# - Grigori Fursin, OctoML, 2021\n#\n\nimport json\nimport os\nimport re\n\ndef ck_preprocess(i):\n\n ck=i['ck_kernel']\n rt=i['run_time']\n deps=i['deps']\n\n env=i['env']\n new_env={} # new environment to be added to the run script\n bat='\\n'\n\n hosd=i['host_os_dict']\n tosd=i['target_os_dict']\n remote=tosd.get('remote','')\n\n # # Get model name from a CK package in MLPerf loadgen format\n ml_model_dict = deps['model']['dict']\n ml_model_env_dict = deps['model']['dict']['env']\n\n # path_to_squad=deps['dataset']['dict']['env']['CK_ENV_DATASET_SQUAD_DEV']\n # path_to_squad_dev=os.path.join(path_to_squad, 'dev-v1.1.json')\n\n # Check extra opts\n opts=env.get('CK_LOADGEN_OPTS','')\n\n # Check performance count (default 16 in reference implementation)\n performance_count=env.get('CK_LOADGEN_PERFORMANCE_COUNT','')\n if performance_count:\n opts+=' --performance_count='+performance_count\n\n # Check plans.pkl path\n # Note: the reference model in pytorch contains the plans.pkl as well as\n # model needed for preprocessing, so the pytorch model is a dependency of\n # other models\n pytorch_dict = ml_model_dict.get('deps', {}).get('ml-model-mlperf-3d-unet-pytorch', {})\n pytorch_env = pytorch_dict.get('dict', {}).get('env', {})\n if not pytorch_env:\n pytorch_env = ml_model_env_dict\n plans_pkl_dir = pytorch_env['ML_MODEL_ROOT']\n new_env['PLANS_PKL_PATH'] = plans_pkl_dir\n\n # Check output directory\n new_env['CK_MLPERF_OUTPUT_DIR']=os.getcwd()\n\n ###############################################################################\n # Prepare options for loadgen based on env vars\n\n i['script_data_uoa']='dbe3fc4fd58fea44' # script:mlperf-inference-language\n i['loadgen_opts']=opts\n\n 
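# For example (illustrative values only): with CK_LOADGEN_PERFORMANCE_COUNT=16 and\n    # CK_LOADGEN_OPTS=\"--accuracy\" in the environment, opts becomes \"--accuracy --performance_count=16\".\n    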
r=ck.access({'action':'run', 'module_uoa':'script', 'data_uoa':'mlperf-inference', \n 'code':'loadgen_common', 'func':'ck_preprocess', \n 'dict':i})\n if r['return']>0: return r\n\n new_env.update(r['new_env'])\n\n return {'return':0, 'bat':bat, 'new_env':new_env}\n\n# Do not add anything here!\n","sub_path":"script/mlperf-inference-medical-imaging/loadgen_preprocess.py","file_name":"loadgen_preprocess.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"222516442","text":"from puzzle import Pieces \nfrom utils import flatten_with_any_depth\nimport numpy as np\nimport cv2\nimport math\nimport copy\n\n\nclass MixPiece(Pieces):\n def __init__(self, master_piece, slave_piece):\n self.master_piece = master_piece\n self.slave_piece = slave_piece\n self.piece_num = self.init_piece_num()\n self.img = np.zeros((1000, 1000, 3), np.uint8)\n self.unlink_pieces = []\n\n self.fit_side_master_num, self.fit_side_slave_num = self.fit_sides_detection()\n try:\n points = self.connect_pieces()\n except:\n raise\n self.points = Points(points)\n blank_img = np.zeros((1000, 1000, 3), np.uint8)\n self.img = self.points.draw_round(blank_img)\n self.sides = self.create_side(self.points.points, self.init_side_len(points), self.proc_angles(points))\n\n def disconnect_pieces_num(self, pieces_num, disconnect_piece_num):\n copy_pieces_num = copy.deepcopy(pieces_num)\n copy_pieces_num.remove(disconnect_piece_num)\n return disconnect_piece_num, copy_pieces_num\n\n def init_piece_num(self):\n unflat_piece_num = [self.master_piece.piece_num, self.slave_piece.piece_num]\n piece_num = flatten_with_any_depth(unflat_piece_num)\n return piece_num\n\n def resize_fall(self):\n return self.points.resize_fall()\n\n def init_img(self, points):\n blank_img = np.zeros((1000, 1000, 3), np.uint8)\n img = points.draw_round(blank_img)\n return img\n\n def fit_sides_detection(self):\n for mn, master_s in enumerate(self.master_piece.sides):\n for f in master_s.fit_objs:\n for fs in f.fit_sides:\n for sn, slave_s in enumerate(self.slave_piece.sides):\n if fs == slave_s:\n master_fit_sides_num = mn\n slave_fit_sides_num = sn\n return master_fit_sides_num, slave_fit_sides_num \n\n def connect_pieces(self):\n self.move_parallel_all_points()\n self.move_rotate_all_points()\n\n unorder_mixed_points = self.pointing_mixed_points()\n moved_zero_all_points = unorder_mixed_points.move_zero_all_points()\n ordered_mixed_points = moved_zero_all_points.order_mixed_points(\\\n len(self.master_piece.sides), \\\n self.fit_side_master_num, \\\n self.fit_side_slave_num)\n deleted_same_mixed_points = ordered_mixed_points.delete_same_points()\n deleted_pi_angle_points = self.delete_pi_angle_points(ordered_mixed_points.points)\n\n try:\n deleted_pi_angle_points.judge_overlap( \\\n len(self.master_piece.sides), \\\n self.fit_side_master_num, \\\n self.fit_side_slave_num)\n except:\n raise\n return deleted_pi_angle_points.points \n \n def get_angle_tryangle(self):\n self.img = np.zeros((1000, 1000, 3), np.uint8)\n #TODO: 0っておかしい??\n p0 = self.slave_piece.sides[self.fit_side_slave_num].points[0]\n p1 = self.master_piece.sides[self.fit_side_master_num].points[0]\n p2 = self.master_piece.sides[self.fit_side_master_num].points[1]\n\n cv2.line(self.img,(int(p0.x), int(p0.y)),(int(p1.x), int(p1.y)),(0,255,0),3)\n cv2.line(self.img,(int(p1.x), int(p1.y)),(int(p2.x), int(p2.y)),(0,255,0),3)\n cv2.line(self.img,(int(p2.x), int(p2.y)),(int(p0.x), int(p0.y)),(0,255,0),3)\n return 
self.get_angle(p0, p1, p2)\n\n def move_rotate_all_points(self):\n deg = self.get_angle_tryangle()\n theta = (deg * math.pi) / 180\n\n slave_x = self.slave_piece.sides[self.fit_side_slave_num].points[1].x\n slave_y = self.slave_piece.sides[self.fit_side_slave_num].points[1].y\n\n self.assign_slave(slave_x, slave_y, theta, None)\n\n slave_0 = self.slave_piece.sides[self.fit_side_slave_num].points[0]\n master_1 = self.master_piece.sides[self.fit_side_master_num].points[1]\n\n master_x_vector = np.array([master_1.x, slave_x])\n master_y_vector = np.array([master_1.y, slave_y])\n slave_x_vector = np.array([slave_0.x, slave_x])\n slave_y_vector = np.array([slave_0.y, slave_y])\n\n master_vector = np.add(master_x_vector, master_y_vector)\n slave_vector = np.add(slave_x_vector, slave_y_vector)\n\n master_unit_vector = master_vector / np.linalg.norm(master_vector)\n slave_unit_vector = slave_vector / np.linalg.norm(slave_vector)\n\n # 一回回転させて重ならない場合(反対回転なので360-theta回転させる)\n if master_unit_vector.all() != slave_unit_vector.all():\n self.assign_slave(slave_x, slave_y, -theta, None)\n rad = 2 * math.pi - theta\n self.assign_slave(slave_x, slave_y, rad, None)\n\n def move_parallel_all_points(self):\n # 原点にする回転中心\n master_x = self.master_piece.sides[self.fit_side_master_num].points[0].x\n master_y = self.master_piece.sides[self.fit_side_master_num].points[0].y\n slave_x = self.slave_piece.sides[self.fit_side_slave_num].points[1].x\n slave_y = self.slave_piece.sides[self.fit_side_slave_num].points[1].y\n\n diff_dist_x = master_x - slave_x\n diff_dist_y = master_y - slave_y\n diff_dist = (diff_dist_x, diff_dist_y)\n\n self.assign_slave(slave_x, slave_y, None, diff_dist)\n \n def assign_slave(self, slave_x, slave_y, theta, diff_dist):\n # slaveの他の座標も全部動かしてる\n for i, s in enumerate(self.slave_piece.sides):\n for j, point in enumerate(s.points):\n if diff_dist != None:\n new_x, new_y = self.cal_para_point(point, diff_dist)\n if theta != None:\n new_x, new_y = self.cal_rotate_point(point, theta, slave_x, slave_y)\n\n # 一番最初の点\n if i == 0 and j == 0:\n self.slave_piece.sides[0].points[0].x = new_x\n self.slave_piece.sides[0].points[0].y = new_y\n self.slave_piece.sides[len(self.slave_piece.sides) - 1].points[1].x = new_x\n self.slave_piece.sides[len(self.slave_piece.sides) - 1].points[1].y = new_y\n\n if i == len(self.slave_piece.sides) - 1:\n break\n\n if i != 0 and j == 0:\n continue\n\n if j == 1:\n self.slave_piece.sides[i].points[1].x = new_x\n self.slave_piece.sides[i].points[1].y = new_y\n self.slave_piece.sides[i+1].points[0].x = new_x\n self.slave_piece.sides[i+1].points[0].y = new_y\n \n def cal_para_point(self, point, diff_dist):\n parallel_x = point.x + diff_dist[0]\n parallel_y = point.y + diff_dist[1]\n return parallel_x, parallel_y\n\n def cal_rotate_point(self, point, theta, slave_x, slave_y):\n para_diff_x = point.x - slave_x\n para_diff_y = point.y - slave_y\n # rotate_xとrotate_yは回転によって変化する変位である\n rotate_x = para_diff_x*np.cos(theta) - para_diff_y*np.sin(theta)\n rotate_y = para_diff_x*np.sin(theta) + para_diff_y*np.cos(theta)\n # よって、元座標と足し合わせたものが回転された座標\n origin_rotate_x = rotate_x + slave_x \n origin_rotate_y = rotate_y + slave_y\n return origin_rotate_x, origin_rotate_y\n\n def pointing_mixed_points(self):\n master_points = self.pointing_piece_points(self.master_piece.sides)\n slave_points = self.pointing_piece_points(self.slave_piece.sides)\n unflat_mixed_points = [master_points, slave_points]\n mixed_points = Points(flatten_with_any_depth(unflat_mixed_points))\n return mixed_points\n\n 
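# Editorial sketch (not part of the original module): cal_rotate_point above applies the standard 2D rotation about the pivot (slave_x, slave_y):\r\n    #   x' = (x - px) * cos(t) - (y - py) * sin(t) + px\r\n    #   y' = (x - px) * sin(t) + (y - py) * cos(t) + py\r\n    # assign_slave reuses it for every vertex of the slave piece.\r\n    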
def pointing_piece_points(self, piece_all_sides):\n all_points = []\n for i, s in enumerate(piece_all_sides):\n for j, point in enumerate(s.points):\n if i == 0 and j == 0:\n all_points.append(point)\n if i != 0 and j == 0:\n continue\n if i == len(piece_all_sides) - 1:\n break\n if j == 1:\n all_points.append(point)\n return all_points\n\n\n def delete_pi_angle_points(self, points):\n # TODO:例が悪かった為、取り敢えずうまく動くと思��がテストしてない\n for i, p in enumerate(points):\n if i == 0:\n if self.get_angle(points[len(points) - 1], p , points[1]) == 180:\n points.pop(i)\n continue\n if i == len(points) - 1:\n if self.get_angle(points[i-1], p, points[0]) == 180:\n points.pop(i)\n else:\n if self.get_angle(points[i-1], p , points[i+1]) == 180:\n points.pop(i)\n return Points(points)\n\n def draw_line(self, img, p1, p2):\n # 誤差出そう\n return cv2.line(img,(int(p1.x), int(p1.y)),(int(p2.x), int(p2.y)),(255,255,255),3)\n\n\nclass Points(Pieces):\n def __init__(self, points):\n self.points = points\n\n def for_points_index(self, f):\n for i, p in enumerate(self.points):\n value = f(i, p)\n return value\n\n def draw_round(self, img):\n for i, p in enumerate(self.points):\n img = self.draw_close(i, p, img)\n return img\n\n def delete_same_points(self):\n return self.for_points_index(self.same_points_f)\n\n def same_points_f(self, i, p):\n if i != len(self.points) - 1:\n if abs(p.x - self.points[i+1].x) < 5 and abs(p.y == self.points[i+1].y) < 5:\n self.points.pop(i)\n else:\n if abs(p.x - self.points[0].x) < 5 and abs(p.y == self.points[0].y) < 5:\n self.points.pop(i)\n return Points(self.points)\n\n def draw_close(self, i, p, img):\n if i != len(self.points) - 1:\n img = self.draw_line(img, p, self.points[i+1])\n else:\n img = self.draw_line(img, p, self.points[0])\n return img\n\n def draw_line(self, img, p1, p2):\n # 誤差出そう\n return cv2.line(img,(int(p1.x), int(p1.y)),(int(p2.x), int(p2.y)),(255,255,255),3)\n\n def resize_fall(self):\n max_x_point = max(self.points, key=self.piece_x_key)\n max_y_point = max(self.points, key=self.piece_y_key)\n max_x = int(max_x_point.x) \n max_y = int(max_y_point.y) \n\n blank_img = np.zeros((max_y, max_x, 3), np.uint8)\n img = self.draw_round(blank_img)\n return img\n\n def move_zero_all_points(self):\n min_x_point = min(copy.deepcopy(self.points), key=self.piece_x_key)\n min_y_point = min(copy.deepcopy(self.points), key=self.piece_y_key)\n for p in self.points:\n if min_x_point.x > 0:\n p.x = p.x - min_x_point.x \n\n if min_x_point.x < 0:\n p.x = p.x + abs(min_x_point.x) \n\n if min_y_point.y > 0:\n p.y = p.y - min_y_point.y\n\n if min_y_point.y < 0:\n p.y = p.y + abs(min_y_point.y) \n return Points(self.points)\n\n def piece_x_key(self, p):\n return p.x\n \n def piece_y_key(self, p):\n return p.y\n\n def order_mixed_points(self, master_points_num, fit_master_side_num, fit_slave_side_num):\n copy_mixed_points = copy.deepcopy(self.points)\n\n master_points = copy_mixed_points[:master_points_num]\n slave_points = copy_mixed_points[master_points_num:]\n\n master_0 = master_points[fit_master_side_num]\n if fit_master_side_num != len(master_points) - 1:\n master_1 = master_points[fit_master_side_num + 1]\n else:\n master_1 = master_points[0]\n \n if fit_slave_side_num != len(slave_points) - 1:\n slave_0 = slave_points[fit_slave_side_num]\n slave_1 = slave_points[fit_slave_side_num + 1]\n else:\n slave_0 = slave_points[fit_slave_side_num]\n slave_1 = slave_points[0]\n\n # 頂点の数は辺の数\n ordered_mixed_points = []\n for mp in master_points:\n ordered_mixed_points.append(mp)\n # 最初の結合部\n if mp 
== master_0:\n                slave_1_index = slave_points.index(slave_1)\n                # Iterate from the slave joint that follows the first joint through to the end\n                for sp in slave_points[slave_1_index:]:\n                    ordered_mixed_points.append(sp)\n                    # Stop once the next joint is reached\n                    if sp == slave_0:\n                        break\n                # Then iterate over the slave points that come before that joint\n                for sp in slave_points[:slave_1_index]:\n                    ordered_mixed_points.append(sp)\n                    # Stop once the next joint is reached\n                    if sp == slave_0:\n                        break\n        return Points(ordered_mixed_points)\n\n    def judge_overlap(self, master_points_num, fit_master_side_num, fit_slave_side_num):\n        copy_mixed_points = copy.deepcopy(self.points)\n\n        master_points = Points(copy_mixed_points[:master_points_num])\n        slave_points = Points(copy_mixed_points[master_points_num:])\n\n        if master_points.judge_cross_line(slave_points, fit_master_side_num, fit_slave_side_num):\n            raise Exception('It is a combination of cross overlapping pieces')\n\n        if master_points.judge_overlap_inside(slave_points, fit_master_side_num):\n            raise Exception('It is a combination of master piece overlapping slave pieces')\n\n        # if slave_points.judge_overlap_inside(master_points, fit_master_side_num):\n        #     raise Exception('It is a combination of slave piece overlapping master pieces')\n\n\n    def judge_cross_line(self, slave_points, fit_master_side_num, fit_slave_side_num):\n        # Fine-grained handling; reaching this point is not treated as an error\n        for i, mp in enumerate(self.points):\n            # Skip the fitted side\n            if i == fit_master_side_num:\n                continue\n            if i != len(self.points) - 1:\n                return self.s1(mp, self.points[i+1], slave_points.points, fit_slave_side_num)\n            else:\n                return self.s1(mp, self.points[0], slave_points.points, fit_slave_side_num)\n\n    def s1(self, mp0, mp1, slave_points, fit_slave_side_num):\n        for j, sp in enumerate(slave_points):\n            # Skip the fitted side\n            if j == fit_slave_side_num:\n                continue\n            if j != len(slave_points) - 1:\n                # Not the last point, so pair j with j+1\n                # return self.s3(mp0, mp1, sp, slave_points[j+i])\n                if self.is_different_points(mp0, mp1, sp, slave_points[j+1]):\n                    return self.is_two_straights_cross([mp0, mp1], sp, slave_points[j+1])\n                else:\n                    continue\n            else:\n                # return self.s3(mp0, mp1, sp, slave_points[0])\n                if self.is_different_points(mp0, mp1, sp, slave_points[0]):\n                    # Wrap around to the first point for the closing segment\n                    return self.is_two_straights_cross([mp0, mp1], sp, slave_points[0])\n                else:\n                    continue\n\n    def is_different_points(self, m0, m1, s0, s1):\n        if (int(m0.x) == int(s0.x) and int(m0.y) == int(s0.y)):\n            return False\n        if (int(m0.x) == int(s1.x) and int(m0.y) == int(s1.y)):\n            return False\n        if (int(m1.x) == int(s0.x) and int(m1.y) == int(s0.y)):\n            return False\n        if (int(m1.x) == int(s1.x) and int(m1.y) == int(s1.y)):\n            return False\n        return True\n\n    def is_point_upper_straight(self, ms, p2):\n        # True if p2 lies above the straight line through the two points in ms\n        p0 = ms[0]\n        p1 = ms[1]\n        tc = (p0.x - p1.x) * (p2.y - p0.y) + (p0.y - p1.y) * (p0.x - p2.x)\n        if tc < 0:\n            return False\n        return True\n\n    def is_two_straights_cross(self, ms, s0, s1):\n        if self.is_point_upper_straight(ms, s0) == self.is_point_upper_straight(ms, s1):\n            # Both points lie in the same half-plane, so the segments do not cross: no error\n            return False\n        return True\n\n    def judge_overlap_inside(self, other_points, fit_side_num):\n        # Raise if any master point other than the fitted points overlaps the slave piece\n        blank_img = np.zeros((1000, 1000, 3), np.uint8)\n        copy_points = copy.deepcopy(self.points)\n\n        if len(copy_points) - 1 != fit_side_num:\n            copy_points.pop(fit_side_num)\n            copy_points.pop(fit_side_num) # +1\n        else:\n            copy_points.pop(0)\n            copy_points.pop(len(copy_points) - 1)\n\n        img = other_points.draw_round(blank_img)\n\n        for p in copy_points:\n            if not self.is_out_piece(p, img):\n                raise Exception('It is a combination of master piece overlapping slave pieces')\n\n    def debug_points(self, points):\n        img = 
np.zeros((1000, 1000, 3), np.uint8)\n        for i, p in enumerate(points):\n            if i != len(points) - 1:\n                img = self.draw_line(img, p, points[i+1])\n            else:\n                img = self.draw_line(img, p, points[0])\n        cv2.imshow('a', img)\n        cv2.waitKey(0)\n        cv2.destroyAllWindows()\n\n    def debug_points_fast(self, points):\n        img = self.init_img(points)\n        cv2.imshow(str(self.piece_num), img)\n        cv2.waitKey(0)\n        cv2.destroyAllWindows()\n","sub_path":"puzzle/mix_piece.py","file_name":"mix_piece.py","file_ext":"py","file_size_in_byte":17637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"235648660","text":"from builtins import range\nimport FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"READ\")\n\nprocess.source = cms.Source(\"DQMRootSource\",\n                            reScope = cms.untracked.string(\"\"),\n                            fileNames = cms.untracked.vstring(\"file:dqm_file1.root\",\"file:dqm_file2.root\"))\n\nseq = cms.untracked.VEventID()\nfor r in range(1,2):\n    #begin run\n    seq.append(cms.EventID(r,0,0))\n    for l in range(1,21):\n        #begin lumi\n        seq.append(cms.EventID(r,l,0))\n        #end lumi\n        seq.append(cms.EventID(r,l,0))\n    #end run\n    seq.append(cms.EventID(r,0,0))\n\nprocess.check = cms.EDAnalyzer(\"RunLumiEventChecker\",\n                               eventSequence = seq)\n\nreadRunElements = list()\nfor i in range(0,10):\n    readRunElements.append(cms.untracked.PSet(name=cms.untracked.string(\"Foo\"+str(i)),\n                                              runs = cms.untracked.vint32(1),\n                                              lumis = cms.untracked.vint32(0),\n                                              means = cms.untracked.vdouble(i),\n                                              entries=cms.untracked.vdouble(2)\n                                              ))\n\nreadLumiElements=list()\nfor i in range(0,10):\n    readLumiElements.append(cms.untracked.PSet(name=cms.untracked.string(\"Foo\"+str(i)),\n                                               runs = cms.untracked.vint32([1 for x in range(0,20)]),\n                                               lumis = cms.untracked.vint32([x+1 for x in range(0,20)]),\n                                               means = cms.untracked.vdouble([i for x in range(0,20)]),\n                                               entries=cms.untracked.vdouble([1 for x in range(0,20)])\n                                               ))\n\nprocess.reader = cms.EDAnalyzer(\"DummyReadDQMStore\",\n                                runElements = cms.untracked.VPSet(*readRunElements),\n                                lumiElements = cms.untracked.VPSet(*readLumiElements) )\n\nprocess.e = cms.EndPath(process.check+process.reader)\n\nprocess.add_(cms.Service(\"DQMStore\"))\n#process.add_(cms.Service(\"Tracer\"))\n\n","sub_path":"DQMServices/FwkIO/test/read_file1_file2_cfg.py","file_name":"read_file1_file2_cfg.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"7195013","text":"def verifica_primos(lista):\n    # Returns True only when every number in the list is prime\n    for a in lista:\n        if a < 2:\n            return False\n        for divisor in range(2, a):\n            if a % divisor == 0:\n                return False\n    return True\n    ","sub_path":"backup/user_074/ch75_2020_04_13_12_54_45_667857.py","file_name":"ch75_2020_04_13_12_54_45_667857.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"505414996","text":"from openpyxl import load_workbook\nimport glob\nimport os\n\nimport sys\nsys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'excel_validators'))\n\nimport db_funcs_for_subjects_db\nimport configurations\nimport excel_validator_imist\nfrom excel_handler import Excel_handler\n\n\nclass Excel_parser_imist(Excel_handler):\n\n    def run_parser(self, route):\n        work_files = glob.glob(f'time_tables/{route}/*.xlsx')\n        validator = excel_validator_imist.Excel_validator_imist()\n\n        for work_file_name in work_files:\n            # validator.run_validator_for_excel_parser(route)\n            db_name 
= self.return_db_name(work_file_name)\n work_book = load_workbook(work_file_name)\n constants = self.return_current_file_constants(work_file_name)\n\n self.create_db_for_parsing(work_book, constants, db_name)\n self.prepare_to_parse_work_file(work_book, constants, db_name)\n\n def prepare_to_parse_work_file(self, work_book, constants, db_name):\n number_of_groups = constants['number_of_groups']\n group_cell_constants = self.get_group_cell_constants(constants)\n groups_name = self.get_groups_name(constants)\n\n for worksheet_name in work_book.sheetnames:\n if self.is_reason_to_skip(worksheet_name) == True:\n continue\n worksheet = work_book[worksheet_name]\n\n for number in range(number_of_groups):\n first_row = group_cell_constants[number][0]\n last_row = group_cell_constants[number][1]\n first_column = group_cell_constants[number][2]\n date_column = constants['date_column']\n time_column = constants['time_column']\n group_name = groups_name[number]\n\n self.parse_worksheet(worksheet, db_name, group_name, date_column,\n time_column, first_row, last_row, first_column)\n\n def parse_worksheet(self, worksheet, db_name, group_name, date_column, time_column, first_lesson_cell_row, last_lesson_cell_row, first_lesson_cell_column):\n for row in range(first_lesson_cell_row, last_lesson_cell_row + 1):\n subject, subject_type = self.get_subject_and_subject_type(\n worksheet, row, first_lesson_cell_column)\n location = self.get_loction_or_teacher_value(\n worksheet, row, first_lesson_cell_column + 1)\n teacher = self.get_loction_or_teacher_value(\n worksheet, row, first_lesson_cell_column + 2)\n\n viewed_date_cell = worksheet.cell(row=row, column=date_column)\n viewed_date_cell_value = self.get_merged_cell_value(\n worksheet, viewed_date_cell)\n viewed_time_cell = worksheet.cell(row=row, column=time_column)\n viewed_time_cell_value = viewed_time_cell.value\n db_funcs_for_subjects_db.save_subj_imist(\n db_name, viewed_date_cell_value, viewed_time_cell_value, group_name, subject, subject_type, location, teacher)\n\n def get_loction_or_teacher_value(self, worksheet, row, column):\n viewed_cell = worksheet.cell(row=row, column=column)\n viewed_cell_value = viewed_cell.value\n if viewed_cell_value == None:\n viewed_cell_value = 'Нет предмета'\n return viewed_cell_value\n\n def get_subject_and_subject_type(self, worksheet, row, first_lesson_cell_column):\n viewed_lesson_cell = worksheet.cell(\n row=row, column=first_lesson_cell_column)\n viewed_lesson_cell_value = viewed_lesson_cell.value\n if viewed_lesson_cell_value == None:\n subject = 'Нет предмета'\n subject_type = 'Нет предмета'\n else:\n viewed_lesson_and_type_cell_value = viewed_lesson_cell_value.split(\n '\\n')\n subject = viewed_lesson_and_type_cell_value[0]\n subject_type = viewed_lesson_and_type_cell_value[1]\n return subject, subject_type\n\n def create_db_for_parsing(self, work_book, constants, db_name):\n db_funcs_for_subjects_db.drop_db(db_name)\n db_funcs_for_subjects_db.create_db_imist(db_name)\n\n for worksheet_name in work_book.sheetnames:\n if self.is_reason_to_skip(worksheet_name) == True:\n continue\n worksheet = work_book[worksheet_name]\n self.create_dates_and_times_and_groups_in_db(\n worksheet, db_name, constants)\n\n def create_dates_and_times_and_groups_in_db(self, worksheet, db_name, constants):\n groups_name = self.get_groups_name(constants)\n\n date_column = constants['date_column']\n first_date_row = constants['first_date_row']\n last_date_row = constants['last_date_row']\n\n time_column = constants['time_column']\n\n for 
number in range(int(constants['number_of_groups'])):\n for row in range(first_date_row, last_date_row + 1):\n viewed_date_cell = worksheet.cell(row=row, column=date_column)\n viewed_date_value = self.get_merged_cell_value(\n worksheet, viewed_date_cell)\n\n viewed_time_cell = worksheet.cell(row=row, column=time_column)\n viewed_time_value = viewed_time_cell.value\n\n group_name = groups_name[number]\n\n db_funcs_for_subjects_db.save_date_and_time_and_group_imist(\n db_name, viewed_date_value, viewed_time_value, group_name)\n\n def get_group_cell_constants(self, constants):\n number_of_groups = constants['number_of_groups']\n\n group_cell_constants = [\n [\n constants['first_group_first_lesson_cell_row'],\n constants['first_group_last_lesson_cell_row'],\n constants['first_group_first_lesson_cell_column'],\n ],\n [\n constants['second_group_first_lesson_cell_row'],\n constants['second_group_last_lesson_cell_row'],\n constants['second_group_first_lesson_cell_column'],\n ],\n [\n constants['third_group_first_lesson_cell_row'],\n constants['third_group_last_lesson_cell_row'],\n constants['third_group_first_lesson_cell_column'],\n ],\n ]\n if number_of_groups == 4:\n fourth_group_first_lesson_cell_row = constants['fourth_group_first_lesson_cell_row']\n fourth_group_last_lesson_cell_row = constants['fourth_group_last_lesson_cell_row']\n fourth_group_first_lesson_cell_column = constants['fourth_group_first_lesson_cell_column']\n group_cell_constants.append(\n [fourth_group_first_lesson_cell_row, fourth_group_last_lesson_cell_row, fourth_group_first_lesson_cell_column])\n\n return group_cell_constants\n\n def get_groups_name(self, constants):\n number_of_groups = constants['number_of_groups']\n\n groups_name = [\n constants['first_group_number'],\n constants['second_group_number'],\n constants['third_group_number'],\n ]\n if number_of_groups == 4:\n groups_name.append(constants['fourth_group_number'])\n\n return groups_name\n\n def return_current_file_constants(self, work_file_name):\n clear_file_name = self.find_clear_file_name(work_file_name)\n constants = configurations.timetable_constants[clear_file_name]\n return constants\n\n def return_db_name(self, file_name):\n if '1_imist' in file_name:\n return 'imist_1'\n elif '2_imist' in file_name:\n return 'imist_2'\n elif '3_imist' in file_name:\n return 'imist_3'\n elif '4_imist' in file_name:\n return 'imist_4'\n\n def find_clear_file_name(self, file_name):\n if '1_imist' in file_name:\n return 'imist_1'\n elif '2_imist' in file_name:\n return 'imist_2'\n elif '3_imist' in file_name:\n return 'imist_3'\n elif '4_imist' in file_name:\n return 'imist_4'\n else:\n return None\n\n\nparser = Excel_parser_imist()\nif __name__ == \"__main__\":\n parser.run_parser('full_time_undergraduate_imist')\n","sub_path":"excel_parser_imist.py","file_name":"excel_parser_imist.py","file_ext":"py","file_size_in_byte":8252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"112468341","text":"# ~license~\n#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\nimport copy\nfrom appy.utils import sequenceTypes\nfrom appy.model.workflow import Role\n\n# Errors - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\nPERMISSION_NOT_FOUND = 'permission \"%s\" is not among permissions dict for ' \\\n 'state %s in workflow %s.'\n \n#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\nclass State:\n '''Represents a workflow state'''\n def __init__(self, 
permissions, initial=False, phase=None):\n        self.usedRoles = {}\n        # Dict \"permissions\" lists, for every permission managed by a workflow,\n        # the roles for which the permission is granted in this state. Standard\n        # permissions are 'read', 'write' and 'delete'. p_permissions must be a\n        # dict with format\n        #            ~{s_permissionName:[s_roleName|Role_role]}~\n\n        # It will be converted by m_standardizeRoles into format\n        #            ~{s_permissionName:{s_roleName:Role_role}}~\n        self.permissions = permissions\n        self.initial = initial\n        self.phase = phase\n        # Standardize the way roles are expressed within self.permissions\n        self.standardizeRoles()\n\n    def init(self, workflow, name):\n        '''Lazy initialisation'''\n        self.workflow = workflow\n        self.name = name\n        self.labelId = '%s_%s' % (workflow.name, name)\n\n    def __repr__(self):\n        return '<state %s::%s>' % (self.workflow.name, self.name)\n\n    def copyPerms(self):\n        '''Gets a deep copy of this state's permissions dict'''\n        return copy.deepcopy(self.permissions)\n\n    def getRole(self, role):\n        '''p_role can be the name of a role or a Role instance. If it is the\n           name of a role, this method returns self.usedRoles[role] if it\n           exists, or creates a Role instance, puts it in self.usedRoles and\n           returns it else. If it is a Role instance, the method stores it in\n           self.usedRoles if it is not in it yet and returns it.'''\n        if isinstance(role, str):\n            if role in self.usedRoles:\n                return self.usedRoles[role]\n            else:\n                theRole = Role(role)\n                self.usedRoles[role] = theRole\n                return theRole\n        else:\n            if role.name not in self.usedRoles:\n                self.usedRoles[role.name] = role\n            return role\n\n    def standardizeRoles(self):\n        '''This method converts, within self.permissions, every role to a\n           Role instance. Every used role is stored in self.usedRoles.'''\n        for permission, roles in self.permissions.items():\n            if isinstance(roles, str) or isinstance(roles, Role):\n                role = self.getRole(roles)\n                self.permissions[permission] = {role.name: role}\n            elif isinstance(roles, dict):\n                for name, role in roles.items():\n                    roles[name] = self.getRole(role)\n            else:\n                # \"roles\" is a list or tuple, or None (nobody may have this\n                # permission).\n                d = {}\n                if roles is not None:\n                    for role in roles:\n                        role = self.getRole(role)\n                        d[role.name] = role\n                self.permissions[permission] = d\n\n    def getUsedRoles(self): return self.usedRoles.values()\n\n    def getRolesFor(self, permission):\n        '''Gets the roles that are granted p_permission on this state. r_ is a\n           dict ~{s_roleName: Role}.'''\n        if permission not in self.permissions:\n            raise Exception(PERMISSION_NOT_FOUND % (permission, self.name, \\\n                            self.workflow.name))\n        return self.permissions[permission]\n\n    def addRoles(self, roles, permissions=()):\n        '''Adds p_roles in self.permissions. p_roles can be a role name, a Role\n           instance or a list of names and/or Role instances. If p_permissions\n           is specified, roles are added to those permissions only. 
Else, roles\n           are added for every permission within self.permissions.'''\n        # Standardize parameters\n        if type(roles) not in sequenceTypes: roles = (roles,)\n        if isinstance(permissions, str): permissions = (permissions,)\n        for perm, existingRoles in self.permissions.items():\n            if permissions and (perm not in permissions): continue\n            for role in roles:\n                # Do nothing if \"role\" is already among existing roles\n                name = role if isinstance(role, str) else role.name\n                if name in existingRoles: continue\n                # Add the role for this permission\n                existingRoles[name] = self.getRole(role)\n\n    def removeRoles(self, roleNames, permissions=()):\n        '''Removes p_roleNames within dict self.permissions. If p_permissions is\n           specified, removal is restricted to those permissions. Else, removal\n           occurs throughout the whole dict self.permissions.'''\n        if isinstance(roleNames, str): roleNames = (roleNames,)\n        if isinstance(permissions, str): permissions = (permissions,)\n        for perm, roles in self.permissions.items():\n            if permissions and (perm not in permissions): continue\n            for name in roleNames:\n                # Remove this role if present in roles for this permission\n                if name in roles:\n                    del roles[name]\n\n    def setRoles(self, roleNames, permissions=()):\n        '''Sets p_rolesNames for p_permissions if not empty, for every\n           permission in self.permissions else.'''\n        if isinstance(roleNames, str): roleNames = (roleNames,)\n        if isinstance(permissions, str): permissions = (permissions,)\n        for perm in self.permissions.keys():\n            if permissions and (perm not in permissions): continue\n            roles = self.permissions[perm] = {}\n            for name in roleNames:\n                roles[name] = self.getRole(name)\n\n    def replaceRole(self, oldRoleName, newRoleName, permissions=()):\n        '''Replaces p_oldRoleName by p_newRoleName. If p_permissions is\n           specified, the replacement is restricted to those permissions. Else,\n           replacements apply to the whole dict self.permissions.'''\n        if isinstance(permissions, str): permissions = (permissions,)\n        for perm, roles in self.permissions.items():\n            if permissions and (perm not in permissions): continue\n            # Find and replace p_oldRoleName with p_newRoleName\n            if oldRoleName in roles:\n                del roles[oldRoleName]\n                roles[newRoleName] = self.getRole(newRoleName)\n\n    def copyRoles(self, sourcePermission, destPermission):\n        '''Overrides p_destPermission's roles with (a deep copy of)\n           p_sourcePermission's roles.'''\n        copiedRoles = copy.deepcopy(self.permissions[sourcePermission])\n        self.permissions[destPermission] = copiedRoles\n\n    def isIsolated(self):\n        '''Returns True if, from this state, we cannot reach another state.\n           Modifying a workflow for getting a state with auto-transitions only\n           is a common technique for disabling a state in a workflow.'''\n        if self.initial: return\n        for tr in self.workflow.transitions.values():\n            # Ignore transitions that do not touch this state\n            if not tr.hasState(self, True) and not tr.hasState(self, False):\n                continue\n            # Transition \"tr\" has this state as start or end state. 
If start and\n            # end states are different, it means that the state is not\n            # isolated.\n            if tr.isSingle():\n                for state in tr.states:\n                    if state != self: return\n            else:\n                for start, end in tr.states:\n                    # Bypass (start, end) pairs having nothing to do with self\n                    if (start != self) and (end != self): continue\n                    if (start != self) or (end != self): return\n        # If we are here, either there was no transition starting from self,\n        # or all transitions were auto-transitions: self is then isolated.\n        return True\n#- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n","sub_path":"appy/model/workflow/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":8435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"352973123","text":"import socket\nimport threading\nimport socketserver\nimport time\nimport argparse\nimport Adafruit_SSD1306\nfrom PIL import Image, ImageDraw, ImageFont\nimport RPi.GPIO as GPIO\nimport pygame\n\ns=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\ns.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)\n\nfreq=24000\nbitsize=-16\nchannels=1\nsbuffer=2048\nordercheckmp3=\"ordercheck.mp3\"\norderconfirmmp3=\"ordercomfirm.mp3\"\norderdeniedmp3=\"orderdenied.mp3\"\nordercancelmp3=\"ordercancel.mp3\"\nproductout=\"productout.mp3\"\n\nprint ('Socket created')\n\n\n\n\norder_list=['민철','민철','민철','영신','윤수','진영','영신','윤수','선규','선규','찬호','찬호','대원','종진','은오']\ntemp_all=[['민철','민철','민철','영신'],['윤수','진영','영신','윤수','선규'],['선규','찬호','찬호','대원','종진','은오']]\n#order_list=['ham','ber',:\n\ndisp = Adafruit_SSD1306.SSD1306_128_64(rst=None, i2c_address=0x3C)\ndisp.begin()\ndisp.clear()\ndisp.display()\n\nwidth = disp.width\nheight = disp.height\nimage = Image.new('1',(width, height))\ndraw = ImageDraw.Draw(image)\n\ndraw.rectangle((0,0,width,height), outline = 0, fill=0)\n\npadding = -2\ntop = padding\nbutton1 = 24\nbutton2 = 23\nbutton3 = 18\nbutton4 = 25\nbutton5 = 8\nx = 0\n\nfont = ImageFont.load_default()\nfont1=ImageFont.truetype(\"/fonts/frutype/nanum/NanumBarunGothic.ttf\",10)\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)  # pin numbering mode is required before GPIO.setup; BCM assumed from the pin constants above\nGPIO.setup(button1, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\nGPIO.setup(button2, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\nGPIO.setup(button3, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\nGPIO.setup(button4, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\nGPIO.setup(button5, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\n \ndef receive(s):\n    global ovlap\n    while True:\n        try: \n            image=Image.new('1',(width,height))\n            draw = ImageDraw.Draw(image)\n            disp.clear()\n            draw.text((x+29,top),'order list',font=font, fill=255)\n            ovlap=[]\n            ovlap =list(set(order_list)) \n            c = []\n            c_all=[]\n            \n            for i in ovlap:\n                c.append(order_list.count(str(i)))\n            c_all.append(c)\n            print(order_list)\n            print(ovlap)\n            print(c_all)\n            time.sleep(1)\n            \n            #temp_all = [] \n            temp=[]\n            s = (2*5)+1\n            \n            '''for i in range(len(ovlap)):\n                temp.append(ovlap[i]) \n                temp_all.append(temp)'''\n            #print(temp_all[0])\n            #print(temp_all)\n            \n            #print(temp_all)\n            \n            for i in range(0,4):\n                draw.text((x,top+(s*(i+1))), '{}'.format(ovlap[i]), font=font1, fill=255)\n                draw.text((x+40,top+(s*(i+1))), '{}'.format(c[i]), font=font1, fill=255) \n            draw.text((x,top+56),'5.다음페이지',font=font1, fill=255)\n            \n            if GPIO.input(button5)==GPIO.HIGH:\n                while True: \n                    s = (2*5)+1\n                    image = Image.new('1',(width, height))\n                    draw = ImageDraw.Draw(image)\n                    disp.clear()\n                    ovlap2=[]\n                    for i in range(5,len(ovlap)):\n                        ovlap2.append(ovlap[i])\n                    for i in range(0,4):\n                        draw.text((x,top+12+(s*(i))), 
'{}'.format(ovlap2[i]), font=font1, fill=255)\n                        draw.text((x+40,top+(s*(i+1))), '{}'.format(c[i]), font=font1, fill=255) \n                    draw.text((x+29,top),'order list',font=font, fill=255)\n                    draw.text((x,top+56),'5.이전단계',font=font1, fill=255)\n                    disp.image(image)\n                    disp.display()\n                    if GPIO.input(button5)==GPIO.HIGH: \n                        break\n            \n            if GPIO.input(button3)==GPIO.HIGH:\n                time.sleep(1)\n                while True:\n                    image = Image.new('1',(width, height))\n                    draw = ImageDraw.Draw(image)\n                    disp.clear()\n                    draw.text((x+29,top),'order list',font=font, fill=255)\n                    draw.text((x+20,top+29),'1.Client1 2.Clinet2 ',font=font1, fill=255)\n                    draw.text((x+20,top+41),'3.Client2 4.Clinet4 ',font=font1, fill=255)\n                    disp.image(image)\n                    disp.display()\n                    \n                    if GPIO.input(button1)==GPIO.HIGH:\n                        image = Image.new('1',(width, height))\n                        draw = ImageDraw.Draw(image)\n                        disp.clear()\n                        while True: \n                            for i in range(len(temp_all[0])): \n                                draw.text((x,top+12+(s*(i))), '{}'.format(temp_all[0][i]), font=font1, fill=255) \n                            disp.image(image)\n                            disp.display()\n                            draw.text((x+40,top+56),'5.이전단계',font=font1, fill=255)\n                            if GPIO.input(button5)==GPIO.HIGH:\n                                break\n                    \n                    if GPIO.input(button2)==GPIO.HIGH:\n                        image = Image.new('1',(width, height))\n                        draw = ImageDraw.Draw(image)\n                        disp.clear()\n                        while True: \n                            for i in range(len(temp_all[1])): \n                                draw.text((x,top+12+(s*(i))), '{}'.format(temp_all[1][i]), font=font1, fill=255) \n                            disp.image(image)\n                            disp.display()\n                            draw.text((x+40,top+56),'5.이전단계',font=font1, fill=255)\n                            if GPIO.input(button5)==GPIO.HIGH:\n                                break\n                    \n                    if GPIO.input(button3)==GPIO.HIGH:\n                        image = Image.new('1',(width, height))\n                        draw = ImageDraw.Draw(image)\n                        disp.clear()\n                        while True: \n                            for i in range(len(temp_all[2])): \n                                draw.text((x,top+12+(s*(i))), '{}'.format(temp_all[2][i]), font=font1, fill=255) \n                            disp.image(image)\n                            disp.display()\n                            draw.text((x+40,top+56),'5.이전단계',font=font1, fill=255)\n                            if GPIO.input(button5)==GPIO.HIGH:\n                                break\n                    \n                    if GPIO.input(button4)==GPIO.HIGH:\n                        image = Image.new('1',(width, height))\n                        draw = ImageDraw.Draw(image)\n                        disp.clear()\n                        while True:\n                            for i in range(len(temp_all[3])):\n                                draw.text((x,top+12+(s*(i))), '{}'.format(temp_all[3][i]), font=font1, fill=255)\n                            disp.image(image)\n                            disp.display()\n                            draw.text((x+40,top+56),'5.이전단계',font=font1, fill=255)\n                            if GPIO.input(button5)==GPIO.HIGH:\n                                break\n                    draw.text((x+40,top+56),'5.이전단계',font=font1, fill=255) \n                    if GPIO.input(button5)==GPIO.HIGH:\n                        break\n            \n            '''if GPIO.input(button2)==GPIO.HIGH:\n                sock.send('주문완료'.encode('utf-8'))\n                break \n            \n            if GPIO.input(button2)==GPIO.HIGH:\n                sock.send('주문완료'.encode('utf-8'))\n                image = Image.new('1',(width, height))\n                draw = ImageDraw.Draw(image)\n                disp.clear()\n                draw.text((x+29,top+25), '주문완료', font=font, fill=255)\n                #mp3(orderdeniedmp3)\n                disp.image(image)\n                disp.display()'''\n            \n            if GPIO.input(button3)==GPIO.HIGH:\n                image = Image.new('1',(width, height))\n                draw = ImageDraw.Draw(image)\n                disp.clear()\n                draw.text((x+29,top+25), 'Bye', font=font, fill=255)\n                disp.image(image)\n                disp.display()\n                #mp3(ordercheckmp3)\n            \n            if GPIO.input(button4)==GPIO.HIGH:\n                image = Image.new('1',(width, height))\n                draw = ImageDraw.Draw(image)\n                disp.clear()\n                for i in range(len(temp_all)):\n                    draw.text((x,top+8*(i+1)), '{}'.format(temp_all[i]), font=font, fill=255)\n                disp.image(image)\n                disp.display()\n            \n            \n            \n            disp.image(image)\n            disp.display()\n            time.sleep(0.01)\n        except KeyboardInterrupt as e:\n            \n            s.close()\n            sock.close()\n            break\n\ndef run():\n    try:\n        #sender=threading.Thread(target=send,args=(s,))\n        receiver=threading.Thread(target=receive, 
args=(s,))\n        receiver.start()\n        #sender.start()\n        #sender.join()\n        receiver.join()\n        while True:\n            time.sleep(1)\n        \n    except Exception as e:\n        print('run Err: %s' % e)\n        pass\n    \n    \ndef sender1(channel):\n    # Both names must be declared global, otherwise the resets below would only create locals\n    global order_list\n    global ovlap\n    \n    order_list = []\n    ovlap = []\n    print(ovlap)\n    \nGPIO.add_event_detect(button1,GPIO.RISING, callback=sender1, bouncetime=200)\n\nrun()\n","sub_path":"L M C/socket_server/lmcTest.py","file_name":"lmcTest.py","file_ext":"py","file_size_in_byte":10027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"236500994","text":"# This file contains your custom actions which can be used to run\n# custom Python code.\n#\n# See this guide on how to implement these action:\n# https://rasa.com/docs/rasa/custom-actions\n\n\n# This is a simple example for a custom action which utters \"Hello World!\"\n\n# from typing import Any, Text, Dict, List\n#\n# from rasa_sdk import Action, Tracker\n# from rasa_sdk.executor import CollectingDispatcher\n#\n#\n# class ActionHelloWorld(Action):\n#\n#     def name(self) -> Text:\n#         return \"action_hello_world\"\n#\n#     def run(self, dispatcher: CollectingDispatcher,\n#             tracker: Tracker,\n#             domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:\n#\n#         dispatcher.utter_message(text=\"Hello World!\")\n#\n#         return []\n\n\nfrom typing import Any, Text, Dict, List\n\nfrom rasa_sdk import Action, Tracker\nfrom rasa_sdk.executor import CollectingDispatcher\nfrom rasa_sdk.events import SlotSet\nfrom MainBackend import FindPath\nfrom pyvi import ViUtils\n\n\nclass ActionFindPath(Action):\n\n\tdef name(self) -> Text:\n\t\treturn \"action_find_path\"\n\n\tdef run(self, dispatcher: CollectingDispatcher,\n\t\t\ttracker: Tracker,\n\t\t\tdomain: Dict[Text, Any]) -> List[Dict[Text, Any]]:\n\t\tstart = str(tracker.get_slot(\"departure\"))\n\t\tstart = start.upper()\n\t\tend = str(tracker.get_slot(\"location\"))\n\t\tend = end.upper()\n\t\tdispatcher.utter_message(text=start)\n\t\tdispatcher.utter_message(text=end)\n\t\t# Handle the case where both slots are missing first; the boolean operator is needed here, not bitwise &\n\t\tif start == \"NONE\" and end == \"NONE\":\n\t\t\tdispatcher.utter_message(text=\"Không tìm thấy địa điểm bạn nhập\")\n\t\t\treturn []\n\t\tif start == \"NONE\":\n\t\t\tlocation = end.replace(\" \", \"+\")\n\t\t\tdispatcher.utter_message(text=\"Đây là kết quả mình tìm được trên google map\\nhttps://www.google.com/maps/dir/?api=1&destination={}\".format(location))\t\n\t\t\treturn []\n\t\tdispatcher.utter_message(text=\"Đợi minh tí nha...\")\n\t\tstart = str(ViUtils.remove_accents(start)).replace(\"b'\",\"\").replace(\"'\", '')\n\t\tend = str(ViUtils.remove_accents(end)).replace(\"b'\",\"\").replace(\"'\", '')\n\t\tu = FindPath(start, end)\n\n\t\tif u == -1:\n\t\t\tdispatcher.utter_message(text=\"Không tìm thấy địa điểm bạn nhập\")\n\t\t\treturn [SlotSet(\"departure\", \"none\")]\n\t\tdispatcher.utter_message(text=\"Đây là bản đồ đường đến của bạn\")\n\t\tdispatcher.utter_message(attachment=\"https://d038e9f6b7e7.ngrok.io/ToDrawMap/Path.jpg\")\n\t\tdispatcher.utter_message(text=\"Đây là kết quả mình tìm được trên google map\\nhttps://www.google.com/maps/dir/?api=1&destination={}\".format(end.replace(\" \", \"+\")))\n\n\t\treturn [SlotSet(\"departure\", \"none\"), SlotSet(\"location\", \"none\")]\n","sub_path":"actions/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"105431278","text":"import json\nimport subprocess\nimport os\nimport pandas as 
pd\n\ndef filecount(dir_name):\n return len([f for f in os.listdir(dir_name) if os.path.isfile(os.path.join(dir_name, f))])\n\n# TO BE CHANGED --> trial, only for new REAL image batches\ntrial = 'trial_12_05_20'\ncsv_file_data = trial + '.csv'\n\nnumPackets_folder = '/home/tom_phelan_ext/Documents/microstructure_analysis/grains2packets/numPackets/'\nfile_data = pd.DataFrame(columns=[\"image name\", \"folder\", \".csv file\"])\n\n# paths for pipeline runs and outputting image data; folder B in packets2blocks is the real block images\npipeline_file = '/home/tom_phelan_ext/gitCode/pix2pix/pytorch-CycleGAN-and-pix2pix/dream3d_pipelines/g2p_analysis.json'\ngrains = '/home/tom_phelan_ext/gitCode/pix2pix/pytorch-CycleGAN-and-pix2pix/datasets/current_run/A/'\npackets = '/home/tom_phelan_ext/gitCode/pix2pix/pytorch-CycleGAN-and-pix2pix/datasets/current_run/B/'\noutput_csv_folder = numPackets_folder + trial + '/'\npipeline_runner = '/home/tom_phelan_ext/Programs/DREAM3D/bin/PipelineRunner'\n\n# creates trial directory if new data\nif (not(os.path.exists(output_csv_folder))): os.makedirs(output_csv_folder)\n\n# subdirs are those listed within image_folder\nsubdirs = os.listdir(grains)\nprint(subdirs)\n\ntotal_index = 1\nfor subdir in subdirs:\n # create path for folders: test, train, val\n grain_image_folder = os.path.join(grains, subdir) + \"/\"\n packet_image_folder = os.path.join(packets, subdir) + \"/\"\n numImages = filecount(grain_image_folder)\n print(\"Number of grain images in \", subdir, \": \", numImages)\n print(\"Number of packet images in \", subdir, \": \", filecount(packet_image_folder))\n\n # iterate thru all images in current image folder\n imageList = os.listdir(grain_image_folder)\n startNumber = 0\n\n for i in range(startNumber, startNumber + numImages):\n # pipeline details, output .csv file\n with open(pipeline_file) as pipeline_json:\n pipeline_json_data = json.load(pipeline_json)\n pipeline_json_data['00']['FileName'] = grain_image_folder + imageList[i]\n pipeline_json_data['12']['OutputFilePath'] = output_csv_folder + str(total_index) + '.csv'\n pipeline_json_data['12']['OutputPath'] = output_csv_folder + str(total_index) + '.csv'\n\n with open(pipeline_file, 'w') as pipeline_json:\n pipeline_json.write(json.dumps(pipeline_json_data, indent=4))\n\n process_call = pipeline_runner + ' -p' + ' ' + pipeline_file\n\n print('*********************************')\n print('Running permutation {} of {}'.format(i, numImages))\n print('*********************************')\n\n subprocess.call(process_call, shell=True)\n \n # add to pandas dataFrame (.csv file later)\n file_data_tuple = pd.DataFrame({\"image name\": imageList[i], \"folder\": subdir, \".csv file\": str(total_index) + \".csv\"}, index=[total_index])\n print(file_data_tuple)\n file_data = pd.concat([file_data, file_data_tuple])\n\n total_index += 1\n\nprint(file_data.head())\n# parse to .csv file with given parameters\nfile_data.to_csv(numPackets_folder + csv_file_data)\n","sub_path":"post_processing/g2p_analysis.py","file_name":"g2p_analysis.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"108366611","text":"class HardDisk:\n def __init__(self):\n self.capacity = 100\n self.us_space = 0\n self.num_file = 0\n\n def print_(self):\n print(f'Capacity on your disk - {self.capacity}\\nUsed space is - {self.us_space}\\nYou have {self.num_file} files on your disk')\n\n def free_space(self):\n free=self.capacity-self.us_space\n return 
free\n\n    def add_file(self, GB_size):\n        # Only add the file when there is enough free space for it\n        if GB_size <= self.free_space():\n            self.us_space += GB_size\n            self.num_file += 1\n\n    def delete_file(self, GB_size):\n        if GB_size >= self.capacity:\n            self.us_space = 0\n            self.free_space()\n            self.num_file -= 1\n        else:\n            self.us_space -= GB_size\n            self.num_file -= 1","sub_path":"Class_object/Class_task_5.py","file_name":"Class_task_5.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"262997210","text":"from pyhp.bytecode import compile_ast\nfrom pyhp.functions import CodeFunction\nfrom rpython.rlib.unroll import unrolling_iterable\n\n\nclass Node(object):\n    \"\"\" The abstract AST node\n    \"\"\"\n    def __init__(self):\n        pass\n\n    def __eq__(self, other):\n        return (self.__class__ == other.__class__ and\n                self.__dict__ == other.__dict__)\n\n    def __ne__(self, other):\n        return not self == other\n\n    def __repr__(self):\n        return self.__class__.__name__\n\n\nclass Statement(Node):\n    pass\n\n\nclass Expression(Statement):\n    pass\n\n\nclass ListOp(Expression):\n    def __init__(self, nodes):\n        self.nodes = nodes\n\n\nclass SourceElements(Statement):\n    \"\"\"\n    SourceElements nodes are found on each function declaration and in global\n    code\n    \"\"\"\n    def __init__(self, func_decl, nodes):\n        self.func_decl = func_decl\n        self.nodes = nodes\n\n    def compile(self, ctx):\n        for funcname, funccode in self.func_decl.items():\n            funccode.compile(ctx)\n\n        if len(self.nodes) > 1:\n            for node in self.nodes[:-1]:\n                node.compile(ctx)\n                ctx.emit('DISCARD_TOP')\n\n        if len(self.nodes) > 0:\n            node = self.nodes[-1]\n            node.compile(ctx)\n        else:\n            ctx.emit('LOAD_NULL')\n\n        if self.nodes and not isinstance(self.nodes[-1], Return):\n            ctx.emit('RETURN')\n\n\nclass Program(Statement):\n    def __init__(self, body, scope):\n        self.body = body\n        self.scope = scope\n\n    def compile(self, ctx):\n        self.body.compile(ctx)\n\n\nclass StatementList(Statement):\n    def __init__(self, block):\n        self.block = block\n\n    def compile(self, ctx):\n        self.block.compile(ctx)\n\n\nclass ExprStatement(Node):\n    def __init__(self, expr):\n        self.expr = expr\n\n    def compile(self, ctx):\n        self.expr.compile(ctx)\n\n\nclass Function(Node):\n    \"\"\" A function\n    \"\"\"\n    def __init__(self, name, body, scope):\n        self.identifier = name.get_literal()\n        self.body = body\n        self.scope = scope\n\n    def compile(self, ctx):\n        body = self.body\n        if body is None:\n            body = Return(None)\n        body = compile_ast(body, self.scope)\n\n        method = CodeFunction(self.identifier, body)\n\n        ctx.emit('DECLARE_FUNCTION', self.identifier, method)\n\n\nclass Call(Node):\n    def __init__(self, left, params):\n        self.left = left\n        self.params = params\n\n    def compile(self, ctx):\n        self.params.compile(ctx)\n        self.left.compile(ctx)\n\n        ctx.emit('CALL')\n\n\nclass Identifier(Expression):\n    def __init__(self, identifier):\n        self.identifier = identifier\n\n    def get_literal(self):\n        return self.identifier\n\n    def compile(self, ctx):\n        ctx.emit('LOAD_FUNCTION', self.identifier)\n\n\nclass Constant(Expression):\n    def __init__(self, identifier):\n        self.identifier = identifier\n\n    def compile(self, ctx):\n        ctx.emit('LOAD_CONSTANT', self.identifier)\n\n\nclass ArgumentList(ListOp):\n    def compile(self, ctx):\n        for node in self.nodes:\n            if isinstance(node, VariableIdentifier):\n                ctx.emit('LOAD_REF', node.index, node.identifier)\n            else:\n                node.compile(ctx)\n        ctx.emit('LOAD_LIST', len(self.nodes))\n\n\nclass Array(ListOp):\n    def compile(self, ctx):\n        for element in self.nodes:\n            element.compile(ctx)\n        ctx.emit('LOAD_ARRAY', len(self.nodes))\n\n\nclass Global(ListOp):\n    def compile(self, ctx):\n        pass\n\n\nclass 
Member(Expression):\n \"this is for array[name]\"\n def __init__(self, left, expr):\n self.left = left\n self.expr = expr\n\n def compile(self, ctx):\n self.expr.compile(ctx)\n if isinstance(self.left, VariableIdentifier):\n ctx.emit('LOAD_MEMBER_VAR', self.left.index, self.left.identifier)\n else:\n self.left.compile(ctx)\n ctx.emit('LOAD_MEMBER')\n\n\nclass ConstantInt(Node):\n \"\"\" Represent a constant\n \"\"\"\n def __init__(self, intval):\n self.intval = intval\n\n def compile(self, ctx):\n ctx.emit('LOAD_INTVAL', self.intval)\n\n\nclass ConstantFloat(Node):\n \"\"\" Represent a constant\n \"\"\"\n def __init__(self, floatval):\n self.floatval = floatval\n\n def compile(self, ctx):\n ctx.emit('LOAD_FLOATVAL', self.floatval)\n\n\nclass ConstantString(Node):\n \"\"\" Represent a constant\n \"\"\"\n def __init__(self, stringval):\n self.stringval = stringval\n\n def compile(self, ctx):\n ctx.emit('LOAD_STRINGVAL', self.stringval)\n\n\nclass StringSubstitution(Node):\n \"\"\" Represent a constant\n \"\"\"\n def __init__(self, strings):\n self.strings = strings\n\n def compile(self, ctx):\n for part in self.strings:\n part.compile(ctx)\n ctx.emit('LOAD_STRING_SUBSTITUTION', len(self.strings))\n\n\nclass Boolean(Expression):\n def __init__(self, boolval):\n self.bool = boolval\n\n def compile(self, ctx):\n ctx.emit('LOAD_BOOLEAN', self.bool)\n\n\nclass Null(Expression):\n def compile(self, ctx):\n ctx.emit('LOAD_NULL')\n\n\nclass VariableIdentifier(Expression):\n def __init__(self, identifier, index):\n self.identifier = identifier\n self.index = index\n\n def get_literal(self):\n return self.identifier\n\n def compile(self, ctx):\n ctx.emit('LOAD_VAR', self.index, self.identifier)\n\n\nclass Empty(Expression):\n def compile(self, ctx):\n pass\n\n\nclass EmptyExpression(Expression):\n def compile(self, ctx):\n ctx.emit('LOAD_NULL')\n\n\nOPERANDS = {\n '+=': 'ADD',\n '-=': 'SUB',\n '++': 'INCR',\n '--': 'DECR',\n '.=': 'ADD',\n}\n\nOPERATIONS = unrolling_iterable(OPERANDS.items())\n\n\nclass BaseAssignment(Expression):\n noops = ['=']\n post = False\n\n def has_operation(self):\n return self.operand not in self.noops\n\n def compile(self, ctx):\n if self.has_operation():\n self.left.compile(ctx)\n if self.post:\n ctx.emit('DUP')\n self.right.compile(ctx)\n self.compile_operation(ctx)\n else:\n self.right.compile(ctx)\n\n self.compile_store(ctx)\n\n if self.post:\n ctx.emit('DISCARD_TOP')\n\n def compile_operation(self, ctx):\n # calls to 'emit' have to be very very very static\n op = self.operand\n for key, value in OPERATIONS:\n if op == key:\n ctx.emit(value)\n return\n assert 0\n\n def compile_store(self, ctx):\n raise NotImplementedError\n\n\nclass AssignmentOperation(BaseAssignment):\n def __init__(self, left, right, operand, post=False):\n self.left = left\n self.index = left.index\n self.right = right\n if self.right is None:\n self.right = Empty()\n self.operand = operand\n self.post = post\n\n def compile_store(self, ctx):\n ctx.emit('ASSIGN', self.index, self.left.get_literal())\n\n\nclass MemberAssignmentOperation(BaseAssignment):\n def __init__(self, left, right, operand, post=False):\n self.left = left\n self.right = right\n if right is None:\n self.right = Empty()\n\n self.operand = operand\n\n self.w_array = self.left.left\n self.expr = self.left.expr\n self.post = post\n\n def compile_store(self, ctx):\n self.expr.compile(ctx)\n self.w_array.compile(ctx)\n ctx.emit('STORE_MEMBER')\n\n\nclass Unconditional(Statement):\n def __init__(self, count):\n self.count = 
count\n\n\nclass Break(Unconditional):\n def compile(self, ctx):\n assert self.count is None\n ctx.emit('LOAD_NULL')\n ctx.emit_break()\n\n\nclass Continue(Unconditional):\n def compile(self, ctx):\n assert self.count is None\n ctx.emit('LOAD_NULL')\n ctx.emit_continue()\n\n\nclass If(Node):\n \"\"\" A very simple if\n \"\"\"\n def __init__(self, cond, true_branch, else_branch=None):\n self.cond = cond\n self.true_branch = true_branch\n self.else_branch = else_branch\n\n def compile(self, ctx):\n self.cond.compile(ctx)\n endif = ctx.prealocate_label()\n endthen = ctx.prealocate_label()\n ctx.emit('JUMP_IF_FALSE', endthen)\n self.true_branch.compile(ctx)\n ctx.emit('JUMP', endif)\n ctx.emit_label(endthen)\n\n if self.else_branch is not None:\n self.else_branch.compile(ctx)\n else:\n ctx.emit('LOAD_NULL')\n\n ctx.emit_label(endif)\n\n\nclass WhileBase(Statement):\n def __init__(self, condition, body):\n self.condition = condition\n self.body = body\n\n\nclass While(WhileBase):\n def compile(self, ctx):\n ctx.emit('LOAD_NULL')\n startlabel = ctx.emit_startloop_label()\n ctx.continue_at_label(startlabel)\n\n self.condition.compile(ctx)\n\n endlabel = ctx.prealocate_endloop_label()\n ctx.emit('JUMP_IF_FALSE', endlabel)\n\n self.body.compile(ctx)\n ctx.emit('DISCARD_TOP')\n\n ctx.emit('JUMP', startlabel)\n ctx.emit_endloop_label(endlabel)\n ctx.done_continue()\n\n\nclass For(Statement):\n def __init__(self, setup, condition, update, body):\n self.setup = setup\n self.condition = condition\n self.update = update\n self.body = body\n\n def compile(self, ctx):\n self.setup.compile(ctx)\n ctx.emit('DISCARD_TOP')\n\n ctx.emit('LOAD_NULL')\n\n startlabel = ctx.emit_startloop_label()\n endlabel = ctx.prealocate_endloop_label()\n update = ctx.prealocate_updateloop_label()\n\n self.condition.compile(ctx)\n ctx.emit('JUMP_IF_FALSE', endlabel)\n ctx.emit('DISCARD_TOP')\n\n self.body.compile(ctx)\n\n ctx.emit_updateloop_label(update)\n self.update.compile(ctx)\n ctx.emit('DISCARD_TOP')\n\n ctx.emit('JUMP', startlabel)\n ctx.emit_endloop_label(endlabel)\n\n\nclass Foreach(Statement):\n def __init__(self, lobject, key, variable, body):\n self.w_object = lobject\n self.key = key\n self.variable = variable\n self.body = body\n\n def compile(self, ctx):\n w_object = self.w_object\n key = self.key\n variable = self.variable\n body = self.body\n\n w_object.compile(ctx)\n ctx.emit('LOAD_ITERATOR')\n # load the \"last\" iterations result\n ctx.emit('LOAD_NULL')\n precond = ctx.emit_startloop_label()\n finish = ctx.prealocate_endloop_label(True)\n\n ctx.emit('JUMP_IF_ITERATOR_EMPTY', finish)\n\n # put the next iterator value onto stack\n ctx.emit('NEXT_ITERATOR')\n\n # store iterator key into appropriate place\n if key is None:\n ctx.emit('DISCARD_TOP')\n elif isinstance(key, VariableIdentifier):\n name = key.identifier\n index = key.index\n ctx.emit('ASSIGN', index, name)\n ctx.emit('DISCARD_TOP')\n else:\n raise Exception(u'unsupported')\n\n # store iterator value into appropriate place\n if isinstance(variable, VariableIdentifier):\n name = variable.identifier\n index = variable.index\n ctx.emit('ASSIGN', index, name)\n ctx.emit('DISCARD_TOP')\n else:\n raise Exception(u'unsupported')\n\n body.compile(ctx)\n ctx.emit('JUMP', precond)\n ctx.emit_endloop_label(finish)\n\n\nclass Print(Node):\n def __init__(self, expr):\n self.expr = expr\n\n def compile(self, ctx):\n self.expr.compile(ctx)\n ctx.emit('PRINT')\n\n\nclass Return(Statement):\n def __init__(self, expr):\n self.expr = expr\n\n def compile(self, ctx):\n 
if self.expr is None:\n ctx.emit('LOAD_NULL')\n else:\n self.expr.compile(ctx)\n ctx.emit('RETURN')\n\n\nclass Block(Statement):\n def __init__(self, nodes):\n self.nodes = nodes\n\n def compile(self, ctx):\n if len(self.nodes) > 1:\n for node in self.nodes[:-1]:\n node.compile(ctx)\n ctx.emit('DISCARD_TOP')\n\n if len(self.nodes) > 0:\n node = self.nodes[-1]\n node.compile(ctx)\n else:\n ctx.emit('LOAD_NULL')\n\n\ndef create_binary_op(name):\n class BinaryOp(Expression):\n def __init__(self, left, right):\n self.left = left\n self.right = right\n\n def compile(self, ctx):\n self.left.compile(ctx)\n self.right.compile(ctx)\n ctx.emit(name)\n BinaryOp.__name__ = name\n return BinaryOp\n\n\ndef create_unary_op(name):\n class UnaryOp(Expression):\n def __init__(self, expr):\n self.expr = expr\n\n def compile(self, ctx):\n self.expr.compile(ctx)\n ctx.emit(name)\n UnaryOp.__name__ = name\n return UnaryOp\n\n\nclass And(Expression):\n def __init__(self, left, right):\n self.left = left\n self.right = right\n\n def compile(self, ctx):\n self.left.compile(ctx)\n one = ctx.prealocate_label()\n ctx.emit('JUMP_IF_FALSE_NOPOP', one)\n self.right.compile(ctx)\n ctx.emit_label(one)\n\n\nclass Or(Expression):\n def __init__(self, left, right):\n self.left = left\n self.right = right\n\n def compile(self, ctx):\n self.left.compile(ctx)\n one = ctx.prealocate_label()\n ctx.emit('JUMP_IF_TRUE_NOPOP', one)\n self.right.compile(ctx)\n ctx.emit_label(one)\n\nComma = create_binary_op('COMMA')\n\nPlus = create_binary_op('ADD') # +\nMult = create_binary_op('MUL') # *\nMod = create_binary_op('MOD') # %\nDivision = create_binary_op('DIV') # /\nSub = create_binary_op('SUB') # -\n\nEq = create_binary_op('EQ') # ==\nGt = create_binary_op('GT') # >\nGe = create_binary_op('GE') # >=\nLt = create_binary_op('LT') # <\nLe = create_binary_op('LE') # <=\n\nUrsh = create_binary_op('URSH') # >>>\nRsh = create_binary_op('RSH') # >>\nLsh = create_binary_op('LSH') # <<\n\nNot = create_unary_op('NOT')\n","sub_path":"pyhp/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":14019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"184651444","text":"import datetime\nfrom multiprocessing import Process\n\nfrom feature_extraction import ChiSquare\nfrom tools import get_accuracy\nfrom tools import Write2File\nimport re\nimport os\n\n\nclass Test:\n def __init__(self, type_, train_num, test_num, feature_num, max_iter, C, k, corpus):\n self.type = type_\n self.train_num = train_num\n self.test_num = test_num\n self.feature_num = feature_num\n self.max_iter = max_iter\n self.C = C\n self.k = k\n self.parameters = [train_num, test_num, feature_num]\n\n # get the f_corpus\n self.train_data, self.train_labels = corpus.get_train_corpus(train_num)\n # print ('self.train_data length: ', len(self.train_data), 'self.train_labels length: ', len(self.train_labels))\n self.test_data, self.test_labels = corpus.get_test_corpus(test_num)\n # print ('self.test_data length: ', len(self.test_data), 'self.test_labels length: ', len(self.test_labels))\n\n # feature extraction\n fe = ChiSquare(self.train_data, self.train_labels)\n self.best_words = fe.best_words(feature_num)\n\n with open('bestwords.txt',\"w\", encoding = 'UTF-8') as f:\n for words in self.best_words:\n f.write(words+'\\n')\n\n self.single_classifiers_got = False\n\n self.precisions = [[0, 0], # bayes\n [0, 0], # maxent\n [0, 0]] # svm\n\n def write(self, filepath, classify_labels, i=-1):\n results = 
get_accuracy(self.test_labels, classify_labels, self.parameters)\n        if i >= 0:\n            self.precisions[i][0] = results[10][1] / 100\n            self.precisions[i][1] = results[7][1] / 100\n\n        Write2File.write_contents(filepath, results)\n    \n    def test_sentiment_dict_svm(self):\n        print(\"SVMClassifier\")\n        print(\"---\" * 30)\n        print(\"Train num = %s\" % self.train_num)\n        print(\"Test num = %s\" % self.test_num)\n        print(\"C = %s\" % self.C)\n\n        from classifiers import SVMClassifier\n        svm = SVMClassifier(self.train_data, self.train_labels, self.best_words, self.C)\n\n        from corpus import get_keywords\n        keywords_list = get_keywords('data/sentiment_keywords.txt')\n        # print (keywords_list)\n        irrewords_list = get_keywords('data/sentiment_irrewords.txt')\n        # print (irrewords_list)\n\n        classify_labels = []\n        print(\"Dict and SVM Classifier is testing ...\")\n        count = 0\n        for data in self.test_data:\n            isKeywords = 0\n            isIrrewords = 0\n            printkey = []\n            printirre = []\n            isNomeaning = re.findall('#', (data[0]))\n            if len(isNomeaning) >= 15:\n                prediction = 0\n            else:\n                for words in keywords_list:\n                    isKeyword = re.findall(str(words), str(data))\n                    if len(isKeyword) != 0:\n                        printkey.append(isKeyword)\n                        # print ('isKeyword = ', isKeyword)\n                        isKeywords += len(isKeyword)\n                        # print ('isKeywords = ', isKeywords)\n                for words in irrewords_list:\n                    isIrreword = re.findall(str(words), str(data))\n                    if len(isIrreword) != 0:\n                        printirre.append(isIrreword)\n                        # print ('isIrreword = ', isIrreword)\n                        isIrrewords += len(isIrreword)\n                if isKeywords > isIrrewords:\n                    prediction = 1\n                elif isKeywords < isIrrewords:\n                    prediction = 0\n                # classify_labels.append(prediction)\n                # elif isKeywords == 0 and isIrrewords == 1:\n                #     prediction = 0\n                #     classify_labels.append(prediction)\n                else:\n                    prediction = svm.classify(data)\n            classify_labels.append(prediction)\n            result = str(prediction) + str(self.test_labels[count]) + str(printkey) + str(printirre) + str(data) +'\\n'\n            with open('f_runout/prediction_%s.txt'%(datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")),\"a\", encoding = 'UTF-8') as f:\n                f.write(result)\n            count += 1\n        print(\"Dict and SVM Classifier tests over.\")\n\n        filepath = \"f_runout/SVM-%s-train-%d-test-%d-f-%d-C-%d-%s-lin.xls\" % \\\n                   (self.type,\n                    self.train_num, self.test_num,\n                    self.feature_num, self.C,\n                    datetime.datetime.now().strftime(\n                        \"%Y-%m-%d-%H-%M-%S\"))\n\n        self.write(filepath, classify_labels, 2)\n\ndef test_twitter_sentiment():\n    from corpus import TwitterCorpus\n\n    type_ = \"twitter_sentiment\"\n    train_num = 20\n    test_num = 10\n    feature_num = 5000\n    max_iter = 50000\n    C = 150\n    k = 13\n    # k = [1, 3, 5, 7, 9, 11, 13]\n    corpus = TwitterCorpus()\n\n    test = Test(type_, train_num, test_num, feature_num, max_iter, C, k, corpus)\n\n    test.test_sentiment_dict_svm()\n\ndef test_dict():\n    \"\"\"\n    test the classifier based on Sentiment Dict\n    \"\"\"\n    print(\"DictClassifier\")\n    print(\"---\" * 45)\n\n    from classifiers import DictClassifier\n\n    ds = DictClassifier()\n\n    # Run sentiment analysis on a single sentence\n    # a_sentence = \"剁椒鸡蛋好咸,土豆丝很好吃\"  # result: before the fix (1) / after the fix (1)\n    # a_sentence = \"要是米饭再多点儿就好了\"  # result: before the fix (1) / after the fix (0)\n    # a_sentence = \"要是米饭再多点儿就更好了\"  # result: before the fix (0) / after the fix (0)\n    # a_sentence = \"不太好吃,相当难吃,要是米饭再多点儿就好了\"  # result: before the fix (1) / after the fix (0)\n    a_sentence = \"今天特别悲伤,不想去学习,但又必须码代码\"\n    result = ds.analyse_sentence(a_sentence)\n    print(result)\n\n    # Run sentiment analysis on the corpus in a file\n    # corpus_filepath = \"D:/My Data/NLP/SA/waimai/positive_corpus_v1.txt\"\n    # runout_filepath_ = \"f_runout/f_dict-positive_test.txt\"\n    # pos_results = ds.analysis_file(corpus_filepath, 
runout_filepath_, start=3000, end=4000-1)\n #\n # corpus_filepath = \"D:/My Data/NLP/SA/waimai/negative_corpus_v1.txt\"\n # runout_filepath_ = \"f_runout/f_dict-negative_test.txt\"\n # neg_results = ds.analysis_file(corpus_filepath, runout_filepath_, start=3000, end=4000-1)\n #\n # origin_labels = [1] * 1000 + [0] * 1000\n # classify_labels = pos_results + neg_results\n #\n # print(len(classify_labels))\n #\n # filepath = \"f_runout/Dict-waimai-%s.xls\" % (\n # datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n # results = get_accuracy(origin_labels, classify_labels, [1000, 1000, 0])\n #\n # Write2File.write_contents(filepath, results)\n\n\nif __name__ == \"__main__\":\n # test_dict()\n test_twitter_sentiment()\n\n\n","sub_path":"analysis/sentiment/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"93766259","text":"# ConeVolumeCalculator.py\n# Your job is to write a function in ConeVolumeCalculator.py (call\n# it **calculateConeVolume()** that calculates the volume of a cone\n# factor based on the Volume Calculator\n# Calculator.net (http://www.calculator.net/volume-calculator.html)\n\nimport math\nimport time\n## volume = 1/3πr2h\n## where r is the radius of the base, h is the height \n\n# Define Function below\n# be sure to return an integer\n\ndef calculateConeVolume(r, h):\n \n volume = 1/3*math.pi*r**2*h\n volume = round(volume, 2) \n return volume\n\nif __name__ == '__main__':\n # Call the function in here if you want to test it\n # Make sure it's indented\n\n print(calculateConeVolume(3, 5))\n","sub_path":"ConeVolumeCalculator.py","file_name":"ConeVolumeCalculator.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"227843652","text":"from random import choice, randint\nimport sys\nfrom datetime import datetime, date\nimport time\nimport psycopg2\n\ngenders = ['H', 'M']\nfemale_names = []\nmale_names = []\nlast_names = []\nmost_ancient_date = int(datetime.strptime(\"1950/1/1\", \"%Y/%m/%d\").strftime(\"%s\"))\nmost_recent_date = int(datetime.strptime(\"2018/1/1\", \"%Y/%m/%d\").strftime(\"%s\"))\npeople = 0 if len(sys.argv) == 1 else int(sys.argv[1])\ncounter = 0\n\nfor name in open('female_names.dictionary'):\n female_names.append(name.rstrip('\\n'))\n\nfor name in open('male_names.dictionary'):\n male_names.append(name.rstrip('\\n'))\n\nfor name in open('last_names.dictionary'):\n last_names.append(name.rstrip('\\n'))\n\n\nconn = psycopg2.connect(\"dbname=test user=postgres password=123\")\ncur = conn.cursor()\n\n\ndef generate_person(aux_dad_lastname, aux_gender, aux_date, prob):\n global counter\n\n if counter >= people:\n return None\n\n gender = \"\"\n if aux_gender == None:\n gender = choice(genders)\n else:\n gender = aux_gender\n name = choice(male_names) if gender == 'H' else choice(female_names)\n dad_lastname = \"\"\n if aux_dad_lastname == None:\n dad_lastname = choice(last_names)\n else:\n dad_lastname = aux_dad_lastname\n mom_lastname = choice(last_names)\n date = generate_date(aux_date)\n\n counter += 1\n\n statistic = randint(0, 100)\n dad = None\n mom = None\n if statistic < prob:\n dad = generate_person(dad_lastname, 'H', date, prob - 15)\n statistic = randint(0, 100)\n if statistic < prob:\n mom = generate_person(mom_lastname, 'M', date, prob - 15)\n id = insert_person(name, dad_lastname, mom_lastname, 
datetime.fromtimestamp(date).strftime('%Y-%m-%d'), dad, mom, gender)\n    while True:\n        statistic = randint(0, 100)\n        if statistic < prob / 3:\n            generate_sibling(dad_lastname, mom_lastname, date, dad, mom)\n        else:\n            break\n    return id\n\ndef generate_sibling(dad_lastname, mom_lastname, aux_date, dad, mom):\n    global counter\n\n    if counter >= people:\n        return\n\n    gender = choice(genders)\n    name = choice(male_names) if gender == 'H' else choice(female_names)\n    date = generate_similar_date(aux_date)\n    insert_person(name, dad_lastname, mom_lastname, datetime.fromtimestamp(date).strftime('%Y-%m-%d'), dad, mom, gender)\n\n    counter += 1\n\ndef generate_similar_date(date):\n    tmp_date = datetime.fromtimestamp(date).date()\n    final_date = tmp_date.replace(year = tmp_date.year + 5, month = tmp_date.month, day = 1)\n    initial_date = final_date.replace(year = final_date.year - 5, month = tmp_date.month, day = 1)\n    return randint(time.mktime(initial_date.timetuple()), time.mktime(final_date.timetuple()))\n\ndef generate_date(date = 0):\n    if date == 0:\n        rnd_date = randint(most_ancient_date, most_recent_date)\n    else:\n        tmp_date = datetime.fromtimestamp(date).date()\n        final_date = tmp_date.replace(year = tmp_date.year - 20, month = tmp_date.month, day = 1)\n        initial_date = final_date.replace(year = final_date.year - 10, month = final_date.month, day = 1)\n        rnd_date = randint(time.mktime(initial_date.timetuple()), time.mktime(final_date.timetuple()))\n    return rnd_date\n\ndef insert_person(name, dad_lastname, mom_lastname, date, dad_id, mom_id, gender):\n    # print(\"%(name)s %(dad_lastname)s %(mom_lastname)s - %(gender)s, %(date)s\" % { 'name': name, 'dad_lastname': dad_lastname, 'mom_lastname': mom_lastname, 'gender': gender, 'date': date} )\n    cur.execute(\"INSERT into persona (nombre, apaterno, amaterno, nacimiento, id_padre, id_madre, sexo) VALUES (%s, %s, %s, %s, %s, %s, %s) RETURNING id_persona;\",\n                (name, dad_lastname, mom_lastname, date, dad_id, mom_id, gender))\n    id = cur.fetchone()[0]\n    return id\n\nwhile counter < people:\n    # print(\"----------------------- NEW FAMILY TREE HERE -----------------------\")\n    generate_person(None, None, 0, 90)\n    # print(counter)\nconn.commit()\ncur.close()\nconn.close()\n","sub_path":"registro_civil/people_generator.py","file_name":"people_generator.py","file_ext":"py","file_size_in_byte":3991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"588852324","text":"import pdb\nfrom time import sleep  # a blocking sleep is needed here: these views are synchronous\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom django.contrib.auth.views import LoginView\nfrom django.shortcuts import render, redirect\nfrom django.template import RequestContext\nfrom django.views.decorators.csrf import ensure_csrf_cookie\nfrom django.views.generic import CreateView\nfrom django.contrib.auth.models import User\n\nfrom chess.models import SavedGame\nfrom src.chess import Game, HotSeatGame, VersusAIGame, AIPlayer\nfrom django.views import View\nfrom django.http import HttpResponse, Http404\nimport json, random\nfrom src.utils import States\nimport src.chess as chess\nfrom src.utils import EmptyFieldError, MoveNotAvailableError\n\n\nclass ProfileView(View):\n    def get(self, request, *args, **kwargs):\n        if not request.user.is_authenticated:\n            raise Http404()\n        games = SavedGame.objects.filter(user_id=request.user.id)\n        hotseat_games = games.filter(mode=\"hotseat\")\n        melee_games = games.filter(mode=\"melee\")\n        ai_games = games.filter(mode=\"ai\")\n        
context = {\n 'username': request.user.username,\n 'games': games,\n 'hotseat_games': hotseat_games,\n 'melee_games': melee_games,\n 'ai_games': ai_games\n }\n return render(request, 'chess/profile.html', context=context)\n\n\nclass GameReplayView(View):\n replayInfoDict = {\n\n }\n def __init__(self, *args, **kwargs):\n self.turns = None\n self.board = None\n self.current_turn = None\n super().__init__(*args, **kwargs)\n\n def get(self, request, game_id, *args, **kwargs):\n context = {\n\n }\n return render(request, 'chess/gameReplay.html', context=context)\n\n def post(self, request, game_id, *args, **kwargs):\n chess_turn_json = request.body\n chess_turn_dict = json.loads(chess_turn_json)\n\n if chess_turn_dict.get(\"command\", None) is not None:\n chess_command = chess_turn_dict.get(\"command\")\n if chess_command == \"init\":\n turns = SavedGame.objects.get(id=game_id).turns_history.split(\"\\n\")\n board = chess.Board()\n current_turn = 0\n GameReplayView.replayInfoDict[game_id] = {\n \"turns\": turns,\n \"board\": board,\n \"current_turn\": current_turn\n }\n json_response = {\"response\": \"success\",\n \"field\": GameReplayView.replayInfoDict[game_id][\"board\"].current_board,\n \"victory\": \"null\"}\n # print(current_turn)\n json_data = json.dumps(json_response, default=lambda obj: obj.to_json())\n return HttpResponse(json_data, content_type='application/json')\n else:\n game_info = GameReplayView.replayInfoDict[game_id]\n # print(self.current_turn)\n if chess_command == \"next\":\n if game_info[\"current_turn\"] >= len(game_info[\"turns\"]) - 1:\n json_response = {\"response\": \"reject\",\n \"victory\": \"black\" if (game_info[\"current_turn\"]) % 2 == 0 else \"white\"}\n print(\"hi!\")\n print(json_response[\"victory\"])\n return HttpResponse(json.dumps(json_response), content_type='application/json')\n turn = game_info[\"turns\"][game_info[\"current_turn\"]]\n game_info[\"current_turn\"] += 1\n print(game_info[\"current_turn\"])\n print(len(game_info[\"turns\"]))\n figure, move = game_info[\"board\"].get_turn_from_input(turn)\n game_info[\"board\"].make_turn(figure, move, calculate_turns=False)\n json_response = {\n \"response\": \"success\",\n \"field\": game_info[\"board\"].current_board\n }\n\n json_data = json.dumps(json_response, default=lambda obj: obj.to_json())\n return HttpResponse(json_data, content_type='application/json')\n else: # chess_command == \"previous\"\n if game_info[\"current_turn\"] <= 0:\n json_response = {\n \"response\": \"reject\"\n }\n return HttpResponse(json.dumps(json_response), content_type='application/json')\n game_info[\"board\"].undo_turn(calculate_turns=False)\n game_info[\"current_turn\"] -= 1\n json_response = {\n \"response\": \"success\",\n \"field\": game_info[\"board\"].current_board\n }\n\n json_data = json.dumps(json_response, default=lambda obj: obj.to_json())\n return HttpResponse(json_data, content_type='application/json')\n\n\nclass HotSeatChessView(View):\n # @ensure_csrf_cookie\n def get(self, request, *args, **kwargs):\n # context = RequestContext(request)\n return render(request, 'chess/chessHotSeat.html')\n\n\nclass AIChessView(View):\n def get(self, request, *args, **kwargs):\n return render(request, 'chess/chessAIWhite.html')\n\n\nclass MeleeChessView(View):\n def get(self, request, *args, **kwargs):\n return render(request, 'chess/chessMeleeWhite.html')\n\n\nclass MeleeWaitRoomView(View):\n def get(self, request, game_id, *args, **kwargs):\n return render(request, 'chess/waiting_game.html')\n\n def post(self, request, 
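`GameReplayView` above keeps a `{turns, board, current_turn}` entry per game and steps it on "next"/"previous" commands; the cursor bookkeeping in isolation, with a plain move list standing in for the chess board:

```python
# The replay bookkeeping from GameReplayView, reduced to its core:
# a move list plus a cursor, with guarded next/previous steps.
class ReplayCursor:
    def __init__(self, turns):
        self.turns = turns
        self.current_turn = 0          # index of the next move to apply

    def next(self):
        if self.current_turn >= len(self.turns):
            return None                # past the last move -> reject
        turn = self.turns[self.current_turn]
        self.current_turn += 1
        return turn

    def previous(self):
        if self.current_turn <= 0:
            return None                # before the first move -> reject
        self.current_turn -= 1
        return self.turns[self.current_turn]

cursor = ReplayCursor(["e2-e4", "e7-e5", "g1-f3"])
print(cursor.next(), cursor.next(), cursor.previous())  # e2-e4 e7-e5 e7-e5
```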
game_id, *args, **kwargs):\n request_json = request.body\n request_dict = json.loads(request_json)\n\n while True:\n try:\n game = chess.MeleeGame.pending_games.get(str(game_id), None)\n break\n except KeyError:\n continue\n while True:\n if request_dict[\"request\"] == \"start\":\n if game is None:\n print('game is none')\n response = json.dumps(\n {\"start\": \"success\", \"href\": request.build_absolute_uri().replace('waitRoom', '')})\n return HttpResponse(response, content_type='application/json')\n if game.players_amount >= 2:\n game.state = States.IN_PROGRESS.value\n game.add_game_in_collections()\n response = json.dumps(\n {\"start\": \"success\", \"href\": request.build_absolute_uri().replace('waitRoom', '')})\n return HttpResponse(response, content_type='application/json')\n sleep(1000)\n\n\nclass HotSeatStartGameView(View):\n def get(self, request):\n game = HotSeatGame()\n return redirect(f'/games/hotSeat/{game.id}/')\n\n\nclass MeleeStartGameView(View):\n def get(self, request):\n for i in range(0, 20):\n pending_games = chess.MeleeGame.pending_games\n for i in range(0, 20):\n len_pending_games = len(pending_games)\n if len_pending_games == 0:\n game = chess.MeleeGame()\n else:\n game = list(pending_games.values())[0]\n game.players_amount += 1\n return redirect(f'/games/melee/{game.id}/waitRoom')\n\n\nclass VersusAIStartGameView(View):\n def get(self, request, difficulty):\n game = VersusAIGame(int(difficulty))\n return redirect(f'/games/ai/{game.id}/')\n\n\nclass MainPageView(View):\n def get(self, request):\n # SavedGame.objects.all().delete()\n form = AuthenticationForm()\n context = {\n \"current_user_name\": \"None\" if not request.user.is_authenticated else request.user.username,\n \"form\": form\n }\n return render(request, 'chess/index.html', context=context)\n\n\nclass ChessInfoView(View):\n\n def post(self, request, game_id, *args, **kwargs):\n\n chess_turn_json = request.body\n chess_turn_dict = json.loads(chess_turn_json)\n\n if chess_turn_dict.get(\"command\", None) is not None:\n HotSeatGame(game_id=game_id)\n return HttpResponse(json.dumps({\"response\": \"success\"}), content_type='application/json')\n while True:\n try:\n game = HotSeatGame.games_dict[str(game_id)]\n break\n except KeyError:\n continue\n if chess_turn_dict[\"chess_turn\"] is None:\n return HttpResponse(json.dumps(game.get_chess_json(), default=lambda obj: obj.to_json()),\n content_type='application/json')\n\n chess_turn = chess_turn_dict[\"chess_turn\"]\n game.current_player.set_user_input(chess_turn)\n\n try:\n game.make_turn()\n except MoveNotAvailableError:\n response = json.dumps({\"error\": \"move is not available\"})\n return HttpResponse(response, content_type='application/json')\n except EmptyFieldError:\n response = json.dumps({\"error\": \"the field is empty\"})\n return HttpResponse(response, content_type='application/json')\n\n if not game.is_in_progress:\n if request.user.is_authenticated:\n game_state = 'tie' if game.victorious is None else \\\n 'victory' if game.victorious == game.current_player else \\\n 'defeat'\n SavedGame.objects.create(player_color=game.current_player, turns_history=game.turns,\n user_id=request.user.id,\n enemy_user=None, result=game_state, mode=\"hotseat\")\n\n json_data = json.dumps(game.get_chess_json(), default=lambda obj: obj.to_json())\n game.set_next_player()\n\n return HttpResponse(json_data, content_type='application/json')\n\n\nclass MeleeChessInfoView(View):\n\n def __init__(self):\n self.user = {'white': None, 'black': None}\n 
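`ChessInfoView` above speaks a small JSON protocol: one POST with `{"command": ...}` to create the game, then one POST per move with `{"chess_turn": ...}`. A hedged client-side sketch with `requests`; the base URL is a placeholder and CSRF handling is omitted:

```python
# Hedged client-side sketch of the JSON protocol ChessInfoView expects.
# The base URL is a placeholder and Django CSRF protection is ignored here.
import requests

BASE = "http://localhost:8000/games/hotSeat/1/"   # hypothetical route

def start_game():
    return requests.post(BASE, json={"command": "init"}).json()

def send_turn(turn):
    reply = requests.post(BASE, json={"chess_turn": turn}).json()
    if "error" in reply:
        raise ValueError(reply["error"])  # e.g. "move is not available"
    return reply

start_game()
print(send_turn("e2-e4"))
```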
super().__init__()\n\n def post(self, request, game_id, *args, **kwargs):\n print('Post request accepted')\n chess_turn_json = request.body\n chess_turn_dict = json.loads(chess_turn_json)\n print('Post request accepted')\n if chess_turn_dict.get(\"command\", None) is not None:\n game = chess.MeleeGame(game_id=game_id)\n game.state = States.IN_PROGRESS.value\n game.add_game_in_collections()\n return HttpResponse(json.dumps({\"response\": \"success\"}), content_type='application/json')\n\n while True:\n try:\n game = chess.MeleeGame.games_dict[str(game_id)]\n break\n except KeyError:\n continue\n\n if chess_turn_dict.get(\"request\", None) is not None:\n if chess_turn_dict[\"request\"] == 'user_color':\n if game.colors_dict['white'] is None:\n game.colors_dict['white'] = True\n answer = \"white\"\n if request.user.is_authenticated:\n self.user['white'] = request.user\n print(\"sending white color\")\n elif game.colors_dict['black'] is None:\n game.colors_dict['black'] = True\n answer = 'black'\n if request.user.is_authenticated:\n self.user['black'] = request.user\n print(\"sending black color\")\n else:\n answer = 'reject'\n print(\"sending reject\")\n return HttpResponse(json.dumps({\"request\": answer}), content_type='application/json')\n elif chess_turn_dict[\"request\"] == 'update_board':\n while True:\n user_color = chess_turn_dict[\"user_color\"]\n if user_color == game.current_player.color:\n answer = game.get_chess_json()\n answer[\"request\"] = \"success\"\n return HttpResponse(json.dumps(answer, default=lambda obj: obj.to_json()),\n content_type='application/json')\n sleep(1000)\n\n if chess_turn_dict[\"chess_turn\"] is None:\n return HttpResponse(json.dumps(game.get_chess_json(), default=lambda obj: obj.to_json()),\n content_type='application/json')\n\n chess_turn = chess_turn_dict[\"chess_turn\"]\n game.current_player.set_user_input(chess_turn)\n\n try:\n game.make_turn()\n except MoveNotAvailableError:\n response = json.dumps({\"error\": \"move is not available\"})\n return HttpResponse(response, content_type='application/json')\n except EmptyFieldError:\n response = json.dumps({\"error\": \"the field is empty\"})\n return HttpResponse(response, content_type='application/json')\n\n if not game.is_in_progress:\n if self.user['white'] is not None and self.user['black'] is not None:\n game_state = 'tie' if game.victorious is None else \\\n 'victory' if game.victorious == game.current_player else \\\n 'defeat'\n enemy_color = 'white' if game.current_player.color == 'black' else 'black'\n SavedGame.objects.create(player_color=game.current_player, turns_history=game.turns,\n user_id=request.user.id,\n enemy_user_id=self.user[enemy_color].id,\n result=game_state, mode=\"melee\")\n enemy_game_state = 'tie' if game.victorious is None else \\\n 'defeat' if game_state == 'victory' else \\\n 'victory'\n SavedGame.objects.create(player_color=game.current_player, turns_history=game.turns,\n user=User.objects.get(id=self.user[enemy_color].id),\n enemy_user=User.objects.get(id=request.user.id),\n result=enemy_game_state, mode=\"melee\")\n json_data = json.dumps(game.get_chess_json(), default=lambda obj: obj.to_json())\n game.set_next_player()\n\n return HttpResponse(json_data, content_type='application/json')\n\n\nclass AIChessInfoView(View):\n\n def post(self, request, game_id, *args, **kwargs):\n # Getting post info\n chess_turn_json = request.body\n chess_turn_dict = json.loads(chess_turn_json)\n # Whether game started\n if chess_turn_dict.get(\"command\", None) is not None:\n 
VersusAIGame(game_id=game_id)\n return HttpResponse(json.dumps({\"response\": \"success\"}), content_type='application/json')\n # Making turn\n while True:\n try:\n game = VersusAIGame.games_dict[str(game_id)]\n break\n except KeyError:\n continue\n # If game started\n if chess_turn_dict[\"chess_turn\"] is None:\n if isinstance(game.current_player, AIPlayer):\n game.current_player.set_user_input()\n game.make_turn()\n game.set_next_player()\n return HttpResponse(json.dumps(game.get_chess_json(), default=lambda obj: obj.to_json()),\n content_type='application/json')\n # If game in progress\n # Human turn\n if isinstance(game.current_player, AIPlayer):\n game.current_player.set_user_input()\n else:\n chess_turn = chess_turn_dict[\"chess_turn\"]\n game.current_player.set_user_input(chess_turn)\n\n try:\n game.make_turn()\n except MoveNotAvailableError:\n response = json.dumps({\"error\": \"move is not available\"})\n return HttpResponse(response, content_type='application/json')\n except EmptyFieldError:\n response = json.dumps({\"error\": \"the field is empty\"})\n return HttpResponse(response, content_type='application/json')\n\n if not game.is_in_progress:\n if request.user.is_authenticated:\n game_state = 'tie' if game.victorious is None else \\\n 'victory' if game.victorious == game.current_player else \\\n 'defeat'\n SavedGame.objects.create(player_color=game.current_player, turns_history=game.turns,\n user_id=request.user.id, enemy_user=None,\n result=game_state, mode=\"ai\")\n print(SavedGame.objects.first().user.id)\n json_data = json.dumps(game.get_chess_json(), default=lambda obj: obj.to_json())\n game.set_next_player()\n return HttpResponse(json_data, content_type='application/json')\n\n\nclass HyperSignUpView(CreateView):\n form_class = UserCreationForm\n success_url = '/login'\n template_name = 'chess/signup.html'\n\n\nclass HyperLoginView(LoginView):\n form_class = AuthenticationForm\n redirect_authenticated_user = True\n template_name = 'chess/login.html'\n","sub_path":"chess/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"35381881","text":"############## Importar modulos #####################\n\nfrom tkinter import *\nfrom PIL import Image, ImageTk \n\nimport cv2\nimport numpy as np\nimport sys\n\n \ndef onClossing():\n \n print(f\"Threshold value= {umbralValue.get()}\")\n root.quit() #Salir del bucle de eventos.\n cap.release() #Cerrar camara\n print(\"Ip Cam Disconected\")\n root.destroy() #Destruye la ventana tkinter creada\n\n\ndef thresholdValue(int):\n umbralValue.set(slider1.get())\n \ndef callback(): #codigo propio que se correra junto con el mainloop\n\n ################## Adquisición de la Imagen ############\n \n cap.open(url) # Antes de capturar el frame abrimos la url\n ret, frame = cap.read() # Leer Frame\n\n if ret:\n ################# Procesamiento de la Imagen ##########\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)# Convertir a BGR a grises\n\n t, binary = cv2.threshold(gray,umbralValue.get(),255, cv2.THRESH_BINARY)\n \n closing = cv2.morphologyEx(binary, cv2.MORPH_CLOSE,kernel)\n\n ################# Segmentacion de la Imagen ################\n contours,_ = cv2.findContours(closing.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n print(len(contours))\n\n for cnt in contours:\n cv2.drawContours (frame, [cnt], 0, (0,255,0), 3)\n \n # Mostrar imagen en el HMI \n img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n img = 
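The segmentation chain in `callback()` here (grayscale conversion, fixed threshold, morphological closing, external contours) can be exercised without the IP camera on a synthetic frame:

```python
# The same threshold -> closing -> findContours chain as callback() above,
# run on a synthetic frame so it works without the IP camera.
import cv2
import numpy as np

frame = np.zeros((200, 200, 3), np.uint8)
cv2.circle(frame, (100, 100), 40, (255, 255, 255), -1)   # one white blob

gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
_, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
closing = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, np.ones((5, 5), np.uint8))
contours, _ = cv2.findContours(closing, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
print(len(contours))   # -> 1
```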
Image.fromarray(img)\n img.thumbnail((400,400))\n tkimage = ImageTk.PhotoImage(img)\n label.configure(image = tkimage )\n label.image = tkimage\n \n img1 = Image.fromarray(binary)\n img1.thumbnail((400,400))\n tkimage1 = ImageTk.PhotoImage(img1)\n label1.configure(image = tkimage1 )\n label1.image = tkimage1\n\n img2 = Image.fromarray(closing)\n img2.thumbnail((400,400))\n tkimage2 = ImageTk.PhotoImage(img2)\n label2.configure(image = tkimage2 )\n label2.image = tkimage2\n\n\n root.after(10,callback)# Llamar a callback despues de 10 ms\n \n else:\n onClossing()\n \n########################### Mascara ########################\n \nkernel = np.ones((5,5),np.uint8) # Nucleo\n\n########################### Ip Cam ###########################\n \nurl='http://192.168.1.7:8080/shot.jpg'\ncap = cv2.VideoCapture(url)\n\nif cap.isOpened():\n print(\"Ip Cam initializatized\")\nelse:\n sys.exit(\"Ip Cam disconnected\")\n \n############################## HMI design ################# \nroot = Tk()\nroot.protocol(\"WM_DELETE_WINDOW\",onClossing)\nroot.title(\"Vision Artificial\") # titulo de la ventana\n\numbralValue = IntVar()\n\nlabel=Label(root)\nlabel.grid(row=0,padx=20,pady=20)\n\nlabel1=Label(root)\nlabel1.grid(row=0,column=1,padx=20,pady=20)\n\nlabel2=Label(root)\nlabel2.grid(row=1,column=0,padx=20,pady=20)\n\n\nslider1 = Scale(root,label = 'Threshold value', from_=0, to=255, orient=HORIZONTAL,command=thresholdValue,length=400) #Creamos un dial para recoger datos numericos\nslider1.grid(row=1,column=1)\n\nroot.after(10,callback) # Llamar a callback despues de 10 ms\nroot.mainloop()\n\n","sub_path":"SegmentacionContornos.py","file_name":"SegmentacionContornos.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"447305076","text":"import os\nimport csv\nimport json\nfrom bs4 import BeautifulSoup\n\n__location__ = os.path.dirname(os.path.realpath(__file__))\n\n\nurlFile = os.path.join(__location__, \"longURLs.csv\")\n\ncount = 0\ngoodCount = 0\nbadCount = 0\ndomainList = []\ndomainData = {}\n\ndataList = []\n\nwith open(urlFile, 'r', encoding='utf-8') as urlData:\n\tdata = csv.reader(urlData, delimiter=\"|\")\n\t\n\tfor line in data:\n\t\tcount += 1\n\t\tif count > 1:\n\t\t\tif len(line[2]) == 3:\n\t\t\t\tbadCount += 1\n\t\t\t\tif str(line[2]) != \"200\":\n\t\t\t\t\tprint (line[2])\n\t\t\telse:\n\t\t\t\tgoodCount += 1\n\t\t\t\troot = line[2].split(\"//\")[1].split(\"/\")[0]\n\t\t\t\tif \".\" in root:\n\t\t\t\t\tif root.count('.') == 1:\n\t\t\t\t\t\tdomain = root.lower()\n\t\t\t\t\telif root.count('.') == 2:\n\t\t\t\t\t\tdomain = root.split(\".\")[1] + \".\" + root.split(\".\")[2].lower()\n\t\t\t\t\telse:\n\t\t\t\t\t\tif root.lower().startswith(\"www.\"):\n\t\t\t\t\t\t\tdomain = root.lower().split(\"www.\")[1]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tendlist = [\"com\", \"org\", \"net\"]\n\t\t\t\t\t\t\tif root.split(\".\")[-1] in endlist:\n\t\t\t\t\t\t\t\tdomain = root.split(\".\")[-2] + \".\" + root.rsplit(\".\")[-1]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tdomain = root\n\t\t\t\t\tdomainList.append(domain.strip())\n\t\t\t\t\tif domain in domainData.keys():\n\t\t\t\t\t\tdomainData[domain] += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tdomainData[domain] = 1\n\t\t\t\tif domain.lower == \"twitter.com\" or domain.lower() == \"t.co\":\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tdataList.append(line[0])\n\t\t\t\t\t\nwith open(os.path.join(__location__, \"output2\", \"domainList.txt\"), \"w\", encoding='utf-8') as 
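The HMI above drives acquisition with `root.after(10, callback)` rather than a blocking loop; the re-scheduling pattern in isolation:

```python
# Minimal version of the root.after() pattern used above: the callback
# re-schedules itself, so Tkinter's event loop never blocks.
import tkinter as tk

root = tk.Tk()
label = tk.Label(root, text="0")
label.grid(padx=20, pady=20)
state = {"n": 0}

def callback():
    state["n"] += 1
    label.configure(text=str(state["n"]))
    root.after(10, callback)   # re-arm instead of looping

root.after(10, callback)
root.mainloop()
```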
listFile:\n\tlistFile.write(\"\\n\".join(domainList))\n\t\nsortedDomains = []\nsortedList = sorted(domainData, key=domainData.get)\nfor domainString in reversed(sortedList):\n\thash = {'value': domainData[domainString], 'Domain': domainString, 'name': domainString}\n\tsortedDomains.append(hash)\n\nwith open(os.path.join(__location__, \"output2\", \"domainData.json\"), \"w\", encoding='utf-8') as outFile:\n\toutFile.write(json.dumps(sortedDomains))\n\t\t\t\t\t\n\t\t\t\t\ntotal = goodCount + badCount\nprint (str(goodCount) + \" of \" + str(total))\nprint (str(goodCount / total) + \" %\")\n\nfor item in dataList:\n\tprint (item)\n\tfilepath = os.path.join(__location__, \"outputData\", \"outputData\", str(item) + \".html\")\n\toutpath = os.path.join(__location__, \"textData\", str(item) + \".txt\")\n\twith open(filepath, \"r\", encoding='utf-8') as file:\n\t\tdata = file.read()\n\t\tsoup = BeautifulSoup(data, 'html.parser')\n\t\thtmlText = soup.text\n\t\twith open(outpath, \"w\", encoding='utf-8') as output:\n\t\t\toutput.write(htmlText)\n","sub_path":"processURLs.py","file_name":"processURLs.py","file_ext":"py","file_size_in_byte":2382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"96591923","text":"\nimport time\nimport asyncio\n\nasync def abc():\n await asyncio.sleep(2)\n print('haha')\n\nasync def bcd():\n print('afafafafa')\n\nloop = asyncio.get_event_loop()\nbegin = time.time()\nloop.run_until_complete(asyncio.wait([abc(),bcd()]))\nend = time.time()\nprint('spend time:{}'.format(end-begin))","sub_path":"async_sleep.py","file_name":"async_sleep.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"298374098","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 23 21:44:12 2017\n\n@author: Yunshi_Zhao\n\"\"\"\nfrom functions import find_cars, add_heat, apply_threshold, draw_labeled_bboxes \nfrom scipy.ndimage.measurements import label\nimport numpy as np\nimport pickle\nfrom moviepy.editor import VideoFileClip\n\n\ntrained_clf = '../Data/vehicle-detection-basic/trained_clf3.p'\n\nwith open(trained_clf, mode='rb') as f:\n clf = pickle.load(f)\n \nsvc = clf['svc']\nX_scaler = clf['scaler'] \ncolorspace = clf['colorspace'] \nhog_channel = clf['hog_channel']\norient = clf['orient']\npix_per_cell = clf['pix_per_cell']\ncell_per_block = clf['cell_per_block'] \nspatial = clf['spatial_size']\nhistbin = clf['hist_bins']\n\"\"\"\nsearch_boxes = [(1,400,530), (1.5,400,600), \n (2,400,700), (2.5,400,700), (3,400,700)]\n\n\ndef vehicle_detection(img):\n total_heat = np.zeros_like(img[:,:,0]).astype(np.float)\n for scale, ystart, ystop in search_boxes:\n bbox_list = find_cars(img, colorspace, ystart, ystop, scale, svc, X_scaler, orient, \n pix_per_cell, cell_per_block, (spatial, spatial), histbin,\n hog_channel, spatial_feat=True, hist_feat=True)\n heat = np.zeros_like(img[:,:,0]).astype(np.float)\n heat = add_heat(heat,bbox_list)\n heat = apply_threshold(heat, 1)\n total_heat = total_heat + heat\n\n heatmap = np.clip(total_heat, 0, 255)\n labels = label(heatmap)\n draw_img = draw_labeled_bboxes(np.copy(img), labels)\n \n return draw_img\n\ninput_video = './project_video.mp4'\noutput_video = './output_project_video3.mp4'\nclip = VideoFileClip(input_video)\noutput_clip = clip.fl_image(vehicle_detection) \noutput_clip.write_videofile(output_video, audio=False) 
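The domain bucketing in `processURLs.py` above hand-rolls `split("//")[1].split("/")[0]` plus a dict counter; `urllib.parse` and `collections.Counter` cover the common cases more robustly (a sketch only, the www/TLD special-casing above is not reproduced):

```python
# Sketch of the same domain tally with stdlib helpers instead of manual
# string splitting; the www/TLD special cases above are not reproduced.
from collections import Counter
from urllib.parse import urlparse

urls = ["https://www.example.com/a", "http://example.com/b", "https://t.co/x"]

def domain_of(url):
    host = urlparse(url).netloc.lower()
    return host[4:] if host.startswith("www.") else host

domains = Counter(domain_of(u) for u in urls)
print(domains.most_common())   # [('example.com', 2), ('t.co', 1)]
```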
\n\"\"\"","sub_path":"detection_pipeline.py","file_name":"detection_pipeline.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"259761298","text":"from __future__ import division, print_function, unicode_literals\nimport six\n\nimport sys\nimport os\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))\n\nimport pyglet\nfrom pyglet.gl import *\nfrom pyglet import image\n\nfrom cocos.director import *\nfrom cocos.menu import *\nfrom cocos.scene import *\nfrom cocos.layer import *\nfrom cocos.actions import *\nfrom cocos.sprite import Sprite\nfrom cocos.text import Label\n\nfrom Images import *\nfrom const import *\nfrom HUD import *\nfrom Skills import Skills\nfrom monsters import DM_Monsters\nfrom magic import DM_Magic\nfrom artefacts import ArtEffects\nfrom controler import Controler\n\n\nimport random\n\nclass GameModel( pyglet.event.EventDispatcher):\n\n\tdef __init__(self):\n\t\tself.end_of_init()\n\tdef end_of_init(self):\n\t\tsuper(GameModel, self).__init__()\n\t\tself.mapx = Quad_side\n\t\tself.mapy = Quad_side\n\t\tself.init_map()\n\t\tw, h = director.get_window_size()\n\t\t\n\t\tsc = 1920//w\n\t\t\n\t\tself.heroes = {}\n\t\tself.heroes['wizard'] = Hero('wizard', 0, self, 0, 0)\n\t\tself.heroes['priest'] = Hero('priest', 1, self, 0, Quad_side - 1)\n\t\tself.heroes['warrior'] = Hero('warrior', 2, self, Quad_side - 1, Quad_side - 1)\n\t\tself.heroes['rogue'] = Hero('rogue', 3, self, Quad_side - 1, 0)\n\t\tself.interface_DM = Interface(self)\n\t\t\n\t\tself.monsters = []\n\t\tself.alive_heroes = ['wizard', 'priest', 'warrior', 'rogue']\n\t\tself.actual_hero = ['wizard']\n\t\tself.controler = Controler(self)\n\t\tself.skill_use = 0\n\t\t\n\t\tc = self.map.get((0, 0))\n\t\tc.open_P = 1\n\t\tc = self.map.get((Quad_side - 1, 0))\n\t\tc.open_P = 1\n\t\tc = self.map.get((Quad_side - 1, Quad_side - 1))\n\t\tc.open_P = 1\n\t\tc = self.map.get((0, Quad_side - 1))\n\t\tc.open_P = 1\n\t\tc = self.map.get(((Quad_side - 1)/2, (Quad_side - 1)/2))\n\t\tc.name = 'treasure'\n\t\tc.open_P = 1\n\t\t\n\tdef init_map(self):\n\t\tself.map = {}\n\t\tself.map['actual_tile'] = (-1, -1)\n\t\tw, h = director.get_window_size()\n\t\tsc = 1920/w\n\t\tfor i in range (self.mapx):\n\t\t\tfor j in range (self.mapy):\n\t\t\t\tself.map[(i, j)] = Tile(Images.floortile, ((2*i + 1)*Tile_size/2 + left_space)//sc, \n\t\t\t\t\t\t\t\t\t\t((2*j + 1)*Tile_size/2 + (h*sc - (Quad_side*Tile_size))/2)//sc, i, j)\n\t\n\t\n\tdef on_gameover(self):\n\t\t\n\t\tif (len(self.alive_heroes) == 0):\n\t\t\tself.dispatch_event(\"on_game_over\")\n\t\n\tdef on_youwin(self):\n\t\tself.dispatch_event(\"on_you_win\")\n\t\t\n\tdef on_artget(self, hero):\n\t\tArt_menu = Art_Menu(self, hero)\n\t\tself.dispatch_event(\"on_art_get\", Art_menu)\n\t\t\t\n\n\t\t\nclass Tile():\n\n\tdef __init__(self, image, posx, posy ,map_posx, map_posy):\n\t\tw, h = director.get_window_size()\n\t\tsc = 1920/w\n\t\tself.namenumber = 0\n\t\tself.name = 'floor'\n\t\tself.map_pos_x = map_posx\n\t\tself.map_pos_y = map_posy\n\t\tself.open_P = 0\n\t\tself.sprite = Sprite(Images.tile_image[self.name], (posx, posy), scale = 1/sc)\n\t\tself.sprite_smoke = Sprite(Images.tile_image['smoke'], (posx, posy), scale = 1/sc)\n\t\tself.monster = 0\n\t\tself.smoke = 0\n\t\tself.buildav = 1\n\t\n\tdef next_turn(self):\n\t\tif self.smoke:\n\t\t\tself.smoke = self.smoke - 1\n\t\n\tdef next_turn_DM(self):\n\t\tself.buildav = 1\n\t\tif (self.open_P) or 
(self.smoke):\n\t\t\tself.buildav = 0\n\t\n\tdef on_click_P(self):\n\t\tself.open_P = 1\n\t\n\tdef draw(self):\n\t\tself.sprite.image = Images.tile_image[self.name]\n\t\tself.sprite.draw()\n\t\tif self.smoke:\n\t\t\tself.sprite_smoke.draw()\n\t\t\t\n\nclass Interface(Layer):\n\tdef __init__(self, model):\n\t\tself.model = model\n\t\tself.portraits = {'wizard': Hero_portriat_DM(), 'priest': Hero_portriat_DM(),\n\t\t\t\t\t\t 'warrior': Hero_portriat_DM(),'rogue': Hero_portriat_DM()}\n\t\tfor c in self.portraits:\n\t\t\tself.portraits[c].reload(model, c)\n\t\tself.money = starting_money\n\t\t\n\tdef draw(self):\n\t\tw, h = director.get_window_size()\n\t\tsc = 1920/w\n\t\tself.label = Label('%d' %self.money, font_name='Times New Roman', font_size=20//sc, anchor_x='center', anchor_y='center', color = (255, 0, 0, 255))\n\t\tself.label.position = (1670//sc, 1030//sc)\n\t\tself.label.draw()\n\t\tfor c in self.portraits:\n\t\t\tself.portraits[c].draw()\n\t\t\nclass BuildingMenu(Layer):\n\n\tdef __init__(self): \n\t\tsuper(BuildingMenu, self).__init__()\n\t\tself.end_of_init()\n\t\t\n\tdef end_of_init(self):\n\t\tw, h = director.get_window_size()\t\t\n\t\tsc = 1920//w\n\t\tself.visible = 0\n\t\tself.b_types = {}\n\t\tself.b_types[0] = SecondB_menu('trap', 0)\n\t\tself.b_types[1] = SecondB_menu('monster', 1)\n\t\tself.b_types[2] = SecondB_menu('magic', 2)\n\t\tself.b_types[3] = SecondB_menu('wall', 3)\n\t\tself.scroll = Sprite(Images.menu_scroll, (1500//sc, 380//sc), scale = 1/sc)\n\t\tself.active = -1\n\t\n\tdef draw(self):\n\t\tif self.visible:\n\t\t\tw, h = director.get_window_size()\t\t\n\t\t\tsc = 1920//w\n\t\t\tself.scroll.draw()\n\t\t\tfor b_type in self.b_types:\n\t\t\t\tself.b_types[b_type].sprite.draw()\n\t\t\t\tif self.active == b_type:\n\t\t\t\t\tself.b_types[b_type].draw()\n\t\t\t\n\nclass SecondB_menu(Layer):\n\t\n\tdef __init__(self, name, number):\n\t\tsuper(SecondB_menu, self).__init__()\n\t\tw, h = director.get_window_size()\t\t\n\t\tsc = 1920//w\n\t\tself.name = name\n\t\tself.number = number\n\t\tself.sprite = Sprite(Images.building_menu[self.name], (1100//sc, (580 - self.number*B_Menu_size*1.1)//sc), scale = 1/sc)\n\t\tself.objects = Object_list[self.name]\n\t\n\tdef draw(self):\n\t\tw, h = director.get_window_size()\n\t\tsc = 1920//w\n\t\tc = Sprite(Images.frame_black, (1100//sc, (580 - self.number*B_Menu_size*1.1)//sc), scale = 1/sc)\n\t\tc.draw()\n\t\tfor object in range(len(self.objects)):\n\t\t\tdx, dy = self.get_coordinates(object)\n\t\t\tc = Sprite(Images.B_images[self.name][object],\n\t\t\t\t\t\t((1315 + dx)//sc, (505 + dy)//sc), scale = 1/sc)\n\t\t\tc.draw()\n\t\n\tdef get_coordinates(self, number):\n\t\tx = number%4\n\t\ty = number//4\n\t\tdx = x*(m_a+ m_b) \n\t\tdy = -y*(m_a+ m_b)\n\t\treturn(dx, dy)\n\t\t\nclass Magic():\n\tdef __init__(self, name, model):\n\t\tself.name = name\n\t\tself.model = model\n\tdef cast(self):\n\t\tspell = DM_Magic[self.name]\n\t\tspell.cast(self.model)\n\t\t\nclass Hero_portriat_DM():\n\tdef reload(self, model, hero_name):\n\t\tw, h = director.get_window_size()\n\t\tsc = 1920/w\n\t\tself.model = model\n\t\tself.name = hero_name\n\t\tif self.name == 'wizard':\n\t\t\tself.hero = self.model.heroes[self.name]\n\t\t\tself.sprite = Sprite(Images.hero_icons[self.name], (1600//sc, 900//sc), scale = 1/sc)\n\t\t\tself.sprite_black = Sprite(Images.hero_icons_black[self.name], (1600//sc, 900//sc), scale = 1/sc)\n\t\t\tself.label = Label('%d' %self.hero.stats.health, font_name='Times New Roman', font_size=20//sc, anchor_x='center', anchor_y='center', 
color = (255, 0, 0, 255) )\n\t\t\tself.label.position = 1600//sc, 870//sc\n\t\tif self.name == 'priest':\n\t\t\tself.hero = self.model.heroes[self.name]\n\t\t\tself.sprite = Sprite(Images.hero_icons[self.name], (1720//sc, 900//sc), scale = 1/sc)\n\t\t\tself.sprite_black = Sprite(Images.hero_icons_black[self.name], (1720//sc, 900//sc), scale = 1/sc)\n\t\t\tself.label = Label('%d' %self.hero.stats.health, font_name='Times New Roman', font_size=20//sc, anchor_x='center', anchor_y='center', color = (255, 0, 0, 255) )\n\t\t\tself.label.position = 1720//sc, 870//sc\n\t\tif self.name == 'warrior':\n\t\t\tself.hero = self.model.heroes[self.name]\n\t\t\tself.sprite = Sprite(Images.hero_icons[self.name], (1600//sc, 780//sc), scale = 1/sc)\n\t\t\tself.sprite_black = Sprite(Images.hero_icons_black[self.name], (1600//sc, 780//sc), scale = 1/sc)\n\t\t\tself.label = Label('%d' %self.hero.stats.health, font_name='Times New Roman', font_size=20//sc, anchor_x='center', anchor_y='center', color = (255, 0, 0, 255) )\n\t\t\tself.label.position = 1600//sc, 750//sc\n\t\tif self.name == 'rogue':\n\t\t\tself.hero = self.model.heroes[self.name]\n\t\t\tself.sprite = Sprite(Images.hero_icons[self.name], (1720//sc, 780//sc), scale = 1/sc)\n\t\t\tself.sprite_black = Sprite(Images.hero_icons_black[self.name], (1720//sc, 780//sc), scale = 1/sc)\n\t\t\tself.label = Label('%d' %self.hero.stats.health, font_name='Times New Roman', font_size=20//sc, anchor_x='center', anchor_y='center', color = (255, 0, 0, 255) )\n\t\t\tself.label.position = 1720//sc, 750//sc\n\tdef draw(self):\n\t\tif self.hero.alive:\n\t\t\tself.sprite.draw()\n\t\t\tself.label.font_name = '%d' %self.hero.stats.health\n\t\t\tself.label.draw()\n\t\telse:\n\t\t\tself.sprite_black.draw()\n\t\t\n\t\t\nclass Hero():\n\n\tdef __init__(self, name, number ,model, map_posx, map_posy):\n\t\tw, h = director.get_window_size()\n\t\tsc = 1920/w\n\t\tself.name = name\n\t\tself.number = number\n\t\tself.map_posx = map_posx\n\t\tself.map_posy = map_posy\n\t\tself.sprite = Sprite(Images.heroes[self.name], scale = 1/sc)\n\t\tself.sprite.position = (((2*self.map_posx + 1)*Tile_size/2 + left_space)//sc, ((2*self.map_posy + 1)*Tile_size/2 + (h*sc - Quad_side*Tile_size)/2)//sc)\n\t\tself.portrait = Portraits(self)\n\t\tself.icon = Icons(self)\n\t\t\n\t\tself.alive = 1\n\t\tself.techstats = Tech_Stats(self.name)\n\t\tself.stats = Stats(self.name)\n\t\tself.staff = []\n\t\tself.av_art = list(Artefacts)\n\t\tself.model = model\n\t\tself.turnav = self.techstats.speed\n\t\tself.stats.health = self.techstats.max_health\n\t\tself.skills = []\n\t\tfor s in Hero_skills[self.name]:\n\t\t\tself.skills.append(Skill(s[0], s[1], self, self.model))\n\t\n\tdef replace_hero(self, map_posx, map_posy):\n\t\tw, h = director.get_window_size()\n\t\tsc = 1920/w\n\t\tself.map_posx = map_posx\n\t\tself.map_posy = map_posy\n\t\tself.sprite.position = (((2*self.map_posx + 1)*Tile_size/2 + left_space)//sc, ((2*self.map_posy + 1)*Tile_size/2 + (h*sc - Quad_side*Tile_size)/2)//sc)\n\t\n\tdef on_turn(self, tile):\n\t\tself.turnav = self.turnav - 1\n\t\treplace_hero = 1\n\t\tif (tile.name == 'lava'):\n\t\t\tself.model.controler.damage_hero(self.name, lava_damage)\n\t\tif (tile.name == 'wall'):\n\t\t\treplace_hero = 0\n\t\tif (tile.monster):\n\t\t\tresult = tile.monster.monster.fight(self)\n\t\t\tif result == 'lose':\n\t\t\t\treplace_hero = 0\n\t\tif (tile.open_P == 0):\n\t\t\tself.stats.exp = self.stats.exp + self.techstats.exp_per_tile\n\t\t\tif self.stats.lvl < maxlvl:\n\t\t\t\tif (self.stats.exp >= 
ExpNeed[self.stats.lvl]):\n\t\t\t\t\tself.model.controler.lvlup_hero(self.name)\n\t\t\tif (tile.name == 'floor'):\t\n\t\t\t\tself.stats.luck = self.stats.luck + self.techstats.luck_per_tile\n\t\t\t\tif self.stats.luck >= 100:\n\t\t\t\t\tself.stats.luck = self.stats.luck - 100\n\t\t\t\t\tif (len(self.staff) < 5):\n\t\t\t\t\t\tself.stats.luck = 0\n\t\t\t\t\t\tself.model.on_artget(self)\n\t\t\t\t\t\t'''art_name = self.av_art[random.randint(0, len(self.av_art) - 1)]\n\t\t\t\t\t\tart = Artefact(art_name, len(self.staff))\n\t\t\t\t\t\tself.av_art.remove(art_name)\n\t\t\t\t\t\tself.staff.append(art)\n\t\t\t\t\t\tart.on_get(self)'''\n\t\tif replace_hero:\n\t\t\tself.replace_hero(tile.map_pos_x, tile.map_pos_y)\n\t\tif (tile.name == 'treasure'):\n\t\t\tself.model.on_youwin()\n\t\n\tdef draw(self):\n\t\tself.sprite.draw()\n\t\tself.icon.draw()\n\t\t\nclass Artefact():\n\tdef __init__(self, name, number):\n\t\tw, h = director.get_window_size()\n\t\tsc = 1920/w\n\t\tself.name = name\n\t\tself.number = number\n\t\tself.sprite = Sprite(Images.art_image[self.name], ((1562 + 75/2)//sc, (899 - art_pos[self.number])//sc), scale = 1/sc)\n\t\n\tdef on_get(self, hero):\n\t\tif (ArtEffects.get(self.name)):\n\t\t\tArtEffects[self.name](hero).effect()\n\t\n\tdef draw(self):\n\t\tself.sprite.draw()\n\t\t\nclass Skill():\n\tdef __init__(self, name, number, hero, model):\n\t\tw, h = director.get_window_size()\n\t\tsc = 1920/w\n\t\tself.name = name\n\t\tself.number = number\n\t\tself.sprite = Sprite(Images.skill_image[self.name], ((1208)//sc, (899 - skill_pos[self.number])//sc), scale = 1/sc)\n\t\tself.skill = Skills[self.name](hero , model)\n\t\n\tdef draw(self):\n\t\tw, h = director.get_window_size()\n\t\tsc = 1920/w\n\t\tif (self.skill.learnt):\n\t\t\tself.sprite.draw()\n\t\t\tif self.skill.cd_left:\n\t\t\t\tself.label = Label('%d' %self.skill.cd_left, font_name='Times New Roman', font_size=20//sc, anchor_x='center', anchor_y='center', color = (255, 0, 0, 255) )\n\t\t\t\tself.label.position = ((1208)//sc, (899 - skill_pos[self.number])//sc)\n\t\t\t\tself.label.draw()\n\t\n\tdef use(self):\n\t\tif self.skill.available:\n\t\t\tself.skill.use()\n\t\t\nclass Tech_Stats():\n\tdef __init__(self, hero_name):\n\t\tself.speed = Tech_stat[hero_name]['speed']\n\t\tself.max_health = Tech_stat[hero_name]['maxhp']\n\t\tself.exp_per_tile = exp_per_tile\n\t\tself.luck_per_tile = luck_per_tile\n\t\t\t\nclass Stats():\n\tdef __init__(self, hero_name):\n\t\tself.exp = 0\n\t\tself.int = 0\n\t\tself.health = 0\n\t\tself.lvl = 1\n\t\tself.luck = 0\n\t\tself.attack = Tech_stat[hero_name]['attack']\n\t\tself.armor = Tech_stat[hero_name]['armor']\n\t\tself.power = self.attack*2 + self.armor\n\nclass HeroStats(Label):\n\tdef __init__(self, hero):\n\t\tw, h = director.get_window_size()\n\t\tsc = 1920/w\n\t\tself.hero = hero\n\t\tself.health_label = Label('%d' %self.hero.stats.health, font_name='Times New Roman', font_size=28//sc, anchor_x='center', anchor_y='center', color = (255, 0, 0, 255) )\n\t\tself.health_label.position = 1300//sc, 315//sc\n\t\tself.exp_label = Label('%d' %self.hero.stats.exp, font_name='Times New Roman', font_size=28//sc, anchor_x='center', anchor_y='center', color = (255, 0, 0, 255) )\n\t\tself.exp_label.position = 1300//sc, 155//sc\n\t\tself.luck_label = Label('%d%%' %self.hero.stats.luck, font_name='Times New Roman', font_size=28//sc, anchor_x='center', anchor_y='center', color = (255, 0, 0, 255) )\n\t\tself.luck_label.position = 1300//sc, 245//sc\n\t\tself.lvl_label = Label('%d' %self.hero.stats.lvl, 
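`Hero.on_turn()` above grants `exp_per_tile` and levels up once `exp` crosses `ExpNeed[lvl]`; the bookkeeping as a tiny sketch (the threshold values here are made up, the real ones live in `const.py`):

```python
# Sketch of the exp / level-up bookkeeping from Hero.on_turn() above.
# The thresholds are assumptions; the real values live in ExpNeed in const.py.
ExpNeed = {1: 10, 2: 25, 3: 45}     # assumed shape: lvl -> exp required
maxlvl = 4

def gain_exp(stats, amount):
    stats["exp"] += amount
    while stats["lvl"] < maxlvl and stats["exp"] >= ExpNeed[stats["lvl"]]:
        stats["lvl"] += 1           # lvlup_hero() also raises stats in-game

hero = {"exp": 0, "lvl": 1}
gain_exp(hero, 12)
print(hero)                         # {'exp': 12, 'lvl': 2}
```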
font_name='Times New Roman', font_size=18//sc, anchor_x='center', anchor_y='center', color = (0, 0, 0, 255) )\n\t\tself.lvl_label.position = 1618//sc, 112//sc\n\t\tself.attack_label = Label('%d' %self.hero.stats.attack, font_name='Times New Roman', font_size=28//sc, anchor_x='center', anchor_y='center', color = (255, 0, 0, 255) )\n\t\tself.attack_label.position = 1550//sc, 315//sc\n\t\tself.armor_label = Label('%d' %self.hero.stats.armor, font_name='Times New Roman', font_size=28//sc, anchor_x='center', anchor_y='center', color = (255, 0, 0, 255))\n\t\tself.armor_label.position = 1550//sc, 245//sc\n\n\tdef draw(self):\n\t\tself.health_label.draw()\n\t\tself.lvl_label.draw()\n\t\tself.exp_label.draw()\n\t\tself.luck_label.draw()\n\t\tself.attack_label.draw()\n\t\tself.armor_label.draw()\n\t\t\nclass Portraits():\n\n\tdef __init__(self, hero):\n\t\tself.hero = hero\n\t\n\tdef draw(self):\n\t\tw, h = director.get_window_size()\n\t\tsc = 1920/w\n\t\tc = Sprite(Images.portraits[self.hero.name], (1400//sc, 480//sc), scale = 1/sc)\n\t\tc.draw()\n\t\tc = HeroStats(self.hero)\n\t\tc.draw()\n\t\tfor art in self.hero.staff:\n\t\t\tart.draw()\n\t\tfor skill in self.hero.skills:\n\t\t\tskill.draw()\n\t\nclass Icons():\n\t\n\tdef __init__(self, hero):\n\t\tself.hero = hero\n\t\tif self.hero.name == 'wizard':\n\t\t\tself.number = 0\n\t\tif self.hero.name == 'priest':\n\t\t\tself.number = 1\n\t\tif self.hero.name == 'warrior':\n\t\t\tself.number = 2\t\n\t\tif self.hero.name == 'rogue':\n\t\t\tself.number = 3\n\t\t\t\n\tdef draw(self):\n\t\tw, h = director.get_window_size()\n\t\tsc = 1920/w\n\t\tif (self.hero.turnav > 0):\n\t\t\tc = Sprite(Images.hero_icons[self.hero.name], ((1235 + Icon_size*self.number*1.1)//sc, 960//sc), scale = 1/sc)\n\t\telse:\n\t\t\tc = Sprite(Images.hero_icons_black[self.hero.name], ((1235 + Icon_size*self.number*1.1)//sc, 960//sc), scale = 1/sc)\n\t\tc.draw()\n\t\t\nclass Monster():\n\tdef __init__(self, model, tile, m_name):\n\t\tw, h = director.get_window_size()\t\t\n\t\tsc = 1920//w\n\t\tself.name = m_name\n\t\tself.tile = tile\n\t\tself.monster = DM_Monsters[self.name](model, tile)\n\t\t\nclass Art_Menu():\n\tdef __init__(self, model, hero):\n\t\tw, h = director.get_window_size()\t\t\n\t\tsc = 1920//w\n\t\tself.hero = hero\n\t\tself.model = model\n\t\tself.arts = Art_menu[hero.name][hero.stats.lvl]\n\t\tself.art_sprites = {}\n\t\tfor number in range(len(self.arts)):\t\n\t\t\tself.art_sprites[number] = Sprite(Images.art_image[self.arts[number]], \n\t\t\t\t\t\t\t\t\t(art_menu_pos_x[number]//sc, art_menu_pos_y[number]//sc), scale = 2/sc)\n\t\t\n\tdef draw(self):\n\t\tfor art in self.art_sprites:\n\t\t\tif (self.hero.av_art.count(self.arts[art])):\n\t\t\t\tself.art_sprites[art].draw()\n\t\t\nGameModel.register_event_type('on_game_over')\nGameModel.register_event_type('on_you_win')\nGameModel.register_event_type('on_art_get')","sub_path":"gamemodel.py","file_name":"gamemodel.py","file_ext":"py","file_size_in_byte":15402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"639385233","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 23 00:14:17 2018\r\n\r\n@author: csr20\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport cv2\r\nimport matplotlib.pyplot as pt\r\nimport pandas as pd\r\nfrom sklearn.neural_network import MLPClassifier\r\n\r\ndata_train = pd.read_csv(\"C:/Users/Subiyyal/Desktop/Machine Learning/digits/train.csv\").as_matrix()\r\ndata_test = pd.read_csv(\"C:/Users/Subiyyal/Desktop/Machine 
Learning/digits/test.csv\").as_matrix()\r\n\r\n#print(data_test)\r\nclf = MLPClassifier()\r\n\r\n#training dataset\r\nxtrain = data_train[0:42000,1:]\r\ntrain_label = data_train[0:42000, 0]\r\n\r\nclf.fit(xtrain, train_label)\r\n\r\n#testing data\r\nxtest = data_test[0:28000, 0:]\r\nactual_label = data_train[0:42000, 0]\r\n\r\n#\r\nd = xtest[20]\r\nd.shape = (28,28)\r\npt.imshow(255-d, cmap = 'gray')\r\nprint(clf.predict([xtest[20]]))\r\npt.show\r\n\r\n#p = clf.predict(xtest)\r\n#\r\n#count = 0\r\n#for i in range(0,28000):\r\n# count+=1 if p[i] == actual_label[i] else 0\r\n##print(\"accuracy = \", (count/28000)*100)\r\n#\r\n#print(\"accuracy = \", count)","sub_path":"Hamza-MLPClassifier.py","file_name":"Hamza-MLPClassifier.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"652776036","text":"def balancedBrackets(s) :\n\topeningBrackets = \"{([\"\n\tclosingBrackets = \"})]\"\n\tmatchingBrackets = { \"}\":\"{\", \"]\":\"[\", \")\":\"(\" }\n\t\n\tstack = []\n\tfor char in s:\n\t\tif char in closingBrackets :\n\t\t\tif len(stack) == 0 or stack.pop() != matchingBrackets[char] :\n\t\t\t\treturn False\n\t\telif char in openingBrackets :\n\t\t\tstack.append(char)\n\n\treturn len(stack) == 0\n\nstring = \"{[]}\"\nprint(balancedBrackets(string))\t\n\t\n\t","sub_path":"python/balancedBrackets.py","file_name":"balancedBrackets.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"454295750","text":"import numpy as np\nimport sys, os\nimport argparse\nfrom openrec import ModelTrainer\nfrom openrec.recommenders import PMF # this openrec implementation shall be referred to as WRMF to avoid confusion\nfrom openrec.utils import Dataset\nfrom openrec.utils.evaluators import AUC \nfrom openrec.utils.samplers import StratifiedPointwiseSampler, EvaluationSampler\nfrom negative_pointwise_sampler import NegativePointwiseSampler\nfrom dataloader import *\n\n\n### training parameter ###\ntotal_iter = 10000 # iterations for training \nbatch_size = 1000 # training batch size\neval_iter = 1000 # iteration of evaluation\nsave_iter = eval_iter # iteration of saving model\n\n### embeding ### \ndim_user_embed = 100 # dimension of user embedding\ndim_item_embed = 100 # dimension of item embedding\n\n\ndef exp(dataset, l2_reg, pos_ratio, neg_ratio, eval_explicit, save_log, eval_rank):\n \n if neg_ratio is not None:\n if pos_ratio + neg_ratio > 1.0 or pos_ratio + neg_ratio <= 0.0:\n print (\"Invalid sampling ratios...\")\n return\n \n if dataset == 'spotify':\n data = loadSpotify()\n \n elif dataset == 'bytedance':\n data = loadByteDance()\n \n else:\n print (\"Unsupported dataset...\")\n return \n \n # save logging and model\n log_dir = \"validation_logs/{}_{}_{}_{}_{}_{}/\".format(dataset, l2_reg, pos_ratio, neg_ratio, eval_explicit, eval_rank)\n os.popen(\"mkdir -p %s\" % log_dir).read()\n if save_log:\n log = open(log_dir + \"validation.log\", \"w\")\n sys.stdout = log\n \n \n # prepare train, val, test sets and samplers\n train_dataset = Dataset(data['train'], data['total_users'], data['total_items'], name='Train') \n if neg_ratio is None:\n train_sampler = StratifiedPointwiseSampler(batch_size=batch_size, \n dataset=train_dataset, \n pos_ratio=pos_ratio, \n num_process=5)\n else:\n train_sampler = NegativePointwiseSampler(batch_size=batch_size, \n dataset=train_dataset, \n pos_ratio=pos_ratio, \n neg_ratio=neg_ratio, \n 
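The digit classifier above trains `MLPClassifier` on Kaggle CSVs read from an absolute path; the same flow on sklearn's bundled 8x8 digits, with a held-out split replacing the commented manual counting loop:

```python
# Same MLP flow as above, but on sklearn's bundled 8x8 digits so it runs
# without the Kaggle CSVs, and with a held-out split for the accuracy.
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier

X, y = load_digits(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

clf = MLPClassifier(max_iter=500, random_state=0)
clf.fit(X_train, y_train)
print("accuracy =", clf.score(X_test, y_test))
```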
num_process=5)\n if neg_ratio > 0.0:\n print (\"Re-weighting implicit negative feedback\")\n else:\n print (\"Corrected negative feedback labels but not re-weighting\")\n \n \n eval_num_neg = None if eval_explicit else 500 # num of negative samples for evaluation\n if eval_rank:\n # show evaluation metrics for click-complete and click-skip items separately\n pos_dataset = Dataset(data['pos_test'], data['total_users'], data['total_items'], \n implicit_negative=not eval_explicit, name='Pos_Test', num_negatives=eval_num_neg)\n neg_dataset = Dataset(data['neg_test'], data['total_users'], data['total_items'], \n implicit_negative=not eval_explicit, name='Neg_Test', num_negatives=eval_num_neg)\n pos_sampler = EvaluationSampler(batch_size=batch_size, dataset=pos_dataset)\n neg_sampler = EvaluationSampler(batch_size=batch_size, dataset=neg_dataset)\n eval_samplers = [pos_sampler, neg_sampler]\n else:\n val_dataset = Dataset(data['val'], data['total_users'], data['total_items'], \n implicit_negative=not eval_explicit, name='Val', num_negatives=eval_num_neg)\n test_dataset = Dataset(data['test'], data['total_users'], data['total_items'], \n implicit_negative=not eval_explicit, name='Test', num_negatives=eval_num_neg)\n val_sampler = EvaluationSampler(batch_size=batch_size, dataset=val_dataset)\n test_sampler = EvaluationSampler(batch_size=batch_size, dataset=test_dataset)\n eval_samplers = [val_sampler, test_sampler]\n \n # set evaluators\n auc_evaluator = AUC()\n evaluators = [auc_evaluator]\n \n \n # set model parameters\n model = PMF(l2_reg=l2_reg, \n batch_size=batch_size, \n total_users=train_dataset.total_users(), \n total_items=train_dataset.total_items(), \n dim_user_embed=dim_user_embed, \n dim_item_embed=dim_item_embed, \n save_model_dir=log_dir, \n train=True, \n serve=True)\n \n \n # set model trainer\n model_trainer = ModelTrainer(model=model) \n model_trainer.train(total_iter=total_iter, \n eval_iter=eval_iter, \n save_iter=save_iter, \n train_sampler=train_sampler, \n eval_samplers=eval_samplers, \n evaluators=evaluators)\n\n \n \nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser(description='Parse parameters')\n parser.add_argument('--dataset', type=str, default='bytedance', help='dataset to use')\n parser.add_argument('--l2_reg', type=float, default=0.01, help='l2 regularization of latent factor')\n parser.add_argument('--pos_ratio', type=float, default=0.5, help='pos ratio of sampling')\n parser.add_argument('--neg_ratio', type=float, default=None, help='negative ratio of sampling')\n parser.add_argument('--eval_explicit', action='store_true', help='turn on to use labels to evaluate, by default treat click as positive and non-click as negative')\n parser.add_argument('--eval_rank', action='store_true', help='show ranking accuracy for pos and neg samples')\n parser.add_argument('--log', action='store_true', help='turn on for logging results to file, by default will print on screen')\n args = parser.parse_args()\n print (args)\n \n # run experiments\n exp(dataset=args.dataset, l2_reg=args.l2_reg, pos_ratio=args.pos_ratio, neg_ratio=args.neg_ratio, eval_explicit=args.eval_explicit, save_log=args.log, eval_rank=args.eval_rank)\n\n\n \n","sub_path":"wrmf_postclick.py","file_name":"wrmf_postclick.py","file_ext":"py","file_size_in_byte":6099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"73243226","text":"from datetime import date\nfrom sqlalch.forms import Book, User, UserBook\nfrom sqlalch.db_conn import 
engine\nfrom sqlalchemy.orm import sessionmaker\n\nSession = sessionmaker(bind=engine)\nsession = Session()\nnew_book = Book.add_book(1, \"For whom the Bell Tolls\", \"Ernest\",\n\"Hemingway\",\n\"https://briefly.ru/heminguej/po_kom_zvonit_kolokol/\")\nsession.commit()\nsession = Session()\nnew_book_2 = Book.add_book(2, \"Hotel\", \"Arthur\",\n\"Heiley\",\n\"https://www.goodreads.com/book/show/124920.Hotel\")\nsession.commit()","sub_path":"Kostya Zinchuk/workshop5/source/python/sqlalch/populate.py","file_name":"populate.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"649965185","text":"import board\nimport digitalio\n#from digitalio import DigitalInOut, Direction\nimport time\nimport busio\nimport adafruit_bno055\n\n# Bluetooth\nuart = busio.UART(board.TX, board.RX, baudrate=9600)\n\n# Gyro\ni2c = busio.I2C(board.SCL, board.SDA)\nsensor = adafruit_bno055.BNO055(i2c)\n\n# LED\nled = digitalio.DigitalInOut(board.D13)\nled.direction = digitalio.Direction.OUTPUT\n\n# Button\nbutton = digitalio.DigitalInOut(board.D6)\nbutton.direction = digitalio.Direction.INPUT\n\nfor i in range(3):\n led.value = True\n time.sleep(0.1)\n led.value = False\n time.sleep(0.1) \nprint(\"Setup done!\\n\")\n\n# Main loop runs forever printing acceleration and Euler angles every second.\nwhile True:\n if button.value:\n txString = \"bp+\"+str(sensor.euler)+\"+\"+str(sensor.quaternion)+\"+\"+str(sensor.gyroscope)+\"@\"\n #print('Accelerometer (m/s^2): {}'.format(sensor.accelerometer))\n #print('Euler angle: {}'.format(sensor.euler))\n #print('Quaterion: {}'.format(sensor.quaternion))\n #print('Qyroscope: {}'.format(sensor.gyroscope))\n #print(\"- \"*20)\n #print(txString)\n #x = input(\"Message to send\\n\")\n print(uart.write(txString))\n time.sleep(0.1)\n \n \n txString = \"sp+\"+str(sensor.euler)+\"+\"+str(sensor.quaternion)+\"+\"+str(sensor.gyroscope)+\"@\" \n uart.write(txString) \n \n #data = None\n data = uart.read(2) # read up to 32 bytes\n if data:\n datastr = ''.join([chr(b) for b in data])# convert bytearray to string\n if datastr == \"r@\":\n txString = \"bp+\"+str(sensor.euler)+\"+\"+str(sensor.quaternion)+\"+\"+str(sensor.gyroscope)+\"@\" \n uart.write(txString)\n else:\n print(datastr)\n ","sub_path":"demo/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"245013593","text":"#https://techtutorialsx.com/2017/04/23/python-subscribing-to-mqtt-topic/\n\nimport paho.mqtt.client as mqtt\nimport time\nfrom datetime import datetime\n\ndef on_connect(client, userdata, flags, rc):\n global connected\n if rc == 0:\n # print(\"Connected to broker\")\n connected = True\n else:\n print(\"Connection failed\")\n\ndef on_message(client, userdata, msg):\n print(\"Message received: \" + msg.payload.decode(\"utf-8\") + \" (\" + str(datetime.now()) + \")\")\n #print(\"Message topic: \" + msg.topic)\n #print(\"Message qos: \" + msg.qos)\n #print(\"Message retain flag=\" + msg.retain)\n with open(\"values.csv\", \"a\") as file:\n file.write(msg.payload.decode(\"utf-8\") + \",\" + str(datetime.now()) + \"\\n\")\n\nbroker = \"192.168.178.62\"\nport = 1883\ntopic = \"temperature\"\n\nprint(\"Creating new instance\")\nclient = mqtt.Client(\"P1\")\nclient.on_message = on_message\nclient.on_connect = on_connect\n\nprint(\"Connecting to broker\")\nclient.connect(broker, 
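`populate.py` above opens a fresh `Session` and commits once per book; the usual `sessionmaker` pattern batches the inserts in one session and one commit (sketched against an in-memory SQLite engine with a stand-in model, SQLAlchemy 1.4+ style):

```python
# Sketch of the sessionmaker pattern from populate.py, against an in-memory
# SQLite engine; `Book` here is a stand-in for the project's model.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class Book(Base):
    __tablename__ = "books"
    id = Column(Integer, primary_key=True)
    title = Column(String)

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)

Session = sessionmaker(bind=engine)
session = Session()
session.add_all([Book(title="For whom the Bell Tolls"), Book(title="Hotel")])
session.commit()            # one session, one commit for the whole batch
```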
port=port)\n\nclient.loop_start()\n\nclient.subscribe(topic, qos = 0)\ntime.sleep(1)\nclient.publish(topic, \"value\")\ntime.sleep(1)\n\ntry:\n print(\"Waiting for messages\")\n while True:\n time.sleep(0.1)\n\nexcept KeyboardInterrupt:\n print(\"Exiting\")\n client.loop_stop()\n client.disconnect()\n","sub_path":"weather_station/Visualization/mqtt_subscribe.py","file_name":"mqtt_subscribe.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"192734540","text":"from django.core.management.base import BaseCommand\nfrom movie.management.commands import crawlingMixin\nfrom datetime import datetime\nimport time\nimport requests\nimport os\nfrom bs4 import BeautifulSoup\nfrom movie.models import BoxofficeRank\n\n\ndef get_boxoffice_moviename():\n today = datetime.now().strftime(\"%Y%m%d\")\n boxoffice_url = \"http://movie.daum.net/boxoffice/weekly?\" \\\n \"startDate={date}\".format(date=today)\n\n r = requests.get(boxoffice_url)\n bs = BeautifulSoup(r.text, \"html.parser\")\n\n movie_list = bs.select(\".tit_join > a\")\n\n movie_names = [\n movie.text\n for movie in movie_list\n ]\n\n return movie_names\n\n\ndef create_boxoffice(rank, movie):\n BoxofficeRank.objects.create(rank=rank, movie=movie)\n\n\ndef init_boxoffice():\n movie_arr = get_boxoffice_moviename()\n boxiffice_list = []\n\n for movie in movie_arr:\n time.sleep(1)\n boxiffice_list.append(crawlingMixin.insert_db(movie))\n\n BoxofficeRank.objects.all().delete()\n for rank, movie in enumerate(boxiffice_list):\n create_boxoffice(rank=rank+1, movie=movie)\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n init_boxoffice()\n","sub_path":"django_app/movie/management/commands/get_boxoffice.py","file_name":"get_boxoffice.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"552247726","text":"from mint.signal import Signal\nfrom mint import signal\nfrom mint import utils\nimport mint\n\nclass Entity(object):\n\n def __init__(self, name=None):\n if not name:\n name = str(id(self))\n self.name = name\n self.ports = []\n # add to network\n self.network = mint.network\n self.network.add(self)\n\n def __repr__(self):\n return '<{}:{}>'.format(self.__class__.__name__, self.name)\n\n def install(self, *ports):\n self.ports.extend(ports)\n for port in ports:\n port.attach_to(self)\n # notify ports installation to network\n self.network.add(*ports)\n\nclass Port(object):\n\n def __init__(self, rate=1):\n self.entity = None\n self.peer = None\n self.rate = rate\n self.obuffer = ''\n self.ibuffer = ''\n self.bit_arrive = Signal()\n #self.bit_sent = ''\n self.bits_received = ''\n\n def __repr__(self):\n try:\n peer_entity = self.peer and self.peer.entity or None\n s = '<{} of {}>'.format(\n self.__class__.__name__,\n self.entity)\n #s = '<{} of {:} to {:}>\\n'.format(\n # self.__class__.__name__,\n # self.entity,\n # peer_entity)\n #s += '\\ti:{:20} o:{:20}'.format(\n # self.ibuffer,\n # self.obuffer,\n # )\n return s\n except Exception:\n return super(Port, self).__repr__()\n\n def attach_to(self, entity):\n self.entity = entity\n\n def fuse_with(self, port):\n self.peer = port\n if port.peer != self:\n port.fuse_with(self)\n\n def put(self, data, is_bits=False):\n if not is_bits:\n data = utils.bitify(data)\n self.obuffer += data\n\n def get(self, nbits=None):\n if not nbits:\n nbits = len(self.ibuffer)\n ret, self.ibuffer = 
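The `on_message` callback above appends `payload + "," + timestamp` by hand, which corrupts the file if a payload ever contains a comma; the `csv` module quotes fields (a drop-in sketch for that callback body):

```python
# Safer version of the CSV append in on_message() above: csv.writer
# quotes fields, so commas in the payload cannot corrupt the file.
import csv
from datetime import datetime

def log_message(payload, path="values.csv"):
    with open(path, "a", newline="") as f:
        csv.writer(f).writerow([payload, datetime.now().isoformat()])

log_message("21.5")
```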
utils.split_at(self.ibuffer, nbits)\n return ret\n\n def handoff(self):\n if not self.peer:\n return\n sent, self.obuffer = utils.split_at(self.obuffer,\n min(self.rate, self.peer.rate))\n self.peer.ibuffer += sent\n self.peer.bits_received += sent\n # TODO\n # this ought to be implemented like this:\n # buffer as a descriptor\n # when altered, emit the signal\n # so bit_arrive then is no need to be emitted here\n # writting the ugly self.peer... etc.\n self.peer.bit_arrive(self.peer)\n\n def proc(self, env):\n while True:\n self.handoff()\n yield env.timeout(1)\n\nclass Endpoint(Entity):\n\n def __init__(self, name=None):\n super(Endpoint, self).__init__(name)\n self.install(Port())\n self.port = self.ports[0]\n\nclass Intersection(Entity):\n\n def __init__(self, name=None, nports=3):\n super(Intersection, self).__init__(name)\n self.install(*[Port() for _ in range(nports)])\n\nclass Link(Entity):\n\n def __init__(self, port1=None, port2=None, name=None):\n super(Link, self).__init__(name)\n self.install(Port(), Port())\n if isinstance(port1, Endpoint):\n port1 = port1.port\n if isinstance(port2, Endpoint):\n port2 = port2.port\n self.connect(port1, port2)\n self.latency = 3\n\n def connect(self, port1, port2):\n self.ports[0].fuse_with(port1)\n self.ports[1].fuse_with(port2)\n\n def get_bit(self, port):\n if port.ibuffer:\n self.pipes[port] += port.ibuffer\n port.ibuffer = ''\n else:\n self.pipes[port] += self.noise_bit()\n bit, self.pipes[port] = utils.split_at(self.pipes[port], 1)\n return bit\n\n def transfer(self, port1, port2):\n bit = self.get_bit(port1)\n port2.put(bit, True)\n\n def proc(self, env):\n self.pipes = dict(zip(self.ports, [self.noise_bit() * self.latency] * 2))\n while True:\n self.transfer(*self.ports)\n self.transfer(*reversed(self.ports))\n yield env.timeout(1)\n\n def noise_bit(self):\n return '0'\n","sub_path":"mint/_versions/20151101213853 layer2 framer/mint/components.py","file_name":"components.py","file_ext":"py","file_size_in_byte":4163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"23253284","text":"import unittest\nfrom mantid.simpleapi import *\nfrom mantid.api import MatrixWorkspace, WorkspaceGroup, ITableWorkspace\n\n\nclass IqtFitMultipleTest(unittest.TestCase):\n\n _iqt_ws = None\n _function = r'name=LinearBackground,A0=0.027668,A1=0,ties=(A1=0);name=UserFunction,Formula=Intensity*exp(-(x/Tau)^Beta),Intensity=0.972332,Tau=0.0247558,Beta=1;ties=(f1.Intensity=1-f0.A0)'\n\n def setUp(self):\n self._iqt_ws = Load(Filename='iris26176_graphite002_iqt.nxs',\n OutputWorkspace='iris26176_graphite002_iqt')\n\n#-----------------------------------Validation of result-------------------------------------\n\n def _validate_output(self, params, result, fit_group):\n self.assertTrue(isinstance(params, ITableWorkspace))\n self.assertTrue(isinstance(result, MatrixWorkspace))\n self.assertTrue(isinstance(fit_group, WorkspaceGroup))\n\n self._validate_table_shape(params)\n self._validate_matrix_shape(result)\n self._validate_group_shape(fit_group)\n\n self._validate_table_values(params)\n self._validate_matrix_values(result)\n self._validate_group_values(fit_group)\n\n self._validate_sample_log_values(result)\n self._validate_sample_log_values(fit_group.getItem(0))\n\n\n def _validate_table_shape(self, tableWS):\n # Check length of rows and columns\n rows = tableWS.rowCount()\n columns = tableWS.columnCount()\n self.assertEquals(rows, 17)\n self.assertEquals(columns, 11)\n\n # Check some column names\n 
column_names = tableWS.getColumnNames()\n self.assertEquals('axis-1', column_names[0])\n self.assertEquals('f0.A0', column_names[1])\n self.assertEquals('f0.A0_Err', column_names[2])\n\n def _validate_matrix_shape(self, matrixWS):\n # Check no. bins and no. hists\n nbins = matrixWS.blocksize()\n nhists = matrixWS.getNumberHistograms()\n self.assertEquals(nbins, 17)\n self.assertEquals(nhists, 4)\n\n # Check histogram names\n text_axis = matrixWS.getAxis(1)\n self.assertTrue(text_axis.isText())\n self.assertEquals('f0.A0',text_axis.label(0))\n self.assertEquals('f1.Intensity',text_axis.label(1))\n self.assertEquals('f1.Tau',text_axis.label(2))\n self.assertEquals('f1.Beta',text_axis.label(3))\n\n # Check bin units\n self.assertEquals('MomentumTransfer', matrixWS.getAxis(0).getUnit().unitID())\n\n def _validate_group_shape(self, groupWS):\n # Check number of workspaces and size\n nitems = groupWS.getNumberOfEntries()\n self.assertEquals(nitems, 17)\n sub_ws = groupWS.getItem(0)\n nbins = sub_ws.blocksize()\n nhists = sub_ws.getNumberHistograms()\n self.assertEquals(nbins, 49)\n self.assertEquals(nhists, 3)\n\n # Check histogram names\n text_axis = sub_ws.getAxis(1)\n self.assertTrue(text_axis.isText())\n self.assertEquals('Data',text_axis.label(0))\n self.assertEquals('Calc',text_axis.label(1))\n self.assertEquals('Diff',text_axis.label(2))\n\n # Check bin units\n self.assertEquals('ns', str(sub_ws.getAxis(0).getUnit().symbol()))\n\n\n def _validate_table_values(self, tableWS):\n # Check column data\n column = tableWS.column(0)\n self.assertEquals(round(column[0], 6), 0.483619)\n self.assertEquals(round(column[1], 6), 0.607871)\n self.assertEquals(round(column[-1], 5), 1.84519)\n\n # Check row data\n row = tableWS.row(0)\n self.assertEquals(round(row['axis-1'], 6), 0.483619)\n self.assertEquals(round(row['f1.Intensity'], 6), 0.979517)\n self.assertEquals(round(row['f1.Tau'], 6), 0.024672)\n\n def _validate_matrix_values(self, matrixWS):\n # Check f0.A0\n a0 = matrixWS.readY(0)\n self.assertEquals(round(a0[0], 7), 0.0204827)\n self.assertEquals(round(a0[-1],7), 0.0229125)\n\n # Check f1.Intensity\n intensity = matrixWS.readY(1)\n self.assertEquals(round(intensity[0], 6), 0.979517)\n self.assertEquals(round(intensity[-1],6), 0.977088)\n\n # Check f1.Tau\n tau = matrixWS.readY(2)\n self.assertEquals(round(tau[0], 6), 0.024672)\n self.assertEquals(round(tau[-1],8), 0.00253487)\n\n # Check f1.Beta\n beta = matrixWS.readY(3)\n self.assertEquals(round(beta[0], 6), 0.781177)\n self.assertEquals(round(beta[-1],6), 0.781177)\n\n def _validate_group_values(self, groupWS):\n sub_ws = groupWS.getItem(0)\n # Check Data\n data = sub_ws.readY(0)\n self.assertEquals(round(data[0], 5), 1)\n self.assertEquals(round(data[-1],7),0.0450769)\n # Check Calc\n calc = sub_ws.readY(1)\n self.assertEquals(round(calc[0], 5), 1)\n self.assertEquals(round(calc[-1],6),0.026465)\n # Check Diff\n diff = sub_ws.readY(2)\n self.assertEquals(round(diff[0], 19), -5.31797e-14)\n self.assertEquals(round(diff[-1],6), 0.018612)\n\n def _validate_sample_log_values(self, matrixWS):\n run = matrixWS.getRun()\n # Check additionally added logs\n self.assertEqual(run.getProperty('fit_type').value, '1S')\n self.assertEqual(run.getProperty('intensities_constrained').value, 'True')\n self.assertEqual(run.getProperty('beta_constrained').value, 'True')\n self.assertEqual(run.getProperty('end_x').value, 0.2)\n self.assertEqual(run.getProperty('start_x').value, 0.0)\n\n # Check copied logs from input\n 
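The value checks in this test emulate approximate comparison with `assertEquals(round(x, n), y)`; `assertAlmostEqual` states that intent directly, and `assertEquals` is a deprecated alias of `assertEqual`:

```python
# assertAlmostEqual expresses the round-then-compare checks above directly;
# assertEquals is a deprecated alias of assertEqual in modern unittest.
import unittest

class RoundingDemo(unittest.TestCase):
    def test_close_values(self):
        self.assertAlmostEqual(0.024672, 0.0246719, places=6)
        self.assertEqual(round(0.483619, 6), 0.483619)   # old style, same intent

if __name__ == "__main__":
    unittest.main()
```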
self.assertEqual(run.getProperty('current_period').value, 1)\n self.assertEqual(run.getProperty('iqt_resolution_workspace').value, 'iris26173_graphite002_res')\n self.assertEqual(run.getProperty('iqt_sample_workspace').value, 'iris26176_graphite002_red')\n\n\n#---------------------------------------Success cases--------------------------------------\n\n def test_basic(self):\n \"\"\"\n Tests a basic run of IqtfitMultiple.\n \"\"\"\n result, params, fit_group = IqtFitMultiple(InputWorkspace=self._iqt_ws,\n Function=self._function,\n FitType='1S_s',\n StartX=0,\n EndX=0.2,\n SpecMin=0,\n SpecMax=16,\n ConstrainIntensities=True)\n self._validate_output(params, result, fit_group)\n\n#----------------------------------------Failure cases-------------------------------------\n\n def test_minimum_spectra_number_less_than_0(self):\n self.assertRaises(ValueError, IqtFitMultiple,\n InputWorkspace=self._iqt_ws,\n Function=self._function,\n FitType='1S_s',\n EndX=0.2,\n SpecMin=-1,\n SpecMax=16,\n OutputResultWorkspace='result',\n OutputParameterWorkspace='table',\n OutputWorkspaceGroup='fit_group')\n\n def test_maximum_spectra_more_than_workspace_spectra(self):\n self.assertRaises(RuntimeError, IqtFitMultiple, InputWorkspace=self._iqt_ws,\n Function=self._function,\n FitType='1S_s',\n EndX=0.2,\n SpecMin=0,\n SpecMax=20,\n OutputResultWorkspace='result',\n OutputParameterWorkspace='table',\n OutputWorkspaceGroup='fit_group')\n\n def test_minimum_spectra_more_than_maximum_spectra(self):\n self.assertRaises(RuntimeError, IqtFitMultiple, InputWorkspace=self._iqt_ws,\n Function=self._function,\n FitType='1S_s',\n EndX=0.2,\n SpecMin=10,\n SpecMax=5,\n OutputResultWorkspace='result',\n OutputParameterWorkspace='table',\n OutputWorkspaceGroup='fit_group')\n\n def test_minimum_x_less_than_0(self):\n self.assertRaises(ValueError, IqtFitMultiple, InputWorkspace=self._iqt_ws,\n Function=self._function,\n FitType='1S_s',\n StartX=-0.2,\n EndX=0.2,\n SpecMin=0,\n SpecMax=16,\n OutputResultWorkspace='result',\n OutputParameterWorkspace='table',\n OutputWorkspaceGroup='fit_group')\n\n def test_maximum_x_more_than_workspace_max_x(self):\n self.assertRaises(RuntimeError, IqtFitMultiple, InputWorkspace=self._iqt_ws,\n Function=self._function,\n FitType='1S_s',\n StartX=0,\n EndX=0.4,\n SpecMin=0,\n SpecMax=16,\n OutputResultWorkspace='result',\n OutputParameterWorkspace='table',\n OutputWorkspaceGroup='fit_group')\n\n def test_minimum_spectra_more_than_maximum_spectra(self):\n self.assertRaises(RuntimeError, IqtFitMultiple, InputWorkspace=self._iqt_ws,\n Function=self._function,\n FitType='1S_s',\n StartX=0.2,\n EndX=0.1,\n SpecMin=0,\n SpecMax=16,\n OutputResultWorkspace='result',\n OutputParameterWorkspace='table',\n OutputWorkspaceGroup='fit_group')\n\n\nif __name__==\"__main__\":\n unittest.main()\n","sub_path":"Framework/PythonInterface/test/python/plugins/algorithms/IqtFitMultipleTest.py","file_name":"IqtFitMultipleTest.py","file_ext":"py","file_size_in_byte":10039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"89286794","text":"#!/usr/bin/env python\n'''\nHola Mundo! 
[Python]\n---------------------------\nAutor: Inove Coding School\nVersion: 1.3\n\nDescripcion:\nPrograma creado para ensayar el correcto funcionamiento\ndel entorno de instalación Python\n'''\n\n__author__ = \"Alejandro\"\n__email__ = \"Alejandrocesarv@gmail.com\"\n__version__ = \"Clase numero 1\"\n\nprint(\"Hola Mundo!\")\n","sub_path":"hola_mundo.py","file_name":"hola_mundo.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"455737860","text":"from context_task import TaskType,TaskContext\n#from interface_data import DataLoadFactory\n#from interface_transformation import DataTransformationFactory\n\nconfigs = {'operation': TaskType.LOAD,\n \"source\": \"csv\",\n \"params\": {\"path\": \"path/to/csv\"}\n }\n\ndataload_context = TaskContext()\ndata_load_factory = dataload_context.get_context(configs)\nprint(type(data_load_factory))\n\n#v1","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"274501439","text":"from typing import Iterator, Tuple\n\nfrom amortization.amount import calculate_amortization_amount\n\n\ndef amortization_schedule(\n principal: float, interest_rate: float, period: int\n) -> Iterator[Tuple[int, float, float, float, float]]:\n \"\"\"\n Generates amortization schedule\n\n :param principal: Principal amount\n :param interest_rate: Interest rate per period\n :param period: Total number of periods\n :return: Rows containing period, interest, principal, balance, etc\n \"\"\"\n amortization_amount = calculate_amortization_amount(principal, interest_rate, period)\n number = 1\n balance = principal\n while number <= period:\n interest = balance * interest_rate\n principal = amortization_amount - interest\n balance -= principal\n yield number, amortization_amount, interest, principal, balance if balance > 0 else 0\n number += 1\n","sub_path":"amortization/schedule.py","file_name":"schedule.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"324590256","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport numpy as np\n\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\n# define inputs/outputs\nx = tf.placeholder(tf.float32, [None, 784])\n\ndef conv2d(x, y):\n return tf.nn.conv2d(x, y, strides=[1, 1, 1, 1], padding='SAME')\n\ndef max_pool_2x2(x):\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\ndef read_file(file_name):\n file = open(file_name, \"rb\")\n return tf.constant(np.load(file))\n\nx_image = tf.reshape(x, [-1, 28, 28, 1])\n\nW_conv1 = read_file(\"W_conv1.npy\")\nb_conv1 = read_file(\"b_conv1.npy\")\n\nh1_conv = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\nh1_pool = max_pool_2x2(h1_conv)\n\nW_conv2 = read_file(\"W_conv2.npy\")\nb_conv2 = read_file(\"b_conv2.npy\")\n\nh2_conv = tf.nn.relu(conv2d(h1_pool, W_conv2) + b_conv2)\nh2_pool = max_pool_2x2(h2_conv)\n\nh2_pool_flat = tf.reshape(h2_pool, [-1, 7*7*64])\n\nW_fc1 = read_file(\"W_fc1.npy\")\nb_fc1 = read_file(\"b_fc1.npy\")\n\nh1_fc = tf.nn.relu(tf.matmul(h2_pool_flat, W_fc1) + b_fc1)\n\nkeep_prob = tf.placeholder(tf.float32)\n\nh1_fc_drop = tf.nn.dropout(h1_fc, keep_prob)\n\nW_fc2 = read_file(\"W_fc2.npy\")\nb_fc2 = read_file(\"b_fc2.npy\")\n\ny_conv = tf.matmul(h1_fc_drop, W_fc2) + 
b_fc2\n\ny_ = tf.placeholder(tf.float32, [None, 10])\n\ncross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y_conv, labels=y_))\n\nsess = tf.InteractiveSession()\n\ncorrect_predictions = tf.equal(tf.argmax(y_, 1), tf.argmax(y_conv, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))\n\n# evaluate the mean accuracy of the test set by evaluating 200 test examples 200 times\ntotal_sum = 0\nfor i in range(200):\n batch = mnist.train.next_batch(200)\n temp_accuracy = sess.run(accuracy, feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})\n total_sum += temp_accuracy\n\ntest_accuracy = total_sum / 200\nprint(\"test accuracy is %f\"%test_accuracy)\n","sub_path":"MNIST/mnist_cnn_predict.py","file_name":"mnist_cnn_predict.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"246679115","text":"import json\nimport seaborn as sns\nsns.set()\nimport matplotlib.pyplot as plt\n\nimport pandas as pd\nimport numpy as np\n\nimport os\nfrom os.path import expanduser\n\nclass ExperimentConfiguration:\n def __init__(self, **kwargs):\n self.data = kwargs\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n def __str__(self):\n return '-'.join(str(k) + \":\" + str(v) for k, v in self.data.items())\n\n @classmethod\n def from_json(cls, json_data):\n return cls(**json_data)\n\ntotal_regret_data = {}\naverage_reward_data = {}\naverage_regret_data = {}\n\ndef show_value(path='ray_results/my_experiment', N=10, plot_variable=\"total_regret\"):\n home = expanduser(\"~\")\n path = os.path.join(home, path)\n avg_regret_test = []\n for i, p in enumerate(os.listdir(path)):\n if not \"experiment\" in p:\n continue\n file_path = os.path.join(path, p)\n list_tracker = []\n with open(os.path.join(file_path, 'result.json')) as f:\n list_tracker = [json.loads(lines) for lines in f.readlines()]\n\n with open(os.path.join(file_path, 'params.json')) as f:\n config = json.load(f)\n config = ExperimentConfiguration.from_json(config)\n display_config = str(config)\n\n if not display_config in [\"lr:3e-05-unit:48\", \"lr:0.0007-unit:48\", \"lr:1e-05-unit:48\", \"lr:0.0001-unit:10\"]:\n continue\n\n total_regret,\\\n average_reward,\\\n average_regret, \\\n timestep = map(list, zip(*map(lambda x: (x['total_regret'],\n x['average_reward'], x['average_regret'], x['timesteps_this_iter']),list_tracker)))\n\n # Smoothing using window avg\n total_regret_data[display_config] = np.convolve(total_regret, np.ones((N,))/N, mode='valid')\n average_reward_data[display_config] = np.convolve(average_reward, np.ones((N,))/N, mode='valid')\n average_regret_data[display_config] = np.convolve(average_regret, np.ones((N,))/N, mode='valid')\n\n avg_regret_test.append((display_config, np.mean(total_regret)))\n\n total_regret_df = pd.DataFrame(total_regret_data)\n average_reward_df = pd.DataFrame(average_reward_data)\n average_regret_df = pd.DataFrame(average_regret_data)\n\n switch={\n \"total_regret\": total_regret_df,\n \"average_reward\": average_reward_df,\n \"average_regret\": average_regret_df\n }\n\n plt.figure(figsize=(15,5))\n ax = sns.lineplot(\n data=switch[plot_variable],\n dashes=False\n )\n ax.set_title(plot_variable)\n plt.show()\n\n\n# if __name__ == '__main__':\n# show_value()\n","sub_path":"plot_result.py","file_name":"plot_result.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} 
+{"seq_id":"646847960","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('gogetthegood/', views.gogetthegood, name='gogetthegood'),\n path('happyhappyjoyjoy/', views.happyhappyjoyjoy, name='happyhappyjoyjoy'),\n path('youheard/', views.youheard, name='youheard')\n\n]","sub_path":"endpoint66_project/endpoint66_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"338634252","text":"import numpy as np\nimport tensorflow as tf\n\nclass model:\n def __init__(self,x,ind5,ind4,ind3,ind2,ind1):\n \n # x : input -> output of pool5 with size [batch_size,7,7,512]\n # ind5 = maxpool indices of pool5, size = [batch_size,14,14,512]\n # ind4 = maxpool indices of pool5, size = [batch_size,28,28,512]\n # ind3 = maxpool indices of pool5, size = [batch_size,56,56,256]\n # ind2 = maxpool indices of pool5, size = [batch_size,112,112,128]\n # ind1 = maxpool indices of pool5, size = [batch_size,224,224,64]\n \n unpool5 = self.unpool(x,ind5) # [14 14 512]\n deconv5_3 = self.upsample_layer(unpool5,3,3,512,1,1,\"deconv5_3\") # [14 14 512]\n deconv5_2 = self.upsample_layer(deconv5_3,3,3,512,1,1,\"deconv5_2\") # [14 14 512]\n deconv5_1 = self.upsample_layer(deconv5_2,3,3,512,1,1,\"deconv5_1\") # [14 14 512]\n \n \n unpool4 = self.unpool(deconv5_1,ind4) # [28 28 512]\n deconv4_3 = self.upsample_layer(unpool4,3,3,512,1,1,\"deconv4_3\") # [28 28 512]\n deconv4_2 = self.upsample_layer(deconv4_3,3,3,512,1,1,\"deconv4_2\") # [28 28 512]\n deconv4_1 = self.upsample_layer(deconv4_2,3,3,256,1,1,\"deconv4_1\") # [28 28 256]\n \n \n unpool3 = self.unpool(deconv4_1,ind3) # [56 56 256]\n deconv3_3 = self.upsample_layer(unpool3,3,3,256,1,1,\"deconv3_3\") # [56 56 256]\n deconv3_2 = self.upsample_layer(deconv3_3,3,3,256,1,1,\"deconv3_2\") # [56 56 256]\n deconv3_1 = self.upsample_layer(deconv3_2,3,3,128,1,1,\"deconv3_1\") # [56 56 128]\n \n \n unpool2 = self.unpool(deconv3_1,ind2) # [112 112 128]\n deconv2_2 = self.upsample_layer(unpool2,3,3,128,1,1,\"deconv2_2\") # [112 112 128]\n deconv2_1 = self.upsample_layer(deconv2_2,3,3,64,1,1,\"deconv2_1\") # [112 112 64]\n \n \n \n unpool1 = self.unpool(deconv2_1,ind1) # [224 224 64]\n deconv1_2 = self.upsample_layer(unpool1,3,3,64,1,1,\"deconv1_2\") # [224 224 64]\n deconv1_1 = self.upsample_layer(deconv1_2,3,3,3,1,1,\"deconv1_1\") # [224 224 3]\n \n self.final = self.conv1(deconv1_1,3,3,12,1,1,name=\"final\")\n \n \n # unpooling process \n def unpool(self,bottom,indices):\n \n # bottom: inpoot layer , size: [batch_size,m,m,c]\n # indices: indices of maxpool , size:[batch_size,2m,2m,c]\n \n sz = bottom.shape\n \n wd = sz[1] # width\n hg = sz[2] # height \n channels = sz[3] # channels\n \n \n # create matriz with all 1 of size [heightt, 2*height]\n w = self.create_w(hg)\n w = tf.constant(w,dtype=tf.float32)\n\n\n \n output1 = []\n \n \n for i in range(sz[0]):\n output = []\n for c in range(channels):\n\n inp = bottom[i,:,:,c]\n\n ind = indices[i,:,:,c]\n\n\n out1 = tf.tensordot(inp,w,1)\n\n out2 = tf.tensordot(tf.transpose(out1),w,1)\n out = tf.multiply(tf.transpose(out2),ind)\n \n output.append(out);\n\n output1.append(tf.stack(output,axis=2))\n \n output2 = tf.stack(output1,axis=0)\n \n return output2\n \n def create_w(self,size):\n w = np.zeros([size,2*size])\n for i in range(size):\n for j in range(2*i,2*i+2):\n w[i,j] = 1\n return w\n \n \n def 
upsample_layer(self,bottom,filter_height,filter_width,num_filters,px,py,name):\n \n input_channels = int(bottom.get_shape()[-1])\n sz = bottom.shape.as_list()\n \n sz[-1] = num_filters\n output_shape = tf.stack(sz)\n \n with tf.variable_scope(name) as scope:\n # Create tf variables for the weights and biases of the conv layer\n weights = tf.get_variable('weights', shape=[filter_height,\n filter_width,\n num_filters,input_channels])\n \n ct = tf.nn.conv2d_transpose(bottom,weights,sz,[1,1,1,1],padding='SAME')\n return tf.nn.relu(ct,name=name)\n \n def conv1(self,bottom,filter_height,filter_width,num_filters,px,py,name):\n \n input_channels = int(bottom.get_shape()[-1])\n \n convolve = lambda i, k: tf.nn.conv2d(i, k,\n strides=[1, 1, 1, 1],\n padding='SAME')\n\n with tf.variable_scope(name) as scope:\n # Create tf variables for the weights and biases of the conv layer\n weights = tf.get_variable('weights', shape=[filter_height,\n filter_width,\n input_channels,\n num_filters])\n biases = tf.get_variable('biases', shape=[num_filters])\n \n conv = convolve(bottom, weights)\n \n # Add biases\n bias = tf.reshape(tf.nn.bias_add(conv, biases), tf.shape(conv))\n\n # Apply relu function\n relu = tf.nn.relu(bias, name=scope.name)\n\n return relu","sub_path":"Deconv/deconvmodel.py","file_name":"deconvmodel.py","file_ext":"py","file_size_in_byte":5498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"176917812","text":"import time\nimport webbrowser\n\nprint('Time to take a break')\n\ntotal_breaks = 3\nwait = 0\nprint(\"This program started on \" + time.ctime())\nwhile wait <= total_breaks:\n time.sleep(10)\n wait = wait + 1\n webbrowser.open(\"https://www.youtube.com/watch?v=1SJ7RZFvEhA\")\n","sub_path":"breaktime.py","file_name":"breaktime.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"206164682","text":"from nut import Print\nfrom os import remove\nfrom tqdm import tqdm\nfrom pathlib import Path\nfrom traceback import format_exc\nfrom SectionFs import isNcaPacked, sortedFs\nfrom Fs import factory, Ticket, Pfs0, Nca, Type\nfrom zstandard import FLUSH_FRAME, COMPRESSOBJ_FLUSH_FINISH, ZstdCompressor\n\ndef solidCompress(filePath, compressionLevel = 18, outputDir = None, threads = -1):\n\tncaHeaderSize = 0x4000\n\tfilePath = str(Path(filePath).resolve())\n\tcontainer = factory(filePath)\n\tcontainer.open(filePath, 'rb')\n\tCHUNK_SZ = 0x1000000\n\tnszPath = str(Path(filePath[0:-1] + 'z' if outputDir == None else Path(outputDir).joinpath(Path(filePath[0:-1] + 'z').name)).resolve(strict=False))\n\n\tfor nspf in container:\n\t\tif isinstance(nspf, Ticket.Ticket):\n\t\t\tnspf.getRightsId()\n\t\t\tbreak # No need to go for other objects\n\n\tPrint.info('compressing (level %d) %s -> %s' % (compressionLevel, filePath, nszPath))\n\tnewNsp = Pfs0.Pfs0Stream(nszPath)\n\n\ttry:\n\t\tfor nspf in container:\n\t\t\tif isinstance(nspf, Nca.Nca) and nspf.header.contentType == Type.Content.DATA:\n\t\t\t\tPrint.info('skipping delta fragment')\n\t\t\t\tcontinue\n\t\t\tif isinstance(nspf, Nca.Nca) and (nspf.header.contentType == Type.Content.PROGRAM or nspf.header.contentType == Type.Content.PUBLICDATA):\n\t\t\t\tif isNcaPacked(nspf, ncaHeaderSize):\n\t\t\t\t\tnewFileName = nspf._path[0:-1] + 'z'\n\t\t\t\t\tf = newNsp.add(newFileName, nspf.size)\n\t\t\t\t\tstart = f.tell()\n\t\t\t\t\tnspf.seek(0)\n\t\t\t\t\tf.write(nspf.read(ncaHeaderSize))\n\t\t\t\t\tsections = 
[]\n\n\t\t\t\t\tfor fs in sortedFs(nspf):\n\t\t\t\t\t\tsections += fs.getEncryptionSections()\n\n\t\t\t\t\tif len(sections) == 0:\n\t\t\t\t\t\traise Exception(\"NCA can't be decrypted. Outdated keys.txt?\")\n\t\t\t\t\theader = b'NCZSECTN'\n\t\t\t\t\theader += len(sections).to_bytes(8, 'little')\n\n\t\t\t\t\tfor fs in sections:\n\t\t\t\t\t\theader += fs.offset.to_bytes(8, 'little')\n\t\t\t\t\t\theader += fs.size.to_bytes(8, 'little')\n\t\t\t\t\t\theader += fs.cryptoType.to_bytes(8, 'little')\n\t\t\t\t\t\theader += b'\\x00' * 8\n\t\t\t\t\t\theader += fs.cryptoKey\n\t\t\t\t\t\theader += fs.cryptoCounter\n\t\t\t\t\t\n\t\t\t\t\tf.write(header)\n\t\t\t\t\tdecompressedBytes = ncaHeaderSize\n\n\t\t\t\t\twith tqdm(total=nspf.size, unit_scale=True, unit=\"B\") as bar:\n\t\t\t\t\t\tpartitions = [nspf.partition(offset = section.offset, size = section.size, n = None, cryptoType = section.cryptoType, cryptoKey = section.cryptoKey, cryptoCounter = bytearray(section.cryptoCounter), autoOpen = True) for section in sections]\n\t\t\t\t\t\tpartNr = 0\n\t\t\t\t\t\tbar.update(f.tell())\n\t\t\t\t\t\tcctx = ZstdCompressor(level=compressionLevel, threads=threads) if threads > 1 else ZstdCompressor(level=compressionLevel)\n\t\t\t\t\t\tcompressor = cctx.stream_writer(f)\n\n\t\t\t\t\t\twhile True:\n\t\t\t\t\t\t\tbuffer = partitions[partNr].read(CHUNK_SZ)\n\n\t\t\t\t\t\t\twhile (len(buffer) < CHUNK_SZ and partNr < len(partitions)-1):\n\t\t\t\t\t\t\t\tpartitions[partNr].close()\n\t\t\t\t\t\t\t\tpartitions[partNr] = None\n\t\t\t\t\t\t\t\tpartNr += 1\n\t\t\t\t\t\t\t\tbuffer += partitions[partNr].read(CHUNK_SZ - len(buffer))\n\n\t\t\t\t\t\t\tif len(buffer) == 0:\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\tcompressor.write(buffer)\n\t\t\t\t\t\t\tdecompressedBytes += len(buffer)\n\t\t\t\t\t\t\tbar.update(len(buffer))\n\n\t\t\t\t\t\tpartitions[partNr].close()\n\t\t\t\t\t\tpartitions[partNr] = None\n\n\t\t\t\t\tcompressor.flush(FLUSH_FRAME)\n\t\t\t\t\tcompressor.flush(COMPRESSOBJ_FLUSH_FINISH)\n\t\t\t\t\twritten = f.tell() - start\n\t\t\t\t\tprint('compressed %d%% %d -> %d - %s' % (int(written * 100 / nspf.size), decompressedBytes, written, nspf._path))\n\t\t\t\t\tnewNsp.resize(newFileName, written)\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tprint('not packed!')\n\t\t\tf = newNsp.add(nspf._path, nspf.size)\n\t\t\tnspf.seek(0)\n\t\t\twhile not nspf.eof():\n\t\t\t\tbuffer = nspf.read(CHUNK_SZ)\n\t\t\t\tf.write(buffer)\n\texcept KeyboardInterrupt:\n\t\tremove(nszPath)\n\t\traise KeyboardInterrupt\n\texcept BaseException:\n\t\tPrint.error(format_exc())\n\t\tremove(nszPath)\n\tfinally:\n\t\tnewNsp.close()\n\t\tcontainer.close()\n\treturn nszPath","sub_path":"nsz/SolidCompressor.py","file_name":"SolidCompressor.py","file_ext":"py","file_size_in_byte":3741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"400735768","text":"class Solution:\n def search(self, nums: List[int], target: int) -> int:\n start = 0\n end = len(nums) - 1\n while start <= end:\n mid = int((start + end) / 2)\n\n if nums[mid] == target:\n return mid\n elif nums[mid] > target:\n end = mid - 1\n else:\n start = mid + 1\n \n return -1\n\n# completed 2022-12-13 (YYYY-MM-DD)\n# Runtime: 248 ms, faster than 94.34% \n# Memory Usage: 15.4 MB, less than 74.5%\n# notes: i'm not gonna lie, the pointers broke me. this was such a simple problem and it took me a long time to solve. 
WTF?\n","sub_path":"completed/leetcode/704-binary-search.py","file_name":"704-binary-search.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"550585523","text":"#!/usr/bin/env python\n#! -*- coding: utf-8 -*-\n\n\"\"\" patrol_smach_concurrence.py - Version 1.0 2013-04-12\n\n Control a robot using SMACH to patrol around a square a specified number of times\n while monitoring battery levels using the Concurrence container.\n\n Created for the Pi Robot Project: http://www.pirobot.org\n Copyright (c) 2013 Patrick Goebel. All rights reserved.\n\n This program is free software; you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation; either version 2 of the License, or\n (at your option) any later version.5\n \n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details at:\n \n http://www.gnu.org/licenses/gpl.html\n \n\"\"\"\n\nimport rospy\nfrom smach import State, StateMachine, Concurrence, Container, UserData\nfrom smach_ros import MonitorState, ServiceState, SimpleActionState, IntrospectionServer\nfrom std_msgs.msg import Float32\nfrom geometry_msgs.msg import Twist\nfrom rbx2_msgs.srv import *\nfrom rbx2_tasks.history_task_setup import *\n\n# 讲解任务的列表\ntask_list = {'written_words':['explanatory_text'], 'rule':['explain_the_rule'], 'life':['explain_life'], 'faith':['explain_faith']}\n\nclass Stop(State):\n def __init__(self):\n State.__init__(self, outcomes=['succeeded','aborted','preempted'])\n pass\n \n def execute(self, userdata):\n rospy.loginfo(\"Shutting down the state machine\")\n return 'succeeded'\n\nclass WrittenWords(State):\n def __init__(self, section, timer):\n State.__init__(self, outcomes=['succeeded','aborted','preempted'])\n \n self.task = 'explanatory_text'\n self.section = section\n self.timer = timer\n self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=5)\n\n def execute(self, userdata):\n rospy.loginfo('Explaining the text in the ' + str(self.section))\n cmd_vel_msg = Twist()\n cmd_vel_msg.linear.x = 0.05\n counter = self.timer\n while counter > 0:\n if self.preempt_requested():\n self.service_preempt()\n return 'preempted'\n self.cmd_vel_pub.publish(cmd_vel_msg)\n cmd_vel_msg.linear.x *= -1\n rospy.loginfo(counter)\n counter -= 1\n rospy.sleep(1)\n \n self.cmd_vel_pub.publish(Twist())\n message = \"Finished explanatoring text the \" + str(self.section) + \"!\"\n rospy.loginfo(message)\n # easygui.msgbox(message, title=\"Succeeded\")\n \n # update_task_list(self.section, self.task)\n\n return 'succeeded'\n\nclass Rule(State):\n def __init__(self, section, timer):\n State.__init__(self, outcomes=['succeeded','aborted','preempted'])\n \n self.task = 'explain_the_rule'\n self.section = section\n self.timer = timer\n self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=5)\n \n def execute(self, userdata):\n rospy.loginfo('Explaining the rule in the ' + str(self.section))\n cmd_vel_msg = Twist()\n cmd_vel_msg.linear.x = 0.05\n cmd_vel_msg.angular.z = 1.2\n counter = self.timer\n while counter > 0:\n self.cmd_vel_pub.publish(cmd_vel_msg)\n cmd_vel_msg.linear.x *= -1\n rospy.loginfo(counter)\n counter -= 1\n rospy.sleep(1)\n \n self.cmd_vel_pub.publish(Twist())\n message = \"Done explaining the 
rule the \" + str(self.section) + \"!\"\n rospy.loginfo(message)\n # easygui.msgbox(message, title=\"Succeeded\")\n \n # update_task_list(self.section, self.task)\n \n return 'succeeded'\n\nclass Life(State):\n def __init__(self, section, timer):\n State.__init__(self, outcomes=['succeeded','aborted','preempted'])\n \n self.task = 'explain_life'\n self.section = section\n self.timer = timer\n self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=5)\n\n def execute(self, userdata):\n rospy.loginfo('Explain the life...')\n cmd_vel_msg = Twist()\n cmd_vel_msg.linear.x = 0.4\n cmd_vel_msg.angular.z = 0.2\n counter = self.timer\n while counter > 0:\n self.cmd_vel_pub.publish(cmd_vel_msg)\n cmd_vel_msg.linear.x *= -1\n if counter % 2 == 5:\n cmd_vel_msg.angular.z *= -1\n rospy.loginfo(counter)\n counter -= 1\n rospy.sleep(0.2)\n \n self.cmd_vel_pub.publish(Twist())\n message = \"Done explaining the life!\"\n rospy.loginfo(message)\n # easygui.msgbox(message, title=\"Succeeded\")\n \n # update_task_list(self.section, self.task)\n \n return 'succeeded'\n\nclass Faith(State):\n def __init__(self, section, timer):\n State.__init__(self, outcomes=['succeeded','aborted','preempted'])\n \n self.task = 'explain_faith'\n self.section = section\n self.timer = timer\n self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=5)\n\n def execute(self, userdata):\n rospy.loginfo('Explain the faith...')\n cmd_vel_msg = Twist()\n cmd_vel_msg.linear.x = 0.4\n cmd_vel_msg.angular.z = 0.2\n counter = self.timer\n while counter > 0:\n self.cmd_vel_pub.publish(cmd_vel_msg)\n cmd_vel_msg.linear.x *= -1\n if counter % 2 == 5:\n cmd_vel_msg.angular.z *= -1\n rospy.loginfo(counter)\n counter -= 1\n rospy.sleep(0.2)\n \n self.cmd_vel_pub.publish(Twist())\n message = \"Done explaining the faith!\"\n rospy.loginfo(message)\n # easygui.msgbox(message, title=\"Succeeded\")\n \n # update_task_list(self.section, self.task)\n \n return 'succeeded'\n\ndef update_task_list(section, task):\n task_list[section].remove(task)\n if len(task_list[section]) == 0:\n del task_list[section] \n \n\nclass History_Smach():\n def __init__(self):\n rospy.init_node('explain_history_concurrence', anonymous=False)\n \n # 设置关闭机器人函数(stop the robot)\n rospy.on_shutdown(self.shutdown)\n \n # 初始化一些参数和变量\n setup_task_environment(self)\n \n # 跟踪到达目标位置的成功率\n self.n_succeeded = 0\n self.n_aborted = 0\n self.n_preempted = 0\n \n # 保存上一个或者当前的导航目标点的变量\n self.last_nav_state = None\n \n # 指示是否正在充电的标志\n self.recharging = False\n \n # 保存导航目标点的列表\n nav_states = {}\n \n # 把waypoints变成状态机的状态\n for waypoint in self.room_locations.iterkeys(): \n nav_goal = MoveBaseGoal()\n nav_goal.target_pose.header.frame_id = 'map'\n nav_goal.target_pose.pose = self.room_locations[waypoint]\n move_base_state = SimpleActionState('move_base', MoveBaseAction, goal=nav_goal, result_cb=self.move_base_result_cb,\n exec_timeout=rospy.Duration(10.0),\n server_wait_timeout=rospy.Duration(10.0))\n # nav_states.append(move_base_state)\n nav_states[waypoint] = move_base_state\n \n # 为扩展底座(docking station)创建一个MoveBaseAction state\n nav_goal = MoveBaseGoal()\n nav_goal.target_pose.header.frame_id = 'map'\n nav_goal.target_pose.pose = self.docking_station_pose\n nav_docking_station = SimpleActionState('move_base', MoveBaseAction, goal=nav_goal, result_cb=self.move_base_result_cb,\n exec_timeout=rospy.Duration(20.0),\n server_wait_timeout=rospy.Duration(10.0))\n\n # 为written words子任务创建一个状态机\n sm_written_words = StateMachine(outcomes=['succeeded','aborted','preempted'])\n # 然后添加子任务\n 
with sm_written_words:\n StateMachine.add('EXPLAIN_HISTORY', WrittenWords('written_words', 5), transitions={'succeeded':'','aborted':'','preempted':''})\n\n # 为rule子任务创建一个状态机\n sm_rule = StateMachine(outcomes=['succeeded','aborted','preempted'])\n # 然后添加子任务\n with sm_rule:\n StateMachine.add('EXPLAIN_HISTORY', Rule('rule', 5), transitions={'succeeded':'','aborted':'','preempted':''})\n\n # 为life子任务创建一个状态机\n sm_life = StateMachine(outcomes=['succeeded','aborted','preempted'])\n # 然后添加子任务\n with sm_life:\n StateMachine.add('EXPLAIN_HISTORY', Life('life', 5), transitions={'succeeded':'','aborted':'','preempted':''})\n\n # 为faith子任务创建一个状态机\n sm_faith = StateMachine(outcomes=['succeeded','aborted','preempted'])\n # 然后添加子任务\n with sm_faith:\n StateMachine.add('EXPLAIN_HISTORY', Faith('faith', 5), transitions={'succeeded':'','aborted':'','preempted':''})\n\n # 初始化导航的状态机\n self.sm_nav = StateMachine(outcomes=['succeeded', 'aborted', 'preempted'])\n \n # 使用transitions将导航的状态添加到状态机\n with self.sm_nav:\n StateMachine.add('START', nav_states['explanatory_text'], transitions={'succeeded':'WRITTEN_WORDS','aborted':'WRITTEN_WORDS','preempted':'WRITTEN_WORDS'})\n \n ''' Add the living room subtask(s) '''\n StateMachine.add('WRITTEN_WORDS', nav_states['explanatory_text'], transitions={'succeeded':'WRITTEN_WORDS_TASKS','aborted':'RULE','preempted':'RULE'})\n \n # 当任务完成时, 继续进行kitchen任务\n StateMachine.add('WRITTEN_WORDS_TASKS', sm_written_words, transitions={'succeeded':'RULE','aborted':'RULE','preempted':'RULE'})\n \n ''' Add the kitchen subtask(s) '''\n StateMachine.add('RULE', nav_states['explain_the_rule'], transitions={'succeeded':'RULE_TASKS','aborted':'LIFE','preempted':'LIFE'})\n \n # 当任务完成时, 继续进行bathroom任务\n StateMachine.add('RULE_TASKS', sm_rule, transitions={'succeeded':'LIFE','aborted':'LIFE','preempted':'LIFE'})\n \n ''' Add the bathroom subtask(s) '''\n StateMachine.add('LIFE', nav_states['explain_life'], transitions={'succeeded':'LIFE_TASKS','aborted':'FAITH','preempted':'FAITH'})\n \n # 当任务完成时, 继续进行hallway任务\n StateMachine.add('LIFE_TASKS', sm_life, transitions={'succeeded':'FAITH','aborted':'FAITH','preempted':'FAITH'}) \n \n ''' Add the hallway subtask(s) '''\n StateMachine.add('FAITH', nav_states['explain_faith'], transitions={'succeeded':'FAITH_TASKS','aborted':'','preempted':''})\n \n # 当任务完成时, stop\n StateMachine.add('FAITH_TASKS', sm_faith, transitions={'succeeded':'','aborted':'','preempted':''}) \n \n\n # 在sm_nav状态机中注册一个回调函数以启动状态转换(state transitions)\n self.sm_nav.register_transition_cb(self.nav_transition_cb, cb_args=[])\n\n # 初始化充电的状态机\n self.sm_recharge = StateMachine(outcomes=['succeeded', 'aborted', 'preempted'])\n \n with self.sm_recharge:\n StateMachine.add('NAV_DOCKING_STATION', nav_docking_station, transitions={'succeeded':'RECHARGE_BATTERY'})\n StateMachine.add('RECHARGE_BATTERY', ServiceState('battery_simulator/set_battery_level', SetBatteryLevel, 100, response_cb=self.recharge_cb), \n transitions={'succeeded':''}) \n\n # 使用并发容器(Concurrence container)创建nav_patrol状态机\n self.nav_patrol = Concurrence(outcomes=['succeeded', 'recharge', 'stop'],\n default_outcome='succeeded',\n child_termination_cb=self.concurrence_child_termination_cb,\n outcome_cb=self.concurrence_outcome_cb)\n \n # 将sm_nav machine和battery MonitorState添加到nav_patrol状态机里面 \n with self.nav_patrol:\n Concurrence.add('SM_NAV', self.sm_nav)\n Concurrence.add('MONITOR_BATTERY', MonitorState(\"battery_level\", Float32, self.battery_cb))\n \n # 创建顶层状态机\n self.sm_top = StateMachine(outcomes=['succeeded', 'aborted', 
'preempted'])\n \n # 将nav_patrol,sm_recharge和Stop添加到sm_top状态机\n with self.sm_top:\n StateMachine.add('PATROL', self.nav_patrol, transitions={'succeeded':'PATROL', 'recharge':'RECHARGE', 'stop':'STOP'}) \n StateMachine.add('RECHARGE', self.sm_recharge, transitions={'succeeded':'PATROL'})\n StateMachine.add('STOP', Stop(), transitions={'succeeded':''})\n\n # 创建并开始SMACH introspection server\n intro_server = IntrospectionServer('patrol', self.sm_top, '/SM_ROOT')\n intro_server.start()\n \n # 运行状态机\n sm_outcome = self.sm_top.execute()\n \n rospy.loginfo('State Machine Outcome: ' + str(sm_outcome))\n \n intro_server.stop()\n \n def nav_transition_cb(self, userdata, active_states, *cb_args):\n self.last_nav_state = active_states\n \n # 当任何子状态终止时调用\n def concurrence_child_termination_cb(self, outcome_map):\n # 如果当前导航任务完成, return True\n if outcome_map['SM_NAV'] == 'succeeded':\n return True\n # 如果MonitorState状态变成False则储存当前的导航目标点并充电\n if outcome_map['MONITOR_BATTERY'] == 'invalid':\n rospy.loginfo(\"LOW BATTERY! NEED TO RECHARGE...\")\n if self.last_nav_state is not None:\n self.sm_nav.set_initial_state(self.last_nav_state, UserData())\n return True\n else:\n return False\n \n # 当任何子状态终止时调用\n def concurrence_outcome_cb(self, outcome_map):\n # 如果电池电量低于设定的阈值,返回'recharge' outcome\n if outcome_map['MONITOR_BATTERY'] == 'invalid':\n return 'recharge'\n # 否则,如果最后一个导航目标点成功,返回'succeeded' 或者 'stop'\n elif outcome_map['SM_NAV'] == 'succeeded':\n self.patrol_count += 1\n rospy.loginfo(\"FINISHED PATROL LOOP: \" + str(self.patrol_count))\n # 如果没有完成所有的巡逻,重新开始导航\n if self.n_patrols == -1 or self.patrol_count < self.n_patrols:\n # self.sm_nav.set_initial_state(['NAV_STATE_0'], UserData())\n return 'succeeded'\n # 否则,完成所有导航并返回 'stop'\n else:\n # self.sm_nav.set_initial_state(['NAV_STATE_4'], UserData())\n return 'stop'\n # 如果其他操作失败了,重新充电\n else:\n return 'recharge'\n \n def battery_cb(self, userdata, msg):\n if msg.data < self.low_battery_threshold:\n self.recharging = True\n return False\n else:\n self.recharging = False\n return True\n \n def recharge_cb(self, userdata, response):\n return 'succeeded'\n \n def move_base_result_cb(self, userdata, status, result):\n if not self.recharging:\n if status == actionlib.GoalStatus.SUCCEEDED:\n self.n_succeeded += 1\n elif status == actionlib.GoalStatus.ABORTED:\n self.n_aborted += 1\n elif status == actionlib.GoalStatus.PREEMPTED:\n self.n_preempted += 1\n \n try:\n rospy.loginfo(\"Success rate: \" + str(100.0 * self.n_succeeded / (self.n_succeeded + self.n_aborted + self.n_preempted)))\n except:\n pass\n\n def shutdown(self):\n rospy.loginfo(\"Stopping the robot...\")\n \n self.sm_nav.request_preempt()\n \n self.cmd_vel_pub.publish(Twist())\n \n rospy.sleep(1)\n\nif __name__ == '__main__':\n try:\n History_Smach()\n except rospy.ROSInterruptException:\n rospy.loginfo(\"SMACH test finished.\")\n","sub_path":"rbx2_tasks/nodes/history_smach_concurrence.py","file_name":"history_smach_concurrence.py","file_ext":"py","file_size_in_byte":16773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"303546","text":"import csv\n#read from a file\nfileName='Demo.csv'\nWRITE='w'\nREAD='r'\nAPPEND='a'\nREADWRITE='w+'\n#dataFromFIle=[]\nwith open(fileName,READ) as myCSVFile:\n UsersList=csv.reader(myCSVFile)\n \n for currentRow in UsersList:\n print(currentRow)\n print(', '.join(currentRow))\n for currentWord in currentRow:\n print(currentWord) \n \n\n\n\n#file=open(fileName,mode=READ)\n\n#read 
+{"seq_id":"303546","text":"import csv\n#read from a file\nfileName='Demo.csv'\nWRITE='w'\nREAD='r'\nAPPEND='a'\nREADWRITE='w+'\n#dataFromFIle=[]\nwith open(fileName,READ) as myCSVFile:\n UsersList=csv.reader(myCSVFile)\n \n for currentRow in UsersList:\n print(currentRow)\n print(', '.join(currentRow))\n for currentWord in currentRow:\n print(currentWord) \n \n\n\n\n#file=open(fileName,mode=READ)\n\n#read line\n#fileContent=file.readline()\n#print(fileContent)\n#nextLine=file.readline()\n#print(nextLine)\n\n\n#read all file contents\n#allContent=file.read()\n#print(allContent)","sub_path":"Module12/Module12.py","file_name":"Module12.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"574857512","text":"from tkinter import *\nfrom tkinter.ttk import Combobox\n\n\ndef solver(a):\n a1=int(a)\n oper1 = operator1.get()\n oper2 = operator2.get()\n res=0\n if oper1==\"Area\" and oper2==\"Circle\":\n res=3.14*a1**2\n elif oper1==\"Area\" and oper2==\"Square\":\n res=a1**2\n elif oper1==\"Perimeter\" and oper2==\"Square\":\n res=a1*4\n elif oper1==\"Perimeter\" and oper2==\"Circle\":\n res=2*3.14*a1\n res=\"Answer:\" + str(res)\n return res\n\ndef inserter(value):\n output.delete(\"0.0\", \"end\")\n output.insert(\"0.0\", value)\n\ndef handler():\n try:\n a_val = int(inp.get())\n inserter(solver(a_val))\n except ValueError:\n inserter(\"Error\")\n\ndef clear(event):\n caller=event.widget\n caller.delete(\"0\", \"end\")\n\nwindow=Tk()\nwindow.title(\"Calculator\")\nwindow.minsize(500,350)\n\n\ninp=Entry(window,width=5)\ninp.bind(\"\", clear)\ninp.grid(row=0, column=0)\n\noperator1=Combobox(window, width=15)\noperator1['values']=(\"Area\", \"Perimeter\")\noperator1.current(0)\noperator1.grid(row=0, column=1)\n\noperator2=Combobox(window, width=10)\noperator2['values']=(\"Circle\", \"Square\")\noperator2.current(0)\noperator2.grid(row=0, column=2)\n\noutput = Text(window, bg=\"pink\", fg='white', font = \"Arial 20\", width=25, height=10)\noutput.grid(row=1, columnspan=8)\n\nbtn=Button(window, text=\"=\", command=handler)\nbtn.grid(row=0, column=3)\n\n\nwindow.mainloop()\n\n","sub_path":"homeworks/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"609682093","text":"from argparse import ArgumentParser\nimport logging\nfrom nltk.util import ngrams\nimport random\n\n\ndef get_args():\n parser = ArgumentParser(description='Clean data for crfsuite')\n parser.add_argument(\n '-f', '--inputFile',\n help='Input file',\n required=True\n )\n parser.add_argument(\n '-r', '--randomLines',\n type=int,\n help='Number of lines to sample at random'\n )\n return parser.parse_args()\n\n\ndef clean(input_file, random_lines=0):\n random_file = input_file + '.random'\n clean_file = input_file + '.clean'\n cleaned = 0\n if bool(random_lines):\n with open(random_file, 'w') as f:\n pass\n with open(clean_file, 'w') as f:\n pass\n if bool(random_lines):\n with open(input_file, 'r') as f:\n lines = []\n for line in f:\n lines.append(line)\n lines = random.sample(lines, random_lines)\n input_file = input_file + '.random'\n with open(random_file, 'w') as rf:\n for line in lines:\n rf.write(line)\n input_file = random_file\n with open(input_file, 'r') as f:\n for line in f:\n tokens = line.split()\n output = []\n output = list(ngrams(\n tokens,\n 2,\n pad_left=True,\n pad_right=True,\n left_pad_symbol='',\n right_pad_symbol=''\n ))\n with open(clean_file, 'a') as cf:\n for output_item in output:\n output_item = ' '.join(output_item)\n cf.write(output_item + '\\n')\n cf.write('\\n')\n cleaned += 1\n if cleaned % 1000 == 0:\n logging.info('Processed line {:d}'.format(cleaned))\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n random.seed(1)\n args = get_args()\n input_file = args.inputFile\n random_lines = args.randomLines\n clean(input_file, random_lines)\n","sub_path":"crf/clean_data.py","file_name":"clean_data.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"520565197","text":"#!/usr/bin/python\r\n# -*- coding:utf-8 -*-\r\n\r\n\"\"\"\r\n @File: spiders_executor.py\r\n @author: ginger \r\n @software: PyCharm \r\n @time: 17-11-25 12:55 PM\r\n @desc: \r\n\"\"\"\r\nimport Queue\r\nfrom multiprocessing import Pool\r\nfrom multiprocessing.pool import ThreadPool\r\nfrom multiprocessing import cpu_count\r\nimport threading\r\nimport sys, traceback,commands\r\nimport logging\r\nfrom utils import file_util\r\nimport executor\r\n\r\n\r\ndef spider_wrapper(func,context): # wrapper for spider functions imported directly\r\n try:\r\n func()\r\n except BaseException:\r\n exc_type, exc_value, exc_traceback = sys.exc_info()\r\n stacktrace = traceback.format_exception(exc_type, exc_value, exc_traceback)\r\n context[func.__name__] = stacktrace\r\n else:\r\n context[func.__name__] = \"success\"\r\n return context\r\n\r\n'''\r\npy_path: the location of the spider script\r\n'''\r\ndef spider_shell(py_path,context):\r\n try:\r\n cmd = 'python '+py_path\r\n (status, output) = commands.getstatusoutput(cmd)\r\n # failure\r\n if status != 0:\r\n raise RuntimeError(u\"%s command execute error: %s\" % (cmd, output))\r\n except BaseException:\r\n exc_type, exc_value, exc_traceback = sys.exc_info()\r\n stacktrace = traceback.format_exception(exc_type, exc_value, exc_traceback)\r\n context[py_path] = stacktrace\r\n else:\r\n context[py_path] = \"success\"\r\n return context\r\n\r\n\r\nclass spiders_executor(executor):\r\n def __init__(self, usethread=True, maxworkers=10, recovery_path = None, maxtasksperworker=None):\r\n self.__usethread = usethread\r\n self.__maxworkers=maxworkers\r\n self.__maxtasksperworker = maxtasksperworker\r\n self.__isShutdown = False\r\n self.__recovery_info = None\r\n self.__recovery_path = recovery_path\r\n self.__initialize()\r\n\r\n\r\n def __initialize(self):\r\n self.__recovery_info = file_util.read_file_by_line(self.__recovery_path)\r\n self.__spider_queue = Queue.Queue()\r\n self.done_queue = Queue.Queue()\r\n if self.__usethread:\r\n self.__pool = ThreadPool(processes=self.__maxworkers) # thread pool\r\n else:\r\n self.__pool = Pool(processes=self.__maxworkers, maxtasksperchild=self.__maxtasksperworker) # multiprocessing\r\n\r\n self.__mutex = threading.Lock()\r\n self.__idleCounter = self.__maxworkers # idle worker counter\r\n if self.__idleCounter is None:\r\n self.__idleCounter = cpu_count() # default number of workers is the CPU core count\r\n\r\n\r\n def push(self,spider): # spider function\r\n self.__spider_queue.put(spider)\r\n\r\n def pop(self):\r\n return self.__spider_queue.get()\r\n\r\n def __callback(self, context): # callback function\r\n try:\r\n self.done_queue.put(context)\r\n finally:\r\n self.__increaseIdleCounter() # task finished, so increment the idle worker count\r\n\r\n def work_func(self):\r\n while not self.__isShutdown:\r\n if self.__idleCounter > 0:\r\n try:\r\n self.__decreaseIdleCounter()\r\n context = {}\r\n func = self.__spider_queue.get_nowait()\r\n self.__pool.apply_async(spider_wrapper, (func, context), callback=self.__callback)\r\n # print \"11\r\n # self.__pool.apply_async(self.pop(), callback=self.__callback)\r\n except Queue.Empty:\r\n self.__pool.close()\r\n self.__pool.join()\r\n self.shutdown()\r\n # print '222'\r\n\r\n def work_shell(self):\r\n while self.__recovery_info != 0:\r\n self.__spider_queue.get_nowait()\r\n self.__recovery_info -=1\r\n while not self.__isShutdown:\r\n if self.__idleCounter > 0:\r\n try:\r\n self.__decreaseIdleCounter()\r\n context = {}\r\n py_path = self.__spider_queue.get_nowait()\r\n self.__pool.apply_async(spider_shell, (py_path, context), callback=self.__callback)\r\n # print \"11111\"\r\n # self.__pool.apply_async(self.pop(), callback=self.__callback)\r\n except Queue.Empty:\r\n self.__pool.close()\r\n self.__pool.join()\r\n self.shutdown()\r\n # print '222'\r\n\r\n def shutdown(self):\r\n self.__isShutdown = True\r\n\r\n\r\n def __increaseIdleCounter(self):\r\n self.__mutex.acquire()\r\n self.__idleCounter += 1\r\n self.__mutex.release()\r\n\r\n def __decreaseIdleCounter(self):\r\n self.__mutex.acquire()\r\n self.__idleCounter -= 1\r\n self.__mutex.release()\r\n\r\n# for test\r\ndef func1():\r\n 1/0\r\n print (func1.__name__)\r\n\r\ndef func2():\r\n print (func2.__name__)\r\n\r\nif __name__ == '__main__':\r\n # scan for spider scripts\r\n py_path_list = file_util.find_all_file('/home/XXX/py_path')\r\n\r\n executor = spiders_executor()\r\n for i in range(0, len(py_path_list)):\r\n executor.push(py_path_list[i])\r\n executor.work_shell()\r\n # executor.push(func1)\r\n # executor.push(func2)\r\n # executor.work_func()\r\n while not executor.done_queue.empty():\r\n print (executor.done_queue.get())","sub_path":"BigdataStack/Schedule/spiders_executor.py","file_name":"spiders_executor.py","file_ext":"py","file_size_in_byte":5227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"536221966","text":"import sys\n\n# PROGRAM COUNTER = pc.. built into computer. used to increment count\n# operation codes\n# OP Codes\nPRINT_HELLO_WORLD = 1 # 0b00000001 binary\nHALT = 2 # 0b00000010 binary\nPRINT_NUM = 3 # 0b00000011 binary\nSAVE_REG = 4 # OB00000100\nPRINT_REG = 5\nADD = 6\nPUSH = 7\nPOP = 8\nCALL = 9\nRET = 10\n\n\n# memory = [\n# PRINT_HELLO_WORLD, # instruction\n# SAVE_REG, # instruction\n# 123, # not an instruction, this is input. be mindful of memory position\n# 1, # register 1\n# PRINT_REG, # 5\n# 1, # register 1\n# HALT # 2\n# ] # memory is simply an array\n\nmemory = [0] * 256\n\nrunning = True\n\nregisters = [0] * 8\n\nSP = 7\n\npc = 0\n\n# open a file and load into memory\n\n#### LECTURE 2#####\n\n# get file name from command line arguments\nprint(sys.argv)\n\nif len(sys.argv) != 2:\n print(\"Usage: example_cpu.py filename\")\n sys.exit(1) # 0 error means good, 1 error means crash\n\n\ndef load_memory(filename):\n # open file and load into mem\n address = 0\n\n try:\n with open(sys.argv[1]) as f: # open file\n for line in f:\n # break up lines on char that creates a comment\n split_line = line.split('#') # split at the comment marker\n print(split_line) # check the split\n\n # Removes whitespace and \\n char\n # take the split line at 0, where the code should be and strip it\n code_value = split_line[0].strip()\n # Make sure that the value before the # symbol is not empty\n if code_value == '':\n continue\n\n num = int(code_value)\n memory[address] = num\n address += 1\n\n except FileNotFoundError:\n print(f\"{sys.argv[1]} file not found\")\n sys.exit(2)\n\n\nload_memory(sys.argv[1])\n\n# Set the top of the stack correctly:\nregisters[SP] = len(memory)\n\nwhile running:\n # read line by line from memory\n instruction = memory[pc]\n\n if instruction == PRINT_HELLO_WORLD:\n # print hello world,\n # move the pc up one to next instruction\n print('Hello World')\n pc += 1\n\n elif instruction == PRINT_NUM:\n # print the number in the next memory slot\n num = memory[pc + 1]\n print(num)\n pc += 2 # move the pointer 2 out, since we're using memory +1 for input material\n\n elif instruction == SAVE_REG:\n # save some value to some register\n # first number after instruction will be the value to store\n # second number after instruction will be register\n num = memory[pc + 1]\n reg_loc = memory[pc + 2]\n registers[reg_loc] = num\n pc += 3 # move 3 slots.\n\n elif instruction == PRINT_REG:\n reg_loc = memory[pc+1]\n print(registers[reg_loc])\n pc += 2\n\n elif instruction == ADD:\n reg_1 = memory[pc + 1]\n reg_2 = memory[pc + 2]\n registers[reg_1] += registers[reg_2]\n pc += 3\n # ADD takes TWO registers, adds their values,\n # and stores the result in the first register given\n # GET register 1\n # get register 2\n # add values together\n # store in register 1\n\n elif instruction == HALT:\n running = False\n pc += 1\n\n elif instruction == PUSH:\n given_register = memory[pc + 1]\n value_in_register = registers[given_register]\n # decrement the stack pointer\n registers[SP] -= 1\n # write the value of the given register to memory AT the SP location\n memory[registers[SP]] = value_in_register\n pc += 2\n\n elif instruction == POP:\n given_register = memory[pc + 1]\n # write the value in memory at the top of the stack to the given register\n value_from_memory = memory[registers[SP]]\n registers[given_register] = value_from_memory\n # increment the stack pointer\n registers[SP] += 1\n pc += 2\n\n elif instruction == CALL:\n # Get the given register in the operand\n given_register = memory[pc + 1]\n # Store the return address pc + 2 onto the stack\n # decrement the stack pointer\n registers[SP] -= 1\n # write the return address\n memory[registers[SP]] = pc + 2\n # SET PC TO THE VALUE INSIDE GIVEN REGISTER\n pc = registers[given_register]\n\n elif instruction == RET:\n # set pc to the value at the top of the stack\n pc = memory[registers[SP]]\n # POP from stack\n registers[SP] += 1\n\n else:\n print(f'Unknown instruction {instruction}')\n sys.exit(1) # exit code 1 says the program did not exit cleanly / crashed\n","sub_path":"lecture/lecture1.py","file_name":"lecture1.py","file_ext":"py","file_size_in_byte":4638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"542106843","text":"#!/usr/bin/python\n\nimport sys\nfrom subprocess import Popen, PIPE, STDOUT\n\nif (len(sys.argv) < 3):\n print(\"Usage: \" + sys.argv[0] + \" \")\n sys.exit(1)\n\ninput_file = sys.argv[1]\noutput_file = sys.argv[2]\n\nf = open(input_file)\ndot_cmds = \"digraph finite_state_machine {\\n\"\ndot_cmds += \"rankdir=LR; \\n\"\ndot_cmds += \"node[shape = circle]; \\n\"\n\nfor line in f:\n elem = line.split(\",\")\n v1 = elem[0].strip()\n v2 = elem[1].strip()\n dot_cmds += v1 + \" -> \" + v2 + \";\\n\"\n\ndot_cmds += \"}\\n\"\n\nf.close()\n\nproc = Popen([\"circo\", \"-Tsvg\", \"-o\" + output_file], stdout=PIPE, stdin=PIPE, stderr=STDOUT)\n\nproc_stdout = proc.communicate(input=dot_cmds)[0]\nproc.stdin.close()\n# -> four\n","sub_path":"Graphs/display_graph.py","file_name":"display_graph.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"80564845","text":"from __future__ import annotations\nfrom typing import List\nimport numpy as np\nimport cv2\nfrom logger import logger\nfrom common_utils.common_types.bbox import BBox\nfrom common_utils.common_types.segmentation import Segmentation\nfrom common_utils.check_utils import check_required_keys, check_list_length\n\nfrom common_utils.common_types.point import Point2D, Point3D\nfrom ..common.cuboid import Cuboid2D, Cuboid3D\nfrom ..common.angle import Quaternion\nfrom common_utils.base.basic import BasicLoadableObject\n\nclass NDDS_Annotation_Object(BasicLoadableObject['NDDS_Annotation_Object']):\n def __init__(\n self,\n class_name: str, instance_id: int, visibility: float, location: Point3D, quaternion_xyzw: Quaternion,\n pose_transform: np.ndarray, cuboid_centroid: Point3D, projected_cuboid_centroid: Point2D,\n bounding_box: BBox, cuboid: Cuboid3D, projected_cuboid: Cuboid2D\n ):\n super().__init__()\n self.class_name = class_name\n self.instance_id = instance_id\n if visibility < 0 or visibility > 1:\n logger.error(f'visibility must be between 0 and 1')\n logger.error(f'visibility: {visibility}')\n raise Exception\n self.visibility = visibility\n self.location = location\n self.quaternion_xyzw = quaternion_xyzw\n self.pose_transform = pose_transform\n self.cuboid_centroid = cuboid_centroid\n self.projected_cuboid_centroid = projected_cuboid_centroid\n self.bounding_box = bounding_box\n self.cuboid = cuboid\n self.projected_cuboid = projected_cuboid\n\n def to_dict(self) -> dict:\n return {\n 'class': self.class_name,\n 'instance_id': self.instance_id,\n 'visibility': self.visibility,\n 'location': self.location.to_list(),\n 'quaternion_xyzw': self.quaternion_xyzw.to_list(),\n 'pose_transform': self.pose_transform.tolist(),\n 'cuboid_centroid': self.cuboid_centroid.to_list(),\n 'projected_cuboid_centroid': self.projected_cuboid_centroid.to_list(),\n 'bounding_box': {\n 'top_left': [self.bounding_box.xmin, self.bounding_box.ymin][::-1],\n 'bottom_right': [self.bounding_box.xmax, self.bounding_box.ymax][::-1]\n },\n 'cuboid': self.cuboid.to_list(demarcation=True),\n 'projected_cuboid': self.projected_cuboid.to_list(demarcation=True)\n }\n\n @classmethod\n def from_dict(self, object_dict: dict) -> NDDS_Annotation_Object:\n 
check_required_keys(\n object_dict,\n required_keys=[\n 'class', 'instance_id', 'visibility',\n 'location', 'quaternion_xyzw', 'pose_transform',\n 'cuboid_centroid', 'projected_cuboid_centroid', 'bounding_box',\n 'cuboid', 'projected_cuboid'\n ]\n )\n check_required_keys(\n object_dict['bounding_box'],\n required_keys=['top_left', 'bottom_right']\n )\n return NDDS_Annotation_Object(\n class_name=object_dict['class'],\n instance_id=object_dict['instance_id'],\n visibility=object_dict['visibility'],\n location=Point3D.from_list(object_dict['location']),\n quaternion_xyzw=Quaternion.from_list(object_dict['quaternion_xyzw']),\n pose_transform=np.array(object_dict['pose_transform']),\n cuboid_centroid=Point3D.from_list(object_dict['cuboid_centroid']),\n projected_cuboid_centroid=Point2D.from_list(object_dict['projected_cuboid_centroid']),\n bounding_box=BBox.from_list(object_dict['bounding_box']['top_left'][::-1]+object_dict['bounding_box']['bottom_right'][::-1], input_format='pminpmax'),\n cuboid=Cuboid3D.from_list(object_dict['cuboid'], demarcation=True),\n projected_cuboid=Cuboid2D.from_list(object_dict['projected_cuboid'], demarcation=True)\n )\n\n def parse_obj_info(self, naming_rule: str='type_object_instance_contained', delimiter: str='_') -> (str, str, str):\n if naming_rule == 'type_object_instance_contained':\n class_name_parts = self.class_name.split(delimiter)\n if len(class_name_parts) == 4:\n obj_type, obj_name, instance_name, contained_name = class_name_parts\n elif len(class_name_parts) == 3:\n obj_type, obj_name, instance_name = class_name_parts\n contained_name = None\n elif len(class_name_parts) == 2:\n obj_type, obj_name = class_name_parts\n instance_name, contained_name = None, None\n elif len(class_name_parts) == 1:\n obj_name = class_name_parts\n obj_type = 'seg'\n instance_name, contained_name = None, None\n else:\n logger.error(f\"Too many delimiters ('{delimiter}') found in class_name: {self.class_name}\")\n logger.error(f'Parsed {len(class_name_parts)} parts. 
Expected <= 4.')\n logger.error(f'self.instance_id: {self.instance_id}')\n raise Exception\n return obj_type, obj_name, instance_name, contained_name\n else:\n logger.error(f'Invalid naming rule: {naming_rule}')\n raise NotImplementedError\n \n def get_color_from_id(self) -> List[int]:\n RGBint = self.instance_id\n pixel_b = RGBint & 255\n pixel_g = (RGBint >> 8) & 255\n pixel_r = (RGBint >> 16) & 255\n color_instance_bgr = [pixel_b,pixel_g,pixel_r]\n return color_instance_bgr\n\n def get_instance_segmentation(self, img: np.ndarray, target_bgr: List[int]=None, interval: int=1, exclude_invalid_polygons: bool=True):\n target_bgr = target_bgr if target_bgr is not None else self.get_color_from_id()\n check_list_length(target_bgr, correct_length=3)\n lower_bgr = [val - interval if val - interval >= 0 else 0 for val in target_bgr]\n upper_bgr = [val + interval if val + interval <= 255 else 255 for val in target_bgr]\n color_mask = cv2.inRange(src=img, lowerb=tuple(lower_bgr), upperb=tuple(upper_bgr))\n color_contours, _ = cv2.findContours(color_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n seg = Segmentation.from_contour(contour_list=color_contours, exclude_invalid_polygons=exclude_invalid_polygons)\n return seg\n\n def is_in_frame(self, frame_shape: List[int]) -> bool:\n frame_h, frame_w = frame_shape[:2]\n frame_bbox = BBox(xmin=0, ymin=0, xmax=frame_w, ymax=frame_h)\n return self.bounding_box.within(frame_bbox)\n \nclass CameraData(BasicLoadableObject['CameraData']):\n def __init__(self, location_worldframe: Point3D, quaternion_xyzw_worldframe: Quaternion):\n super().__init__()\n self.location_worldframe = location_worldframe\n self.quaternion_xyzw_worldframe = quaternion_xyzw_worldframe\n \n def to_dict(self) -> dict:\n return {\n 'location_worldframe': self.location_worldframe.to_list(),\n 'quaternion_xyzw_worldframe': self.quaternion_xyzw_worldframe.to_list()\n }\n \n @classmethod\n def from_dict(cls, item_dict: dict) -> CameraData:\n check_required_keys(\n item_dict,\n required_keys=['location_worldframe', 'quaternion_xyzw_worldframe']\n )\n return CameraData(\n location_worldframe=Point3D.from_list(coords=item_dict['location_worldframe']),\n quaternion_xyzw_worldframe=Quaternion.from_list(coords=item_dict['quaternion_xyzw_worldframe'])\n )\n \n","sub_path":"annotation_utils-master/build/lib/annotation_utils/ndds/structs/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":7611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"428893840","text":"import shelve\nfrom SQL import SQLighter\nfrom random import shuffle\nimport config\n\nstoragename=\"botStorage.db\"\n\ndef IsTest(chatid):\n with shelve.open(storagename) as storage:\n try:\n return storage[str(chatid)]!=None\n except KeyError:\n return False\n\ndef set_user_test(chatid,userid):\n\tli=SQLighter().getListQuestions()\n\twith shelve.open(storagename) as storage:\n\t\tstorage[str(chatid)]={\n \t\"user\":[userid,chatid],\n \"list\":li,\n \"quest\":li[0][1],\n \"correct\":li[0][2],\n \"current\":0,\n }\n\t\tdic={\n 'quest':li[0][1],\n 'list':li[0][2].split(\";\")+li[0][3].split(\";\")\n }\n\t\tshuffle(dic['list'])\n\t\treturn dic\n\ndef nextQuestion(chatid):\n with shelve.open(storagename) as storage:\n cur=storage[str(chatid)]['current']+1\n try:\n li=storage[str(chatid)]['list'][cur]\n \n dic={\n \"user\":storage[str(chatid)]['user'],\n \"list\":storage[str(chatid)]['list'],\n \"current\":cur,\n 'quest':li[1],\n 'correct':li[2]\n }\n 
storage[str(chatid)]=dic\n \n dic={\n 'quest':li[1],\n 'list':li[2].split(\";\")+li[3].split(\";\")\n }\n shuffle(dic['list'])\n return dic\n \n except IndexError:\n \n if getMember(storage[str(chatid)]['user'][0]):\n SQLighter().AddMember(storage[str(chatid)]['user'][0],storage[str(chatid)]['user'][1])\n finish_user_test(chatid)\n return {\n 'quest':\"Тест пройден!\",\n 'list': None\n } \n\ndef getMember(userid): \n if (config.bot.get_chat_member(config.groupId,userid).status == 'left'):\n return True\n else:\n return False\n\ndef finish_user_test(chatid):\n with shelve.open(storagename) as storage:\n del storage[str(chatid)]\n\ndef get_answer_for_user(chatid):\n with shelve.open(storagename) as storage:\n try:\n return storage[str(chatid)]['correct'].split(\";\")\n except KeyError:\n return None","sub_path":"Storage.py","file_name":"Storage.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"398462925","text":"from typing import Tuple\n\n\nclass FeatureSpec:\n def __init__(\n self,\n name: str,\n dtype: str,\n shape: Tuple[int, ...],\n categories: Tuple[str, ...] = None,\n ):\n self.name = name\n self.dtype = dtype\n self.shape = shape\n self.categories = categories\n\n def __repr__(self):\n return \"FeatureSpec(name={0}, dtype={1}, shape={2}\".format(\n self.name, self.dtype, self.shape\n ) + (\n \", categories={0})\".format(self.categories)\n if self.categories\n else \")\"\n )\n","sub_path":"sidekick/data_models.py","file_name":"data_models.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"566559159","text":"\"\"\"\n# Environment shared variables read from airflow variable config, provided by infrastracture\n# https://airflow.apache.org/docs/stable/concepts.html?highlight=variable#storing-variables-in-environment-variables\n# Variables set using Environment Variables would not appear in the Airflow UI but you will be able to use it in your DAG file\nAudit check:\n date: 21/04/2021\n\"\"\"\nfrom airflow.models import Variable\n\n# secrets name available in processing namespace\nC3_LANDSAT_INDEXING_USER_SECRET = Variable.get(\n \"c3_landsat_indexing_user_secret\", \"processing-aws-creds-c3-landsat\"\n) # qa\nC3_ALCHEMIST_SECRET = Variable.get(\n \"alchemist_c3_indexing_user_secret\", \"alchemist-c3-user-creds\"\n) # qa\nC3_BA_ALCHEMIST_SECRET = Variable.get(\n \"alchemist_s2_c3_nrt_user_creds\", \"alchemist-s2-c3-user-creds\"\n)\n\nSECRET_EXPLORER_WRITER_NAME = Variable.get(\n \"db_explorer_writer_secret\", \"explorer-writer\"\n) # qa\nSECRET_OWS_WRITER_NAME = Variable.get(\"db_ows_writer_secret\", \"ows-writer\") # qa\nSECRET_ODC_WRITER_NAME = Variable.get(\"db_odc_writer_secret\", \"odc-writer\") # qa\nSECRET_ODC_READER_NAME = Variable.get(\"db_odc_reader_secret\", \"odc-reader\") # qa\nSECRET_DBA_ADMIN_NAME = Variable.get(\"db_dba_admin_secret\", \"dba-admin\") # qa\n\nSECRET_ODC_ADMIN_NAME = Variable.get(\"db_odc_admin_secret\", default_var=\"odc-admin\")\n\nSECRET_EXPLORER_ADMIN_NAME = Variable.get(\n \"db_explorer_admin_secret\", default_var=\"explorer-admin\"\n)\n\nSECRET_OWS_ADMIN_NAME = Variable.get(\"db_ows_admin_secret\", default_var=\"ows-admin\")\n\nSECRET_EXPLORER_NCI_ADMIN_NAME = Variable.get(\n \"db_explorer_nci_admin_secret\", \"explorer-nci-admin\"\n) # qa\nSECRET_EXPLORER_NCI_WRITER_NAME = Variable.get(\n \"db_explorer_nci_writer_secret\", \"explorer-nci-writer\"\n) # 
qa\n\n# ARD\nS2_NRT_AWS_CREDS = \"wagl-nrt-aws-creds\"\nARD_NRT_LS_CREDS = \"ard-nrt-ls-aws-creds\"\nAWS_DEA_PUBLIC_DATA_UPLOAD_CREDS = \"sentinel-2-ard-sync-creds\"\n\n# DB config\nDB_DATABASE = Variable.get(\"db_database\", \"odc\") # qa\nDB_HOSTNAME = Variable.get(\"db_hostname\", \"db-writer\") # qa\nDB_READER_HOSTNAME = Variable.get(\"db_reader_hostname\", \"db-reader\") # qa\nDB_PORT = Variable.get(\"db_port\", \"5432\") # qa\n\nAWS_DEFAULT_REGION = Variable.get(\"region\", \"ap-southeast-2\") # qa\n\n# automated-reporting\nAWS_STORAGE_STATS_POD_COUNT = Variable.get(\n \"AWS_STORAGE_STATS_POD_COUNT\", default_var=\"30\"\n)\nREPORTING_DB_DEV_SECRET = Variable.get(\n \"reporting_db_dev_secret\", default_var=\"reporting-db-dev\"\n)\nREPORTING_IAM_NEMO_PROD_SECRET = Variable.get(\n \"reporting_iam_nemo_prod_secret\", default_var=\"reporting-iam-nemo-production\"\n)\nREPORTING_DB_SECRET = Variable.get(\"reporting_db_secret\", default_var=\"reporting-db\")\n\nSTATSD_HOST = Variable.get(\"statsd_host\", default_var=\"localhost\")\nSTATSD_PORT = Variable.get(\"statsd_port\", default_var=\"8125\")\n\n# dea-access\nDEA_ACCESS_RESTO_API_ADMIN_SECRET = Variable.get(\n \"dea_access_resto_api_admin_secret\", default_var=\"dea-access-resto-api-admin\"\n) # qa\n","sub_path":"dags/infra/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"634048167","text":"class Solution(object):\n \"\"\"\n :type candidates: List[int]\n :type target: int\n :rtype: List[List[int]]\n \"\"\"\n def combinationSum2(self, cans, t):\n if cans == None or len(cans) == 0 or t <= 0:\n return []\n \n res = []\n cans.sort()\n self.find(cans, t, 0, [], res)\n return res\n \n def find(self, cans, t, index, path, res):\n if t == 0:\n res.append(list(path))\n return\n \n pre = -1\n for i in range(index, len(cans), 1):\n if cans[i] > t:\n break\n if pre != -1 and pre == cans[i]:\n continue\n \n path.append(cans[i])\n self.find(cans, t-cans[i], i+1, path, res)\n del path[len(path)-1]\n \n pre = cans[i]\n ","sub_path":"Python/Combination Sum II.py","file_name":"Combination Sum II.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"455978186","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 7 12:11:34 2021\n\n@author: laramos\n\"\"\"\n\nimport os\nimport pandas as pd\n\n\nPATH_RESULTS = r\"\\\\amc.intra\\users\\L\\laramos\\home\\Desktop\\Postdoc eHealth\\Results\"\n#PATH_RESULTS = r\"\\\\amc.intra\\users\\L\\laramos\\home\\Desktop\\Postdoc eHealth\\Results\\Results_test_missing_goal\\drop_particpants_mising_goal\"\n#PATH_RESULTS = r\"\\\\amc.intra\\users\\L\\laramos\\home\\Desktop\\Postdoc eHealth\\Results\"\n\nprogram_list = ['Alcohol','Cannabis', 'Smoking']\n#program_list = ['Alcohol']\nfeature_type = 'safe'\nexperiment = 'exp3'\ndrop_afspraak = False\nthresh_corr = 0.8\nall_goal_phase = [4,5,6]\n#all_goal_phase = [6]\nmin_goaldays = 6\ntime_hours = 72\n\nfor goal_phase in all_goal_phase:\n for i,program in enumerate(program_list):\n path_read = os.path.join(PATH_RESULTS,program+str(time_hours)+\"_\"+feature_type+\"_min_days_\"+str(min_goaldays)+\"_min_phase_\"+str(goal_phase)+experiment)\n df = pd.read_excel(os.path.join(path_read,'results.xls'))\n df['Program'] = program\n if i==0:\n df_merge = df\n else:\n df_merge = pd.concat([df_merge,df])\n \n df_merge = df_merge[['Program','Methods', 'AUC 
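The Combination Sum II record above prunes duplicate combinations by sorting the candidates and skipping any value equal to the previous one at the same recursion depth (the pre variable). A worked call, assuming the Solution class from that record is on the path; the input values are the usual illustrative ones, not taken from the file:

s = Solution()
print(s.combinationSum2([10, 1, 2, 7, 6, 1, 5], 8))
# [[1, 1, 6], [1, 2, 5], [1, 7], [2, 6]]  -- each duplicate 1 may fill a
# distinct position, but [1, 7] appears only once thanks to the pre check.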
95% CI ', 'F1-Score', 'Sensitivity', 'Specificity','PPV', 'NPV']]\n df_merge.to_excel(os.path.join(PATH_RESULTS,str(time_hours)+\"_\"+feature_type+\"_min_days_\"+str(min_goaldays)+\"_min_phase_\"+str(goal_phase)+experiment+'merged.xls'))\n\n\n#%% this is for checking the Pr curve for the results\n\nimport pickle\nfrom sklearn.metrics import accuracy_score,precision_recall_curve\nimport matplotlib.pyplot as plt\nimport numpy as np\n\npath = r\"\\\\amc.intra\\users\\L\\laramos\\home\\Desktop\\Postdoc eHealth\\Results\\Final_manuscript\\Alcohol72_safe_min_days_7_min_phase_6exp3\\measures_RFC.pkl\"\n\n\n\nwith open(path, 'rb') as f: \n meas = pickle.load(f)\n\ny = np.concatenate(meas.labels, axis=0 )\nprobas = np.concatenate(meas.probas, axis=0 )\n \nprecision, recall, _ = precision_recall_curve(y,probas) \n\nno_skill = len(y[y==1]) / len(y)\n\nplt.figure(figsize=(10,7))\nplt.plot([0, 1], [no_skill, no_skill], linestyle='--', label='No Skill')\nplt.plot(recall, precision, marker='.', label='Logistic')\n\nplt.title('PR curve - Smoking')\nplt.xlabel('Recall')\nplt.ylabel('Precision')\n# show the legend\nplt.legend()\n# show the plot\nplt.show()\n\nfrom sklearn.metrics import roc_auc_score\n\n#%% calibration plots\n\nimport pickle\nimport os\nimport seaborn as sns\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n \n#program = 'Alcohol'\n#program = 'Cannabis'\nprogram = 'Smoking'\n\npath = os.path.join(r\"\\\\amc.intra\\users\\L\\laramos\\home\\Desktop\\Postdoc eHealth\\Results\",program+\"72_safe_min_days_7_min_phase_6exp3\\measures_RFC.pkl\")\n\nwith open(path, 'rb') as f:\n meas = pickle.load(f)\n \nprobas = np.concatenate(meas.probas, axis=0 ) \ny_test = np.concatenate(meas.labels, axis=0 ) \n\nfrom sklearn.calibration import calibration_curve\n\nfraction_of_positives, mean_predicted_value = \\\n calibration_curve(y_test, probas, n_bins=10)\n\nfig = plt.figure(1, figsize=(10, 10))\n \nax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)\n\n\nax1.plot([0, 1], [0, 1], \"k:\", label=\"Perfectly calibrated\")\n\nax1.plot(mean_predicted_value, fraction_of_positives, \"s-\")\n\npath = os.path.join(r\"\\\\amc.intra\\users\\L\\laramos\\home\\Desktop\\Postdoc eHealth\\Results\",program+\"72_safe_min_days_7_min_phase_6exp3\\measures_LR.pkl\")\n\nwith open(path, 'rb') as f:\n meas = pickle.load(f)\n \nprobas = np.concatenate(meas.probas, axis=0 ) \ny_test = np.concatenate(meas.labels, axis=0 ) \n\nfrom sklearn.calibration import calibration_curve\n\nfraction_of_positives, mean_predicted_value = \\\n calibration_curve(y_test, probas, n_bins=10)\n\n\nax1.plot(mean_predicted_value, fraction_of_positives, \"s-\")\n\n\nfrom sklearn import metrics\nfpr, tpr, thresholds = metrics.roc_curve(y_test, probas)\nprint(metrics.auc(fpr, tpr))\n\nauc = list()\nbrier = list()\nfor y_test,probas in zip(meas.labels,meas.probas):\n fpr, tpr, thresholds = metrics.roc_curve(y_test, probas)\n print(metrics.auc(fpr, tpr))\n auc.append(metrics.auc(fpr, tpr))\n brier.append(metrics.brier_score_loss(y_test, probas))\n \n \n#%% calibrating a classifier \nfrom sklearn.datasets import make_classification\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.calibration import CalibratedClassifierCV\n\n\ncalibrated_clf = CalibratedClassifierCV(base_estimator=grid_lr.clf, cv=3)\ncalibrated_clf.fit(X_test,y_test)\nprobas = calibrated_clf.predict_proba(X_test)\nprint(metrics.brier_score_loss(y_test, probas[:,0]))\n \n#%% YOuden index \nfrom methods import Mean_Confidence_Interval, 
find_optimal_cutoff\n\n\nprogram = 'Alcohol'\n\npath = os.path.join(r\"\\\\amc.intra\\users\\L\\laramos\\home\\Desktop\\Postdoc eHealth\\Results\",program+\"72_safe_min_days_7_min_phase_6exp3\\measures_RFC.pkl\")\n\nwith open(path, 'rb') as f:\n meas = pickle.load(f)\n \n\nsens = list()\nspec = list()\nppv = list()\nnpv = list()\ntn_l = list()\nfp_l = list()\nfn_l = list()\ntp_l = list()\n\n\nfor y_test,probas in zip(meas.labels,meas.probas):\n t = find_optimal_cutoff(y_test,probas)[0]\n preds = probas>=t\n \n tn, fp, fn, tp = metrics.confusion_matrix(y_test, preds).ravel() \n \n sens.append(tp/(tp+fn))\n spec.append(tn/(tn+fp))\n ppv.append(tp/(tp+fp))\n npv.append(tn/(tn+fn))\n \n tn_l.append(tn)\n fp_l.append(fp)\n fn_l.append(fn)\n tp_l.append(tp)\n \nprint(Mean_Confidence_Interval(sens)) \nprint(Mean_Confidence_Interval(spec))\nprint(Mean_Confidence_Interval(ppv)) \nprint(Mean_Confidence_Interval(npv))\n\n\narray = [[np.sum(tp_l),np.sum(fp_l)],[np.sum(fn_l),np.sum(tn_l)]]\ndf_cm = pd.DataFrame(array, index = [i for i in [\"Successful\",\"Early Dropout\"]],\n columns = [i for i in [\"Successful\",\"Early Dropout\"]])\nplt.figure(figsize = (11,8))\nsns.set(font_scale=2.0)\nplot = sns.heatmap(df_cm, annot=True,fmt = \".1f\",cmap=\"Blues\") \nif program=='Smoking':\n program = 'Tobacco'\nplot.set_title(str(\"Confusion matrix: \"+program))\n\nh,_ = os.path.split(path)\nplot.figure.savefig(os.path.join(h,'confusion_matrix.png'))","sub_path":"merge_results.py","file_name":"merge_results.py","file_ext":"py","file_size_in_byte":6081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"153881203","text":"#!/usr/bin/python\n\nimport sys\nfrom itertools import groupby\nfrom db import WCA_Database\n\n# Print table\ndef table(rank):\n sys.stdout=open(\"dnfstreak.txt\",\"w\")\n print('[spoiler=Longest streaks]')\n print('[table]')\n print('[tr][td][td]Name[/td][td]Streak[/td][/tr]')\n # If more than 100 people have an average, just take top 100\n for k in range(0, len(rank)):\n i = k+1\n for l in range(0,k):\n if rank[k][1] == rank[k-l][1]:\n i = k-l+1\n print('[tr][td]', i, '[/td][td]', rank[k][0],'[/td][td]', rank[k][1], '[/td][/tr]')\n if k > 100:\n break\n print('[/table]')\n print('[/spoiler]')\n sys.stdout.close()\n\n\ncur = WCA_Database.query(\"SELECT eventId, personName, value1, value2, value3, value4, value5 FROM Results WHERE eventId = '333fm'\")\n\nrows = cur.fetchall()\n\nresults = {}\nres = []\n\n# Get everyone's results\nfor row in rows:\n for i in ('value1', 'value2', 'value3', 'value4', 'value5'):\n if row[i] > 0 or row[i] == -1:\n results.setdefault(row['personName'], []).append(row[i])\n\n# Find longest success streak\nfor i in results:\n attempts = 0 # Count of all attempts\n solves = 0 # Count of all solved attempts\n count = 0 # Count of current solvestreak\n best = 0 # Count of best solvestreak\n sum = 0 # Sum of all solves of best mean\n mean = 0 # Calculated mean of best streak\n meanres = [] # Moves of each attempt of current streak\n safemeanres = [] # Moves of each attempt of best streak\n for k in range(0,len(results[i])):\n if results[i][k] == -1 and k != (len(results[i])-1):\n count = count + 1\n attempts = attempts + 1\n solves = solves + 1\n meanres.append(results[i][k])\n if results[i][k] > 0 and k != (len(results[i])-1):\n if count > best:\n best = count\n safemeanres = meanres\n meanres = []\n count = 0\n attempts = attempts + 1\n if k == (len(results[i])-1):\n if results[i][k] == -1 and count > best:\n best = 
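The Youden-index section of the merge_results.py record above relies on a project helper, find_optimal_cutoff, whose body is not shown. The usual definition it implements is the threshold maximizing Youden's J = sensitivity + specificity - 1 = TPR - FPR along the ROC curve; below is a self-contained sketch of that definition with scikit-learn, my reimplementation under that assumption rather than the project's code:

import numpy as np
from sklearn.metrics import roc_curve

def youden_cutoff(y_true, probas):
    # Threshold that maximizes J = TPR - FPR over the ROC operating points.
    fpr, tpr, thresholds = roc_curve(y_true, probas)
    return thresholds[np.argmax(tpr - fpr)]

y = np.array([0, 0, 1, 1])
p = np.array([0.1, 0.4, 0.35, 0.8])
print(youden_cutoff(y, p))  # 0.8 on this toy data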
count + 1\n solves = solves + 1\n meanres.append(results[i][k])\n safemeanres = meanres\n if results[i][k] > 0 and count > best:\n best = count\n safemeanres = meanres\n attempts = attempts + 1\n\n res.append((i, best))\n\n# Sort all results by longest streak\nsorted_x = sorted(res, key=lambda x:x[1], reverse=True)\n\n# Build table with results\ntable(sorted_x)\n","sub_path":"dnfstreak.py","file_name":"dnfstreak.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"240453142","text":"from django.contrib import admin\n# Register your models here.\nfrom rfweb.models import Burn, State, Test\n\nclass StateAdmin(admin.ModelAdmin):\n list_display = (\"Id\",\"Serial_Number\" ,\"Finish\",\"Remakes\")\nadmin.site.register(State,StateAdmin)\n\nclass BurnAdmin(admin.ModelAdmin):\n list_display = (\"Id\",\"Serial_Number\" ,\"TEST_SOFTWARE\", \"START_DATE\",\"START_TIME\",\"END_DATE\",\"END_TIME\",)\nadmin.site.register(Burn,BurnAdmin)\n\n\nclass TestAdmin(admin.ModelAdmin):\n list_display = (\"Id\",\"Serial_Number\" ,\"Test_Process\", \"Station_ID\",\"Test_Date_and_Time\",\"CAN_Node_ID\",\"UUT_Model\",\n \"Analogic_Test_Software\",\"Tested_by\" ,\"Operating_Frequency\",\"Power_Output\",\n \"Amplifier_Type\",\"Coldfire_version\" ,\"FPGA_software_version\", \"CIB_CPLD_software_version\",\n \"Product_Status\",\"Amplifier_revision\" ,\"Amplifier_SubType\", \"Manufacturer\",\"Amplifier_Sub_type\",\n \"UUT_field_Strength\",\"CIB_FPGA_software_version\" ,\"Body_Gain\", \"Body_Gain_Min\",\"Body_Gain_Max\",\n \"Body_Head_Diff\",\"Body_Head_Diff_Min\" ,\"Body_Head_Diff_Max\", \"Body_Test_Diff\",\"Head_Gain\",\"Test_Gain\",\n \"Head_I_D_LIM\",\"Head_PFWD_LIM\" ,\"Head_PRFL_LIM\", \"Head_PF_AVG_LIM\",\"Head_PR_AVG_LIM\",\"Body_I_D_LIM\",\n \"Body_PFWD_LIM\",\"Body_PRFL_LIM\" ,\"Body_PF_AVG_LIM\", \"Body_PR_AVG_LIM\",\"Body_JTR_FIN_LIM\",\"Body_JT_FIN_LIM\",\n \"Body_TMP_HOT_LIM\",\"DC_Voltages_Checked_OK\" ,\"Total_Test_Time\")\nadmin.site.register(Test,TestAdmin)\n","sub_path":"testweb2/rfweb/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"364720436","text":"import random\nfrom game.style import *\n\n\nclass Dealer:\n \"\"\"\n The Dealer Class represents the dealer in the High Low game.\n \"\"\"\n def __init__(self):\n \"\"\"Initializes the deck, current_card and last_card\"\"\"\n self.deck = list(range(1, 14))\n self.current_card = None\n self.last_card = None\n\n def draw_card(self):\n \"\"\"Gets a random card from the remaining cards & removes it from the deck.\n Args:\n self (Dealer): An instance of Dealer.\n \"\"\"\n card = self.deck.pop(self.deck.index(random.choice(self.deck)))\n print_white(f\"\\nDealer Drew The Card: {card}\")\n print_reset()\n # Sets last card to the current card before the current card it set to the card just drawn\n self.last_card = self.current_card\n # Sets the current card to the one just drawn\n self.current_card = card\n return card\n","sub_path":"hilo/game/dealer.py","file_name":"dealer.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"630717090","text":"speak_multiplier=10\n\ndef my_calculator(first_number, operator, second_number):\n if (not first_number.isnumeric()) or (not second_number.isnumeric()):\n print('You did not enter numbers. 
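dnfstreak.py above imports groupby but never uses it; the hand-rolled streak scan, including its special-casing of the final attempt, is exactly what groupby expresses directly. A compact equivalent for the core computation, keeping that record's convention that -1 marks a solved attempt:

from itertools import groupby

def longest_success_streak(results):
    # Length of the longest consecutive run of solved (-1) attempts.
    runs = (len(list(g)) for solved, g in groupby(results, key=lambda v: v == -1) if solved)
    return max(runs, default=0)

print(longest_success_streak([-1, -1, 30, -1, -1, -1, 25]))  # 3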
Try again!')\n        return None\n    elif not (operator == '+' or operator == '-' or operator == '*' or operator == '/'):\n        print('You did not enter one of the operators. Try again!')\n        return None\n    else:\n        if operator == '+':\n            total_number = int(first_number)+int(second_number)\n        elif operator == '-':\n            total_number = int(first_number)-int(second_number)\n        elif operator == '*':\n            total_number = int(first_number)*int(second_number)\n        elif operator == '/':\n            if second_number == '0':\n                print('You cannot divide by zero. Try again!') \n                return None\n            else:\n                total_number = int(first_number)/int(second_number)\n        return total_number\n    ","sub_path":"ex_0411/project_modules/simple_math.py","file_name":"simple_math.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"357048185","text":"import unittest\nimport pytest\nfrom selenium import webdriver\nimport HtmlTestRunner\n\n@pytest.mark.usefixtures(\"test_setup\")\nclass MyTestCase(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        cls.driver=webdriver.Chrome(executable_path=\"../drivers/chromedriver.exe\")\n        cls.driver.implicitly_wait(10)\n        cls.driver.maximize_window()\n\n    def test_search1(self):\n        self.driver.get(\"http://google.com\")\n        self.driver.find_element_by_name(\"q\").send_keys(\"Automation step by step\")\n        self.driver.find_element_by_xpath(\"//img[@id='hplogo']\").click()\n        self.driver.find_element_by_xpath(\"(//input[@value='Google Search'])[2]\").click()\n        x=self.driver.title\n        print(x)\n        self.assertEqual(x,\"Automation step by step - Google Search\")\n\n    def test_search2(self):\n        self.driver.get(\"http://google.com\")\n        self.driver.find_element_by_name(\"q\").send_keys(\"manju\")\n        self.driver.find_element_by_xpath(\"//img[@id='hplogo']\").click()\n        self.driver.find_element_by_xpath(\"(//input[@value='Google Search'])[2]\").click()\n        x = self.driver.title\n        print(x)\n        self.assertEqual(x, \"manju - Google Search\")\n\n    @unittest.skip(\"This is a skipped test.\")\n    def test_skip(self):\n        \"\"\" This test should be skipped. \"\"\"\n\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.driver.close()\n        cls.driver.quit()\n\n\nif __name__ == '__main__':\n    unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(output='C:/Users/manjunathk/PycharmProjects/AutomatioFramework/reports'),verbosity=2)\n\n","sub_path":"tests/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"187661619","text":"from appium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom time import sleep\n\ndesired_caps = {'platformName': 'Android',  # platform name\n                'platformVersion': '5.1',  # system version\n                'deviceName': '127.0.0.1:62001',  # device name\n                'appPackage': 'com.tal.kaoyan',  # package name of the apk\n                'appActivity': 'com.tal.kaoyan.ui.activity.SplashActivity'  # activity name\n                }\n\ndriver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', desired_caps)\n\n# hard wait\nsleep(2)\n# implicit wait\ndriver.implicitly_wait(5)\n# explicit wait\nWebDriverWait(driver, 5).until(lambda x: x.find_element_by_id('android:id/button2'))\n\n# screenshots\ndriver.save_screenshot('image1.png')  # same directory as this script\ndriver.get_screenshot_as_file('./image/image2.png')  # inside the image folder\n\n# locate by id\ndriver.find_element_by_id('android:id/button2').click()  # tap the \"Cancel\" button\nsleep(1)\n\n# swiping\n# swipe(start_x, start_y, end_x, end_y, duration); duration is the gesture time in ms\n# 1. horizontal swipe\n# swipe left\nl = driver.get_window_size()  # get the window size\nx1 = l['width']*0.8\ny1 = l['height']*0.5\nx2 = l['width']*0.2\n\nfor i in range(3):\n    driver.swipe(x1, y1, x2, y1, 500)\n    sleep(1)\n\n# swipe right\n# l = driver.get_window_size()  # get the window size\n# x1 = l['width']*0.15\n# y1 = l['height']*0.5\n# x2 = l['width']*0.85\n# driver.swipe(x1, y1, x2, y1, 500)\n\n# 2. vertical swipe\n# swipe up\n# l = driver.get_window_size()  # get the window size\n# x1 = l['width']*0.5\n# y1 = l['height']*0.9\n# y2 = l['height']*0.1\n# driver.swipe(x1, y1, x1, y2, 500)\n\n# swipe down\n# l = driver.get_window_size()  # get the window size\n# x1 = l['width']*0.5\n# y1 = l['height']*0.3\n# y2 = l['height']*0.7\n# driver.swipe(x1, y1, x1, y2, 500)\n\ndriver.find_element_by_id('com.tal.kaoyan:id/activity_splash_guidfinish').click()  # tap the \"Try it now\" button\n\n# log in\n# driver.find_element_by_id('com.tal.kaoyan:id/login_email_edittext').send_keys('ruima2020')\n# driver.find_element_by_id('com.tal.kaoyan:id/login_password_edittext').send_keys('1234ruima')\n# driver.find_element_by_id('com.tal.kaoyan:id/login_login_btn').click()\n#\n# driver.find_element_by_id('com.tal.kaoyan:id/view_wemedia_cacel').click()  # close the ad\n#\n# driver.find_element_by_id('com.tal.kaoyan:id/date_task_layout').click()  # tap the \"Got it\" button\n\n# locate by text, deprecated since appium 1.5\n# driver.find_element_by_name('考研帮用户名/邮箱').send_keys('ruima2020')  # InvalidSelectorException\n\n# locate by class; class names repeat\n# driver.find_element_by_class_name('android.widget.EditText').send_keys('ruima2020')\n# driver.find_element_by_class_name('android.widget.EditText').send_keys('1234ruima')\n\n# hierarchical locating\n# Use an attribute of the parent node to make duplicated child attributes less likely.\n# driver.find_element_by_id('com.tal.kaoyan:id/login_register_text').click()  # tap the \"Register now\" button\n# parent = driver.find_element_by_id('com.tal.kaoyan:id/activity_register_parentlayout')  # a parent node that has an id\n# parent.find_element_by_class_name('android.widget.ImageView').click()  # select the \"avatar\"\n\n# list locating\n# For a group of elements whose attributes are identical, first locate the whole group,\n# then pick a concrete element through its list index.\n# images = driver.find_elements_by_id('com.tal.kaoyan:id/item_image')  # locate the group of image elements\n# images[2].click()  # select the 3rd image\n# driver.find_element_by_id('com.tal.kaoyan:id/save').click()  # tap the \"Save\" button\n\n# locate by xpath\n# An absolute xpath is slow to evaluate and rarely used; relative xpaths plus attribute matching are the usual choice.\n# xpath path expressions: / selects from the root node; // selects matching nodes anywhere in the document, regardless of position\n# nodename selects all children of this node; . selects the current node; .. selects its parent; @ selects an attribute\n# xpath wildcards: * matches any element node; @* matches any attribute node; node() matches any node type\n# text           //*[@text='text value']\n# id             //*[@resource-id='id value']\n# class          //class value  or  //*[@class='class value']\n# content-desc   //*[@content-desc='desc text']\n# contains fuzzy matching          //*[contains(@text, 'text value')]\n# (notably efficient for groups)   //*[contains(@content-desc, 'desc text')]\n#                                  //*[contains(@resource-id, 'id value')]\n#                                  //*[contains(@class, 'class value')]\n# driver.find_element_by_xpath(\"//android.widget.EditText[@text='考研帮用户名/邮箱']\").send_keys('ruima2020')\n# driver.find_element_by_xpath(\"//*[@resource-id='com.tal.kaoyan:id/login_password_edittext']\").send_keys('1234ruima')\n# driver.find_element_by_xpath(\"//*[contains(@text, '登录')]\").click()\n# driver.find_element_by_xpath(\"//android.widget.Button[contains(@text, '登录')]\").click()\n# driver.find_element_by_xpath(\"//android.widget.Button\").click()\n\n# uiautomator locating\n# Locate elements through the uiautomator library bundled with the android sdk; this approach is android-specific.\n# locate by resource-id\ndriver.find_element_by_android_uiautomator\\\n    ('new UiSelector().resourceId(\"com.tal.kaoyan:id/login_email_edittext\")').send_keys('ivancheny')\n# locate by class\n# driver.find_element_by_android_uiautomator\\\n#     ('new UiSelector().className(\"android.widget.EditText\")').send_keys('ruima2020')\ndriver.find_element_by_android_uiautomator\\\n    ('new UiSelector().resourceId(\"com.tal.kaoyan:id/login_password_edittext\")').send_keys('hnnj19961012')\ndriver.find_element_by_android_uiautomator\\\n    ('new UiSelector().resourceId(\"com.tal.kaoyan:id/login_login_btn\")').click()","sub_path":"2 Position.py","file_name":"2 Position.py","file_ext":"py","file_size_in_byte":5634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"403542352","text":"# -*- coding: utf-8 -*-\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.shortcuts import render,redirect,get_object_or_404\nfrom django.forms.models import model_to_dict\nfrom django.http import HttpResponse\nfrom cmdb.models import HostVirtual \nfrom django.core.context_processors import csrf\nfrom django.contrib.auth.decorators import login_required,permission_required\nfrom form import HostVirtualForm\nimport cmdb_log\nimport json\n\n@login_required\ndef virtual_get(request):\n    if request.method == 'GET':\n        objects = HostVirtual.objects.all().order_by('-id')\n        context = {'objects':objects}\n        return render(request,'host_virtual_list.html',context)\n\n@login_required\n@permission_required('cmdb.change_hostvirtual',raise_exception=True)\ndef virtual_add(request):\n    if request.method == 'POST':\n        form = HostVirtualForm(request.POST or None)\n        if form.is_valid():\n            form_data = form.cleaned_data\n            data = {}\n            for key in form_data:\n                if form_data[key]:\n                    data[key] = form_data[key]\n            i = HostVirtual(**data)\n            i.save()\n            cmdb_log.log_addition(request,i,i.HostName,data)\n            return redirect('host_virtual_get')\n        else:\n            return HttpResponse(form.errors)\n    else:\n        form = HostVirtualForm()\n        return render(request, 'host_virtual_add_form.html', {'form':form})\n\n@login_required\n@permission_required('cmdb.change_hostvirtual',raise_exception=True)\ndef virtual_edit(request,pk):\n    object= get_object_or_404(HostVirtual,pk=pk)\n    object_data = model_to_dict(object)\n    objects = HostVirtual.objects.filter(pk=pk)\n    form = HostVirtualForm(request.POST or None,instance=object)\n    if form.is_valid():\n        form_data = form.cleaned_data\n        data = {}\n        for key in form_data:\n            if form_data[key]:\n                data[key] 
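One caveat on the 2 Position.py record above: the find_element_by_* and find_element_by_android_uiautomator helpers were removed when Appium-Python-Client moved to Selenium 4 (client 2.x). A sketch of the same lookups against the current API; it assumes a recent appium-python-client is installed and reuses the driver built in that record:

from appium.webdriver.common.appiumby import AppiumBy

driver.find_element(AppiumBy.ID, 'com.tal.kaoyan:id/login_email_edittext').send_keys('ivancheny')
driver.find_element(AppiumBy.ANDROID_UIAUTOMATOR,
                    'new UiSelector().resourceId("com.tal.kaoyan:id/login_login_btn")').click()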
= form_data[key]\n objects.update(**data)\n new_object= get_object_or_404(HostVirtual,pk=pk)\n new_object_data = model_to_dict(new_object)\n message = cmdb_log.cmp(new_object_data,object_data)\n cmdb_log.log_change(request,new_object,form_data['HostName'],message)\n return redirect('host_virtual_get')\n return render(request,'host_virtual_update_form.html', {'form':form})\n\n@login_required\n@permission_required('cmdb.change_hostvirtual',raise_exception=True)\ndef virtual_delete(request,pk):\n object= get_object_or_404(HostVirtual,pk=pk)\n object_data = object.__dict__.copy()\n if request.method=='POST':\n object.delete()\n cmdb_log.log_deletion(request,object,object.HostName,object_data)\n return redirect('host_virtual_get')\n return render(request,'host_virtual_confirm_delete.html', {'object':object})\n\n\n@login_required\ndef virtual_detail(request,pk):\n object = get_object_or_404(HostVirtual,pk=pk)\n return render(request,'host_virtual_detail.html', {'object':object})\n\n@login_required\ndef virtual_search(request):\n if request.method == 'GET':\n search_key = request.GET['search_key']\n search_value = request.GET['search_value']\n if search_value:\n params = { search_key+'__startswith':search_value}\n objects_list = HostVirtual.objects.filter(**params).order_by(search_key)\n else:\n objects_list = HostVirtual.objects.all().order_by(search_key)\n num = objects_list.count()\n paginator = Paginator(objects_list,15)\n page = request.GET.get('page')\n try:\n objects = paginator.page(page)\n except PageNotAnInteger:\n objects = paginator.page(1)\n except EmptyPage:\n objects = paginator.page(paginator.num_pages)\n context = {'objects':objects,'total':num,'search_key':search_key,'search_value':search_value}\n return render(request,'host_virtual_list.html',context)\n","sub_path":"cmdb/host_virtual_views.py","file_name":"host_virtual_views.py","file_ext":"py","file_size_in_byte":3925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"394255876","text":"from django.shortcuts import render, redirect\n\nfrom expenses_tracker.forms import ExpenseForm, ProfileForm\nfrom expenses_tracker.models import Profile, Expense\n\n\ndef calculate_budget(profile, expenses):\n return profile.budget - sum(expense.price for expense in expenses)\n\n\ndef homepage(request):\n profile = Profile.objects.all()[0]\n if profile:\n expenses = Expense.objects.all()\n profile.budget_left = calculate_budget(profile, expenses)\n context = {\n 'profile': profile,\n 'expenses': expenses,\n }\n return render(request, 'home-with-profile.html', context)\n profile_form = ProfileForm()\n context = {\n 'profile_form': profile_form,\n }\n return render(request, 'home-no-profile.html', context)\n\n\ndef expense_create(request):\n profile = Profile.objects.all()[0]\n if request.method == 'GET':\n expense_form = ExpenseForm()\n context = {\n 'expense_form': expense_form,\n }\n return render(request, 'expense-create.html', context)\n else:\n expense_form = ExpenseForm(request.POST)\n if expense_form.is_valid():\n title = expense_form.cleaned_data['title']\n image_url = expense_form.cleaned_data['image_url']\n description = expense_form.cleaned_data['description']\n price = expense_form.cleaned_data['price']\n expense = Expense(\n title=title,\n image_url=image_url,\n description=description,\n price=price\n )\n expense.save()\n return redirect(homepage)\n context = {\n 'expense_form': expense_form,\n }\n return render(request, 'expense-create.html', context)\n\n\ndef 
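The virtual_search view above handles PageNotAnInteger and EmptyPage by hand; since Django 2.0, Paginator.get_page folds both branches in. A sketch of the replacement pattern (the helper name is mine):

from django.core.paginator import Paginator

def paginate(queryset, page_number, per_page=15):
    # get_page clamps bad or out-of-range page numbers instead of raising.
    return Paginator(queryset, per_page).get_page(page_number)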
expense_edit(request, pk):\n expense = Expense.objects.get(pk=pk)\n if request.method == 'GET':\n expense_form = ExpenseForm()\n context = {\n 'expense': expense,\n 'expense_form': expense_form,\n }\n return render(request, 'expense-edit.html', context)\n else:\n expense_form = ExpenseForm(request.POST)\n if expense_form.is_valid():\n expense.title = expense_form.cleaned_data['title']\n expense.description = expense_form.cleaned_data['description']\n expense.image_url = expense_form.cleaned_data['image_url']\n expense.price = expense_form.cleaned_data['price']\n expense.save()\n return redirect('home page')\n context = {\n 'expense': expense,\n 'expense_form': expense_form,\n }\n return render(request, 'expense-edit.html', context)\n\n\ndef expense_delete(request, pk):\n expense = Expense.objects.get(pk=pk)\n if request.method == 'GET':\n context = {\n 'expense': expense,\n }\n return render(request, 'expense-delete.html', context)\n else:\n expense.delete()\n return redirect(homepage)\n\n\ndef profile(request):\n if request.method == 'GET':\n profile = Profile.objects.all()[0]\n expenses = Expense.objects.all()\n profile.budget_left = calculate_budget(profile, expenses)\n context = {\n 'profile': profile,\n }\n return render(request, 'profile.html', context)\n else:\n profile_form = ProfileForm(request.POST)\n if profile_form.is_valid():\n budget = profile_form.cleaned_data['budget']\n first_name = profile_form.cleaned_data['first_name']\n last_name = profile_form.cleaned_data['last_name']\n profile = Profile(budget=budget, first_name=first_name, last_name=last_name)\n profile.save()\n return redirect('home page')\n\n\ndef profile_edit(request):\n profile = Profile.objects.all()[0]\n if request.method == 'GET':\n profile_form = ProfileForm()\n context = {\n 'profile': profile,\n 'profile_form': profile_form,\n }\n return render(request, 'profile-edit.html', context)\n else:\n profile_form = ProfileForm(request.POST)\n if profile_form.is_valid():\n profile.budget = profile_form.cleaned_data['budget']\n profile.first_name = profile_form.cleaned_data['first_name']\n profile.last_name = profile_form.cleaned_data['last_name']\n profile.save()\n return redirect('profile page')\n context = {\n 'profile': profile,\n 'profile_form': profile_form,\n }\n return render(request, 'profile-edit.html', context)\n\n\ndef profile_delete(request):\n if request.method == 'GET':\n return render(request, 'profile-delete.html')\n else:\n profile = Profile.objects.all()[0]\n [expense.delete() for expense in Expense.objects.all()]\n profile.delete()\n return render(request, 'home-no-profile.html')\n","sub_path":"python_web_basics/exam_prep/expenses_tracker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"53389510","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom django.contrib import auth\nfrom django.contrib import admindocs\nfrom django.conf import settings\n\nurlpatterns = patterns('',\n\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$' , 'Hoteles.views.principal'),\n url(r'^alojamiento/(\\d+)/idioma', 'Hoteles.views.idioma'),\n url(r'^actividad/css/(?P.*)$', 'django.views.static.serve', {'document_root' : settings.STATIC_URL2}),\n url(r'^alojamiento/(\\d+)', 'Hoteles.views.alojamiento'),\n url(r'^(\\d+)/xml', 'Hoteles.views.user_xml'),\n url(r'(\\d+)', 
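In the expenses_tracker views that end above, every view starts with Profile.objects.all()[0], which raises IndexError on an empty table, so the `if profile:` test in homepage can never observe a missing profile. The conventional guard returns None instead; a sketch, importing the model from that record:

from expenses_tracker.models import Profile

def current_profile():
    # .first() returns None on an empty table, which is what the
    # `if profile:` branch in homepage expects to receive.
    return Profile.objects.first()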
'Hoteles.views.usuario'),\n url(r'^alojamientos$', 'Hoteles.views.alojamientos'),\n url(r'^about$', 'Hoteles.views.about'),\n url(r'^hoteles_filt$', 'Hoteles.views.hoteles_filt'),\n url(r'^login$','django.contrib.auth.views.login'),\n url(r'^logout$','django.contrib.auth.views.logout'),\n url(r'^accounts/profile/', 'Hoteles.views.principal'),\n url(r'^anadir_css', 'Hoteles.views.anadir_css'),\n url(r'^css/(?P.*)$', 'django.views.static.serve', {'document_root' : settings.STATIC_URL2}),\n\n\n)\n","sub_path":"Practica_Hoteles/Practica_Hoteles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"188051871","text":"from tmdbv3api import Movie\nfrom tmdbv3api import TMDb\nimport redis\n\n# Opening a Connection to Redis\ncache = redis.Redis(\n host='127.0.0.1',\n port= 6379\n )\n\n# Return the output if the input is found in the database\n# Return None otherwise\ndef in_cache(key):\n value = cache.get(key)\n return value\n\n# Add a new set of input/output to the database\n# Default expire time of 1 week\ndef cache_set(key, value, expire=604800):\n print (\"caching ---THIS--- {} ---TO--- {}\".format(key, value))\n cache.set(key, value)\n cache.expire(key, expire)\n\ntmdb = TMDb()\ntmdb.api_key = '59bead7577721bcf9f8e685bf64f7060'\n\nmovie = Movie()\n\ndef closest_name(name):\n cache_result = in_cache('cn-' + name)\n if cache_result:\n splt = cache_result.decode('utf-8').split('-')\n return [(splt[0], int(splt[1]))]\n else:\n lst = movie.search(name)\n if len(lst) == 0:\n return (\"No movie found!\", 0)\n res = lst[0].title+'-'+str(lst[0].id)\n cache_set('cn-' + name, res)\n ret = [(lst[0].title, lst[0].id)]\n return ret\n\n\ndef get_details(id_):\n cache_result = in_cache('gde-'+str(id_))\n if cache_result:\n result = cache_result.decode('utf-8').split('|')\n ret = {\n 'title': result[0],\n 'overview': result[1],\n 'poster': result[2],\n 'id': id_\n }\n return ret\n else:\n try:\n result = movie.details(id_)\n ret = {\n 'title': result.title,\n 'overview': result.overview,\n 'poster': 'https://image.tmdb.org/t/p/w500' + result.poster_path,\n 'id': id_\n }\n\n res = [result.title, result.overview, 'https://image.tmdb.org/t/p/w500' + result.poster_path]\n except AttributeError:\n ret = {\n 'title': 'NotFound',\n 'overview': 'NotFound',\n 'poster': 'NotFound',\n 'id': id_\n }\n res = ['NotFound', 'NotFound', 'NotFound']\n cache_set('gde-'+str(id_), '|'.join(res))\n return ret\n\n\ndef poster_link(name, size=500):\n id_ = closest_name(name)[0][1]\n poster = get_details(id_)['poster']\n return poster\n\n\ndef get_description(name):\n id_ = closest_name(name)[0][1]\n overview = get_details(id_)['overview']\n return overview\n\n\ndef get_description_by_id(id_):\n return get_details(id_)['overview']\n\n\ndef get_title_by_id(id_):\n return get_details(id_)['title']\n\n\ndef get_rec(id_):\n cache_result = in_cache('gr-' + str(id_))\n if cache_result:\n splt = cache_result.decode('utf-8').split(',')\n return [int(x) for x in splt]\n else:\n recommendations = movie.recommendations(movie_id=id_)\n ret = [m.id for m in recommendations]\n cache_set('gr-'+str(id_), str(ret)[1:-1])\n return ret\n\n\ndef rank_rec(movies, num=5):\n ret = {}\n for m in movies:\n recommendations = get_rec(m[0])\n for r in recommendations:\n if r in ret:\n ret[r] += m[1]\n else:\n ret[r] = m[1]\n\n ret = sorted(ret.items(), key=lambda x: x[1], reverse=True)\n return [get_details(x[0]) for x in 
ret[:num]]\n\n\n\n","sub_path":"src/movie.py","file_name":"movie.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"524771997","text":"import numpy as np\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport random\n\n\nexample1 = [[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3], [4, 5], [4,6], [4,7], [5,6], [5,7], [6,7], [3,4]]\nexample1 = [[str(edge[0]), str(edge[1])] for edge in example1]\n\nexample2 = [[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3], [4, 5], [4,6], [4,7], [5,6], [5,7], [6,7]]\nexample2 = [[str(edge[0]), str(edge[1])] for edge in example2]\n\nexample3 = [[0, 1], [0, 2], [0, 3], [0, 4], [5, 6], [5, 7], [5, 8], [5, 9], [0, 5]]\nexample3 = [[str(edge[0]), str(edge[1])] for edge in example3]\n\n\ndef find_distances(graph):\n N = nx.number_of_nodes(graph)\n\n spl = nx.shortest_path_length(graph)\n\n dist = np.zeros(shape=(N, N), dtype=np.int)\n\n for p in spl:\n for target in p[1]:\n dist[int(p[0]), int(target)] = p[1][target]\n\n return dist\n\n\ndef find_neighbors(g):\n N = g.number_of_nodes()\n nb_list = [[] for _ in range(N)]\n\n for node in g.nodes():\n for nb in nx.neighbors(g, node):\n if int(nb) not in nb_list[int(node)]:\n nb_list[int(node)].append(int(nb))\n for nb_nb in nx.neighbors(g, nb):\n if int(nb_nb) not in nb_list[int(node)]:\n nb_list[int(node)].append(int(nb_nb))\n\n\n return nb_list\n\ndef grad(g, E, nb_list, node, dist):\n N = g.number_of_nodes()\n\n var = 1.0\n\n grad_sum = 0.0\n for v in range(N):\n for u in nb_list[v]:\n if node == v:\n grad_sum += +(E[u, :] - E[node, :]) / var\n\n if node == u:\n grad_sum += -(E[node, :] - E[v, :]) / var\n\n return grad_sum\n\n\ndef compute_score(g, E, nb_list, dist):\n\n N = g.number_of_nodes()\n\n var = 1.0\n #*float(dist[v, u])*float(dist[v, u])\n score = 0.0\n for v in range(N):\n for u in nb_list[v]:\n score += -np.dot(E[u, :] - E[v, :], E[u, :] - E[v, :]) / (var*2.0)\n\n return score\n\ndef draw_points(E, name=\"\", g=None, base=False):\n if E.shape[1] != 2:\n raise ValueError(\"Dim must be 2\")\n\n\n if name == \"Karate\":\n groundTruth = [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1]\n plt.figure()\n for inx in range(g.number_of_nodes()):\n if groundTruth[inx] == 0:\n plt.plot(E[inx, 0], E[inx, 1], 'r.')\n if groundTruth[inx] == 1:\n plt.plot(E[inx, 0], E[inx, 1], 'b.')\n plt.show()\n\n else:\n plt.figure()\n plt.plot(T[:4, 0], T[:4, 1], 'b.')\n plt.plot(T[4:, 0], T[4:, 1], 'r.')\n plt.show()\n\n\ndef run(g, dim, num_of_iters, eta):\n N = nx.number_of_nodes(g)\n\n # Initialize parameters\n E = np.random.normal(size=(N, dim))\n\n\n nb_list = find_neighbors(g)\n\n #dist = find_distances(g)\n dist = []\n\n for iter in range(num_of_iters):\n #if iter % 10 == 0:\n # draw_points(E, \"Karate\", g, base=True)\n for node in range(N):\n\n node_grad_E = grad(g, E, nb_list, node, dist)\n\n E[node, :] += eta * node_grad_E\n\n score = compute_score(g, E, nb_list, dist)\n print(\"Iter: {} Score {}\".format(iter, score))\n\n return E\n\n\nedges = example1\n#g = nx.Graph()\n#g.add_edges_from(edges)\ng = nx.read_gml(\"../datasets/citeseer.gml\")\n\n\nE = run(g, dim=128, num_of_iters=5000, eta=0.001)\nnp.save(\"./numpy_files/citeseer_gaussian_v5\", E)\n#draw_points(E, \"Karate\", 
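cache_set in the src/movie.py record above issues SET and then EXPIRE as two round trips; redis-py's ex argument sets the TTL atomically in a single command. An equivalent sketch:

import redis

cache = redis.Redis(host='127.0.0.1', port=6379)

def cache_set(key, value, expire=604800):
    # SET key value EX expire, in one round trip.
    cache.set(key, value, ex=expire)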
g)","sub_path":"comm/gaussian_comm_v5.py","file_name":"gaussian_comm_v5.py","file_ext":"py","file_size_in_byte":3461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"628057346","text":"'''Trains a simple convnet on the MNIST dataset.\nGets to 99.25% test accuracy after 12 epochs\n(there is still a lot of margin for parameter tuning).\n16 seconds per epoch on a GRID K520 GPU.\n'''\n\nfrom __future__ import print_function\nimport keras\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras import backend as K\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.preprocessing import image\nimport numpy as np\nimport backend as B\nimport os\nimport math\n\nbatch_size = 512\nepochs = 100\nstepsPE = 10\n\n# input image dimensions\niDim = (192, 108)\n\nX_dir = 'data/imgs'\nY_dir = 'data/data'\n\nX_loc = os.listdir(X_dir)\n\nx = np.zeros([len(X_loc),iDim[0], iDim[1],1])\n\nfor i in range(len(x)):\n theImg = f'{X_dir}/{X_loc[i]}'\n\n g = B.Resize([theImg, theImg.replace('.','_t.')], iDim)\n imp = image.img_to_array(g)\n x[i] = imp.reshape(iDim[0], iDim[1],1)\n\nload = Y_dir + '/labels.txt'\n\nwith open(load, 'r') as f:\n store = f.read()[:-1].split('\\n')\n\ny = np.array(store).reshape(len(store),1)\n\n# print(y.shape, y)\n\ninput_shape = (iDim[0], iDim[1], 1)\n\nx = x.astype('float32')\nx /= 255\nprint('x_train shape:', x.shape)\nprint('y_train shape:', y.shape)\nprint(x.shape[0], 'train samples')\n\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(3, 3),\n activation='relu',\n input_shape=input_shape))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(512, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(1, activation='linear'))\n\nmodel.compile(loss=keras.losses.mean_squared_error,\n optimizer=keras.optimizers.Adadelta())\n\nmodel.fit(x, y,\n steps_per_epoch=stepsPE,\n epochs=epochs,\n verbose=1,\n shuffle=True)\n \n# score = model.evaluate(x, y, verbose=0)\n\nmodel.save('model.h5')\n# print('Test loss:', score[0])\n# print('Test accuracy:', score[1])\n\np = model.predict(x)\n\nprint(\"Pred: \", p)","sub_path":"Regual Machine Learning/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"111207916","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\nimport codecs\n\nfrom pykafka import KafkaClient\n\nclient = KafkaClient(hosts=\"192.168.239.130:9092\")\n\nretopic=client.topics[\"retest1\"]\n\nrefilename=\"auth.json\"\n\ndef produce_kafka_file(filename,topic):\n with topic.get_sync_producer() as producer:\n with codecs.open(filename,\"r\") as rf:\n for line in rf:\n line = line.strip()\n if not line:\n continue\n producer.produce(line)\n\n\nproduce_kafka_file(refilename,retopic)","sub_path":"demo/kafka/producer_demo02.py","file_name":"producer_demo02.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"514335339","text":"from PIL import Image\nimport os\nimport glob\nfrom tqdm import tqdm\nimport pdb\nimport numpy as np\nimport sys\nsys.path.append(\"../\")\nfrom facenet_pytorch import MTCNN, InceptionResnetV1\nimport pandas as pd\nimport re\nfrom 
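The grad() function in the gaussian_comm_v5.py record above rescans every node's neighbor list for each node it updates, making one full sweep O(N^2) even on sparse graphs. The same attraction gradient can be accumulated for all nodes in one pass over the (v, u) pairs; a numpy sketch, assuming pairs is an integer array enumerating u in nb_list[v] for every v (that flattening is mine, not in the original):

import numpy as np

def grad_all(E, pairs):
    v, u = pairs[:, 0], pairs[:, 1]
    diff = E[u] - E[v]
    g = np.zeros_like(E)
    np.add.at(g, v, diff)    # +(E[u] - E[v]) contribution when node == v
    np.add.at(g, u, -diff)   # -(E[u] - E[v]) contribution when node == u
    return g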
facenet_pytorch.models.mtcnn import prewhiten\nfrom torchvision.transforms import functional as F\n\nrunQuery = sys.argv[1]\n# Get data path\ndata_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), 'data/')\n\n# Check if datapath exists\nif not os.path.isdir(data_path):\n raise Exception(\"X0X: Path Not Found\")\n\n# Get test and val data paths\n# Currrently only test and val images are being used\nceleb_path = os.path.join(data_path, 'img_align_celeba')\n# Get all image filenames from test and val folders\nceleb_img_filenames = os.listdir(celeb_path)\n\nceleb_img_filenames = [os.path.join(celeb_path, l) for l in celeb_img_filenames if not l.startswith(\"._\")]\nquery_path = os.path.join(data_path, 'celebTest')\n# Get all image filenames from test and val folders\nquery_img_filenames = os.listdir(query_path)\n\nquery_img_filenames = [os.path.join(query_path, l) for l in query_img_filenames if not l.startswith(\"._\")]\nprint(\"Num of images: \", len(celeb_img_filenames))\nprint(\"Num of images: \", len(query_img_filenames))\n\n\nresnet = InceptionResnetV1(pretrained='vggface2').eval()\nimage_size = 160\nid_ = 0\nfeatures = []\nif runQuery == 'False':\n # Get the ground truth bounding boxes\n with open('imageListCeleb.txt','w') as myfile:\n for im_path in tqdm(celeb_img_filenames):\n img = Image.open(im_path)\n img_name = re.findall(r\"[\\w']+\", os.path.basename(im_path))[0]\n img_resized = img.resize((image_size, image_size), 2)\n\n img_resized = F.to_tensor(np.float32(img_resized))\n img_proc = prewhiten(img_resized)\n img_embedding = resnet(img_proc.unsqueeze(0)).cpu().data.numpy()\n features.append(img_embedding)\n myfile.writelines(im_path+'\\n')\n id_ += 1\n if id_ > 60000:\n break\n np.save(\"faceFeaturesCeleb.npy\", np.concatenate(features, axis=0))\nelse:\n # Get the ground truth bounding boxes\n with open('imageListCelebProbe.txt','w') as myfile:\n for im_path in tqdm(query_img_filenames):\n img = Image.open(im_path)\n img_name = re.findall(r\"[\\w']+\", os.path.basename(im_path))[0]\n img_resized = img.resize((image_size, image_size), 2)\n\n img_resized = F.to_tensor(np.float32(img_resized))\n img_proc = prewhiten(img_resized)\n img_embedding = resnet(img_proc.unsqueeze(0)).cpu().data.numpy()\n features.append(img_embedding)\n myfile.writelines(im_path+'\\n')\n id_ += 1\n if id_ > 60000:\n break\n np.save(\"faceFeaturesCelebProbe.npy\", np.concatenate(features, axis=0))\n","sub_path":"Person Matching Code/code/Celeb/run_celeb.py","file_name":"run_celeb.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"257538154","text":"# -*- coding: utf-8 -*-\nimport json\n\nfrom django.core import serializers\nfrom django.core.cache import cache\nfrom django.shortcuts import resolve_url, redirect\n\njson_serializer = serializers.get_serializer(\"json\")()\nfrom django.shortcuts import render, get_object_or_404, HttpResponse\nfrom ..models import Library, District\nfrom ..settings import PARTICIPANTS_SHOW_ORG_TYPES\n\n\ndef make_library_dict(library):\n return {\n 'id': library.id,\n 'code': library.code,\n 'name': library.name,\n 'postal_address': getattr(library, 'postal_address', u\"не указан\"),\n 'phone': getattr(library, 'phone', u\"не указан\"),\n 'plans': getattr(library, 'plans', u\"не указано\"),\n 'http_service': getattr(library, 'http_service', u\"не указан\"),\n 'latitude': library.latitude,\n 'longitude': library.longitude,\n }\n\n\ndef 
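run_celeb.py above pushes images through InceptionResnetV1 one at a time; the model accepts batched input, so stacking the prewhitened tensors and calling it once per chunk is the usual speedup. A sketch, assuming imgs is a list of (3, 160, 160) tensors preprocessed as in that record:

import torch

def embed_batch(resnet, imgs, batch_size=64):
    feats = []
    with torch.no_grad():
        for i in range(0, len(imgs), batch_size):
            batch = torch.stack(imgs[i:i + batch_size])
            feats.append(resnet(batch).cpu().numpy())
    return feats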
index(request):\n cbs_list = Library.objects.filter(parent=None, hidden=False, org_type__in=PARTICIPANTS_SHOW_ORG_TYPES).order_by(\n 'weight')\n js_orgs = []\n for org in cbs_list:\n js_orgs.append(make_library_dict(org))\n\n js_orgs = json.dumps(js_orgs, encoding='utf-8', ensure_ascii=False)\n return render(request, 'participants/frontend/cbs_list.html', {\n 'cbs_list': cbs_list,\n 'js_orgs': js_orgs\n })\n\n\ndef branches(request, code=None):\n if request.method == \"POST\":\n code = request.POST.get('code', None)\n library = None\n if code:\n library = get_object_or_404(Library, code=code)\n libraries = Library.objects.filter(parent=library, hidden=False).order_by('weight')\n\n js_orgs = []\n for org in libraries:\n js_orgs.append(make_library_dict(org))\n\n js_orgs = json.dumps(js_orgs, encoding='utf-8', ensure_ascii=False)\n\n if request.is_ajax():\n return HttpResponse(js_orgs)\n\n return render(request, 'participants/frontend/branch_list.html', {\n 'library': library,\n 'libraries': libraries,\n 'js_orgs': js_orgs\n })\n\n\ndef detail(request, code):\n library = get_object_or_404(Library, code=code)\n return redirect('participant_site:frontend:index', library_code=library.code)\n js_orgs = []\n js_orgs.append(make_library_dict(library))\n\n js_orgs = json.dumps(js_orgs, encoding='utf-8', ensure_ascii=False)\n\n return render(request, 'participants/frontend/detail.html', {\n 'library': library,\n 'js_orgs': js_orgs\n })\n\n\ndef get_district_letters(request):\n letters_dict = {}\n districts = District.objects.all()\n for district in districts:\n name = district.name.lower().replace(' ', '')\n if name.startswith(u'г.'):\n letter = name.replace(u'г.', '').strip()[0:1].upper()\n else:\n letter = name[0:1].upper()\n exist_districts = letters_dict.get(letter)\n if exist_districts is None:\n exist_districts = []\n letters_dict[letter] = exist_districts\n exist_districts.append({\n 'id': district.id,\n 'name': district.name\n })\n\n letters = []\n for letter, districts in sorted(letters_dict.items()):\n letters.append({\n 'name': letter,\n 'districts': districts\n })\n return HttpResponse(\n json.dumps(letters, ensure_ascii=False),\n content_type='application/json; charset=utf-8'\n )\n\n\ndef filter_by_districts(request):\n lat = float(request.GET.get('lat', 0))\n lon = float(request.GET.get('lon', 0))\n if lat and lon:\n return geo_nearest(request)\n\n district_id = request.GET.get('districtId', '')\n\n if not district_id:\n return HttpResponse(\n json.dumps({\n 'error': u'Не указан район'\n }, ensure_ascii=False),\n content_type='application/json; charset=utf-8',\n status=400)\n\n districts = District.objects.filter(id=district_id)\n fields = ('id', 'code', 'name', 'latitude', 'longitude', 'postal_address')\n libraries = list(\n Library.objects.filter(district__in=districts, hidden=False, org_type__in=PARTICIPANTS_SHOW_ORG_TYPES)\n .exclude(parent=None).order_by('-republican').order_by('name').values(*fields)\n )\n\n geo_libraries = []\n for library in libraries:\n latitude = library.get('latitude', 0)\n longitude = library.get('longitude', 0)\n if not latitude or not longitude:\n continue\n geo_libraries.append({\n 'library': library,\n # 'distance': geodistance(lat, lon, latitude, longitude),\n 'href': resolve_url('participants:frontend:detail', code=library.get('code'))\n })\n\n # geo_libraries.sort(key=lambda item: item.get('distance'))\n\n result = {\n 'count': len(geo_libraries),\n 'object_list': geo_libraries,\n\n }\n return HttpResponse(json.dumps(result, ensure_ascii=False), 
content_type='application/json')\n\n\ndef geosearch(request):\n return render(request, 'participants/frontend/geosearch.html')\n\n\ndef geo_nearest(request):\n page = int(request.GET.get('page', 1))\n lat = float(request.GET.get('lat', 0))\n lon = float(request.GET.get('lon', 0))\n fields = ('id', 'code', 'name', 'latitude', 'longitude', 'postal_address')\n cache_key = 'geo_libs'\n cached_libraies = cache.get(cache_key)\n\n if not cached_libraies:\n libraries = list(\n Library.objects.filter(hidden=False, org_type__in=PARTICIPANTS_SHOW_ORG_TYPES)\n .exclude(parent=None).values(*fields)\n )\n cached_libraies = json.dumps(libraries).encode('zlib')\n cache.set(cache_key, cached_libraies, timeout=60)\n\n libraries = json.loads(cached_libraies.decode('zlib'))\n\n geo_libraries = []\n for library in libraries:\n latitude = library.get('latitude', 0)\n longitude = library.get('longitude', 0)\n\n if not latitude or not longitude:\n continue\n geo_libraries.append({\n 'library': library,\n 'distance': geodistance(lat, lon, latitude, longitude),\n 'href': resolve_url('participants:frontend:detail', code=library.get('code'))\n })\n\n geo_libraries.sort(key=lambda item: item.get('distance'))\n\n per_page = 10\n # objects_page = get_page(request, geo_libraries, per_page)\n offset = (page - 1) * per_page\n\n result = {\n 'page': page,\n 'per_page': per_page,\n 'count': len(geo_libraries),\n 'object_list': geo_libraries[offset:per_page],\n\n }\n return HttpResponse(json.dumps(result, ensure_ascii=False), content_type='application/json')\n #\n # return render(request, 'participants/frontend/nearest_results.html', {\n # 'objects': objects_page.paginator.object_list[offset::per_page]\n # })\n\n\nimport math\n\n\ndef geodistance(lat1, lon1, lat2, lon2, unit='K'):\n rlat1 = math.pi * float(lat1) / 180.0\n rlat2 = math.pi * float(lat2) / 180.0\n theta = lon1 - lon2\n rtheta = math.pi * theta / 180.0\n dist = math.sin(rlat1) * math.sin(rlat2) + math.cos(rlat1) * math.cos(rlat2) * math.cos(rtheta)\n dist = math.acos(dist)\n dist = dist * 180 / math.pi\n dist = dist * 60 * 1.1515\n if unit == \"K\":\n dist = dist * 1.609344\n else:\n dist = dist * 0.8684\n return dist\n\n# def geodistance(lat1, lon1, lat2, lon2):\n# lat1 = math.radians(lat1)\n# lon1 = math.radians(lon1)\n# lat2 = math.radians(lat2)\n# lon2 = math.radians(lon2)\n#\n# dlon = lon1 - lon2\n#\n# EARTH_R = 6372.8\n#\n# y = math.sqrt(\n# (math.cos(lat2) * math.sin(dlon)) ** 2\n# + (math.cos(lat1) * math.sin(lat2) - math.sin(lat1) * math.cos(lat2) * math.cos(dlon)) ** 2\n# )\n# x = math.sin(lat1) * math.sin(lat2) + math.cos(lat1) * math.cos(lat2) * math.cos(dlon)\n# c = math.atan2(y, x)\n# return EARTH_R * c\n","sub_path":"libcms/apps/participants/frontend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"423256305","text":"from django.conf.urls import patterns, include, url\n\nfrom django.conf import settings\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n url(r'^$','evenlik.views.index'),\n url(r'^confirm/(?P\\S+)/$','evenlik.views.confirm'),\n url(r'^admin/$','evenlik.admin.subscribers'),\n url(r'^admin/checkin/$','evenlik.admin.checkin'),\n url(r'^login/$','evenlik.admin.login'),\n url(r'^admin/editor/$','evenlik.admin.edit_event'),\n url(r'^admin/mail/$','evenlik.admin.massMail'),\n 
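Two apparent slips in the participants/frontend/views.py record that ends above. First, detail() returns a redirect on its second line, so everything after it is dead code. Second, geo_nearest slices geo_libraries[offset:per_page]; once offset >= per_page (every page past the first) that slice is empty. A sketch of the intended window arithmetic (the function name is mine):

def page_window(items, page, per_page=10):
    # offset:offset + per_page, not offset:per_page.
    offset = (page - 1) * per_page
    return items[offset:offset + per_page]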
url(r'^admin/logout/$','evenlik.admin.logout'),\n url(r\"^certificate/(?P\\S+)/$\", 'evenlik.admin.generatePDF'),\n # url(r'^$', 'localServer.views.home', name='home'),\n # url(r'^localServer/', include('localServer.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n # url(r'^admin/', include(admin.site.urls)),\n url(r\"^media/(?P.*)$\", 'django.views.static.serve',{'document_root' : settings.MEDIA_ROOT, 'show_indexes' : True}),\n)\n","sub_path":"localServer/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"612490148","text":"import numpy as np\nimport pandas as pd\nfrom PIL import Image\nimport os\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.utils.data as data\n\n#------------------------------------------------------------------------------#\nbatch_size = 200\n\n#------------------------------------------------------------------------------#\nn_points = 20000\nsigma = 0.5\n\npoints = np.zeros((n_points,2))\ntarget = np.zeros((n_points,1))\nfor k in range(n_points):\n\trandom = np.random.rand()\n\tif random<0.25:\n\t\tcenter = np.array([0,0])\n\t\ttarget[k,0] = 0\n\telif random<0.5:\n\t\tcenter = np.array([2,2])\n\t\ttarget[k,0] = 1\n\telif random<0.75:\n\t\tcenter = np.array([2,0])\n\t\ttarget[k,0] = 2\n\telse:\n\t\tcenter = np.array([0,2])\n\t\ttarget[k,0] = 3\n\tnoise = np.random.randn(1,2)\n\tpoints[k,:] = center + sigma*noise\n\npoints_and_label = np.concatenate((points,target),axis=1)\npoints_and_label = pd.DataFrame(points_and_label)\npoints_and_label.to_csv('./data/clas.csv',index=False)\n\n#------------------------------------------------------------------------------#\nclass mypoints(data.Dataset):\n\tdef __init__(self, filename):\n\t\tpddata = pd.read_csv(filename).values\n\t\tself.target = pddata[:,2:]\n\t\tself.data = pddata[:,0:2]\n\t\tself.n_data = self.data.shape[0]\n\t\n\tdef __len__(self):\n\t\treturn self.n_data\n\t\n\tdef __getitem__(self, index):\n\t\treturn torch.Tensor(self.data[index]), torch.Tensor(self.target[index])\n\nmydata = mypoints('./data/clas.csv')\n\nmyloader = data.DataLoader(mydata,batch_size=batch_size,num_workers=0)\n\n#------------------------------------------------------------------------------#\nclass mymodel(nn.Module):\n\tdef __init__(self,n_in=2,n_out=4):\n\t\tsuper(mymodel,self).__init__()\n\t\tself.n_in = n_in\n\t\tself.n_out = n_out\n\t\t\n\t\tself.linear = nn.Linear(self.n_in,self.n_out,bias=True)\n\t\tself.prob = nn.LogSoftmax(dim=1)\n\t\n\tdef forward(self,x):\n\t\tx = self.linear(x)\n\t\tx = self.prob(x)\n\t\treturn x\n\n#------------------------------------------------------------------------------#\nmodel = mymodel()\n\noptimizer = torch.optim.Adam(model.parameters(),lr=0.1)\ncriterium = nn.NLLLoss()\n\n#------------------------------------------------------------------------------#\nfor iter, (data, target) in enumerate(myloader):\n\tdata = Variable(data,requires_grad=False)\n\ttarget = Variable(target.long(),requires_grad=False)\n\t\n\toptimizer.zero_grad()\n\tpred = model(data)\n\tloss = criterium(pred,target.view(-1))\n\tloss.backward()\n\toptimizer.step()\n\t\n\tif 
iter%10==0:\n\t\tprint(loss.item())\n\n#------------------------------------------------------------------------------#\ntarget = target.numpy()\npoints = data.numpy()\n\nselect = target[:,0]==0\np0 = points[select,:]\nplt.scatter(p0[:,0],p0[:,1],facecolors='b')\n\nselect = target[:,0]==1\np0 = points[select,:]\nplt.scatter(p0[:,0],p0[:,1],facecolors='g')\n\nselect = target[:,0]==2\np0 = points[select,:]\nplt.scatter(p0[:,0],p0[:,1],facecolors='tab:orange')\n\nselect = target[:,0]==3\np0 = points[select,:]\nplt.scatter(p0[:,0],p0[:,1],facecolors='r')\n\n\n\npred = pred.exp().detach()\n_, index = torch.max(pred,1)\npred = pred.numpy()\nindex = index.numpy()\n\nselect = index==0\np0 = points[select,:]\nplt.scatter(p0[:,0],p0[:,1],s=60,marker='s',edgecolors='b',facecolors='none')\n\nselect = index==1\np0 = points[select,:]\nplt.scatter(p0[:,0],p0[:,1],s=60,marker='s',edgecolors='g',facecolors='none')\n\nselect = index==2\np0 = points[select,:]\nplt.scatter(p0[:,0],p0[:,1],s=60,marker='s',edgecolors='tab:orange',facecolors='none')\n\nselect = index==3\np0 = points[select,:]\nplt.scatter(p0[:,0],p0[:,1],s=60,marker='s',edgecolors='r',facecolors='none')\n\n\nplt.show()\n\n\n\n","sub_path":"2-4classes_classifier_with_CPU.py","file_name":"2-4classes_classifier_with_CPU.py","file_ext":"py","file_size_in_byte":3603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"156115398","text":"import requests\nimport textwrap\nimport os\nfrom bs4 import BeautifulSoup\n\n# =========================================================\n# This terminal application scrapes the daily news from the\n# Daily Wire. We can see top articles, most recent articles\n# and hopefully top trends in the future\n# =========================================================\n\nclass DailyWire:\n \"\"\" This class is responsible for scraping the Daily Wire website\n \"\"\"\n\n def __init__( self ):\n \"\"\" Start with automatically getting the content of the main page\n \"\"\"\n self.result = requests.get( \"https://www.dailywire.com\" )\n self.content = self.result.content\n self.soup = BeautifulSoup( self.content, \"html.parser\" )\n\n\n def scrape_top_headlines( self ):\n \"\"\" A method that grabs the current top headlines. Should be 5-6 headlines\n \"\"\"\n top_headlines = self.soup.find_all( \"h2\" )\n top_headlines_list = []\n counter = 1\n\n print( \"\\n\\nTOP HEADLINES:\\n\\n\" )\n\n for title in top_headlines:\n # Need to avoid getting ads with this method\n if title.text != \"Get This eBookFREE!\" and title.text != \"We're taking a whole new approach.\":\n # Getting the extended url link for the article\n headline_extended_link = title.a.get( 'href' )\n stripped_text = title.text.strip( '\\n' )\n top_headlines_list.append( stripped_text )\n print( \"{}. 
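The classifier record above pairs nn.LogSoftmax with nn.NLLLoss; nn.CrossEntropyLoss fuses exactly that pair, letting the model return raw logits. A quick self-contained equivalence check:

import torch
import torch.nn as nn

logits = torch.randn(8, 4)
targets = torch.randint(0, 4, (8,))
fused = nn.CrossEntropyLoss()(logits, targets)
manual = nn.NLLLoss()(nn.LogSoftmax(dim=1)(logits), targets)
assert torch.allclose(fused, manual)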
{}\\n( {} )\\n\".format( counter, stripped_text, \"https://www.dailywire.com\" + headline_extended_link ) )\n counter += 1\n\n\n def scrape_front_page_headlines( self ):\n \"\"\" A method that grabs rest of the headlines prior to clicking the 'load more' button\n \"\"\"\n headlines = self.soup.find_all( \"h3\" )\n headlines_list = []\n counter = 1\n\n print( \"\\n\\nOTHER HEADLINES:\\n\\n\" )\n\n for title in headlines:\n # Need to avoid getting ads with this method\n if title.text != \"Make the web’s best conservative commentary even better\":\n # Getting the extended url link for the article\n headline_extended_link = title.a.get( 'href' )\n stripped_text = title.text.strip( '\\n' )\n headlines_list.append( stripped_text )\n print( \"{}. {}\\n( {} )\\n\".format( counter, stripped_text, \"https://www.dailywire.com\" + headline_extended_link ) )\n counter += 1\n \n\n def scrape_article_title_and_content( self ):\n \"\"\" A method that gets a specific article's content and stores the content into a single string variable\n \"\"\"\n\n # Let the user enter a URL of a Daily Wire article to scrape\n print( \"\\nPlease enter the URL of an article you wish to scrape\\n\" )\n article_to_scrape = input( \"> \" )\n\n # Try to get a result from the url, otherwise print that an error occurred\n try:\n result = requests.get( article_to_scrape )\n content = result.content\n soup = BeautifulSoup( content, \"html.parser\" )\n\n # Get the article's title\n article_title_string = \"\"\n # article_title = soup.select( \".page-title\" )\n article_title = soup.h1.getText().upper()\n article_title_string = article_title\n # print( \"\\n\\n\\n{}\\n\\n\\n\".format( article_title ) )\n\n # Grabbing the article by a div class and then traversing down to the p tag children to grab the content\n article_content_string = \"\"\n article_content = soup.select( \".field-body > p\" )\n index = 0\n for row in article_content:\n article_content_string += article_content[index].text + \" \"\n index += 1\n\n # Using the module textwrap in order to limit character column width to make the content more legible\n print( \"\\n\\n--------------------------------------------------------------------------------\\nTITLE:\\n\\n{}\\n\\n\\nCONTENT:\\n\\n{}\\n--------------------------------------------------------------------------------\".format( textwrap.fill( article_title_string, width=80 ), textwrap.fill( article_content_string, width=80 ) ) )\n except:\n print( \"\\n\\nAn error has occurred. 
Please try again.\")\n\n\ndef daily_wire_menu():\n \"\"\" The main menu for the terminal application\n \"\"\"\n \n # Instantiate the Daily Wire class\n dailywire = DailyWire()\n while True:\n \n print( \"\\n\\n\\n-------------------\\n| DAILY WIRE MENU |\\n-------------------\\n\\n1) View the top headlines\\n2) View the other front page headlines\\n3) View a specific article\\n4) Clear the terminal screen\\n5) Exit the program\\n\\n\" )\n user_option_choice = input( \"> \" )\n\n # Menu options\n if int( user_option_choice ) == 1:\n dailywire.scrape_top_headlines()\n elif int( user_option_choice ) == 2:\n dailywire.scrape_front_page_headlines()\n elif int( user_option_choice ) == 3:\n dailywire.scrape_article_title_and_content()\n elif int( user_option_choice ) == 4:\n os.system('cls' if os.name == 'nt' else 'clear')\n elif int( user_option_choice ) == 5:\n break\n else:\n print( \"\\nPLEASE ENTER A VALID VALUE\\n\" )\n\n\nif __name__ == \"__main__\":\n daily_wire_menu()\n","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":5399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"301189238","text":"import os\nimport json\nfrom pprint import pprint\nfrom parser import webtotrain \nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\n\nall_unseen = \"0\"\n\n# Cost of correction, in seconds, per word\ncost = {\n \"not_gt_uncorrectable\": 16,\n \"gt_correctable\": 5,\n \"gt_uncorrectable\": 5,\n \"not_gt_correctable\": 5\n}\n\n# Cost of human per day\nhuman_cost = 350 # Per working day\nworking_hours = 6 # Hours working a day\nhuman_cost_per_hour = human_cost/working_hours\n\ndef cost_seconds(save):\n new = {\n \"not_gt_uncorrectable\": save[\"uncorrectable\"] - \\\n save[\"ocr_equals_gt\"][\"uncorrectable\"] ,\n \"gt_correctable\" : save[\"ocr_equals_gt\"][\"correctable\"],\n \"not_gt_correctable\": save[\"correctable\"] - save[\"ocr_equals_gt\"][\"correctable\"],\n \"gt_uncorrectable\": save[\"ocr_equals_gt\"][\"uncorrectable\"]\n }\n\n total_cost = 0\n for key in cost.keys():\n total_cost += cost[key] * new[key]\n return total_cost\n\n\ndef final_error(save):\n return save[\"real_word_error\"]/save[\"total\"]\n\ndef tohours(seconds):\n return seconds/3600;\n\ndef to_money_units(hours):\n return human_cost_per_hour * hours\n\n\nfmap = {}\nfmap['hi'] = dict(map(lambda x: x.strip().split('_'), open(\"hi.fmap\")))\nfmap['ml'] = dict(map(lambda x: x.strip().split('_'), open(\"ml.fmap\")))\n\ndef extract_bcode(save):\n bname = save[\"book_dir\"].split('/')[-2]\n return bname\n\ndef get_total_words(save):\n td = save[all_unseen][\"unincluded\"]\n return td[\"total\"]\n\ndef get_xs_ys(save):\n page_count = int(save[\"pages\"])\n batchSize = 10\n results = []\n review_cost = 0\n for pages_included in range(0, page_count, batchSize):\n key = str(pages_included)\n pages_unincluded = page_count - pages_included\n \"\"\"\n total_cost_seconds = cost_seconds(save[key][\"unincluded\"]) + review_cost\n #+ cost_seconds(save[key][\"included\"]) # Part of review\n\n #total_cost_rupees = to_money_units(tohours(total_cost_seconds))\n cost_per_page = total_seconds/pages_unincluded\n review_cost += cost_per_page * 10\n \"\"\"\n cost_seconds_per_page= cost_seconds(save[key][\"unincluded\"])/pages_unincluded\n total_cost_seconds = review_cost + cost_seconds_per_page*pages_unincluded\n total_cost_rupees = to_money_units(tohours(total_cost_seconds))\n results.append((pages_included,total_cost_rupees))\n 
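# --- Illustrative sketch, not part of the original analysis: how the
# module-level constants combine into a rupee figure. The 350 rupees per
# 6-hour working day and the per-word correction seconds mirror the
# definitions at the top of this file; the word counts are invented.
def rupees_for_corrections(word_counts, seconds_per_word,
                           rupees_per_day=350.0, hours_per_day=6):
    # total correction effort in seconds -> hours -> money units
    total_seconds = sum(seconds_per_word[k] * n for k, n in word_counts.items())
    return (rupees_per_day / hours_per_day) * (total_seconds / 3600.0)

# e.g. 1000 "gt_correctable" words at 5 s each cost about 81 rupees
print(rupees_for_corrections({"gt_correctable": 1000}, {"gt_correctable": 5}))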
review_cost += cost_seconds_per_page * 10\n return list(zip(*results))\n\nprint('Lang,Book,Cost,Error,Word')\nfor lang in ['hi', 'ml']:\n saves = []\n for dr, drs, fls in os.walk('output/%s'%(lang)):\n for fn in fls:\n fn_with_path = dr + '/' + fn\n saves.append(json.load(open(fn_with_path)))\n\n # words = list(map(get_total_words, saves))\n plt.figure(figsize=(20,10))\n for save in saves:\n xs, ys = get_xs_ys(save)\n bname = fmap[lang][extract_bcode(save)]\n plt.plot(xs, ys, label=bname)\n label_str = \"%s, %s\"%(bname[:5], save[\"pages\"])\n plt.text(xs[-1], ys[-1], label_str)\n\n plt.xlabel(\"no of pages included in dictionary\")\n plt.ylabel(\"estimated cost for entire book\")\n plt.savefig('output/images/cost-projected-%s.png'%(lang), dpi=300)\n plt.clf()\n\n\n","sub_path":"src/experiments/new_cost_analysis.py","file_name":"new_cost_analysis.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"285350173","text":"\"\"\"\nEnforces common logging conventions and code reuse.\n\nOriginal Issue = DC-637\n\nThe intent of this module is to allow other modules to setup logging easily without\nduplicating code.\n\"\"\"\n\n# Python imports\nimport logging\nimport os\nfrom datetime import datetime\n\n\ndef generate_paths(log_filepath_list):\n \"\"\"\n Generates filepaths from the list of passed filepaths\n\n :param log_filepath_list: desired string path and or name of the log file\n example: ['path/', 'faked.log', 'path/fake.log']\n \"\"\"\n default_file_name = datetime.now().strftime('curation%Y%m%d_%H%M%S.log')\n\n # iterates through log_filepath_list and sets path to\n # provided path and default_file_name\n default_output_log_path = [\n os.path.join(filepath, default_file_name)\n for filepath in log_filepath_list\n if 'log' not in os.path.basename(filepath)\n ]\n\n # iterates through log_filepath_list and sets path to default path\n # if just filename was item passed in log_filepath_list\n default_path = 'logs/'\n default_log_path = [\n os.path.join(default_path, filename)\n for filename in log_filepath_list\n if not os.path.dirname(filename)\n ]\n\n # iterates through log_filepath_list and sets path to log files\n output_log_path = [\n os.path.join(filepath)\n for filepath in log_filepath_list\n if os.path.dirname(filepath) and 'log' in os.path.basename(filepath)\n ]\n\n # appends all generated filepaths to list of log_paths\n log_path = default_output_log_path + default_log_path + output_log_path\n\n # if any path in log_path list doesn't exist, it will be created\n for path in log_path:\n if not os.path.isdir(path):\n try:\n os.makedirs(os.path.dirname(path))\n except OSError:\n # directory already exists\n pass\n\n return log_path\n\n\ndef create_logger(filename, console_logging=False):\n \"\"\"\n Sets up python logging to file\n\n :param filename: name of the log file\n :param console_logging: if False will only create FileHandler, if True\n will create both FileHandler and StreamHandler\n \"\"\"\n # gets new logger, will be created if doesn't exist\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.DEBUG)\n\n # formatters for both FileHandler and StreamHandler\n file_formatter = logging.Formatter(\n fmt='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%(asctime)s')\n stream_formatter = logging.Formatter(\n '%(levelname)s - %(name)s - %(message)s')\n\n file_handler = logging.FileHandler(filename)\n file_handler.setLevel(logging.INFO)\n 
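# --- Usage sketch (hypothetical paths) for the helpers in this module:
# setup_logger() accepts bare directories, bare filenames, or full
# paths, resolves each to a log file via generate_paths(), and returns
# one configured logger per resolved file.
#
#     logs = setup_logger(['logs/', 'curation.log'], console_logging=True)
#     for log in logs:
#         log.info('pipeline logging initialised')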
file_handler.setFormatter(file_formatter)\n logger.addHandler(file_handler)\n\n if console_logging is True:\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(logging.INFO)\n stream_handler.setFormatter(stream_formatter)\n logger.addHandler(stream_handler)\n\n return logging.getLogger(filename)\n\n\ndef setup_logger(log_filepath_list, console_logging=True):\n \"\"\"\n Sets up python logging to file and console for use in other modules.\n\n :param log_filepath_list: desired string path and or name of the log file\n example: ['path/', 'faked.log', 'path/fake.log']\n :param console_logging: determines if log is output to desired and/or default file\n and console, or just the desired and/or default file\n \"\"\"\n\n log_path = generate_paths(log_filepath_list)\n\n log_list = []\n\n for filename in log_path:\n log_list.append(create_logger(filename, console_logging))\n return log_list\n","sub_path":"data_steward/utils/pipeline_logging.py","file_name":"pipeline_logging.py","file_ext":"py","file_size_in_byte":3732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"407530704","text":"import requests\nimport json\nimport config\n\nBASE_URL = 'https://api.github.com'\nLink_URL = 'https://gist.github.com'\n\nusername = 'wittionary'\ngist_id = 'd1bca6d9263a6255724d2e9de5512cf4'\n\nheader = { 'X-Github-Username': '%s' % username,\n 'Content-Type': 'application/json',\n 'Authorization': 'token %s' % config.api_token,\n }\n\nurl = \"/gists/%s\" % gist_id\nr = requests.get('%s%s' % (BASE_URL, url), headers=header)\n\nprint(r.json())","sub_path":"gist-api2.py","file_name":"gist-api2.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"650005907","text":"def reverse_graph(g):\n result = {}\n for node in g:\n for nb, w in g[node]:\n if nb not in result:\n result[nb] = [[node, w]]\n else:\n result[nb].append([node, w])\n return result\nmemo = {}\ndef SP(graph, snode, enode):\n rev_graph = reverse_graph(graph)\n return d(rev_graph, snode, enode)\n\ndef d(g, s, v):\n if v in memo:\n return memo[v]\n elif s == v:\n return 0\n else:\n ans = min([d(g, s, u) + w for u, w in g[v]])\n memo[v] = ans\n return ans\n","sub_path":"Algorithms/Dynamic_Programming/Shortest_Path/SP.py","file_name":"SP.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"356164167","text":"import os\nimport numpy as np\nimport matplotlib\nmatplotlib.use('PS')\nimport matplotlib.pyplot as plt\nimport math\nimport re\nimport scipy.linalg as spla\nfrom scipy import interpolate\nfrom scipy import linalg\n\ndef input_file(file_path,matrix):\n with open(file_path, 'r') as f_1:\n data = f_1.readlines()\n temp_1 = re.findall(r\"[-+]?\\d+\\.?\\d*\",data[0])\n subspace_dimension = int(temp_1[0])\n for loop1 in range (0, subspace_dimension):\n temp_2 = re.findall(r\"[-+]?\\d+\\.?\\d*\",data[1+loop1])\n matrix[loop1,:] = temp_2[:]\n# print(loop1)\n# print(matrix[loop1,:])\n#LECs = [200,-91.85]\nmagic_no = 2\n\n\n######################################################\n######################################################\n### read LECs set from file\n######################################################\n######################################################\ndef read_LEC(file_path):\n LEC = np.zeros(LEC_num)\n with open(file_path,'r') as f_1:\n count = len(open(file_path,'rU').readlines())\n 
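# Quick self-check of the number-parsing idiom used below; the example
# line is invented. The pattern pulls every signed decimal out of a
# free-form line, and the code matches header lines first because the
# same pattern would also grab digits embedded in names such as "c1s0".
assert re.findall(r"[-+]?\d+\.?\d*", "cE and cD   -0.33   0.81") == ['-0.33', '0.81']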
data = f_1.readlines()\n wtf = re.match('#', 'abc',flags=0)\n for loop1 in range(0,count):\n if ( re.search('cE and cD', data[loop1],flags=0) != wtf):\n temp_1 = re.findall(r\"[-+]?\\d+\\.?\\d*\",data[loop1+1])\n LEC[0] = float(temp_1[0])\n LEC[1] = float(temp_1[1])\n if ( re.search('LEC ci', data[loop1],flags=0) != wtf):\n temp_1 = re.findall(r\"[-+]?\\d+\\.?\\d*\",data[loop1+1])\n LEC[2] = float(temp_1[0])\n LEC[3] = float(temp_1[1])\n LEC[4] = float(temp_1[2])\n LEC[5] = float(temp_1[3])\n if ( re.search('c1s0 & c3s1', data[loop1],flags=0) != wtf):\n temp_1 = re.findall(r\"[-+]?\\d+\\.?\\d*\",data[loop1+1])\n LEC[6] = float(temp_1[0])\n LEC[7] = float(temp_1[1])\n LEC[8] = float(temp_1[2])\n LEC[9] = float(temp_1[3])\n if ( re.search('cnlo', data[loop1],flags=0) != wtf):\n temp_1 = re.findall(r\"[-+]?\\d+\\.?\\d*\",data[loop1+1])\n LEC[10] = float(temp_1[0])\n LEC[11] = float(temp_1[1])\n LEC[12] = float(temp_1[2])\n LEC[13] = float(temp_1[3])\n LEC[14] = float(temp_1[4])\n LEC[15] = float(temp_1[5])\n LEC[16] = float(temp_1[6])\n return LEC\n\ndef read_LEC_2(file_path):\n LEC = np.zeros(LEC_num)\n with open(file_path,'r') as f_1:\n count = len(open(file_path,'rU').readlines())\n data = f_1.readlines()\n wtf = re.match('#', 'abc',flags=0)\n for loop1 in range(0,count):\n if ( re.search('cD,cE', data[loop1],flags=0) != wtf):\n temp_1 = re.findall(r\"[-+]?\\d+\\.?\\d*\",data[loop1])\n LEC[0] = float(temp_1[0])\n LEC[1] = float(temp_1[1])\n if ( re.search('LEC=', data[loop1],flags=0) != wtf):\n temp_1 = re.findall(r\"[-+]?\\d+\\.?\\d*\",data[loop1])\n LEC[2] = float(temp_1[0])\n LEC[3] = float(temp_1[1])\n LEC[4] = float(temp_1[2])\n LEC[5] = float(temp_1[3])\n if ( re.search('c1s0, c3s1', data[loop1],flags=0) != wtf):\n temp_1 = re.findall(r\"[-+]?\\d+\\.?\\d*\",data[loop1])\n LEC[6] = float(temp_1[4])\n LEC[7] = float(temp_1[5])\n LEC[8] = float(temp_1[6])\n LEC[9] = float(temp_1[7])\n if ( re.search('cnlo_pw', data[loop1],flags=0) != wtf):\n temp_1 = re.findall(r\"[-+]?\\d+\\.?\\d*\",data[loop1])\n LEC[10] = float(temp_1[2])\n LEC[11] = float(temp_1[3])\n LEC[12] = float(temp_1[4])\n LEC[13] = float(temp_1[5])\n LEC[14] = float(temp_1[6])\n LEC[15] = float(temp_1[7])\n LEC[16] = float(temp_1[8])\n return LEC\n\ndef read_sm_vec(vec_num,vec_dimension,sm_vec,database_dir):\n for loop1 in range(vec_num):\n file_path = database_dir + str(loop1+1)+\"_sm.txt\"\n with open(file_path,'r') as f_1:\n count = len(open(file_path,'rU').readlines())\n data = f_1.readlines()\n wtf = re.match('#', 'abc',flags=0)\n for loop2 in range(0,vec_dimension):\n temp_1 = re.findall(r\"[-+]?\\d+\\.?\\d*\",data[loop2])\n sm_vec[loop1][loop2] = float(temp_1[0])\n\n######################################################\n######################################################\n### generate infile for solve_general_EV\n######################################################\n######################################################\ndef generate_ccm_in_file(file_path,vec_input,particle_num,matter_type,density,nmax,cal_type):\n with open(file_path,'w') as f_1:\n f_1.write('!Chiral order for Deltas(LO = 0,NLO=2,NNLO=3,N3LO=4) and cutoff'+'\\n')\n f_1.write('3, 450\\n')\n f_1.write('! cE and cD 3nf parameters:'+'\\n' )\n f_1.write('%.12f, %.12f\\n' % (vec_input[0],vec_input[1]))\n f_1.write('! 
LEC ci \\n')\n f_1.write('%.12f, %.12f, %.12f, %.12f \\n' % (vec_input[2],vec_input[3],vec_input[4],vec_input[5]))\n f_1.write('!c1s0 & c3s1 \\n')\n f_1.write('%.12f, %.12f, %.12f, %.12f, %.12f, %.12f \\n' % (vec_input[6],vec_input[7],vec_input[8],vec_input[9],vec_input[9],vec_input[9]))\n f_1.write('! cnlo(7) \\n')\n f_1.write('%.12f, %.12f, %.12f, %.12f, %.12f, %.12f, %.12f \\n' % (vec_input[10],vec_input[11],vec_input[12],vec_input[13],vec_input[14],vec_input[15],vec_input[16]))\n f_1.write('! number of particles'+'\\n')\n f_1.write('%d\\n' % (particle_num) )\n f_1.write('! specify: pnm/snm, input type: density/kfermi'+'\\n')\n f_1.write(matter_type+', density'+'\\n')\n f_1.write('! specify boundary conditions (PBC/TABC/TABCsp/subspace_cal/subspace_cal_dens/solve_general_EV)'+'\\n')\n f_1.write('%s\\n' %(cal_type) )\n f_1.write('! dens/kf, ntwist, nmax'+'\\n')\n f_1.write('%.12f, 1, %d\\n' % (density, nmax))\n f_1.write('! specify cluster approximation: CCD, CCDT'+'\\n')\n f_1.write('CCD(T)'+'\\n')\n f_1.write('! tnf switch (T/F) and specify 3nf approximation: 0=tnf0b, 1=tnf1b, 2=tnf2b'+'\\n')\n f_1.write('T, 3'+'\\n')\n f_1.write('! 3nf cutoff(MeV),non-local reg. exp'+'\\n')\n f_1.write('450, 3'+'\\n')\n\n\n\n######################################################\n######################################################\n### call solve_general_EV \n######################################################\n######################################################\ndef call_solve_general_EV(vec_input,in_dir,out_dir):\n neutron_num = 2 #test\n particle_num = 28\n density = 0.16\n density_min = 0.14\n density_max = 0.22\n nmax = 1 #test\n\n generate_ccm_in_file(in_dir,vec_input,neutron_num,'pnm',density,nmax,'solve_general_EV_sm')\n os.system('./'+nucl_matt_exe+' '+in_dir+' > '+out_dir)\n\n\n\n######################################################\n######################################################\n### print H matrix for individual LEC\n######################################################\n######################################################\ndef print_LEC_matrix(out_dir,subspace_dimension,matrix):\n with open(out_dir,'w') as f_1:\n f_1.write(matrix)\n # for loop1 in range (subspace_dimension):\n # f_1.write(matrix[loop1,:]+'\\n')\n\n\n######################################################\n######################################################\n### sm calculation with different LECs\n######################################################\n######################################################\ndef sm_calculation(vec_input,in_dir,out_dir):\n neutron_num = 2 #test\n particle_num = 28\n density = 0.16\n density_min = 0.14\n density_max = 0.22\n nmax = 1 #test\n generate_ccm_in_file(in_dir,vec_input,neutron_num,'pnm',density,nmax,'sm')\n os.system('./'+nucl_matt_exe+' '+in_dir+' > '+out_dir)\n\n with open('sm_result.txt','r') as f_1:\n data = f_1.readlines()\n wtf = re.match('#', 'abc',flags=0)\n temp_1 = re.findall(r\"[-+]?\\d+\\.?\\d*\",data[0])\n sm_cal = float(temp_1[0])\n return sm_cal\n\n\n######################################################\n######################################################\n### generate emulator_matrix\n######################################################\n######################################################\ndef generate_emulator_matrix(subspace_dimension):\n C_matrix = np.zeros((subspace_dimension,subspace_dimension))\n N_matrix = np.zeros((subspace_dimension,subspace_dimension))\n H_matrix = np.zeros((subspace_dimension,subspace_dimension))\n 
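# --- Sketch (not the original author's driver) of how the matrices
# written by this routine are meant to be consumed: c_mat is the
# LEC-independent part saved to C_matrix_sm.txt, lec_mats the per-LEC
# matrices, n_mat the norm matrix, and spla is scipy.linalg as imported
# at the top of this file. H(c) = C + sum_i c_i * H_i, followed by a
# generalized eigensolve against N.
def emulator_ground_state(lec_vec, c_mat, lec_mats, n_mat):
    h = c_mat + sum(c * m for c, m in zip(lec_vec, lec_mats))
    evals = spla.eigvals(h, n_mat)  # generalized eigenvalues of (H, N)
    return min(ev.real for ev in evals if ev.imag == 0)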
LEC_all_matrix = np.zeros((LEC_num,subspace_dimension,subspace_dimension))\n\n LEC = np.zeros(LEC_num)\n call_solve_general_EV(LEC,\"ccm_in_test\",\"a.out\")\n N_matrix = np.loadtxt(\"N_matrix_sm.txt\")\n H_matrix = np.loadtxt(\"H_matrix_sm.txt\")\n #out_dir = \"./N_matrix_sm.txt\"\n #np.savetxt(out_dir,N_matrix)\n \n C_matrix = H_matrix\n out_dir = \"./C_matrix_sm.txt\"\n np.savetxt(out_dir,C_matrix)\n\n for loop1 in range(LEC_num):\n LEC = np.zeros(LEC_num)\n LEC[loop1] = 1 \n call_solve_general_EV(LEC,\"ccm_in_test\",\"a.out\")\n H_matrix = np.loadtxt(\"H_matrix_sm.txt\")\n #K_matrix = np.loadtxt(\"K_matrix_sm.txt\")\n LEC_all_matrix[loop1,:,:] = H_matrix + - C_matrix\n out_dir = \"./emulator/LEC_\"+str(loop1+1)+\"_matrix_sm\"\n np.savetxt(out_dir,LEC_all_matrix[loop1,:,:])\n\n\n\n######################################################\n######################################################\n### solve_general_EV_sm!!!\n######################################################\n######################################################\ndef solve_general_EV_sm(LEC_target,database_dir):\n H = np.zeros((subspace_dimension,subspace_dimension))\n N = np.zeros((subspace_dimension,subspace_dimension))\n # C = np.zeros((subspace_dimension,subspace_dimension))\n H_matrix = np.zeros((LEC_num,subspace_dimension,subspace_dimension))\n N = np.loadtxt(database_dir+\"N_matrix_sm.txt\")\n H = np.loadtxt(database_dir+\"H_matrix_sm.txt\")\n\n #subtract = [4,30, 56 ]\n subtract = []\n H = np.delete(H,subtract,axis = 0)\n H = np.delete(H,subtract,axis = 1) \n N = np.delete(N,subtract,axis = 0)\n N = np.delete(N,subtract,axis = 1) \n\n eigvals,eigvec = spla.eig(N)\n print (\"N eigvals = \"+str(sorted(eigvals)))\n \n #np.set_printoptions(suppress=True)\n #np.set_printoptions(precision=6) \n # np.savetxt('H.test',H,fmt='%.01f')\n# np.savetxt('H.test',H)\n\n # test \n# N_new = np.zeros((subspace_dimension-2,subspace_dimension-2))\n# H_new = np.zeros((subspace_dimension-2,subspace_dimension-2))\n#\n# for loop1 in range(subspace_dimension-2):\n# for loop2 in range(subspace_dimension-2):\n# loop3 = loop1\n# loop4 = loop2\n# if (loop3 >= 13):\n# loop3 = loop1+1\n# if (loop4 >= 13):\n# loop4 = loop2+1\n# if (loop3 >= 33):\n# loop3 = loop1+1\n# if (loop4 >= 33):\n# loop4 = loop2+1\n# N_new[loop1,loop2] = N[loop3,loop4]\n#\n# for loop1 in range(subspace_dimension-2):\n# for loop2 in range(subspace_dimension-2):\n# loop3 = loop1\n# loop4 = loop2\n# if (loop3 >= 13):\n# loop3 = loop1+1\n# if (loop4 >= 13):\n# loop4 = loop2+1\n# if (loop3 >= 33):\n# loop3 = loop1+1\n# if (loop4 >= 33):\n# loop4 = loop2+1\n# H_new[loop1,loop2] = H[loop3,loop4]\n##\n##\n# np.savetxt(\"./N_new.txt\",N)\n# np.savetxt(\"./H_new.txt\",H)\n# print(\"H=\"+str(H))\n# print(\"rank of N =\"+str(np.linalg.matrix_rank(N)))\n #Ni = N.I\n #print (N)\n #Ni = np.linalg.inv(N)\n #\n #print (np.dot(Ni,N_matrix))\n #print (Ni*N_matrix)\n \n #Ni_dot_H = np.dot(Ni,H)\n #D,V = np.linalg.eig(Ni_dot_H)\n #print (Ni_dot_H)\n #print (\"D=\"+str(D))\n #print (\"V=\"+str(V))\n print('H',np.size(H,1)) \n eigvals,eigvec_L, eigvec_0 = spla.eig(H,N,left =True,right=True)\n print('eigvalsize,', eigvals.shape) \n loop2 = 0\n for loop1 in range(np.size(H,1)):\n ev = eigvals[loop1] \n if ev.imag != 0:\n continue\n # if ev.real < 0:\n # continue\n loop2 = loop2+1\n \n ev_all = np.zeros(loop2)\n loop2 = 0\n for loop1 in range(np.size(H,1)):\n ev = eigvals[loop1] \n if ev.imag != 0:\n continue\n # if ev.real < 0:\n # continue\n ev_all[loop2] = ev.real\n loop2 = loop2+1\n \n \n 
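# (Equivalent one-pass form of the two counting loops above:
#  ev_all = np.array([ev.real for ev in eigvals if ev.imag == 0]) )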
ev_sorted = sorted(ev_all)\n print('eigvals='+str (ev_sorted))\n #print('eigvec_L='+str (eigvec_L))\n #print('eigvec_0='+str (eigvec_0))\n print(eigvec_L[np.where(eigvals==ev_sorted[0])]) \n \n print('eigvals_gs='+str (ev_sorted[0]))\n \n \n \n #D,V = np.linalg.eig(H_matrix)\n #print (\"D=\"+str(D))\n #print(np.linalg.matrix_rank(N_matrix))\n #print(np.linalg.matrix_rank(H_matrix))\n \n \n #print(N_matrix)\n #print(H_matrix) \n\ndef test_1():\n vec_num =64\n vec_dimension = 53\n sm_vec = np.zeros((vec_num,vec_dimension))\n LEC = read_LEC(\"ccm_in_DNNLO450\")\n #LEC_14th = read_LEC_2(\"/home/slime/work/Eigenvector_continuation/CCM_kspace_deltafull/test/backup/DNNLOgo450_test_sm_vs_ccd_nmax1_n_2/14_sm.txt\") \n #print(\"LEC_14th\"+str(LEC_14th))\n sm_calculation(LEC,\"ccm_in_test\",\"a.out\")\n H_1 = np.loadtxt(\"H_temp_real.txt\")\n \n print (\"H= \"+str(H_1.shape))\n vec_num = 64\n read_sm_vec(vec_num,vec_dimension,sm_vec,\"/home/slime/work/Eigenvector_continuation/CCM_kspace_deltafull/test/backup/DNNLOgo450_test_sm_vs_ccd_nmax1_n_2/\")\n\n #eigvals,eigvec_L, eigvec_0 = spla.eig(H,N,left =True,right=True)\n eigvals, eigvec = spla.eig(H_1)\n\n loop2 = 0\n for loop1 in range(np.size(H_1,1)):\n ev = eigvals[loop1]\n if ev.imag != 0:\n continue\n loop2 = loop2+1\n \n ev_all = np.zeros(loop2)\n loop2 = 0\n for loop1 in range(np.size(H_1,1)):\n ev = eigvals[loop1] \n if ev.imag != 0:\n continue\n # if ev.real < 0:\n # continue\n ev_all[loop2] = ev.real\n loop2 = loop2+1\n ev_sorted = sorted(ev_all)\n# print (\"H eigvals = \"+str(ev_sorted))\n# print(\"sm_vec=\"+str( sm_vec.shape))\n H_temp = np.dot(sm_vec,H_1)\n H = np.dot(H_temp,sm_vec.T)\n N = np.dot(sm_vec,sm_vec.T)\n print(\"H=\"+str( H.shape))\n# print(\"N=\"+str( N))\n np.savetxt(\"H_matrix_sm_test.txt\",H,fmt='%.15f')\n np.savetxt(\"N_matrix_sm_test.txt\",N,fmt='%.15f')\n\n H = np.loadtxt(\"./H_matrix_sm_test.txt\")\n N = np.loadtxt(\"./N_matrix_sm_test.txt\")\n \n\n eigvals,eigvec_L, eigvec_0 = spla.eig(H,N,left =True,right=True)\n print('eigvalsize,', eigvals.shape) \n loop2 = 0\n for loop1 in range(np.size(H,1)):\n ev = eigvals[loop1] \n if ev.imag != 0:\n continue\n # if ev.real < 0:\n # continue\n loop2 = loop2+1\n \n ev_all = np.zeros(loop2)\n loop2 = 0\n for loop1 in range(np.size(H,1)):\n ev = eigvals[loop1] \n if ev.imag != 0:\n continue\n # if ev.real < 0:\n # continue\n ev_all[loop2] = ev.real\n loop2 = loop2+1\n \n \n ev_sorted = sorted(ev_all)\n print('eigvals='+str (ev_sorted))\n #print('eigvec_L='+str (eigvec_L))\n #print('eigvec_0='+str (eigvec_0))\n \n print('eigvals_gs='+str (ev_sorted[1]))\n \n \n\n H_2 = np.loadtxt(\"H_matrix_sm.txt\")\n N_2 = np.loadtxt(\"N_matrix_sm.txt\")\n\n np.savetxt(\"H-H_2.txt\",H-H_2)\n np.savetxt(\"N-N_2.txt\",N-N_2)\n\n\n######################################################\n######################################################\n#### MAIN\n######################################################\n######################################################\nsubspace_dimension = 64\nLEC_num = 17\nnucl_matt_exe = './prog_ccm.exe'\n\n\n#database_dir= \"/home/slime/work/Eigenvector_continuation/CCM_kspace_deltafull/test/backup/DNNLOgo450_test_sm_vs_ccd_nmax1_n_2/\"\ndatabase_dir= \"./\"\nfile_path = \"ccm_in_DNNLO450\"\nLEC = read_LEC(file_path)\n#solve_general_EV_sm(LEC,database_dir)\n\ngenerate_emulator_matrix(subspace_dimension)\n\n#test_1()\n\n\n\n\n\n\n\n###LEC_new = np.zeros(LEC_num)\n####sm_cal_new = np.zeros(LEC_num)\n###\n###LEC_new = LEC.copy()\n###sm_count = 10\n###sm_cal_new = 
np.zeros(sm_count)\n###LEC_new_shift = np.zeros(sm_count)\n###\n###count = 0\n###which_LEC = 10\n###for loop1 in np.arange(0,1,1./sm_count):\n### LEC_range = 10\n### LEC_max = LEC * ( 1 + LEC_range)\n### LEC_min = LEC * ( 1 - LEC_range)\n### LEC_new[which_LEC] = LEC_min[which_LEC] + loop1 * (LEC_max[which_LEC] - LEC_min[which_LEC])\n#### print(LEC_new[which_LEC])\n### LEC_new_shift[count] = LEC_new[which_LEC]\n### sm_cal_new[count] = sm_calculation(LEC_new,\"ccm_in_test\",\"a.out\")\n### count = count + 1\n###\n###print(sm_cal_new)\n###\n###fig1 = plt.figure('fig1')\n###\n###matplotlib.rcParams['xtick.direction'] = 'in'\n###matplotlib.rcParams['ytick.direction'] = 'in'\n###ax1 = plt.subplot(111)\n###plt.tick_params(top=True,bottom=True,left=True,right=True,width=2)\n###ax1.spines['bottom'].set_linewidth(2)\n###ax1.spines['top'].set_linewidth(2)\n###ax1.spines['left'].set_linewidth(2)\n###ax1.spines['right'].set_linewidth(2)\n###\n###\n#### sm calculation\n###y_list_1 = sm_cal_new\n###x_list_1 = LEC_new_shift\n###\n###\n####l0 = plt.scatter (x_list_0,y_list_0,color = 'k', marker = 's',s = 200 ,zorder = 4, label=r'$\\Delta$NNLO$_{\\rm{go}}$(450)')\n###l1 = plt.scatter (x_list_1, y_list_1,color = 'cornflowerblue', edgecolor = 'k', marker = 'o',s = 120 ,zorder=2,label = 'sm_cal')\n####l2 = plt.plot([-10, 40], [-10, 40], ls=\"-\",color = 'k', lw = 3, zorder = 3)\n###\n####plt.xlim((-10,40))\n####plt.ylim((-10,40))\n####plt.xticks(np.arange(-10,41,10),fontsize = 15)\n####plt.yticks(np.arange(-10,41,10),fontsize = 15)\n###\n###\n###plt.legend(loc='upper left',fontsize = 15)\n###plt.xlabel(r\"$\\rm{CCSD} \\ [\\rm{MeV}]$\",fontsize=20)\n###plt.ylabel(r\"$\\rm{SP-CC} \\ [\\rm{MeV}]$\",fontsize=20)\n###\n###plot_path = 'sm_test.pdf'\n###plt.savefig(plot_path,bbox_inches='tight')\n###plt.close('all')\n###\n###\n\n","sub_path":"test/eigenvector_diagonal_sm.py","file_name":"eigenvector_diagonal_sm.py","file_ext":"py","file_size_in_byte":18434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"296809995","text":"\"\"\"Skrifið Python forrit sem biður notandann að slá inn nafn skrár sem inniheldur eitt\norð/tóka í línu með auðri línu á milli setninga. Forritið prentar út lengsta orðið í\nskránni ásamt lengd þess. Ef inntaksskrá finnst ekki þá skal fallið open_file skila\nNone og aðalforritið prentar þá út viðeigandi villuskilaboð. 
Aðal forritið er gefið og\nþví má EKKI breyta.\"\"\"\n\n# Function definitions start here\nlongest_word = ''\nfilename = input(\"Enter name of file: \") #opnar skrá\n\n\nopen_file = open(filename, 'r')#opnar til að lesa\n\n\ndef get_longest_word(filename):\n\t#with open(filename, 'r') as f: #opnar til að lesa\n\tlongest_word = ''\n\tfor word in filename:\n\t\tstripped = word.strip().replace(' ', '') #skiptir á bili\n\tif len(stripped) > len(longest_word):\n\t\t\t\tlongest_word = stripped\n\t\t#except FileNotFoundError: #ef forritið finnur ekki skrá\n\t\t#\tprint(filename, \"not found!\")\n\t\t#except FileNotFoundError:\n\t\t#\tprint(filename, \"not found!\")\n\nlength_of_longest_word = len(longest_word)\n\n# The main program starts here\nfilename = input(\"Enter name of file: \")\nfile_stream = open_file(filename)\nif file_stream:\n\tlongest_word = get_longest_word(file_stream)\n\tprint(\"Longest word is '{:s}' of length {:d}\".format(longest_word, len(longest_word)))\n\tfile_stream.close\nelse:\n\tprint('File',filename,'not found!')","sub_path":"Próf 1/Dæmi 1.py","file_name":"Dæmi 1.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"35271234","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Copyright (c) 2020 The PIVX developers\n# Distributed under the MIT software license, see the accompanying\n# file LICENSE.txt or http://www.opensource.org/licenses/mit-license.php.\n\nimport logging\nimport os\nimport platform\n\n#get system information to setup according platform lock and unlock methods\nHOST_OS = platform.system()\n\ndef get_default_destination_dir():\n dest_dir = os.path.expanduser('~')\n if HOST_OS == 'Linux':\n dest_dir = os.path.join(dest_dir, \".pivx-params\")\n elif HOST_OS == 'Windows':\n dest_dir = os.path.join(os.getenv('APPDATA'), \"PIVXParams\")\n elif HOST_OS == 'Darwin':\n dest_dir = os.path.join(dest_dir, \"PIVXParams\")\n else:\n raise Exception(\" %s is not currently supported.\", HOST_OS)\n return dest_dir\n\ndef init_logs(dest_dir):\n filename = os.path.join(dest_dir, \"logs.txt\")\n filemode = 'w'\n format = '%(asctime)s - %(levelname)s - %(threadName)s | %(message)s'\n level = logging.DEBUG\n logging.basicConfig(filename=filename,\n filemode=filemode,\n format=format,\n level=level\n )\n","sub_path":"src/ZkParamsWizard/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"400021349","text":"import pandas as pd\nimport random\nimport datetime\n\ndef create_data() :\n print(\"Creating dummy data...\")\n df_office = pd.DataFrame(\n data={\n \"office_id\": list(range(1,6)),\n \"office_name\": [ f\"office_{i}\" for i in range(1,6)],\n \"street\": [ f\"street_{i}\" for i in range(1,6)],\n \"city\": [\"city_{}\".format(random.randint(1,4)) for i in range(1,6)],\n \"state_name\": [\"state_{}\".format(random.randint(1,3)) for i in range(1,6)],\n \"zip_code\": [random.randint(1,100) for i in range(1,6)],\n }\n )\n\n df_brand = pd.DataFrame(\n data={\n \"brand_id\": list(range(1,5)),\n \"brand_name\": [\"Volkswagen\", \"Nissan\", \"Chevrolet\", \"Toyota\"]\n }\n )\n car_brand = {\n \"Jetta\":1,\n \"Tiguan\":1,\n \"Gol\":1,\n \"Sentra\":2,\n \"March\":2,\n \"Kicks\":2,\n \"Beat\":3,\n \"Cavalier\":3,\n \"Tracker\":3,\n \"Yaris\":4,\n \"Corolla\":4,\n \"Rav4\":4\n }\n\n car_type = {\n \"Jetta\":\"Sedan\",\n \"Tiguan\":\"SUV\",\n 
\"Gol\":\"Subcompacto\",\n \"Sentra\":\"Sedan\",\n \"March\":\"Subcompacto\",\n \"Kicks\":\"SUV\",\n \"Beat\":\"Subcompacto\",\n \"Cavalier\":\"Sedan\",\n \"Tracker\":\"SUV\",\n \"Yaris\":\"Subcompacto\",\n \"Corolla\":\"Sedan\",\n \"Rav4\":\"SUV\"\n }\n\n rdn_cars = random.choices(list(car_brand.keys()), k=25)\n\n df_car = pd.DataFrame(\n data={\n \"car_id\": list(range(1,26)),\n \"current_office_id\": random.choices(list(range(1,6)), k=25),\n \"brand_id\": [ car_brand[i] for i in rdn_cars],\n \"car_name\": rdn_cars,\n \"car_type\": [ car_type[i] for i in rdn_cars],\n \"color\": random.choices([\"azul\", \"negro\", \"blanco\"], k=25),\n \"model_name\": [\"base\"]*25,\n \"model_year\": random.choices([2017,2018,2019], k=25),\n \"registration_date\": [datetime.date(year=2020, month=1, day=21)]*25\n }\n )\n\n df_customer = pd.DataFrame(\n data={\n \"customer_id\": list(range(1,36)),\n \"firts_name\": [ f\"cliente_{i}\" for i in range(1,36)],\n \"last_name\": [ f\"apellido_{i}\" for i in range(1,36)],\n \"city\": random.choices(list(range(1,4)), k=35),\n \"state_name\": random.choices(list(range(1,3)), k=35),\n \"zip_code\": random.choices(list(range(1,100)), k=35),\n \"phone_number\": random.choices(list(range(1,100)), k=35),\n \"email\": [ f\"email_{i}\" for i in range(1,36)],\n \"registration_date\": [datetime.date(year=2020, month=1, day=21)]*35\n }\n )\n\n aux_l = []\n counter = 1\n for car in list(range(1,26)) :\n res = random.randint(1,10)\n tmp_date = datetime.datetime(year=2020, month=random.randint(2,6), day=random.randint(1,28), hour=random.randint(8,22))\n for r in range(res) :\n aux_d = {}\n aux_d[\"rental_id\"] = counter\n aux_d[\"customer_id\"] = random.randint(1,35)\n aux_d[\"car_id\"] = car\n aux_d[\"pickup_office_id\"] = random.randint(1,5)\n aux_d[\"return_office_id\"] = random.randint(1,5)\n aux_d[\"pickup_date\"] = tmp_date\n tmp_date = tmp_date + datetime.timedelta(hours=random.randint(2,168))\n aux_d[\"return_date\"] = tmp_date\n aux_d[\"booked_date\"] = tmp_date - datetime.timedelta(days=1)\n delta = aux_d[\"return_date\"] - aux_d[\"pickup_date\"]\n aux_d[\"amount\"] = int(delta.total_seconds() / 3600) * 100\n counter = counter + 1\n\n aux_l.append(aux_d)\n\n df_rental = pd.DataFrame.from_dict(aux_l)\n\n df_brand.to_csv(\"script/data/brand.csv\", index=False)\n df_car.to_csv(\"script/data/car.csv\", index=False)\n df_customer.to_csv(\"script/data/customer.csv\", index=False)\n df_office.to_csv(\"script/data/office.csv\", index=False)\n df_rental.to_csv(\"script/data/rental.csv\", index=False)\n print(\"Dummy data created and saved!\")\n\nif __name__ == \"__main__\":\n create_data()","sub_path":"script/create_data.py","file_name":"create_data.py","file_ext":"py","file_size_in_byte":4048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"312147","text":"# Everything\nimport pygame # need pygame lib to work\n# other modules go here\npygame.init() # initiation function with pygame\n\ndisplay_width = 800\ndisplay_height = 600\n\nblack = (0, 0, 0) # rgb\nwhite = (255, 255, 255) # rgb\n\n# width of car pic\ncar_width = 100\n\n# width and height\ngameDisplay = pygame.display.set_mode((display_width, display_height)) # touple\n\n# title of window\npygame.display.set_caption('A bit Racey')\n\n# define the game's clock\n# time things for us\n# frames per second\nclock = pygame.time.Clock()\n\ncarImg = pygame.image.load('Car.png') # loading image\n\ndef car(x, y):\n gameDisplay.blit(carImg, (x, y)) # draws imagewith a 
touple\n\ndef crash(): # message displays\n message_display('You Crashed')\n\n# VARY\ndef game_loop():\n\n x = (display_width * 0.45)\n y = (display_height * 0.8)\n\n x_change = 0 # location\n\n # Game Loop = logic for the game\n # Stop the game? - crashed / quit\n gameExit = False # games starts with no crash\n\n # EVENT HANDLING\n # while still no crash\n while not gameExit:\n for event in pygame.event.get(): # gets any events, mouse on screen, pressing keys etc., per frame per secon\n if event.type == pygame.QUIT: # xs out of the window\n gameExit = True # break out of this loop\n\n if event.type == pygame.KEYDOWN: # did someone push any key down\n if event.key == pygame.K_LEFT: # Left arrow key\n x_change = -5 # moving\n elif event.key == pygame.K_RIGHT:\n x_change = 5\n\n if event.type == pygame.KEYUP: # key has been released\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n x_change = 0 # does nothing \n\n x += x_change # change location of car\n\n gameDisplay.fill(black)\n\n car(x, y) # show our car\n\n # Boundaries\n if x > display_width - car_width or x < 0: # wide our screen is\n gameExit = True\n crash()\n\n # update our display\n pygame.display.update() # update that one THING in the brackets\n # no parameters, it updates the entire SURFACE (window)\n # pygame.display.flip() - ALWAYS UPDATE THE ENTIRE SURFACE\n\n # move the frame on or how fast are we gonna do this stuff\n clock.tick(60) # 60 fps - higher the fps - more smooth things are\n\ngame_loop() # GAME_LOOP RUNNING\npygame.quit() # stop pygame from running\nquit() # stop everything else \"are you sure?\" in IDLE\n","sub_path":"Python/Racey/Racey5.py","file_name":"Racey5.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"468798762","text":"__author__ = 'shmakovs'\n\nimport Helper1603\n\nSeedsFileName = \"Seeds.tsv\"\n#SeedsFileName = \"FalseRun/EffectorSeeds.tsv\"\n\n#IslandsFileName = \"Test.pty\"\nIslandsFileName = \"Islands.pty\"\nIslandsFileName = \"Islands.ann_CRISPR\"\n#IslandsFileName = \"Islands.ann_w_types\"\n#IslandsFileName = \"/panfs/pan1/prokdata/DefenseIslands/DefenseIslands_Complete.ann_clust_profiles\"\n\n# AllLoci = set()\n#\n# for Line in open(IslandsFileName, \"r\"):\n# LineValues = Line[:-1].split(\"\\t\")\n#\n# if LineValues[0] == \"===\":\n# continue\n#\n# AllLoci.add(LineValues[1] + LineValues[4])\n#\n#\n# for Line in open(SeedsFileName, \"r\"):\n# LineValues = Line[:-1].split(\"\\t\")\n#\n# if LineValues[0] == \"===\":\n# continue\n#\n# if not LineValues[2] + \"..\" + LineValues[3] + LineValues[1].split(\".\")[0] in AllLoci:\n# print(Line[:-1])\n\n\nLociSeeds = dict()\n\nContig = \"\"\nStart = 0\nEnd = 0\n\nfor Line in open(IslandsFileName, \"r\"):\n LineValues = Line[:-1].split(\"\\t\")\n\n if LineValues[0] == \"===\":\n if not Contig in LociSeeds:\n LociSeeds[Contig] = []\n LociSeeds[Contig].append([max(0, Start - 10000), End + 10000])\n\n Contig = \"\"\n Start = 0\n End = 0\n continue\n\n Coords = LineValues[1].split(\"..\")\n if Contig == \"\":\n Start = int(Coords[0])\n End = int(Coords[1])\n Contig = LineValues[4].split(\".\")[0]\n\n\nif not Contig in LociSeeds:\n LociSeeds[Contig] = []\nLociSeeds[Contig].append([Start, End])\n\nfor Line in open(SeedsFileName, \"r\"):\n LineValues = Line[:-1].split(\"\\t\")\n\n if not Helper1603.IsInSeeds(LineValues[1].split(\".\")[0], int(LineValues[2]), int(LineValues[3]), LociSeeds):\n 
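# The seed locus falls outside every island window (each island is
# padded by 10 kb on both sides above), so report it.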
print(Line[:-1])\n\n","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"275136214","text":"\"\"\"bioblastfactory.\"\"\"\nfrom typing import List\nfrom typing import Tuple\n\nfrom Bio.SeqRecord import SeqRecord\n\nfrom .blast import BioBlast\nfrom pyblast.blast.seqdb import SeqRecordDB\nfrom pyblast.log import logger\nfrom pyblast.utils import clean_records\n\n\nclass BioBlastFactory:\n \"\"\"Instance factory for :class:`BioBlast` instance that share the same.\n\n :class:`SeqRecordDB`.\n\n Usage:\n\n ::\n\n factory = BioBlastFactory()\n factory[\"plasmid_templates\"] = records1\n factory[\"primers\"] = records2\n factory[\"queries\"] = records3\n\n blast1 = factory(\"plasmid_templates\", \"queries\")\n blast2 = factory(\"primers\", \"queries\")\n \"\"\"\n\n def __init__(self, seq_db=None, span_origin=True, config=None):\n \"\"\"Initialize a new BioBlastFactory.\n\n :param seq_db: the optional SeqRecordDB. If not provided, a new one will be\n created.\n :type seq_db: SeqRecordDB\n \"\"\"\n if seq_db is None:\n self.db = SeqRecordDB()\n else:\n self.db = seq_db\n self.span_origin = span_origin\n self.record_groups = {}\n self.logger = logger(self)\n self.config = config\n\n def __setitem__(self, record_group_name: str, records: List[SeqRecord]):\n \"\"\"See add_records.\"\"\"\n self.add_records(records, record_group_name)\n\n def __getitem__(self, record_group_name):\n return self.record_groups[record_group_name]\n\n def add_records(\n self, records: List[SeqRecord], record_group_name: str\n ) -> Tuple[List[str], List[SeqRecord]]:\n \"\"\"Add records to the SeqRecordDB by keyword.\n\n :param records:\n :type records:\n :param record_group_name:\n :type record_group_name:\n :return:\n :rtype:\n \"\"\"\n self.logger.info(\n \"Adding {} records to '{}'\".format(len(records), record_group_name)\n )\n clean_records(records)\n keys, records = BioBlast.add_records(\n records, self.db, span_origin=self.span_origin\n )\n if record_group_name:\n self.record_groups.setdefault(record_group_name, list())\n self.record_groups[record_group_name] += records\n return keys, records\n\n def __call__(self, subject_key, query_key, config=None):\n \"\"\"Create a new BioBlast instance with the factory's SeqRecordDB.\n\n :param subject_key: the subject key\n :type subject_key: str\n :param query_key: the query key\n :type query_key: str\n :param config: BioBlast config\n :type config: dict\n :return:\n :rtype:\n \"\"\"\n self.logger.info(\"new Blast({}, {})\".format(subject_key, query_key))\n if isinstance(subject_key, str):\n subjects = self.record_groups[subject_key]\n else:\n subjects = []\n for key in subject_key:\n if issubclass(type(key), SeqRecord):\n subjects += key\n else:\n subjects += self.record_groups[key]\n if isinstance(query_key, str):\n queries = self.record_groups[query_key]\n else:\n queries = []\n for key in query_key:\n if issubclass(type(key), SeqRecord):\n queries += key\n else:\n queries += self.record_groups[key]\n if config is None:\n config = {}\n if self.config:\n config.update(self.config)\n return BioBlast(\n subjects=subjects,\n queries=queries,\n seq_db=self.db,\n span_origin=self.span_origin,\n config=config,\n )\n","sub_path":"pyblast/blast/bioblastfactory.py","file_name":"bioblastfactory.py","file_ext":"py","file_size_in_byte":3681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} 
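# --- Usage sketch for the BioBlastFactory above; the record-list names
# are placeholders taken in the spirit of the class docstring, and no
# API beyond what this file itself shows is assumed.
#
#     factory = BioBlastFactory(span_origin=True)
#     factory["templates"] = template_records   # List[SeqRecord]
#     factory["queries"] = query_records        # List[SeqRecord]
#     blast = factory("templates", "queries")   # BioBlast sharing one SeqRecordDB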
+{"seq_id":"372609889","text":"'''\nProgrammed by: Albert Chan\nProgrammed on: January 31, 2019\nProgrammed for: ICS3U1-04\nPurpose: Create a function which prompts the user to enter the roots of a\n randomly generated quadratic equation for Round 2 of the Math Quiz.\n All randomly generated quadratic equations will have 2 distinct real\n integer roots. Thus, no randomly generated quadratic equations will\n have no roots, only 1 root, nor any non-integer roots.\n'''\n\n# Modules are imported in order to invoke functions from the module\nimport random, math\n\n'''\nParameter: Not applicable for this function (no parameters)\nPurpose: This function prompts the user to enter the roots\n of a randomly quadratic equation (f(x) = ax^2 + bx + c) given,\n after this function is invoked. The random quadratic\n equations given in this function will always have\n 2 integer roots. After each question the user will\n be notified whether they answered the question\n correctly or not. This function will allow the user\n see an up-to-date score after each question.\n \nReturn: True if user answer correctly, False otherwise\n'''\n\n# Round #2 function\ndef round2():\n # Discriminant is originally defined as 0 (explained in next comment)\n d = 0\n\n '''\n The discriminant of a quadratic equation cannot be less than 0\n (if discriminant is than 0 there are no real roots); in this\n function the discriminant cannot be equal to 0 (if the discriminant\n is 0, there will be only be one real root); this function only asks\n the user to enter the roots of quadratic equations with 2 distinct\n real roots\n '''\n while d <= 0:\n \n # The standard form of quadratic equation is f(x) = ax2 + bx + c\n \n # Coefficient a is between -100 and 100 and is randomly generated\n a = random.randint(-100,100)\n\n '''\n Coefficient a in any quadratic equation cannot be equal to 0,\n thus coefficient a is randomly generated until it is not 0\n '''\n while a == 0:\n a = random.randint(-100,100)\n\n # Coefficient b is between -100 and 100 and is randomly generated\n b = random.randint(-100,100)\n\n # Coefficient c is between -100 and 100 and is randomly generated\n c = random.randint(-100,100)\n\n # Calculation of the discriminant (explanation of what the discriminant can be is explained previously)\n d = (b**2) - (4*a*c)\n\n # 2 roots of the quadratic equation are calculated\n rt1 = (-b-(math.sqrt(d)))/(2*a)\n rt2 = (-b+(math.sqrt(d)))/(2*a)\n\n '''\n Determines whether the 2 roots of the quadratic equation are integers; the 2 roots must be integers in this program;\n Coefficients are generated randomly until the 2 roots of the quadratic equation are integers\n '''\n while math.floor(rt1) != math.ceil(rt1) or math.floor(rt2) != math.ceil(rt2):\n\n # Discriminant is originally defined as 0 (explained in next comment)\n d = 0\n\n '''\n The discriminant of a quadratic equation cannot be less than 0\n (if discriminant is than 0 there are no real roots); in this\n function the discriminant cannot be equal to 0 (if the discriminant\n is 0, there will be only be one real root); this function only asks\n the user to enter the roots of quadratic equations with 2 distinct\n real roots\n '''\n while d <= 0:\n\n # The standard form of quadratic equation is f(x) = ax2 + bx + c\n \n # Coefficient a is between -100 and 100 and is randomly generated\n a = random.randint(-100,100)\n\n '''\n Coefficient a in any quadratic equation cannot be equal to 0,\n thus coefficient a is randomly generated until it is not 0\n '''\n while a == 0 :\n a = 
random.randint(-100,100)\n\n # Coefficient b is between -100 and 100 and is randomly generated\n b = random.randint(-100,100)\n\n # Coefficient c is between -100 and 100 and is randomly generated\n c = random.randint(-100,100)\n\n # Calculation of the discriminant (explanation of what the discriminant can be is explained previously)\n d = (b**2) - (4*a*c)\n\n # 2 roots of the quadratic equation are calculated\n rt1 = (-b-(math.sqrt(d)))/(2*a)\n rt2 = (-b+(math.sqrt(d)))/(2*a)\n\n # Formating the output of the quadratic equation to the user based on the values of coefficients (all cases considered)\n print(\"f(x) = \", end=\"\")\n print((\"-\" if a<0 else \"\")+(str(abs(a)) if abs(a)>1 else \"\")+\"x^2 \", end=\"\")\n print((\"- \" if b<0 else \"\")+((\"+ \" if b>0 else \"\")+str(abs(b)) if abs(b)>1 else \"\")+(\"x \" if b!=0 else \"\"), end=\"\")\n print((\"- \" if c<0 else \"\")+((\"+ \" if c>0 else \"\")+str(abs(c)) if abs(c)>1 else \"\"))\n\n # User enters what they believe to be the two roots (separated by a comma) of the quadratic equation that is given (this is not a multiple choice quiz)\n ans1, ans2 = map(int, input(\"Enter the 2 roots separated by a comma: \").split(\",\"))\n\n # Determining whether the two roots the user enters are correct\n # If the user enters both the correct rooots\n if ans1 == rt1 and ans2 == rt2:\n\n print()\n \n # Outputs if the user enters both the 2 correct roots\n print(\"Congratulations! You are correct!\")\n\n # Used to determine the score of the user\n return True\n\n # If the user enters both the correct roots\n elif ans2 == rt1 and ans1 == rt2:\n\n print()\n \n # Outputs if the user enters both the 2 correct roots\n print(\"Congratulations! You are correct!\")\n # Used to determine the score of the user\n return True\n\n # If the user enters at least one incorrect root\n else:\n\n print()\n\n print(\"You are incorrect!\")\n \n # Outputs the 2 correct roots\n print(\"The 2 correct roots are:\", str(int(round(rt1))) + \",\" + str(int(round(rt2))))\n \n # Used to determine the score of the user\n return False\n","sub_path":"round2.py","file_name":"round2.py","file_ext":"py","file_size_in_byte":6180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"140408505","text":"\"\"\"\nCreated on September 17, 2019\n\n@author: Yi Wang\n\"\"\"\n\nimport datetime\n\nfrom mylib.pro_omi_no2_l3.plot_omi_no2_l3 import plot_monthly_anomaly_for_6yrs\n\n#######################\n# Start user parameters\n#\n\nstart_year = '2005'\nend_year = '2019'\n\nmean_dir = '../data/multi_year_monthly_mean/'\n\nmonth_dir = '../data/monthly_mean/'\n\nfig_dir = '../figure/anomaly_6yrs/'\n\n\nyear_list = ['2014', '2015', '2016', \\\n '2017', '2018', '2019']\n\nmonth_list = [\n '01', '02', '03', '04', '05', '06',\n '07', '08', '09', '10', '11', '12'\n ]\n#month_list = ['07']\n\nregion_limit = [15.0, 60.0, 40.0, 100.0]\nname = 'India_'\n\nhspace=0.2\n\nvmax_ano_dict = {\n '01' : 0.1,\n '02' : 0.1,\n '03' : 0.1,\n '04' : 0.05,\n '05' : 0.05,\n '06' : 0.05,\n '07' : 0.05,\n '08' : 0.05,\n '09' : 0.05,\n '10' : 0.1,\n '11' : 0.1,\n '12' : 0.1,\n }\n\nverbose = True\n\n\n#\n# End user parameters\n#####################\n\nfor month in month_list:\n\n print('------------------------------------------------------')\n print('processing ' + month)\n\n vmax_ano = vmax_ano_dict[month]\n vmin_ano = -vmax_ano\n\n # Calculate multi-year mean of monthly OMI L3 NO2 and output.\n plot_monthly_anomaly_for_6yrs(mean_dir, month_dir, \n start_year, end_year, 
year_list, month, verbose=verbose,\n dir_fig=fig_dir, region_limit=region_limit,\n name=name, hspace=hspace,\n vmin_ano=vmin_ano, vmax_ano=vmax_ano)\n\n\n","sub_path":"monthly_OMI_NO2/code/main_plot_monthly_no2_anomaly_6yrs_India.py","file_name":"main_plot_monthly_no2_anomaly_6yrs_India.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"157438354","text":"import json\nimport logging\nimport os\n\nfrom boto.s3.connection import S3Connection\nfrom boto.exception import S3ResponseError\nfrom boto.s3.key import Key\n\nfrom cluster import Cluster\nfrom dbhelper import parseConnectionString, getS3Connection\n\nclass ProcessedClusterStore:\n \"\"\"\n Store for processed clusters.\n\n Contains functions for CRUD operations on processed clusters\n \"\"\"\n\n def __init__(self):\n self.bucketConnString = os.environ['PROCESSEDCLUSTERS_BUCKET']\n\n def __getBucket(self):\n bucketConnParams = parseConnectionString(self.bucketConnString)\n conn = getS3Connection(self.bucketConnString)\n\n return conn.get_bucket(bucketConnParams['bucketName'], validate=False)\n\n def getProcessedCluster(self, cluster):\n \"\"\"\n If the cluster has been previously processed and cached, returns cached result.\n Otherwise, processes if freshly and returns the result after saving to cache\n \"\"\"\n\n clusterId = cluster.id\n try:\n k = Key(self.__getBucket())\n k.key = clusterId\n keyContents = k.get_contents_as_string()\n logging.info(\"Preprocessed cluster found for: \" + clusterId)\n\n preProcessedCluster = Cluster([])\n preProcessedCluster.deserializeFromString(keyContents)\n return preProcessedCluster\n except S3ResponseError:\n logging.info(\"Preprocessed cluster not found for: \" + clusterId)\n return self.processAndSaveCluster(cluster)\n\n def processAndSaveCluster(self, cluster):\n \"\"\"\n Process the cluster and store it in cache.\n Returns the processed cluster.\n \"\"\"\n clusterId = cluster.id\n k = Key(self.__getBucket())\n k.key = clusterId\n\n cluster.process()\n logging.info(\"Processed cluster with id: \" + clusterId)\n k.set_contents_from_string(cluster.serializeToString())\n logging.info(\"Processed cluster saved for: \" + clusterId)\n return cluster\n\n def deleteClusters(self, clusters):\n keysToDelete = [cluster.id for cluster in clusters]\n bucket = self.__getBucket()\n bucket.delete_keys(keysToDelete)\n","sub_path":"newsApp/processedClusterStore.py","file_name":"processedClusterStore.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"513254706","text":"import face_recognition\nimport cv2\nimport sys\nimport os \n\n# open camera\nvideo_capture = cv2.VideoCapture(0)\n\nf=open(\"./faces/all_names.txt\", \"r\")\nfl = f.readlines()\nd={}\n\nknown_face_encodings = []\nknown_face_names = []\n\n# foreach name in the all_names.txt document, get a photo in either png or jpg format.\nfor x in fl:\n if (os.path.isfile(\"./faces/scanned_photo_\"+(x.rstrip())+\".png\")):\n d[\"face_{0}\".format(x.rstrip())]=face_recognition.load_image_file(\"./faces/scanned_photo_\"+(x.rstrip())+\".png\")\n d[\"face_encode_{0}\".format(x.rstrip())]=face_recognition.face_encodings(d[\"face_{0}\".format(x.rstrip())])[0]\n known_face_encodings.append(d[\"face_encode_{0}\".format(x.rstrip())])\n known_face_names.append(x.rstrip())\n elif (os.path.isfile(\"./faces/scanned_photo_\"+(x.rstrip())+\".jpg\")):\n 
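# Same enrollment path as the .png branch above, only for a .jpg photo;
# the two branches differ solely in the file extension.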
d[\"face_{0}\".format(x.rstrip())]=face_recognition.load_image_file(\"./faces/scanned_photo_\"+(x.rstrip())+\".jpg\")\n d[\"face_encode_{0}\".format(x.rstrip())]=face_recognition.face_encodings(d[\"face_{0}\".format(x.rstrip())])[0]\n known_face_encodings.append(d[\"face_encode_{0}\".format(x.rstrip())])\n known_face_names.append(x.rstrip())\n\n\n# Initialize some variables\nface_locations = []\nface_encodings = []\nface_names = []\nprocess_this_frame = True\n\nwhile True:\n # Grab a single frame of video\n ret, frame = video_capture.read()\n\n # Resize frame of video to 1/4 size for faster face recognition processing\n small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)\n\n # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)\n rgb_small_frame = small_frame[:, :, ::-1]\n\n # Only process every other frame of video to save time\n if process_this_frame:\n # Find all the faces and face encodings in the current frame of video\n face_locations = face_recognition.face_locations(rgb_small_frame)\n face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)\n\n face_names = []\n for face_encoding in face_encodings:\n # See if the face is a match for the known face(s)\n matches = face_recognition.compare_faces(known_face_encodings, face_encoding)\n name = \"Unknown\"\n\n # If a match was found in known_face_encodings, just use the first one.\n if True in matches:\n first_match_index = matches.index(True)\n name = known_face_names[first_match_index]\n \n\n face_names.append(name)\n\n # print and flush the data (to electron)\n print(face_names)\n sys.stdout.flush()\n\n process_this_frame = not process_this_frame\n\n# Release handle to the webcam\nvideo_capture.release()\ncv2.destroyAllWindows()","sub_path":"plugins/facial_test.py","file_name":"facial_test.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"255591186","text":"# Please complete the TODO items in this code\n\nimport asyncio\n\nfrom confluent_kafka import Consumer, Producer\nfrom confluent_kafka.admin import AdminClient, NewTopic\n\n\nBROKER_URL = \"PLAINTEXT://localhost:9092\"\n\n\ndef topic_exists(client, topic_name):\n \"\"\"Checks if the given topic exists\"\"\"\n # TODO: Check to see if the given topic exists\n # See: https://docs.confluent.io/current/clients/confluent-kafka-python/#confluent_kafka.Consumer.list_topics\n cluster_metadata = client.list_topics(timeout=5)\n return cluster_metadata.topics.get(topic_name) is not None\n\n\ndef create_topic(client, topic_name):\n \"\"\"Creates the topic with the given topic name\"\"\"\n # TODO: Create the topic. Make sure to set the topic name, the number of partitions, the\n # replication factor. 
Additionally, set the config to have a cleanup policy of compact, a\n # compression type of lz4, delete retention milliseconds of 2 seconds, and a file delete delay\n # milliseconds of 2 seconds.\n #\n # See: https://docs.confluent.io/current/clients/confluent-kafka-python/#confluent_kafka.admin.NewTopic\n # See: https://docs.confluent.io/current/installation/configuration/topic-configs.html\n futures = client.create_topics(\n [\n NewTopic(topic_name, \n num_partitions=5, \n replication_factor=1,\n config = {\n \"cleanup.policy\": \"compact\",\n \"compression.type\": \"lz4\",\n \"delete.retention.ms\": 2000,\n \"file.delete.delay.ms\": 2000\n }\n )\n ]\n )\n\n for topic, future in futures.items():\n try:\n future.result()\n print(\"topic created\")\n except Exception as e:\n print(f\"failed to create topic {topic_name}: {e}\")\n raise\n\n\ndef main():\n \"\"\"Checks for topic and creates the topic if it does not exist\"\"\"\n client = AdminClient({\"bootstrap.servers\": BROKER_URL})\n\n #\n # TODO: Decide on a topic name\n #\n topic_name = \"example_topic\"\n exists = topic_exists(client, topic_name)\n print(f\"Topic {topic_name} exists: {exists}\")\n\n if exists is False:\n create_topic(client, topic_name)\n\n try:\n asyncio.run(produce_consume(topic_name))\n except KeyboardInterrupt as e:\n print(\"shutting down\")\n\n\nasync def produce_consume(topic_name):\n \"\"\"Runs the Producer and Consumer tasks\"\"\"\n t1 = asyncio.create_task(produce(topic_name))\n t2 = asyncio.create_task(consume(topic_name))\n await t1\n await t2\n\n\nasync def produce(topic_name):\n \"\"\"Produces data into the Kafka Topic\"\"\"\n p = Producer({\"bootstrap.servers\": BROKER_URL})\n\n curr_iteration = 0\n while True:\n p.produce(topic_name, f\"iteration {curr_iteration}\".encode(\"utf-8\"))\n curr_iteration += 1\n await asyncio.sleep(0.5)\n\n\nasync def consume(topic_name):\n \"\"\"Consumes data from the Kafka Topic\"\"\"\n c = Consumer({\"bootstrap.servers\": BROKER_URL, \"group.id\": \"0\"})\n c.subscribe([topic_name])\n while True:\n message = c.poll(1.0)\n if message is None:\n print(\"no message received by consumer\")\n elif message.error() is not None:\n print(f\"error from consumer {message.error()}\")\n else:\n print(f\"consumed message {message.key()}: {message.value()}\")\n await asyncio.sleep(2.5)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"tutorials/create_topic.py","file_name":"create_topic.py","file_ext":"py","file_size_in_byte":3489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"611435458","text":"from wtg.alerts.events_monitor import EventsMonitor\nfrom math import trunc\n\n\nclass AlertsManager(object):\n\n def __init__(self, log, db_session, alertsDataSource, pushService, forecastManager):\n self.log = log\n self.db_session = db_session\n self.alertsDataSource = alertsDataSource\n self.pushService = pushService\n self.forecastManager = forecastManager\n self.checkInterval = EventsMonitor.CHECK_INTERVAL\n self.scheduler = None\n\n #Start monitoring for alertable events.\n self.eventMonitor = EventsMonitor(log).start(self.sendAlerts)\n ##########\n\n\n def sendAlerts(self):\n self.sendEventAlerts()\n self.sendSummaryAlerts()\n\n\n def sendEventAlerts(self):\n eventUserTuples = self.alertsDataSource.alertableEventsWithUsers()\n if len(eventUserTuples) > 0:\n for eventUser in eventUserTuples:\n event = eventUser[0]\n user = eventUser[1]\n\n forecast = self.forecastManager.forecastForEvent(event, user)\n message = 
self.eventMessage(event, user, forecast)\n if message is not None:\n self.log.info(\"Message: %s\" % message.encode('utf-8'))\n self.pushService.sendAlert(user.apns_token, message)\n else:\n self.log.info(\"No alert triggered.\")\n\n\n def sendSummaryAlerts(self):\n eventsPerUserList = self.alertsDataSource.summarizableEventsPerUser()\n if eventsPerUserList is not None and len(eventsPerUserList) > 0:\n self.alertsDataSource.cleanUpData()\n for dict in eventsPerUserList:\n user, eventList = list(dict.items())[0]\n message = None\n result = self.summaryMessageForUser(user, eventList)\n message = result[0]\n alertableEventCount = result[1]\n\n if message is not None:\n self.log.info(\"Summary: %s\" % message.encode('utf-8'))\n self.pushService.sendAlert(user.apns_token, message, alertableEventCount)\n else:\n self.log.info(\"No alert triggered.\")\n\n\n def summaryMessageForUser(self, user, eventList):\n ''' Sample: 'Rain and feels-like high temps to affect 3 calendar events tomorrow.'\n Returns tuple: (message, eventCount)\n '''\n precipType = None\n highTempAlertable = False\n lowTempAlertable = False\n feelsLikeHighAlertable = False\n feelsLikeLowAlertable = False\n alertableEventCount = 0\n\n for event in eventList:\n forecast = self.forecastManager.forecastForEvent(event, user)\n if forecast is not None:\n eventAlertable = False\n if forecast.highestPrecipProbability >= user.precip_thresh: # and user.precip_thresh > 0: *****DEBUG ONLY\n precipType = forecast.precipType\n eventAlertable = True\n if forecast.highestTemperature >= user.high_temp_thresh:\n highTempAlertable = True\n eventAlertable = True\n if forecast.lowestTemperature <= user.low_temp_thresh:\n lowTempAlertable = True\n eventAlertable = True\n if forecast.highestFeelsLike >= user.high_temp_thresh:\n feelsLikeHighAlertable = True\n eventAlertable = True\n if forecast.lowestFeelsLike <= user.low_temp_thresh:\n feelsLikeLowAlertable = True\n eventAlertable = True\n if eventAlertable == True:\n alertableEventCount += 1\n\n if alertableEventCount == 0:\n return (None, None)\n\n message = None\n\n if precipType is not None:\n message = precipType.capitalize()\n\n if highTempAlertable:\n if message == None:\n message = \"High temps\"\n else:\n message += \" and high temps\"\n elif lowTempAlertable:\n if message == None:\n message = \"Low temps\"\n else:\n message += \" and low temps\"\n\n if not highTempAlertable and not lowTempAlertable:\n if feelsLikeHighAlertable:\n if message == None:\n message = \"Feels-like high temps\"\n else:\n message += \" and feels-like high temps\"\n elif feelsLikeLowAlertable:\n if message == None:\n message = \"Feels-like low temps\"\n else:\n message += \" and feels-like low temps\"\n\n eventsString = \"events\"\n if alertableEventCount == 1:\n eventsString = \"event\"\n\n message = \"{} to affect {} upcoming {}.\".format(message, alertableEventCount, eventsString)\n return (message, alertableEventCount)\n\n\n def eventMessage(self, event, user, forecast):\n ''' if returns None, then no alerts were triggered '''\n\n message = self.forecastMessage(event, user, forecast)\n if message is not None:\n message = \"{} {}\".format(message, self.concludingMessageForEvent(event, forecast))\n return message\n return None\n\n\n def forecastMessage(self, event, user, forecast):\n message = None\n if forecast.highestPrecipProbability >= user.precip_thresh:\n message = self.precipMessage(forecast)\n\n if forecast.highestTemperature >= user.high_temp_thresh:\n if message is None:\n message = \"High of 
{}°\".format(forecast.highestTemperature)\n else:\n message += \" and high of {}°\".format(forecast.highestTemperature)\n\n if forecast.lowestTemperature <= user.low_temp_thresh:\n if message is None:\n message = \"Low of {}°\".format(forecast.lowestTemperature)\n else:\n message += \" and low of {}°\".format(forecast.lowestTemperature)\n\n if forecast.highestFeelsLike >= user.high_temp_thresh:\n if message is None:\n message = \"Will feel like {}°\".format(forecast.highestFeelsLike)\n else:\n message += \" and feel like {}°\".format(forecast.highestFeelsLike)\n\n if forecast.lowestFeelsLike <= user.low_temp_thresh:\n if message is None:\n message = \"Will feel like {}°\".format(forecast.lowestFeelsLike)\n else:\n message += \" and will feel like {}°\".format(forecast.lowestFeelsLike)\n return message\n\n\n def precipMessage(self, forecast):\n prettyPercentage = \"{}%\".format(trunc(forecast.highestPrecipProbability * 100))\n return \"{} chance of {}\".format(prettyPercentage, forecast.precipType)\n\n\n def highTemperatureMessage(self, forecast):\n return \"{}°\"\n\n\n def concludingMessageForEvent(self, event, forecast):\n ''' Samples: \"during Company Off-site (Feb 22, 1PM - Feb 23, 2PM PST)\"\n \"during Morning Bike commute (tmr Thurs at 10am-11am)\" '''\n\n prettyTimeRange = event.pretty_date_range\n title = event.title\n if len(title) > 33:\n title = event.title[0:30] + \"...\"\n\n return \"during {} ({})\".format(title, prettyTimeRange)\n","sub_path":"wtg/alerts/alerts_manager.py","file_name":"alerts_manager.py","file_ext":"py","file_size_in_byte":7366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"278182486","text":"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %% Change working directory from the workspace root to the ipynb file location. 
Turn this addition off with the DataScience.changeDirOnImportExport setting\n# ms-python.python added\nimport os\ntry:\n\tos.chdir(os.path.join(os.getcwd(), 'es.rcs.tfm.nlp/src/main/python'))\n\tprint(os.getcwd())\nexcept:\n\tpass\n\n# %%\nimport sys\nimport os\nimport tensorflow as tf\nimport shutil\nsys.path.append('../')\nfrom ner.embeddings_resolver import BertEmbeddingsResolver\nfrom ner.ner_model_saver import NerModelSaver\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.ml import Pipeline\n\n# Manully add sparknlp developer library\nsparknlp_path = '../../'\nif sparknlp_path:\n sys.path.append(sparknlp_path)\n\nfrom sparknlp.annotator import *\nfrom sparknlp.common import *\nfrom sparknlp.base import *\nfrom sparknlp.embeddings import *\nimport sparknlp \n\nimport time\nimport zipfile\n#Setting location of resource Directory\nresource_path= \"../../../src/test/resources/\"\n\n\n# %%\nspark = sparknlp.start()\n\nprint(\"Spark NLP version\")\nsparknlp.version()\nprint(\"Apache Spark version\")\nspark.version\n\n\n# %%\ndef download_model(url, folder):\n import os\n from pathlib import Path\n import urllib.request\n import zipfile\n zip_file = folder + \".zip\"\n if not Path(zip_file).is_file():\n print(\"Downloading \" + url)\n urllib.request.urlretrieve(url, zip_file)\n if not os.path.exists(folder):\n print(\"Unziping \")\n zip_ref = zipfile.ZipFile(zip_file, 'r')\n zip_ref.extractall(\"./\")\n zip_ref.close()\n\n\ndef get_service_token_ids(source_bert_folder):\n start_id = 0\n end_id = 0\n with open(os.path.join(source_bert_folder, \"vocab.txt\")) as f:\n for line, row in enumerate(f):\n row = row.strip()\n if row == '[CLS]':\n start_id = line\n if row == '[SEP]':\n end_id = line\n return (start_id, end_id)\n\n\ndef create_model(source_bert_folder, export_dir, max_sentence_length = 128, batch_size = 32):\n tf.reset_default_graph()\n is_cased = 'uncased' not in source_bert_folder.lower()\n print(\"source_bert_folder: {}\".format(source_bert_folder))\n print(\"is_cased: {}\".format(is_cased))\n print(\"lowercase: {}\".format(not is_cased))\n resolver = BertEmbeddingsResolver(source_bert_folder, max_sentence_length, lowercase = not is_cased)\n saver = NerModelSaver(resolver, None)\n saver.save_models(export_dir)\n resolver.session.close()\n shutil.copyfile(os.path.join(source_bert_folder, 'vocab.txt'),\n os.path.join(export_dir, 'vocab.txt'))\n\n dim = resolver.config.hidden_size\n layers = resolver.config.num_hidden_layers\n print(\"Number of hidden units: {}\".format(dim))\n print(\"Number of layers: {}\".format(layers))\n \n model = BertEmbeddings.loadFromPython(export_dir, spark) .setInputCols([\"sentence\", \"token\"]) .setOutputCol(\"embeddings\") .setMaxSentenceLength(max_sentence_length) .setBatchSize(batch_size) .setDimension(dim) .setCaseSensitive(is_cased)\n \n return model\n\n\ndef download_and_convert(url, name, max_sentence_length = 128, batch_size = 32, dst_folder = 'models'):\n if not os.path.exists(dst_folder):\n os.makedirs(dst_folder)\n download_model(url, name)\n model = create_model(name, name + 'export_dir', max_sentence_length, batch_size)\n # Remove but it's possible to use this model\n shutil.rmtree(name + 'export_dir')\n shutil.rmtree(name)\n final_model_name = name + '_M-{}'.format(max_sentence_length) + '_B-{}'.format(batch_size)\n model.write().overwrite().save(os.path.join(dst_folder, final_model_name))\n print(\"BERT model has been saved: {}\".format(dst_folder+'/'+final_model_name))\n return model\n\n# %% [markdown]\n# ## Find models and source 
code here https://github.com/google-research/bert \n\n# %%\n# 1. Base uncased\nurl = 'https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-12_H-768_A-12.zip'\nname = 'uncased_L-12_H-768_A-12'\ndownload_and_convert(url, name, max_sentence_length = 128, batch_size = 32)\n\n\n# %%\n# 2. Large uncased\nurl = 'https://storage.googleapis.com/bert_models/2018_10_18/uncased_L-24_H-1024_A-16.zip'\nname = 'uncased_L-24_H-1024_A-16'\ndownload_and_convert(url, name, max_sentence_length = 128, batch_size = 32)\n\n\n# %%\n# 3. Base cased\nurl = 'https://storage.googleapis.com/bert_models/2018_10_18/cased_L-12_H-768_A-12.zip'\nname = 'cased_L-12_H-768_A-12'\ndownload_and_convert(url, name, max_sentence_length = 128, batch_size = 32)\n\n\n# %%\n# 4. Large cased\nurl = 'https://storage.googleapis.com/bert_models/2018_10_18/cased_L-24_H-1024_A-16.zip'\nname = 'cased_L-24_H-1024_A-16'\ndownload_and_convert(url, name, max_sentence_length = 128, batch_size = 32)\n\n\n# %%\n# 5. Multilingual Cased (New, recommended)\nurl = 'https://storage.googleapis.com/bert_models/2018_11_23/multi_cased_L-12_H-768_A-12.zip'\nname = 'multi_cased_L-12_H-768_A-12'\ndownload_and_convert(url, name, max_sentence_length = 128, batch_size = 32)\n\n\n# %%\nprint('All generated models are inside \"models/\" directory')\n\n\n# %%\n\n\n\n","sub_path":"es.rcs.tfm/es.rcs.tfm.nlp/src/test/python/sparknlp-2.3.5/create_bert.py","file_name":"create_bert.py","file_ext":"py","file_size_in_byte":5149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"51799438","text":"from ceqr.helper.geocode import get_hnum, get_sname, g, GeosupportError, create_geom, geo_parser\nfrom ceqr.helper.engines import recipe_engine, edm_engine, ceqr_engine\nfrom ceqr.helper.config_loader import load_config\nfrom ceqr.helper.exporter import exporter\nfrom multiprocessing import Pool, cpu_count\nfrom urllib.request import Request, urlopen\nfrom bs4 import BeautifulSoup\nimport ssl\nimport pandas as pd\nimport geopandas as gpd\nfrom pathlib import Path\nimport numpy as np\nimport os\nimport re\n\ndef geocode(inputs):\n hnum = inputs.get('housenum', '')\n sname = inputs.get('streetname', '')\n zip_code = inputs.get('zipcode', '')\n borough_code = inputs.get('borough', '')\n street_name_1 = inputs.get('streetname_1', '')\n street_name_2 = inputs.get('streetname_2', '')\n\n hnum = str('' if hnum is None else hnum)\n sname = str('' if sname is None else sname)\n zip_code = str('' if zip_code is None else zip_code)\n borough_code = str('' if borough_code is None else borough_code)\n street_name_1 = str('' if street_name_1 is None else street_name_1)\n street_name_2 = str('' if street_name_2 is None else street_name_2)\n\n try:\n geo = g['1B'](street_name=sname, house_number=hnum, zip_code=zip_code)\n geo = geo_parser(geo)\n geo.update(dict(geo_function='1B'))\n except GeosupportError:\n try:\n geo = g['1B'](street_name=sname, house_number=hnum, zip_code=zip_code, mode='tpad')\n geo = geo_parser(geo)\n geo.update(dict(geo_function='1B-tpad'))\n except GeosupportError:\n try:\n if street_name_1 != '':\n geo = g['2'](street_name_1=street_name_1, street_name_2=street_name_2, borough_code=borough_code)\n geo = geo_parser(geo)\n geo.update(dict(geo_function='Intersection'))\n else:\n geo = g['1B'](street_name=sname, house_number=hnum, zip_code=zip_code)\n geo = geo_parser(geo)\n geo.update(dict(geo_function='1B'))\n except GeosupportError as e:\n geo = e.result\n geo = geo_parser(geo)\n 
geo.update(dict(geo_function=''))\n\n geo.update(inputs)\n return geo\n\ndef clean_address(x):\n x = '' if x is None else x\n sep = ['|', '&', '@', ' AND ']\n for i in sep:\n x = x.split(i, maxsplit=1)[0]\n return x\n\ndef clean_streetname(x, n):\n x = '' if x is None else x\n if ('&' in x)|(' AND ' in x.upper()):\n x = re.split('&| AND | and ',x)[n]\n else: x = ''\n return x\n\nif __name__ == \"__main__\":\n # Load configuration\n config = load_config(Path(__file__).parent/'config.json')\n input_table = config['inputs'][0]\n input_table_nyc = config['inputs'][1]\n input_table_boro = config['inputs'][2]\n output_table = config['outputs'][0]['output_table']\n DDL = config['outputs'][0]['DDL']\n output_table_schema = config['outputs'][0]['output_table'].split('.')[0]\n\n # import data\n df = pd.read_sql(f'''SELECT * FROM {input_table}''', con=recipe_engine)\n nyc = pd.read_sql(f'''SELECT zipcode, UPPER(city) AS city FROM {input_table_nyc}''', con=recipe_engine)\n zip_boro = pd.read_sql(f'''SELECT zipcode, borough FROM {input_table_boro}''', con=recipe_engine)\n corr = pd.read_csv('https://raw.githubusercontent.com/NYCPlanning/ceqr-app-data/master/ceqr/data/ceqr_input_research.csv')\n\n # rename column names\n df.rename(columns={\"facility_zip\": \"zipcode\"}, inplace=True)\n corr.rename(columns={\"id\": \"permit_id\", \"location\": \"facility_location\", \"city\": \"facility_city\"}, inplace=True)\n corr = corr[corr.datasource==output_table_schema].drop(columns='datasource')\n\n # fill boroughs, update location based on ceqr_input_research\n df['permit_id'] = df['permit_id'].apply(lambda x: x.replace(')','').strip() if x!=None else x)\n df['facility_city'] = df['facility_city'].apply(lambda x: x.upper() if x!=None else x)\n df = pd.merge(df, zip_boro, how = 'left', on = 'zipcode')\n df = pd.merge(df, corr, how = 'left', on =['permit_id','facility_location', 'facility_city'])\n df.fillna({'correction':'', 'longitude':'', 'latitude':''}, inplace = True)\n\n # filter out cities or zipcode outside NYC\n df['facility_city'] = df.facility_city.apply(lambda x: x.upper() if x != None else '')\n df = df[(df.facility_city.isin(nyc.city.values))|(df.zipcode.isin(nyc.zipcode.values))|\n (df.facility_city == '')|(df.zipcode == '')]\n\n # generate inputs for geocoding\n df['address'] = df['facility_location'].apply(lambda x: clean_address(x))\n df['address'] = np.where(df.correction!='',df.correction,df.address) # correct the locations in ceqr_input_research\n df['housenum'] = df['address'].apply(get_hnum)\\\n .apply(lambda x: x.split('/',maxsplit=1)[0] if x != None else x)\n df['streetname'] = df['address'].apply(get_sname)\n df['streetname_1'] = df['facility_location'].apply(lambda x: clean_streetname(x, 0)).apply(get_sname)\n df['streetname_2'] = df['facility_location'].apply(lambda x: clean_streetname(x, -1)).apply(get_sname)\n\n records = df.to_dict('records')\n\n # geocoding\n with Pool(processes=cpu_count()) as pool:\n it = pool.map(geocode, records, 10000)\n df = pd.DataFrame(it)\n df = df[df['geo_grc'] != '71']\n\n # fill lat, lon based on ceqr_input_research\n df['geo_function'] = np.where((df.longitude!='')&(df.geo_x_coord=='')&(df.geo_longitude==''),'DCP_Manual',df.geo_function)\n df['geo_longitude'] = np.where(df.geo_function=='DCP_Manual',df.longitude,df.geo_longitude)\n df['geo_latitude'] = np.where(df.geo_function=='DCP_Manual',df.latitude,df.geo_latitude)\n\n # generate geom\n df['geo_address'] = None\n df['geo_longitude'] = pd.to_numeric(df['geo_longitude'],errors='coerce')\n 
df['geo_latitude'] = pd.to_numeric(df['geo_latitude'],errors='coerce')\n df = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.geo_longitude, df.geo_latitude))\n df['geom'] = df['geometry'].apply(lambda x: None if np.isnan(x.xy[0]) else str(x))\n\n # remove geo_lat and geo_lon for manual research \n df['geo_longitude'] = np.where(df.geo_function=='DCP_Manual',None,df.geo_longitude)\n df['geo_latitude'] = np.where(df.geo_function=='DCP_Manual',None,df.geo_latitude)\n\n ################# webscrape title v permit url###############\n url= 'http://www.dec.ny.gov/dardata/boss/afs/issued_atv.html'\n hdr = {'User-Agent': 'Mozilla/5.0'}\n req = Request(url,headers=hdr)\n gcontext = ssl.SSLContext()\n page = urlopen(req, context=gcontext)\n soup = BeautifulSoup(page, features=\"html.parser\")\n tb = soup.find('table', class_='dataTable')\n data = []\n for info in tb.find_all('td'):\n row = info.find('a')\n if row != None:\n if row.get_text('target')!='PRR':\n result = {}\n url_prefix = 'http://www.dec.ny.gov/dardata/boss/afs/'\n result['permit_id'] = row.get_text('target')\n result['url'] = url_prefix + row['href']\n data.append(result)\n permit_url_lookup = pd.DataFrame.from_dict(data, orient='columns')\n\n ###################### finalizing ####################\n # merge url to title v\n df = pd.merge(df, permit_url_lookup, how = 'left', on = 'permit_id')\n df['url_to_permit_text'] = np.where(df.url_to_permit_text != df.url, df.url, df.url_to_permit_text)\n\n # deduping\n SQL = f'''\n UPDATE {output_table} SET geo_address=geo_housenum||' '||geo_streetname,\n geom = (CASE\n WHEN geo_function = 'Intersection'\n THEN ST_TRANSFORM(ST_SetSRID(ST_MakePoint(geo_x_coord,geo_y_coord),2263),4326)\n ELSE geom\n END);\n\n ALTER TABLE {output_table}\n ADD COLUMN id SERIAL PRIMARY KEY;\n\n DELETE FROM {output_table}\n WHERE id NOT IN(\n WITH date AS(\n SELECT facility_name||address AS facility, MAX(issue_date::date) as latest_issue_date\n FROM {output_table}\n GROUP BY facility_name||address\n )\n SELECT min(id)\n FROM {output_table} p, date d\n WHERE p.facility_name||address = d.facility\n AND p.issue_date::date = d.latest_issue_date\n OR d.latest_issue_date IS NULL\n GROUP BY p.facility_name||address\n )\n ;\n\n ALTER TABLE {output_table} DROP COLUMN id;\n\n DROP TABLE IF EXISTS {output_table_schema}.geo_rejects;\n SELECT * INTO {output_table_schema}.geo_rejects\n FROM {output_table}\n WHERE geom IS NULL;\n\n DELETE FROM {output_table}\n WHERE geom IS NULL;\n\n ALTER TABLE {output_table}\n DROP COLUMN geo_grc,\n DROP COLUMN geo_grc2,\n DROP COLUMN geo_reason_code,\n DROP COLUMN geo_message;\n '''\n\n os.system('echo \"exporting table ...\"')\n # export table to EDM_DATA\n exporter(df=df,\n output_table=output_table,\n DDL=DDL,\n sql=SQL,\n sep='~',\n geo_column='geom')","sub_path":"ceqr/recipes/nysdec_title_v_facility_permits/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":9299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"272182711","text":"\n# coding: utf-8\n\n# In[1]:\n\n\n#-------------------------------------------------------------------------------------------------------------------------------\n# By Alexandra Lee (July 2018) \n#\n# Take the difference of the encoded gene expression of the two extreme experimental conditions\n# This will be the offset for the latent space\n#-------------------------------------------------------------------------------------------------------------------------------\nimport os\nimport 
pandas as pd\nimport numpy as np\nnp.random.seed(123)\n\n\n# In[2]:\n\n\n# load arguments\nmax_file = os.path.join(os.path.dirname(os.getcwd()), \"encoded\", \"PA1673\", \"train_maxExp_2layer_10latent_encoded.txt\")\nmin_file = os.path.join(os.path.dirname(os.getcwd()), \"encoded\", \"PA1673\", \"train_minExp_2layer_10latent_encoded.txt\")\n\n# output files\nout_file = os.path.join(os.path.dirname(os.getcwd()), \"data\", \"PA1673\", \"train_offset_2layer_10latent.txt\")\n\n\n# In[3]:\n\n\n# read in data\nmax_data = pd.read_table(max_file, header=0, sep='\\t', index_col=0)\nmin_data = pd.read_table(min_file, header=0, sep='\\t', index_col=0)\n\nmin_data\n\n\n# In[4]:\n\n\nmax_data\n\n\n# In[5]:\n\n\n# Generate offset using average gene expression in original dataset\ntrain_offset_latent = min_data.values - max_data.values\ntrain_offset_latent = pd.DataFrame(train_offset_latent, columns=min_data.columns)\ntrain_offset_latent\n\n\n# In[6]:\n\n\n# output\ntrain_offset_latent.to_csv(out_file, sep='\\t')\n\n","sub_path":"exploration/scripts/nbconverted/latent_offset_continuum.py","file_name":"latent_offset_continuum.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"126817102","text":"#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n\r\nimport os\r\nimport mimetypes\r\nimport smtplib\r\nfrom email.header import Header\r\nfrom email.mime.base import MIMEBase\r\nfrom email.mime.text import MIMEText\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.image import MIMEImage\r\nfrom email import encoders\r\n\r\n\r\ndef send_email(login=None, mail=None, images=None, attachments=None, use_ssl=None):\r\n \"\"\"\r\n Send a regular email, optionally with images and attachments in other formats\r\n\r\n :param login: dict: login information, including the smtp server address, account and password:\r\n\r\n .. code:: python\r\n\r\n {\r\n 'smtpserver': 'smtp.163.com',\r\n 'username': 'xxx@163.com',\r\n 'password': 'xxx'\r\n }\r\n\r\n :param mail: dict, mail content, including the mail type, sender, recipients (use a list for several recipients), subject and body:\r\n\r\n .. code:: python\r\n\r\n {\r\n 'email_type': \"html\", # can be: plain or html\r\n 'from': 'xxx@163.com',\r\n 'to': 'xxx@126.com',\r\n 'subject': \"subject line\",\r\n 'content': \"body text\"\r\n }\r\n\r\n :param images: list, image attachments (a list of file paths)\r\n :param attachments: list, other attachments (a list of file paths)\r\n :param use_ssl: whether to use ssl (currently unused)\r\n :return:\r\n \"\"\"\r\n\r\n smtpserver = login.get(\"smtpserver\")\r\n username = login.get(\"username\")\r\n password = login.get(\"password\")\r\n\r\n email_type = mail.get('email_type')\r\n From = mail.get('from')\r\n To = mail.get('to')\r\n Subject = mail.get('subject')\r\n content = mail.get('content')\r\n\r\n if not From:\r\n From = username\r\n\r\n if isinstance(To, list): # if To is a list, join it with a separator\r\n To = ','.join(To)\r\n\r\n if not email_type or (email_type not in (\"plain\", \"html\")):\r\n email_type = \"html\"\r\n\r\n main_msg = MIMEMultipart() # build a MIME Multipart object as the root container\r\n\r\n # add the common headers\r\n main_msg['Subject'] = Subject\r\n main_msg['From'] = From\r\n main_msg['To'] = To\r\n # main_msg.preamble = content[:100] # preamble\r\n\r\n # build a MIMEText object as the visible mail body and attach it to the root container, always using utf-8\r\n text_msg = MIMEText(content, email_type, 'utf-8')\r\n main_msg.attach(text_msg)\r\n\r\n if images:\r\n for f in images:\r\n fp = open(f, 'rb')\r\n img_msg = MIMEImage(fp.read()) # without a _subtype argument, MIMEImage detects the image type itself\r\n fp.close()\r\n\r\n # set the attachment header\r\n basename = os.path.basename(f)\r\n img_msg.add_header('content-disposition',\r\n 'image' + str(images.index(f)), filename=basename)\r\n main_msg.attach(img_msg)\r\n\r\n if attachments:\r\n # build a MIMEBase object as the file attachment content and attach it to the root container\r\n for f in attachments:\r\n basename = os.path.basename(f)\r\n # determine the file MIME type\r\n if \".\" in basename: # with an extension\r\n content_type = mimetypes.types_map[\".\" + basename.split(\".\")[-1]]\r\n else: # without an extension\r\n content_type = 'application/octet-stream'\r\n maintype, subtype = content_type.split('/', 1)\r\n\r\n fp = open(f, 'rb')\r\n file_msg = MIMEBase(maintype, subtype)\r\n file_msg.set_payload(fp.read())\r\n fp.close()\r\n\r\n encoders.encode_base64(file_msg)\r\n\r\n # set the attachment header\r\n file_msg.add_header('Content-Disposition',\r\n 'attachment' + str(attachments.index(f)), filename=basename)\r\n main_msg.attach(file_msg)\r\n\r\n smtp = smtplib.SMTP(smtpserver)\r\n\r\n if use_ssl: # when using ssl\r\n smtp.ehlo()\r\n smtp.starttls()\r\n smtp.ehlo()\r\n\r\n smtp.login(username, password)\r\n # smtp.set_debuglevel(1) # debug mode\r\n smtp.sendmail(From, To, main_msg.as_string())\r\n smtp.quit()\r\n\r\n return True\r\n","sub_path":"send_email.py","file_name":"send_email.py","file_ext":"py","file_size_in_byte":3958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"545490616","text":"# @authors: Anthony Korkhin, Christopher Le, James Hoffoss, Khannaroth Phay, Patrick Saxton\n# Built/tested with Python v3.5\n# Programming Assignment, for ICS 370\n# Due: 4/27/2016\n# OBJECTIVE:\n# - Given a DIRECTORY, scan for *.xml and *.json files\n# - For each file, parse the information into the program\n# - Take the parsed information and generate a console-output of KEY VALUE as well\n# as generate and write to a 'master' .json file with all the data intact\n################################################################################\n\n\n# Block imports, none of which need be included in zip file under normal circumstance.\n# tkinter can cause issues on linux boxes, but if you have python 3.5 installed\n# it should be a default library.\nimport xml.etree.ElementTree as ET\nimport json\nimport tkinter\nfrom tkinter import filedialog\nfrom os import 
listdir\nfrom os.path import isfile, join\nimport configparser\n\n\n# The main method. Asks for the directory, sends that directory to be scanned\n# for JSON and XML files, which are sorted and parsed accordingly.\n# The information is all appended to a dictionary, with a few levels of\n# dictionaries, lists and items. This is then exported to JSON.\ndef main():\n data = {}\n directory = getDirectory()\n try:\n print(\"\\nSelected Directory: \", directory,\"\\n\")\n data[getRecords()] = directoryScanner(directory)\n if(data[getRecords()] == []):\n print(\"No valid files in that directory.\")\n exit()\n except Exception as e:\n print(e)\n exit()\n # Final exporting of JSON Objects.\n exportJSON(data, getOutput())\n\n #FIN! The program is done here. Everything below are the helper methods.\n\n################################################################################\n\n# This is what will ask for the directory of your xml/json files and return it.\ndef getDirectory():\n directory = tkinter.filedialog.askdirectory(initialdir='.')\n return(directory)\n\n# Creates a list with all the files in the given directory\n# Cycles through the list of files searching for XML and JSON files.\n# Converted to lowercase in case a file is named File.XML or something.\n# Returns the list to be sorted for parsing.\ndef directoryScanner(rootPath):\n product_record = []\n file_ = [d for d in listdir(rootPath) if isfile(join(rootPath, d))]\n for openFile in file_:\n openFile = join(rootPath, openFile)\n if openFile.lower().endswith(\".xml\"):\n product_record.append(parseXML(openFile))\n elif openFile.lower().endswith(\".json\") and not openFile == getOutput() and not openFile.startswith(\"test\"):\n product_record.append(parseJSON(openFile))\n return(product_record)\n\n\n# This will take in a filename and a list, and parse the XML file and append to the list's strings\n# Creates a dictionary and list to help sort the data_\ndef parseXML(file_):\n data_ = {}\n root = ET.parse(file_).getroot()\n for child in root:\n for key,val in child.attrib.items():\n print(child.text, val)\n data_[getID()] = val\n data_[getName()] = child.text\n product_record = {getRecord():data_}\n return(product_record)\n\n\n# This will take in a JSON file and parse it, then append to the list's strings\n# Separates items into key and value pairs to put into a dictionary, and output.\ndef parseJSON(file_):\n dictItem = {}\n nameID_output = [\"\",\"\",\"\"]\n with open(file_, 'r') as openFile:\n parsedFile = json.load(openFile)\n for key,value in parsedFile.items():\n for k,v in value.items():\n # I had an issue with the name not always showing first,\n # so I had to implement a quick check first.\n if(k == getName()):\n nameID_output[0] = v\n else:\n nameID_output[1] = v\n dictItem[k] = v\n product_record = {key:dictItem}\n nameID_output[2] = nameID_output[0] + \" \" + nameID_output[1]\n print(nameID_output[2])\n return(product_record)\n\n\n# This takes in an output file name and the JSON formatted string we've been building\n# and exports it into a new file which is called 'ics370_FINAL_OUTPUT.json'\ndef exportJSON(data_,out):\n with open(out, 'wt') as openFile:\n openFile.write(json.dumps(data_, separators=(',',':') ,indent=4))\n\n\n# in config.ini are the json object names, and output file name. 
This parses the\n# file to be returned to the getters below.\ndef parseConfig():\n variables = {}\n config = configparser.ConfigParser()\n config.read(\"config.ini\")\n opts = config.options(\"SETTINGS\")\n for key in opts:\n variables[key] = config.get(\"SETTINGS\",key)\n return(variables)\ndef getRecords():\n return parseConfig()['records']\ndef getRecord():\n return parseConfig()['record']\ndef getName():\n return parseConfig()['name']\ndef getID():\n return parseConfig()['id']\ndef getOutput():\n return parseConfig()['outputfile']\n\n# Initiates the program.\nif __name__ == '__main__':\n main()\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"520326196","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Import system modules\nfrom django.urls import path, re_path\nfrom . import views\n# Set environment variables\n\n# Import custom modules\n\n\nurlpatterns = [\n path('option_add//', views.option_add, name='option_add'),\n path('option_list//', views.option_list, name='option_list'),\n path('option_del///', views.option_del, name='option_del'),\n path('option_edit///', views.option_edit, name='option_edit'),\n path('one_book_list///', views.one_book_list, name='one_book_list'),\n]\n","sub_path":"APP_Book/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"383718833","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : Mike\n# @Contact : 597290963@qq.com\n# @Time : 2021/2/10 11:35\n# @File : SearchRange.py\nfrom typing import List\n\n\nclass SearchRange(object):\n\n def searchRange(self, nums: List[int], target: int) -> List[int]:\n \"\"\"\n Range search\n :param nums:\n :param target:\n :return:\n \"\"\"\n # find the left boundary of the target value\n if not nums:\n return [-1, -1]\n left = self.binary_search_l(nums, target)\n if left == -1:\n return [-1, -1]\n # find the right boundary of the target value\n right = self.binary_search_r(nums, target)\n return [left, right]\n\n def binary_search_l(self, nums, target):\n left, right = 0, len(nums) - 1\n while left < right:\n mid = (right - left) // 2 + left\n if nums[mid] == target:\n right = mid\n elif nums[mid] > target:\n right = mid - 1\n else:\n left = mid + 1\n\n return left if nums[left] == target else - 1\n\n def binary_search_r(self, nums, target):\n left, right = 0, len(nums) - 1\n while left < right:\n mid = (right - left + 1) // 2 + left\n if nums[mid] == target:\n left = mid\n elif nums[mid] > target:\n right = mid - 1\n else:\n left = mid + 1\n\n return left\n\n\nif __name__ == '__main__':\n print(SearchRange().searchRange([5, 7, 7, 8, 8, 10], 8))\n","sub_path":"datastructure/binary_array/SearchRange.py","file_name":"SearchRange.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"115043000","text":"# -*- coding:utf-8 -*-\n# /usr/bin/env python\n\"\"\"\nAuthor: Albert King\ndate: 2019/10/30 11:28\ncontact: jindaxiang@163.com\ndesc:\n\"\"\"\nimport requests\nimport demjson\nimport pandas as pd\nimport execjs\n\nfrom akshare.stock.cons import (hk_js_decode,\n hk_sina_stock_dict_payload,\n hk_sina_stock_list_url,\n hk_sina_stock_hist_url,\n hk_sina_stock_hist_hfq_url,\n hk_sina_stock_hist_qfq_url)\n\n\ndef get_hk_stock_name():\n res = requests.get(hk_sina_stock_list_url, params=hk_sina_stock_dict_payload)\n data_json = [demjson.decode(tt) for tt in [item + \"}\" for item in res.text[1:-1].split(\"},\") if not item.endswith(\"}\")]]\n data_df = pd.DataFrame(data_json)\n return data_df\n\n\ndef get_hk_stock_hist_data(symbol=\"00001\"):\n res = requests.get(hk_sina_stock_hist_url.format(symbol))\n js_code = execjs.compile(hk_js_decode)\n dict_list = js_code.call('d', res.text.split(\"=\")[1].split(\";\")[0].replace('\"', \"\")) # run the JS decryption code\n data_df = pd.DataFrame(dict_list)\n data_df[\"date\"] = data_df[\"date\"].str.split(\"T\", expand=True).iloc[:, 0]\n data_df.index = pd.to_datetime(data_df[\"date\"])\n del data_df[\"date\"]\n data_df.astype(\"float\")\n res = requests.get(hk_sina_stock_hist_hfq_url.format(symbol))\n hfq_factor_df = pd.DataFrame(eval(res.text.split(\"=\")[1].split(\"\\n\")[0])['data'])\n res = requests.get(hk_sina_stock_hist_qfq_url.format(symbol))\n qfq_factor_df = pd.DataFrame(eval(res.text.split(\"=\")[1].split(\"\\n\")[0])['data'])\n return data_df, hfq_factor_df, qfq_factor_df\n\n\nif __name__ == \"__main__\":\n a, b, c = get_hk_stock_hist_data(symbol=\"00005\")\n df = get_hk_stock_name()","sub_path":"akshare/stock/hk_stock_sina.py","file_name":"hk_stock_sina.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"470785593","text":"from automated_survey.models import Survey, Question, QuestionResponse\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.views.generic import View\nfrom django.core.urlresolvers import reverse\nfrom django.shortcuts import render_to_response\nfrom django.views.decorators.http import require_POST, require_GET\nfrom twilio import twiml\n\n\n@require_GET\ndef show_survey_results(request, survey_id):\n responses = QuestionResponse.objects.filter(question__survey__id=survey_id)\n survey = Survey.objects.get(id=survey_id)\n responses_to_render = list(map(lambda qr: _to_response(qr), responses))\n\n template_context = {\n 'responses': responses_to_render,\n 'survey_title': survey.title\n }\n\n return render_to_response('results.html', context=template_context)\n\n\n@require_GET\ndef show_survey(request, survey_id):\n survey = Survey.objects.get(id=survey_id)\n first_question = Question.objects.order_by('id').first()\n\n first_question_ids = {\n 'survey_id': survey.id,\n 'question_id': first_question.id\n }\n\n first_question_url = reverse('question', kwargs=first_question_ids)\n voice_response = twiml.Response()\n\n voice_response.say(\n 'Hello and thank you for taking the %s survey' %\n survey.title)\n voice_response.redirect(first_question_url, method='GET')\n\n return HttpResponse(voice_response, content_type='application/xml')\n\n\n@require_POST\ndef redirect_to_first_survey(request):\n first_survey = Survey.objects.first()\n first_survey_url = reverse('survey', kwargs={'survey_id': first_survey.id})\n\n return HttpResponseRedirect(first_survey_url)\n\n\n@require_GET\ndef redirect_to_first_results(request):\n first_survey = Survey.objects.first()\n results_for_first_survey = reverse(\n 'survey_results', kwargs={\n 'survey_id': first_survey.id})\n return HttpResponseRedirect(results_for_first_survey)\n\n\ndef _to_response(question_response):\n return {\n 'body': question_response.question.body,\n 'kind': question_response.question.kind,\n 'response': question_response.response,\n 'call_sid': question_response.call_sid,\n 'phone_number': question_response.phone_number,\n 
}\n","sub_path":"automated_survey/views/surveys.py","file_name":"surveys.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"186409274","text":"# Printing any object\nprint(\"Hi world\")\n\n# Indentation is important in python\nif 5>2 :\n\tprint(\"Five is greater than two!!!\")\n\n# Variable declaration and finding the type of the variables\nx = 5\ny = \"Hi\"\nz = 3+6j\nprint(type(x),type(y),type(z))\n\n#Type conversion\n\nX = 6\nY = 3.23\nZ = 3+5j\n\na = float(X)\nb = int(Y)\nc = complex(Y)\nprint(a,type(a))\nprint(b,type(b))\nprint(c,type(c))\n\n#Random numbers with random.randrange()\nimport random\nprint(random.randrange(1,10))\n\n# String declaration\n\"\"\"Lorem ipsum dolor sit amet,\nconsectetur adipiscing elit,\nsed do eiusmod tempor incididunt\nut labore et dolore magna aliqua.\"\"\"\n\na = \"\"\"Lorem ipsum dolor sit amet,\nconsectetur adipiscing elit,\nsed do eiusmod tempor incididunt\nut labore et dolore magna aliqua.\"\"\"\nb = '''Lorem ipsum dolor sit amet,\nconsectetur adipiscing elit,\nsed do eiusmod tempor incididunt\nut labore et dolore magna aliqua.'''\nprint(a)\nprint(b)\n\n# Substring. Get the characters from position 2 to position 5 (not included):\nsub = \"Hello, World!\"\nprint(sub[2:5])\n\n#The strip() method removes any whitespace from the beginning or the end:\n\nst = \" end \"\nprint(st.strip())\n\n#len() finds the length of the string\nprint(len(st))\n#lower() returns the string in lower case\nprint(sub.lower())\n#upper() returns the string in upper case\nprint(sub.upper())\n#The replace() method replaces a substring with another string:\nprint(sub.replace(\"o\",\"X\"))\n#The split() method splits the string into substrings if it finds instances of the separator:\nprint(sub.split(\"o\"))\n\n#We can combine strings and numbers by using the format() method!\nage = 23\n# txt = \"Vaishnavi's age is\"+age would fail: we can't concatenate str and int\ntxt = 'Vaishnavi is {} years old' \nprint(txt.format(age))\n\nquantity = 3\nitemno = 567\nprice = 49.95\n#myorder = \"I want {} pieces of item {} for {} dollars.\"\nmyorder = \"I want {0} pieces of item {2} for {1} dollars.\"\n#myorder = \"I want {2} pieces of item {1} for {0} dollars.\"\nprint(myorder.format(quantity, itemno, price))\n\n\n\n\n\n","sub_path":"python/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"481604904","text":"from dropbox import dropbox\n\nimport dbadmin_service\n\nwith open('token.txt', 'r') as f:\n global token\n token = f.read()\n\ndbx = dropbox.Dropbox(token)\n\nservice = dbadmin_service.DropboxService(token)\n\nprint(service.list_all_shared_folders())\n","sub_path":"dbadmin/fiddle_dropbox.py","file_name":"fiddle_dropbox.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"429363889","text":"import os, json, django\n\nimport requests, pickle, time\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'delta_hedge.settings')\n\ndjango.setup()\n\n#####\n#\n# An automatically-called script to look for Funds with no prior Holdings\n# and to get all Holdings from relevant quarters up until current day\n#\n#####\n\nfrom search.models import Fund\nfrom portfolio.models import Holding, FundInfo, Security\n\nfrom scrape_for_Holdings import scrape_edgar\n\ndef getNewFunds():\n newCiks = Fund.objects.filter(last_update='1900Q1').values('cik_num')\n newFunds = []\n\n for cik in newCiks:\n # Wipe any Holding objects there might be for this \"new\" CIK\n # This is for the case where the population script crashes halfway and\n # requires a restart, preventing double entries\n #\n # However this is a bit slower and should be made more efficient\n # in future updates\n # Holding.objects.filter(cik=cik['cik_num']).delete()\n newFunds.append(cik['cik_num'])\n\n return newFunds\n\nif __name__ == '__main__':\n # Relevant Quarters of interest\n qInterested = ['2018Q3', '2018Q2', '2018Q1']\n\n print(\"Looking through Fund table to see if there are new Funds\")\n newFundList = getNewFunds()\n\n if len(newFundList) != 0:\n print(\"Guess there are new Funds, sending them off to be updated\")\n scrape_edgar(newFundList, qInterested)\n print(\"All the new Funds have been added!\")\n else:\n print(\"All funds have Holdings in the DB\\nDone looking for new Funds\")\n","sub_path":"script_payload/check_for_new_Funds.py","file_name":"check_for_new_Funds.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"443501455","text":"import sys\nfrom datetime import datetime as dt\n\nclass User:\n def __init__(self, ID = \"\"):\n self.totals = [0.0] * 7\n self.counts = [0] * 7\n self.avgs = [\"\"] * 7\n self.ID = ID\n self.count = 0\n def nextLine(self, entry):\n arr = entry.split(',')\n\n if self.ID != arr[0]:\n if self.count > 0:\n print(str(self))\n self.__init__(arr[0])\n\n self.count += 1\n\n try:\n reading = float(arr[4])\n index = dt.strptime(arr[3].split(' ')[0],'%d-%b-%Y').weekday()\n\n self.totals[index] += reading\n self.counts[index] += 1\n except Exception as e:\n #print(e)\n pass\n\n def calcAvgs(self):\n for i in range(len(self.avgs)):\n self.avgs[i] = str(0 if self.counts[i] == 0 else self.totals[i] / self.counts[i])\n def __str__(self):\n self.calcAvgs()\n return self.ID + \",\" + \",\".join(self.avgs)\n\n\nprevID = \"\"\nuser = User(\"\")\n\nprint('\"Id\",\"{}\"'.format('\",\"'.join([str(i) for i in range(len(user.avgs))])))\n\nwith open(sys.argv[1], \"r\") as f:\n f.readline()\n for line in f:\n user.nextLine(line)\n","sub_path":"Separation_Scripts/FindWeeklyDayAvgs.py","file_name":"FindWeeklyDayAvgs.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"189633640","text":"from random import uniform, randint, choice, seed, gauss\n\nviewing_presets = {\n\"bevel\": True,\n\"do_update\": True,\n\"showLeaves\": False,\n\"useArm\": False,\n}\n\npreferences = {\n\"scale\": 10.,\n\"scaleV\": 0.,\n\"customShape\": (0.5, 1, 0.3, 0.5),\n\"nrings\": 0,\n\"closeTip\": False,\n\"autoTaper\": True,\n\"splitByLen\": True,\n}\n\nlowpoly_presets = {\n\"handleType\": \"0\", # Auto (not Vector)\n\"bevelRes\": 1, # Bevel Resolution\n\"resU\": 2, # Curve Resolution\n\"levels\": 3,\n\"curveRes\": [3, 4, 3, 1],\n}\n\nwind = {\n\"wind\": 0.35,\n}\n\nexport_presets = {\n\"do_update\": True,\n\"showLeaves\": True,\n\"useArm\": True,\n\"armLevels\": 0, # means all levels. 
earlier tried 2 which is n-1\n\"makeMesh\": False,\n\"armAnim\": True,\n\"frameRate\": 30,\n\"loopFrames\": 60\n}\n\nleaves = {\n\"leafShape\": \"rect\",\n\"leafDist\": (\"6\", \"4\", \"10\"), # shapeList4\n\"leaves\": (8, 14),\n\"leafScale\": (0.8, 1.13),\n}\n\n# custom_leaves = {\n# \"leafShape\": \"dFace\" # DUPLI LEAVES\n# }\n\n\"\"\"\n######## Very coarse randomization\n\"\"\"\n\nvery_coarse_randomize = {\n\"shape\": (\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"10\"), # whole tree. shapeList3\n\"shapeS\": \"4\", # secondary branches shape. shapeList4\n\"branches\": [0, (5, 30), (5, 22), 0],\n\"baseSplits\": (0, 1),\n\"baseSize\": (0.1, 0.55),\n\"baseSize_s\": (0.2, 1.),\n\"ratio\": (0.009, 0.014),\n\"downAngle\": [90., (55., 130.), (45., 110.), 45.],\n}\n\ndef very_coarse_rules(vc):\n #is baseSize high, let baseSize_s be under 0.6\n if vc[\"baseSize\"] > 0.35:\n vc[\"baseSize_s\"] = rnd((0.2, 0.6))\n #avoid secondary branches growing towards stem\n if vc[\"downAngle\"][1] > 90.:\n vc[\"downAngle\"][2] = rnd((45., 90.))\n return vc\n\n\"\"\"\n######## Coarse randomization\n\"\"\"\n\ncoarse_randomize = {\n\"leafDist\": \"6\", # leaf distribution. shapeList4\n\"rootFlare\": (1.0, 1.8),\n\"rMode\": (\"rotate\", \"original\", \"random\"),\n\"nSegSplits\": [0., (0., 0.6), (0., 0.6), 0.],\n\"length\": [1., (0.3, 0.5), (0.5, 0.6), 0.45], # default [1., 0.3, 0.6, 0.45]\n}\n\ndef coarse_rules(c):\n return c\n\n\"\"\"\n######## Odd branch\n\"\"\"\nquirk_randomize = {\n\"lengthV\": [0., (0.4, 0.8), 0., 0.]\n}\n\ndef quirk_rules(q):\n return q\n\n\"\"\"\n######## Utilities\n\"\"\"\ndef rand_dict(d):\n return {k: randomize(v) for k, v in d.items()}\n\ndef randomize(var):\n seed()\n #a parameter\n #may be string, so a single one is iterable\n if isinstance(var, list):\n return list(map(rnd, var))\n else:\n return rnd(var)\n\ndef rnd(var):\n var_type = type(var)\n if var_type in (float, int, str, bool): # a single value\n return var\n elif var_type is tuple: # randomize tuple\n first_type = type(var[0])\n if first_type is float:\n return uniform(*var)\n elif first_type is int:\n return randint(*var)\n else: # string\n return choice(var)\n else:\n print(\"rnd failure, no tuple\")\n\ndef merge_dicts(*dicts):\n merged = {}\n for d in dicts:\n merged.update(d)\n return merged\n\nshapeList3 = [('0', 'Conical', ''),\n ('6', 'Inverse Conical', ''),\n ('1', 'Spherical', ''),\n ('2', 'Hemispherical', ''),\n ('3', 'Cylindrical', ''),\n ('4', 'Tapered Cylindrical', ''),\n ('10', 'Inverse Tapered Cylindrical', ''),\n ('5', 'Flame', ''),\n ('7', 'Tend Flame', ''),\n ('8', 'Custom Shape', '')]\n\nshapeList4 = [('0', 'Conical', ''),\n ('6', 'Inverse Conical', ''),\n ('1', 'Spherical', ''),\n ('2', 'Hemispherical', ''),\n ('3', 'Cylindrical', ''),\n ('4', 'Tapered Cylindrical', ''),\n ('10', 'Inverse Tapered Cylindrical', ''),\n ('5', 'Flame', ''),\n ('7', 'Tend Flame', '')]\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"299296174","text":"\"\"\"\r\nThis code is written by Isaac Sibley, Jordan Powell, and Jacob Jiang for the Comp Org group project.\r\n\r\nThe purpose of this code is to read in a mips file and to output up to the first 16 clock cycles.\r\n\r\n\"\"\"\r\nimport sys\r\n\r\n\r\n# A class to hold all of the characteristics of an input and its state\r\nclass Expression:\r\n stages = [\"IF\", \"ID\", \"EX\", \"MEM\", \"WB\"]\r\n\r\n # Initialize the 
class by taking in an unseparated input statement and offset\r\n def __init__(self, statement, offset):\r\n # Store the statement, then split at the space and store the opperand\r\n self.statement = statement\r\n #Check if a loop statement\r\n if ':' in statement:\r\n #Store operand as jump and label in register[0]\r\n self.operand = \"jump\"\r\n self.format = 'J'\r\n self.register = []\r\n self.register.append(statement.split(':')[0])\r\n #Otherwise is a command line\r\n else:\r\n temp = statement.split(' ')\r\n self.operand = temp[0]\r\n # Split the registers and check if a load operation or not\r\n self.register = temp[1].split(',')\r\n if len(self.register) == 3:\r\n self.format = 'R'\r\n else:\r\n self.format = 'L'\r\n # Set the default values\r\n self.currentCycle = 1\r\n self.isWaiting = False\r\n self.waitCount = 0\r\n self.offset = offset\r\n self.controlHazard = 0\r\n if self.offset > 0:\r\n self.canExecute = False\r\n else:\r\n self.canExecute = True\r\n self.nopString = ''\r\n #Set a line for the jump label to be on\r\n if self.operand == 'beq' or self.operand == 'bne':\r\n self.jumpLabel = 0\r\n\r\n def __str__(self):\r\n # Keep currentCycle in Bounds\r\n if self.waitCount > 0:\r\n self.printNop()\r\n\r\n if self.currentCycle > 5:\r\n self.currentCycle = 5\r\n\r\n spacing = 16 - self.offset - self.currentCycle\r\n string = \"{:20}\".format(self.statement)\r\n\r\n # Print offset decimal\r\n if self.offset > 0:\r\n for i in range(self.offset):\r\n string += '{:4}'.format('.')\r\n\r\n # Print previous to current cycle\r\n for i in range(self.currentCycle):\r\n # if self.waitCount > 0 and i == 1:\r\n # string += '{0:4}'.format(self.stages[1])\r\n # spacing -= 1\r\n #if self.controlHazard > i and self.controlHazard > 0:\r\n # string += '{0:4}'.format(self.stages[i])\r\n if (self.controlHazard - i) <= 0 and self.controlHazard > 0:\r\n string += '{0:4}'.format('*')\r\n else:\r\n string += '{0:4}'.format(self.stages[i])\r\n\r\n # Print Trailing decimal points\r\n for i in range(spacing - 1):\r\n string += '{0:4}'.format('.')\r\n string += '.'\r\n \r\n #Truncate if beyond bounds\r\n if self.controlHazard > 0:\r\n string = string[:81] \r\n elif (self.currentCycle == 4):\r\n string = string[:83]\r\n elif self.currentCycle == 1 or self.currentCycle == 2 or self.currentCycle == 3 or self.currentCycle == 5:\r\n string = string[:82]\r\n else:\r\n string = string[:81]\r\n return string\r\n\r\n def printNop(self):\r\n # Keep currentCycle in Bounds\r\n if self.currentCycle > 5:\r\n self.currentCycle = 5\r\n\r\n # Calculate Spacing for trailing decimal point\r\n spacing = 16 - self.offset - self.currentCycle - self.waitCount\r\n string = \"{:20}\".format('nop')\r\n\r\n # Print offset decimal\r\n if self.offset > 0:\r\n for i in range(self.offset):\r\n string += '{:4}'.format('.')\r\n\r\n # Print previous to current cycle\r\n for i in range(2):\r\n string += '{0:4}'.format(self.stages[i])\r\n\r\n # print nop bubble\r\n for i in range(self.waitCount):\r\n string += '{0:4}'.format('*')\r\n\r\n # Print Trailing decimal points\r\n for i in range(spacing - 1):\r\n string += '{0:4}'.format('.')\r\n string += '.'\r\n\r\n # Return the string with proper spacing (20 spaces) \r\n print(string)\r\n\r\n def calculateExpression(self, reg):\r\n\r\n if self.format == 'L':\r\n # L Format [ Operation rt, IMM(rs) ]\r\n return\r\n elif self.format == 'R':\r\n # R Format [ Operation rd, rs, rt ] reg refers to global registers that are printed while self.registers\r\n # refers to rt, rs, and rd registers after the 
expression\r\n if self.operand == 'add':\r\n reg[self.register[0]] = reg[self.register[1]] + reg[self.register[2]]\r\n elif self.operand == 'addi':\r\n reg[self.register[0]] = reg[self.register[1]] + int(self.register[2])\r\n elif self.operand == 'sub':\r\n reg[self.register[0]] = reg[self.register[1]] - reg[self.register[2]]\r\n elif self.operand == 'subi':\r\n reg[self.register[0]] = reg[self.register[1]] - int(self.register[2])\r\n elif self.operand == 'and':\r\n reg[self.register[0]] = reg[self.register[1]] & reg[self.register[2]]\r\n elif self.operand == 'andi':\r\n reg[self.register[0]] = reg[self.register[1]] & int(self.register[2])\r\n elif self.operand == 'or':\r\n reg[self.register[0]] = reg[self.register[1]] | reg[self.register[2]]\r\n elif self.operand == 'ori':\r\n reg[self.register[0]] = reg[self.register[1]] | int(self.register[2])\r\n #Check if slt or slti\r\n elif self.operand == 'slt':\r\n if reg[self.register[1]] < reg[self.register[2]]:\r\n reg[self.register[0]] = 1\r\n else:\r\n reg[self.register[0]] = 0\r\n elif self.operand == 'slti':\r\n if reg[self.register[1]] < int(self.register[2]):\r\n reg[self.register[0]] = 1\r\n else:\r\n reg[self.register[0]] = 0 \r\n #Check if jumping to a loop (stored in register[2])\r\n elif self.operand == 'beq':\r\n #Only run if the equals is true\r\n if reg[self.register[0]] == reg[self.register[1]]:\r\n #Tell the variable to move back to where the loop label is located\r\n return self.jumpLabel\r\n return (-1)\r\n \r\n elif self.operand == 'bne':\r\n #Only run if the not equals is true\r\n if reg[self.register[0]] != reg[self.register[1]]:\r\n #Tell the variable to move back to where the loop label is located\r\n return self.jumpLabel\r\n return (-1)\r\n else:\r\n # J Format [ OP Label ]\r\n return\r\n\r\n\r\n# A function to determine if a nop is needed at any given line, i\r\n# It takes in: i, as well as the array of lines\r\ndef add_nop(index, MIPSExpressions):\r\n # Edge Cases: Index at zero wont be checked and if its not reached the execute phase\r\n if index == 0 or index == 1 or MIPSExpressions[index].currentCycle < 2:\r\n return\r\n\r\n # print(MIPSExpressions[index].waitCount)\r\n\r\n try:\r\n two = MIPSExpressions[index - 2]\r\n for i in range(1, len(MIPSExpressions[index].register)):\r\n if two.register[0] == MIPSExpressions[index].register[i]:\r\n #Commented out test code to try submitting\r\n #print(\"rs: \" + two.register[0] + '\\nreg: ' + MIPSExpressions[index].register[i])\r\n MIPSExpressions[index].isWaiting = True\r\n MIPSExpressions[index].waitCount += 1\r\n\r\n except IndexError:\r\n return\r\n\r\n if MIPSExpressions[index].isWaiting is True:\r\n if two.currentCycle > 5:\r\n MIPSExpressions[index].isWaiting = False\r\n\r\n\r\n# A main function to be the driver for our code\r\ndef main():\r\n # Variables\r\n MIPSExpressions = [] # Hold the MIPS Expressions\r\n separateline = '{:-^82}'.format('') # Print a dashed Line\r\n registers = { # Hold the registers as a dictionary\r\n \"$zero\": 0, \"$ra\": 0,\r\n # S Registers\r\n \"$s0\": 0, \"$s1\": 0, \"$s2\": 0, \"$s3\": 0,\r\n \"$s4\": 0, \"$s5\": 0, \"$s6\": 0, \"$s7\": 0,\r\n\r\n # T Registers\r\n \"$t0\": 0, \"$t1\": 0, \"$t2\": 0, \"$t3\": 0, \"$t4\": 0,\r\n \"$t5\": 0, \"$t6\": 0, \"$t7\": 0, \"$t8\": 0, \"$t9\": 0,\r\n }\r\n\r\n # Check Command line Arguments\r\n if len(sys.argv) != 3:\r\n print(\"Invalid number of inputs, needs 2!\\n\")\r\n exit(1)\r\n\r\n # Check argv[1] for forwarding option\r\n if sys.argv[1].upper() == 'N':\r\n optionForwarding = \" (no 
forwarding)\"\r\n else:\r\n optionForwarding = \" (forwarding)\"\r\n\r\n # Read the File; Create Expressions with statement and Offset\r\n lineCount = 0\r\n with open(sys.argv[2], 'r') as file:\r\n line = file.readline()\r\n while line:\r\n MIPSExpressions.append(Expression(line.rstrip('\\n'), lineCount))\r\n #Decrement if a jump label line to format output\r\n if ':' in line:\r\n lineCount = lineCount - 1\r\n line = file.readline()\r\n lineCount = lineCount + 1\r\n file.close()\r\n\r\n #Set up the jump labels to point to the correct points in the code\r\n i = 0\r\n j = 0\r\n while i < (len(MIPSExpressions) - 1):\r\n #Find beq or bne calls\r\n if MIPSExpressions[i].operand == 'bne' or MIPSExpressions[i].operand == 'beq':\r\n #Have them point to the correct jump label in the code\r\n while j < (len(MIPSExpressions) - 1):\r\n if MIPSExpressions[j].register[0] == MIPSExpressions[i].register[2]:\r\n MIPSExpressions[i].jumpLabel = j\r\n j += 1\r\n i += 1\r\n # Print The Simulation\r\n print('START OF SIMULATION' + optionForwarding, end='\\n')\r\n print(separateline, end='\\n')\r\n cycles = '{:<20}'.format('CPU Cycles ===>')\r\n for i in range(1, 16):\r\n cycles += '{:<4}'.format(i)\r\n cycles += '{:<0}'.format('16')\r\n\r\n # TODO: I believe were supposed to stop after 48 cycles or something so we might have to change this to correct\r\n # number of cycles Loop through MIPS Simulator until all instructions have complete WB stage.\r\n j = 0\r\n while MIPSExpressions[len(MIPSExpressions) - 1].currentCycle != 6 and j < 16:\r\n\r\n print(cycles)\r\n for i in range(len(MIPSExpressions)):\r\n #Skip over any jump labels\r\n if MIPSExpressions[i].operand == \"jump\":\r\n MIPSExpressions[i].currentCycle += 1\r\n continue\r\n # Check the last expression to see if its completed the IF stage before the second node can execute\r\n if i > 0 and MIPSExpressions[i - 1].currentCycle > 2:\r\n MIPSExpressions[i].canExecute = True\r\n\r\n # Calculate Registers on WB Cycle\r\n if MIPSExpressions[i].currentCycle == 5 and MIPSExpressions[i].controlHazard == 0:\r\n #Check if a jump operation\r\n if MIPSExpressions[i].operand == \"beq\" or MIPSExpressions[i].operand == \"bne\":\r\n #Check if jump\r\n temp = MIPSExpressions[i].calculateExpression(registers)\r\n #Check if a value line value recieved (gets -1 if not true)\r\n if temp >= 0:\r\n #NEED TO GO TO THIS LINE NEXT (whatever value is stored in temp + 1, as temp points to the jump label)\r\n #Needs to tell next 3 registers to print '*' and to immediately start printing what is stored in line after temp\r\n #TENTATIVE: REPLACE ALL REGISTERS AFTER THE CONTROL ERRORED WITH INPUT FROM THE temp+1 value\r\n while len(MIPSExpressions) > (i + 4):\r\n MIPSExpressions.pop()\r\n #Replace any remaining inputs with new ones fresh from the jump line\r\n file = open(sys.argv[2], 'r')\r\n lineCount = 0\r\n line = file.readline()\r\n while line:\r\n if lineCount > (temp):\r\n MIPSExpressions.append(Expression(line.rstrip('\\n'), (lineCount + (i-temp) + 2)))\r\n #Decrement if a jump label line to format output\r\n if ':' in line:\r\n lineCount = lineCount - 1\r\n line = file.readline()\r\n lineCount = lineCount + 1\r\n file.close()\r\n #Increment control hazard of any used values\r\n MIPSExpressions[i+1].controlHazard = 3\r\n MIPSExpressions[i+2].controlHazard = 2\r\n MIPSExpressions[i+3].controlHazard = 1\r\n else:\r\n MIPSExpressions[i].calculateExpression(registers)\r\n\r\n # If the Expression can execute increment cycle so next step can execute\r\n if 
MIPSExpressions[i].canExecute:\r\n # Print the expression\r\n print(MIPSExpressions[i], end='\\n')\r\n #Only print nop's when there is no forwarding\r\n if sys.argv[1].upper() == 'N':\r\n add_nop(i, MIPSExpressions)\r\n\r\n if MIPSExpressions[i].isWaiting is False:\r\n MIPSExpressions[i].currentCycle += 1\r\n\r\n # if MIPSExpressions[i].isWaiting is True:\r\n # MIPSExpressions[i].waitCount += 1\r\n if (len(MIPSExpressions) > (i + 1)):\r\n if MIPSExpressions[i+1].operand == \"jump\":\r\n MIPSExpressions[i+1].currentCycle += 1\r\n MIPSExpressions[i+2].canExecute = True\r\n print(MIPSExpressions[i+2], end='\\n')\r\n MIPSExpressions[i+2].currentCycle += 1\r\n else:\r\n MIPSExpressions[i+1].canExecute = True\r\n print(MIPSExpressions[i+1], end='\\n')\r\n MIPSExpressions[i+1].currentCycle += 1\r\n \r\n print(end='\\n') # Print a newline\r\n\r\n # Print the register dictionary; start a new line every 4 registers\r\n i = 0\r\n for key, value in registers.items():\r\n if key == '$zero' or key == '$ra':\r\n continue\r\n if i == 3 or i == 7 or i == 11 or i == 15 or i == 17:\r\n print(\"{0:<7}\".format(key + ' = ' + str(value)), end='\\n')\r\n else:\r\n print(\"{0:<20}\".format(key + ' = ' + str(value)), end='')\r\n\r\n i += 1\r\n j += 1\r\n print(separateline, end='\\n')\r\n print('END OF SIMULATION', end='\\n')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":14959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"91163674","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nnp.set_printoptions(threshold=np.inf)\n\n\ndef closewindows():\n\tk = cv2.waitKey(0)\n\tif (k & 0xFF) == ord('s'):\n\t\tcomment = input(\"Comment:-\\n \")\n\t\tcv2.imwrite('./data/test_result/'+comment+'_thres'+'.jpg',final_thr)\n\t\tcv2.imwrite('./data/test_result/'+comment+'_src'+'.jpg',src_img)\n\t\tcv2.imwrite('./data/test_result/'+comment+'_contr'+'.jpg',final_contr)\n\t\tprint(\"Completed\")\n\telif (k & 0xFF) == 27:\n\t\tcv2.destroyAllWindows()\n\telse:\n\t\tclosewindows()\n\ndef line_array(array):\n\tlist_x = []\n\tfor y in range(len(array)):\n\t\tif all(i >= 3 for i in array[y:y+9]):\n\t\t\tlist_x.append(y-1)\n\treturn list_x\n\n\n#-------------Thresholding Image--------------#\n\nsrc_img = cv2.imread('./data/img_2.jpg', 1)\n# copy = src_img.copy()\n# src_img = cv2.resize(copy, dsize =(1500, 1000), interpolation = cv2.INTER_AREA)\nheight = src_img.shape[0]\nwidth = src_img.shape[1]\nprint(\"#----------------------------#\")\nprint(\"Image Info:-\")\nprint(\"Height =\",height,\"\\nWidth =\",width)\nprint(\"#----------------------------#\")\n\ngrey_img = cv2.cvtColor(src_img, cv2.COLOR_BGR2GRAY)\n\ngud_img = cv2.adaptiveThreshold(grey_img,255,cv2.ADAPTIVE_THRESH_MEAN_C,cv2.THRESH_BINARY_INV,101,2)\n\nkernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))\nnoise_remove = cv2.erode(gud_img,kernel,iterations = 2)\n\nkernel1 = np.array([[1,0,1],[0,1,0],[1,0,1]], dtype = np.uint8)\n\nopening = cv2.morphologyEx(gud_img, cv2.MORPH_OPEN, kernel, iterations = 2) # To remove \"pepper-noise\"\nfinal_thr = cv2.dilate(noise_remove,kernel1,iterations = 3)\n\n#-------------/Thresholding Image-------------#\n\n\n#-------------Line Detection------------------#\n\ncount_x = np.zeros(shape= (height))\nfor y in range(height):\n\tfor x in range(width):\n\t\tif noise_remove[y][x] == 255 :\n\t\t\tcount_x[y] = count_x[y]+1\n\t# 
print(count_x[y])\n\nline_list = line_array(count_x)\n# print(line_list)\n\n# t = np.arange(0,height, 1)\n# plt.plot(t, count_x[t])\n# plt.axis([0, height, 0, 350])\n\n\n# for y in range(len(line_list)):\n# \tif :\n# \t\tmain_list.append(line_list[y-1]+10)\n# \t\tmain_list.append(line_list[y]-5)\n\n# main_list.append(line_list[-1]+10)\n\n# print(main_list)\n\n# for y in main_list:\n# \tsrc_img[y][:] = (0 , 255,0)\n\n\n\n# final_thr = cv2.erode(final_thr,kernel1,iterations = 1)\n\n\n#----------------------------------------------------------#\n \n\n\n\n\n\n\n#-------------------------------------------------------------#\n\n\n#-------------Character segmenting------------#\n\nchr_img = final_thr.copy()\n\ncontr_img, contours, hierarchy = cv2.findContours(chr_img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n# print(len(contours))\nfinal_contr = np.zeros((final_thr.shape[0],final_thr.shape[1],3), dtype = np.uint8)\ncv2.drawContours(final_contr, contours, -1, (0,255,0), 3)\n\nfor cnt in contours:\n\tif cv2.contourArea(cnt) > 100:\n\t\tx,y,w,h = cv2.boundingRect(cnt)\n\t\tcv2.rectangle(src_img,(x,y),(x+w,y+h),(0,255,0),2)\n\n#-------------/Character segmenting-----------#\n\n\n\n\n\n#-------------Displaying Image----------------#\n\ncv2.namedWindow('Source Image', cv2.WINDOW_NORMAL)\ncv2.namedWindow('Threshold Image', cv2.WINDOW_NORMAL)\ncv2.namedWindow('Contour Image', cv2.WINDOW_NORMAL)\n\ncv2.imshow(\"Source Image\", src_img)\ncv2.imshow(\"Threshold Image\", final_thr)\ncv2.imshow(\"Contour Image\", final_contr)\n\n# plt.show()\n\n#-------------/Displaying Image---------------#\n\n\n#-------------Closing Windows-----------------#\n\nclosewindows()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"34031889","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# Duplicate file analyzer\n\nx=1\nsair=\"nao\"\nwhile sair==\"nao\":\n nano = str(x)+\".txt\"\n try:\n arquivo = open(nano,\"r\")\n except:\n print(\"\\n\\nNo duplicate content found!\\n\\n\")\n break\n string = arquivo.read()\n arquivo.close()\n y=1\n while True:\n analise = str(y)+\".txt\"\n if x==y:\n pass\n else:\n try:\n arquivo = open(analise,\"r\")\n except:\n break\n exibir = arquivo.read()\n arquivo.close()\n print(exibir,string)\n if exibir == string:\n sair=\"sim\"\n print(\"File {}.txt is identical to file {}.txt\".format(x,y))\n break\n y=y+1\n x=x+1\n","sub_path":"conversas/Analizador de arquivos repetidos.py","file_name":"Analizador de arquivos repetidos.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"129528318","text":"#!/usr/bin/python\n\nimport sys\nimport os\nimport random\nimport atexit\nimport signal\nimport subprocess\n\nfrom os.path import expanduser\nfrom threading import Thread\nfrom time import sleep\nimport time\nimport datetime\n\nch_queries = [\n \"\"\"\n-- Q1\nselect ol_number,\n sum(ol_quantity) as sum_qty,\n sum(ol_amount) as sum_amount,\n avg(ol_quantity) as avg_qty,\n avg(ol_amount) as avg_amount,\n count(*) as count_order\nfrom order_line\ngroup by ol_number order by ol_number LIMIT 10;\n \"\"\",\n\"\"\"\n-- Q2\nselect su_suppkey, su_name, n_name, i_id, i_name, su_address, su_phone, su_comment\nfrom item, supplier, stock, nation, region,\n (select s_i_id as m_i_id,\n min(s_quantity) as m_s_quantity\n from stock, supplier, nation, region\n where 
mod((s_w_id*s_i_id),10000)=su_suppkey\n and su_nationkey=n_nationkey\n and n_regionkey=r_regionkey\n and r_name like 'Europ%'\n group by s_i_id) m\nwhere i_id = s_i_id\n and mod((s_w_id * s_i_id), 10000) = su_suppkey\n and su_nationkey = n_nationkey\n and n_regionkey = r_regionkey\n and i_data like '%b'\n and r_name like 'Europ%'\n and i_id=m_i_id\n and s_quantity = m_s_quantity\norder by n_name, su_name, i_id LIMIT 10;\n\"\"\",\n\"\"\"\n-- Q3\nselect ol_o_id, ol_w_id, ol_d_id,\n sum(ol_amount) as revenue, o_entry_d\nfrom customer, new_order, orders, order_line\nwhere c_state like 'A%'\n and c_id = o_c_id\n and c_w_id = o_w_id\n and c_d_id = o_d_id\n and no_w_id = o_w_id\n and no_d_id = o_d_id\n and no_o_id = o_id\n and ol_w_id = o_w_id\n and ol_d_id = o_d_id\n and ol_o_id = o_id\ngroup by ol_o_id, ol_w_id, ol_d_id, o_entry_d\norder by revenue desc, o_entry_d LIMIT 10;\n\"\"\",\n\"\"\"\n-- Q4\nselect o_ol_cnt, count(*) as order_count\nfrom orders\n where exists (select *\n from order_line\n where o_id = ol_o_id\n and o_w_id = ol_w_id\n and o_d_id = ol_d_id\n and ol_delivery_d >= o_entry_d)\ngroup by o_ol_cnt\norder by o_ol_cnt LIMIT 10;\n\"\"\",\n\"\"\"\n-- Q5\nselect n_name,\n sum(ol_amount) as revenue\nfrom customer, orders, order_line, stock, supplier, nation, region\nwhere c_id = o_c_id\n and c_w_id = o_w_id\n and c_d_id = o_d_id\n and ol_o_id = o_id\n and ol_w_id = o_w_id\n and ol_d_id=o_d_id\n and ol_w_id = s_w_id\n and ol_i_id = s_i_id\n and mod((s_w_id * s_i_id),10000) = su_suppkey\n and ascii(substr(c_state,1,1)) = su_nationkey\n and su_nationkey = n_nationkey\n and n_regionkey = r_regionkey\n and r_name = 'Europe'\n and o_entry_d >= '2015-01-02 00:00:00.000000'\ngroup by n_name\norder by revenue desc LIMIT 10;\n\"\"\",\n\"\"\"\n-- Q6\nselect sum(ol_amount) as revenue\nfrom order_line\nwhere ol_quantity between 1 and 100000 LIMIT 10;\n\"\"\",\n\"\"\"\n-- Q7\nselect su_nationkey as supp_nation,\n substr(c_state,1,1) as cust_nation,\n extract(year from o_entry_d) as l_year,\n sum(ol_amount) as revenue\nfrom supplier, stock, order_line, orders, customer, nation n1, nation n2\nwhere ol_supply_w_id = s_w_id\n and ol_i_id = s_i_id\n and mod((s_w_id * s_i_id), 10000) = su_suppkey\n and ol_w_id = o_w_id\n and ol_d_id = o_d_id\n and ol_o_id = o_id\n and c_id = o_c_id\n and c_w_id = o_w_id\n and c_d_id = o_d_id\n and su_nationkey = n1.n_nationkey\n and ascii(substr(c_state,1,1)) = n2.n_nationkey\n and (\n (n1.n_name = 'Germany' and n2.n_name = 'Cambodia')\n or\n (n1.n_name = 'Cambodia' and n2.n_name = 'Germany')\n )\ngroup by su_nationkey, substr(c_state,1,1), extract(year from o_entry_d)\norder by su_nationkey, cust_nation, l_year LIMIT 10;\n\"\"\",\n\"\"\"\n-- Q8\nselect extract(year from o_entry_d) as l_year,\n sum(case when n2.n_name = 'Germany' then ol_amount else 0 end) / sum(ol_amount) as mkt_share\nfrom item, supplier, stock, order_line, orders, customer, nation n1, nation n2, region\nwhere i_id = s_i_id\n and ol_i_id = s_i_id\n and ol_supply_w_id = s_w_id\n and mod((s_w_id * s_i_id),10000) = su_suppkey\n and ol_w_id = o_w_id\n and ol_d_id = o_d_id\n and ol_o_id = o_id\n and c_id = o_c_id\n and c_w_id = o_w_id\n and c_d_id = o_d_id\n and n1.n_nationkey = ascii(substr(c_state,1,1))\n and n1.n_regionkey = r_regionkey\n and ol_i_id < 1000\n and r_name = 'Europe'\n and su_nationkey = n2.n_nationkey\n and i_data like '%b'\n and i_id = ol_i_id\ngroup by extract(year from o_entry_d)\norder by l_year LIMIT 10;\n\"\"\",\n\"\"\"\n-- Q9\nselect n_name, extract(year from o_entry_d) as l_year, 
sum(ol_amount) as sum_profit\nfrom item, stock, supplier, order_line, orders, nation\nwhere ol_i_id = s_i_id\n and ol_supply_w_id = s_w_id\n and mod((s_w_id * s_i_id), 10000) = su_suppkey\n and ol_w_id = o_w_id\n and ol_d_id = o_d_id\n and ol_o_id = o_id\n and ol_i_id = i_id\n and su_nationkey = n_nationkey\n and i_data like '%BB'\ngroup by n_name, extract(year from o_entry_d)\norder by n_name, l_year desc LIMIT 10;\n\"\"\",\n\"\"\"\n-- Q10\nselect c_id, c_last, sum(ol_amount) as revenue, c_city, c_phone, n_name\nfrom customer, orders, order_line, nation\nwhere c_id = o_c_id\n and c_w_id = o_w_id\n and c_d_id = o_d_id\n and ol_w_id = o_w_id\n and ol_d_id = o_d_id\n and ol_o_id = o_id\n and o_entry_d <= ol_delivery_d\n and n_nationkey = ascii(substr(c_state,1,1))\ngroup by c_id, c_last, c_city, c_phone, n_name\norder by revenue desc LIMIT 10;\n\"\"\",\n\"\"\"\n-- Q11\nselect s_i_id, sum(s_order_cnt) as ordercount\nfrom stock, supplier, nation\nwhere mod((s_w_id * s_i_id),10000) = su_suppkey\n and su_nationkey = n_nationkey\n and n_name = 'Germany'\ngroup by s_i_id\nhaving sum(s_order_cnt) >\n (select sum(s_order_cnt) * .005\n from stock, supplier, nation\n where mod((s_w_id * s_i_id),10000) = su_suppkey\n and su_nationkey = n_nationkey\n and n_name = 'Germany')\norder by ordercount desc LIMIT 10;\n\"\"\",\n\"\"\"\n-- Q12\nselect o_ol_cnt,\n sum(case when o_carrier_id = 1 or o_carrier_id = 2 then 1 else 0 end) as high_line_count,\n sum(case when o_carrier_id <> 1 and o_carrier_id <> 2 then 1 else 0 end) as low_line_count\nfrom orders, order_line\nwhere ol_w_id = o_w_id\n and ol_d_id = o_d_id\n and ol_o_id = o_id\n and o_entry_d <= ol_delivery_d\ngroup by o_ol_cnt\norder by o_ol_cnt LIMIT 10;\n\"\"\",\n\"\"\"\n-- Q13\nselect c_count, count(*) as custdist\nfrom (select c_id, count(o_id)\n from customer left outer join orders on (\n c_w_id = o_w_id\n and c_d_id = o_d_id\n and c_id = o_c_id\n and o_carrier_id > 8)\n group by c_id) as c_orders (c_id, c_count)\ngroup by c_count\norder by custdist desc, c_count desc LIMIT 10;\n\"\"\",\n\"\"\"\n-- Q14\nselect 100.00 * sum(case when i_data like 'PR%' then ol_amount else 0 end) / (1+sum(ol_amount)) as promo_revenue\nfrom order_line, item\nwhere ol_i_id = i_id\n LIMIT 10;\n\"\"\",\n\"\"\"\n-- Q15\nwith revenue (supplier_no, total_revenue) as (\n select mod((s_w_id * s_i_id),10000) as supplier_no,\n sum(ol_amount) as total_revenue\n from order_line, stock\n where ol_i_id = s_i_id and ol_supply_w_id = s_w_id\n group by mod((s_w_id * s_i_id),10000))\nselect su_suppkey, su_name, su_address, su_phone, total_revenue\nfrom supplier, revenue\nwhere su_suppkey = supplier_no\n and total_revenue = (select max(total_revenue) from revenue)\norder by su_suppkey LIMIT 10;\n\"\"\",\n\"\"\"\n-- Q16\nselect i_name,\n substr(i_data, 1, 3) as brand,\n i_price,\n count(distinct (mod((s_w_id * s_i_id),10000))) as supplier_cnt\nfrom stock, item\nwhere i_id = s_i_id\n and i_data not like 'zz%'\n and (mod((s_w_id * s_i_id),10000) not in\n (select su_suppkey\n from supplier\n where su_comment like '%bad%'))\ngroup by i_name, substr(i_data, 1, 3), i_price\norder by supplier_cnt desc LIMIT 10;\n\"\"\",\n\"\"\"\n-- Q17\nselect sum(ol_amount) / 2.0 as avg_yearly\nfrom order_line, (select i_id, avg(ol_quantity) as a\n from item, order_line\n where i_data like '%b'\n and ol_i_id = i_id\n group by i_id) t\nwhere ol_i_id = t.i_id\n and ol_quantity < t.a LIMIT 10;\n\"\"\",\n\"\"\"\n-- Q18\nselect c_last, c_id o_id, o_entry_d, o_ol_cnt, sum(ol_amount)\nfrom customer, orders, 
order_line\nwhere c_id = o_c_id\n and c_w_id = o_w_id\n and c_d_id = o_d_id\n and ol_w_id = o_w_id\n and ol_d_id = o_d_id\n and ol_o_id = o_id\ngroup by o_id, o_w_id, o_d_id, c_id, c_last, o_entry_d, o_ol_cnt\nhaving sum(ol_amount) > 200\norder by sum(ol_amount) desc, o_entry_d LIMIT 10;\n\"\"\",\n\"\"\"\n-- Q19\nselect sum(ol_amount) as revenue\nfrom order_line, item\nwhere (\n ol_i_id = i_id\n and i_data like '%a'\n and ol_quantity >= 1\n and ol_quantity <= 10\n and i_price between 1 and 400000\n and ol_w_id in (1,2,3)\n ) or (\n ol_i_id = i_id\n and i_data like '%b'\n and ol_quantity >= 1\n and ol_quantity <= 10\n and i_price between 1 and 400000\n and ol_w_id in (1,2,4)\n ) or (\n ol_i_id = i_id\n and i_data like '%c'\n and ol_quantity >= 1\n and ol_quantity <= 10\n and i_price between 1 and 400000\n and ol_w_id in (1,5,3)\n ) LIMIT 10;\n\"\"\",\n\"\"\"\n-- Q20\nselect su_name, su_address\nfrom supplier, nation\nwhere su_suppkey in\n (select mod(s_i_id * s_w_id, 10000)\n from stock, order_line\n where s_i_id in\n (select i_id\n from item\n where i_data like 'co%')\n and ol_i_id=s_i_id\n group by s_i_id, s_w_id, s_quantity\n having 2*s_quantity > sum(ol_quantity))\n and su_nationkey = n_nationkey\n and n_name = 'Germany'\norder by su_name LIMIT 10;\n\"\"\",\n\"\"\"\n-- Q21\nselect su_name, count(*) as numwait\nfrom supplier, order_line l1, orders, stock, nation\nwhere ol_o_id = o_id\n and ol_w_id = o_w_id\n and ol_d_id = o_d_id\n and ol_w_id = s_w_id\n and ol_i_id = s_i_id\n and mod((s_w_id * s_i_id),10000) = su_suppkey\n and l1.ol_delivery_d > o_entry_d\n and not exists (select *\n from order_line l2\n where l2.ol_o_id = l1.ol_o_id\n and l2.ol_w_id = l1.ol_w_id\n and l2.ol_d_id = l1.ol_d_id\n and l2.ol_delivery_d > l1.ol_delivery_d)\n and su_nationkey = n_nationkey\n and n_name = 'Germany'\ngroup by su_name\norder by numwait desc, su_name LIMIT 10;\n\"\"\",\n\"\"\"\n-- Q22\nselect substr(c_state,1,1) as country,\n count(*) as numcust,\n sum(c_balance) as totacctbal\nfrom customer\nwhere substr(c_phone,1,1) in ('1','2','3','4','5','6','7')\n and c_balance > (select avg(c_BALANCE)\n from customer\n where c_balance > 0.00\n and substr(c_phone,1,1) in ('1','2','3','4','5','6','7'))\n and not exists (select *\n from orders\n where o_c_id = c_id\n and o_w_id = c_w_id\n and o_d_id = c_d_id)\ngroup by substr(c_state,1,1)\norder by substr(c_state,1,1) LIMIT 10;\n\"\"\"\n]\n\nsent_query_amount = 0\nis_terminated = False\nfile_suffix=\"0\"\n\nRANDOM_SEED = 123\n\n\ndef save_pid_to_file():\n my_pid = str(os.getpid())\n print(my_pid)\n home = expanduser(\"~\")\n\n f_pid = open(os.path.join(home, \"ch.pid\"), 'w')\n f_pid.write(my_pid)\n f_pid.close()\n\ndef start_ch_thread(start_index):\n global sent_query_amount\n global ch_queries\n global is_terminated\n\n size = len(ch_queries)\n\n cur_index = start_index\n while not is_terminated:\n return_code = send_query(ch_queries[cur_index],cur_index)\n # if there was an error, we will retry the same query\n if return_code != 0:\n continue\n sent_query_amount += 1\n\n cur_index += 1\n cur_index %= size\n\ndef send_query(query,cur_index):\n global coord_ip\n pg = ['psql', '-P', 'pager=off', '-v', 'ON_ERROR_STOP=1', '-h', coord_ip, '-c', query]\n\n start_datetime = datetime.datetime.now()\n start_time = int(round(time.time() * 1000))\n return_code = subprocess.call(pg)\n end_time = int(round(time.time() * 1000))\n\n f = open(\"results/ch_queries_{}.txt\".format(file_suffix), \"a\")\n # we print cur_index + 1 to be human readable (e.g: 1th query will be 1)\n 
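# both the start timestamp and the elapsed wall-clock milliseconds are appended for every query\n 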
f.write(\"{} started at {}\\n\".format(cur_index+1, start_datetime))\n f.write(\"{} finished in {} milliseconds\\n\".format(cur_index+1, end_time - start_time))\n f.close()\n\n return return_code\n\ndef give_stats(sent_query_amount, time_lapsed_in_secs):\n f = open(\"results/ch_results_{}.txt\".format(file_suffix), \"w\")\n f.write(\"queries {} in {} seconds\\n\".format(sent_query_amount, time_lapsed_in_secs))\n f.write(\"QPH {}\\n\".format(3600.0 * sent_query_amount / time_lapsed_in_secs))\n f.close()\n\ndef get_curtime_in_seconds():\n return int(round(time.time()))\n\n\ndef terminate():\n global is_terminated\n global sent_query_amount\n global start_time_in_secs\n\n end_time_in_secs = get_curtime_in_seconds()\n\n give_stats(sent_query_amount, end_time_in_secs - start_time_in_secs)\n\n is_terminated = True\n\nclass GracefulKiller:\n kill_now = False\n def __init__(self):\n signal.signal(signal.SIGINT, self.exit_gracefully)\n signal.signal(signal.SIGTERM, self.exit_gracefully)\n\n def exit_gracefully(self,signum, frame):\n global is_terminated\n self.kill_now = True\n print(\"got a kill signal\")\n terminate()\n\nif __name__ == \"__main__\":\n\n thread_count = int(sys.argv[1])\n coord_ip = sys.argv[2]\n initial_sleep_in_mins=int(sys.argv[3])\n file_suffix=sys.argv[4]\n\n random.seed(RANDOM_SEED)\n\n all_start_indexes = [i for i in range(0, len(ch_queries))]\n random.shuffle(all_start_indexes)\n start_indexes = all_start_indexes[:thread_count]\n\n save_pid_to_file()\n jobs = []\n for i in range(0, thread_count):\n thread = Thread(target = start_ch_thread, args=(start_indexes[i], ))\n jobs.append(thread)\n\n sleep(initial_sleep_in_mins * 60)\n\n start_time_in_secs = get_curtime_in_seconds()\n for j in jobs:\n j.start()\n\n killer = GracefulKiller()\n while not killer.kill_now:\n time.sleep(10)\n","sub_path":"hammerdb/ch_benchmark.py","file_name":"ch_benchmark.py","file_ext":"py","file_size_in_byte":14272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"532861034","text":"#! /usr/bin/env python\n#\ndef i4_fall_values ( n_data ):\n\n#*****************************************************************************80\n#\n## I4_FALL_VALUES returns values of the integer falling factorial function.\n#\n# Discussion:\n#\n# The definition of the falling factorial function is\n#\n# (m)_n = (m)! / (m-n)!\n# = ( m ) * ( m - 1 ) * ( m - 2 ) ... * ( m - n + 1 )\n# = Gamma ( m + 1 ) / Gamma ( m - n + 1 )\n#\n# We assume 0 <= N <= M.\n#\n# In Mathematica, the function can be evaluated by:\n#\n# FactorialPower[m,n]\n#\n# Licensing:\n#\n# This code is distributed under the GNU LGPL license.\n#\n# Modified:\n#\n# 15 December 2014\n#\n# Author:\n#\n# John Burkardt\n#\n# Reference:\n#\n# Milton Abramowitz and Irene Stegun,\n# Handbook of Mathematical Functions,\n# US Department of Commerce, 1964.\n#\n# Stephen Wolfram,\n# The Mathematica Book,\n# Fourth Edition,\n# Wolfram Media / Cambridge University Press, 1999.\n#\n# Parameters:\n#\n# Input/output, integer N_DATA. The user sets N_DATA to 0 before the\n# first call. 
On each call, the routine increments N_DATA by 1, and\n# returns the corresponding data; when there is no more data, the\n# output value of N_DATA will be 0 again.\n#\n# Output, integer M, N, the arguments of the function.\n#\n# Output, integer FMN, the value of the function.\n#\n import numpy as np\n\n n_max = 15\n\n fmn_vec = np.array ( [ \n 1, 5, 20, 60, 120, \\\n 120, 0, 1, 10, 4000, \\\n 90, 4896, 24, 912576, 0 ] )\n m_vec = np.array ( [ \n 5, 5, 5, 5, 5, \\\n 5, 5, 50, 10, 4000, \\\n 10, 18, 4, 98, 1 ] )\n n_vec = np.array ( [ \n 0, 1, 2, 3, 4, \\\n 5, 6, 0, 1, 1, \\\n 2, 3, 4, 3, 7 ] )\n\n if ( n_data < 0 ):\n n_data = 0\n\n if ( n_max <= n_data ):\n n_data = 0\n m = 0\n n = 0\n fmn = 0\n else:\n m = m_vec[n_data]\n n = n_vec[n_data]\n fmn = fmn_vec[n_data]\n n_data = n_data + 1\n\n return n_data, m, n, fmn\n\ndef i4_fall_values_test ( ):\n\n#*****************************************************************************80\n#\n## I4_FALL_VALUES_TEST tests I4_FALL_VALUES.\n#\n# Licensing:\n#\n# This code is distributed under the GNU LGPL license.\n#\n# Modified:\n#\n# 15 December 2014\n#\n# Author:\n#\n# John Burkardt\n#\n import platform\n\n print ( '' )\n print ( 'I4_FALL_VALUES_TEST:' )\n print ( ' Python version: %s' % ( platform.python_version ( ) ) )\n print ( ' I4_FALL_VALUES returns values of the integer falling factorial.' )\n print ( '' )\n print ( ' M N I4_FALL(M,N)' )\n print ( '' )\n\n n_data = 0\n\n while ( True ):\n\n n_data, m, n, fmn = i4_fall_values ( n_data )\n\n if ( n_data == 0 ):\n break\n\n print ( ' %8d %8d %8d' % ( m, n, fmn ) )\n#\n# Terminate.\n#\n print ( '' )\n print ( 'I4_FALL_VALUES_TEST:' )\n print ( ' Normal end of execution.' )\n return\n\nif ( __name__ == '__main__' ):\n from timestamp import timestamp\n timestamp ( )\n i4_fall_values_test ( )\n timestamp ( )\n\n","sub_path":"i4lib/i4_fall_values.py","file_name":"i4_fall_values.py","file_ext":"py","file_size_in_byte":2969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"380594767","text":"# Copyright 2019 NEC Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\n[Overview]\n Action driver: action execution processing (child process)\n\"\"\"\n\nimport pytest\nimport os\nimport datetime\nimport pytz\n\nfrom importlib import import_module\nfrom django.db import transaction\nfrom django.conf import settings\n\nfrom libs.commonlibs.define import *\nfrom libs.commonlibs.aes_cipher import AESCipher\nfrom libs.webcommonlibs.events_request import EventsRequestCommon\nfrom web_app.models.models import ActionType, EventsRequest, RhdmResponseAction, ActionHistory\n\n\noase_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../..')\nos.environ['OASE_ROOT_DIR'] = oase_root_dir\nos.environ['RUN_INTERVAL'] = '10'\nos.environ['PYTHON_MODULE'] = '/usr/bin/python3'\nos.environ['LOG_LEVEL'] = \"TRACE\"\nos.environ['LOG_DIR'] = oase_root_dir + \"/logs/backyardlogs/oase_action\"\n\n\nfrom backyards.action_driver.oase_action_sub import 
ActionDriverSubModules\n\n\n################################################\n# Test DB operations (suppression count / suppression interval)\n################################################\ndef set_data_action_stop(action_stop_interval, action_stop_count, exec_order):\n \"\"\"\n Register the data required for the suppression count/interval tests in the DB\n \"\"\"\n\n now = datetime.datetime.now(pytz.timezone('UTC'))\n trace_id = EventsRequestCommon.generate_trace_id(now)\n rhdm_response_action = None\n action_history = None\n\n try:\n with transaction.atomic():\n\n # Action type\n action_type = ActionType(\n driver_type_id=1,\n disuse_flag='0',\n last_update_timestamp=now,\n last_update_user='administrator'\n )\n action_type.save(force_insert=True)\n\n # Event request\n events_request = EventsRequest(\n trace_id=trace_id,\n request_type_id=1,\n rule_type_id=1,\n request_reception_time=now,\n request_user='pytest_user',\n request_server='pytest_server',\n event_to_time=now,\n event_info='{\"EVENT_INFO\":[\"1\"]}',\n status='3',\n status_update_id='pytest_id',\n retry_cnt=0,\n last_update_timestamp=now,\n last_update_user='administrator'\n )\n events_request.save(force_insert=True)\n\n # Rule matching result details\n rhdm_response_action = RhdmResponseAction(\n response_id=1,\n rule_name='pytest_rule',\n execution_order=exec_order,\n action_type_id=action_type.pk,\n action_parameter_info='{\"ACTION_PARAMETER_INFO\": [\"SERVER_LIST=localhost\", \"ITA_NAME=ITA176\", \"SYMPHONY_CLASS_ID=1\"]}',\n action_pre_info='',\n action_retry_interval=1,\n action_retry_count=1,\n action_stop_interval=action_stop_interval,\n action_stop_count=action_stop_count,\n last_update_timestamp=now,\n last_update_user='administrator',\n )\n rhdm_response_action.save(force_insert=True)\n\n # Action history\n action_history = ActionHistory(\n response_id=rhdm_response_action.response_id,\n trace_id=trace_id,\n rule_type_id=events_request.rule_type_id,\n rule_type_name='pytest_ruletable',\n rule_name=rhdm_response_action.rule_name,\n execution_order=rhdm_response_action.execution_order,\n action_start_time=now,\n action_type_id=rhdm_response_action.action_type_id,\n status=2,\n status_detail=0,\n status_update_id='pytest_id',\n retry_flag=False,\n retry_status=None,\n retry_status_detail=None,\n action_retry_count=0,\n last_act_user='administrator',\n last_update_timestamp=now,\n last_update_user='administrator',\n )\n action_history.save(force_insert=True)\n\n except Exception as e:\n print(e)\n\n return trace_id, rhdm_response_action, action_history\n\n\ndef delete_data_action_stop():\n \"\"\"\n Delete the data used by the tests\n Initialize the tables\n \"\"\"\n module = import_module('web_app.models.ITA_models')\n ItaDriver = getattr(module, 'ItaDriver')\n\n EventsRequest.objects.all().delete()\n RhdmResponseAction.objects.all().delete()\n ActionHistory.objects.all().delete()\n ItaDriver.objects.all().delete()\n ActionType.objects.all().delete()\n\n\n@pytest.fixture()\ndef setup_data_action():\n \"\"\"\n Reset the test data\n \"\"\"\n\n delete_data_action_stop()\n\n module = import_module('web_app.models.ITA_models')\n ItaDriver = getattr(module, 'ItaDriver')\n\n encryptpassword = AESCipher(settings.AES_KEY).encrypt('pytest')\n ita_driver = ItaDriver(\n ita_disp_name='ITA176',\n protocol='https',\n hostname='pytest-host-name',\n port='443',\n username='pytest',\n password=encryptpassword,\n last_update_timestamp=datetime.datetime.now(pytz.timezone('UTC')),\n last_update_user='pytest',\n ).save(force_insert=True)\n\n yield\n\n delete_data_action_stop()\n\n\n@pytest.mark.django_db\ndef test_is_prevented(setup_data_action):\n \"\"\"\n Suppression check test\n \"\"\"\n 
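# Two scenarios follow: no count/interval (expect \"both_none\"), then count+interval (expect WAITING)\n 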
############################################\n # Test with no stop count and no stop interval\n ############################################\n # Create test data\n trace_id, rhdm_res_act, act_his = set_data_action_stop(0, 0, 1)\n\n # Run the suppression check\n act_sub = ActionDriverSubModules(rhdm_res_act.response_id, trace_id, 0)\n act_sub.action_history = act_his\n result1 = act_sub._prevent_by_interval_and_times(rhdm_res_act)\n\n # Must not be suppressed\n assert result1 == \"both_none\"\n\n ############################################\n # Test with a stop count and a stop interval (WAIT pattern)\n ############################################\n # Create test data\n trace_id, rhdm_res_act, act_his = set_data_action_stop(10, 10, 2)\n\n # Run the suppression check\n act_sub = ActionDriverSubModules(rhdm_res_act.response_id, trace_id, 0)\n act_sub.action_history = act_his\n result1 = act_sub._prevent_by_interval_and_times(rhdm_res_act)\n result2 = act_sub._is_prevented(rhdm_res_act, trace_id, act_his.pk)\n\n assert result1 == \"both_existence\"\n assert result2 == WAITING\n\n\n@pytest.mark.django_db\ndef test_regist_exastro(ita_table, setup_data_action, monkeypatch):\n \"\"\"\n Test for regist_exastro()\n \"\"\"\n # Create test data\n trace_id, rhdm_res_act, act_his = set_data_action_stop(0, 0, 1)\n act_sub = ActionDriverSubModules(rhdm_res_act.response_id, trace_id, 0)\n act_sub.action_history = act_his\n\n ITAManager = getattr(import_module('libs.backyardlibs.action_driver.ITA.ITA_driver'), 'ITAManager')\n monkeypatch.setattr(ITAManager, 'act_with_menuid', lambda x, y, z : (ACTION_HISTORY_STATUS.EXASTRO_REQUEST, ACTION_HISTORY_STATUS.DETAIL_STS.NONE))\n # Normal case\n result = act_sub.regist_exastro(trace_id, act_his.pk)\n assert result\n\n # Error case: pass an invalid value as the second argument\n result = act_sub.regist_exastro(trace_id, 'a')\n assert result == False\n","sub_path":"oase-root/tests/backyards/action_driver/test_oase_action_sub.py","file_name":"test_oase_action_sub.py","file_ext":"py","file_size_in_byte":8058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"313318096","text":"class Node:\n def __init__(self, value = None, next_node = None):\n # the value at this linked list Node\n self.value = value\n # reference to the next node in the list\n self.next_node = next_node\n\n def get_value(self):\n return self.value\n\n def get_next(self):\n return self.next_node\n\n def set_next(self, new_next_node):\n self.next_node = new_next_node\n\nclass LinkedList:\n def __init__(self):\n #reference to the head of the list\n self.head = None\n #reference to the tail of the list\n self.tail = None\n\n def add_to_tail(self, value):\n # initialize a node with the given value\n new_node = Node(value, None)\n # check if there is no tail (i.e. the list is empty)\n if self.tail is None:\n self.head = new_node\n self.tail = new_node\n else:\n #set the current tail's next reference to our new node\n self.tail.set_next(new_node)\n self.tail = new_node \n \n def remove_head(self):\n # Return None if there is no head\n if self.head is None:\n return None\n # Check to see if there is only one element\n elif not self.head.get_next():\n head = self.head\n self.head = None\n self.tail = None\n return head.get_value()\n else:\n value = self.head\n self.head = self.head.get_next()\n return value.get_value()\n\n def contains(self, value):\n # if list is empty\n if not self.head:\n return False\n\n else:\n current = self.head\n while current:\n if value == current.get_value():\n return True\n else:\n current = current.get_next()\n\n return False\n\n def add_to_head(self, value):\n # initialize a node with the given value\n new_node = Node(value, 
None)\n # If list is empty\n if not self.head:\n self.head = new_node\n self.tail = new_node\n # If list only has one item\n elif not self.head.get_next():\n new_node.set_next(self.head)\n self.head = new_node\n \n else:\n prev_head = self.head\n self.head = new_node\n self.head.set_next(prev_head)\n\nclass Queue:\n def __init__(self):\n self.size = 0\n # what data structure should we\n # use to store queue elements?\n self.storage = LinkedList()\n\n def enqueue(self, item):\n self.size += 1\n self.storage.add_to_tail(item)\n \n def dequeue(self):\n if self.size == 0:\n return None\n else:\n self.size -= 1\n return self.storage.remove_head()\n\n def len(self):\n return self.size\n ","sub_path":"queue/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"393105986","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport sys\n\nfrom model_persistence import ModelPersistence\n\nif __name__ == \"__main__\":\n\n if len(sys.argv) > 1:\n for modelpath in sys.argv[1:]:\n model = ModelPersistence().load_model(modelpath)\n print(model)\n else:\n print(\"usage: show_model.py <model_path> [...]\")\n","sub_path":"show_model.py","file_name":"show_model.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"252332086","text":"class DisjointSet:\n\n def __init__(self, n):\n self.makeset(n)\n\n def makeset(self, n):\n self.S = [-1 for x in range(n)]\n\n def find(self, x):\n if self.S[x] < 0:\n return x\n else:\n return self.find(self.S[x])\n\n def union(self, el1, el2):\n root1 = self.find(el1)\n root2 = self.find(el2)\n if root1 == root2:\n return\n\n if self.S[root2] < self.S[root1]:\n self.S[root2] += self.S[root1]\n self.S[root1] = root2\n else:\n self.S[root1] += self.S[root2]\n self.S[root2] = root1\n\nds = DisjointSet(7)\nds.union(5, 6)\nds.union(1, 2)\nds.union(0, 2)\n\nprint(ds.find(5), ds.find(1), ds.find(2))","sub_path":"greedy/disjoint_sets_union_by_size.py","file_name":"disjoint_sets_union_by_size.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"597193307","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport threading\nimport rospy, os, time, sys, termios, select\n\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import Twist\nfrom std_msgs.msg import Empty, UInt8, Int8, Int32, String\nfrom bebop_msgs.msg import Ardrone3PilotingStateAltitudeChanged\nif os.name == 'nt':\n import msvcrt\nelse:\n import tty, termios\n\n\nclass Bebop:\n def __init__(self):\n rospy.init_node('bebop_move', anonymous=True)\n\n # bebop info\n #sub\n self.bebop_odom_sub = rospy.Subscriber('/bebop/odom', Odometry, self.callback_bebop_odom, queue_size=5)\n self.bebop_altitude_sub = rospy.Subscriber('/bebop/states/ardrone3/PilotingState/AltitudeChanged',Ardrone3PilotingStateAltitudeChanged,self.callback_bebop_takeoff_stat,queue_size = 1)\n\n # bebop control\n #pub\n self.bebop_pub_land = rospy.Publisher('/bebop/land', Empty, queue_size=1)\n self.bebop_pub_takeoff = rospy.Publisher('/bebop/takeoff', Empty, queue_size=1)\n self.bebop_control_pub = rospy.Publisher('/bebop/cmd_vel', Twist, queue_size=1)\n\n #with isyn\n #sub\n self.isyn_person_to_drone_Alignment_sub = rospy.Subscriber('/person_to_drone_Alignment', String, self.callback_person_to_drone_Alignment)\n 
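# isyn feedback: detection status, per-person alignment offsets and save-image acks drive the scan logic below\n 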
self.isyn_status_sub = rospy.Subscriber('/status_isyn', Int32,self.callback_isyn_status, queue_size=1)\n self.isyn_found_object_sub = rospy.Subscriber('/found_person', Int8, self.callback_found_person)\n self.isyn_save_image_clear_sub = rospy.Subscriber('/isyn_save_image_clear',Int8,self.callback_isyn_save_image_clear,queue_size=1)\n #pub\n self.bebop_mode_pub = rospy.Publisher('/bebop_mode', UInt8,queue_size=1)\n self.bebop_status_pub = rospy.Publisher('bebop_status',Int32,queue_size = 1)\n self.bebop_req_save_image_pub = rospy.Publisher('/bebop_req_save_image',Int8,queue_size = 1)\n\n\n #with bebop mode_1\n #sub\n self.point_move_all_clear_sub = rospy.Subscriber('/point_reached',Int8,self.callback_point_move_all_clear,queue_size=1)\n #pub\n self.scan_all_clear_pub = rospy.Publisher('/scan_all_clear',Int8, queue_size=1)\n\n # msg init\n self.empty_msg = Empty()\n self.msg = Twist()\n\n # init bebop\n self.curr_angular_speed = 0.1\n self.curr_isyn_status_msg = 0\n self.curr_Found_person = 0\n self.curr_Alignment = 0\n self.thresh_Alignment_max = 50\n self.thresh_Alignment_min = -50\n self.curr_bebop_takeoff_stat = 0\n #init flag\n self.set_Alignment_flag = 0\n self.error_check_num = 0\n self.point_local_scan_clear = 0\n self.scan_delay = 0\n\n # pub msgs\n self.bebop_mode_msg = 0\n self.curr_bebop_status_msg = 0\n\n def take_off(self):\n rospy.sleep(1)\n self.bebop_pub_takeoff.publish(self.empty_msg)\n print(\"bebop_will_take_off\")\n\n def land(self):\n rospy.sleep(1)\n self.bebop_pub_land.publish(self.empty_msg)\n print(\"bebop_is_landing\")\n\n def callback_point_move_all_clear(self,point_move_all_clear_data):\n self.curr_point_move_all_clear = point_move_all_clear_data.data\n\n\n def callback_bebop_odom(self,bebop_odom_data):\n self.curr_bebop_odom_z = bebop_odom_data.pose.pose.orientation.z\n self.curr_bebop_odom_z = self.curr_bebop_odom_z * 100\n self.curr_bebop_odom_z = int(self.curr_bebop_odom_z)\n \n def callback_person_to_drone_Alignment(self,Alignment_data):\n self.curr_Alignment = Alignment_data.data\n if self.curr_Alignment != 0 and self.curr_Alignment != 'not data':\n self.split_Alignment_data = self.curr_Alignment[1:-1]\n self.split_Alignment_data = map(int,self.split_Alignment_data.split(','))\n elif self.curr_Alignment == 'not data':\n self.split_Alignment_data = [-1]\n\n def callback_found_person(self,found_person_data):\n self.curr_found_person = found_person_data.data\n\n def callback_bebop_takeoff_stat(self,bebop_takeoff_stat_data):\n self.curr_bebop_takeoff_stat = bebop_takeoff_stat_data.altitude\n\n def callback_isyn_status(self,isyn_status_data):\n self.curr_isyn_status_msg = isyn_status_data.data\n\n def callback_isyn_save_image_clear(self,isyn_save_image_clear):\n self.curr_isyn_save_image_clear = isyn_save_image_clear.data\n\n def bebop_change_stat(self):\n\n #send msg of bebop status\n if self.bebop_mode_msg == 1:\n self.curr_bebop_status_msg = 0\n\n # if self.curr_bebop_takeoff_stat == 0 :\n # self.curr_bebop_status_msg = 0\n # self.bebop_mode_msg = 0\n\n if self.bebop_mode_msg == 2 and self.curr_isyn_status_msg == 0 and self.curr_point_move_all_clear == 1:\n self.curr_bebop_status_msg = 1\n self.point_local_scan_clear = 0\n self.msg.angular.x = self.msg.angular.y = self.msg.angular.z = 0\n self.bebop_control_pub.publish(self.msg)\n\n if self.bebop_mode_msg == 2 and self.curr_isyn_status_msg == 1:\n self.curr_bebop_status_msg = 2\n\n if self.bebop_mode_msg == 2 and self.curr_isyn_status_msg == 2:\n self.curr_bebop_status_msg = 3\n\n\n def 
set_detect_person_center(self):\n try:\n if self.split_Alignment_data[0] == -1:\n print('not split Alignment data')\n elif self.curr_bebop_status_msg == 3 and self.split_Alignment_data > 0:\n for i in range(0,len(self.split_Alignment_data),1):\n while (1):\n if (self.split_Alignment_data[i] < self.thresh_Alignment_max and self.split_Alignment_data[i] > self.thresh_Alignment_min):\n self.msg.angular.x = self.msg.angular.y = self.msg.angular.z = 0\n if self.curr_isyn_save_image_clear == 1:\n self.bebop_req_save_image_pub.publish(0)\n rospy.sleep(3)\n print('clear detect person center')\n break\n if self.curr_isyn_save_image_clear == 0:\n self.bebop_req_save_image_pub.publish(1)\n\n if (self.split_Alignment_data[i] < self.thresh_Alignment_max):\n self.msg.angular.z = self.curr_angular_speed\n self.msg.angular.x = self.msg.angular.y = 0\n if (self.split_Alignment_data[i] > self.thresh_Alignment_min):\n self.msg.angular.z = - self.curr_angular_speed\n self.msg.angular.x = self.msg.angular.y = 0\n\n self.bebop_control_pub.publish(self.msg)\n rospy.sleep(0.1)\n self.point_local_scan_clear = 1\n\n except AttributeError:\n self.error_check_num = self.error_check_num + 1\n if self.error_check_num >= 5:\n print(\"wait Alignment data\")\n self.error_check_num = 0\n\n except IndexError as e:\n self.point_local_scan_clear = 1\n if self.curr_isyn_save_image_clear == 1:\n self.bebop_req_save_image_pub.publish(0)\n print(e)\n\n\n def init_location(self):\n while (self.curr_bebop_odom_z > 0):\n self.msg.angular.z = self.curr_angular_speed\n self.bebop_control_pub.publish(self.msg)\n\n while (self.curr_bebop_odom_z < 0):\n self.msg.angular.z = self.curr_angular_speed\n self.bebop_control_pub.publish(self.msg)\n\n self.msg.angular.x = self.msg.angular.y = self.msg.angular.z = 0\n self.bebop_control_pub.publish(self.msg)\n\n # thread func\n def bebop_move(self):\n while(1):\n time.sleep(0.045)\n #change_bebop_status\n self.bebop_change_stat()\n\n # send msg of isyn status\n self.bebop_mode_pub.publish(self.bebop_mode_msg)\n self.bebop_status_pub.publish(self.curr_bebop_status_msg)\n\n if self.curr_bebop_status_msg == 3 and self.point_local_scan_clear == 0:\n limit_odom_z = self.curr_bebop_odom_z\n print(limit_odom_z)\n while(1) :\n self.msg.angular.z = self.curr_angular_speed\n self.bebop_control_pub.publish(self.msg)\n if self.curr_found_person > 0:\n self.scan_delay += 1\n if self.scan_delay == 20 and self.point_local_scan_clear == 0:\n print(\"start detect person center\")\n self.set_detect_person_center()\n if self.curr_bebop_odom_z == limit_odom_z and self.point_local_scan_clear == 1:\n self.scan_all_clear_pub.publish(1)\n self.curr_point_move_all_clear = 0\n self.bebop_mode_msg = 1\n break\n rospy.sleep(0.045)\n\n self.msg.angular.x = self.msg.angular.y = self.msg.angular.z = 0\n self.bebop_control_pub.publish(self.msg)\n\n\ndef getKey():\n if os.name == 'nt':\n return msvcrt.getch()\n tty.setraw(sys.stdin.fileno())\n rlist, _, _ = select.select([sys.stdin], [], [], 0.1)\n if rlist:\n key = sys.stdin.read(1)\n else:\n key = ''\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)\n return key\n\ndef key_control():\n while (1):\n try:\n key = getKey()\n if key == 't':\n print(\"thread take off call\")\n bebop.take_off()\n elif key == 'q':\n print(\"thread landing call\")\n bebop.land()\n break\n elif key == 'a':\n print('change bebop stop detecting mode num 0 ')\n bebop.bebop_mode_msg = 0\n elif key == 's':\n print('change bebop detecting mode num 2')\n bebop.bebop_mode_msg = 2\n 
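# clear the local-scan flag so mode 2 starts a fresh sweep\n 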
bebop.point_local_scan_clear = 0\n elif key == 'd':\n print('change bebop detecting mode num 3')\n bebop.init_location()\n\n time.sleep(0.1)\n except KeyboardInterrupt as e:\n print(e)\n break\n\nif __name__ == \"__main__\":\n try:\n if os.name != 'nt':\n settings = termios.tcgetattr(sys.stdin)\n bebop = Bebop()\n key_thread = threading.Thread(target=key_control)\n key_thread.daemon = True\n key_thread.start()\n bebop_move_thread = threading.Thread(target=bebop.bebop_move)\n bebop_move_thread.daemon = True\n bebop_move_thread.start()\n rospy.spin()\n\n except KeyboardInterrupt :\n print(\"main program exit\")\n\n except rospy.ROSInterruptException as e:\n print(\"ROS program exit\")","sub_path":"catkin_ws/src/opencv_isyn/node/bebop_move.py","file_name":"bebop_move.py","file_ext":"py","file_size_in_byte":11003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"560129644","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\"\"\"Pyunit for h2o.utils.progressbar.\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport math\nimport random\nimport time\n\nfrom h2o.utils.progressbar import ProgressBar, PBWBar, PBWPercentage\n\n\ndef test_progressbar():\n \"\"\"Test functionality for the progress bar.\"\"\"\n def progress_generator(duration):\n interval = duration / 20\n for i in range(20):\n yield (i + 1) / 20, interval\n\n ProgressBar().execute(progress_generator(5))\n ProgressBar(\"With file_mode\", file_mode=True).execute(progress_generator(5))\n ProgressBar(widgets=[\"Clowncopterization in progress, stand WAY back!\", PBWBar(), PBWPercentage()])\\\n .execute(progress_generator(3))\n\n def random_progress_generator(duration, interrupted=False):\n progress = 0\n n_steps = 10\n last_t = time.time()\n beta = n_steps / duration\n while progress < n_steps:\n delta = time.time() - last_t\n last_t = time.time()\n if interrupted and random.random() > math.exp(-beta * delta / (n_steps / 4)):\n raise StopIteration(\"planets did not align properly\")\n if random.random() > math.exp(-beta * delta):\n progress += 1\n yield progress / n_steps\n\n ProgressBar(\"Random 1s\").execute(random_progress_generator(1))\n ProgressBar(\"Random 5s\").execute(random_progress_generator(5))\n ProgressBar(\"Random 10s\").execute(random_progress_generator(10))\n ProgressBar(\"Hope this one works\").execute(random_progress_generator(5, True))\n\n\n\n# This test doesn't really need a connection to H2O cluster.\ntest_progressbar()\n","sub_path":"h2o-py/tests/testdir_misc/pyunit_progressbar.py","file_name":"pyunit_progressbar.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"606839358","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\ndef update_airports(apps, schema_editor):\n Airport = apps.get_model('itinerary', 'Airport')\n Report = apps.get_model('reports', 'Report')\n\n for airport in Airport.objects.all():\n airport.hub = True\n airport.automatic_report = False\n airport.save()\n\n for airport in Airport.objects.all().values('country').distinct():\n name = u\"Delay Report {}\".format(airport['country'].capitalize())\n Report.objects.create(\n name=name, descr=name, airport=Airport.objects.filter(country=airport['country']).first()\n )\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('itinerary', '0017_auto_20151214_1439'),\n ('reports', 
'0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='airport',\n name='automatic_report',\n field=models.BooleanField(default=False),\n ),\n migrations.AddField(\n model_name='airport',\n name='hub',\n field=models.BooleanField(default=False),\n ),\n migrations.RunPython(\n update_airports,\n ),\n ]\n","sub_path":"itinerary/migrations/0018_auto_20160113_1314.py","file_name":"0018_auto_20160113_1314.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"626608700","text":"import pygame\nimport sys\nfrom bullet import Bullet\nfrom alien import Alien\nimport time\n\n\ndef check_events(ai_settings, screen, ship, aliens, bullets, play_button, status, board):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.KEYDOWN:\n check_keydown_events(event, ai_settings, screen, ship, bullets, status)\n elif event.type == pygame.KEYUP:\n check_keyup_events(ship)\n elif event.type == pygame.MOUSEBUTTONDOWN:\n mouse_x, mouse_y = pygame.mouse.get_pos()\n check_button_event(ai_settings, screen, ship, aliens, bullets, play_button, status, board, mouse_x, mouse_y)\n\n\ndef check_button_event(ai_settings, screen, ship, aliens, bullets, play_button, status, board, mouse_x, mouse_y):\n button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)\n if button_clicked and not status.game_active:\n ai_settings.initialize_settings()\n\n pygame.mouse.set_visible(False)\n status.reset_status()\n status.game_active = True\n board.prep_score()\n board.prep_high_score()\n board.prep_level()\n board.prep_left_ships()\n aliens.empty()\n bullets.empty()\n\n create_fleet(ai_settings, screen, aliens, ship)\n\n ship.ship_center()\n\n\ndef check_keydown_events(event, ai_settings, screen, ship, bullets, status):\n # keys = pygame.key.get_pressed()\n # if keys[pygame.K_SPACE]:\n # fire(ai_settings, screen, ship, bullets)\n if event.key == pygame.K_d:\n ship.move_right = True\n if event.key == pygame.K_a:\n ship.move_left = True\n if event.key == pygame.K_w:\n ship.move_up = True\n if event.key == pygame.K_s:\n ship.move_down = True\n if event.key == pygame.K_SPACE:\n fire(ai_settings, screen, ship, bullets)\n # if event.key == pygame.K_p:\n # status.game_active = False\n # pygame.mouse.set_visible(True)\n\n\ndef check_keyup_events(ship):\n ship.move_right = False\n ship.move_left = False\n ship.move_up = False\n ship.move_down = False\n\n\ndef update_screen(ai_settings, screen, ship, aliens, bullets, play_button, status, board):\n screen.fill(ai_settings.bg_color)\n ship.blitme()\n aliens.draw(screen)\n for bullet in bullets.sprites():\n bullet.draw_bullet()\n board.draw_scoreboard()\n if not status.game_active:\n play_button.draw_button()\n pygame.display.flip()\n\n\ndef update_ship(ship):\n ship.update()\n\n\ndef update_bullets(ai_settings, screen, ship, aliens, bullets, status, board):\n bullets.update()\n for bullet in bullets.sprites():\n if bullet.rect.bottom <= 0:\n bullets.remove(bullet)\n check_bullet_hit_alien(ai_settings, screen, ship, aliens, bullets, status, board)\n\n\ndef fire(ai_settings, screen, ship, bullets):\n if len(bullets) < ai_settings.bullet_allowed:\n new_bullet = Bullet(ai_settings, screen, ship)\n bullets.add(new_bullet)\n\n\ndef check_bullet_hit_alien(ai_settings, screen, ship, aliens, bullets, status, board):\n hits = pygame.sprite.groupcollide(aliens, bullets, True, True)\n if hits:\n status.score += ai_settings.alien_points * 
len(hits.keys())\n board.prep_score()\n check_high_score(status, board)\n\n if len(aliens) == 0:\n bullets.empty()\n ai_settings.increase_speed()\n status.level += 1\n board.prep_level()\n create_fleet(ai_settings, screen, aliens, ship)\n\n\ndef check_high_score(status, board):\n if status.score > status.highest_score:\n status.highest_score = status.score\n board.prep_high_score()\n\n\ndef get_alien_number_x(ai_settings, alien: Alien) -> int:\n return (ai_settings.screen_width - 2 * alien.rect.width) // (2 * alien.rect.width)\n\n\ndef get_alien_number_y(ai_settings, alien: Alien) -> int:\n return (ai_settings.screen_height * 2 // 3 - alien.rect.height) // (2 * alien.rect.height)\n\n\ndef create_alien(ai_settings, screen, ship, col: int, row: int) -> Alien:\n alien = Alien(ai_settings, screen)\n alien.x = alien.rect.width + 2 * alien.rect.width * col\n alien.y = ship.rect.height + alien.rect.height + 2 * alien.rect.height * row\n alien.rect.x, alien.rect.y = alien.x, alien.y\n return alien\n\n\ndef create_fleet(ai_settings, screen, aliens, ship):\n alien = Alien(ai_settings, screen)\n alien_number_x = get_alien_number_x(ai_settings, alien)\n alien_number_y = get_alien_number_y(ai_settings, alien)\n for row in range(alien_number_y):\n for col in range(alien_number_x):\n aliens.add(create_alien(ai_settings, screen, ship, col, row))\n\n\ndef update_aliens(ai_settings, screen, ship, aliens, bullets, status, board):\n check_fleet_edge(ai_settings, aliens)\n aliens.update()\n if pygame.sprite.spritecollideany(ship, aliens):\n ship_hit(ai_settings, screen, ship, aliens, bullets, status, board)\n check_fleet_bottom(ai_settings, screen, ship, aliens, bullets, status, board)\n\n\ndef check_fleet_edge(ai_settings, aliens):\n for alien in aliens.sprites():\n if alien.check_edges():\n check_fleet_direction(ai_settings, aliens)\n break\n\n\ndef check_fleet_direction(ai_settings, aliens):\n for alien in aliens.sprites():\n alien.rect.y += ai_settings.fleet_drop_speed\n ai_settings.fleet_direction *= -1\n\n\ndef ship_hit(ai_settings, screen, ship, aliens, bullets, status, board):\n if status.ships_left > 0:\n status.ships_left -= 1\n\n board.prep_left_ships()\n bullets.empty()\n aliens.empty()\n create_fleet(ai_settings, screen, aliens, ship)\n ship.ship_center()\n time.sleep(0.5)\n else:\n status.game_active = False\n pygame.mouse.set_visible(True)\n\n\ndef check_fleet_bottom(ai_settings, screen, ship, aliens, bullets, status, board):\n for alien in aliens.sprites():\n if alien.rect.bottom >= screen.get_rect().bottom:\n ship_hit(ai_settings, screen, ship, aliens, bullets, status, board)\n break\n","sub_path":"game_functions.py","file_name":"game_functions.py","file_ext":"py","file_size_in_byte":6053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"516199043","text":"#!/usr/bin/python3\n\n# Considering N-8 neighbourhood, your task is to find the number of islands\n# in the given landscape.\n# Assume that the the 2D matrix is surrounded by water beyond the boundaries.\n\n\ndef no_of_islands(mat, R, C):\n # print(mat)\n cnt = 0\n for i in range(0, R):\n for j in range(0, C):\n if mat[i][j] == 1:\n cnt += 1\n DFS(mat, i, j, R, C)\n return cnt\n\n\ndef valid(i, j, R, C):\n if i < 0 or j < 0 or i >= R or j >= C:\n return False\n return True\n\n\ndef DFS(mat, i, j, R, C):\n # N-8 neighbourhood\n di = [-1, -1, -1, 0, 0, 1, 1, 1]\n dj = [-1, 0, 1, -1, 1, -1, 0, 1]\n\n if valid(i, j, R, C):\n if mat[i][j] == 1:\n mat[i][j] = 0\n for k in range(0, 8):\n 
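# visit each of the 8 neighbours in turn; out-of-range cells are rejected by valid()\n 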
DFS(mat, i + di[k], j + dj[k], R, C)\n\n\nif __name__ == '__main__':\n for _ in range(int(input())):\n mat = []\n R, C = map(int, input().split())\n for _ in range(0, R):\n arr = input()\n row = []\n for i in arr:\n row.append(int(i))\n mat.append(row)\n print(no_of_islands(mat, R, C))\n","sub_path":"hackerrank/si/graph/si-number-of-islands-optimize.py","file_name":"si-number-of-islands-optimize.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"297273698","text":"import numpy as np\nimport pandas as pd\nimport scipy as sc\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport plotly as ply\nimport random\nimport scipy.sparse as sp\nimport matplotlib as mpl\nimport statistics\n# from keras.models import Sequential\n# import tensorflow\nfrom itertools import product\nimport itertools\nimport math\nimport matplotlib.colors as mcolors\nfrom mpl_toolkits.mplot3d import Axes3D\nimport time\n# import tensorflow as tf\nimport scipy.linalg as la\nfrom decimal import *\nfrom sklearn.model_selection import train_test_split\n\n# from sklearn.linear_model import LinearRegression as LR\n# from sklearn.gaussian_process import kernels\n# from sklearn.gaussian_process import GaussianProcessClassifier as GPC\nimport sklearn.metrics.pairwise\n\n##%matplotlib inline\nplt.show()\nsns.set_theme(font=\"tahoma\", font_scale=0.6)\n\n\n# %%Generators\ndef fun_y(a, b):\n Y = []\n for i in range(len(a)):\n if a[i] == 0:\n y = a_AY\n else:\n y = np.sin(a[i] * a_AY) / a[i]\n\n Y.append(y)\n return Y\n\n\ndef fun_w(a):\n W = []\n m_u = np.log2(min(U[U != 0]))\n\n for i in range(len(a)):\n if a[i] <= 0:\n w = m_u - 1\n else:\n w = np.log2(a[i])\n W.append(w)\n return W\n\n\n# W=fun_w(a=U)\n# plt.scatter(U,W)\n# %%\n# param\nN = [5, 200, 10000, 100000]\n# [a,y,z,w] heat as a catalyst z temprature w\na_AY = 2\na_AZ = 0.5\n\n# [u,a,y,z,w]\nm_e = [1, 1, 0, -1, 0]\n# cov_e=[[1],[,1],[,,1],[,,,1]]\nC = [1, 1, 1, 1, 4]\n\n# U is a chi2 distribution\nrandom.seed(100)\nU = np.random.chisquare(m_e[0], N[1]).round(2)\nU_inst = np.ones(N[1]).round(2)\n\n# Z is noisy reading of U\nrandom.seed(110)\neZ = np.random.normal(m_e[3], C[3], N[1])\nZ = (eZ - U).round(2)\nZ_conU = (eZ - U_inst).round(2)\n\nrandom.seed(120)\neW = np.random.normal(m_e[4], C[4], N[1])\nW0 = fun_w(U)\nW = (eW + W0).round(2)\nW_conU = (eW + fun_w(U_inst)).round(2)\n\nrandom.seed(130)\neA = np.random.normal(m_e[1], C[1], size=N[1])\n# A = (eA + a_AZ * Z +2* U**0.5).round(3)\n# A_conU=(eA + a_AZ * Z_conU +2* U_inst**0.5).round(3)\nA = (eA + U).round(2)\nA_conU = (eA + U_inst).round(2)\n\nrandom.seed(19500)\neY = np.random.normal(m_e[2], C[2], N[1])\n# Y = (np.exp(a_AY *A)+ (eY -np.log10(U+10))).round(3)\n# Y_conU=(np.exp(a_AY * A_conU)+ eY -np.log10(U_inst+10)).round(3)\nY0 = fun_y(a=A, b=a_AY)\nY = (Y0 + eY + U).round(2)\nY_conU = (Y0 + eY + (U_inst)).round(2)\n\nD = pd.DataFrame([U, A, Y, Z, W]).T\nD.columns = ['U', 'A', 'Y', 'Z', 'W']\nO = pd.DataFrame([A, Y, Z, W]).T\nO.columns = ['A', 'Y', 'Z', 'W']\nD_conU = pd.DataFrame([U_inst, A_conU, Y_conU, Z_conU, W_conU]).T\nD_conU.columns = ['U', 'A', 'Y', 'Z', 'W']\n# plt.scatter(data=D, x='A',y='Y')\n\n#%%Kernel parameters\nl_1=0.1\nl_2=.1\nn_test=int(N[1]*.1)\nn_train=N[1]-n_test\nm1_test=int(n_test*.5)\nm2_test=n_test-m1_test\nm1_train=int(n_train*0.5)## I have to change it back to 50%\nm2_train=n_train-m1_train\nlow_b=.001\n\nprint(N[1], n_test, m1_test, m2_test, n_train, m1_train, m2_train)\n\n# %% corr 
structure\necov_v = pd.DataFrame.cov(D)\n\necorr_v = D.corr()\necorr_v.columns = ['U', 'A', 'Y', 'Z', 'W']\necorr_O = O.corr()\necorr_O.columns = ['A', 'Y', 'Z', 'W']\necov_v_conU = pd.DataFrame.cov(D_conU)\necov_v_conU.columns = ['U', 'A', 'Y', 'Z', 'W']\necorr_v_conU = D_conU.corr()\necorr_v_conU.columns = ['U', 'A', 'Y', 'Z', 'W']\n\nO_train, O_test = train_test_split(O, test_size=n_test, train_size=n_train)\n\nsamp1, samp2 = train_test_split(O_train, test_size=m2_train, train_size=m1_train)\nsamp1_test, samp2_test = train_test_split(O_test, test_size=m2_test, train_size=m1_test)\n\n# %%\nsns.displot(D, x='U', label=\"U\", kde=True), plt.show()\nsns.displot(D, x='A', label=\"A\", kde=True), plt.show()\nsns.displot(D, x='Y', label=\"y\", kde=True), plt.show()\nsns.displot(D, x='Z', label=\"Z\", kde=True), plt.show()\nsns.displot(D, x='W', label=\"W\", kde=True), plt.show()\n\nsns.set_theme(font=\"tahoma\", font_scale=1) # this gives us the pariswise plots\n\n\nsns.displot(O, x='A', y='Y', kind=\"kde\"), plt.show()\nsns.displot(O, x='Z', y='Y', kind=\"kde\"), plt.show()\nsns.displot(O, x='W', y='Y', kind=\"kde\"), plt.show()\nsns.displot(D, x='U', y='Y', kind=\"kde\"), plt.show()\n\n# sns.heatmap(ecorr_v,annot=True, fmt='g'),plt.show()\nsns.heatmap(ecorr_v, annot=True, fmt=\".2\"), plt.show()\nsns.heatmap(ecorr_O, annot=True, fmt=\".2\"), plt.show()\n\n# sns.heatmap(ecov_v_conU,annot=True, fmt='g'),plt.show()\n\n# sns.heatmap(ecorr_v_conU,annot=True, fmt='g'),plt.show()\nsns.heatmap(ecorr_v_conU, annot=True, fmt=\".2\"), plt.show()","sub_path":"simulation/simulation_afsaneh_2_deprecated.py","file_name":"simulation_afsaneh_2_deprecated.py","file_ext":"py","file_size_in_byte":4461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"236082975","text":"import streamlit as st\r\nfrom PIL import Image, ImageOps\r\nimport base64 \r\nfrom io import BytesIO\r\ndef app():\r\n st.markdown(\r\n \"\"\"\r\n \r\n \"\"\",\r\n unsafe_allow_html=True\r\n )\r\n st.title(\"Add Border To Image\")\r\n im1=st.file_uploader(label=\"UPLOAD IMAGE\")\r\n if im1:\r\n # open the image\r\n img = Image.open(im1)\r\n a=st.slider(\"border width\",1,100)\r\n # add border to the image\r\n img2 = ImageOps.expand(img, border=int(a), fill='blue')\r\n # display image\r\n st.image(img2)\r\n btn = st.button(\"Save\")\r\n if btn:\r\n buffered = BytesIO()\r\n img2.save(buffered, format=\"JPEG\")\r\n img_str = base64.b64encode(buffered.getvalue()).decode()\r\n href = f'
<a href="data:file/jpg;base64,{img_str}" download>Download final image</a>
'\r\n st.markdown(href, unsafe_allow_html=True)\r\n","sub_path":"border.py","file_name":"border.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"464601302","text":"import cPickle as pickle\n\nfrom common import *\n\n\nclass TestPickling(TestCase):\n\n def test_pickle_session(self):\n\n sg = Session(Shotgun())\n\n x = pickle.dumps(sg)\n\n sg = pickle.loads(x)\n\n # It lost it's Shotgun object.\n self.assertIs(sg._shotgun, None)\n\n def test_pickle_entity(self):\n\n sg = Session(False)\n e1 = sg.merge({'type': 'Dummy', 'id': 1234})\n\n e1['key'] = 'value'\n\n x = pickle.dumps(e1)\n e2 = pickle.loads(x)\n\n # It lost the extra \"key\".\n self.assertEqual(e2, {'type': 'Dummy', 'id': 1234})\n\n def test_pickle_session_identity(self):\n\n sg = Session(False)\n e1 = sg.merge({'type': 'Dummy', 'id': 1234})\n e2 = sg.merge({'type': 'Dummy', 'id': 5678})\n\n x = pickle.dumps([e1, e2])\n e1, e2 = pickle.loads(x)\n\n # The sessions are the same.\n self.assertIs(e1.session, e2.session)\n","sub_path":"tests/test_pickling.py","file_name":"test_pickling.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"14485240","text":"'''\nAuthor: Dr. John T. Hwang \n Dr. Mohamed A. Bouhlel \n\nThis package is distributed under New BSD license.\n'''\n\nfrom numpy.distutils.core import setup, Extension\nimport os\nfrom subprocess import call\nimport numpy as np\n\nimport pip\npip.main(['install', 'Cython'])\n\nfrom Cython.Build import cythonize\n\n\next = cythonize(\n Extension(\"smt.methods.rbfclib\",\n sources=[\n 'smt/src/rbf/rbf.cpp',\n 'smt/src/rbf/rbfclib.pyx',\n ],\n language=\"c++\", extra_compile_args=['-std=c++11'],\n include_dirs=[np.get_include(),\n])) + cythonize(\n Extension(\"smt.methods.idwclib\",\n sources=[\n 'smt/src/idw/idw.cpp',\n 'smt/src/idw/idwclib.pyx',\n ],\n language=\"c++\", extra_compile_args=['-std=c++11'],\n include_dirs=[np.get_include(),\n])) + cythonize(\n Extension(\"smt.methods.rmtsclib\",\n sources=[\n 'smt/src/rmts/rmtsclib.pyx',\n 'smt/src/rmts/utils.cpp',\n 'smt/src/rmts/rmts.cpp',\n 'smt/src/rmts/rmtb.cpp',\n 'smt/src/rmts/rmtc.cpp',\n ],\n language=\"c++\", extra_compile_args=['-std=c++11'],\n include_dirs=[np.get_include(),\n]))\n\nsetup(name='smt',\n version='0.1',\n description='The Surrogate Model Toolbox (SMT)',\n author='Mohamed Amine Bouhlel',\n author_email='mbouhlel@umich.edu',\n license='BSD-3',\n packages=[\n 'smt',\n 'smt/methods',\n 'smt/problems',\n 'smt/sampling',\n 'smt/utils',\n ],\n install_requires=[\n 'scikit-learn',\n 'pyDOE',\n 'matplotlib',\n 'numpydoc',\n ],\n zip_safe=False,\n ext_modules=ext,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"626475544","text":"\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom datetime import timedelta, datetime\nimport pickle\nimport os\nimport sys\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import r2_score as R2\nimport matplotlib\nimport matplotlib.style as style\n\nfrom funcs import *\nfrom model_funcs import *\n\ncurrDir = os.getcwd()\nparent = os.path.abspath(os.path.join(currDir, os.pardir))\n\n# quick load to avoid doing hourly averaging!\nfile = 'hourlyData'\nf = open(os.path.join(parent, 
file+'.pkl'), 'rb')\nprocessed = pickle.load(f)\nf.close()\n\n# quick load to avoid doing hourly averaging!\nfile = 'times'\nf = open(os.path.join(parent, file+'.pkl'), 'rb')\nnewTimes = pickle.load(f)\nf.close()\n\n# steps ahead to forecast\nsteps = [1, 2, 3, 4, 5, 6]\n# diff -> T/F\n# whether target variable should be differenced\ndiff=False\n# number of tests to run\nn_tests = 1\n\nfor step in steps:\n\n print('\\n')\n print(step, 'hours ahead')\n tPrint = str(step)+'_step_target'\n pPrint = str(step)+'_step_prediction'\n ePrint = str(step)+'_step_error'\n testTimePrint = str(step)+'_step_test_time'\n \n # arrays for saved data\n targetArray = []\n predictionArray = []\n errorArray = []\n \n avg_rmse = np.zeros((n_tests, 1))\n\n for i in range(n_tests):\n\n trainTimes, testTimes, trainInputs, testInputs, trainTarget, testTarget, train_target_var, test_target_var, multi_train_target, multi_test_target = dataForNwpModel(processed, newTimes, step, diff)\n\n # produce results for test set\n testing=False\n # save model parameters for future use\n saveModel=False\n\n Umodel, TImodel, DirNSmodel, DirEWmodel, Tmodel = models(trainInputs, testInputs, trainTarget, testTarget, train_target_var, test_target_var, step, diff, testing, saveModel)\n\n col=0\n truePred = prediction(Umodel, TImodel, DirNSmodel, DirEWmodel, Tmodel, testInputs, testTimes, step, diff)\n \n pred = truePred[:,col]\n target = multi_test_target[:, col]\n error = target-pred\n rmse = np.sqrt(error**2).mean()\n avg_rmse[i] = rmse\n \n targetArray.append(target)\n predictionArray.append(pred)\n errorArray.append(error)\n\n print('RMSE =', np.round(rmse, 3), 'm/s')\n \n print('Average RMSE', np.round(np.mean(avg_rmse), 3), 'm/s')\n","sub_path":"RS_Sample/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"367379303","text":"import asyncio\n\nimport pytest\n\nfrom lightbus.message import RpcMessage\n\n\npytestmark = pytest.mark.unit\n\n\n@pytest.mark.run_loop\nasync def test_get_redis(redis_rpc_transport):\n \"\"\"Does get_redis() provide a working redis connection\"\"\"\n pool = await redis_rpc_transport.get_redis_pool()\n with await pool as redis:\n assert await redis.info()\n\n\n@pytest.mark.run_loop\nasync def test_call_rpc(redis_rpc_transport, redis_client):\n \"\"\"Does call_rpc() add a message to a stream\"\"\"\n rpc_message = RpcMessage(\n rpc_id='123abc',\n api_name='my.api',\n procedure_name='my_proc',\n kwargs={'field': 'value'},\n return_path='abc',\n )\n await redis_rpc_transport.call_rpc(rpc_message, options={})\n assert await redis_client.keys('*') == [b'my.api:stream']\n\n messages = await redis_client.xrange('my.api:stream')\n assert len(messages) == 1\n assert messages[0][1] == {\n b'rpc_id': b'\"123abc\"',\n b'api_name': b'\"my.api\"',\n b'procedure_name': b'\"my_proc\"',\n b'kw:field': b'\"value\"',\n b'return_path': b'\"abc\"',\n }\n\n\n@pytest.mark.run_loop\nasync def test_consume_rpcs(redis_client, redis_rpc_transport, dummy_api):\n\n async def co_enqeue():\n await asyncio.sleep(0.01)\n return await redis_client.xadd('my.dummy:stream', fields={\n b'rpc_id': b'\"123abc\"',\n b'api_name': b'\"my.api\"',\n b'procedure_name': b'\"my_proc\"',\n b'kw:field': b'\"value\"',\n b'return_path': b'\"abc\"',\n })\n\n async def co_consume():\n return await redis_rpc_transport.consume_rpcs(apis=[dummy_api])\n\n enqueue_result, messages = await asyncio.gather(co_enqeue(), co_consume())\n message = 
messages[0]\n assert message.rpc_id == '123abc'\n assert message.api_name == 'my.api'\n assert message.procedure_name == 'my_proc'\n assert message.kwargs == {'field': 'value'}\n assert message.return_path == 'abc'\n","sub_path":"tests/redis_transports/test_unit_redis_rpc.py","file_name":"test_unit_redis_rpc.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"311500734","text":"#!/usr/bin/env python\n\nimport os\nimport simplejson\n\nfrom delve.model import *\nfrom delve.model.focuses import Focuses\nfrom delve.model.charClass import *\nfrom delve.lib.common import *\n\ndef makeStartingAttrField(base, attr, num, size):\n return StartingAttrField(base=base, attribute=attr,\n roll=Roll(num=num, size=size))\n\ndef load_all_races():\n CLSFILE = os.path.join(os.path.dirname(__file__),\n '..',\n 'delve',\n 'data',\n 'classes.json')\n CLSFILE = open(CLSFILE)\n data = simplejson.loads(CLSFILE.read())\n print('Read in %d records.' % (len(data)))\n mongoAwesome()\n for clss in data:\n print('Class: %s' % clss['name'])\n CharClass(name=clss['name'],\n description=clss['description'],\n attributes=clss['attributes'],\n health=makeStartingAttrField(\n clss['health']['base'],\n clss['health']['attribute'],\n clss['health']['roll']['num'],\n clss['health']['roll']['size']),\n magicPoints=makeStartingAttrField(\n clss['mp']['base'],\n clss['mp']['attribute'],\n clss['mp']['roll']['num'],\n clss['mp']['roll']['size']),\n boons=[boonFactory(boon) for boon in clss['boons']],\n ).save()\n\nif __name__ == '__main__':\n load_all_races()\n","sub_path":"scripts/load_classes.py","file_name":"load_classes.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"11381944","text":"from django.contrib import admin, messages\nfrom django.contrib.admin.utils import get_deleted_objects\nfrom django.shortcuts import get_object_or_404\nfrom django.db import router\n\nfrom django.urls import reverse\nfrom django.utils.html import mark_safe\n\nfrom django.template.response import TemplateResponse\n\nfrom core.models import *\n\nfrom core.admin.task import *\n\n\nclass ProjectAdmin(admin.ModelAdmin):\n exclude = ['creator']\n inlines = [TaskInline,]\n actions = ['delete_project']\n list_display = [\n 'title',\n 'get_percentage',\n 'get_visibility',\n 'get_creator',\n 'jump_timeline',\n 'jump_tasks',\n ]\n\n def get_actions(self, request):\n actions = super(ProjectAdmin, self).get_actions(request)\n if 'delete_selected' in actions:\n del actions['delete_selected']\n return actions\n\n def delete_project(self, request, queryset):\n opts = self.model._meta\n app_label = opts.app_label\n using = router.db_for_write(self.model)\n deletable_objects, model_count, perms_needed, protected = get_deleted_objects(\n queryset, opts, request.user, self.admin_site, using)\n\n if request.POST.get('post'):\n all_deleted = True\n for project in queryset:\n if (project.creator == request.user or\n request.user.has_perm('core.supervisor')):\n project.delete()\n self.log_deletion(request, project, project.title)\n else:\n all_deleted = False\n if not all_deleted:\n messages.warning(request,\n 'Not owned projects were not deleted.')\n else:\n messages.success(request,\n 'Selected projects were successfully deleted.')\n return None\n\n if len(queryset) == 1:\n objects_name = opts.verbose_name\n else:\n objects_name = opts.verbose_name_plural\n\n if perms_needed 
or protected:\n title = 'Cannot delete %(name)s' % {'name': objects_name}\n else:\n title = 'Are you sure?'\n\n context = dict(\n self.admin_site.each_context(request),\n title=title,\n objects_name=objects_name,\n deletable_objects=[deletable_objects],\n model_count=dict(model_count).items(),\n queryset=queryset,\n perms_lacking=perms_needed,\n protected=protected,\n opts=opts,\n media=self.media,\n )\n\n return TemplateResponse(request, \"admin/delete_project_confirmation.html\", context)\n delete_project.short_description = 'Delete'\n\n def has_delete_permission(self, request, obj=None):\n if obj != None:\n if obj.creator == request.user:\n return True\n else:\n return False\n else:\n return False\n\n def change_view(self, request, object_id, form_url='', extra_context=None):\n obj = get_object_or_404(Project, pk=object_id)\n if (request.user.has_perm('core.supervisor') or\n request.user.is_superuser or\n request.user == obj.creator):\n return super(ProjectAdmin, self).change_view(request, object_id, form_url, extra_context)\n else:\n extra_context = extra_context or {}\n extra_context['title'] = 'Details of '+obj.title\n extra_context['saveonly'] = True\n extra_context['hidesave'] = True\n return super(ProjectAdmin, self).change_view(request, object_id, form_url, extra_context)\n\n def get_readonly_fields(self, request, obj=None):\n if request.user.is_superuser:\n return []\n if request.user.has_perm('core.supervisor'):\n return []\n else:\n if obj != None:\n if request.user != obj.creator:\n return [\n 'title',\n 'summary',\n 'visibility',\n ]\n return []\n\n def get_percentage(self, obj):\n all_tasks = len(Task.objects.filter(project=obj))\n finished_tasks = len(Task.objects.filter(project=obj, is_done=True))\n if all_tasks > 0:\n return mark_safe('%(perc)d%% (%(fin)s / %(all)s)' % {\n 'perc': (finished_tasks/all_tasks)*100,\n 'fin': finished_tasks,\n 'all': all_tasks,\n })\n else:\n return ''\n get_percentage.short_description = 'Complete'\n\n def get_visibility(self, obj):\n visibilities = {\n 'DEF': 'Default (Creator and Supervisors)',\n 'PUB': 'Public',\n 'PRI': 'Private',\n }\n return visibilities[obj.visibility]\n get_visibility.admin_order_field = 'visibility'\n get_visibility.short_description = 'Visibility'\n\n def get_creator(self, obj):\n return obj.creator.username\n get_creator.admin_order_field = 'creator'\n get_creator.short_description = 'Creator'\n\n def jump_timeline(self, obj):\n if len(Task.objects.filter(project=obj)) > 0:\n return mark_safe('Timeline' % {\n 'href': reverse('timeline', args=[obj.pk])\n })\n else:\n return ''\n jump_timeline.short_description = ''\n\n def jump_tasks(self, obj):\n if len(Task.objects.filter(project=obj)) > 0:\n return mark_safe('Tasks' % {\n 'href': reverse('admin:index')+'%(app_label)s/task/?project__id__exact=%(pk)s' % {\n 'app_label': obj._meta.app_label,\n 'pk': obj.pk\n }\n })\n else:\n return ''\n jump_tasks.short_description = ''\n\n def get_queryset(self, request):\n qs = super(ProjectAdmin, self).get_queryset(request)\n if request.user.is_superuser:\n return qs\n if request.user.has_perm('core.supervisor'):\n # Not superuser, exlude Privates, and return every other task (Default and Public):\n return qs.exclude(visibility='PRI')\n else:\n # Regular user (not superuser, nor supervisor), view only owned and Public:\n public_projects = Project.objects.filter(visibility='PUB')\n user_projects = Project.objects.filter(creator=request.user)\n return public_projects | user_projects\n\n def get_formsets_with_inlines(self, request, 
obj=None):\n if obj != None:\n if (request.user.has_perm('core.supervisor') or\n request.user.is_superuser or\n request.user == obj.creator):\n for inline in self.get_inline_instances(request, obj):\n # Hide TaskInline in the add view\n if isinstance(inline, TaskInline) and obj is None:\n continue\n yield inline.get_formset(request, obj), inline\n\n def save_formset(self, request, form, formset, change):\n for f in formset.forms:\n obj = f.instance\n if f.has_changed():\n if isinstance(obj, Task):\n obj.creator = request.user\n obj.save()\n formset.save()\n\n def save_model(self, request, obj, form, change):\n if not hasattr(obj, 'creator'):\n obj.creator = request.user\n obj.save()\n","sub_path":"core/admin/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":7577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"651036244","text":"from vpython import *\n#GlowScript 3.1 VPython\n\nfrom vpython import *\nscene = display( width = 600, height = 600, center = vector(0,0,0))\n\nlamp = local_light(pos=vector(0,0,0), color=color.orange)\nG = 6.67 *pow(10,-11)\nuserspin = True\n\nME = 5.973 *pow(10,24)\nMM = 7.347 *pow(10,22)\nMMa = 6.39 *pow(10,23)\nMS = 1.989 *pow(10,30)\nREM = 384400000\nRSE = 149600000000\nRMS = 227900000000\nFEM = G*(ME*MM)/pow(REM,2)\nFES = G*(MS*ME)/pow(RSE,2)\nFEMa = G*(MMa*MS)/pow(RMS,2)\n\nwM = sqrt(FEM/(MM * REM))\nvM = wM * REM\nprint(\"Angular velocity of the Moon with respect to the Earth: \",wM,\" rad/s\")\nprint(\"Velocity v of the Moon: \",vM/1000,\" km/s\")\n\nwE = sqrt(FES/(ME * RSE))\nwMa = sqrt(FEMa/(MMa * RMS))\nvE = 10*wE * RSE\nvMa = 10*wMa * RMS\nprint(\"Angular velocity of the Earth with respect to the Sun: \",wE,\" rad/s\")\nprint(\"Velocity v of the Earth: \",vE/1000,\" km/s\")\n\n\ntheta0 = 20\n\ndef positionMoon(t): \n theta = theta0 + wM * t\n return theta\n\ndef positionMars(t): \n theta = theta0 + wMa * t\n return theta\n\ndef positionEarth(t):\n theta = theta0 + wE * t\n return theta\n\n\ndef fromDaysToS(d):\n s = d*24*60*60\n return s\n\ndef fromStoDays(s):\n d = s/60/60/24\n return d\n\ndef fromDaysToh(d):\n h = d * 24\n return h\n\nprint(\"\\nSimulation Earth-Moon-Sun motion\\n\")\ndays = 365\nseconds = fromDaysToS(days)\nprint(\"Days: \",days)\nprint(\"Seconds: \",seconds)\n\nv = vector(384,0,0)\nE = sphere(pos = vector(1500,0,0), color = color.blue, radius = 60, make_trail=True)\nMa = sphere(pos = vector(2300,0,0), color = color.orange, radius = 30, make_trail=True)\nM = sphere(pos = E.pos + v, color = color.white,radius = 10, make_trail=True)\nS = sphere(pos = vector(0,0,0), color = color.yellow, radius=700)\n\nt = 0\nthetaTerra1 = 0\ndt = 5000\ndthetaE = positionEarth(t+dt)- positionEarth(t)\ndthetaM = positionMoon(t+dt) - positionMoon(t)\ndthetaMa = positionMars(t+dt) - positionMars(t)\nprint(\"delta t:\",dt,\"seconds. 
Days:\",fromStoDays(dt),\"hours:\",fromDaysToh(fromStoDays(dt)),sep=\" \")\nprint(\"Variation angular position of the Earth:\",dthetaE,\"rad/s that's to say\",degrees(dthetaE),\"degrees\",sep=\" \")\nprint(\"Variation angular position of the Moon:\",dthetaM,\"rad/s that's to say\",degrees(dthetaM),\"degrees\",sep=\" \")\n\nwhile t < seconds:\n rate(500)\n thetaEarth = positionEarth(t+dt)- positionEarth(t)\n thetaMoon = positionMoon(t+dt) - positionMoon(t)\n thetaMars = positionMars(t+dt) - positionMars(t)\n E.pos = rotate(E.pos,angle=thetaEarth,axis=vector(0,1,0))\n Ma.pos = rotate(Ma.pos,angle=thetaMars,axis=vector(0,1,0))\n v = rotate(v,angle=thetaMoon,axis=vector(0,1,0))\n M.pos = E.pos + v\nt += dt","sub_path":"Orb_Sim.py","file_name":"Orb_Sim.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"221863800","text":"class lobject(object):\n def __init__(this, conf={}):\n for i in conf:\n this[i] = conf[i]\n\n def __getitem__(this, i):\n l = i.find('.')\n if l >= 1:\n p = i[:l]\n c = i[l+1:]\n return this.__getattribute__(p)[c]\n elif l < 0 and i != '':\n return this.__getattribute__(i)\n\n def __next__(this):\n return this.__dict__.__next__()\n\n def __iter__(this):\n return this.__dict__.__iter__()\n \n\n def __getattr__(this, k):\n if k[:2] != '__':\n i = this.__new__(this.__class__)\n this.__setattr__(k, i)\n return i\n else:\n return object.__getattr__(this,k)\n\n def __setitem__(this, i, v):\n l = i.find('.')\n if l >= 1:\n p = i[:l]\n c = i[l+1:]\n\n tmp = this.__new__(this.__class__)\n tmp.__setitem__(c,v)\n if p in this:\n if type(this[p]) == this.__class__:\n this[p](tmp)\n else:\n this.__setattr__(p,tmp)\n else:\n this.__setattr__(p,tmp)\n return \n elif l < 0 and i != '':\n this.__setattr__(i,v)\n return\n print('can\\' set item')\n errrrrrrrrrrr()\n return\n\n def __delitem__(this, i):\n v = this.__getattribute__(i)\n del(v)\n\n # def __repr__(this):\n # ret = ''\n # line = ''\n # for i in this.__dict__:\n # line = '%s='%i\n # line += this.__dict__[i]\n # line += '\\n'\n # ret += line\n # return ret\n\n#} //class lobject\n\n\nclass Conf(lobject):\n __parentname = None\n __name = None\n __sync = None\n\n def __init__(this, template={}):\n if type(template) == dict:\n this.__fromdict(template)\n if type(template) == this.__class__:\n this.__fromdict(template.__todict())\n\n\n def __todict(this):\n ret = {}\n for k,v in this.__dict__.items():\n if k[:7] == '_Conf__':\n continue\n if type(v) == this.__class__:\n ret[k] = v.__todict()\n elif type(v) == dict:\n ret[k] = ('__realdict',v)\n elif type(v).__name__ == 'method':\n continue\n elif type(v).__name__ == 'instancemethod':\n continue\n elif type(v).__name__ == 'function':\n continue\n else:\n ret[k] = v\n return ret\n\n\n\n def __todict_withname(this):\n ret = {}\n for k,v in this.__dict__.items():\n if type(v) == this.__class__:\n ret[k] = v.__todict_withname()\n elif type(v) == dict:\n ret[k] = ('__realdict',v)\n elif type(v).__name__ == 'method':\n continue\n elif type(v).__name__ == 'instancemethod':\n continue\n elif type(v).__name__ == 'function':\n continue\n else:\n ret[k] = v\n return ret\n\n def __todict_all(this):\n ret = {}\n for k,v in this.__dict__.items():\n if type(v) == this.__class__:\n ret[k] = v.__todict_all()\n elif type(v) == dict:\n ret[k] = ('__realdict',v)\n else:\n ret[k] = v\n return ret\n\n def __fromdict(this,dic):\n if type(dic) != dict:\n print('err fromdict')\n errrrrrrrrr()\n for k,v in 
dic.items():\n if type(v) == tuple and v[0] == '__realdict':\n this[k] = v[1]\n elif type(v) == dict:\n tmp = this.__new__(this.__class__)\n tmp.__fromdict(v)\n this[k] = tmp\n else:\n this[k] = v\n\n def __str__(this):\n return this.__tostr()\n\n def __tostr(this):\n ret = ''\n ret2 = ''\n tmp = this.__new__(this.__class__)\n tmp.__init__(this.__todict_withname())\n this = tmp\n if not this.__parentname and not this.__name:\n for k,i in this.__dict__.items():\n if type(i) == Conf:\n i.__name = k\n tmp = i.__tostr()\n ret2 += tmp\n if tmp == '':\n ret2+= k+'=Conf()\\n'\n elif k[:7]!='_Conf__' :\n #elif k!='_Conf__name' and k!='_Conf__parentname':\n ret += '%s=%s\\n'%(str(k),repr(i))\n elif not this.__parentname and this.__name:\n for k,i in this.__dict__.items():\n if type(i) == Conf:\n i.__name = k\n i.__parentname = '%s'%(this.__name)\n tmp = i.__tostr()\n ret2 += tmp\n if tmp == '':\n ret2+= this.__name+'.'+k+'=Conf()\\n'\n elif k[:7]!='_Conf__' :\n #elif k!='_Conf__name' and k!='_Conf__parentname':\n ret += '%s.%s=%s\\n'%(this.__name, k, repr(i))\n else :\n for k,i in this.__dict__.items():\n if type(i) == Conf:\n i.__name = k\n i.__parentname = '%s.%s'%(this.__parentname, this.__name)\n tmp = i.__tostr()\n ret2 += tmp\n if tmp == '':\n ret2+= this.__parentname+'.'+this.__name+'.'+k+'=Conf()\\n'\n elif k[:7]!='_Conf__' :\n #elif k!='_Conf__name' and k!='_Conf__parentname':\n ret += '%s.%s.%s=%s\\n'%(this.__parentname, this.__name, k, repr(i))\n return ret+ret2\n\n @staticmethod\n def showsync(this):\n ret = ''\n ret2 = ''\n tmp = this.__new__(this.__class__)\n tmp.__init__(this)\n this = tmp\n if not this.__parentname and not this.__name:\n for k,i in this.__dict__.items():\n if type(i) == Conf:\n i.__name = k\n tmp = str(i)\n ret2 += tmp\n if tmp == '':\n ret2+= k+'=Conf()\\n'\n elif k =='_Conf__sync' :\n ret += '%s=%s\\n'%(str(k),repr(i))\n elif not this.__parentname and this.__name:\n for k,i in this.__dict__.items():\n if type(i) == Conf:\n i.__name = k\n i.__parentname = '%s'%(this.__name)\n tmp = str(i)\n ret2 += tmp\n if tmp == '':\n ret2+= this.__name+'.'+k+'=Conf()\\n'\n elif k =='_Conf__sync' :\n ret += '%s.%s=%s\\n'%(this.__name, k, repr(i))\n else :\n for k,i in this.__dict__.items():\n if type(i) == Conf:\n i.__name = k\n i.__parentname = '%s.%s'%(this.__parentname, this.__name)\n tmp = str(i)\n ret2 += tmp\n if tmp == '':\n ret2+= this.__parentname+'.'+this.__name+'.'+k+'=Conf()\\n'\n elif k =='_Conf__sync' :\n ret += '%s.%s.%s=%s\\n'%(this.__parentname, this.__name, k, repr(i))\n return ret+ret2\n\n @staticmethod\n def show(this, name):\n this.__name = name\n print(this)\n\n\n @staticmethod\n def update(this, a):\n if type(a) == dict:\n tmp = this.__new__(this.__class__)\n tmp.__fromdict(a)\n a = tmp\n if type(a) == Conf:\n for k,i in a.__dict__.items():\n if type(i) == Conf:\n if k in this :\n if type(this[k]) == Conf:\n Conf.update(this[k], i)\n continue\n this[k] = i\n else:\n print('Conf can only update from Conf/dict')\n errrrrrrrrrrrrrrrrrrrr()\n\n def __add__(this, a):\n if type(a) != Conf:\n print('Conf can only add Conf')\n errrrrrrrrrrrrrrrrrrrr()\n return\n merge = this.__new__(this.__class__)\n merge.__init__(this)\n for k,i in a.__dict__.items():\n if k not in merge:\n merge[k] = i\n elif type(i) == Conf and type(merge[k]) == Conf:\n merge[k] = merge[k] + i\n else:\n merge[k] = i\n return merge\n\n def __setitem__(this,i,v):\n super(Conf, this).__setitem__(i,v)\n if i[:7] != '_Conf__':\n if this.__sync:\n this.__dosync()\n else:\n if type(v).__name__ == 
'instancemethod':\n object.__setattr__(this, '_Conf__sync', 1)\n v(this)\n elif type(v).__name__ == 'function':\n object.__setattr__(this, '_Conf__sync', 1)\n v(this)\n elif type(v).__name__ == 'method':\n object.__setattr__(this, '_Conf__sync', 1)\n v(this)\n\n\n def __setattr__(this,i,v):\n super(Conf, this).__setattr__(i,v)\n if i[:7] != '_Conf__':\n if this.__sync:\n this.__dosync()\n else:\n #if i == 'sync_skill':\n # print(type(v).__name__)\n if type(v).__name__ == 'instancemethod':\n object.__setattr__(this, '_Conf__sync', 1)\n v(this)\n elif type(v).__name__ == 'function':\n object.__setattr__(this, '_Conf__sync', 1)\n v(this)\n elif type(v).__name__ == 'method':\n object.__setattr__(this, '_Conf__sync', 1)\n v(this)\n\n def __call__(this, a):\n if type(a) == this.__class__:\n Conf.update(this, a)\n elif type(a) == dict:\n Conf.update(this, a)\n\n\n# all method in conf will be sync funtion, so use [function] to set a config to function\n def __dosync(this):\n func = []\n for k,i in this.__dict__.items():\n if type(i).__name__ == 'instancemethod':\n func.append(i)\n elif type(i).__name__ == 'function':\n func.append(i)\n elif type(i).__name__ == 'method':\n func.append(i)\n for i in func:\n i(this)\n\n\nclass Test(object):\n def d1(this,c):\n print('d1')\n #print(c)\n\n\nif __name__ == '__main__':\n def test(c):\n print('test')\n print(c)\n\n\n t = Test()\n\n a = Conf()\n b = Conf()\n\n\n a.a.a = 'aa'\n a.a.c = 'ac'\n a.a.sync = t.d1\n\n print(a)\n exit()\n\n b.a.a = 'ba'\n b.a.b = 'bb'\n a.a.a = 'change'\n","sub_path":"core/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":10787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"116573656","text":"\"\"\"\n test script to display working UI used the class that the UI is based\n\"\"\"\n\nimport sys\nfrom PySide2.QtWidgets import QApplication, QDialog, QMainWindow\nfrom Ui_PreferencesDialog import Ui_PreferencesDialog\nfrom Ui_SearchTextDialog import Ui_SearchTextDialog\n\n#class MainWindow(QMainWindow):\n# def __init__(self):\n# super().__init__()\n# self.ui = Ui_MainWindow()\n# self.ui.setupUi(self)\n\nclass PreferencesDialog(QDialog):\n def __init__(self):\n super().__init__()\n self.ui = Ui_PreferencesDialog()\n self.ui.setupUi(self)\n\nclass SearchTextDialog(QDialog):\n def __init__(self):\n super().__init__()\n self.ui = Ui_SearchTextDialog()\n self.ui.setupUi(self)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n\n window = PreferencesDialog()\n window.show()\n\n sys.exit(app.exec_())\n","sub_path":"MKVBatchMultiplex/ui/displayUI.py","file_name":"displayUI.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"75798513","text":"\"\"\"\n* user: VR437653\n* fname: LUCA\n* lname: PESERICO\n* task: conta_multipli\n* score: 0.0\n* date: 2018-12-05 12:47:04.084390\n\"\"\"\n#!/usr/bin/env python3\n# Template per soluzione conta_multipli\n\n# Devi modificare l'implementazione di questa funzione per fare \n# quanto richiesto dal testo dell'esercizio\ndef conta_multipli(a, b, c):\n div=0 \n n=1 \n while n<=c:\n if n%a==0:\n div=div+1 \n if n%b==0:\n div=div-1\n n=n+1\n return div\n \n# Lettura input: non devi modificare il codice sotto questa riga\na, b, c = map(int, input().split())\nprint(a)\nprint(b)\nprint(c)\nprint(conta_multipli(a, b, 
c))\n","sub_path":"2018.12.05.provetta/all-CMS-submissions-2018-12-05/2018-12-05.12:47:04.084390.VR437653.conta_multipli.py","file_name":"2018-12-05.12:47:04.084390.VR437653.conta_multipli.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"5800112","text":"# -*- coding: utf-8 -*-\nimport decimal\n\n\nclass RepositoryBase(object):\n def __init__(self, conn):\n self.conn = conn\n self.cursor = self.conn.cursor()\n\n def session(self):\n return self.cursor\n\n def commit(self):\n self.conn.commit()\n\n def create(self, item):\n self.commit()\n item.id = self.session().lastrowid\n return item\n\n def save(self, item):\n self.commit()\n if self.session().rowcount:\n return True\n return False\n\n\nclass MerchantRepository(RepositoryBase):\n def create(self, merchant):\n self.session().execute('''\n INSERT OR REPLACE INTO merchants (name, address, id)\n VALUES ( ?, ?, (\n SELECT id FROM merchants WHERE name=? AND address=?\n )\n )\n ''', ( merchant.name, merchant.address,\n merchant.name, merchant.address,))\n return super(MerchantRepository, self).create(merchant)\n\n\nclass SaleRepository(RepositoryBase):\n def create(self, sale):\n self.session().execute(\n 'INSERT INTO sales VALUES (?, ?, ?, ?, ?, ?, ?)',\n (None, sale.upload.id, sale.merchant.id, sale.purchaser_name,\n sale.description, decimal.Decimal(sale.unit_price), sale.count)\n )\n return super(SaleRepository, self).create(sale)\n\n\nclass UploadRepository(RepositoryBase):\n def create(self, upload):\n from datetime import datetime\n if not upload.created_at:\n upload.created_at = datetime.now()\n\n self.session().execute(\n 'INSERT INTO uploads VALUES (?, ?, ?)',\n (None, decimal.Decimal(upload.total), upload.created_at)\n )\n return super(UploadRepository, self).create(upload)\n\n def save(self, upload):\n self.session().execute(\n 'UPDATE uploads SET total=?, created_at=? 
WHERE id=?',\n (decimal.Decimal(upload.total), upload.created_at, upload.id)\n )\n return super(UploadRepository, self).save(upload)\n\n\ndef init_db(conn):\n cur = conn.cursor()\n\n create_merchants = \"\"\"\n CREATE TABLE IF NOT EXISTS merchants (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n name VARCHAR(100),\n address VARCHAR(200));\n \"\"\"\n cur.execute(create_merchants)\n\n create_uploads = \"\"\"\n CREATE TABLE IF NOT EXISTS uploads (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n total DECIMAL(5,2),\n created_at DATETIME);\n \"\"\"\n cur.execute(create_uploads)\n\n create_sales = \"\"\"\n CREATE TABLE IF NOT EXISTS sales (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n upload_id INTEGER,\n merchant_id INTEGER,\n purchaser_name VARCHAR(200),\n description VARCHAR(200),\n unit_price DECIMAL(5,2),\n count INTEGER);\n \"\"\"\n cur.execute(create_sales)\n\n conn.commit()\n","sub_path":"desafio_standard_library/repositories.py","file_name":"repositories.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"410360112","text":"import pandas as pd\nfrom datetime import datetime\n\ndataSourcePath = '数据源.xlsx'\nbookNameStr = '书名'\npressStr = '出版社'\npriceStr = '定价'\ndiscountStr = '折扣'\nstockStr = '库存'\nISBNStr = 'ISBN'\ncountStr = '数量'\ncommentStr = '备注'\nlowestDiscountStr = '最高折扣'\nhighestDiscountStr = '最低折扣'\nlowestDiscountVendorStr = '最高折扣供应商'\nhighestDiscountVendorStr = '最低折扣供应商'\n\n\nclass SplitMode():\n NotSplit = 1 # 优先选择库存够折扣最低的供应商\n\n\nschoolType = ['小学', '初中', '高中']\n\n\ndef getTimeString():\n return datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S\")\n\n\ndef getVendorList(headers):\n vendors = []\n for header in headers:\n if not header.find(discountStr) == -1:\n vendor = header.split('_')[0]\n vendors.append(vendor)\n return vendors\n\n\ndef getVendorStockStr(vendor):\n return vendor+'_'+stockStr\n\n\ndef getVendorDiscountStr(vendor):\n return vendor+'_'+discountStr\n\n\ndef dataSource2Excel(dataSource, changer, path):\n dataSource.to_excel(path+'/数据源.xlsx', index=None)\n dataSource.to_excel(path+'/数据源_'+getTimeString() + '_' +\n changer+'.xlsx', index=None)\n\n\ndef readVendorData(filePath):\n fileData = pd.read_excel(filePath)\n if not '库存' in fileData:\n fileData['库存'] = 0\n data = fileData[[bookNameStr, ISBNStr,\n pressStr, priceStr, discountStr, stockStr]]\n return data\n\n\ndef readDataSource(filePath):\n fileData = pd.read_excel(filePath)\n return fileData\n\n\ndef readSchoolOrder(filePath):\n data = pd.read_excel(filePath)\n return data\n\n\ndef getFilePath(file):\n flag = '/'\n index = file.rfind(flag)\n path = file[0: index]\n return path\n\n\ndef mergeVendorData(vendorName, vendorFilePath, dataSourcePath):\n path = getFilePath(dataSourcePath)\n vendorData = readVendorData(vendorFilePath)\n dataSource = readDataSource(dataSourcePath)\n\n vendorStock = vendorName+'_'+stockStr\n vendorDiscount = vendorName+'_'+discountStr\n\n dataSource[vendorStock] = 0\n dataSource[vendorDiscount] = 0\n\n # tempData = readDataSource(dataSourcePath)\n tempData = dataSource.drop(index=dataSource.index)\n dataSource[vendorStock] = 0\n dataSource[vendorDiscount] = 0\n\n for vendorItem in vendorData.iterrows():\n vendorBook = vendorItem[1]\n exist = False\n for index, item in dataSource.iterrows():\n if item['ISBN'] == vendorBook['ISBN']:\n exist = True\n item[vendorStock] = vendorBook[stockStr]\n item[vendorDiscount] = vendorBook[discountStr]\n dataSource.iloc[index] = item\n break\n if not exist:\n new_row = 
{bookNameStr: vendorBook[bookNameStr], ISBNStr: vendorBook[ISBNStr],\n pressStr: vendorBook[pressStr], priceStr: vendorBook[priceStr],\n vendorStock: vendorBook[stockStr], vendorDiscount: vendorBook[discountStr]}\n tempData = tempData.append(new_row, ignore_index=True)\n dataSource = dataSource.append(tempData, ignore_index=True)\n dataSource2Excel(dataSource, vendorName, path)\n\n\ndef mergeMultiVendorsData(vendorFiles, dataSourcePath):\n for vendorFile in vendorFiles:\n vendor = vendorFile.split('.')[0].split('/').pop()\n mergeVendorData(vendor, vendorFile, dataSourcePath)\n return True\n\n\ndef splitSchoolOrder(schoolName, orderFilePath, dataSourcePath,\n mode=SplitMode.NotSplit):\n path = getFilePath(dataSourcePath)\n schoolData = readSchoolOrder(orderFilePath)\n dataSource = readDataSource(dataSourcePath)\n vendorDict = {}\n vendorList = getVendorList(dataSource.columns.tolist())\n for vendor in vendorList:\n vendorDict[vendor] = schoolData.drop(index=schoolData.index)\n vendorDict[vendor][discountStr] = 0\n\n notFound = schoolData.drop(index=schoolData.index)\n notFound[commentStr] = ''\n\n for schoolIndex, order in schoolData.iterrows():\n if order[countStr] == 0 or pd.isnull(order[countStr]):\n continue\n\n for sourceIndex, source in dataSource.iterrows():\n if not order[ISBNStr] == source[ISBNStr]:\n continue\n\n finalVendor = ''\n for vendor in vendorList:\n if source[vendor+'_'+stockStr] >= order[countStr]:\n if finalVendor == '':\n finalVendor = vendor\n if source[vendor+'_'+discountStr] < source[finalVendor+'_'+discountStr]:\n finalVendor = vendor\n if not finalVendor == '':\n order[discountStr] = source[finalVendor+'_'+discountStr]\n source[finalVendor+'_'+stockStr] = source[finalVendor +\n '_'+stockStr] - order[countStr]\n dataSource.iloc[sourceIndex] = source\n vendorDict[finalVendor] = vendorDict[finalVendor].append(\n order, ignore_index=True)\n else:\n order[commentStr] = '没有库存充足的供应商'\n notFound = notFound.append(order, ignore_index=True)\n\n time = getTimeString()\n if not notFound.empty:\n notFound.to_excel(path+'/'+schoolName + '_库存不足_' +\n time+'.xlsx', index=None)\n\n vendorOrders = vendorDict.items()\n for vendor, order in vendorOrders:\n if not order.empty:\n order.to_excel(path+'/'+schoolName + '_' +\n vendor+'_'+time+'.xlsx', index=None)\n\n dataSource2Excel(dataSource, schoolName, path)\n\n return True\n\n\ndef generateSchoolSupplyList(schoolName, schoolTypes, copyCount, dataSourcePath=dataSourcePath):\n path = getFilePath(dataSourcePath)\n\n dataSource = readDataSource(dataSourcePath)\n vendorList = getVendorList(dataSource.columns.tolist())\n\n dataSource[countStr] = copyCount\n dataSource[lowestDiscountVendorStr] = ''\n dataSource[lowestDiscountStr] = 0\n dataSource[highestDiscountVendorStr] = ''\n dataSource[highestDiscountStr] = 0\n\n dropRows = []\n for index, item in dataSource.iterrows():\n isRightType = False\n for type in schoolTypes:\n if item[type] == 1:\n isRightType = True\n break\n\n if not isRightType:\n dropRows.append(index)\n continue\n\n lowestVendor = ''\n hightestVendor = ''\n for vendor in vendorList:\n if item[getVendorStockStr(vendor)] < copyCount or pd.isnull(item[getVendorStockStr(vendor)]):\n continue\n if lowestVendor == '':\n lowestVendor = vendor\n else:\n if item[getVendorDiscountStr(lowestVendor)] > item[getVendorDiscountStr(vendor)]:\n lowestVendor = vendor\n if hightestVendor == '':\n hightestVendor = vendor\n else:\n if item[getVendorDiscountStr(hightestVendor)] < item[getVendorDiscountStr(vendor)]:\n hightestVendor = 
vendor\n if lowestVendor == '':\n dropRows.append(index)\n continue\n\n item[lowestDiscountVendorStr] = lowestVendor\n item[lowestDiscountStr] = item[getVendorDiscountStr(lowestVendor)]\n item[highestDiscountVendorStr] = hightestVendor\n item[highestDiscountStr] = item[getVendorDiscountStr(hightestVendor)]\n dataSource.iloc[index] = item\n\n for vendor in vendorList:\n del dataSource[getVendorDiscountStr(vendor)]\n del dataSource[getVendorStockStr(vendor)]\n\n dataSource = dataSource.drop(dropRows)\n\n dataSource.to_excel(path+'/学校书目_'+schoolName + '_' +\n getTimeString() + '.xlsx', index=None)\n return True\n\n\n# mergeVendorData('供应商3', '供应商.xlsx')\n# splitSchoolOrder('啦啦小学', '学校订单.xlsx')\n# generateSchoolSupplyList('啦啦小学', [SchoolType.primarySchool], 3)\nprint(type(1))\n","sub_path":"bookManager.py","file_name":"bookManager.py","file_ext":"py","file_size_in_byte":8041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"627004465","text":"#!/bin/python\nfrom befh.clients.zmq import ZmqClient\nfrom befh.clients.csv import FileClient\nfrom befh.clients.mysql import MysqlClient\nfrom befh.clients.sqlite import SqliteClient\nfrom befh.market_data import L2Depth, Trade, Snapshot\nfrom datetime import datetime\nfrom threading import Lock\n\nclass ExchangeGateway:\n ############################################################################\n # Static variable\n # Applied on all gateways whether to record the timestamp in local machine,\n # rather than exchange timestamp given by the API\n is_local_timestamp = False\n ############################################################################\n\n \"\"\"\n Exchange gateway\n \"\"\"\n def __init__(self, api_socket, db_clients=[]):\n \"\"\"\n Constructor\n :param exchange_name: Exchange name\n :param exchange_api: Exchange API\n :param db_client: Database client\n \"\"\"\n self.db_clients = db_clients\n self.api_socket = api_socket\n self.lock = Lock()\n self.exch_snapshot_id = 0\n self.date_time = datetime.utcnow().date()\n\n @classmethod\n def get_exchange_name(cls):\n \"\"\"\n Get exchange name\n :return: Exchange name string\n \"\"\"\n return ''\n\n def get_instmt_snapshot_table_name(self, exchange, instmt_name):\n \"\"\"\n Get instmt snapshot\n :param exchange: Exchange name\n :param instmt_name: Instrument name\n \"\"\"\n #return 'exch_' + exchange.lower() + '_' + instmt_name.lower() + \\\n # '_snapshot_' + datetime.utcnow().strftime(\"%Y%m%d\")\n return 'exch_' + exchange.lower() + '_' + instmt_name.lower() + \\\n '_snapshot_' + self.date_time.strftime(\"%Y%m%d\")\n\n @classmethod\n def get_snapshot_table_name(cls):\n return 'exchanges_snapshot'\n\n @classmethod\n def is_allowed_snapshot(cls, db_client):\n return not isinstance(db_client, FileClient)\n\n @classmethod\n def is_allowed_instmt_record(cls, db_client):\n return not isinstance(db_client, ZmqClient)\n\n @classmethod\n def init_snapshot_table(cls, db_clients):\n for db_client in db_clients:\n db_client.create(cls.get_snapshot_table_name(),\n Snapshot.columns(),\n Snapshot.types(),\n [0,1], is_ifnotexists=True)\n\n def init_instmt_snapshot_table(self, instmt):\n table_name = self.get_instmt_snapshot_table_name(instmt.get_exchange_name(),\n instmt.get_instmt_name())\n\n instmt.set_instmt_snapshot_table_name(table_name)\n\n for db_client in self.db_clients:\n db_client.create(table_name,\n ['id'] + Snapshot.columns(False),\n ['int'] + Snapshot.types(False),\n [0], is_ifnotexists=True)\n\n if isinstance(db_client, (MysqlClient, 
SqliteClient)):\n with self.lock:\n r = db_client.execute('select max(id) from {};'.format(table_name))\n db_client.conn.commit()\n if r:\n res = db_client.cursor.fetchone()\n max_id = res['max(id)'] if isinstance(db_client, MysqlClient) else res[0]\n if max_id:\n self.exch_snapshot_id = max_id\n else:\n self.exch_snapshot_id = 0\n\n def start(self, instmt):\n \"\"\"\n Start the exchange gateway\n :param instmt: Instrument\n :return List of threads\n \"\"\"\n return []\n\n def get_instmt_snapshot_id(self, instmt):\n with self.lock:\n self.exch_snapshot_id += 1\n\n return self.exch_snapshot_id\n\n def insert_order_book(self, instmt):\n \"\"\"\n Insert order book row into the database client\n :param instmt: Instrument\n \"\"\"\n # If local timestamp indicator is on, assign the local timestamp again\n if self.is_local_timestamp:\n instmt.get_l2_depth().date_time = datetime.utcnow().strftime(\"%Y%m%d %H:%M:%S.%f\")\n\n # Update the snapshot\n if instmt.get_l2_depth() is not None:\n id = self.get_instmt_snapshot_id(instmt)\n for db_client in self.db_clients:\n if self.is_allowed_snapshot(db_client):\n db_client.insert(table=self.get_snapshot_table_name(),\n columns=Snapshot.columns(),\n types=Snapshot.types(),\n values=Snapshot.values(instmt.get_exchange_name(),\n instmt.get_instmt_name(),\n instmt.get_l2_depth(),\n Trade() if instmt.get_last_trade() is None else instmt.get_last_trade(),\n Snapshot.UpdateType.ORDER_BOOK),\n primary_key_index=[0,1],\n is_orreplace=True,\n is_commit=True)\n\n if self.is_allowed_instmt_record(db_client):\n db_client.insert(table=instmt.get_instmt_snapshot_table_name(),\n columns=['id'] + Snapshot.columns(False),\n types=['int'] + Snapshot.types(False),\n values=[id] +\n Snapshot.values('',\n '',\n instmt.get_l2_depth(),\n Trade() if instmt.get_last_trade() is None else instmt.get_last_trade(),\n Snapshot.UpdateType.ORDER_BOOK),\n is_commit=True)\n\n def insert_trade(self, instmt, trade):\n \"\"\"\n Insert trade row into the database client\n :param instmt: Instrument\n \"\"\"\n # If the instrument is not recovered, skip inserting into the table\n if not instmt.get_recovered():\n return\n\n # If local timestamp indicator is on, assign the local timestamp again\n if self.is_local_timestamp:\n trade.date_time = datetime.utcnow().strftime(\"%Y%m%d %H:%M:%S.%f\")\n\n date_time = datetime.strptime(trade.date_time, \"%Y%m%d %H:%M:%S.%f\").date()\n if date_time != self.date_time:\n self.date_time = date_time\n self.init_instmt_snapshot_table(instmt)\n\n # Set the last trade to the current one\n instmt.set_last_trade(trade)\n\n # Update the snapshot\n if instmt.get_l2_depth() is not None and \\\n instmt.get_last_trade() is not None:\n id = self.get_instmt_snapshot_id(instmt)\n for db_client in self.db_clients:\n is_allowed_snapshot = self.is_allowed_snapshot(db_client)\n is_allowed_instmt_record = self.is_allowed_instmt_record(db_client)\n if is_allowed_snapshot:\n db_client.insert(table=self.get_snapshot_table_name(),\n columns=Snapshot.columns(),\n values=Snapshot.values(instmt.get_exchange_name(),\n instmt.get_instmt_name(),\n instmt.get_l2_depth(),\n instmt.get_last_trade(),\n Snapshot.UpdateType.TRADES),\n types=Snapshot.types(),\n primary_key_index=[0,1],\n is_orreplace=True,\n is_commit=not is_allowed_instmt_record)\n\n if is_allowed_instmt_record:\n db_client.insert(table=instmt.get_instmt_snapshot_table_name(),\n columns=['id'] + Snapshot.columns(False),\n types=['int'] + Snapshot.types(False),\n values=[id] +\n Snapshot.values('',\n '',\n 
instmt.get_l2_depth(),\n instmt.get_last_trade(),\n Snapshot.UpdateType.TRADES),\n is_commit=True)\n","sub_path":"befh/exchanges/gateway.py","file_name":"gateway.py","file_ext":"py","file_size_in_byte":8989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"394180141","text":"\n\nfrom xai.brain.wordbase.nouns._sunroof import _SUNROOF\n\n#calss header\nclass _SUNROOFS(_SUNROOF, ):\n\tdef __init__(self,): \n\t\t_SUNROOF.__init__(self)\n\t\tself.name = \"SUNROOFS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"sunroof\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_sunroofs.py","file_name":"_sunroofs.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"367701915","text":"from app import app, db\nimport os\n\nfrom flask import Flask, request, render_template, jsonify, send_from_directory\n\nfrom app.models import *\nimport time\nimport simplejson\n\nfrom app.config import staticdir\n\nfrom app.util.makeDrink import *\n\n#import pigpio\n\n@app.route(\"/\")\ndef hello():\n #print(os.path.join(staticdir, 'html','index.html'))\n\n #Create Dispenser objects\n return send_from_directory(os.path.join(staticdir, 'html'),'index.html')\n\n@app.route(\"/drink\")\ndef toggle_pump():\n\n drinks = Drink.query.all()\n\n return (json.dumps([d.to_dict() for d in drinks]))\n\n@app.route(\"/makeDrinkID\", methods=['POST'])\ndef makeDrinkID():\n\n #Get POST request json data\n jsonData = request.get_json()\n #get drink ID from jsonData in request\n drinkID = jsonData['drink_id']\n #get drink from database using id\n drink = Drink.query.get(drinkID)\n #get drink recipe\n recipe = [ di.to_dict() for di in drink.recipe]\n #call drink making utility function\n makeDrinkRecipe(recipe) \n\n return(request.data)\n\n@app.route(\"/makeDrinkCustom\", methods=['POST'])\ndef makeDrinkCustom():\n\n #Get POST request json data\n jsonData = request.get_json()\n #get recipe from jsonData\n recipe = jsonData['recipe']\n\n print(recipe)\n\n makeDrinkRecipe(recipe)\n\n return (request.data)\n\n\n@app.route(\"/setDispensers\", methods=['POST'])\ndef setDispensers():\n\n #print(request.data)\n\n jsonData = request.get_json()\n\n print(jsonData)\n\n #Clear dispensers ingredients from database\n for dispenser in jsonData['dispensers']:\n d = Dispenser.query.filter_by(id=dispenser['id']).first()\n d.ingredient = None\n db.session.add(d)\n db.session.commit()\n\n #write new ingredients to dispensers\n for dispenser in jsonData['dispensers']:\n d = Dispenser.query.filter_by(id=dispenser['id']).first()\n i = Ingredient.query.filter_by(id=dispenser['ingredient_id']).first()\n d.ingredient = i\n print(d)\n\n db.session.add(d)\n db.session.commit()\n\n return (request.data)\n\n@app.route(\"/getDispensers\", methods=['GET'])\ndef getDispensers():\n\n dispensers = Dispenser.query.all()\n print(dispensers)\n\n return (json.dumps([d.to_dict() for d in dispensers]))\n\n@app.route(\"/getIngredients\", methods=['GET'])\ndef getIngredients():\n\n ingredients = Ingredient.query.all()\n print(ingredients)\n\n return (json.dumps([i.to_dict() for i in ingredients])) ","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"137084211","text":"# Copyright (c) 2020, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the 
\"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport time\nimport argparse\nimport confluent_kafka as ck\nfrom distributed import Client\nfrom dask_cuda import LocalCUDACluster\n\n\ndef create_dask_client():\n print(\"Creating local cuda cluster as no dask scheduler is provided.\")\n cluster = LocalCUDACluster()\n client = Client(cluster)\n print(client)\n return client\n\n\ndef kafka_sink(producer_conf, output_topic, parsed_df):\n producer = ck.Producer(producer_conf)\n json_str = parsed_df.to_json(orient=\"records\", lines=True)\n json_recs = json_str.split(\"\\n\")\n for json_rec in json_recs:\n producer.produce(output_topic, json_rec)\n producer.flush()\n\ndef calc_benchmark(processed_data, size_per_log):\n # Calculates benchmark for the streamz workflow\n t1 = int(round(time.time() * 1000))\n t2 = 0\n size = 0.0\n batch_count = 0\n # Find min and max time while keeping track of batch count and size\n for result in processed_data:\n (ts1, ts2, result_size) = (result[1], result[2], result[3])\n if ts1 == 0 or ts2 == 0:\n continue\n batch_count = batch_count + 1\n t1 = min(t1, ts1)\n t2 = max(t2, ts2)\n size += result_size * size_per_log\n time_diff = t2 - t1\n throughput_mbps = size / (1024.0 * time_diff) if time_diff > 0 else 0\n avg_batch_size = size / (1024.0 * batch_count) if batch_count > 0 else 0\n return (time_diff, throughput_mbps, avg_batch_size)\n\n\ndef parse_arguments():\n # Establish script arguments\n parser = argparse.ArgumentParser(\n description=\"Streamz and Dask. \\\n Data will be read from the input kafka topic, \\\n processed using clx streamz workflows.\"\n )\n parser.add_argument(\"-b\", \"--broker\", default=\"localhost:9092\", help=\"Kafka broker\")\n parser.add_argument(\n \"-i\", \"--input_topic\", default=\"input\", help=\"Input kafka topic\"\n )\n parser.add_argument(\n \"-o\", \"--output_topic\", default=\"output\", help=\"Output kafka topic\"\n )\n parser.add_argument(\"-g\", \"--group_id\", default=\"streamz\", help=\"Kafka group ID\")\n parser.add_argument(\"-m\", \"--model\", help=\"Model filepath\")\n parser.add_argument(\"-l\", \"--label_map\", help=\"Label map filepath\")\n parser.add_argument(\n \"--max_batch_size\",\n default=1000,\n type=int,\n help=\"Max batch size to read from kafka\",\n )\n parser.add_argument(\"--poll_interval\", type=str, help=\"Polling interval (ex: 60s)\")\n parser.add_argument(\n \"--benchmark\",\n help=\"Captures benchmark, including throughput estimates, with provided avg log size in KB. 
(ex: 500 or 0.1)\",\n type=float,\n )\n args = parser.parse_args()\n return args","sub_path":"examples/streamz/python/clx_streamz_tools/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"591892235","text":"import json\nimport sys\nfrom KratosMultiphysics import *\nimport KratosSwimmingDEM as script\nimport ProjectParameters as pp\nimport DEM_explicit_solver_var as DEM_parameters\n\nvarying_parameters = dict()\n\nirregular_mesh_sizes = []#[0.1, 0.2, 0.4]\nregular_mesh_n_points = [10, 20, 40]\nderivatives_types = [1, 3, 4, 5, 6, 7]\ncombinations_that_failed = []\nerrors = []\nfor size in irregular_mesh_sizes + regular_mesh_n_points:\n varying_parameters['size_parameter'] = size\n for derivatives_type in derivatives_types:\n varying_parameters['material_acceleration_calculation_type'] = derivatives_type\n varying_parameters['laplacian_calculation_type'] = derivatives_type\n parameters = Parameters(json.dumps(varying_parameters))\n import ethier_benchmark_algorithm\n with script.Solution(ethier_benchmark_algorithm, parameters) as test:\n try:\n test.Run()\n except:\n error = sys.exc_info()\n errors.append(error)\n combinations_that_failed.append({'size':size, 'type':derivatives_type})\n\nprint()\nprint('****************************************')\n\nif len(combinations_that_failed):\n print('The following combinations produced an error:')\n print()\n for combination, error in zip(combinations_that_failed, errors):\n print(combination)\n print(error)\nelse:\n print('All combinations run without errors')\nprint('****************************************')\n","sub_path":"applications/swimming_DEM_application/python_scripts/cellular_flow/run_ethier_benchmark.py","file_name":"run_ethier_benchmark.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"200906033","text":"# Ionic calulator\n\"\"\" this will hopefully calculate what the outcome of a neutralisation reaction will be when the user selects an acid and base following text prompts.\"\"\"\nprint(\"initalising neutralisation calculator v0.4.1\")\nprint(\"loading database\")\n# list of all possible ingrediants in the reaction\nacids = [\"hydrochloric acid\", \"sulfuric acid\", \"nitric acid\"]\n# because a base is a combination of a metal and an something else I'm going to split the base in too two parts\n\nbasestart = [\"ammonium\", \"sodium\", \"magnesium\", \"potassium\", \"silver\", \"lithium\", \"calcium\", \"copper\", \"lead\", \"iron(2)\", \"berrylium\", \"zinc\", \"barium\", \"aluminium\", \"iron(3)\"]\nbaseend = [\"oxide\", \"hydroxide\", \"carbonate\"]\n\n\n\"\"\" these are the possible salts that can be formed in the neutralisation process\"\"\"\nsalts = [\"chloride\", \"sulfate\", \"nitrate\"]\n\n\"\"\" the reaction also forms byproducts such as water and carbon dioxide depending on the non metalic part of the base used\"\"\"\nbyproducts = [\"water\", \"water and carbon dioxide\", \"water and carbon dioxide\"]\n\n# now the code finds out what acid and base the user wants to combine and determines the results\nprint(\"database bootup successfull... \")\ntutorial = input(\"would you like a list of all the available acids and bases? 
if you would then type 'yes' \")\ntry:\n tutorial.lower()\n tutorial.strip()\nexcept ValueError:\n # I put this 'nothing' variable in here because otherwise my IDLE had major issues running an empty except statement\n nothing = \"do nothing\"\nif tutorial == \"yes\":\n print(\"\"\"this is a list of available acids: {}\n Bases are comprised of\n two main parts, the first\n part can be made from any of\n these: {} and the second part\n can be made from any of these {}\"\"\".format(acids, basestart, baseend))\nprint(\"opening command line...\")\nwhile True:\n acidused = input(\"What acid is being used?\"\n )\n acidused.lower()\n if acidused in acids[0] or acidused in acids[1] or acidused in acids[2]:\n print(\"acid successfully retrieved\")\n # determines what the salt will be formed bsaed on the acid used\n if acidused in acids[0]: # hydrochloric\n salt = salts[0] # chloride\n elif acidused in acids[1]: # sulfuric\n salt = salts[1] # sulfate\n else: # it must be nitric acid\n salt = salts[1] # nitrate\n\n else:\n print(\"acid retrieval failed, please try again\")\n continue\n baseused = input(\"what base is being used?\"\n )\n baseused.lower()\n basecomponents = baseused.split(\" \")\n # because the first part of an acid is always a metal. and that metal remains unchanged throughout the reaction i'm simply going to take the first part of the base (the metal) and put it in a variable called 'metal'\n\n metal = basecomponents[0]\n\n \"\"\"this while loop finds out what number the base is within our list and then determines what this will cause the reation to do based on a list called 'byproducts' and stores that information in the variable 'byproduct' \"\"\"\n i = 0\n while i != 4:\n if i == 4:\n print(\"Error 605 base retrevial failed at attempt no.{} ... reason: base not in database\".format(i))\n if basecomponents[1] in baseend[i]:\n byproduct = byproducts[i]\n i = 4\n else:\n print(\"Error 404 base not found, attempt {} commencing\".format(i + 1))\n i += 1\n\n print(\"running tests, collecting results. printing results\")\n\n # joins it all together to show the final reaction\n print(\"{} + {} -> {} {} + {}\".format(acidused, baseused, metal, salt, byproduct))\n # asking the user if they want to try again\n repeat = input(\"would you like to continue? 
please type 't' or 'f'\")\n if repeat == \"t\":\n continue\n else:\n print(\"shutting down\")\n break\n","sub_path":"ionicCalculator.py","file_name":"ionicCalculator.py","file_ext":"py","file_size_in_byte":3876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"63048714","text":"import json\nimport xmljson\nfrom lxml.etree import parse\nimport glob\nimport subprocess\nimport codecs\nimport os\nimport time\nimport re\nfrom multiprocessing import Pool\nfrom multiprocessing import Process\n\nfor path in glob.glob(\"/Users/okada-toshiki/Library/Preferences/StepMania 5/LocalProfiles/00000000/Stats.xml\"):\n with open(path) as f:\n xmllines = f.readlines()\n xmlline_num = 0\n score = 0\n\n for xmlline in xmllines:\n if '' in xmlline:\n score = int(re.sub(r'\\D', '', xmllines[xmlline_num + 1]))\n print(score)\n if \"Failed\" not in xmlline:\n if score == 1000000:\n xmlline = \"Tier01\\n\"\n elif score <= 1000000 and score >= 990000:\n xmlline = \"Tier02\\n\"\n elif score <= 9900000 and score >= 950000:\n xmlline = \"Tier03\\n\"\n elif score <= 9500000 and score >= 900000:\n xmlline = \"Tier04\\n\"\n elif score <= 9000000 and score >= 890000:\n xmlline = \"Tier05\\n\"\n elif score <= 8900000 and score >= 850000:\n xmlline = \"Tier06\\n\"\n elif score <= 8500000 and score >= 800000:\n xmlline = \"Tier07\\n\"\n elif score <= 8000000 and score >= 790000:\n xmlline = \"Tier08\\n\"\n elif score <= 7900000 and score >= 750000:\n xmlline = \"Tier09\\n\"\n elif score <= 7500000 and score >= 700000:\n xmlline = \"Tier10\\n\"\n elif score <= 7000000 and score >= 690000:\n xmlline = \"Tier11\\n\"\n elif score <= 6900000 and score >= 650000:\n xmlline = \"Tier12\\n\"\n elif score <= 6500000 and score >= 600000:\n xmlline = \"Tier13\\n\"\n elif score <= 6000000 and score >= 590000:\n xmlline = \"Tier14\\n\"\n elif score <= 5900000 and score >= 550000:\n xmlline = \"Tier15\\n\"\n else:\n xmlline = \"Tier16\\n\"\n\n xmlline_num += 1\n print(xmlline, file=codecs.open(\n \"/Users/okada-toshiki/Library/Preferences/StepMania 5/LocalProfiles/00000000/Stats_new.xml\", 'a', 'utf-8'), end='')\n","sub_path":"rewrite_grade_in_stats.py","file_name":"rewrite_grade_in_stats.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"564175092","text":"from selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom konlpy.tag import Kkma\nfrom konlpy.utils import pprint\nimport json\nimport requests\n\nkkma = Kkma()\npath = \"C:/Users/jangc/Desktop/chromedriver.exe\"\n\ndriver = webdriver.Chrome(path)\ndriver.get('https://news.naver.com/')\nelem = driver.find_element_by_xpath(\"//*[@id='lnb']/ul/li[12]/a\").click()\nelem = driver.find_element_by_xpath('//*[@id=\"wrap\"]/table/tbody/tr/td[2]/div/div[4]/div/a').click()\nelem = driver.find_element_by_xpath(\n '//*[@id=\"wrap\"]/table/tbody/tr/td[2]/div/div[4]/ol/li[1]/div[2]/div[1]/a').click()\n\nhtml = driver.page_source\nsoup = BeautifulSoup(html, 'html.parser')\ntest_title = soup.select('#articleTitle')\ntest_p = soup.select('#articleBodyContents')\nmake_sen = test_p[0].text\nhello = make_sen.split('\\n')\n# print(\"#\"*40 +'제목'+\"#\"*40)\n# print(test_title[0].text)\n# print(\"#\"*40 + '내용'+\"#\"*40)\n# print(make_sen)\n# print(hello)\nprint(\"형태소 프린트\")\n# pprint(kkma.nouns(make_sen))\nprint(len(kkma.nouns(make_sen)))\n\n# elem.clear()\n# elem.send_keys(\"\")\n# elem.submit()\n# assert \"No results found.\" not in 
driver.page_source\n# print(elem)\ndriver.close()\n","sub_path":"crawler/crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"133740790","text":"import json\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom net_utils import run_lstm, col_name_encode\n\n\nclass GroupPredictor(nn.Module):\n def __init__(self, N_word, N_h, N_depth, gpu):\n super(GroupPredictor, self).__init__()\n self.N_h = N_h\n self.gpu = gpu\n\n self.q_lstm = nn.LSTM(input_size=N_word+N_word, hidden_size=N_h/2,\n num_layers=N_depth, batch_first=True,\n dropout=0.3, bidirectional=True)\n\n self.col_lstm = nn.LSTM(input_size=N_word, hidden_size=N_h/2,\n num_layers=N_depth, batch_first=True,\n dropout=0.3, bidirectional=True)\n\n self.gby_num_h = nn.Linear(N_h, N_h)\n self.gby_num_l = nn.Linear(N_h, N_h)\n self.gby_num_out = nn.Sequential(nn.Tanh(), nn.Linear(N_h, 4))\n\n self.gby_att = nn.Linear(N_h, N_h)\n self.gby_out_K = nn.Linear(N_h, N_h)\n self.gby_out_col = nn.Linear(N_h, N_h)\n self.gby_out = nn.Sequential(nn.Tanh(), nn.Linear(N_h, 1))\n\n self.hv_att = nn.Linear(N_h, N_h)\n self.hv_out_q = nn.Linear(N_h, N_h)\n self.hv_out_c = nn.Linear(N_h, N_h)\n self.hv_out = nn.Sequential(nn.Tanh(), nn.Linear(N_h, 2)) #for having/none\n\n self.q_att = nn.Linear(N_h, N_h)\n self.col_out_q = nn.Linear(N_h, N_h)\n self.col_out_c = nn.Linear(N_h, N_h)\n self.col_out = nn.Sequential(nn.Tanh(), nn.Linear(N_h, 1))\n\n self.agg_att = nn.Linear(N_h, N_h)\n self.agg_out_q = nn.Linear(N_h, N_h)\n self.agg_out_c = nn.Linear(N_h, N_h)\n self.agg_out = nn.Sequential(nn.Tanh(), nn.Linear(N_h, 6)) #to 5\n\n self.op_att = nn.Linear(N_h, N_h)\n self.op_out_q = nn.Linear(N_h, N_h)\n self.op_out_c = nn.Linear(N_h, N_h)\n self.op_out = nn.Sequential(nn.Tanh(), nn.Linear(N_h, 12)) #to 5\n\n self.softmax = nn.Softmax() #dim=1\n self.CE = nn.CrossEntropyLoss()\n self.log_softmax = nn.LogSoftmax()\n self.mlsml = nn.MultiLabelSoftMarginLoss()\n self.bce_logit = nn.BCEWithLogitsLoss()\n self.sigm = nn.Sigmoid()\n if gpu:\n self.cuda()\n\n def forward(self, q_emb_var, q_len, col_emb_var, col_len, x_type_emb_var):\n max_q_len = max(q_len)\n max_col_len = max(col_len)\n B = len(q_len)\n\n x_emb_concat = torch.cat((q_emb_var, x_type_emb_var), 2)\n q_enc, _ = run_lstm(self.q_lstm, x_emb_concat, q_len)\n col_enc, _ = run_lstm(self.col_lstm, col_emb_var, col_len)\n\n # Predict group column number\n gby_num_att = torch.bmm(col_enc, self.gby_num_h(q_enc).transpose(1, 2))\n for idx, num in enumerate(col_len):\n if num < max_col_len:\n gby_num_att[idx, num:, :] = -100\n for idx, num in enumerate(q_len):\n if num < max_q_len:\n gby_num_att[idx, :, num:] = -100\n\n gby_num_att_val = self.softmax(gby_num_att.view((-1, max_q_len))).view(B, -1, max_q_len)\n gby_num_K = (q_enc.unsqueeze(1) * gby_num_att_val.unsqueeze(3)).sum(2).sum(1)\n gby_num_score = self.gby_num_out(self.gby_num_l(gby_num_K))\n\n # Predict the group by columns\n gby_att_val = torch.bmm(col_enc, self.gby_att(q_enc).transpose(1, 2))\n for idx, num in enumerate(q_len):\n if num < max_q_len:\n gby_att_val[idx, :, num:] = -100\n gby_att = self.softmax(gby_att_val.view((-1, max_q_len))).view(B, -1, max_q_len)\n K_gby_expand = (q_enc.unsqueeze(1) * gby_att.unsqueeze(3)).sum(2)\n gby_score = self.gby_out(self.gby_out_K(K_gby_expand) + \\\n self.gby_out_col(col_enc)).squeeze()\n\n for idx, num in 
enumerate(col_len):\n if num < max_col_len:\n gby_score[idx, num:] = -100\n\n # Predict Having\n hv_att_val = torch.bmm(col_enc, self.hv_att(q_enc).transpose(1, 2))\n for idx, num in enumerate(col_len):\n if num < max_col_len:\n hv_att_val[idx, num:, :] = -100\n for idx, num in enumerate(q_len):\n if num < max_q_len:\n hv_att_val[idx, :, num:] = -100\n\n hv_att_prob = self.softmax(hv_att_val.view((-1, max_q_len))).view(B, -1, max_q_len)\n hv_weighted = (q_enc.unsqueeze(1) * hv_att_prob.unsqueeze(3)).sum(2).sum(1)\n hv_score = self.hv_out(self.hv_out_q(hv_weighted))\n\n # Predict columns.\n att_val_qc = torch.bmm(col_enc, self.q_att(q_enc).transpose(1, 2))\n for idx, num in enumerate(q_len):\n if num < max_q_len:\n att_val_qc[idx, :, num:] = -100\n att_prob_qc = self.softmax(att_val_qc.view((-1, max_q_len))).view(B, -1, max_q_len)\n # q_weighted: (B, max_col_len, hid_dim)\n q_weighted = (q_enc.unsqueeze(1) * att_prob_qc.unsqueeze(3)).sum(2)\n # Compute prediction scores\n # self.col_out.squeeze(): (B, max_col_len)\n col_score = self.col_out(self.col_out_q(q_weighted) + self.col_out_c(col_enc)).squeeze()\n for idx, num in enumerate(col_len):\n if num < max_col_len:\n col_score[idx, num:] = -100\n # Predict aggregation\n agg_att_val = torch.bmm(col_enc, self.agg_att(q_enc).transpose(1, 2))\n for idx, num in enumerate(col_len):\n if num < max_col_len:\n agg_att_val[idx, num:, :] = -100\n for idx, num in enumerate(q_len):\n if num < max_q_len:\n agg_att_val[idx, :, num:] = -100\n agg_att = self.softmax(agg_att_val.view((-1, max_q_len))).view(B, -1, max_q_len)\n # q_weighted_num: (B, hid_dim)\n q_weighted_agg = (q_enc.unsqueeze(1) * agg_att.unsqueeze(3)).sum(2).sum(1)\n # self.col_num_out: (B, 4)\n agg_score = self.agg_out(self.agg_out_q(q_weighted_agg))\n\n\n # Predict op\n op_att_val = torch.matmul(col_enc, self.agg_att(q_enc).transpose(1, 2))\n for idx, num in enumerate(q_len):\n if num < max_q_len:\n op_att_val[idx, :, num:] = -100\n op_att = self.softmax(op_att_val.view(-1, max_q_len)).view(B, -1, max_q_len)\n q_weighted_op = (q_enc.unsqueeze(1) * op_att.unsqueeze(3)).sum(2).sum(1)\n\n op_score = self.op_out(self.op_out_q(q_weighted_op))\n\n score = (gby_num_score, gby_score, hv_score, col_score, agg_score, op_score)\n\n return score\n","sub_path":"third_party/spider/baselines/typesql/scripts/model/modules/group_predict.py","file_name":"group_predict.py","file_ext":"py","file_size_in_byte":6396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"231051892","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.metrics import roc_auc_score\nfrom keras.models import Model\nfrom keras.layers import Input, Dense, Embedding, SpatialDropout1D, concatenate, CuDNNGRU\nfrom keras.layers import GRU, Bidirectional, GlobalAveragePooling1D, GlobalMaxPooling1D\nfrom keras.preprocessing import text, sequence\nfrom keras.callbacks import Callback\nfrom toxic.train_utils import train_folds\nfrom tools.pickle_tools import TokenizerSaver\nimport tensorflow as tf\nfrom tools import string_tools\nimport warnings\nimport os\nimport config as C\nimport string\nimport logging\n\nnp.random.seed(42)\nwarnings.filterwarnings('ignore')\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\n\n# os.environ['OMP_NUM_THREADS'] = '4'\n\n# EMBEDDING_FILE = '/data/yuchen/w2v/fasttext-crawl-300d-2m/crawl-300d-2M.vec'\n\nTRAIN_CORPUS = 'train_corpus'\nMODEL = 'model'\n\nEMBEDDING_FILE = C.EMBEDDING_FILE\nTRAIN_PATH = C.TRAIN_PATH\n# ROOT_PATH = 
'/Users/kouminquan/Workspaces/IBM/dataset'\n# test_path = '{}/bank_test.csv'.format(root_path)\n# submission = '{}/submission.csv'.format(root_path)\n\nPREDICATE_FIELDS = C.Y\n\ntrain = pd.read_csv(TRAIN_PATH)\n# test = pd.read_csv(test_path)\n# submission = pd.read_csv(submission)\n\nlogging.info('read csv finished!')\n\nX_train = train[C.X].fillna(string.whitespace).values\n# X_train = [string_tools.filter_unimportant(x) for x in X_train]\n\n# predicate_fields =[\"toxic\", \"severe_toxic\", \"obscene\", \"threat\", \"insult\", \"identity_hate\"]\n\nmax_features = C.max_features\n\ny_train = train[PREDICATE_FIELDS].values\n\n\n# X_test = test[\"comment_text\"].fillna(\"fillna\").values\nlogging.info('begin tokenizer')\n\ntokenizer = text.Tokenizer(num_words=max_features)\n# tokenizer.fit_on_texts(list(X_train) + list(X_test))\ntokenizer.fit_on_texts(list(X_train))\nX_train = tokenizer.texts_to_sequences(X_train)\n# X_test = tokenizer.texts_to_sequences(X_test)\nx_train = sequence.pad_sequences(X_train, maxlen=C.MAX_LEN)\n# x_test = sequence.pad_sequences(X_test, maxlen=maxlen)\n\nTokenizerSaver.save(tokenizer, C.TOKENIZER_NAME)\nprint('load tokenizer finish')\n\n\ndef get_coefs(word, *arr): return word, np.asarray(arr, dtype=np.float32)\n\n\nlogging.info('begin read word2vec')\n\nembeddings_index = dict(get_coefs(*o.rstrip().rsplit(' ')) for o in open(EMBEDDING_FILE, encoding='utf-8'))\n\nword_index = tokenizer.word_index\nnb_words = min(max_features, len(word_index))\nembedding_matrix = np.zeros((nb_words, C.EMBED_SIZE))\nprint(len(word_index))\nprint(embedding_matrix.shape)\nfor word, i in word_index.items():\n if i >= max_features: continue\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n try:\n embedding_matrix[i] = embedding_vector\n except IndexError:\n pass\n\nlogging.info('end read word2vec')\n\n\nclass RocAucEvaluation(Callback):\n def __init__(self, validation_data=(), interval=1):\n super(Callback, self).__init__()\n\n self.interval = interval\n self.X_val, self.y_val = validation_data\n\n def on_epoch_end(self, epoch, logs={}):\n if epoch % self.interval == 0:\n y_pred = self.model.predict(self.X_val, verbose=0)\n score = roc_auc_score(self.y_val, y_pred)\n print(\"\\n ROC-AUC - epoch: %d - score: %.6f \\n\" % (epoch + 1, score))\n\n\n# class EarlyStopWithRocAuc(Callback):\n# def __init__(self, moniter='val_loss', ):\n\n\ndef get_model():\n inp = Input(shape=(C.MAX_LEN,))\n x = Embedding(max_features, C.EMBED_SIZE, weights=[embedding_matrix])(inp)\n x = SpatialDropout1D(0.2)(x)\n x = Bidirectional(CuDNNGRU(320, return_sequences=True))(x)\n avg_pool = GlobalAveragePooling1D()(x)\n max_pool = GlobalMaxPooling1D()(x)\n conc = concatenate([avg_pool, max_pool])\n outp = Dense(len(PREDICATE_FIELDS), activation=\"sigmoid\")(conc)\n\n model = Model(inputs=inp, outputs=outp)\n model.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n return model\n\n\nmodel = get_model()\nbatch_size = 1024\nepochs = 20\nearly_stop = 5\n\n# X_tra, X_val, y_tra, y_val = train_test_split(x_train, y_train, train_size=0.95, random_state=233)\n# RocAuc = RocAucEvaluation(validation_data=(X_val, y_val), interval=1)\n\nmodels, scores = train_folds(x_train, y_train, epochs,\n fold_count=1, batch_size=batch_size,\n get_model_func=get_model, evaluation='f1', early_stop=early_stop)\n# hist = model.fit(X_tra, y_tra, batch_size=batch_size, epochs=epochs, validation_data=(X_val, y_val),\n# callbacks=[RocAuc], verbose=2)\n\nmodel = models[0]\ngraph = 
tf.get_default_graph()\nmodel.save(C.MODEL_NAME)\n\n\n# y_pred = model.predict(x_test, batch_size=1024)\n# submission[predicate_fields] = y_pred\n# submission.to_csv('bank_submission.csv', index=False)\n\n","sub_path":"gru_pooled.py","file_name":"gru_pooled.py","file_ext":"py","file_size_in_byte":4801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"19408695","text":"def listsum(numList):\n if len(numList) == 1:\n return numList[0]\n else:\n return numList[0] + listsum(numList[1:])\n\ndef best_profit(mylist):\n a = len(mylist)\n j = 1\n temp_list = []\n for i in range(0, a, 2):\n\n temp_list.append(mylist[j] - mylist[i])\n\n j = j+2\n c = listsum(temp_list)\n return c\n\nprint(\"Give your inputs\")\na = [int(x) for x in input().split(',')]\nprint(\"The maximum Profit =\", best_profit(a))\n","sub_path":"running.py","file_name":"running.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"326308821","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3351)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/pyams_catalog/utils.py\n# Compiled at: 2020-02-21 06:54:32\n# Size of source mod 2**32: 3903 bytes\n\"\"\"PyAMS_catalog.utils module\n\"\"\"\nimport logging, transaction\nfrom ZODB.interfaces import IBroken\nfrom hypatia.interfaces import ICatalog\nfrom zope.interface import Interface\nfrom zope.intid.interfaces import IIntIds\nfrom zope.keyreference.interfaces import NotYet\nfrom pyams_catalog.interfaces import INoAutoIndex\nfrom pyams_site.site import site_factory\nfrom pyams_utils.adapter import adapter_config\nfrom pyams_utils.container import find_objects_providing\nfrom pyams_utils.interfaces import ICacheKeyValue\nfrom pyams_utils.registry import get_utility, query_utility, set_local_registry\n__docformat__ = 'restructuredtext'\nLOGGER = logging.getLogger('PyAMS (catalog)')\n\n@adapter_config(context=ICatalog, provides=ICacheKeyValue)\ndef catalog_key_adapter(obj):\n \"\"\"Catalog key value adapter\"\"\"\n return str(frozenset(obj))\n\n\ndef index_object(obj, catalog='', ignore_notyet=False):\n \"\"\"Index given object into catalog\"\"\"\n LOGGER.debug('Indexing object {0!r}'.format(obj))\n intids = query_utility(IIntIds)\n if intids is not None:\n try:\n object_id = intids.register(obj)\n except NotYet:\n if not ignore_notyet:\n raise\n else:\n if isinstance(catalog, str):\n catalog = query_utility(ICatalog, name=catalog)\n if catalog is not None:\n catalog.index_doc(object_id, obj)\n\n\ndef reindex_object(obj, catalog=''):\n \"\"\"Reindex given object into catalog\"\"\"\n LOGGER.debug('Re-indexing object {0!r}'.format(obj))\n intids = query_utility(IIntIds)\n if intids is not None:\n object_id = intids.queryId(obj)\n if object_id is not None:\n if isinstance(catalog, str):\n catalog = query_utility(ICatalog, name=catalog)\n if catalog is not None:\n catalog.reindex_doc(object_id, obj)\n\n\ndef unindex_object(obj, catalog=''):\n \"\"\"Unindex given object from catalog\"\"\"\n LOGGER.debug('Un-indexing object {0!r}'.format(obj))\n intids = query_utility(IIntIds)\n if intids is not None:\n object_id = intids.queryId(obj)\n if object_id is not None:\n if isinstance(catalog, str):\n catalog = query_utility(ICatalog, name=catalog)\n if catalog is not None:\n catalog.unindex_doc(object_id)\n\n\ndef index_site(request, autocommit=True):\n \"\"\"Index 
all site contents in internal catalog\"\"\"\n application = site_factory(request)\n if application is not None:\n try:\n set_local_registry(application.getSiteManager())\n catalog = get_utility(ICatalog)\n catalog.reset()\n transaction.savepoint()\n intids = get_utility(IIntIds)\n for index, document in enumerate(find_objects_providing(application, Interface)):\n if INoAutoIndex.providedBy(document):\n pass\n else:\n if IBroken.providedBy(document):\n print('Skipping broken object: {0!r}'.format(document))\n else:\n print('Indexing: {0!r}'.format(document))\n catalog.reindex_doc(intids.register(document), document)\n if not index % 100:\n transaction.savepoint()\n\n finally:\n set_local_registry(None)\n\n if autocommit:\n transaction.commit()\n return application","sub_path":"pycfiles/pyams_catalog-1.0.2-py3.5/utils.cpython-35.py","file_name":"utils.cpython-35.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"587940039","text":"import requests\nfrom bs4 import BeautifulSoup\nimport argparse\n\n\ndef create_link(year, month):\n months = ['january', 'february', 'march', 'april', 'may', 'june', 'july',\n 'august', 'september', 'october', 'november', 'december']\n month_string = months[month - 1]\n if month_string == 'january':\n month_decreased = '12'\n link = 'https://www.smashingmagazine.com/{year_decreased}/{month_decreased}'\\\n '/desktop-wallpaper-calendars-{month_string}-{year}/'.format(year_decreased=year - 1,\n month_string=month_string,\n year=year,\n month_decreased=month_decreased)\n else:\n month_decreased = str(month-1).zfill(2)\n link = 'https://www.smashingmagazine.com/{year}/{month_decreased}'\\\n '/desktop-wallpaper-calendars-{month_string}-{year}/'.format(month_string=month_string,\n year=year,\n month_decreased=month_decreased)\n return link\n\n\ndef get_picture_links(link, resolution):\n content = requests.get(link).text\n soup = BeautifulSoup(content, \"lxml\")\n tags = soup.find_all(lambda tag: tag.name == \"a\" and\n tag.get('href') and\n ('.jpg' in tag.get(\"href\") or '.png' in tag.get(\"href\")) and\n resolution == tag.text)\n links = [tag['href'] for tag in tags]\n return links\n\n\ndef get_arguments():\n parser = argparse.ArgumentParser(description='Enter year, month and resolution to download wallpapers.')\n parser.add_argument('--year', metavar='type int', type=int, nargs=1,\n help='year')\n parser.add_argument('--month', metavar='type int', type=int, nargs=1,\n help='month')\n parser.add_argument('--resolution', metavar='...x... format. example: 320x480', type=str, nargs=1,\n help='resolution of wallpapers')\n arguments = vars(parser.parse_args())\n year = int(arguments['year'][0])\n month = int(arguments['month'][0])\n resolution = arguments['resolution'][0]\n if month > 12 or month < 1:\n print('Incorrect month!')\n exit()\n return year, month, resolution\n\n\ndef download_pictures(picture_links):\n print('Downloading...')\n for link in picture_links:\n file_name = link.split('/')[-1]\n pic = requests.get(link)\n open('./{}'.format(file_name), 'wb').write(pic.content)\n print('{} downloaded'.format(file_name))\n print('Download complete! 
If there are no files, then check the correctness of the parameters')\n\n\nif __name__ == '__main__':\n year, month, resolution = get_arguments()\n link = create_link(year, month)\n picture_links = get_picture_links(link, resolution)\n download_pictures(picture_links)","sub_path":"task2/downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"251612838","text":"class Solution:\n def uncommonFromSentences(self, s1: str, s2: str) -> List[str]:\n s1arr = s1.split()\n s2arr = s2.split()\n res = []\n \n words = set(s1arr + s2arr)\n for word in words:\n if word in s1arr and word not in s2arr:\n if s1arr.count(word) == 1:\n res.append(word)\n elif word in s2arr and word not in s1arr:\n if s2arr.count(word) == 1:\n res.append(word)\n else:\n continue\n \n return res","sub_path":"leetcodePython/0884.py","file_name":"0884.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"236254534","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n# Generated file, DO NOT EDIT\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass NotificationEventField(Model):\n \"\"\"NotificationEventField.\n\n :param field_type: Gets or sets the type of this field.\n :type field_type: :class:`NotificationEventFieldType `\n :param id: Gets or sets the unique identifier of this field.\n :type id: str\n :param name: Gets or sets the name of this field.\n :type name: str\n :param path: Gets or sets the path to the field in the event object. This path can be either Json Path or XPath, depending on if the event will be serialized into Json or XML\n :type path: str\n :param supported_scopes: Gets or sets the scopes that this field supports. 
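The uncommonFromSentences record above re-counts every candidate with list.count inside a loop, which is quadratic in the sentence length. A sketch of the usual linear-time variant built on collections.Counter; a word is uncommon exactly when it appears once across both sentences combined:

```python
from collections import Counter

def uncommon_from_sentences(s1: str, s2: str) -> list:
    # Appearing exactly once in total means: once in one sentence,
    # zero times in the other -- the same condition the record tests.
    counts = Counter(s1.split()) + Counter(s2.split())
    return [word for word, n in counts.items() if n == 1]

assert sorted(uncommon_from_sentences("this apple is sweet",
                                      "this apple is sour")) == ["sour", "sweet"]
```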
If not specified then the event type scopes apply.\n    :type supported_scopes: list of str\n    \"\"\"\n\n    _attribute_map = {\n        'field_type': {'key': 'fieldType', 'type': 'NotificationEventFieldType'},\n        'id': {'key': 'id', 'type': 'str'},\n        'name': {'key': 'name', 'type': 'str'},\n        'path': {'key': 'path', 'type': 'str'},\n        'supported_scopes': {'key': 'supportedScopes', 'type': '[str]'}\n    }\n\n    def __init__(self, field_type=None, id=None, name=None, path=None, supported_scopes=None):\n        super(NotificationEventField, self).__init__()\n        self.field_type = field_type\n        self.id = id\n        self.name = name\n        self.path = path\n        self.supported_scopes = supported_scopes\n","sub_path":"vsts/vsts/notification/v4_0/models/notification_event_field.py","file_name":"notification_event_field.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"432860410","text":"import bs4\nimport requests\n\n\nclass scraper:\n\n\tdef __init__(self):\n\n\t\tself.departments = []\n\t\tself.subjects = []\n\n\t\t# make sure the website doesn't think I'm a bot\n\t\theaders = {\n\t\t\t'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36',\n\t\t}\n\n\t\t# get the site\n\t\tres = requests.get('http://www.cape.ucsd.edu/responses/Results.aspx?Name=&CourseNumber=CSE', headers=headers)\n\n\t\t# check for errors\n\t\tres.raise_for_status()\n\n\t\t# parse the html\n\t\thtml = bs4.BeautifulSoup(res.text, 'html.parser')\n\n\t\tpre_list = html.find_all('option')\n\n\t\tfor department in pre_list:\n\t\t\tunabbreviated = department.text.strip()\n\t\t\tabbreviation = ''\n\n\t\t\tfor character in unabbreviated:\n\n\t\t\t\tif (character.isspace()):\n\n\t\t\t\t\tbreak\n\n\t\t\t\tabbreviation += character\n\n\t\t\tself.departments.append(abbreviation)\n\n\t\tdel self.departments[0]\n\n\t\tres = requests.get('https://blink.ucsd.edu/instructors/courses/schedule-of-classes/subject-codes.html', headers=headers)\n\n\t\t# check for errors\n\t\tres.raise_for_status()\n\n\t\thtml = bs4.BeautifulSoup(res.text, 'html.parser')\n\n\t\tdirty_list = html.find_all('td')\n\n\t\tfor even in range(0, len(dirty_list)):\n\n\t\t\tif even % 2 == 0:\n\t\t\t\telem2add = dirty_list[even]\n\t\t\t\tself.subjects.append(elem2add.text.strip())\n\n\t\t# print(self.subjects)\n","sub_path":"cape_eval_app/cape_app/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"430390307","text":"#!/usr/bin/python3\n'''Places API view'''\nimport json\nfrom flask import Flask, jsonify, abort, request, make_response\nfrom models import storage\nfrom api.v1.views import app_views\nfrom models.place import Place\nfrom models.user import User\nfrom models.city import City\n\n\n@app_views.route('/cities/<city_id>/places', methods=['GET'],\n                 strict_slashes=False)\ndef get_places(city_id):\n    '''Get places'''\n    city = storage.get('City', city_id)\n    if city is None:\n        abort(404)\n    j_list = []\n    for k, v in storage.all('Place').items():\n        if v.city_id == city.id:\n            j_list.append(v.to_dict())\n    return make_response(jsonify(j_list))\n\n\n@app_views.route('/places/<place_id>', methods=['GET'], strict_slashes=False)\ndef get_place_id(place_id):\n    '''Get place by id'''\n    place = storage.get('Place', place_id)\n    if place is None:\n        abort(404)\n    return make_response(jsonify(place.to_dict()))\n\n\n@app_views.route('/places/<place_id>', methods=['DELETE'],\n                 
strict_slashes=False)\ndef del_place_id(place_id):\n    '''Delete place by id'''\n    place = storage.get('Place', place_id)\n    if place is None:\n        abort(404)\n    place.delete()\n    return make_response(jsonify({}), 200)\n\n\n@app_views.route('/cities/<city_id>/places', methods=['POST'],\n                 strict_slashes=False)\ndef post_place(city_id):\n    '''Create a place'''\n    r = request.get_json()\n    if r is None:\n        return make_response(jsonify({\"error\": \"Not a JSON\"}), 400)\n    elif 'user_id' not in r:\n        return make_response(jsonify({\"error\": \"Missing user_id\"}), 400)\n    elif 'name' not in r:\n        return make_response(jsonify({\"error\": \"Missing name\"}), 400)\n    if 'City.' + city_id in storage.all('City'):\n        if 'User.' + r['user_id'] in storage.all('User'):\n            new = Place(**r)\n            new.save()\n        else:\n            abort(404)\n    else:\n        abort(404)\n    return make_response(jsonify(new.to_dict()), 201)\n\n\n@app_views.route('/places/<place_id>', methods=['PUT'], strict_slashes=False)\ndef update_place(place_id):\n    '''Update a place'''\n    ignore = ['id', 'created_at', 'updated_at', 'user_id', 'city_id']\n\n    data = request.get_json()\n    if data is None:\n        return make_response(jsonify({\"error\": \"Not a JSON\"}), 400)\n\n    place = storage.get('Place', place_id)\n    if place is None:\n        abort(404)\n\n    for k, v in data.items():\n        if k not in ignore:\n            setattr(place, k, v)\n    storage.save()\n    return make_response(jsonify(place.to_dict()), 200)\n","sub_path":"api/v1/views/places.py","file_name":"places.py","file_ext":"py","file_size_in_byte":2564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"633652345","text":"import networkx as nx\nfrom itertools import combinations\n\nclass ProteinGraph(object):\n    def __init__(self, protein_complex):\n        self.pc = protein_complex\n\n    def _build_nodes(self):\n        #Ditch existing graph and start over.\n        self.DG = nx.DiGraph()\n        self.DG.add_node(\"S\")\n        self.DG.add_node(\"T\")\n\n        #Create all state nodes.\n        for key in self.pc.residue_variables:\n            self.DG.add_node(key+(\"PROTONATED\",))\n            self.DG.add_node(key+(\"DEPROTONATED\",))\n\n    def update_graph(self):\n        \"\"\"Build a new graph based on the state of self.pc\"\"\"\n        self._build_nodes()\n\n        #Create edges going in and out of S and T.\n        for key, v in self.pc.residue_variables.iteritems():\n            prot_instance = v.instances[\"PROTONATED\"]\n            prot_capacity = prot_instance.energyNF / 2.0\n            prot_node = key+(\"PROTONATED\",)\n\n            deprot_instance = v.instances[\"DEPROTONATED\"]\n            deprot_capacity = deprot_instance.energyNF / 2.0\n            deprot_node = key+(\"DEPROTONATED\",)\n\n            if prot_capacity != 0.0:\n                self.DG.add_edge(\"S\", deprot_node, capacity=prot_capacity)\n                self.DG.add_edge(prot_node, \"T\", capacity=prot_capacity)\n\n            if deprot_capacity != 0.0:\n                self.DG.add_edge(\"S\", prot_node, capacity=deprot_capacity)\n                self.DG.add_edge(deprot_node, \"T\", capacity=deprot_capacity)\n\n        #Create all interaction energy edges.\n        for p, q in combinations(self.pc.residue_variables.iteritems(), 2):\n            p_key, p_residue = p\n            q_key, q_residue = q\n\n            p_prot_instance = p_residue.instances[\"PROTONATED\"]\n            p_prot_node = p_key+(\"PROTONATED\",)\n\n            p_deprot_instance = p_residue.instances[\"DEPROTONATED\"]\n            p_deprot_node = p_key+(\"DEPROTONATED\",)\n\n            q_prot_instance = q_residue.instances[\"PROTONATED\"]\n            q_prot_node = q_key+(\"PROTONATED\",)\n\n            q_deprot_instance = q_residue.instances[\"DEPROTONATED\"]\n            q_deprot_node = q_key+(\"DEPROTONATED\",)\n\n            capacity = self.pc.normalized_interaction_energies[p_deprot_instance, q_deprot_instance] / 2.0\n\n            if 
capacity != 0.0:\n self.DG.add_edge(p_deprot_node, q_prot_node, capacity=capacity)\n self.DG.add_edge(q_deprot_node, p_prot_node, capacity=capacity)\n\n capacity = self.pc.normalized_interaction_energies[p_prot_instance, q_deprot_instance] / 2.0\n\n if capacity != 0.0:\n self.DG.add_edge(p_prot_node, q_prot_node, capacity=capacity)\n self.DG.add_edge(q_deprot_node, p_deprot_node, capacity=capacity)\n\n capacity = self.pc.normalized_interaction_energies[p_deprot_instance, q_prot_instance] / 2.0\n\n if capacity != 0.0:\n self.DG.add_edge(p_deprot_node, q_deprot_node, capacity=capacity)\n self.DG.add_edge(q_prot_node, p_prot_node, capacity=capacity)\n\n capacity = self.pc.normalized_interaction_energies[p_prot_instance, q_prot_instance] / 2.0\n\n if capacity != 0.0:\n self.DG.add_edge(p_prot_node, q_deprot_node, capacity=capacity)\n self.DG.add_edge(q_prot_node, p_deprot_node, capacity=capacity)\n\n def get_cut(self):\n \"\"\"Performs the min cut.\n Returns cut_value, s nodes, t nodes\"\"\"\n cut_value, partition = nx.minimum_cut(self.DG, \"S\", \"T\")\n s_nodes, t_nodes = partition\n\n return cut_value, set(s_nodes), set(t_nodes)\n\n def get_labeling_from_cut(self, s_nodes, t_nodes):\n \"\"\"Creates a map of residues to instances based on the \"\"\"\n labeling = {}\n uncertain = []\n for key, v in self.pc.residue_variables.iteritems():\n prot_node = key+(\"PROTONATED\",)\n deprot_node = key+(\"DEPROTONATED\",)\n\n if prot_node in s_nodes and deprot_node in t_nodes:\n labeling[v] = v.instances[\"PROTONATED\"]\n elif deprot_node in s_nodes and prot_node in t_nodes:\n labeling[v] = v.instances[\"DEPROTONATED\"]\n else:\n #Inconclusive\n uncertain.append(v)\n\n\n\n return labeling, uncertain\n\n\n\n\n\n","sub_path":"python_src/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":4250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"298304036","text":"from authlib.specs.rfc6749 import OAuth2Error\nfrom flask import request, Blueprint, render_template, url_for, redirect, \\\n flash, abort\nfrom flask_babel import _\nfrom flask_login import login_required, current_user\n\nfrom app import oauth_server\nfrom app.decorators import response_headers\nfrom app.exceptions.base import BusinessRuleException\nfrom app.forms import init_form\nfrom app.forms.oauth_forms import OAuthClientForm\nfrom app.oauth_scopes import Scopes\nfrom app.service import oauth_service\n\nblueprint = Blueprint('oauth', __name__, url_prefix='/oauth')\n\n\n@blueprint.route('/authorize', methods=['GET', 'POST'], strict_slashes=False)\n@login_required\n@response_headers({\"X-Frame-Options\": \"SAMEORIGIN\"})\ndef authorize():\n if request.method == 'GET':\n try:\n grant = oauth_server.validate_consent_request(\n end_user=current_user)\n except OAuth2Error as error:\n flash(_(\"OAuth authorize request was invalid. 
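The get_cut method above delegates the whole optimization to networkx's minimum_cut, which returns the cut value together with the two node partitions. A tiny self-contained example of that call on a toy s-t network (capacities standing in for the half-energies stored on each edge):

```python
import networkx as nx

G = nx.DiGraph()
G.add_edge("S", "a", capacity=3.0)
G.add_edge("a", "T", capacity=2.0)
G.add_edge("S", "T", capacity=1.0)

# cut_value equals the max flow; the partition gives the S-side and
# T-side node sets that get_labeling_from_cut inspects above.
cut_value, (s_side, t_side) = nx.minimum_cut(G, "S", "T")
assert cut_value == 3.0  # 2.0 bottleneck through "a" plus 1.0 direct
assert "S" in s_side and "T" in t_side
```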
\") +\n error.get_error_description(), 'danger')\n return redirect(url_for(\"home.home\"))\n\n if grant.client.auto_approve:\n return oauth_server.create_authorization_response(current_user)\n\n if oauth_service.user_has_approved_client(\n user_id=current_user.id, client=grant.client):\n return oauth_server.create_authorization_response(current_user)\n\n kwargs = {'grant': grant,\n 'user': current_user,\n 'scopes': oauth_service.get_scope_descriptions()}\n return render_template('oauth/oauthorize.html', **kwargs)\n\n confirm = request.form.get('confirm', False)\n if confirm:\n # granted by resource owner\n return oauth_server.create_authorization_response(current_user)\n # denied by resource owner\n return oauth_server.create_authorization_response(None)\n\n\n@blueprint.route('/token', methods=['POST'], strict_slashes=True)\ndef issue_token():\n return oauth_server.create_token_response()\n\n\n@blueprint.route('/revoke', methods=['POST'], strict_slashes=True)\ndef revoke_token():\n return oauth_server.create_endpoint_response(\n oauth_service.RevocationEndpoint.ENDPOINT_NAME)\n\n\n@blueprint.route('/introspect', methods=['POST'], strict_slashes=True)\ndef introspect_token():\n return oauth_server.create_endpoint_response(\n oauth_service.IntrospectionEndpoint.ENDPOINT_NAME)\n\n\n@blueprint.route(\"/errors\", methods=['GET'])\ndef errors():\n error = request.args.get(\"error\", _(\"Unknown OAuth error\"))\n error_description = request.args.get(\"error_description\",\n _(\"Please try again\"))\n return render_template('oauth/error.htm',\n error=error, description=error_description)\n\n\n@blueprint.route(\"/clients/\", methods=[\"GET\"])\n@login_required\ndef list_clients():\n owned_clients = oauth_service.get_owned_clients_by_user_id(\n user_id=current_user.id)\n approved_clients = oauth_service.get_approved_clients_by_user_id(\n user_id=current_user.id)\n return render_template('oauth/list_client.htm',\n owned_clients=owned_clients,\n approved_clients=approved_clients)\n\n\n@blueprint.route(\"/clients/revoke//\", methods=['POST'])\ndef revoke_client_token(client_id=None):\n client = oauth_service.revoke_user_tokens_by_client_id(\n user_id=current_user.id, client_id=client_id)\n flash(_(\"Successfully revoked token for client '%s'\" % client.client_name))\n return redirect(url_for(\"oauth.list_clients\"))\n\n\n@blueprint.route(\"/clients/reset//\", methods=[\"POST\"])\ndef reset_client_secret(client_id):\n try:\n oauth_service.reset_client_secret(client_id)\n except BusinessRuleException:\n flash(_(\"Public clients have no secret.\"), 'danger')\n return redirect(url_for(\"oauth.list_clients\"))\n\n\n@blueprint.route(\"/clients/register/\", methods=[\"GET\", \"POST\"])\n@blueprint.route(\"/clients/edit//\", methods=[\"GET\", \"POST\"])\ndef edit(client_id=None):\n # This is temporary until we allow dynamic client registration.\n abort(404)\n\n client = oauth_service.get_client_by_id(client_id=client_id)\n form = init_form(OAuthClientForm, obj=client)\n\n if form.redirect_uri.data is None and client:\n form.redirect_uri.data = ', '.join(client.redirect_uris)\n if form.scopes.data is None and client:\n form.scopes.data = [Scopes[s] for s in client.default_scopes]\n\n if form.validate_on_submit():\n if client:\n oauth_service.update_client(\n client_id=client_id, name=form.name.data,\n description=form.description.data,\n redirect_uri_list=form.redirect_uri.data,\n scopes=form.scopes.data)\n flash(_(\"Successfully updated client '%s'\" % client.name))\n else:\n client = 
oauth_service.create_client(\n user_id=current_user.id,\n name=form.name.data,\n description=form.description.data,\n redirect_uri_list=form.redirect_uri.data,\n scopes=form.scopes.data)\n flash(_(\"Successfully created client '%s'\" % client.name))\n return redirect(url_for(\"oauth.list_clients\"))\n return render_template(\"oauth/register.htm\", form=form)\n","sub_path":"app/views/oauth.py","file_name":"oauth.py","file_ext":"py","file_size_in_byte":5355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"438136682","text":"#!/usr/bin/env python3\nimport os\n\nrootDir = \".\"\n\nprint(\"# Morbergs receptsamling\")\n\nfor dirName, subdirList, fileList in os.walk(rootDir):\n subdirList.sort()\n if \".git\" in subdirList:\n subdirList.remove(\".git\")\n if \".github\" in subdirList:\n subdirList.remove(\".github\")\n if dirName == \".\":\n continue\n category = dirName\n print(\"\\n## {}\\n\".format(category[2:]))\n for fname in sorted(fileList):\n recipeLink = fname\n with open(dirName + \"/\" + fname, \"r\") as f:\n recipeTitle = f.readline().strip(\"#\").strip()\n print(\"* [{}]({}/{})\".format(recipeTitle, category, recipeLink))\n\nprint(\"\")\nprint(\"## [Sous Vide](sous-vide.md)\")\n","sub_path":"create-index.py","file_name":"create-index.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"133806769","text":"#! /bin/python3\n# -*- coding=utf-8 -*-\n\n\n\"\"\"\nfile: recv.py\nsocket service\n\"\"\"\n\n\nimport socket\nimport threading\nimport time\nimport sys\nimport os\nimport struct\n\ndef socket_service():\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.bind(('127.0.0.1', 6666))\n s.listen(10)\n except socket.error as msg:\n print (msg)\n sys.exit(1)\n print ('Waiting connection...')\n\n while 1:\n conn, addr = s.accept()\n t = threading.Thread(target=deal_data, args=(conn, addr))\n t.start()\n\ndef deal_data(conn, addr):\n # print ('Accept new connection from {0}'.format(addr))\n #conn.settimeout(500) \n # str to bytes \n sb = str.encode(\"Hi, Welcome to the server!\")\n conn.send(sb)\n i=0\n while 1:\n print(i)\n i=i+1\n fileinfo_size = struct.calcsize('128sl')\n buf = conn.recv(fileinfo_size)\n if buf:\n \n filename, filesize = struct.unpack('128sl', buf)\n fn = filename.decode('utf8').strip().strip('\\x00')\n new_filename = os.path.join('./', 'new_' + fn)\n # print ('file new name is {0}, filesize is {1}'.format(new_filename, filesize))\n\n fp = open(new_filename, 'wb')\n # print ('start receiving...')\n\n recvd_size = 0 # 定义已接收文件的大小\n while not recvd_size == filesize:\n if filesize - recvd_size > 1024:\n data = conn.recv(1024)\n recvd_size += len(data)\n else:\n data = conn.recv(filesize - recvd_size)\n recvd_size = filesize\n fp.write(data)\n fp.close()\n # print ('end receive...')\n #conn.close()\n #break\n\n\nif __name__ == '__main__':\n socket_service()","sub_path":"socket_service.py","file_name":"socket_service.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"87213022","text":"# -*- coding: utf-8 -*-\r\nfrom sklearn import svm, neighbors, tree\r\nfrom sklearn.linear_model import SGDClassifier\r\nfrom sklearn.neural_network import MLPClassifier\r\nimport random\r\n\r\ndef ml_eval(Y_test, Y_predict):\r\n [TP,FN,FP,TN] = [0,0,0,0]\r\n for i in 
range(len(Y_test)):\r\n exact = Y_test[i]\r\n guess = Y_predict[i]\r\n if exact == 1:\r\n if guess == 1:\r\n TP += 1\r\n else:\r\n FN += 1\r\n else:\r\n if guess == 1:\r\n FP += 1\r\n else:\r\n TN += 1\r\n precision = TP / (TP + FP)\r\n recall = TP / (TP + FN)\r\n F = 2 * precision * recall / (precision + recall)\r\n return [precision, recall, F]\r\n\r\ndef ml_train(model, X_train, X_test, Y_train, Y_test):\r\n # model\r\n if model.startswith('neighbours_uniform'):\r\n clf = neighbors.KNeighborsClassifier(int(model[-1]), weights='uniform')\r\n elif model.startswith('neighbours_distance'):\r\n clf = neighbors.KNeighborsClassifier(int(model[-1]), weights='distance')\r\n elif model == 'svm':\r\n clf = svm.SVC()\r\n elif model == 'tree':\r\n clf = tree.DecisionTreeClassifier()\r\n elif model == 'sgd':\r\n clf = SGDClassifier(loss=\"hinge\", penalty=\"l2\")\r\n elif model == 'neural_network':\r\n clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)\r\n else:\r\n print('unknown model') \r\n # training\r\n clf.fit(X_train, Y_train)\r\n # test\r\n Y_predict = clf.predict(X_test)\r\n # evaluation\r\n return ml_eval(Y_test, Y_predict)\r\n \r\nrepeat = 20\r\nratio_train = 0.75\r\n\r\n# get data\r\nfilename = 'data_Cite2Chatelet.txt'\r\ndata = []\r\nwith open(filename, 'r') as f:\r\n lines = f.read().splitlines()\r\ndata = [line.split(',') for line in lines]\r\n\r\nN = len(data)\r\nnum_train = int(N*ratio_train)\r\n\r\nprint('Data Set size = %d' % N)\r\nprint('Training Set size = %d' % num_train)\r\nprint('Test Set size = %d' % (N-num_train))\r\n\r\nmodelList = ['svm','tree','sgd','neural_network']\r\nfor i in range(1,9):\r\n modelList.append('neighbours_uniform%d'%i)\r\nfor i in range(1,9):\r\n modelList.append('neighbours_distance%d'%i)\r\nfor model in modelList:\r\n scoreList = []\r\n for i in range(repeat):\r\n random.shuffle(data)\r\n \r\n X = [[x for x in map(int,item[:-1])] for item in data]\r\n Y = [int(int(item[-1])>0) for item in data]\r\n \r\n [X_train, X_test] = [X[:num_train], X[num_train:]]\r\n [Y_train, Y_test] = [Y[:num_train], Y[num_train:]]\r\n \r\n [precision, recall, F] = ml_train(model, X_train, X_test, Y_train, Y_test)\r\n scoreList.append([precision, recall, F])\r\n \r\n precision_mean = sum([item[0] for item in scoreList])/repeat\r\n recall_mean = sum([item[1] for item in scoreList])/repeat\r\n F_mean = sum([item[2] for item in scoreList])/repeat\r\n print('=============')\r\n print('Method : %s' % model)\r\n print('Mean of precision %.2f%%' % (precision_mean*100))\r\n print('Mean of recall %.2f%%' % (recall_mean*100))\r\n print('Mean of F %.4f' % F_mean)","sub_path":"Learning.py","file_name":"Learning.py","file_ext":"py","file_size_in_byte":3091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"447700991","text":"#!/usr/bin/env python3\n# MIT License\n#\n# Copyright (c) 2020 FABRIC Testbed\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE 
SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n#\n# Author: Komal Thareja (kthare10@renci.org)\nimport traceback\nfrom datetime import datetime\n\nfrom fabric.actor.core.apis.i_authority_reservation import IAuthorityReservation\nfrom fabric.actor.core.apis.i_broker_reservation import IBrokerReservation\nfrom fabric.actor.core.apis.i_client_reservation import IClientReservation\nfrom fabric.actor.core.apis.i_reservation import IReservation\nfrom fabric.actor.core.core.authority_policy import AuthorityPolicy\nfrom fabric.actor.core.common.exceptions import AuthorityException\nfrom fabric.actor.core.kernel.resource_set import ResourceSet\nfrom fabric.actor.core.plugins.config.config_token import ConfigToken\nfrom fabric.actor.core.apis.i_resource_control import IResourceControl\nfrom fabric.actor.core.time.term import Term\nfrom fabric.actor.core.time.calendar.authority_calendar import AuthorityCalendar\nfrom fabric.actor.core.util.id import ID\nfrom fabric.actor.core.util.reservation_set import ReservationSet\nfrom fabric.actor.core.util.resource_type import ResourceType\n\n\nclass AuthorityCalendarPolicy(AuthorityPolicy):\n \"\"\"\n The base for authority policy implementations\n \"\"\"\n UNSUPPORTED_RESOURCE_TYPE = \"Unsupported resource type: {}\"\n\n def __init__(self):\n \"\"\"\n Creates a new instance.\n \"\"\"\n super().__init__()\n # If true, we will use lazy revocation.\n self.lazy_close = False\n # Resource control objects indexed by guid. \n self.controls_by_guid = {}\n # ResourceControl objects indexed by resource type. \n self.controls_by_resource_type = {}\n # The authority's calendar. A calendar of all requests\n self.calendar = None\n # Says if the actor has been initialized\n self.initialized = False\n\n def __getstate__(self):\n state = self.__dict__.copy()\n del state['logger']\n del state['actor']\n del state['clock']\n del state['initialized']\n\n del state['tickets']\n\n del state['controls_by_resource_type']\n del state['calendar']\n\n return state\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n self.logger = None\n self.actor = None\n self.clock = None\n self.initialized = False\n\n self.tickets = None\n\n self.controls_by_resource_type = {}\n self.restore()\n\n def restore(self):\n \"\"\"\n Custom restore function. 
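The __getstate__/__setstate__ pair in this record is the standard pickle hook for objects holding unpicklable or runtime-only members (logger, actor, clock): drop them when saving, null them when loading, and rebuild them in a separate restore step. A stripped-down sketch of the same pattern, with illustrative names:

```python
import logging
import pickle

class Policy:
    def __init__(self):
        self.state = {"cycle": 7}
        self.logger = logging.getLogger(__name__)  # runtime-only member

    def __getstate__(self):
        d = self.__dict__.copy()
        del d["logger"]        # never serialize runtime-only members
        return d

    def __setstate__(self, d):
        self.__dict__.update(d)
        self.logger = None     # a restore step re-attaches this later

p = pickle.loads(pickle.dumps(Policy()))
assert p.state == {"cycle": 7} and p.logger is None
```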
Invoked during recovering the policy object.\n \"\"\"\n for c in self.controls_by_guid.values():\n try:\n self.register_control_types(control=c)\n except Exception as e:\n raise AuthorityException(\"Cannot restore resource control e:{}\".format(e))\n\n def initialize(self):\n \"\"\"\n initialize the policy\n \"\"\"\n if not self.initialized:\n super().initialize()\n self.calendar = AuthorityCalendar(clock=self.clock)\n self.initialize_controls()\n self.initialized = True\n\n def initialize_controls(self):\n \"\"\"\n Initializes all registered controls.\n @raises Exception in case of error\n \"\"\"\n for control in self.controls_by_guid.values():\n control.set_actor(actor=self.actor)\n control.initialize()\n\n def donate(self, *, resources: ResourceSet):\n super().donate(resources=resources)\n rc = self.get_control_by_type(rtype=resources.get_type())\n if rc is not None:\n rc.donate(resource_set=resources)\n else:\n raise AuthorityException(self.UNSUPPORTED_RESOURCE_TYPE.format(resources.get_type()))\n\n def eject(self, *, resources: ResourceSet):\n code = super().unavailable(resources=resources)\n if code == 0:\n rc = self.get_control_by_type(rtype=resources.get_type())\n if rc is not None:\n code = rc.unavailable(resource_set=resources)\n else:\n raise AuthorityException(self.UNSUPPORTED_RESOURCE_TYPE.format(resources.get_type()))\n return code\n\n def available(self, *, resources: ResourceSet):\n super().available(resources=resources)\n rc = self.get_control_by_type(rtype=resources.get_type())\n if rc is not None:\n rc.available(resource_set=resources)\n else:\n raise AuthorityException(self.UNSUPPORTED_RESOURCE_TYPE.format(resources.get_type()))\n\n def freed(self, *, resources: ResourceSet):\n super().freed(resources=resources)\n rc = self.get_control_by_type(rtype=resources.get_type())\n if rc is not None:\n rc.freed(resource_set=resources)\n else:\n raise AuthorityException(self.UNSUPPORTED_RESOURCE_TYPE.format(resources.get_type()))\n\n def release(self, *, resources: ResourceSet):\n super().release(resources=resources)\n rc = self.get_control_by_type(rtype=resources.get_type())\n if rc is not None:\n rc.release(resource_set=resources)\n else:\n raise AuthorityException(self.UNSUPPORTED_RESOURCE_TYPE.format(resources.get_type()))\n\n def recovery_starting(self):\n super().recovery_starting()\n for c in self.controls_by_guid.values():\n c.recovery_starting()\n\n def revisit(self, *, reservation: IReservation):\n super().revisit(reservation=reservation)\n if isinstance(reservation, IAuthorityReservation):\n self.calendar.add_closing(reservation=reservation, cycle=self.get_close(term=reservation.get_term()))\n approved = reservation.get_approved_resources()\n if approved is None:\n self.logger.debug(\"Reservation has no approved resources. 
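Nearly every hook in this policy follows one dispatch shape: look the resource type up in controls_by_resource_type, forward the call to the matching control, and raise on a miss. That registry pattern in isolation (all names here are illustrative, not the actor API):

```python
class UnsupportedType(Exception):
    pass

class ControlRegistry:
    def __init__(self):
        self._by_type = {}

    def register(self, rtype, control):
        if rtype in self._by_type:
            raise ValueError("type {!r} already registered".format(rtype))
        self._by_type[rtype] = control

    def dispatch(self, rtype, method, *args, **kwargs):
        # Forward the call, mirroring the get_control_by_type + raise idiom.
        control = self._by_type.get(rtype)
        if control is None:
            raise UnsupportedType("Unsupported resource type: {}".format(rtype))
        return getattr(control, method)(*args, **kwargs)

class SiteControl:
    def release(self, rset):
        return "released " + rset

registry = ControlRegistry()
registry.register("vm", SiteControl())
assert registry.dispatch("vm", "release", "rset-1") == "released rset-1"
```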
Nothing is allocated to it.\")\n\n rtype = approved.get_type()\n self.logger.debug(\"Resource type for recovered reservation: \" + rtype)\n control = self.get_control_by_type(rtype=rtype)\n if control is None:\n raise AuthorityException(\"Missing resource control\")\n control.revisit(reservation=reservation)\n\n def recovery_ended(self):\n super().recovery_ended()\n for c in self.controls_by_guid.values():\n c.recovery_ended()\n\n def donate_reservation(self, *, reservation: IClientReservation):\n super().donate_reservation(reservation=reservation)\n rc = self.get_control_by_type(rtype=reservation.get_type())\n if rc is not None:\n rc.donate_reservation(reservation=reservation)\n else:\n raise AuthorityException(self.UNSUPPORTED_RESOURCE_TYPE.format(reservation.get_type()))\n\n def bind(self, *, reservation: IAuthorityReservation) -> bool:\n if isinstance(reservation, IBrokerReservation):\n return super().bind(reservation=reservation)\n # Simple for now: make sure that this is a valid term and do not modify\n # its start/end time and add it to the calendar. If the request came\n # after its start time, but before its end cycle, add it for allocation\n # to lastAllocatedCycle + 1. If it came after its end cycle, throw.\n current_cycle = self.actor.get_current_cycle()\n approved = reservation.get_requested_term()\n start = self.clock.cycle(when=approved.get_new_start_time())\n\n if start <= current_cycle:\n end = self.clock.cycle(when=approved.get_end_time())\n if end <= current_cycle:\n self.error(message=\"The request cannot be redeemed: its term has expired\")\n start = current_cycle + 1\n\n self.calendar.add_request(reservation=reservation, cycle=start)\n close = self.get_close(term=reservation.get_requested_term())\n self.calendar.add_closing(reservation=reservation, cycle=close)\n return False\n\n def extend(self, *, reservation: IReservation, resources: ResourceSet, term: Term):\n # Simple for now: make sure that this is a valid term and do not modify\n # its start/end time and add it to the calendar. If the request came\n # after its start time, but before its end cycle, add it for allocation\n # to lastAllocatedCycle + 1. 
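bind and extend both apply the scheduling rule spelled out in this comment: a request whose start cycle has already passed is deferred to the next cycle, unless its end cycle has passed too, in which case it is rejected. The rule as a pure function (cycle numbers assumed to come from clock.cycle):

```python
def schedule_cycle(start: int, end: int, current: int) -> int:
    """Cycle at which to schedule a redeem/extend request."""
    if start > current:
        return start       # still in the future: honor the requested start
    if end <= current:
        raise ValueError("request cannot be redeemed: its term has expired")
    return current + 1     # started already but not expired: defer one cycle

assert schedule_cycle(start=10, end=20, current=5) == 10
assert schedule_cycle(start=10, end=20, current=12) == 13
```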
If it came after its end cycle, throw an\n # exception.\n\n if resources is not None and term is not None:\n raise AuthorityException(\"Not implemented\")\n current_cycle = self.actor.get_current_cycle()\n approved = reservation.get_requested_term()\n start = self.clock.cycle(when=approved.get_new_start_time())\n\n if start <= current_cycle:\n end = self.clock.cycle(when=approved.get_end_time())\n if end <= current_cycle:\n self.error(message=\"The request cannot be redeemed: its term has expired\")\n start = current_cycle + 1\n\n self.calendar.remove_closing(reservation=reservation)\n self.calendar.add_request(reservation=reservation, cycle=start)\n close = self.get_close(term=reservation.get_requested_term())\n self.calendar.add_closing(reservation=reservation, cycle=close)\n return True\n\n def correct_deficit(self, *, reservation: IAuthorityReservation):\n if reservation.get_resources() is not None:\n rc = self.get_control_by_type(rtype=reservation.get_resources().get_type())\n if rc is not None:\n self.finish_correct_deficit(rset=rc.correct_deficit(reservation=reservation), reservation=reservation)\n else:\n raise AuthorityException(self.UNSUPPORTED_RESOURCE_TYPE.format(reservation.get_type()))\n\n def close(self, *, reservation: IReservation):\n self.calendar.remove_schedule_or_in_progress(reservation=reservation)\n if reservation.get_type() is not None:\n rc = self.get_control_by_type(rtype=reservation.get_type())\n if rc is not None:\n rc.close(reservation=reservation)\n else:\n raise AuthorityException(self.UNSUPPORTED_RESOURCE_TYPE.format(reservation.get_type()))\n\n def closed(self, *, reservation: IReservation):\n if isinstance(reservation, IAuthorityReservation):\n self.calendar.remove_outlay(reservation=reservation)\n\n def remove(self, *, reservation: IReservation):\n raise AuthorityException(\"Not implemented\")\n\n def finish(self, *, cycle: int):\n super().finish(cycle=cycle)\n self.calendar.tick(cycle=cycle)\n\n def assign(self, *, cycle: int):\n try:\n requests = self.get_requests(cycle=cycle)\n self.map_for_cycle(requests=requests, cycle=cycle)\n except Exception as e:\n self.logger.error(\"error in assign: {}\".format(e))\n\n def map_for_cycle(self, *, requests: ReservationSet, cycle: int):\n \"\"\"\n Orders mapper request processing for this cycle.\n \n @params requests The requests for this cycle\n @params cycle The cycle\n @raises Exception in case of error\n \"\"\"\n if requests is None or cycle == 0:\n # self.logger.debug(\"Authority requests for cycle {} = [none]\".format(cycle))\n return\n\n # self.logger.debug(\"Authority requests for cycle {} = {}\".format(cycle, requests))\n\n self.map_shrinking(bids=requests)\n self.map_growing(bids=requests)\n\n def map_shrinking(self, *, bids: ReservationSet):\n \"\"\"\n Maps reservations that are shrinking or staying the same (extending with\n no flex) in this cycle, and removes them from the bid set.\n \n @param bids set of deferred operations for this cycle (non-null)\n @raises Exception in case of error\n \"\"\"\n # self.logger.debug(\"Processing shrinking requests\")\n rids_to_remove = []\n for reservation in bids.values():\n adjust = reservation.get_deficit()\n if adjust > 0:\n continue\n if not reservation.is_terminal() and reservation.is_extending_lease():\n if adjust < 0:\n self.logger.debug(\"**Shrinking reservation by {}:{}\".format(adjust, reservation))\n else:\n self.logger.debug(\"**Extending reservation (no flex): {}\".format(reservation))\n self.map(reservation=reservation)\n 
rids_to_remove.append(reservation.get_reservation_id())\n\n for rid in rids_to_remove:\n bids.remove_by_rid(rid=rid)\n\n def map_growing(self, *, bids: ReservationSet):\n \"\"\"\n Maps reservations that are growing in this cycle (redeems or expanding\n extends), and removes them from the bid set.\n \n @param bids set of deferred operations for this cycle (non-null)\n @throws Exception in case of error\n \"\"\"\n # self.logger.debug(\"Processing growing requests\")\n rids_to_remove = []\n for reservation in bids.values():\n if reservation.is_terminal():\n continue\n adjust = reservation.get_deficit()\n\n if adjust > 0:\n if reservation.is_extending_lease():\n self.logger.debug(\"**Growing reservation by {}:{}\".format(adjust, reservation))\n else:\n self.logger.debug(\"**Redeeming reservation by {}:{}\".format(adjust, reservation))\n self.map(reservation=reservation)\n rids_to_remove.append(reservation.get_reservation_id())\n\n for rid in rids_to_remove:\n bids.remove_by_rid(rid=rid)\n\n def map(self, *, reservation: IAuthorityReservation):\n \"\"\"\n Maps a reservation. Indicates we will approve the request: update its\n expire time in the calendar, and issue a map probe. The map probe will\n result in a retry of the mapper request through bind or\n extend above, which will release the request to the\n associated mapper.\n \n @param reservation: the reservation\n @throws Exception in case of error\n \"\"\"\n assigned = self.assign_reservation(reservation=reservation)\n if assigned is not None:\n approved = reservation.get_requested_term()\n reservation.set_approved(term=approved, approved_resources=assigned)\n reservation.set_bid_pending(value=False)\n else:\n if not reservation.is_terminal():\n self.logger.debug(\"Deferring reservation {} for the next cycle: {}\".format(\n reservation, self.actor.get_current_cycle() + 1))\n self.reschedule(reservation=reservation)\n\n def assign_reservation(self, *, reservation: IAuthorityReservation):\n \"\"\"\n Assign resources for the given reservation\n \n @params reservation\n the request\n @returns a set of resources for the request\n @raises Exception in case of error\n \"\"\"\n rc = self.get_control_by_type(rtype=reservation.get_requested_resources().get_type())\n if rc is not None:\n try:\n return rc.assign(reservation=reservation)\n except Exception as e:\n traceback.print_exc()\n self.logger.error(\"Could not assign {}\".format(e))\n return None\n else:\n raise AuthorityException(self.UNSUPPORTED_RESOURCE_TYPE.format(reservation.get_type()))\n\n def configuration_complete(self, *, action: str, token: ConfigToken, out_properties: dict):\n super().configuration_complete(action=action, token=token, out_properties=out_properties)\n rc = self.get_control_by_type(rtype=token.get_resource_type())\n if rc is not None:\n rc.configuration_complete(action=action, token=token, out_properties=out_properties)\n else:\n raise AuthorityException(self.UNSUPPORTED_RESOURCE_TYPE.format(token.get_resource_type()))\n\n def is_expired(self, *, reservation: IReservation) -> bool:\n \"\"\"\n See if a reservation has expired\n \n @params reservation: reservation\n @return true if the reservation expired; otherwise, return false\n @raises Exception in case of error\n \"\"\"\n now = datetime.utcnow()\n end = reservation.get_term().get_end_time()\n\n return now > end\n\n def reschedule(self, *, reservation: IAuthorityReservation):\n \"\"\"\n Reschedule a reservation into the calendar\n \n @param reservation the reservation\n \"\"\"\n 
self.calendar.remove(reservation=reservation)\n self.calendar.add_request(reservation=reservation, cycle=self.actor.get_current_cycle() + 1)\n\n def get_close(self, *, term: Term) -> int:\n \"\"\"\n Return the cycle when a term closes\n \n @params term: the term\n @returns the cycle of the end of a term\n \"\"\"\n if self.lazy_close:\n return -1\n else:\n return self.clock.cycle(when=term.get_end_time()) + 1\n\n def get_closing(self, *, cycle: int) -> ReservationSet:\n return self.calendar.get_closing(cycle=cycle)\n\n def get_requests(self, *, cycle: int) -> ReservationSet:\n return self.calendar.get_requests(cycle=cycle)\n\n def get_control_by_id(self, *, guid: ID) -> IResourceControl:\n return self.controls_by_guid.get(guid, None)\n\n def get_control_by_type(self, *, rtype: ResourceType) -> IResourceControl:\n return self.controls_by_resource_type.get(rtype, None)\n\n def get_control_types(self):\n \"\"\"\n Returns a reverse map of resource control to resource types. The table is\n indexed by the resource control object and each entry is a linked list of\n resource types.\n \n @returns a table of all of the different control types\n \"\"\"\n result = {}\n for key, value in self.controls_by_resource_type.items():\n if value not in result:\n result[value] = []\n result[value].append(key)\n return result\n\n def register_control(self, *, control: IResourceControl):\n \"\"\"\n Registers the given control for the specified resource type. If the\n policy plugin has already been initialized, the control should be\n initialized.\n \n @param control: the control\n @raises ConfigurationException in case of error\n \"\"\"\n self.register_control_types(control=control)\n self.controls_by_guid[control.get_guid()] = control\n\n def register_control_types(self, *, control: IResourceControl):\n types = control.get_types()\n if types is None or len(types) == 0:\n raise AuthorityException(\"Resource control does not specify any types\")\n for t in types:\n if t is None:\n raise AuthorityException(\"Invalid resource type specified\")\n\n index = 0\n try:\n for rtype in types:\n if rtype in self.controls_by_resource_type:\n raise AuthorityException(\"There is already a control associated with resource type {}\".format(rtype))\n self.controls_by_resource_type[rtype] = control\n index += 1\n except Exception as e:\n j = 0\n for t in types:\n if t in self.controls_by_resource_type:\n self.controls_by_resource_type.pop(t)\n j += 1\n if j == index:\n break\n raise e\n","sub_path":"fabric/actor/core/policy/authority_calendar_policy.py","file_name":"authority_calendar_policy.py","file_ext":"py","file_size_in_byte":20597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"156423533","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\ncreated by me for task2\n\"\"\"\nimport sys\n\n\ndef sum_function(n):\n if n == 0:\n return 0\n return n + sum_function(n - 1)\n\n\ndef main():\n var1 = sum_function(1000)\n print(var1)\n\nif __name__ == '__main__':\n sys.setrecursionlimit(1500)\n main()\n","sub_path":"answers/mforoozani1/Task2.py","file_name":"Task2.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"87340160","text":"from flask import (\n Blueprint,\n session,\n g,\n render_template,\n redirect,\n url_for,\n request,\n)\n\nfrom .index import dao\nfrom .dao import Series\n\nbp_profile = Blueprint('profile', __name__)\n\n\n@bp_profile.before_request\ndef 
before_request():\n    g.user = None\n    if \"user\" in session:\n        g.user = session[\"user\"]\n\n\n@bp_profile.route(\"/user\", methods=[\"GET\"])\ndef user():\n    if g.user:\n        # fetch user-series & user-seasons\n        series_ = dao.list_user_series(name=g.user)\n        seasons_ = dao.list_user_seasons(name=g.user)\n        return render_template(\"user.html\", user=g.user, user_series=series_, user_seasons=seasons_)\n\n    return redirect(url_for(\"index.index\"))\n\n\n@bp_profile.route(\"/user_series/<op>\", methods=[\"POST\"])\ndef user_series(op):\n\n    if op == \"insert\":\n        imdb_id = request.form[\"imdb_id\"]\n        title = request.form[\"title\"]\n        poster = request.form[\"poster\"]\n\n        dao.insert_user_series(g.user, imdb_id, title, poster)\n        _, seasons = Series.select_seasons(title)\n        dao.insert_user_season(g.user, imdb_id, title, seasons)\n\n    elif op == \"delete\":\n        imdb_id = request.form[\"imdb_id\"]\n        title = request.form[\"title\"]\n\n        dao.delete_user_series(g.user, imdb_id)\n        dao.delete_user_season(g.user, title)\n\n    elif op == \"update\":\n        series_id = request.form[\"series_id\"]\n        ep_ids = request.form.getlist('checks')\n        dao.update_user_season(g.user, series_id, ep_ids)\n\n    elif op == \"rate\":\n        imdb_id = request.form[\"imdb_id\"]\n        rate = request.form[\"rate\"]\n        dao.update_user_series(g.user, imdb_id, rate)\n\n    # redirect to user page\n    return redirect(url_for(\"profile.user\"))\n","sub_path":"app/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"479305139","text":"import sys\nimport json\nimport requests as req\n\n\nclass MailGunSettings:\n    BASE_URL = ('https://api.mailgun.net/v3/mg.guscavalcante.com')\n    API_KEY = ('key-e8647f875a06b993cd8ce03f36fbda52')\n\n\nclass SendMailgun:\n    def __init__(self):\n        self.data = []\n    \n    def send_simple_text(self, email=\"\", subject=\"\", text=\"\"):\n        url = MailGunSettings.BASE_URL\n        key = MailGunSettings.API_KEY\n\n        try:\n            rpost = req.post(\n                url,\n                auth=('api', key),\n                data={\n                    'from': 'postmaster@mg.guscavalcante.com',\n                    'to': email,\n                    'subject': subject,\n                    'text': text\n                }\n            )\n            print('Email sent')\n\n        except Exception as e:\n            print(e) \n\n\nmg = SendMailgun()\nmg.send_simple_text(\"gustavo.bionic@gmail.com\", \"TestMailGun\",\"Testing MailGunnnnnn\")","sub_path":"mail/mailgun.py","file_name":"mailgun.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"40595754","text":"## A program that computes the real roots of a quadratic function using the\n# discriminant(sqrt(b**2-4ac)) to evaluate the number of roots. -ve = no real\n# roots, 0 = one real root and +ve = two real roots. 
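# Editorial worked example (added; not part of the original exercise), using
# the corrected -b/(2*a) form applied below: for x**2 - 3x + 2 = 0 the
# discriminant is 9 - 8 = 1 > 0, so there are two real roots, 2.0 and 1.0.
from math import sqrt
_a, _b, _c = 1.0, -3.0, 2.0
_disc = _b**2 - 4*_a*_c
assert _disc > 0
assert ((-_b + sqrt(_disc)) / (2*_a), (-_b - sqrt(_disc)) / (2*_a)) == (2.0, 1.0)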
##\n\nfrom math import *\n# prompting user input\na = float(input(\"Please enter an \\\"a\\\" value for ax**2 + bx + c: \"))\nb = float(input(\"Please enter a \\\"b\\\" value for ax**2 + bx + c: \"))\nc = float(input(\"Please enter a \\\"c\\\" value for ax**2 + bx + c: \"))\n# defining the discriminant\ndisc = (b**2)-4*a*c\n# conditions for discriminant followed by applicable calculation and output\n# display\nif disc == 0:\n    root = -b/(2*a)\n    print(\"There is one real root for this equation and it is\", root)\nelif disc > 0:\n    root1 = (-b+sqrt(disc))/(2*a)\n    root2 = (-b-sqrt(disc))/(2*a)\n    print(\"There are two real roots for this equation and they are\", root1, \"and\", root2)\nelse:\n    print(\"There are no real roots\")\n","sub_path":"2 if statement exercises/Ex_50.py","file_name":"Ex_50.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"160632803","text":"import scrapy\nimport json\nimport os\n\nclass products(scrapy.Spider):\n    name = \"products\"\n    def start_requests(self):\n        domList = []\n        if os.path.exists('branch_info'):\n            for file in os.listdir('branch_info'):\n                f = open('branch_info/' + file, 'r')\n                jsontext = f.read()\n                f.close()\n                category = json.loads(jsontext)\n                cookie = category['cookies']\n                specials = category['specials']\n                cookieStr = ''\n                for key in cookie:\n                    cookieStr = cookieStr + key + '=' + cookie[key] + ';'\n\n                items = category['item']\n                for item in items:\n                    url = item['url']\n                    label = item['label']\n                    headers = {'X-Requested-With': 'OnlineShopping.WebApp', 'Host': 'shop.countdown.co.nz',\n                               'content-type': 'application/json','cookie' : cookieStr}\n                    url = 'https://xxxx.xxxxxxxx.com/api/v1/products?dasFilter=Department%3B%3B'+url+'%3Bfalse&nextUI=true&target=browse'\n                    domList.append({'url':url,'cookie':cookie,'name':file,'page':1, 'label': label,'header' : headers,'status':'browse'})\n                for item in specials:\n                    url = item['url']\n                    label = item['label']\n                    headers = {'X-Requested-With': 'OnlineShopping.WebApp', 'Host': 'shop.countdown.co.nz',\n                               'content-type': 'application/json','cookie' : cookieStr}\n                    url = 'https://xxxx.xxxxxxxx.com/api/v1/products?dasFilter=Department%3B%3B'+url+'%3Bfalse&nextUI=true&target=specials'\n                    domList.append({'url':url,'cookie':cookie,'name':file,'page':1, 'label': label,'header' : headers,'status':'specials'})\n        for dom in domList:\n            yield scrapy.Request(url=dom['url'], callback=self.parse1, headers=dom['header'], cookies=dom['cookie'],\n                                 meta={'dom': dom}, dont_filter=True)\n    def parse1(self,response):\n        dom = response.meta['dom']\n        products = json.loads(response.body)\n        currentPageSize = products['currentPageSize']\n        print()\n        for i in range(1,currentPageSize + 1):\n        # for i in range(1,2):\n            dom['page'] = i\n            ddd = {'dom':{'url':dom['url'],'cookie':dom['cookie'],'name':dom['name'],'page':i, 'label': dom['label'],'header':dom['header'],'status':dom['status']}}\n            yield scrapy.Request(url=dom['url'] + \"&page=\" + str(i), callback=self.parse2, headers=dom['header'],\n                                 meta=ddd, dont_filter=True, cookies=dom['cookie'])\n\n    def parse2(self, response):\n        dom = response.meta['dom']\n        products = json.loads(response.body)\n        p = products['products']['items']\n\n        # print(products['context']['fulfilment']['address'])\n        # for product in products['products']['items']:\n        #     sku = product['sku']\n        #     dom['sku'] = sku\n        #     yield scrapy.Request(url= 'https://xxxx.xxxxxxxx.com/api/v1/products/' + sku, callback=self.parse3, cookies=dom['cookie'],headers=dom['header'],meta = {\"dom\":dom}, dont_filter=True)
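        # Editorial note (added; not in the original spider): Scrapy's meta
        # dict is the hand-off mechanism between callbacks -- parse1 above
        # rebuilds the full request context (cookies, page number, category
        # label) for every page request, so parse2 can recover it from
        # response.meta without any shared mutable state.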
{\"dom\":dom}, dont_filter=True)\n leibie = ''\n for lb in products['breadcrumb']:\n try:\n leibie = leibie + dom['status'] + ', ' + products['breadcrumb'][lb]['name']\n except Exception as e:\n leibie += ''\n fdName = products['context']['fulfilment']['address']\n parentFiles = 'product/'\n if not os.path.exists(parentFiles):\n os.mkdir(parentFiles)\n parentFiles = parentFiles + '/' + fdName\n if not os.path.exists(parentFiles):\n os.mkdir(parentFiles)\n parentFiles = parentFiles + '/' + dom['status']\n if not os.path.exists(parentFiles):\n os.mkdir(parentFiles)\n parentFiles = parentFiles + '/' + dom['label']\n if not os.path.exists(parentFiles):\n os.mkdir(parentFiles)\n\n if os.path.exists(parentFiles + '/' + str(dom['page']) + '.json'):\n os.remove(parentFiles + '/' + str(dom['page']) + '.json')\n p1 = []\n\n for product in p:\n p1.append({\n\n 'name': product['name'],\n 'price': product['price']['salePrice'],\n 'fdName': fdName,\n 'leibie': dom['status'],\n 'cat': dom['label'],\n 'images': product['images']['big'],\n 'info': 'https://xxxx.xxxxxxxx.com/api/v1/products' + product['sku'],\n 'url': 'https://xxxx.xxxxxxxx.com/shop/productdetails?stockcode=' + product['sku']\n\n })\n f = open(parentFiles + '/' + str(dom['page']) + '.json', 'a')\n f.write(json.dumps(p1))\n f.close()\n","sub_path":"minespiders/minespider/spiders/products.py","file_name":"products.py","file_ext":"py","file_size_in_byte":4865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"296045999","text":"from .bv import BitVector\nfrom .config import config\n\n__all__ = ['PE']\n\nDATAWIDTH = 16\n\nCONST = 0\nVALID = 1\nBYPASS = 2\nDELAY = 3\n\nBITZERO = BitVector(0, num_bits=1)\nZERO = BitVector(0, num_bits=DATAWIDTH)\n\n\ndef msb(value):\n return value[-1]\n\n\ndef signed(value):\n return BitVector(value._value, value.num_bits, signed=True)\n\n\nclass Register:\n\n def __init__(self, mode, init, width):\n self.mode = mode\n self.value = BitVector(init, num_bits=width)\n self.width = width\n\n @property\n def const(self):\n return self.mode == CONST\n\n def __call__(self, value):\n if not isinstance(value, BitVector):\n value = BitVector(value, self.width)\n\n retvalue = value\n if self.mode == DELAY:\n self.value = value\n elif self.mode == CONST:\n retvalue = self.value\n return retvalue\n\n\nclass ALU:\n\n def __init__(self, op, opcode, width, signed=False, double=False):\n self.op = op\n self.signed = signed\n self.double = double\n self.opcode = opcode\n self.width = width\n self._carry = False\n\n def __call__(self, a=0, b=0, c=0, d=0):\n a = BitVector(a, self.width, self.signed)\n b = BitVector(b, self.width, self.signed)\n c = BitVector(c, self.width, self.signed)\n d = BitVector(d, self.width, self.signed)\n res = self.op(a, b, c, d)\n if self._carry:\n res_p = BitVector(a.as_int() + b.as_int() >= (2 ** self.width), 1)\n return res, res_p\n return res\n\n\n def carry(self):\n self._carry = True\n\n\nclass COND:\n\n def __init__(self, cond, signed=False):\n self.cond = cond\n self.signed = signed\n\n def __call__(self, a, b, res):\n return_vals = self.compare(a, b, res)\n return self.cond(*return_vals)\n\n def compare(self, a, b, res):\n eq = a == b\n eq = eq.as_int()\n a_msb = msb(a)\n b_msb = msb(b)\n c_msb = msb(res)\n if self.signed:\n ge = int((~(a_msb ^ b_msb) & ~c_msb) | (~a_msb & b_msb)) & 1\n le = int((~(a_msb ^ b_msb) & c_msb) | (a_msb & ~b_msb) | eq) & 1\n else:\n ge = int((~(a_msb ^ b_msb) & ~c_msb) | (a_msb & ~b_msb)) & 1\n le = int((~(a_msb ^ 
b_msb) & c_msb) | (~a_msb & b_msb) | eq) & 1\n return BitVector(ge, num_bits=1), \\\n BitVector(eq, num_bits=1), \\\n BitVector(le, num_bits=1), \\\n\n\nclass PE:\n\n def __init__(self, opcode, alu=None, signed=0):\n self.alu(opcode, signed, alu)\n self.cond()\n self.reg()\n self.place()\n\n def __call__(self, a, b=0, c=0, d=0, e=0, f=0):\n\n ra = self.RegA(a)\n rb = self.RegB(b)\n rc = self.RegC(c)\n rd = self.RegD(d)\n re = self.RegE(e)\n rf = self.RegF(f)\n\n res = ZERO\n res_p = BITZERO\n\n if self._add:\n add = self._add(ra, rb, rc, rd)\n\n if self._alu:\n res = self._alu(ra, rb, rc, rd)\n if isinstance(res, tuple):\n res, res_p = res[0], res[1]\n\n if self._cond:\n res_p = self._cond(ra, rb, res)\n\n return res.as_int(), res_p.as_int() if isinstance(res_p, BitVector) else res_p\n\n def alu(self, opcode, signed, _alu):\n self.opcode = config('0000000l0dsoooooo', o=opcode, s=signed)\n self.signed = signed\n self._alu = ALU(_alu, opcode, DATAWIDTH, signed=signed)\n return self\n\n def add(self, _add=None):\n self._add = _add\n return self\n\n def carry(self):\n self._alu.carry()\n return self\n\n def cond(self, _cond=None):\n self._add = None\n self._cond = None\n if _cond:\n self.add(lambda a, b, c, d: a+b if _cond else None)\n self._cond = COND(_cond, self.signed)\n return self\n\n def reg(self):\n self.regcode = 0\n self.rega()\n self.regb()\n self.regc()\n self.regd()\n self.rege()\n self.regf()\n return self\n\n def rega(self, regmode=BYPASS, regvalue=0):\n self.RegA = Register(regmode, regvalue, DATAWIDTH)\n self.raconst = regvalue\n self.regcode &= ~(3 << 0)\n self.regcode |= config('aa', a=regmode)\n return self\n\n def regb(self, regmode=BYPASS, regvalue=0):\n self.RegB = Register(regmode, regvalue, DATAWIDTH)\n self.rbconst = regvalue\n self.regcode &= ~(3 << 2)\n self.regcode |= config('aa', a=regmode) << 2\n return self\n\n def regc(self, regmode=BYPASS, regvalue=0):\n self.RegC = Register(regmode, regvalue, DATAWIDTH)\n self.rcconst = regvalue\n self.regcode &= ~(3 << 4)\n self.regcode |= config('aa', a=regmode) << 4\n return self\n\n def regd(self, regmode=BYPASS, regvalue=0):\n self.RegD = Register(regmode, regvalue, 1)\n self.rdconst = regvalue\n self.regcode &= ~(3 << 8)\n self.regcode |= config('aa', a=regmode) << 8\n return self\n\n def rege(self, regmode=BYPASS, regvalue=0):\n self.RegE = Register(regmode, regvalue, 1)\n self.reconst = regvalue\n self.regcode &= ~(3 << 10)\n self.regcode |= config('aa', a=regmode) << 10\n return self\n\n def regf(self, regmode=BYPASS, regvalue=0):\n self.RegF = Register(regmode, regvalue, 1)\n self.rfconst = regvalue\n self.regcode &= ~(3 << 12)\n self.regcode |= config('aa', a=regmode) << 12\n return self\n\n def lut(self, _lut=None):\n self.lut = _lut\n if self.lut:\n self.opcode |= 1 << 9\n else:\n self.opcode &= ~(1 << 9)\n return self\n\n def dual(self):\n self.opcode |= 1 << 7\n return self\n\n def place(self, x=None, y=None):\n self.x = x\n self.y = y\n return self\n","sub_path":"pe/pe.py","file_name":"pe.py","file_ext":"py","file_size_in_byte":5840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"223434131","text":"from django.conf.urls.defaults import *\nimport settings\nimport views\nimport feeds\n\nurlpatterns = patterns('',\n url(r'^$',views.home,name=\"home\"),\n \n url(r'^story/latest/feed',feeds.LatestFeed(),name=\"latest_feed\"),\n \n url(r'^story/top_rated/feed',feeds.TopRatedFeed(),name=\"top_rated_feed\"),\n \n 
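    # Editorial note (added): the angle-bracketed group names in the patterns
    # below were stripped from this dump, leaving invalid regexes like
    # (?P\d+); the names story_id, author_id, genre_id and path used here are
    # reconstructed assumptions. Django passes each named group to the view
    # as a keyword argument, so a view compatible with the next pattern is
    # assumed to look roughly like: def read_story(request, story_id): ...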
url(r'^story/(?P<story_id>\d+)/read',views.read_story,name=\"read_story\"),\n    \n    url(r'^story/(?P<story_id>\d+)/download$',views.download_story,name=\"download_story\"),\n    \n    url(r'^story/(?P<story_id>\d+)/download_html$',views.download_story_html,name=\"download_story_html\"),\n    \n    url(r'^story/(?P<story_id>\d+)/edit',views.edit_story,name=\"edit_story\"),\n    \n    url(r'^story/create',views.create_story,name=\"create_story\"),\n    \n    url(r'^story/random',views.random_story,name=\"random_story\"),\n    \n    url(r'^story/(?P<story_id>\d+)/rate',views.rate_story,name=\"rate_story\"),\n    \n    url(r'^archive/$',views.story_archive,name=\"story_archive\"),\n    \n    url(r'^about/$',views.about,name=\"about\"),\n    \n    url(r'^search',views.story_archive,name=\"search\"),\n    \n    url(r'^author/(?P<author_id>\d+)/view',views.view_author,name=\"view_author\"),\n    \n    url(r'^author/(?P<author_id>\d+)/feed',feeds.AuthorFeed(),name=\"author_feed\"),\n    \n    url(r'^author/edit_profile', views.edit_profile,name=\"edit_profile\"),\n    \n    url(r'^author/dashboard', views.author_dashboard,name=\"author_dashboard\"),\n    \n    url(r'^genre/(?P<genre_id>\d+)/view',views.view_genre, name=\"view_genre\"),\n    \n    url(r'^genre/(?P<genre_id>\d+)/feed',feeds.GenreFeed(), name=\"genre_feed\"),\n    \n    url(r'facebook/test', views.facebook_test),\n\n    url(r'^competitions', views.list_competitions, name=\"competitions\"),\n    \n    \n)\n\n#Use django to serve static files during development\nurlpatterns += patterns('django.views.static',\n(r'^media/(?P<path>.*)$', \n 'serve', {\n 'document_root': settings.MEDIA_ROOT,\n 'show_indexes': True }),)\n","sub_path":"stories/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"638100471","text":"#Author: HiruNya\n#Modified by: 31ank\n\nfrom yaml import load\nfrom enum import Enum\nfrom typing import List\nfrom sys import exit as sys_exit\n\nDEFAULT_HH_COLOUR: int = 16711680\nDEFAULT_HA_COLOUR: int = 16711935\n\n\nclass Config:\n    def __init__(self, path: str) -> None:\n        with open(path, mode=\"r\") as file:\n            data = load(file)\n        with open(\"channels.yaml\", encoding='utf-8') as file:\n            channels = load(file)\n        self.token: str = str(data.get(\"token\", \"\"))\n        if self.token == \"\":\n            print('\"token\" is a required field')\n            sys_exit()\n        self.channels: List[str] = [str(i) for i in channels[\"channels\"]]\n        if len(self.channels) == 0:\n            print('\"channels\" must have at least one item')\n            sys_exit()\n        self.posts: int = int(data.get(\"posts\", 3))\n        self.message: str = str(data.get(\"message\", \"New Hentai Release\"))\n        self.hentai_haven: HentaiHavenConfig = HentaiHavenConfig(self, data.get(\"hentai_haven\", {}))\n        self.hanime: HAnimeConfig = HAnimeConfig(self, data.get(\"hanime\", {}))\n        self.reddit: List[RedditConfig] = [RedditConfig(self, name, item) for (name, item) in data.get(\"reddit\", {}).items()]\n        self.redditporn: List[RedditPornConfig] = [RedditPornConfig(self,name,item) for (name, item) in data.get(\"pornreddit\", {}).items()]\n\n\nclass HentaiHavenConfig:\n    def __init__(self, parent: Config, data) -> None:\n        self.enabled: bool = data.get(\"enabled\", False)\n        self.posts: int = data.get(\"posts\", parent.posts)\n        if self.posts == 0:\n            self.posts = parent.posts\n        with open(\"channels.yaml\", encoding='utf-8') as file:\n            hentaihaven = load(file)\n        self.channels: List[str] = [str(i) for i in hentaihaven[\"channels\"]]\n        if len(self.channels) == 0:\n            self.channels = parent.channels\n        self.embed_colour: int = parse_colour(data.get("embed_colour", DEFAULT_HH_COLOUR), 
DEFAULT_HH_COLOUR) # Default: Red\n self.black_list: List[str] = data.get(\"blacklist\", [])\n self.message: str = parent.message\n\n\nclass HAnimeConfig:\n def __init__(self, parent: Config, data) -> None:\n self.enabled: bool = data.get(\"enabled\", False)\n self.posts: int = data.get(\"posts\", parent.posts)\n self.message: str = parent.message\n if self.posts == 0:\n self.posts = parent.posts\n with open(\"channels.yaml\", encoding='utf-8') as file:\n hanime = load(file)\n self.channels: List[str] = [str(i) for i in hanime[\"channels\"]]\n if len(self.channels) == 0:\n self.channels = parent.channels\n self.embed_colour: int = parse_colour(data.get(\"embed_colour\", DEFAULT_HA_COLOUR), DEFAULT_HA_COLOUR) # Default: Purple\n self.section: Section = section_from_str(data.get(\"section\", \"\"), Section.RECENT_UPLOADS)\n\n\nclass RedditConfig:\n def __init__(self, parent: Config, name: str, data) -> None:\n self.name: str = name\n self.posts: int = data.get(\"posts\", parent.posts)\n with open(\"channels.yaml\", encoding='utf-8') as file:\n reddit = load(file)\n self.channels: List[str] = [str(i) for i in reddit[\"channels\"]]\n if len(self.channels) == 0:\n self.channels = parent.channels\n\nclass RedditPornConfig:\n def __init__(self, parent: Config, name: str, data) -> None:\n self.name: str = name\n self.posts: int = data.get(\"posts\", parent.posts)\n with open(\"channels.yaml\", encoding='utf-8') as file:\n redditporn = load(file)\n self.channels: List[str] = [str(i) for i in redditporn[\"pornchannels\"]]\n if len(self.channels) == 0:\n self.channels = parent.channels\n\n\ndef parse_colour(data, default: int) -> int:\n col: int = default\n try:\n col = int(data)\n except ValueError:\n try:\n col = int(data, 16)\n except ValueError:\n pass\n if col < 0:\n col = default\n return col\n\n\nclass Section(Enum):\n RECENT_UPLOADS = 0\n NEW_RELEASES = 1\n TRENDING = 2\n RANDOM = 3\n\n def __int__(self) -> int:\n return self.value\n\n\ndef section_from_str(text: str, default: Section) -> Section:\n text = text.lower()\n section: Section = default\n if text == \"recent_uploads\":\n section = Section.RECENT_UPLOADS\n elif text == \"new releases\":\n section = Section.NEW_RELEASES\n elif text == \"trending\":\n section = Section.TRENDING\n elif text == \"random\":\n section = Section.RANDOM\n return section\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"311399251","text":"from modeling import BertForPreTraining, BertConfig\nfrom torch.utils.data import DataLoader, RandomSampler \nfrom run_pretraining import pretraining_dataset\n\n\ndataset = pretraining_dataset('test_file.h5',100) \ntrain_sampler = RandomSampler(dataset)\ntrain_dataloader = DataLoader(dataset, sampler=train_sampler,batch_size=32,num_workers=8, pin_memory=True)\n\n \nconfig=BertConfig(30522)\nmodel=BertForPreTraining(config).cuda()\n\nbatch=next(iter(train_dataloader))\nbatch=[data.cuda() for data in batch]\n\ninput_ids, tag_ids, segment_ids, input_mask, masked_lm_labels, masked_lm_tags, next_sentence_labels = batch\n\nloss = model(input_ids=input_ids, tag_ids=tag_ids, token_type_ids=segment_ids, attention_mask=input_mask,\n masked_lm_labels=masked_lm_labels, 
next_sentence_label=next_sentence_labels)\n\nprint(loss)\n","sub_path":"PyTorch/LanguageModeling/BERT/scratch/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"1628048","text":"import numpy as np\nimport math\n\ndef NB_param(feature_matrix, y, f_type, dictionary, m_est=0):\n\t\"\"\"\n\tusing naive bayes to estimate the model parameters\n\n\tArgs:\n\t\tfeature_matrix - the feature matrix, 2D numpy array of size (num_instances, num_features)\n\t\ty - binary labels (0/1), 1D numpy array of size (num_instances, )\n\t\tf_type - per-feature type flags, where 1 marks an already-binned (integer) feature\n\t\tdictionary - per-feature value dictionary, 2D numpy array of size (n_feature, n_max_bin)\n\t\tm_est - m-estimation setting: 0 for MLE, -1 for Laplace smoothing,\n\t\t\tany other positive value for an m-estimate with that m\n\tReturns:\n\t\ty1matrix, y0matrix - log conditional probability matrices, 2D numpy arrays of size (n_feature, n_max_bin)\n\t\tlog_py, log_ny - log prior probabilities of the positive and negative class\n\n\t\"\"\"\n\t# initialize positive label conditional probability matrix\n\ty1matrix = np.full((dictionary.shape[0], dictionary.shape[1]), 0.0)\n\t# initialize negative label conditional probability matrix\n\ty0matrix = np.full((dictionary.shape[0], dictionary.shape[1]), 0.0)\n\t# class priors: np.unique sorts the labels ascending, so for y in {0, 1}\n\t# counts[0] is the negative-class count and counts[1] the positive-class count\n\tlabels, counts = np.unique(y, return_counts=True)\n\tpy = counts[1]/np.sum(counts)\n\tny = counts[0]/np.sum(counts)\n\tfor f in range(feature_matrix.shape[1]):\n\t\tcombine_mat = np.vstack([feature_matrix[:, f], y]).T\n\t\t# sort the combined matrix by the value of feature\n\t\tmat = combine_mat[combine_mat[:, 0].argsort()]\n\t\tif f_type[f] == 1:\n\t\t\tbin_vec = mat[:,0].astype(int)\n\t\t\tc_xi = np.bincount(bin_vec)\n\t\telse:\n\t\t\t# count the appearance of value in the feature\n\t\t\tu_xi, c_xi = np.unique(mat[:, 0], return_counts=True)\n\t\t\tdictionary[f, :u_xi.shape[0]] = u_xi\n\n\t\t# split the combined matrix by the feature value\n\t\t# These are the labels of each unique value in the feature vector\n\t\tlists_xi = np.split(mat[:, 1], np.cumsum(c_xi))[:-1]\n\t\tynum_1 = np.array([np.sum(x) for x in lists_xi])\n\t\tshapevec = np.array([x.shape[0] for x in lists_xi])\n\t\tynum_0 = shapevec - ynum_1\n\n\t\t# conditional probability of each feature value given the positive\n\t\t# and the negative class, with the chosen smoothing\n\t\tl_m = len(c_xi)\n\t\tif m_est == -1:\n\t\t\tpf_1 = (ynum_1 + 1)/(counts[1] + l_m)\n\t\t\tpf_0 = (ynum_0 + 1)/(counts[0] + l_m)\n\t\telif m_est == 0:\n\t\t\tpf_1 = (ynum_1) / (counts[1])\n\t\t\tpf_0 = (ynum_0) / (counts[0])\n\t\telse:\n\t\t\tpf_1 = (ynum_1 + m_est/l_m) / (counts[1] + m_est)\n\t\t\tpf_0 = (ynum_0 + m_est/l_m) / (counts[0] + m_est)\n\t\t\n\t\t# take logs, substituting a large negative number for log(0)\n\t\tlog_pf_1 = list(map(lambda x: math.log10(x) if x else -10e6, pf_1))\n\t\tlog_pf_0 = list(map(lambda x: math.log10(x) if x else -10e6, pf_0))\n\n\t\tlog_py = math.log10(py) if py else -10e6\n\t\tlog_ny = math.log10(ny) if ny else -10e6\n\n\t\tn_val = len(log_pf_1)\n\t\ty1matrix[f, 0:n_val] = log_pf_1\n\t\ty0matrix[f, 0:n_val] = log_pf_0\n\t\t\n\treturn y1matrix, y0matrix, log_py, log_ny\n\n\n\n","sub_path":"P2/nblib/nbparam.py","file_name":"nbparam.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"629616640","text":"from glob import glob\nimport h5py\nimport numpy as np\nfrom random import randint\nfrom tensorflow.keras.utils import Sequence\nimport tensorflow as tf\nfrom math import 
ceil\n\n\nclass VolumeGenerator(Sequence):\n def __init__(self, batch_size, sample_shape=(364, 364, 160),\n file_path='t', shuffle_order=True,\n normalise_input=True, remove_outliers=True,\n transform_angle=False, transform_position=False,\n get_slice=False, get_position=False, skip_empty=True,\n examples_per_load=1, train_debug=False):\n self.batch_size = batch_size\n self.sample_shape = sample_shape\n self.data_paths = VolumeGenerator.get_paths(file_path)\n self.shuffle_order = shuffle_order\n self.normalise_input = normalise_input\n self.remove_outliers = remove_outliers\n self.transform_angle = transform_angle\n self.transform_position = transform_position\n self.get_slice = get_slice\n self.get_position = get_position\n self.skip_empty = skip_empty\n self.examples_per_load = examples_per_load\n self.train_debug = train_debug\n\n if self.train_debug:\n cut = int(len(self.data_paths) / 5)\n self.data_paths = self.data_paths[:cut]\n\n assert self.batch_size <= len(self.data_paths), f\"Batch size {self.batch_size} must be less than or equal to number of training examples {len(self.data_paths)}\"\n self.on_epoch_end()\n\n def on_epoch_end(self):\n self.indexes = np.arange(len(self.data_paths))\n if self.shuffle_order:\n np.random.shuffle(self.indexes)\n\n def __len__(self):\n return ceil(len(self.data_paths) / self.batch_size)\n\n def __getitem__(self, index):\n indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]\n batch = [self.data_paths[idx] for idx in indexes]\n x, y = self.generate_batch(batch)\n return x, y\n\n def generate_batch(self, batch, skip_fail=3):\n x_train, y_train = [], []\n if self.get_position:\n image_arr, pos_arr = [], []\n for sample_path in batch:\n count = self.examples_per_load\n skip_count = skip_fail\n x_path, y_path = sample_path\n\n volume_x_original = VolumeGenerator.load_file(x_path)\n volume_y_original = VolumeGenerator.load_file(y_path)\n\n while count > 0:\n sample_pos, sample_pos_max = VolumeGenerator.get_sample_pos(volume_x_original.shape, self.sample_shape,\n self.transform_position)\n\n volume_x = VolumeGenerator.sample_from_volume(volume_x_original, self.sample_shape, sample_pos)\n volume_y = VolumeGenerator.sample_from_volume(volume_y_original, self.sample_shape, sample_pos)\n volume_y = np.any(volume_y, axis=-1)\n\n if self.normalise_input or self.remove_outliers:\n mean = tf.math.reduce_mean(volume_x)\n if self.remove_outliers:\n np.clip(volume_x, None, 0.01, volume_x)\n if self.normalise_input:\n volume_x = VolumeGenerator.normalise(volume_x, mean)\n\n volume_x = VolumeGenerator.expand_dim_as_float(volume_x)\n volume_y = VolumeGenerator.expand_dim_as_float(volume_y)\n\n if self.get_slice:\n slice_idx = int((self.sample_shape[2] + 1) / 2) - 1\n assert slice_idx >= 0\n volume_y = volume_y[:, :, slice_idx]\n\n if self.skip_empty:\n if np.sum(volume_y) == 0:\n skip_count -= 1\n if skip_count > 0:\n continue\n\n if self.get_position:\n image_arr.append(volume_x)\n pos = np.empty(3, dtype=np.float32)\n for i in range(3):\n pos[i] = VolumeGenerator.normalise_position(sample_pos[i], sample_pos_max[i])\n pos_arr.append(pos)\n else:\n x_train.append(volume_x)\n y_train.append(volume_y)\n count -= 1\n\n if self.get_position:\n image_arr = np.stack(image_arr, axis=0)\n pos_arr = np.stack(pos_arr, axis=0)\n x_train = [image_arr, pos_arr]\n else:\n x_train = np.stack(x_train, axis=0)\n y_train = np.stack(y_train, axis=0)\n return x_train, y_train\n\n @staticmethod\n def get_sample_pos(volume_shape, sample_shape, transform_position):\n 
\"\"\"\n - Get the position required to translate the volumes by. Ranges from 0 to volume_shape - sample_shape\n - If (volume_shape - sample_shape) == 0, sample and volume same shape. Also the position is centred.\n \"\"\"\n vol_x, vol_y, vol_z = volume_shape[0] - 1, volume_shape[1] - 1, volume_shape[2] - 1\n samp_x, samp_y, samp_z = sample_shape[0] - 1, sample_shape[1] - 1, sample_shape[2] - 1\n centre_x = int(vol_x / 2) - int(samp_x / 2)\n centre_y = int(vol_y / 2) - int(samp_y / 2)\n centre_z = int(vol_z / 2) - int(samp_z / 2)\n x_max = volume_shape[0] - sample_shape[0]\n y_max = volume_shape[1] - sample_shape[1]\n z_max = volume_shape[2] - sample_shape[2]\n pos_max = np.array([x_max, y_max, z_max], dtype=np.int32)\n pos = None\n if transform_position == \"normal\":\n stddev_x = int(centre_x / 4)\n stddev_y = int(centre_y / 4)\n stddev_z = int(centre_z / 4)\n x_pos = np.random.normal(centre_x, stddev_x)\n y_pos = np.random.normal(centre_y, stddev_y)\n z_pos = np.random.normal(centre_z, stddev_z)\n float_pos = np.array([x_pos, y_pos, z_pos], dtype=np.float32)\n float_pos = np.clip(float_pos, 0, [x_max, y_max, z_max])\n pos = np.rint(float_pos)\n elif transform_position == \"uniform\":\n x_pos = np.random.uniform(0, x_max)\n y_pos = np.random.uniform(0, y_max)\n z_pos = np.random.uniform(0, z_max)\n float_pos = np.array([x_pos, y_pos, z_pos], dtype=np.float32)\n pos = np.rint(float_pos)\n else:\n x_pos = centre_x\n y_pos = centre_y\n z_pos = centre_z\n pos = np.array([x_pos, y_pos, z_pos], dtype=np.int32)\n pos = pos.astype(int)\n return pos, pos_max\n\n @staticmethod\n def get_paths(file_path):\n if file_path == \"t\":\n file_path = \"./Data/train/train\"\n elif file_path == \"v\":\n file_path = \"./Data/valid/valid\"\n X_list = glob(f'{file_path}*.im')\n Y_list = glob(f'{file_path}*.seg')\n data_paths = []\n for x_name in X_list:\n x_id = x_name[-10:-3]\n y_name = f'{file_path}_{x_id}.seg'\n assert y_name in Y_list, \"{y_name} is missing in the data file\"\n data_paths.append([x_name, y_name])\n return data_paths\n\n @staticmethod\n def load_file(file):\n with h5py.File(file, 'r') as hf:\n volume = np.array(hf['data'])\n return volume\n\n @staticmethod\n def sample_from_volume(volume, sample_shape, sample_pos):\n pos_x, pos_y, pos_z = sample_pos\n volume_sample = volume[pos_x: pos_x + sample_shape[0],\n pos_y: pos_y + sample_shape[1],\n pos_z: pos_z + sample_shape[2]]\n return volume_sample\n\n @staticmethod\n def normalise(x_image, mean=None, std=None):\n if mean is None:\n mean = tf.math.reduce_mean(x_image)\n if std is None:\n std = tf.math.reduce_std(x_image)\n return (x_image - mean) / std\n\n @staticmethod\n def expand_dim_as_float(volume):\n return np.expand_dims(volume, axis=-1).astype(np.float32)\n\n @staticmethod\n def normalise_position(pos, pos_max):\n \"\"\"\n - Recieved the pos which is a value from 0 to (length - sample size)\n - A value scaled between -1 and 1 where 0 represents a sample from the centre.\n \"\"\"\n if pos_max == 0:\n return 0\n return 2 * ((pos / pos_max) - 0.5)\n\n\nif __name__ == \"__main__\":\n import sys\n import os\n sys.path.insert(0, os.getcwd())\n\n add_pos = True\n vol_gen = VolumeGenerator(1, (384, 384, 128), get_position=add_pos, examples_per_load=1)\n x, y = vol_gen.__getitem__(0)\n if add_pos:\n print(x[0].shape)\n print(x[1].shape)\n print(y.shape)\n 
print(y.dtype)\n","sub_path":"Segmentation/utils/data_loader_3d.py","file_name":"data_loader_3d.py","file_ext":"py","file_size_in_byte":8699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"383108838","text":"from os import path\nd = path.dirname(__file__)\nd = '/'.join(d.split('/')[:-2])\n\naudio_num_mel_bins = 80\naudio_sample_rate = 16000\nnum_freq = 513\nsymbol_size = 256\nn_fft = 1024\nrescale = True\nrescaling_max = 0.999\nhop_size = 256\nwin_size = 1024\nframe_shift_ms = None\npreemphasize = True\npreemphasis = 0.97\nmin_level_db = -100\nref_level_db = 20\nfmin = 55\nfmax = 7600\nsignal_normalization = True\nallow_clipping_in_normalization = True\nsymmetric_mels = True\nmax_abs_value = 4\npower = 1.1\nmagnitude_power = 1.3\n# griffin_lim_iters = 60\ngriffin_lim_iters = 3\ntrim_fft_size = 1200\ntrim_hop_size = 300\ntrim_top_db = 23\nuse_lws = False\nsilence_threshold = 2\ntrim_silence = True\nmax_mel_frames = 2048\nwavenet_pad_sides = 1\npredict_linear = True\n\nphone_list_file = \"data/phone_set.json\"\nbin_data_dir = \"liqiao\" # name of the generated feature directory; must match train.sh\nmetadata_csv = \"meta/liqiao.csv\"\n#metadata_csv = \"linnan.csv\"\ntest_num = 1\n\ntext_data_dir = \"meta\"\n#wav_data_dir = \"Wave16k/7000000000-7100002500\"\nwav_data_dir = \"wav/train\"\n\n#train_csv = \"meta/train.csv\"\n#test_csv = \"meta/test.csv\"\n#phone_set = \"res/phone_set.json\"\n#wav_dir = \"wav/train/final\"\n#test_wav_dir = \"wav/test\"\n#train_feat_dir = \"feat/train\"\n#test_feat_dir = \"feat/test\"\n#data_dir = '%s/1.pub-data'%(d) #\"/fast/lxd_room/bjfu-ailab/1.pub-data\"\n\n","sub_path":"hparam.py","file_name":"hparam.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"207044883","text":"# Definition for a binary tree node.\nclass TreeNode:\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\n\nclass Solution:\n    # solution 1\n    def isUnivalTree(self, root: TreeNode) -> bool:\n        if not root:\n            return True\n        value, stack = root.val, [root]\n        while stack:\n            node: TreeNode = stack.pop()\n            if node.val != value:\n                return False\n            if node.left:\n                stack.append(node.left)\n            if node.right:\n                stack.append(node.right)\n        return True\n\n    # def isUnivalTree(self, root: TreeNode) -> bool:\n    #     def is_unival(tn: TreeNode, val) -> bool:\n    #         if not tn:\n    #             return True\n    #         if tn.val != val:\n    #             return False\n    #         return is_unival(tn.left, val) and is_unival(tn.right, val)\n    #\n    #     if not root:\n    #         return True\n    #     return is_unival(root, root.val)\n\n\ndef test_solution():\n    root = TreeNode(1)\n    root.left = TreeNode(1)\n    root.right = TreeNode(1)\n    root.left.left = TreeNode(1)\n    root.right.right = TreeNode(1)\n    assert Solution().isUnivalTree(root)\n\n    root.val = 2\n    assert not Solution().isUnivalTree(root)\n\n\nif __name__ == '__main__':\n    test_solution()\n","sub_path":"easy/965_univalued_binary_tree.py","file_name":"965_univalued_binary_tree.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"526219096","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nProject Euler Problem 93\n=======================\n\nBy using each of the digits from the set, {1, 2, 3, 4}, exactly once, and\nmaking use of the four arithmetic operations (+, -, *, /) and\nbrackets/parentheses, it is possible to form different positive integer\ntargets.\n\nFor example,\n\n8 = (4 * (1 + 3)) / 2\n14 = 4 * (3 + 1 / 2)\n19 = 4 * (2 + 3) - 1\n36 = 3 * 4 * (2 + 1)\n\nNote that concatenations of the digits, like 12 + 34, are not allowed.\n\nUsing the set, {1, 2, 3, 4}, it is possible to obtain thirty-one different\ntarget numbers of which 36 is the maximum, and each of the numbers 1 to 28\ncan be obtained before encountering the first non-expressible number.\n\nFind the set of four distinct digits, a < b < c < d, for which the longest\nset of consecutive positive integers, 1 to n, can be obtained, giving your\nanswer as a string: abcd.\n\n\"\"\"\n\n\n
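# Editorial sketch (added; the author's main() below is unimplemented): a
# brute-force enumeration of the targets described above, using exact
# fractions to dodge floating-point error. The helper name is illustrative.
from fractions import Fraction
from itertools import permutations, product
from operator import add, sub, mul, truediv

_SHAPES = (  # the five parenthesizations of a four-operand expression
    lambda a, b, c, d, f, g, h: f(g(h(a, b), c), d),
    lambda a, b, c, d, f, g, h: f(g(a, h(b, c)), d),
    lambda a, b, c, d, f, g, h: f(a, g(h(b, c), d)),
    lambda a, b, c, d, f, g, h: f(a, g(b, h(c, d))),
    lambda a, b, c, d, f, g, h: f(g(a, b), h(c, d)),
)

def consecutive_run(digits):
    """Length n of the 1..n run of expressible targets for a digit set."""
    got = set()
    for perm in permutations([Fraction(x) for x in digits]):
        for ops in product((add, sub, mul, truediv), repeat=3):
            for shape in _SHAPES:
                try:
                    v = shape(*perm, *ops)
                except ZeroDivisionError:
                    continue
                if v > 0 and v.denominator == 1:
                    got.add(int(v))
    n = 0
    while n + 1 in got:
        n += 1
    return n

assert consecutive_run({1, 2, 3, 4}) == 28  # matches the statement above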
def main():\n    return \"unimplemented\"\n\n\nif __name__ == \"__main__\":\n    import ntpath\n    import time\n    from common.shared_functions import verify_solution\n\n    problem_number = int(ntpath.basename(__file__).replace(\"euler\", \"\").replace(\".py\", \"\"))\n    print(\"Retrieving my answer to Euler Problem {0} ...\".format(problem_number))\n\n    ts = time.time()\n    my_answer = main()\n    te = time.time()\n\n    print(\"My answer: {0}\".format(my_answer))\n\n    verification_type = verify_solution(problem_number, my_answer)\n    print(\"Verification: {0}\".format(verification_type.name))\n    print(\"Took {0} seconds.\".format(te - ts))\n","sub_path":"project-euler/solvers/euler093.py","file_name":"euler093.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"520431766","text":"import requests\r\nimport json\r\nimport time\r\nfrom random import randint\r\n\r\nimport socket\r\nimport struct\r\n\r\n# def get_ip_address(ifname):\r\n#     s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n#     return socket.inet_ntoa(fcntl.ioctl(\r\n#         s.fileno(),\r\n#         0x8915,  # SIOCGIFADDR\r\n#         struct.pack('256s', ifname[:15])\r\n#     )[20:24])\r\n\r\n#print(get_ip_address('en0'))\r\n#print(get_ip_address('wlan0'))\r\n\r\n####################################################################################################\r\n# ''' please enter your alternate device ID and your alternate sensor ID and other values in the variables below\r\n\r\ndeviceId = 'd5c69c4ae0d1e168' # the deviceAlternateId\r\nsensorId = '6af6247b84ca4d3b' # the sensorAlternateId\r\ncapabilityAlternateId = 'dht11'\r\nipCapabilityAlternateId = 'a2cff1720092814d'\r\ntenant = 'https://b7c43a66-5b99-4e0d-921b-7d8c142a2ebf.eu10.cp.iot.sap/iot/gateway/rest/measures/'\r\n####################################################################################################\r\n\r\npostAddress = (tenant + deviceId)\r\n\r\nprint('Device ID: ', deviceId)\r\nprint('Sensor ID: ', sensorId)\r\nprint('Posting to:', postAddress)\r\n\r\n# Initial values for temperature, humidity, light\r\ntemp = 50\r\nhum = 82\r\nlight = 1000\r\n\r\n\r\nbodyJson = {\r\n    \"capabilityAlternateId\": ipCapabilityAlternateId,\r\n    \"sensorAlternateId\": sensorId,\r\n    \"measures\": [\r\n        [\"10.1.1.20\"]\r\n    ]\r\n}\r\n\r\ndata = json.dumps(bodyJson)\r\nheaders = {'content-type': 'application/json'}\r\n#r = requests.post(postAddress, data=data, headers=headers, cert=(\r\n    #'dshop.pem', 'dshop_private_key.pem'), timeout=5)\r\n#responseCode = r.status_code\r\n#print(str(bodyJson))\r\n#print(\"==> HTTP Response: %d\" % responseCode)\r\n\r\nfor x in range(0, 120000):\r\n\r\n    bodyJson = {\r\n        \"capabilityAlternateId\": capabilityAlternateId,\r\n        \"sensorAlternateId\": sensorId,\r\n        \"measures\": [\r\n            [int(temp),int(hum)]\r\n        ]\r\n    }\r\n\r\n    data = json.dumps(bodyJson)\r\n    headers = {'content-type': 'application/json'}\r\n    try:\r\n        r = 
requests.post(postAddress, data=data, headers=headers, cert=(\r\n 'dansensor-device_certificate.pem', 'dankey.pem'), timeout=5)\r\n responseCode = r.status_code\r\n print(str(bodyJson))\r\n print(\"==> HTTP Response: %d\" % responseCode)\r\n except:\r\n print(\"Unable to reach server\")\r\n \r\n if(x % 2 == 0):\r\n #temp = 60\r\n hum = 92\r\n light = 950\r\n else:\r\n #temp = 50\r\n hum = 82\r\n light = 1000\r\n\r\n\r\n temp += randint(1,7)-4\r\n\r\n if (temp <= 43):\r\n temp +=2\r\n if (temp >= 88):\r\n temp -=2\r\n print(temp)\r\n time.sleep(60)\r\n","sub_path":"temp_humidity_IoTservice.py","file_name":"temp_humidity_IoTservice.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"373625046","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def leafSimilar(self, root1, root2):\n \"\"\"\n :type root1: TreeNode\n :type root2: TreeNode\n :rtype: bool\n \"\"\"\n return self.get_leaves(root1) == self.get_leaves(root2)\n \n def get_leaves(self, root):\n if root is None:\n return ()\n if root.left is None and root.right is None:\n return (root.val,)\n leaves = ()\n if root.left:\n leaves += self.get_leaves(root.left)\n if root.right:\n leaves += self.get_leaves(root.right)\n return leaves\n \n","sub_path":"leetcode/leaf_similar_trees.py","file_name":"leaf_similar_trees.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"121461782","text":"from config import *\n\n\nclass Paddle:\n def __init__(self):\n self.x = SCREEN_COLS // 2\n self.y = SCREEN_ROWS - PAD_VER_OFF\n self.length = PAD_LEN\n self.is_sticky = False\n self.is_shooter = False\n self.last_bullet = 0\n\n def move(self, inp):\n if inp == \"a\":\n self.x -= PAD_VEL\n if self.x - self.length // 2 <= 0:\n self.x = 1 + self.length // 2\n elif inp == \"d\":\n self.x += PAD_VEL\n if self.x + self.length // 2 >= SCREEN_COLS - 1:\n self.x = SCREEN_COLS - 2 - self.length // 2\n","sub_path":"paddle.py","file_name":"paddle.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"346617209","text":"import sys\nimport os\nfrom order_management import Order, Image, get_json_from_object, get_object_from_json, process_order\nimport filehandler\nimport function\n\n\ndef get_stdin():\n buf = \"\"\n for line in sys.stdin:\n buf = buf + line\n return buf\n\n\nif (__name__ == \"__main__\"):\n input = get_stdin()\n command=input.split(\";\")[0]\n order_json=input.split(\";\")[1]\n if \"info\" in command:\n function.info()\n else:\n order = get_object_from_json(order_json)\n with open(\"/tmp/input.txt\", \"a\") as myfile:\n myfile.write(input + \"\\n\")\n print(input)\n uuid = order.uuid\n function_name = order.function_name\n dimensions = order.dimensions\n target_file_format = order.target_file_format\n image_list = order.image_list\n img_count_function = order.img_count_function\n to_do = order.to_do\n\n os.environ['SOURCEPATH'] = \"/tmp/%s/source/\" % uuid\n os.environ['TARGETPATH'] = \"/tmp/%s/target/\" % uuid\n os.environ['BASICURL'] = 'http://fileserver-service:8000'\n\n print(\"UUID: %s\" % uuid)\n print(\"Function-name: %s\" % function_name)\n filehandler.download_file_list(to_do, uuid)\n\n print(\"All files downloaded - start 
function...\")\n function.start(order)\n\n print(\"Function finished - start upload of results...\")\n filehandler.upload_results(uuid, function_name)\n\n print(\"Upload finished - deleting tmp files...\")\n filehandler.remove_tmp(uuid)\n print(\"End of function!\")\n\n","sub_path":"scripts/function_service.py","file_name":"function_service.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"477392590","text":"# Universidade Federal do Amazonas\n# Aluna: Karina Rocha Ferreira - 21554907\n# Lab.codificação 04\n# 26/07/2016\nqi = float(input(\"Quantia inicial: \"))\ntempo = float(input(\"Tempo de investimento: \"))\njuros = 4.0\nsaldo = qi\nt = 1\nwhile (t <= tempo):\n\trend = saldo * juros/100\n\tsaldo = saldo + rend\n\tt = t + 1\n\tprint(round(saldo, 2))","sub_path":"exs/1450-1135.py","file_name":"1450-1135.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"74104588","text":"__author__ = 'nightblues'\nfrom http.server import *\nimport http.client\nimport re\nfrom socketserver import ThreadingMixIn\n\n\nclass RedirectRequestHandler(BaseHTTPRequestHandler):\n def get_request_body(self):\n if 'Content-Length' in self.headers:\n return self.rfile.read(int(self.headers['Content-Length']))\n return b''\n\n def get_request(self):\n data = \"\"\n data += self.command + ' ' + self.path + ' ' + self.request_version + '\\r\\n'\n data += self.headers.as_string()\n data += self.get_request_body().decode('utf-8')\n return bytes(data, 'utf-8')\n\n def load_page(self, hostname, protocol='http'):\n if protocol == 'https':\n client_conn = http.client.HTTPSConnection(hostname)\n else:\n client_conn = http.client.HTTPConnection(hostname)\n client_conn.request(self.command, self.path, self.get_request_body(), self.headers)\n response = client_conn.getresponse()\n resp_headers = response.msg\n resp_body = response.read()\n return {'status': response.status, 'headers': resp_headers, 'body': resp_body}\n\n def get_protocol(self):\n if self.path.strip().startswith('http://'):\n return 'http'\n elif self.command.strip() == 'CONNECT':\n return 'https'\n return 'http'\n\n def get_hostname(self):\n hostname_s = re.search(r'(http://)?([a-z0-9а-я:.-]+)', self.path)\n if hostname_s:\n return hostname_s.group(2)\n return 'localhost'\n\n def redirect_request(self):\n hostname = self.get_hostname()\n if hostname == 'localhost':\n self.wfile.write(bytes('This is proxy-server, not web-site.', 'utf-8'))\n else:\n page_data = self.load_page(hostname, self.get_protocol())\n self.send_response(page_data['status'])\n for header in page_data['headers'].items():\n self.send_header(header[0], header[1])\n self.end_headers()\n self.wfile.write(page_data['body'])\n\n def do_GET(self):\n self.redirect_request()\n\n def do_POST(self):\n self.redirect_request()\n\n def do_CONNECT(self):\n # self.redirect_request()\n self.wfile.write(bytes('Https is not supported.', 'utf-8'))\n\n\n\nclass ThreadedHTTPServer(ThreadingMixIn, HTTPServer):\n \"\"\"Handle requests in a separate thread.\"\"\"\n pass\n\n\nserver_address = ('localhost', 8080)\nhttpd = ThreadedHTTPServer(server_address, RedirectRequestHandler)\nhttpd.serve_forever()\n","sub_path":"proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"205770703","text":"import 
{"seq_id":"205770703","text":"import mock\nimport random\nfrom django import test\nfrom django.utils import timezone\n\nfrom . import helpers\nfrom wps import models\nfrom wps import settings\nfrom wps.tasks import cache\n\nclass CacheTaskTestCase(test.TestCase):\n\n    def setUp(self):\n        for _ in range(10):\n            models.Cache.objects.create(uid=helpers.random_str(10), size=random.uniform(0.5, 10.0))\n\n    def test_setup_period_tasks(self):\n        mock_sender = mock.MagicMock()\n\n        cache.setup_periodic_tasks(mock_sender)\n\n        mock_sender.add_periodic_task.assert_called()\n\n    @mock.patch('wps.tasks.cache.os.remove')\n    @mock.patch('wps.tasks.cache.os.path.exists')\n    def test_cache_free_space(self, mock_exists, mock_remove):\n        mock_exists.return_value = True\n\n        new_size = settings.CACHE_GB_MAX_SIZE / 10\n\n        cached = models.Cache.objects.all()\n\n        for item in cached:\n            item.size = new_size\n\n            item.save()\n\n        # Cache should technically be full; we'll add two smaller items and check if they are removed\n        for _ in range(2):\n            models.Cache.objects.create(uid=helpers.random_str(10), size=new_size/2)\n\n        with self.assertNumQueries(7):\n            cache.cache_clean()\n\n        remain_count = models.Cache.objects.count()\n\n        self.assertEqual(remain_count, 9)\n\n    @mock.patch('wps.tasks.cache.os.remove')\n    @mock.patch('wps.tasks.cache.os.path.exists')\n    def test_cache_delete_expired(self, mock_exists, mock_remove):\n        mock_exists.return_value = True\n\n        cached = random.sample(models.Cache.objects.all(), 4)\n\n        for item in cached:\n            models.Cache.objects.filter(pk=item.pk).update(accessed_date=timezone.now() - settings.CACHE_MAX_AGE - timezone.timedelta(days=30))\n\n        with self.assertNumQueries(7):\n            cache.cache_clean()\n\n        remain_count = models.Cache.objects.count()\n\n        self.assertEqual(remain_count, 6)\n\n    @mock.patch('wps.tasks.cache.os.remove')\n    @mock.patch('wps.tasks.cache.os.path.exists')\n    def test_cache_remove_files_not_on_disk(self, mock_exists, mock_remove):\n        mock_remove.return_value = True\n\n        mock_exists.side_effect = [\n            False, False, False, False, False, False, False, False, False, False,\n            False, True, False, True, True, True, False, False, False, False, False\n        ]\n\n        with self.assertNumQueries(11):\n            cache.cache_clean()\n\n        remain_count = models.Cache.objects.count()\n\n        self.assertEqual(remain_count, 2)\n","sub_path":"compute/wps/tests/test_tasks_cache.py","file_name":"test_tasks_cache.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"641089713","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass ConvNet(nn.Module):\n    def __init__(self):\n        super(ConvNet, self).__init__()\n        \n        self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=7, padding=3)\n        self.conv2 = nn.Conv2d(in_channels=16, out_channels=16, kernel_size=7, padding=3)\n        self.conv3 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, padding=2)\n        \n        self.batchnorm2 = nn.BatchNorm2d(32)\n        \n        self.fc1 = nn.Linear(in_features=32*32*32, out_features=5)\n        \n    def forward(self, x):\n        x = F.relu(self.conv1(x))\n        x = F.max_pool2d(F.relu(self.conv2(x)), 2)\n        x = F.max_pool2d(F.relu(self.batchnorm2(self.conv3(x))), 2)\n        \n        x = x.view(-1, 32*32*32)\n        x = F.dropout(self.fc1(x), training=self.training)\n        x = F.log_softmax(x, dim=-1) ##Output layer \n        return 
x","sub_path":"ConvNet.py","file_name":"ConvNet.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"271981617","text":"\r\nclass CompanyTeam(object):\r\n\r\n def __init__(self, json_dict=None):\r\n self.id = None # (Integer)\r\n self.company = None # (CompanyReference)\r\n self.teamRole = None # *(TeamRoleReference)\r\n self.locationId = None # (Integer)\r\n self.businessUnitId = None # (Integer)\r\n self.contact = None # (ContactReference)\r\n self.member = None # (MemberReference)\r\n self.accountManagerFlag = None # (Boolean)\r\n self.techFlag = None # (Boolean)\r\n self.salesFlag = None # (Boolean)\r\n self._info = None # (Metadata)\r\n\r\n # initialize object with json dict\r\n self.__dict__.update(json_dict)\r\n\r\n def __repr__(self):\r\n string = None\r\n string = ''.join('{}: {}\\n'.format('Name',self.__dict__['name']))\r\n return string\r\n","sub_path":"company/company_team.py","file_name":"company_team.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"115146955","text":"import json\nimport os\nfrom operator import itemgetter\n\nfrom sklearn.metrics.pairwise import pairwise_distances\nfrom PIL import Image\nimport cv2\nimport numpy as np\nfrom tqdm import tqdm\nimport joblib\n\nfrom linc_cv.modality_whisker.icp import icp\nfrom linc_cv.settings import WHISKER_FEATURE_X_PATH, WHISKER_FEATURE_Y_PATH, \\\n WHISKER_IMAGES_PATH, WHISKER_BBOX_MODEL_PATH\n\nfrom .inference import YOLO\n\n\ndef resize_to_longer_edge(im, target_size):\n \"\"\"\n Make image square by enlarging it, then resize to target_size\n Input and output: PIL Image\n \"\"\"\n\n width, height = im.size\n if width > height:\n im_n = Image.new('L', (width, width,))\n offset = (width - height) // 2\n im_n.paste(im, (0, offset,))\n elif height > width:\n im_n = Image.new('L', (height, height,))\n offset = (height - width) // 2\n im_n.paste(im, (offset, 0,))\n else:\n im_n = im\n return im_n.resize(target_size)\n\n\ndef simplify_whisker(im, d, e, ma, t1, t2):\n clahe = cv2.createCLAHE()\n im = clahe.apply(im)\n _, t = cv2.threshold(im, t1, t2, cv2.THRESH_BINARY)\n\n kernel3 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3,))\n t = cv2.dilate(t, kernel3, iterations=d)\n t = cv2.erode(t, kernel3, iterations=e)\n\n params = cv2.SimpleBlobDetector_Params()\n params.filterByInertia = False\n params.filterByConvexity = False\n params.filterByArea = True\n params.minArea = ma\n d = cv2.SimpleBlobDetector_create(params)\n keypoints = d.detect(t)\n\n return None, keypoints\n\n\ndef comparator(A, B):\n if A.size == 0 or B.size == 0:\n return None\n\n if A.shape != B.shape:\n discrepancy = abs(A.shape[0] - B.shape[0])\n dists = pairwise_distances(A, B)\n idxs = np.argsort(-dists.ravel())\n pts_a, pts_b = np.unravel_index(idxs, dists.shape)\n if len(A) > len(B):\n A_m = np.ones(len(A), dtype=np.bool)\n idxs = set()\n pts_a = iter(pts_a)\n while len(idxs) < discrepancy:\n idxs.add(next(pts_a))\n A_m[list(idxs)] = False\n A = A[A_m]\n else:\n B_m = np.ones(len(B), dtype=np.bool)\n idxs = set()\n pts_b = iter(pts_b)\n while len(idxs) < discrepancy:\n idxs.add(next(pts_b))\n B_m[list(idxs)] = False\n B = B[B_m]\n\n T, distances, i = icp(A, B)\n\n # median is best so far... 
46% @ top-10\n    return np.median(distances)\n\n\ndef whisker_image_to_feature(image, bbox, label, d, e, ma, t1, t2, sz):\n    crop = image.convert('L')\n    crop = crop.crop(bbox)\n    crop = resize_to_longer_edge(crop, target_size=(sz, sz,))\n    crop = np.array(crop, dtype=np.uint8)\n    im, kpts = simplify_whisker(crop, d, e, ma, t1, t2)\n    feature = np.array([k.pt for k in kpts], dtype=np.float64)\n    return feature, label\n\n\ndef train_whisker_classifier():\n    # TODO: move these magic values into a common dictionary\n    d = 5\n    e = 2\n    ma = 15\n    t1 = 53\n    t2 = 120\n    sz = 400\n    topk = 10\n\n    whisker_bbox_model = YOLO(WHISKER_BBOX_MODEL_PATH)\n\n    paths = []\n    for root, dirs, files in os.walk(WHISKER_IMAGES_PATH):\n        for f in files:\n            paths.append(os.path.join(root, f))\n\n    X = []\n    y = []\n    for path in tqdm(paths, desc=\"extracting whisker bboxes and features\"):\n        label = path.split(os.sep)[-2]\n        image = Image.open(path)\n        rois = whisker_bbox_model.detect_image(image)\n        if not rois:\n            print(f'no rois found for {path}, SKIPPING...')\n            continue\n        # select roi with highest detection probability\n        roi = sorted(rois, key=itemgetter(1), reverse=True)[0]\n        _, confidence, box_x, box_y, box_w, box_h = roi\n        if confidence < 0.99:\n            continue\n        bbox = (box_y, box_x, box_h, box_w,)\n        feature, label = whisker_image_to_feature(image, bbox, label, d, e, ma, t1, t2, sz)\n        X.append(feature)\n        y.append(label)\n    joblib.dump(X, WHISKER_FEATURE_X_PATH, compress=('xz', 9,))\n    joblib.dump(y, WHISKER_FEATURE_Y_PATH, compress=('xz', 9,))\n    print('dumped whisker feature db')\n\n\ndef validate_whisker_classifier():\n    # load the feature db dumped by train_whisker_classifier; topk mirrors\n    # the value used during training\n    X = joblib.load(WHISKER_FEATURE_X_PATH)\n    y = joblib.load(WHISKER_FEATURE_Y_PATH)\n    topk = 10\n    whisker_scores = []\n    A = X[0]\n    for idx, B in enumerate(tqdm(X, desc='chamfer distance computation')):\n        score = comparator(A, B)\n        whisker_scores.append([idx, score])\n    whisker_scores = sorted(whisker_scores, key=itemgetter(1))\n    max_score = max(whisker_scores, key=itemgetter(1))[1]\n    print('max_score', max_score)\n    topk_results = []\n    for idx, score in whisker_scores[:topk]:\n        label = y[idx]\n        proba = round(1 - score / max_score, 3)\n        topk_results.append([label, proba])\n    print(topk_results)\n","sub_path":"linc_cv/modality_whisker/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"370415254","text":"\"\"\"\n- CS2911 - 001\n- Fall 2020\n- Lab 6\n- Names:\n  - Sean Lang\n  - Sean Lang's Clone\n\nA simple HTTP server\n\"\"\"\n\nimport socket\nimport re\nimport threading\nimport os\nfrom os import path, stat\nimport mimetypes\nimport datetime\nfrom urllib.parse import urlparse, unquote, parse_qs\n\n\ndef main():\n    \"\"\" Start the server \"\"\"\n    http_server_setup(8080)\n\n\ndef http_server_setup(port):\n    \"\"\"\n    Start the HTTP server\n    - Open the listening socket\n    - Accept connections and spawn processes to handle requests\n\n    :param port: listening port number\n    \"\"\"\n\n    num_connections = 10\n    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    listen_address = ('', port)\n    server_socket.bind(listen_address)\n    server_socket.listen(num_connections)\n    try:\n        while True:\n            request_socket, request_address = server_socket.accept()\n            print('connection from {0}:{1}'.format(*request_address))\n            # Create a new thread, and set up the handle_request method and its argument (in a tuple)\n            request_handler = threading.Thread(\n                target=handle_request, args=(request_socket,))\n            # Start the request handler thread.\n            request_handler.start()\n            # Just for information, display the running threads (including this main one)
\n            print('threads: ', threading.enumerate())\n    # Set up so a Ctrl-C should terminate the server; this may have some problems on Windows\n    except KeyboardInterrupt:\n        print(\"HTTP server exiting . . .\")\n        print('threads: ', threading.enumerate())\n        server_socket.close()\n\n\ndef handle_request(tcp_socket):\n    \"\"\"\n    Handle a single HTTP request, running on a newly started thread.\n\n    Closes request socket after sending response.\n\n    Should include a response header indicating NO persistent connection\n\n    :param tcp_socket: socket representing TCP connection from the HTTP client\n    :return: None\n    \"\"\"\n\n    request_line = parse_http_request_line(read_line(tcp_socket))\n    print('request line:', request_line)\n    headers = read_headers(tcp_socket)\n    print('headers: ', headers)\n    file_path = resolve_url(request_line['url'])\n\n    if file_path is None:\n        # invalid path\n        send_response(tcp_socket, 404)\n        return\n\n    if request_line['verb'] == 'GET':\n        handle_get_request(tcp_socket, file_path)\n    elif request_line['verb'] == 'POST':\n        handle_post_request(tcp_socket, headers)\n\n    else:\n        # invalid verb\n        send_response(tcp_socket, 400)\n\n\ndef handle_get_request(tcp_socket, file_path):\n    \"\"\"\n    This method handles a GET request. We ignore query parameters, redirects,\n    and content-type negotiation entirely and just respond with the file at that\n    path.\n    :author: Sean Lang\n    :param tcp_socket: The socket to read from.\n    :param file_path: The path to the file being requested.\n    \"\"\"\n\n    try:\n        # don't use path.join because it handles absolute path segments\n        # incorrectly. this does not break on windows during testing.\n        requested_file = open('./fixtures' + file_path, 'rb')\n        file_content = requested_file.read()\n        mime_type = get_mime_type(file_path)\n        requested_file.close()\n        send_response(tcp_socket, 200, mime_type, file_content)\n    except FileNotFoundError:\n        send_response(tcp_socket, 404)\n\n\ndef handle_post_request(tcp_socket, headers):\n    \"\"\"\n    Handle a POST request. We ignore the URL entirely and just look for data or\n    files being uploaded. Uploaded files are extracted from the request and\n    written to the upload directory. Other data in the form is ignored entirely.\n    :author: Sean Lang\n    :param tcp_socket: The socket to read from.\n    :param headers: The parsed request headers (dictionary).\n    \"\"\"\n\n    if 'Content-Length' in headers:\n        content = read_body(tcp_socket, headers['Content-Length'])\n    elif headers.get('Transfer-Encoding', None) == 'chunked':\n        content = read_chunks(tcp_socket)\n    else:\n        send_response(tcp_socket, 500, b'text/plain',\n                      b'Didn\\'t get a Content-Length or Transfer-Encoding: chunked.\\n')\n        return\n\n    content_type = parse_header_value(headers.get('Content-Type', ''))\n    if content_type['base_value'] == 'application/x-www-form-urlencoded':\n        print('POST data: ', parse_qs(content))\n        # just send a minimal response to say we got it\n        send_response(tcp_socket, 200, b'text/plain', b'OK\\n')\n    elif content_type['base_value'] == 'multipart/form-data':\n        handle_post_multipart_request(tcp_socket, content, content_type)\n    else:\n        send_response(tcp_socket, 500)\n\n\ndef handle_post_multipart_request(tcp_socket, content, content_type):\n    \"\"\"\n    Handle a multipart form-data request and write any uploaded files to the\n    upload directory. We do no validation on these files whatsoever, so anyone\n    making requests to this server is free to fill up the disk with whatever\n    they want.\n    :author: Sean Lang\n    :param tcp_socket: The socket to read from.\n    \"\"\"\n
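    # Editorial illustration (added; the boundary token is hypothetical): a
    # client such as `curl -F 'upload=@photo.jpg' http://localhost:8080/x`
    # frames its body as
    #   --BOUNDARY\r\n
    #   Content-Disposition: form-data; name="upload"; filename="photo.jpg"\r\n
    #   \r\n
    #   <file bytes>\r\n
    #   --BOUNDARY--\r\n
    # The code below splits on b'\r\n--' + boundary, skips everything before
    # the first delimiter as preamble, and stops at the trailing '--' closer.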
We do no validation on these files whatsoever, so anyone\n making requests to this server is free to fill up the disk with whatever\n they want.\n :author: Sean Lang\n :param tcp_socket: The socket to read from.\n \"\"\"\n\n boundary = content_type['boundary']\n content = b'\\r\\n' + content # add a leading CRLF to make splitting easy\n first_part = True\n for raw_part in content.split(b'\\r\\n--' + str.encode(boundary, 'ascii')):\n if first_part:\n # skip the first part because it's a preamble, usually blank\n first_part = False\n continue\n\n if raw_part == b'--\\r\\n':\n # according to rfc2046, the final boundary delimiter is followed\n # by 2 hyphens\n break\n\n if not raw_part.startswith(b'\\r\\n'):\n send_response(tcp_socket, 500, b'text/plain',\n b'Boundry must be followed by CRLF')\n return\n else:\n raw_part = raw_part.replace(b'\\r\\n', b'', 1)\n\n part = parse_form_part(raw_part)\n if part != None:\n part_headers, body = part\n disposition = part_headers.get('Content-Disposition', {})\n filename = disposition.get('filename', None)\n if filename != None:\n print('writing file: ', 'upload/' + filename)\n write_message_to_file(body, 'upload/' + filename)\n\n send_response(tcp_socket, 200, b'text/plain', b'OK\\n')\n\n\ndef parse_form_part(part):\n \"\"\"\n Parse a part of a multipart form already split by boundries. Pull out the\n headers, especially the Content-Disposition, and give back the headers and\n parsed body.\n :author: Sean Lang\n :return: A tuple of the headers (dictionary) and parsed body (bytes object).\n :rtype: tuple\n \"\"\"\n\n headers = {}\n raw_headers, part = part.split(b'\\r\\n\\r\\n', 1)\n for raw_header in raw_headers.split(b'\\r\\n'):\n key, value = parse_header(raw_header)\n if key == 'Content-Disposition':\n value = parse_header_value(value)\n headers[key] = value\n\n return (headers, part)\n\n\ndef resolve_url(url_path):\n \"\"\"\n Given a url path, turn it into a file path to check. Right now we just\n handle percent encoding, strip out any GET parameters (ignore them) and add\n a default directory index of index.html\n :author: Sean Lang\n :param url_path: The raw URL parsed from the request line\n :returns: A processed file path, with segments like '..' or '.' removed.\n :rtype: string\n\n >>> resolve_url('/abc?var=45')\n '/abc'\n >>> resolve_url('/abc/../')\n '/index.html'\n >>> resolve_url('//')\n '//index.html'\n >>> resolve_url('/')\n '/index.html'\n >>> resolve_url('/abc/../../def.html') is None\n True\n >>> resolve_url('/my%20index.html')\n '/my index.html'\n \"\"\"\n\n file_path = unquote(urlparse('http://localhost' + url_path).path)\n\n # posixpath.normpath allows for // at the beginning of the url, strips\n # trailing slashes, and replaces an empty path with '.', so we can't use\n # that. urljoin also has problems with not handling '..' correctly.\n segments = file_path.split('/')\n if segments[-1] == '':\n # add default directory index name\n segments[-1] = 'index.html'\n segments = [segment + '/' for segment in segments[:-1]] + [segments[-1]]\n resolved = []\n\n # resolve '..' and '.' segments in the path. 
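# What the boundary split above produces for a minimal multipart body
# (boundary value invented for the example):
boundary = 'xyz'
content = b'\r\n' + (b'--xyz\r\n'
                     b'Content-Disposition: form-data; name="f"; filename="a.txt"\r\n'
                     b'\r\nhello\r\n'
                     b'--xyz--\r\n')
parts = content.split(b'\r\n--' + str.encode(boundary, 'ascii'))
# parts[0] == b''        -> the (empty) preamble, skipped as first_part
# parts[1]               -> b'\r\nContent-Disposition: ...\r\n\r\nhello'
# parts[2] == b'--\r\n'  -> the closing delimiter (RFC 2046), ends the loop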
curl & most browsers will\n # actually refuse to send us URLs this bad, so use GET from the lwp-request\n # package to test it out on the running server\n for segment in segments:\n if segment in ('../', '..'):\n if resolved[1:]:\n resolved.pop()\n else:\n # trying to break out of http root\n return None\n elif segment != './' and segment != '.':\n resolved.append(segment)\n file_path = ''.join(resolved)\n\n return file_path\n\n\ndef send_response(tcp_socket, *args):\n \"\"\"\n A simple wrapper around make_response & sendall to send a response. This is\n hard to test with doctests (it would require a mock tcp_socket), so we keep\n it out of the real make_response().\n :author: Sean Lang\n :param tcp_socket: The socket to read from.\n :param *args: All the args to pass to make_response.\n \"\"\"\n tcp_socket.sendall(make_response(*args))\n\n\ndef make_response(status_code, mime_type=b'text/html', body=b'', headers={}):\n \"\"\"\n Make a full HTTP GET response.\n :author: Sean Lang\n :param status_code: The status code to be sent in the response as an integer\n :param mime_type: The mime type to be used in the Content-Type header.\n Defaults to b'text/html'\n :param body: The body of the response. Defaults to an empty bytes object but\n will be filled in with an error page if a status code other than 200 is\n passed.\n :return: The formatted response body.\n :rtype: bytes\n\n >>> make_response(100)\n b'HTTP/1.1 100 Continue\\\\r\\\\n\\\\r\\\\n'\n \"\"\"\n\n status_line = make_http_status_line(status_code)\n\n if status_code < 200:\n # 1xx status codes don't need headers or bodies\n return status_line + b'\\r\\n\\r\\n'\n\n if (status_code < 200 or status_code >= 400) and body == b'':\n body = make_error_page(status_code)\n\n if headers.get(b'Date', None) is None:\n timestamp = datetime.datetime.utcnow().strftime(\n '%a, %d %b %Y %H:%M:%S GMT')\n headers[b'Date'] = timestamp\n headers[b'Server'] = b'Lab 7 Test Server'\n headers[b'Connection'] = b'close'\n headers[b'Content-Length'] = len(body)\n headers[b'Content-Type'] = mime_type\n\n response_lines = [status_line]\n for key, value in headers.items():\n response_lines.append(make_http_header(key, value))\n\n return b'\\r\\n'.join(response_lines) + b'\\r\\n\\r\\n' + body\n\n\n\"\"\"\nTaken from https://www.w3.org/Protocols/rfc2616/rfc2616-sec6.html and\nhttps://tools.ietf.org/id/draft-nottingham-thanks-larry-00.html\n\"\"\"\nstatus_code_map = {\n 100: b'Continue',\n 101: b'Switching Protocols',\n 200: b'OK',\n 201: b'Created',\n 202: b'Accepted',\n 203: b'Non-Authoritative Information',\n 204: b'No Content',\n 205: b'Reset Content',\n 206: b'Partial Content',\n 300: b'Multiple Choices',\n 301: b'Moved Permanently',\n 302: b'Found',\n 303: b'See Other',\n 304: b'Not Modified',\n 305: b'Use Proxy',\n 307: b'Temporary Redirect',\n 400: b'Bad Request',\n 401: b'Unauthorized',\n 402: b'Payment Required',\n 403: b'Forbidden',\n 404: b'Not Found',\n 405: b'Method Not Allowed',\n 406: b'Not Acceptable',\n 407: b'Proxy Authentication Required',\n 408: b'Request Time-out',\n 409: b'Conflict',\n 410: b'Gone',\n 411: b'Length Required',\n 412: b'Precondition Failed',\n 413: b'Request Entity Too Large',\n 414: b'Request-URI Too Large',\n 415: b'Unsupported Media Type',\n 416: b'Requested range not satisfiable',\n 417: b'Expectation Failed',\n 418: b'I\\'m a teapot',\n 500: b'Internal Server Error',\n 501: b'Not Implemented',\n 502: b'Bad Gateway',\n 503: b'Service Unavailable',\n 504: b'Gateway Time-out',\n 505: b'HTTP Version not supported',\n}\n\n\ndef 
make_http_status_line(status_code, version=b'HTTP/1.1'):\n \"\"\"\n Create a status line. The status phrase is omitted from the parameters\n because it is derived from the status_code. See https://www.w3.org/Protocols/rfc2616/rfc2616-sec6.html for a list of\n status_code to status phrase pairs.\n :author: Donal Moloney\n :param status_code: The code indicating the result of the request.\n :param version: The version of HTTP being used, defaults to 'HTTP/1.1'\n :return: The response status line, without a trailing CRLF.\n :rtype: bytes\n\n >>> make_http_status_line(200)\n b'HTTP/1.1 200 OK'\n \"\"\"\n\n return b' '.join((\n version,\n str(status_code).encode('ascii'),\n status_code_map[status_code]\n ))\n\n\nERROR_PAGE_FORMAT = b\"\"\"\n\n\n \n \n \n \n %i Error\n \n \n

Error

\n

%b

\n \n\n\"\"\"\n\n\ndef make_error_page(status_code):\n \"\"\"\n Makes an error page based on the status code.\n :author: Donal Moloney\n :param status_code: The status code of the response to be returned, used for\n formatting the error page.\n :return: A formatted error page.\n :rtype: bytes\n \"\"\"\n\n return ERROR_PAGE_FORMAT % (status_code, status_code_map[status_code])\n\n\ndef read_chunks(tcp_socket):\n \"\"\"\n Read the body of the response as chunks and concatenate them together\n :author: Sean Lang\n :param tcp_socket: The socket to read from.\n :return: The body of the response.\n :rtype: bytes\n \"\"\"\n\n content = b''\n chunk = read_chunk(tcp_socket)\n while len(chunk) != 0:\n content += chunk\n chunk = read_chunk(tcp_socket)\n return content\n\n\ndef read_chunk(tcp_socket):\n \"\"\"\n Read a single chunk, parsing the length of the chunk, reading the specified\n number of bytes from the socket, and consuming the trailing CRLF.\n :author: Sean Lang\n :param tcp_socket: The socket to read from.\n :return: The content of the chunk, without the trailing CRLF or leading size\n line.\n :rtype: bytes\n \"\"\"\n\n size = int(bytes.decode(read_line(tcp_socket), 'ascii'), 16)\n content = read_body(tcp_socket, size)\n\n # read the line ending and make sure it's correct\n if read_body(tcp_socket, 2) != b'\\r\\n':\n raise Exception('Didn\\'t get a correct line ending for chunk')\n return content\n\n\ndef read_body(tcp_socket, length):\n \"\"\"\n This method reads the body of the HTTP request, trying to read in as large\n of segments as possible to reduce the number of function calls.\n :author: Sean Lang\n :param tcp_socket: The socket to read from.\n :param length: The number of bytes to read.\n :return: The bytes read off of the socket, concat'd together.\n :rtype bytes\n \"\"\"\n\n content = tcp_socket.recv(length)\n while len(content) < length:\n content += tcp_socket.recv(length - len(content))\n return content\n\n\ndef read_line(tcp_socket):\n \"\"\"\n Read a single line and return it without the CRLF. Return a zero length line\n if it's blank (indicating the end of the headers).\n :author: Sean Lang\n :param tcp_socket: The socket to read from.\n :return: A single line from the response, without the trailing CRLF.\n :rtype: bytes\n \"\"\"\n\n line = b''\n char = tcp_socket.recv(1)\n while not (char == b'\\n' and line[-1:] == b'\\r'):\n line += char\n char = tcp_socket.recv(1)\n return line[:-1]\n\n\ndef read_headers(tcp_socket):\n \"\"\"\n Use read_line to read all the headers from the tcp_socket. Stop when we hit\n the end of the headers.\n :author: Sean Lang\n :param tcp_socket: The socket to read from.\n :return: All the headers we parsed, as key, value pairs.\n :rtype: dict\n \"\"\"\n\n headers = {}\n line = read_line(tcp_socket)\n while len(line) != 0:\n key, value = parse_header(line)\n headers[key] = value\n line = read_line(tcp_socket)\n return headers\n\n\nHEADER_RE = r'([^:]+): (.+)'\n\n\ndef parse_header(line):\n \"\"\"\n Parse a header and return a tuple with the (key,value) pair. 
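# The socket readers above can be exercised off-line with a minimal fake
# socket backed by a byte buffer (a hypothetical test helper, not part of
# the lab skeleton):
class FakeSocket:
    def __init__(self, data):
        self.data, self.pos = data, 0
    def recv(self, n):
        chunk = self.data[self.pos:self.pos + n]
        self.pos += len(chunk)
        return chunk

sock = FakeSocket(b'Host: example.com\r\nContent-Length: 5\r\n\r\nhello')
hdrs = read_headers(sock)    # {'Host': 'example.com', 'Content-Length': 5}
data = read_body(sock, hdrs['Content-Length'])   # b'hello'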
Also, handle\n basic type conversion.\n :author: Sean Lang\n :param line: A line single of the header, without a trailing CRLF\n :return: The parsed header key and value.\n :rtype: tuple\n\n >>> parse_header(b'Content-Length: 10')\n ('Content-Length', 10)\n >>> parse_header(b'Content-Type: text/html')\n ('Content-Type', 'text/html')\n \"\"\"\n\n result = re.match(HEADER_RE, line.decode('ascii')).group(1, 2)\n if result[0] == 'Content-Length':\n # this always represents an integer\n result = (result[0], int(result[1]))\n return result\n\n\ndef parse_header_value(value):\n \"\"\"\n Parses a set of key, value pairs out of a semicolon delimited header value.\n Useful for handling POST requests.\n :author: Sean Lang\n :param value: The value to parse.\n :return: The parsed value as a dictionary of key, value pairs, with the part\n that has no key being called base_value.\n :rtype: dict\n\n >>> parse_header_value('multipart/form-data; boundary=-------8b61c4369832')\n {'base_value': 'multipart/form-data', 'boundary': '-------8b61c4369832'}\n >>> parse_header_value('form-data; name=\"fileToUpload\"; filename=\"test.md\"')\n {'base_value': 'form-data', 'name': 'fileToUpload', 'filename': 'test.md'}\n >>> parse_header_value('max-age=0')\n {'max-age': '0'}\n >>> parse_header_value('form-data; name=\"data\"; filename=\"style.css\"')\n {'base_value': 'form-data', 'name': 'data', 'filename': 'style.css'}\n \"\"\"\n\n result = {}\n for part in value.split('; '):\n kv_pair = part.split('=', 1)\n if len(kv_pair) == 1:\n result['base_value'] = kv_pair[0]\n else:\n result[kv_pair[0]] = kv_pair[1]\n\n for key, value in result.items():\n start = value[0]\n end = value[-1]\n if (start == '\"' and end == '\"') or (start == '\\'' and end == '\\''):\n result[key] = value[1:-1]\n\n return result\n\n\nREQUEST_LINE_RE = r'(.+) (.+) (HTTP/[0-9\\.]+)'\n\n\ndef parse_http_request_line(line):\n \"\"\"\n Parse the HTTP request line and return a dict with the info it contains.\n :author: Sean Lang\n :param line: The request line to parse, without a trailing CRLF.\n :return: The parts of the HTTP request line.\n :rtype dict\n\n >>> parse_http_request_line(b'GET /test.txt HTTP/1.1')\n {'verb': 'GET', 'url': '/test.txt', 'version': 'HTTP/1.1'}\n \"\"\"\n\n match = re.match(REQUEST_LINE_RE, line.decode('ascii'))\n return {\n 'verb': match.group(1),\n 'url': match.group(2),\n 'version': match.group(3),\n }\n\n\ndef make_http_header(key, value):\n \"\"\"\n Make an HTTP header.\n :author: Sean Lang\n :param key: Name of the header as a bytes object\n :param value: Value of the header as a string, bytes object, or int.\n :return: HTTP header for given key/value, without trailing CRLF.\n :rtype: bytes\n\n >>> make_http_header(b'Content-Length', b'10')\n b'Content-Length: 10'\n >>> make_http_header(b'Content-Length', '10')\n b'Content-Length: 10'\n >>> make_http_header(b'Content-Length', 10)\n b'Content-Length: 10'\n \"\"\"\n\n if type(value) == int:\n value = str(value)\n if type(value) == str:\n value = value.encode('ascii')\n return key + b': ' + value\n\n\ndef write_message_to_file(message, file_path):\n \"\"\"\n Write the message out to a file, specified by the given file_path.\n :author: Sean Lang\n :param message: The contents to write to the file as a bytes object.\n :param file_path: The file path to write to.\n \"\"\"\n\n output_file = open(file_path, 'wb')\n output_file.write(message)\n output_file.close()\n\n\n# ** Do not modify code below this line. 
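# End-to-end parse of one request head using the helpers above (inputs are
# invented for the example):
line = parse_http_request_line(b'POST /upload HTTP/1.1')
key, value = parse_header(b'Content-Type: multipart/form-data; boundary=abc123')
params = parse_header_value(value)
# line   -> {'verb': 'POST', 'url': '/upload', 'version': 'HTTP/1.1'}
# key    -> 'Content-Type'
# params -> {'base_value': 'multipart/form-data', 'boundary': 'abc123'}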
You should add additional helper methods above this line.\n\n# Utility functions\n# You may use these functions to simplify your code.\n\n\ndef get_mime_type(file_path):\n \"\"\"\n Try to guess the MIME type of a file (resource), given its path (primarily its file extension)\n :param file_path: string containing path to (resource) file, such as './abc.html'\n :return: If successful in guessing the MIME type, a string representing the content type, such as 'text/html'\n Otherwise, None\n :rtype: int or None\n\n >>> get_mime_type('index.html')\n 'text/html'\n >>> get_mime_type('index.js')\n 'application/javascript'\n \"\"\"\n\n return mimetypes.guess_type(file_path)[0]\n\n\ndef get_file_size(file_path):\n \"\"\"\n Try to get the size of a file (resource) as number of bytes, given its path\n\n :param file_path: string containing path to (resource) file, such as './abc.html'\n :return: If file_path designates a normal file, an integer value representing the the file size in bytes\n Otherwise (no such file, or path is not a file), None\n :rtype: int or None\n \"\"\"\n\n # Initially, assume file does not exist\n file_size = None\n if os.path.isfile(file_path):\n file_size = os.stat(file_path).st_size\n return file_size\n\n\nif __name__ == \"__main__\":\n # execute only if run as a script\n main()\n\n# Replace this line with your comments on the lab\n","sub_path":"httpserver.py","file_name":"httpserver.py","file_ext":"py","file_size_in_byte":21427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"509218637","text":"# MIT 6.034 Lab 3: Constraint Satisfaction Problems\n# Written by 6.034 staff\n\nfrom constraint_api import *\nfrom test_problems import get_pokemon_problem\n\n\n#### Part 1: Warmup ############################################################\n\ndef has_empty_domains(csp) :\n \"\"\"Returns True if the problem has one or more empty domains, otherwise False\"\"\"\n variables = csp.variables\n for var in variables:\n if len(csp.get_domain(var)) == 0:\n return True\n return False\n\n\ndef check_all_constraints(csp) :\n \"\"\"Return False if the problem's assigned values violate some constraint,\n otherwise True\"\"\"\n assignments = csp.assignments\n constraints = csp.constraints\n for constraint in constraints:\n if constraint.var1 in assignments and constraint.var2 in assignments:\n if not constraint.check(assignments[constraint.var1], assignments[constraint.var2]):\n return False\n return True\n\n\n\n#### Part 2: Depth-First Constraint Solver #####################################\n\ndef solve_constraint_dfs(problem) :\n \"\"\"\n Solves the problem using depth-first search. Returns a tuple containing:\n 1. the solution (a dictionary mapping variables to assigned values)\n 2. 
the number of extensions made (the number of problems popped off the agenda).\n If no solution was found, return None as the first element of the tuple.\n \"\"\"\n agenda = [problem]\n num_extensions = 0\n \n\n while agenda:\n prob = agenda.pop(0)\n num_extensions +=1\n if has_empty_domains(prob):\n continue\n if check_all_constraints(prob):\n if len(prob.unassigned_vars) > 0:\n unass_var = prob.pop_next_unassigned_var()\n values = prob.get_domain(unass_var)\n new_probs = []\n for val in values:\n new_prob = prob.copy()\n new_prob.set_assignment(unass_var,val)\n new_probs.append(new_prob)\n agenda = new_probs + agenda\n else:\n return(prob.assignments, num_extensions)\n \n return (None, num_extensions)\n \n\n# QUESTION 1: How many extensions does it take to solve the Pokemon problem\n# with DFS?\n\n# Hint: Use get_pokemon_problem() to get a new copy of the Pokemon problem\n# each time you want to solve it with a different search method.\n\nprint(solve_constraint_dfs(get_pokemon_problem())[1])\nANSWER_1 = solve_constraint_dfs(get_pokemon_problem())[1]\n\n\n#### Part 3: Forward Checking ##################################################\n\ndef eliminate_from_neighbors(csp, var) :\n \"\"\"\n Eliminates incompatible values from var's neighbors' domains, modifying\n the original csp. Returns an alphabetically sorted list of the neighboring\n variables whose domains were reduced, with each variable appearing at most\n once. If no domains were reduced, returns empty list.\n If a domain is reduced to size 0, quits immediately and returns None.\n \"\"\"\n def check_constraint(var1, var2, val1, val2):\n \"\"\"\n Checks whether there is a constraint violation between var1/val1 and var2/val2.\n Returns True if there isn't a violation or false if there is.\n \"\"\"\n constraints = csp.constraints\n for constraint in constraints:\n if (constraint.var1 == var1 or constraint.var1 == var2) and (constraint.var2 == var1 or constraint.var2 == var2):\n if not constraint.check(val1, val2):\n return False\n return True\n \n reduced_neighbors = []\n neighbors = csp.get_neighbors(var)\n unassig_var = csp.unassigned_vars\n values = csp.get_domain(var)\n remove_n_values = {}\n \n \n #Building dictionary of neighbors and values that conflict\n for n in neighbors:\n if n in unassig_var:\n n_values = csp.get_domain(n)\n remove_n_values[n] = []\n for n_val in n_values:\n constraint_check = []\n for val in values:\n if check_constraint(var,n,val,n_val):\n constraint_check.append(0)\n else:\n constraint_check.append(1)\n if sum(constraint_check) == len(constraint_check):\n remove_n_values[n].append(n_val)\n \n #Remove values from neighbors if they conflict. 
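# Behaviour-preserving one-liners for the Part 1 warm-up helpers above,
# sketched as alternatives against the same constraint_api:
def has_empty_domains_v2(csp):
    return any(len(csp.get_domain(v)) == 0 for v in csp.variables)

def check_all_constraints_v2(csp):
    a = csp.assignments
    return all(c.check(a[c.var1], a[c.var2])
               for c in csp.constraints if c.var1 in a and c.var2 in a)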
Check if domain is reduced to 0.\n for n in remove_n_values:\n for n_val in remove_n_values[n]:\n csp.eliminate(n,n_val)\n if len(csp.get_domain(n)) == 0:\n return None\n if len(remove_n_values[n]) > 0:\n reduced_neighbors.append(n)\n \n return reduced_neighbors\n\n\n# Because names give us power over things (you're free to use this alias)\nforward_check = eliminate_from_neighbors\n\ndef solve_constraint_forward_checking(problem) :\n \"\"\"\n Solves the problem using depth-first search with forward checking.\n Same return type as solve_constraint_dfs.\n \"\"\"\n agenda = [problem]\n num_extensions = 0\n \n\n while agenda:\n prob = agenda.pop(0)\n num_extensions +=1\n if has_empty_domains(prob):\n continue\n if check_all_constraints(prob):\n if len(prob.unassigned_vars) > 0:\n unass_var = prob.pop_next_unassigned_var()\n values = prob.get_domain(unass_var)\n new_probs = []\n for val in values:\n new_prob = prob.copy()\n new_prob.set_assignment(unass_var,val)\n new_probs.append(new_prob)\n for new_p in new_probs:\n eliminate_from_neighbors(new_p,unass_var)\n agenda = new_probs + agenda\n else:\n return(prob.assignments, num_extensions)\n \n return (None, num_extensions)\n\n\n# QUESTION 2: How many extensions does it take to solve the Pokemon problem\n# with DFS and forward checking?\n\nprint(solve_constraint_forward_checking(get_pokemon_problem())[1])\nANSWER_2 = solve_constraint_forward_checking(get_pokemon_problem())[1]\n\n\n#### Part 4: Domain Reduction ##################################################\n\ndef domain_reduction(csp, queue=None) :\n \"\"\"\n Uses constraints to reduce domains, propagating the domain reduction\n to all neighbors whose domains are reduced during the process.\n If queue is None, initializes propagation queue by adding all variables in\n their default order. \n Returns a list of all variables that were dequeued, in the order they\n were removed from the queue. Variables may appear in the list multiple times.\n If a domain is reduced to size 0, quits immediately and returns None.\n This function modifies the original csp.\n \"\"\"\n if queue == None:\n queue = csp.get_all_variables()\n \n dequeued = []\n \n while queue:\n var = queue.pop(0)\n dequeued.append(var)\n reduced_neighbors = eliminate_from_neighbors(csp,var)\n if reduced_neighbors == None:\n return None\n for n in reduced_neighbors:\n if n not in queue:\n queue.append(n)\n return dequeued \n\n\n\n# QUESTION 3: How many extensions does it take to solve the Pokemon problem\n# with DFS (no forward checking) if you do domain reduction before solving it?\n\nx = get_pokemon_problem()\ndomain_reduction(x)\nprint(solve_constraint_dfs(x)[1])\nANSWER_3 = 6\n\n\ndef solve_constraint_propagate_reduced_domains(problem) :\n \"\"\"\n Solves the problem using depth-first search with forward checking and\n propagation through all reduced domains. 
Same return type as\n solve_constraint_dfs.\n \"\"\"\n agenda = [problem]\n num_extensions = 0\n \n\n while agenda:\n prob = agenda.pop(0)\n num_extensions +=1\n if has_empty_domains(prob):\n continue\n if check_all_constraints(prob):\n if len(prob.unassigned_vars) > 0:\n unass_var = prob.pop_next_unassigned_var()\n values = prob.get_domain(unass_var)\n new_probs = []\n for val in values:\n new_prob = prob.copy()\n new_prob.set_assignment(unass_var,val)\n new_probs.append(new_prob)\n for new_p in new_probs:\n queue = eliminate_from_neighbors(new_p,unass_var)\n domain_reduction(new_p,queue)\n agenda = new_probs + agenda\n else:\n return(prob.assignments, num_extensions)\n \n return (None, num_extensions)\n\n\n\n# QUESTION 4: How many extensions does it take to solve the Pokemon problem\n# with forward checking and propagation through reduced domains?\nprint(solve_constraint_propagate_reduced_domains(get_pokemon_problem())[1])\nANSWER_4 = solve_constraint_propagate_reduced_domains(get_pokemon_problem())[1]\n\n\n#### Part 5A: Generic Domain Reduction #########################################\n\ndef propagate(enqueue_condition_fn, csp, queue=None) :\n \"\"\"\n Uses constraints to reduce domains, modifying the original csp.\n Uses enqueue_condition_fn to determine whether to enqueue a variable whose\n domain has been reduced. Same return type as domain_reduction.\n \"\"\"\n if queue == None:\n queue = csp.get_all_variables()\n \n dequeued = []\n while queue:\n var = queue.pop(0)\n dequeued.append(var)\n reduced_neighbors = eliminate_from_neighbors(csp,var)\n if reduced_neighbors == None:\n return None\n for n in reduced_neighbors:\n if enqueue_condition_fn(csp, n):\n if n not in queue:\n queue.append(n)\n return dequeued \n\ndef condition_domain_reduction(csp, var) :\n \"\"\"Returns True if var should be enqueued under the all-reduced-domains\n condition, otherwise False\"\"\"\n return True\n\n\ndef condition_singleton(csp, var) :\n \"\"\"Returns True if var should be enqueued under the singleton-domains\n condition, otherwise False\"\"\"\n domain = csp.get_domain(var)\n if len(domain) == 1:\n return True\n return False\n\n\ndef condition_forward_checking(csp, var) :\n \"\"\"Returns True if var should be enqueued under the forward-checking\n condition, otherwise False\"\"\"\n return False\n\n\n\n#### Part 5B: Generic Constraint Solver ########################################\n\ndef solve_constraint_generic(problem, enqueue_condition=None) :\n \"\"\"\n Solves the problem, calling propagate with the specified enqueue\n condition (a function). If enqueue_condition is None, uses DFS only.\n Same return type as solve_constraint_dfs.\n \"\"\"\n agenda = [problem]\n num_extensions = 0\n \n\n while agenda:\n prob = agenda.pop(0)\n num_extensions +=1\n if has_empty_domains(prob):\n continue\n if check_all_constraints(prob):\n if len(prob.unassigned_vars) > 0:\n unass_var = prob.pop_next_unassigned_var()\n values = prob.get_domain(unass_var)\n new_probs = []\n for val in values:\n new_prob = prob.copy()\n new_prob.set_assignment(unass_var,val)\n new_probs.append(new_prob)\n if enqueue_condition != None:\n for new_p in new_probs:\n propagate(enqueue_condition, new_p, [unass_var])\n agenda = new_probs + agenda\n else:\n return(prob.assignments, num_extensions)\n \n return (None, num_extensions)\n\n\n# QUESTION 5: How many extensions does it take to solve the Pokemon problem\n# with forward checking and propagation through singleton domains? 
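# The enqueue conditions above specialize `propagate` back into the earlier
# algorithms (sketch against the same constraint_api):
prob = get_pokemon_problem()
propagate(condition_domain_reduction, prob)     # == domain_reduction(prob)
prob = get_pokemon_problem()
propagate(condition_singleton, prob)            # singleton propagation only
# condition_forward_checking never re-enqueues, so only the dequeued
# variables' immediate neighbors are pruned (plain forward checking).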
(Don't\n# use domain reduction before solving it.)\nprint(solve_constraint_generic(get_pokemon_problem(),condition_singleton)[1])\nANSWER_5 = solve_constraint_generic(get_pokemon_problem(),condition_singleton)[1]\n\n\n#### Part 6: Defining Custom Constraints #######################################\n\ndef constraint_adjacent(m, n) :\n \"\"\"Returns True if m and n are adjacent, otherwise False.\n Assume m and n are ints.\"\"\"\n if abs(m-n) == 1:\n return True\n else:\n return False\n \n raise NotImplementedError\n\ndef constraint_not_adjacent(m, n) :\n \"\"\"Returns True if m and n are NOT adjacent, otherwise False.\n Assume m and n are ints.\"\"\"\n return not constraint_adjacent(m,n)\n\n\ndef all_different(variables) :\n \"\"\"Returns a list of constraints, with one difference constraint between\n each pair of variables.\"\"\"\n list_constraints = []\n variable_pairs = []\n \n \n for var1 in variables:\n for var2 in variables:\n constraint_need = True\n for var_pair in variable_pairs:\n if var1 in var_pair and var2 in var_pair:\n constraint_need = False\n if var1 != var2 and constraint_need:\n list_constraints.append(Constraint(var1,var2,constraint_different))\n variable_pairs.append([var1,var2])\n return list_constraints\n\n\n#### SURVEY ####################################################################\n\nNAME = 'Luke Chiang'\nCOLLABORATORS = 'None'\nHOW_MANY_HOURS_THIS_LAB_TOOK = 3\nWHAT_I_FOUND_INTERESTING = None\nWHAT_I_FOUND_BORING = None\nSUGGESTIONS = None\n","sub_path":"Lab 3/lab3.py","file_name":"lab3.py","file_ext":"py","file_size_in_byte":13327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"126655593","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n\tOpenname-resolver\n\t~~~~~\n\n\t:copyright: (c) 2014 by Openname.org\n\t:license: MIT, see LICENSE for more details.\n\"\"\"\n\nfrom coinrpc import namecoind \nfrom server.config import DEFAULT_HOST, MEMCACHED_TIMEOUT, MEMCACHED_PORT\n\nimport pylibmc\nmc = pylibmc.Client([DEFAULT_HOST + ':' + MEMCACHED_PORT],binary=True)\n\nfrom commontools import log \n\n#-----------------------------------\ndef warmup_cache(regrex,check_blocks=0):\n\n\tlog.debug(\"processing namespace %s\",regrex)\n\n\treply = namecoind.name_filter(regrex,check_blocks)\n\n\tcounter = 0 \n\tfor i in reply: \n\n\t\ttry:\n\t\t\t#set to no expiry i.e., 0\n\t\t\tmc.set(\"name_\" + str(i['name']),i['value'],0)\n\t\t\tlog.debug(\"inserting %s in cache\",i['name'])\n\t\t\tcounter += 1\n\t\texcept:\n\t\t\tlog.debug(\"not putting %s in cache\",i['name'])\n\t\n\tlog.debug(\"inserted %s entries in cache\",counter)\n\tlog.debug('-'*5)\n\n#-----------------------------------\nif __name__ == '__main__':\n\n\twarmup_cache('u/')\n\twarmup_cache('i/')","sub_path":"tools/warmup_cache.py","file_name":"warmup_cache.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"649919782","text":"\"\"\"\n314. Binary Tree Vertical Order Traversal\nGiven a binary tree, return the vertical order traversal of its nodes' values. 
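# The pair bookkeeping in all_different above can be written more directly
# with itertools.combinations, which yields each unordered pair exactly once
# (sketch; Constraint and constraint_different come from constraint_api):
from itertools import combinations

def all_different_v2(variables):
    return [Constraint(v1, v2, constraint_different)
            for v1, v2 in combinations(variables, 2)]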
(ie, from top to bottom, column by column).\n\nIf two nodes are in the same row and column, the order should be from left to right.\n\nExamples 1:\n\nInput: [3,9,20,null,null,15,7]\n\n 3\n /\\\n / \\\n 9 20\n /\\\n / \\\n 15 7 \n\nOutput:\n\n[\n [9],\n [3,15],\n [20],\n [7]\n]\n\"\"\"\n# Runtime: 20 ms, faster than 99.71% of Python3 online submissions for Binary Tree Vertical Order Traversal.\n# Memory Usage: 12.8 MB, less than 100.00% of Python3 online submissions for Binary Tree Vertical Order Traversal.\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def verticalOrder(self, root: TreeNode) -> List[List[int]]:\n # each node assign a location (depth, x)\n if root == None:\n return []\n stack = [[root, 1, 0]]\n res_dict = collections.defaultdict(list)\n res_dict[0].append(root.val)\n while stack:\n node, depth, idx = stack.pop(0)\n if node.left:\n stack.append([node.left, depth+1, idx-1])\n res_dict[idx-1].append(node.left.val)\n if node.right:\n stack.append([node.right, depth+1, idx+1])\n res_dict[idx+1].append(node.right.val)\n res = []\n for key in sorted(res_dict.keys()):\n res.append(res_dict[key])\n return res\n ","sub_path":"Widen/LC314_Binary_Tree_Vertical_Order_Traversal.py","file_name":"LC314_Binary_Tree_Vertical_Order_Traversal.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"427038738","text":"from .common import db, Field\nfrom pydal.validators import *\nfrom py4web.utils.populate import populate\n\n#\n# py4web app, AI-biorex ported 21.10.2020 16:37:42\n#\n\n#import pydal\n\n#from py4web import *\n#from apps.myapp.models import db\n\n#if not len( db().select(db.auth_user.id) ):\nif not db(db.auth_user).count():\n body = {\n \"username\": \"nil\",\n \"email\": \"nil@nil.com\",\n \"password\": str(CRYPT()(\"xyz12345\")[0]),\n #\"password\": str(pydal.validators.CRYPT()(\"xyz12345\")[0]),\n \"first_name\": \"MainUser\",\n \"last_name\": \"MainUserLast\",\n }\n db.auth_user.insert(**body)\n db.commit()\n\n\ndb.define_table(\n 'test_table',\n Field( 'f0', 'string', label='l0'),\n Field( 'f1', 'string', label='l1'),\n Field( 'f2', 'string', label='l2'),\n )\n\nif not db(db.test_table).count():\n populate(db.test_table, n=10)\n\n\ndb.define_table(\n 'dfindex0',\n Field('f0','string'),\n )\n\ndb.define_table(\n 'dflogin0',\n Field('f0','string'),\n Field('f1','string'),\n Field('f2','boolean'),\n )\n\ndb.define_table(\n 'dfcharts0',\n Field('f0','string'),\n )\n\ndb.define_table(\n 'dftables0',\n Field('f0','string'),\n )\n\ndb.define_table(\n 'dfforms0',\n Field('f0','string'),\n )\n\ndb.define_table(\n 'dfforms1',\n Field('f0','string'),\n Field('f1','string'),\n )\n\ndb.define_table(\n 'dfforms2',\n Field('f0','string'),\n Field('f1','string'),\n )\n\ndb.define_table(\n 'dfforms3',\n Field('f0','string'),\n Field('f1','string'),\n )\n\ndb.define_table(\n 'dfforms4',\n Field('f0','string'),\n Field('f1','string'),\n )\n\ndb.define_table(\n 'dfforms5',\n Field('f0','string'),\n Field('f1','string'),\n Field('f2','string'),\n Field('f3','string'),\n Field('f4','string'),\n Field('f5','string'),\n Field('f6','string'),\n Field('f7','boolean'),\n Field('f8','string'),\n Field('f9','string'),\n Field('f10','boolean'),\n Field('f11','boolean'),\n Field('f12','boolean'),\n Field('f13','boolean'),\n Field('f14','string'),\n Field('f15','string'),\n Field('f16','string'),\n 
Field('f17','string'),\n Field('f18','string'),\n Field('f19','string'),\n Field('f20','string'),\n Field('f21','string'),\n Field('f22','string'),\n Field('f23','string'),\n Field('f24','string'),\n Field('f25','string'),\n Field('f26','string'),\n Field('f27','string'),\n Field('f28','string'),\n Field('f29','boolean'),\n Field('f30','string'),\n Field('f31','string'),\n Field('f32','string'),\n Field('f33','string'),\n Field('f34','string'),\n Field('f35','string'),\n )\n\ndb.define_table(\n 'ttables0',\n Field('f0','string'),\n Field('f1','string'),\n Field('f2','string'),\n Field('f3','string'),\n )\n\ndb.define_table(\n 'ttables1',\n Field('f0','string'),\n Field('f1','string'),\n Field('f2','string'),\n Field('f3','string'),\n )\n\ndb.define_table(\n 'ttables2',\n Field('f0','string'),\n Field('f1','string'),\n Field('f2','string'),\n Field('f3','string'),\n )\n\ndb.define_table(\n 'ttables3',\n Field('f0','string'),\n Field('f1','string'),\n Field('f2','string'),\n Field('f3','string'),\n )\n\nif not db(db.ttables0).count():\n db.ttables0.insert(f0=\"p4w:#\", f1=\"p4w:First Name\", f2=\"p4w:Last Name\", f3=\"p4w:Username\")\n db.ttables0.insert(f0=\"p4w:1\", f1=\"p4w:Mark\", f2=\"p4w:Otto\", f3=\"p4w:@mdo\")\n db.ttables0.insert(f0=\"p4w:2\", f1=\"p4w:Jacob\", f2=\"p4w:Thornton\", f3=\"p4w:@fat\")\n db.ttables0.insert(f0=\"p4w:3\", f1=\"p4w:Larry\", f2=\"p4w:the Bird\", f3=\"p4w:@twitter\")\n db.commit()\n\nif not db(db.ttables1).count():\n db.ttables1.insert(f0=\"p4w:#\", f1=\"p4w:First Name\", f2=\"p4w:Last Name\", f3=\"p4w:Username\")\n db.ttables1.insert(f0=\"p4w:1\", f1=\"p4w:Mark\", f2=\"p4w:Otto\", f3=\"p4w:@mdo\")\n db.ttables1.insert(f0=\"p4w:2\", f1=\"p4w:Jacob\", f2=\"p4w:Thornton\", f3=\"p4w:@fat\")\n db.ttables1.insert(f0=\"p4w:3\", f1=\"p4w:Larry\", f2=\"p4w:the Bird\", f3=\"p4w:@twitter\")\n db.commit()\n\nif not db(db.ttables2).count():\n db.ttables2.insert(f0=\"p4w:#\", f1=\"p4w:First Name\", f2=\"p4w:Last Name\", f3=\"p4w:Username\")\n db.ttables2.insert(f0=\"p4w:1\", f1=\"p4w:Mark\", f2=\"p4w:Otto\", f3=\"p4w:@mdo\")\n db.ttables2.insert(f0=\"p4w:2\", f1=\"p4w:Jacob\", f2=\"p4w:Thornton\", f3=\"p4w:@fat\")\n db.ttables2.insert(f0=\"p4w:3\", f1=\"p4w:Larry\", f2=\"p4w:the Bird\", f3=\"p4w:@twitter\")\n db.ttables2.insert(f0=\"p4w:3\", f1=\"p4w:Sam\", f2=\"p4w:Nevoresky\", f3=\"p4w:@facebook\")\n db.commit()\n\nif not db(db.ttables3).count():\n db.ttables3.insert(f0=\"p4w:#\", f1=\"p4w:First Name\", f2=\"p4w:Last Name\", f3=\"p4w:Username\")\n db.ttables3.insert(f0=\"p4w:1\", f1=\"p4w:Mark\", f2=\"p4w:Otto\", f3=\"p4w:@mdo\")\n db.ttables3.insert(f0=\"p4w:2\", f1=\"p4w:Jacob\", f2=\"p4w:Thornton\", f3=\"p4w:@fat\")\n db.ttables3.insert(f0=\"p4w:3\", f1=\"p4w:Larry\", f2=\"p4w:the Bird\", f3=\"p4w:@twitter\")\n db.ttables3.insert(f0=\"p4w:4\", f1=\"p4w:Mark\", f2=\"p4w:Otto\", f3=\"p4w:@mdo\")\n db.ttables3.insert(f0=\"p4w:5\", f1=\"p4w:Jacob\", f2=\"p4w:Thornton\", f3=\"p4w:@fat\")\n db.ttables3.insert(f0=\"p4w:6\", f1=\"p4w:Larry\", f2=\"p4w:the Bird\", f3=\"p4w:@twitter\")\n db.commit()\n","sub_path":"bubbly/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"228070826","text":"\"\"\"mysite URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. 
Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.conf import settings # add static files\nfrom django.conf.urls.static import static # addstatic files\nfrom engine.views import (base, high_rate, top_rate, movie_also_like,\n movie_also_dislike, movie_rcmd_exist_user, movie_rcmd_to_user, movie_detail, add_rating)\nfrom django.contrib.auth.views import login, logout\n\nurlpatterns = [\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', base),\n url(r'^high_rate$', high_rate, name = 'high_rate'), #平均最高分\n url(r'^top_rate$', top_rate, name = 'top_rate'), #最多人評分\n url(r'^movie_also_like$', movie_also_like), #同時也喜歡\n url(r'^movie_also_dislike$', movie_also_dislike), #同時也不喜歡\n url(r'^movie_rcmd_exist_user$', movie_rcmd_exist_user, name ='movie_rcmd_exist_user'), #推薦給現有使用者\n url(r'^movie_rcmd_to_user$', movie_rcmd_to_user, name = 'movie_rcmd_to_user'),\n url(r'^login/$', login, name='login'),\n url(r'^logout/$', logout, name='logout'),\n url(r'^accounts/', include('registration.backends.simple.urls')),\n url(r'^movie/(?P[0-9]+)/$', movie_detail, name='movie_detail'),\n url(r'^movie/(?P[0-9]+)/add_rating/$', add_rating, name='add_rating'),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n","sub_path":"mysite/mysite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"215396954","text":"# coding=utf-8\n# Copyright (c) Microsoft. 
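# With the named patterns above, views and templates can resolve URLs by
# name instead of hard-coding paths (illustrative values; the import path is
# for Django 1.8 — later versions use django.urls):
from django.core.urlresolvers import reverse

reverse('movie_detail', args=[42])   # -> '/movie/42/'
reverse('add_rating', args=[42])     # -> '/movie/42/add_rating/'
reverse('high_rate')                 # -> '/high_rate'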
All rights reserved.\nimport os\nimport torch\nimport torch.nn as nn\nfrom pretrained_models import MODEL_CLASSES\nfrom transformers import BertConfig\nfrom transformers import BertModel\n\nfrom module.dropout_wrapper import DropoutWrapper\nfrom module.san import SANClassifier, MaskLmHeader\nfrom module.san_model import SanModel\nfrom data_utils.task_def import EncoderModelType, TaskType, AdditionalFeatures\nimport tasks\nfrom experiments.exp_def import TaskDef\nfrom extensions.hooks import gradient_reversal_hook, MyHook\n\nclass LinearPooler(nn.Module):\n def __init__(self, hidden_size):\n super(LinearPooler, self).__init__()\n self.dense = nn.Linear(hidden_size, hidden_size)\n self.activation = nn.Tanh()\n\n def forward(self, hidden_states):\n first_token_tensor = hidden_states[:, 0]\n pooled_output = self.dense(first_token_tensor)\n pooled_output = self.activation(pooled_output)\n return pooled_output\n\ndef generate_decoder_opt(enable_san, max_opt):\n opt_v = 0\n if enable_san and max_opt < 3:\n opt_v = max_opt\n return opt_v\n\nclass SANBertNetwork(nn.Module):\n def __init__(self, opt, bert_config=None, initial_from_local=False):\n super(SANBertNetwork, self).__init__()\n self.dropout_list = nn.ModuleList()\n\n self.forward_hooks = []\n self.backward_hooks = []\n self.scoring_forward_hooks = {}\n self.scoring_backward_hooks = {}\n\n if opt['encoder_type'] not in EncoderModelType._value2member_map_:\n raise ValueError(\"encoder_type is out of pre-defined types\")\n self.encoder_type = opt['encoder_type']\n self.preloaded_config = None\n\n literal_encoder_type = EncoderModelType(self.encoder_type).name.lower()\n config_class, model_class, tokenizer_class = MODEL_CLASSES[literal_encoder_type]\n\n self.preloaded_config = config_class.from_dict(opt) # load config from opt\n self.bert = model_class(self.preloaded_config)\n hidden_size = self.bert.config.hidden_size\n\n if opt.get('dump_feature', False):\n self.opt = opt\n return\n if opt['update_bert_opt'] > 0:\n for p in self.bert.parameters():\n p.requires_grad = False\n\n task_def_list = opt['task_def_list']\n self.task_def_list = task_def_list\n self.decoder_opt = []\n self.task_types = []\n for task_id, task_def in enumerate(task_def_list):\n self.decoder_opt.append(generate_decoder_opt(task_def.enable_san, opt['answer_opt']))\n self.task_types.append(task_def.task_type)\n\n # create output header\n self.scoring_list = nn.ModuleList()\n self.dropout_list = nn.ModuleList()\n\n # create lists for additional inputs embeddings\n self.additional_input_features_list = nn.ModuleList()\n\n for task_id in range(len(task_def_list)):\n task_def: TaskDef = task_def_list[task_id]\n lab = task_def.n_class\n decoder_opt = self.decoder_opt[task_id]\n task_type = self.task_types[task_id]\n task_dropout_p = opt['dropout_p'] if task_def.dropout_p is None else task_def.dropout_p\n dropout = DropoutWrapper(task_dropout_p, opt['vb_dropout'])\n self.dropout_list.append(dropout)\n task_obj = tasks.get_task_obj(task_def)\n print('{}: {}'.format(task_id, task_obj))\n\n ################################################################\n ######## Add additional layers used during encoding ############\n ################################################################\n\n if task_def.additional_features is not None:\n for feature_name in task_def.additional_features:\n if feature_name == AdditionalFeatures.cue_indicator:\n embeds = nn.Embedding(2, hidden_size)\n self.additional_input_features_list.append(embeds)\n elif feature_name == 
AdditionalFeatures.scope_indicator:\n embeds = nn.Embedding(2, hidden_size)\n self.additional_input_features_list.append(embeds)\n else: self.additional_input_features_list.append(None)\n\n if task_obj is not None:\n print('Task obj for {} not None'.format(task_id))\n print('Setting out proj as with dec_opt {}, hid {}, lab {}'.format(decoder_opt,hidden_size,lab))\n out_proj = task_obj.train_build_task_layer(decoder_opt, hidden_size, lab, opt, prefix='answer', dropout=dropout)\n elif task_type == TaskType.Span:\n assert decoder_opt != 1\n out_proj = nn.Linear(hidden_size, 2)\n elif task_type == TaskType.SeqenceLabeling:\n out_proj = nn.Linear(hidden_size, lab)\n elif task_type == TaskType.MaskLM:\n if opt['encoder_type'] == EncoderModelType.ROBERTA:\n # TODO: xiaodl\n out_proj = MaskLmHeader(bert_model_type=opt['bert_model_type'], config=self.bert.config, embedding_weights=self.bert.embeddings.word_embeddings.weight)\n else:\n out_proj = MaskLmHeader(bert_model_type=opt['bert_model_type'], config=self.bert.config, embedding_weights=self.bert.embeddings.word_embeddings.weight)\n else:\n if decoder_opt == 1:\n out_proj = SANClassifier(hidden_size, hidden_size, lab, opt, prefix='answer', dropout=dropout)\n else:\n out_proj = nn.Linear(hidden_size, lab)\n if task_type == TaskType.Adversarial:\n # register the hook on the ouput layer\n out_proj.register_backward_hook(gradient_reversal_hook)\n # register hooks for checking gradients\n self.scoring_list.append(out_proj)\n self.scoring_forward_hooks[task_id] = (MyHook(out_proj))\n self.scoring_backward_hooks[task_id] = (MyHook(out_proj, backward=True))\n\n\n\n self.opt = opt\n self._my_init()\n\n # if not loading from local, loading model weights from pre-trained model, after initialization\n if not initial_from_local:\n config_class, model_class, tokenizer_class = MODEL_CLASSES[literal_encoder_type]\n #self.bert = model_class.from_pretrained(opt['init_checkpoint'],config=self.preloaded_config)\n self.bert = BertModel(self.preloaded_config)\n\n #register hooks\n for module in list(self.bert._modules.items()):\n self.forward_hooks.append((MyHook(module[1])))\n self.backward_hooks.append(MyHook(module[1], backward=True))\n\n\n def _my_init(self):\n def init_weights(module):\n if isinstance(module, (nn.Linear, nn.Embedding)):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=0.02 * self.opt['init_ratio'])\n if isinstance(module, nn.Linear):\n if module.bias is not None:\n module.bias.data.zero_()\n\n self.apply(init_weights)\n\n def encode(self, task_id, input_ids, token_type_ids, attention_mask, additional_features):\n if len(additional_features) == 0:\n outputs = self.bert(input_ids=input_ids, token_type_ids=token_type_ids,\n attention_mask=attention_mask)\n else:\n # get embeddings\n\n input_embeddings = self.embed_inputs_with_additional_features(task_id=task_id, input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, additional_feature_idxs=additional_features)\n outputs = self.bert(input_ids=None, token_type_ids=token_type_ids, inputs_embeds=input_embeddings,\n attention_mask=attention_mask)\n # input into bert\n sequence_output = outputs[0]\n pooled_output = outputs[1]\n return sequence_output, pooled_output\n\n def embed_inputs_with_additional_features(self, task_id, input_ids, position_ids, token_type_ids, additional_feature_idxs):\n # compute bert embeddings with adding embeddings for additional 
features before the Layernorm of the embedding layer\n # this is copied and modified from the Bert source code\n if input_ids is not None:\n input_shape = input_ids.size()\n else:\n input_shape = self.bert.embeddings.inputs_embeds.size()[:-1]\n seq_length = input_shape[1]\n device = input_ids.device if input_ids is not None else self.bert.embeddings.inputs_embeds.device\n if position_ids is None:\n position_ids = torch.arange(seq_length, dtype=torch.long, device=device)\n position_ids = position_ids.unsqueeze(0).expand(input_shape)\n if token_type_ids is None:\n token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)\n\n\n inputs_embeds = self.bert.embeddings.word_embeddings(input_ids)\n position_embeddings = self.bert.embeddings.position_embeddings(position_ids)\n token_type_embeddings = self.bert.embeddings.token_type_embeddings(token_type_ids)\n additional_feature_embeds = self.additional_input_features_list[task_id](additional_feature_idxs)\n embeddings = inputs_embeds + position_embeddings + token_type_embeddings + additional_feature_embeds\n\n embeddings = self.bert.embeddings.LayerNorm(embeddings)\n embeddings = self.bert.embeddings.dropout(embeddings)\n return embeddings\n\n\n def forward(self, input_ids, token_type_ids, attention_mask, premise_mask=None, hyp_mask=None, task_id=0, additional_features=[]):\n sequence_output, pooled_output = self.encode(task_id, input_ids, token_type_ids, attention_mask, additional_features=additional_features)\n\n decoder_opt = self.decoder_opt[task_id]\n task_type = self.task_types[task_id]\n task_obj = tasks.get_task_obj(self.task_def_list[task_id])\n if task_obj is not None:\n logits = task_obj.train_forward(sequence_output, pooled_output, premise_mask, hyp_mask, decoder_opt, self.dropout_list[task_id], self.scoring_list[task_id])\n return logits\n elif task_type == TaskType.Span:\n assert decoder_opt != 1\n sequence_output = self.dropout_list[task_id](sequence_output)\n logits = self.scoring_list[task_id](sequence_output)\n start_scores, end_scores = logits.split(1, dim=-1)\n start_scores = start_scores.squeeze(-1)\n end_scores = end_scores.squeeze(-1)\n return start_scores, end_scores\n elif task_type == TaskType.SeqenceLabeling:\n\n pooled_output = sequence_output\n pooled_output = self.dropout_list[task_id](pooled_output)\n pooled_output = pooled_output.contiguous().view(-1, pooled_output.size(2))\n logits = self.scoring_list[task_id](pooled_output)\n return logits\n elif task_type == TaskType.MaskLM:\n sequence_output = self.dropout_list[task_id](sequence_output)\n logits = self.scoring_list[task_id](sequence_output)\n return logits\n else:\n if decoder_opt == 1:\n max_query = hyp_mask.size(1)\n assert max_query > 0\n assert premise_mask is not None\n assert hyp_mask is not None\n hyp_mem = sequence_output[:, :max_query, :]\n logits = self.scoring_list[task_id](sequence_output, hyp_mem, premise_mask, hyp_mask)\n else:\n pooled_output = self.dropout_list[task_id](pooled_output)\n logits = self.scoring_list[task_id](pooled_output)\n return logits\n","sub_path":"mt_dnn/matcher.py","file_name":"matcher.py","file_ext":"py","file_size_in_byte":11974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"650519976","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 21 19:45:15 2017\n\n@author: Greg\n\"\"\"\nimport random\nS = None # Send\nR = None # Receiver\nsuccess = 0 # number of success\nN = 100000 # sample size / omega\nR_count = 0\n\n# variables given\np_zero = 
0.4\nepsilon_zero = 0.02\nepsilon_one = 0.03\n\nfor i in range(0, N):\n # m, t random number generators from 0.0-1.0\n m = random.random()\n t = random.random()\n \n # condition for S\n if(m <= p_zero):\n S = 0\n else:\n S = 1\n \n # condition for R\n if(S == 0 and t <= epsilon_zero or S == 1 and t > epsilon_one):\n R = 1\n R_count += 1\n elif(S == 0 and t > epsilon_zero or S == 1 and t <= epsilon_one):\n R = 0\n else:\n print(\"error\")\n \n if(S == 1 and R == 1):\n success += 1\n#end of for\n\n#calculations\nprob_S_intersect_R_when_received_correctly = success/N\nprob_R_is_one = R_count/N\nprob_S_equal_one_given_that_R_is_one = prob_S_intersect_R_when_received_correctly / prob_R_is_one\n\nprint(\"P(S=1|R=1): \" + str(prob_S_equal_one_given_that_R_is_one))\n\n# Printed answer\n#P(S=1|R=1): 0.9862914008157463","sub_path":"Fall2017/EE380/Labs/Lab2/three_conditional_probability.py","file_name":"three_conditional_probability.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"360028820","text":"# Copyright (c) 2014 Ian C. Good\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport asyncio\n\nfrom pymap.interfaces import (SessionInterface, MailboxInterface,\n MessageInterface)\nfrom pymap.exceptions import * # NOQA\n\n\n__all__ = ['init']\n\n\nclass Session(SessionInterface):\n\n _mailboxes = ['INBOX', 'Sent', 'Drafts', 'Trash']\n _subscribed = _mailboxes[:]\n\n @asyncio.coroutine\n def list_mailboxes(self, subscribed=False):\n if subscribed:\n return [Mailbox(name) for name in self._mailboxes\n if name in self._subscribed]\n else:\n return [Mailbox(name) for name in self._mailboxes]\n\n @asyncio.coroutine\n def get_mailbox(self, name):\n if name not in self._mailboxes:\n raise MailboxNotFound(name)\n return Mailbox(name)\n\n @asyncio.coroutine\n def create_mailbox(self, name):\n if name in self._mailboxes:\n raise MailboxConflict(name)\n self._mailboxes.append(name)\n\n @asyncio.coroutine\n def delete_mailbox(self, name):\n if name not in self._mailboxes:\n raise MailboxNotFound(name)\n self._mailboxes.remove(name)\n\n @asyncio.coroutine\n def rename_mailbox(self, before, after):\n if before not in self._mailboxes:\n raise MailboxNotFound(before)\n if after in self._mailboxes:\n raise MailboxConflict(after)\n idx = self._mailboxes.index(before)\n self._mailboxes[idx] = after\n\n @asyncio.coroutine\n def subscribe(self, name):\n if name not in self._mailboxes:\n raise MailboxNotFound(name)\n if name not in self._subscribed:\n self._subscribed.append(name)\n\n @asyncio.coroutine\n def unsubscribe(self, name):\n if name in self._subscribed:\n self._subscribed.remove(name)\n\n\nclass Mailbox(MailboxInterface):\n\n sep = b'.'\n marked = False\n\n\nclass Message(MessageInterface):\n\n pass\n\n\n@asyncio.coroutine\ndef login(result):\n if result.authcid == 'testuser' and result.check_secret('testpass'):\n return Session('testuser')\n\n\ndef init():\n return login\n","sub_path":"pymap/httpmail/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"599040850","text":"#!/usr/bin/python\n\nimport math\n\ndef get_error(errors, index):\n for x in errors:\n if x['index'] == index:\n return x['error']\n return 0.0\n\ndef outlierCleaner(predictions, ages, net_worths):\n \"\"\"\n Clean away the 10% of points that have the largest\n residual errors (difference between the prediction\n and the actual net worth).\n\n Return a list of tuples named cleaned_data where \n each tuple is of the form (age, net_worth, error).\n \"\"\"\n \n cleaned_data = []\n residual_errors = []\n size = len(predictions)\n to_remove = int(size * 0.1)\n\n for i in range(size):\n prediction = predictions[i]\n net_worth = net_worths[i]\n residual_errors.append({'index': i, 'error': math.fabs(prediction - net_worth)})\n \n ordered_residual_errors = sorted(residual_errors, key=lambda k: k['error'], reverse=True)[0:to_remove]\n ignored_points = [x['index'] for x in ordered_residual_errors]\n\n for i in range(size):\n if i in ignored_points:\n continue\n age = ages[i]\n net_worth = net_worths[i]\n residual_error = get_error(residual_errors, i)\n cleaned_data.append((age, net_worth, residual_error))\n \n ### your code goes here\n\n \n return 
cleaned_data\n\n","sub_path":"outliers/outlier_cleaner.py","file_name":"outlier_cleaner.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"249546180","text":"# coding=utf-8\nfrom __future__ import absolute_import\n\nimport octoprint.plugin\nimport datetime\nimport flask\nfrom flask import jsonify, request, make_response, Response, send_file\nimport json\n\nfrom octoprint_SpoolManager.models.SpoolModel import SpoolModel\nfrom octoprint_SpoolManager.common import StringUtils\nfrom octoprint_SpoolManager.api import Transformer\nfrom octoprint_SpoolManager.common.SettingsKeys import SettingsKeys\n\nclass SpoolManagerAPI(octoprint.plugin.BlueprintPlugin):\n\n\tdef _updateSpoolModelFromJSONData(self, spoolModel, jsonData):\n\n\t\tspoolModel.databaseId = self._getValueFromJSONOrNone(\"databaseId\", jsonData)\n\t\tspoolModel.isTemplate = self._getValueFromJSONOrNone(\"isTemplate\", jsonData)\n\t\tspoolModel.displayName = self._getValueFromJSONOrNone(\"displayName\", jsonData)\n\t\tspoolModel.vendor = self._getValueFromJSONOrNone(\"vendor\", jsonData)\n\t\tspoolModel.material = self._getValueFromJSONOrNone(\"material\", jsonData)\n\t\tspoolModel.density = self._getValueFromJSONOrNone(\"density\", jsonData)\n\t\tspoolModel.diameter = self._getValueFromJSONOrNone(\"diameter\", jsonData)\n\t\tspoolModel.color = self._getValueFromJSONOrNone(\"color\", jsonData)\n\t\tspoolModel.temperature = self._getValueFromJSONOrNone(\"temperature\", jsonData)\n\t\tspoolModel.totalWeight = self._getValueFromJSONOrNone(\"totalWeight\", jsonData)\n\t\tspoolModel.remainingWeight = self._getValueFromJSONOrNone(\"remainingWeight\", jsonData)\n\t\tspoolModel.usedLength = self._getValueFromJSONOrNone(\"usedLength\", jsonData)\n\t\tspoolModel.usedWeight = self._getValueFromJSONOrNone(\"usedWeight\", jsonData)\n\t\tspoolModel.code = self._getValueFromJSONOrNone(\"code\", jsonData)\n\n\t\tspoolModel.firstUse = StringUtils.transformToDateTimeOrNone(self._getValueFromJSONOrNone(\"firstUse\", jsonData))\n\t\tspoolModel.lastUse = StringUtils.transformToDateTimeOrNone(self._getValueFromJSONOrNone(\"lastUse\", jsonData))\n\t\tspoolModel.purchasedOn = StringUtils.transformToDateTimeOrNone(self._getValueFromJSONOrNone(\"purchasedOn\", jsonData))\n\n\t\tspoolModel.purchasedFrom = self._getValueFromJSONOrNone(\"purchasedFrom\", jsonData)\n\t\tspoolModel.cost = self._getValueFromJSONOrNone(\"cost\", jsonData)\n\t\tspoolModel.costUnit = self._getValueFromJSONOrNone(\"costUnit\", jsonData)\n\n\t\tspoolModel.labels = json.dumps(self._getValueFromJSONOrNone(\"labels\", jsonData))\n\n\t\tspoolModel.noteText = self._getValueFromJSONOrNone(\"noteText\", jsonData)\n\t\tspoolModel.noteDeltaFormat = json.dumps(self._getValueFromJSONOrNone(\"noteDeltaFormat\", jsonData))\n\t\tspoolModel.noteHtml = self._getValueFromJSONOrNone(\"noteHtml\", jsonData)\n\t\tpass\n\n\t# def _transformSpoolModelToDict(self, spoolModel):\n\t# \tspoolAsDict = spoolModel.__data__\n\t#\n\t# \tspoolAsDict[\"firstUse\"] = StringUtils.formatDateTime(spoolModel.firstUse)\n\t# \tspoolAsDict[\"lastUse\"] = StringUtils.formatDateTime(spoolModel.lastUse)\n\t# \tspoolAsDict[\"purchasedOn\"] = StringUtils.formatDateTime(spoolModel.purchasedOn)\n\t#\n\t#\n\t#\n\t# \t# Decimal and date time needs to be converted\n\t#\n\t# \t# spoolAsDict[\"temperature\"] = StringUtils.formatSave(\"{:.02f}\", spoolAsDict[\"temperature\"], \"\")\n\t# \t# spoolAsDict[\"weight\"] = 
StringUtils.formatSave(\"{:.02f}\", spoolAsDict[\"weight\"], \"\")\n\t# \t# spoolAsDict[\"remainingWeight\"] = StringUtils.formatSave(\"{:.02f}\", spoolAsDict[\"remainingWeight\"], \"\")\n\t# \t# spoolAsDict[\"usedLength\"] = StringUtils.formatSave(\"{:.02f}\", spoolAsDict[\"usedLength\"], \"\")\n\t# \t# spoolAsDict[\"usedLength\"] = StringUtils.formatSave(\"{:.02f}\", spoolAsDict[\"usedLength\"], \"\")\n\t#\n\t# \t# spoolAsDict[\"firstUse\"] = spoolModel.firstUse.strftime('%d.%m.%Y %H:%M')\n\t# \t# spoolAsDict[\"lastUse\"] = spoolModel.lastUse.strftime('%d.%m.%Y %H:%M')\n\t#\n\t# \t# spoolAsDict[\"firstUse\"] = self._formatDateOrNone( spoolModel.firstUse )\n\t# \t# spoolAsDict[\"lastUse\"] = self._formatDateOrNone( spoolModel.lastUse )\n\t#\n\t#\n\t# \treturn spoolAsDict\n\t#\n\t# def _transformAllSpoolModelsToDict(self, allSpoolModels):\n\t# \tresult = []\n\t# \tfor job in allSpoolModels:\n\t# \t\tspoolAsDict = self._transformSpoolModelToDict(job)\n\t# \t\tresult.append(spoolAsDict)\n\t# \treturn result\n\n\tdef _getValueFromJSONOrNone(self, key, json):\n\t\tif key in json:\n\t\t\treturn json[key]\n\t\treturn None\n\n\t# def _formatDateOrNone(self, dateValue):\n\t# \tif dateValue != None:\n\t# \t\treturn dateValue.strftime('%d.%m.%Y %H:%M')\n\t# \treturn None\n\t# def _formatDateOrNone(self, dateValue):\n\t# \tif dateValue != None:\n\t# \t\treturn datetime.strptime(str(dateValue), '%d.%m.%Y %H:%M')\n\t# \treturn None\n\n\tdef loadSelectedSpool(self):\n\t\tspoolModel = None\n\n\t\tdatabaseId = self._settings.get_int([SettingsKeys.SETTINGS_KEY_SELECTED_SPOOL_DATABASE_ID])\n\t\tif (databaseId != None):\n\t\t\tspoolModel = self._databaseManager.loadSpool(databaseId)\n\t\t\tif (spoolModel == None):\n\t\t\t\tself._logger.warning(\n\t\t\t\t\t\"Last selected Spool from plugin-settings not found in database. 
Maybe deleted in the meantime.\")\n\n\t\treturn spoolModel\n\n\n\tdef _createSampleSpoolModel(self):\n\t\t#DisplayName, Vendor, Material, Color[# code], Diameter [mm], Density [g/cm³], Temperature [°C], TotalWeight [g], UsedWeight [g], UsedLength [mm], FirstUse [dd.mm.yyyy hh:mm], LastUse [dd.mm.yyyy hh:mm], PurchasedFrom, PurchasedOn [dd.mm.yyyy hh:mm], Cost, CostUnit, Labels, NoteText\n\n\t\ts1 = SpoolModel()\n\t\ts1.displayName = \"Number #1\"\n\t\ts1.vendor = \"The Spool Company\"\n\t\ts1.material = \"PETG\"\n\t\ts1.color = \"#FF0000\"\n\t\ts1.diameter = 1.75\n\t\ts1.density = 1.27\n\t\treturn s1\n\n\n\t################################################### APIs\n\n\t############################################################################################## ALLOWED TO PRINT\n\t@octoprint.plugin.BlueprintPlugin.route(\"/allowedToPrint\", methods=[\"GET\"])\n\tdef allowed_to_print(self):\n\n\t\tcheckForSelectedSpool = self._settings.get_boolean([SettingsKeys.SETTINGS_KEY_WARN_IF_SPOOL_NOT_SELECTED])\n\t\tcheckForFilamentLength = self._settings.get_boolean([SettingsKeys.SETTINGS_KEY_WARN_IF_FILAMENT_NOT_ENOUGH])\n\t\treminderSelectingSpool = self._settings.get_boolean([SettingsKeys.SETTINGS_KEY_REMINDER_SELECTING_SPOOL])\n\n\t\tif (checkForFilamentLength == False and checkForSelectedSpool == False):\n\t\t\treturn flask.jsonify({\n\t\t\t\t\"result\": \"startPrint\"\n\t\t\t})\n\n\t\tspoolModel = self.loadSelectedSpool()\n\n\t\tif (checkForSelectedSpool == True and spoolModel == None):\n\t\t\treturn flask.jsonify({\n\t\t\t\t\"result\": \"noSpoolSelected\",\n\t\t\t})\n\n\t\tif (checkForFilamentLength == True and spoolModel != None):\n\t\t\t# # check if loaded\n\t\t\t# if (spoolModel == None):\n\t\t\t# \treturn flask.jsonify({\n\t\t\t# \t\t\"result\": \"noSpoolForUsageCheck\",\n\t\t\t# \t})\n\t\t\t# else:\n\t\t\tresult = self.checkRemainingFilament();\n\t\t\tif (result == False):\n\t\t\t\treturn flask.jsonify({\n\t\t\t\t\t\"result\": \"filamentNotEnough\",\n\t\t\t\t})\n\n\t\tif (reminderSelectingSpool == True and spoolModel != None):\n\t\t\treturn flask.jsonify({\n\t\t\t\t\"result\": \"reminderSpoolSelection\",\n\t\t\t\t\"spoolName\": spoolModel.displayName\n\t\t\t})\n\n\t\treturn flask.jsonify({\n\t\t\t\"result\": \"startPrint\"\n\t\t})\n\n\t##################################################################################################### SELECT SPOOL\n\t@octoprint.plugin.BlueprintPlugin.route(\"/selectSpool\", methods=[\"PUT\"])\n\tdef select_spool(self):\n\t\tself._logger.info(\"API Store selected spool\")\n\t\tjsonData = request.json\n\n\t\tdatabaseId = self._getValueFromJSONOrNone(\"databaseId\", jsonData)\n\t\tif (databaseId != None):\n\t\t\tspoolModel = self._databaseManager.loadSpool(databaseId)\n\t\t\t# check if loaded\n\t\t\tif (spoolModel != None):\n\t\t\t\tself._logger.info(\"Store selected spool '\"+spoolModel.displayName+\"' in settings.\")\n\n\t\t\t\t# - store spool in Settings\n\t\t\t\tself._settings.set_int([SettingsKeys.SETTINGS_KEY_SELECTED_SPOOL_DATABASE_ID], databaseId)\n\t\t\t\tself._settings.save()\n\n\t\t\t\tself.checkRemainingFilament()\n\t\t\telse:\n\t\t\t\tself._logger.warning(\"Selected Spool with id '\"+str(databaseId)+\"' not in database anymore. 
Maybe deleted in the meantime.\")\n\t\telse:\n\t\t\t# No selection\n\t\t\tself._logger.info(\"Clear stored selected spool in settings.\")\n\t\t\tself._settings.set_int([SettingsKeys.SETTINGS_KEY_SELECTED_SPOOL_DATABASE_ID], None)\n\t\t\tself._settings.save()\n\t\t\tpass\n\n\t\t# databaseId = self._databaseManager.saveModel(spoolModel)\n\n\t\treturn flask.jsonify()\n\n\n\t################################################################################################## LOAD ALL SPOOLS\n\t@octoprint.plugin.BlueprintPlugin.route(\"/loadSpoolsByQuery\", methods=[\"GET\"])\n\tdef load_allSpools(self):\n\t\tself._logger.debug(\"API Load all spool\")\n\t\t# sp1 = SpoolModel()\n\t\t# sp1.displayName = \"Spool No.1\"\n\t\t# sp1.vendor = \"Janbex\"\n\t\t# sp1.material = \"ABS\"\n\t\t# sp1.color = \"#00dd00\"\n\t\t# sp1.density = 123.23\n\t\t# sp1.diameter = 432.12\n\t\t# sp1.temperature = 221\n\t\t# sp1.firstUse = datetime.datetime(2019, 5, 17)\n\t\t# sp1.lastUse = datetime.datetime(2019, 6, 4)\n\t\t# sp1.remainingWeight = 1234\n\t\t# sp1.weight = 2000\n\t\t# sp1.usedPercentage = str(1234.0 / (2000.0 / 100.0))\n\t\t# sp1.usedLength = 32\n\t\t# sp1.code = \"XS-28787-HKH-234\"\n\t\t# sp1.purchasedOn = datetime.datetime(2018, 4, 3)\n\t\t# sp1.purchasedFrom = \"http://www.amazon.de/eorjoeiirjfoiejfoijeroffjeroeoidj\"\n\t\t# sp1.cost = 3.14\n\t\t#\n\t\t# sp2 = SpoolModel()\n\t\t# sp2.displayName = \"Spool No.2\"\n\t\t# sp2.vendor = \"Plastic Joe\"\n\t\t# sp2.material = \"PETG\"\n\t\t#\n\t\t# allSpools = [sp1,sp2]\n\n\t\ttableQuery = flask.request.values\n\n\t\tallSpools = self._databaseManager.loadAllSpoolsByQuery(tableQuery)\n\t\ttotalItemCount = self._databaseManager.countSpoolsByQuery(tableQuery)\n\n\t\t# allSpoolsAsDict = self._transformAllSpoolModelsToDict(allSpools)\n\t\tallSpoolsAsDict = Transformer.transformAllSpoolModelsToDict(allSpools)\n\n\t\t# load all catalogs: vendors, materials, labels, [colors]\n\t\tvendors = list(self._databaseManager.loadCatalogVendors(tableQuery))\n\t\tmaterials = list(self._databaseManager.loadCatalogMaterials(tableQuery))\n\t\tlabels = list(self._databaseManager.loadCatalogLabels(tableQuery))\n\n\t\ttempateSpoolAsDict = None\n\t\tallTemplateSpools = self._databaseManager.loadSpoolTemplateSpool()\n\t\tfor spool in allTemplateSpools:\n\t\t\ttempateSpoolAsDict = Transformer.transformSpoolModelToDict(spool)\n\t\t\tbreak\n\n\t\tcatalogs = {\n\t\t\t\"vendors\": vendors,\n\t\t\t\"materials\": materials,\n\t\t\t\"labels\": labels\n\t\t}\n\t\t# catalogs = {\n\t\t# \t\"materials\": [\"\", \"ABS\", \"PLA\", \"PETG\"],\n\t\t# \t\"colors\": [\"\", \"#123\", \"#456\"],\n\t\t# \t\"labels\": [\"\", \"good\", \"bad\"]\n\t\t# }\n\n\t\treturn flask.jsonify({\n\t\t\t\t\t\t\t\t\"templateSpool\": tempateSpoolAsDict,\n\t\t\t\t\t\t\t\t\"catalogs\": catalogs,\n\t\t\t\t\t\t\t\t\"totalItemCount\": totalItemCount,\n\t\t\t\t\t\t\t\t\"allSpools\": allSpoolsAsDict\n\t\t\t\t\t\t\t})\n\n\n\t####################################################################################################### SAVE SPOOL\n\t@octoprint.plugin.BlueprintPlugin.route(\"/saveSpool\", methods=[\"PUT\"])\n\tdef save_spool(self):\n\t\tself._logger.info(\"API Save spool\")\n\t\tjsonData = request.json\n\n\t\tdatabaseId = self._getValueFromJSONOrNone(\"databaseId\", jsonData)\n\t\tif (databaseId != None):\n\t\t\tself._logger.info(\"Update spool with database id '\"+str(databaseId)+\"'\")\n\t\t\tspoolModel = self._databaseManager.loadSpool(databaseId)\n\t\t\tself._updateSpoolModelFromJSONData(spoolModel, 
jsonData)\n\t\telse:\n\t\t\tself._logger.info(\"Create new spool\")\n\t\t\tspoolModel = SpoolModel()\n\t\t\tself._updateSpoolModelFromJSONData(spoolModel, jsonData)\n\n\t\tdatabaseId = self._databaseManager.saveModel(spoolModel)\n\n\t\treturn flask.jsonify()\n\n\n\t##################################################################################################### DELETE SPOOL\n\t@octoprint.plugin.BlueprintPlugin.route(\"/deleteSpool/\", methods=[\"DELETE\"])\n\tdef delete_printjob(self, databaseId):\n\t\tself._logger.info(\"API Delete spool with database id '\" + str(databaseId) + \"'\")\n\t\tprintJob = self._databaseManager.deleteSpool(databaseId)\n\t\t# snapshotFilename = CameraManager.buildSnapshotFilename(printJob.printStartDateTime)\n\t\t# self._cameraManager.deleteSnapshot(snapshotFilename)\n\t\t# self._databaseManager.deletePrintJob(databaseId)\n\t\treturn flask.jsonify()\n","sub_path":"octoprint_SpoolManager/api/SpoolManagerAPI.py","file_name":"SpoolManagerAPI.py","file_ext":"py","file_size_in_byte":11705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"21535239","text":"# -*- coding: utf-8 -*-\n# #############################################################################\n#\n# Author: Yannick Buron\n# Copyright 2013 Yannick Buron\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\n\nfrom openerp import models, fields, api, _\nfrom openerp.exceptions import except_orm\nimport re\nfrom datetime import datetime\n\nimport logging\n\n_logger = logging.getLogger(__name__)\n\n\nclass ClouderImage(models.Model):\n _name = 'clouder.image'\n\n name = fields.Char('Image name', size=64, required=True)\n current_version = fields.Char('Current version', size=64, required=True)\n parent_id = fields.Many2one('clouder.image', 'Parent image')\n parent_version_id = fields.Many2one(\n 'clouder.image.version', 'Parent version')\n parent_from = fields.Char('From', size=64)\n privileged = fields.Boolean(\n 'Privileged?',\n help=\"Indicate if the containers shall be in privilaged mode. 
\"\n \"Warning : Theses containers will have access to the host system.\"\n )\n registry_id = fields.Many2one('clouder.container', 'Registry')\n dockerfile = fields.Text('DockerFile')\n volume_ids = fields.One2many('clouder.image.volume', 'image_id', 'Volumes')\n port_ids = fields.One2many('clouder.image.port', 'image_id', 'Ports')\n version_ids = fields.One2many(\n 'clouder.image.version', 'image_id', 'Versions')\n public = fields.Boolean('Public?')\n partner_id = fields.Many2one(\n 'res.partner', 'Manager',\n default=lambda self: self.env['clouder.model'].user_partner)\n\n _sql_constraints = [\n ('name_uniq', 'unique(name)', 'Image name must be unique!')\n ]\n\n @api.one\n @api.constrains('name')\n def _validate_data(self):\n if not re.match(\"^[\\w\\d_]*$\", self.name):\n raise except_orm(_('Data error!'), _(\n \"Name can only contains letters, digits and underscore\"))\n\n # @api.multi\n # def get_vals(self):\n #\n # vals = {}\n # vals.update(self.env.ref('clouder.clouder_settings').get_vals())\n #\n # ports = {}\n # for port in self.port_ids:\n # ports[port.name] = {'id': port.id, 'name': port.name, 'localport': port.localport}\n #\n # volumes = {}\n # for volume in self.volume_ids:\n # volumes[volume.id] = {'id': volume.id, 'name': volume.name}\n #\n # vals.update({\n # 'image_name': self.name,\n # 'image_privileged': self.privileged,\n # 'image_parent_id': self.parent_id and self.parent_id.id,\n # 'image_parent_from': self.parent_from,\n # 'image_ports': ports,\n # 'image_volumes': volumes,\n # 'image_dockerfile': self.dockerfile\n # })\n #\n # return vals\n\n @api.multi\n def build(self):\n\n if not self.dockerfile:\n return\n if not self.registry_id and self.name != 'img_registry':\n raise except_orm(\n _('Date error!'), _(\n \"You need to specify the registry \"\n \"where the version must be stored.\"))\n now = datetime.now()\n version = self.current_version + '.' 
+ now.strftime('%Y%m%d.%H%M%S')\n self.env['clouder.image.version'].create({\n 'image_id': self.id, 'name': version,\n 'registry_id': self.registry_id and self.registry_id.id,\n 'parent_id': self.parent_version_id and self.parent_version_id.id})\n\n # def unlink(self, cr, uid, ids, context={}):\n # for image in self.browse(cr, uid, ids, context=context):\n # vals = self.get_vals(cr, uid, image.id, context=context)\n # self.purge(cr, uid, vals, context=context)\n # return super(clouder_image, self).unlink(cr, uid, ids, context=context)\n #\n # def purge(self, cr, uid, vals, context={}):\n # context.update({'clouder-self': self, 'clouder-cr': cr, 'clouder-uid': uid})\n # execute.execute_local(['sudo','docker', 'rmi', vals['image_name'] + ':latest'], context)\n\n\nclass ClouderImageVolume(models.Model):\n _name = 'clouder.image.volume'\n\n image_id = fields.Many2one('clouder.image', 'Image', ondelete=\"cascade\",\n required=True)\n name = fields.Char('Path', size=128, required=True)\n hostpath = fields.Char('Host path', size=128)\n user = fields.Char('System User', size=64)\n readonly = fields.Boolean('Readonly?')\n nosave = fields.Boolean('No save?')\n\n _sql_constraints = [\n ('name_uniq', 'unique(image_id,name)',\n 'Volume name must be unique per image!')\n ]\n\n\nclass ClouderImagePort(models.Model):\n _name = 'clouder.image.port'\n\n image_id = fields.Many2one('clouder.image', 'Image', ondelete=\"cascade\",\n required=True)\n name = fields.Char('Name', size=64, required=True)\n localport = fields.Char('Local port', size=12, required=True)\n expose = fields.Selection(\n [('internet', 'Internet'), ('local', 'Local'), ('none', 'None')],\n 'Expose?', required=True, default='local')\n udp = fields.Boolean('UDP?')\n\n _sql_constraints = [\n ('name_uniq', 'unique(image_id,name)',\n 'Port name must be unique per image!')\n ]\n\n\nclass ClouderImageVersion(models.Model):\n _name = 'clouder.image.version'\n _inherit = ['clouder.model']\n\n image_id = fields.Many2one(\n 'clouder.image', 'Image', ondelete='cascade', required=True)\n name = fields.Char('Version', size=64, required=True)\n parent_id = fields.Many2one('clouder.image.version', 'Parent version')\n registry_id = fields.Many2one('clouder.container', 'Registry')\n container_ids = fields.One2many(\n 'clouder.container', 'image_version_id', 'Containers')\n\n @property\n def fullname(self):\n return self.image_id.name + ':' + self.name\n\n\n @property\n def registry_address(self):\n return self.registry_id and self.registry_id.server_id.name + ':' + \\\n self.registry_id.ports['registry-ssl']['hostport']\n\n @property\n def fullpath(self):\n return self.registry_id and self.registry_address + \\\n '/' + self.fullname\n\n @property\n def fullpath_localhost(self):\n return self.registry_id and 'localhost:' + \\\n self.registry_id.ports['registry']['hostport'] +\\\n '/' + self.fullname\n\n _order = 'create_date desc'\n\n _sql_constraints = [\n ('name_uniq', 'unique(image_id,name)',\n 'Version name must be unique per image!')\n ]\n\n @api.one\n @api.constrains('name')\n def _validate_data(self):\n if not re.match(\"^[\\w\\d_.]*$\", self.name):\n raise except_orm(_('Data error!'), _(\n \"Image version can only contains letters, \"\n \"digits and underscore and dot\"))\n\n\n # @api.multi\n # def get_vals(self):\n #\n # vals = {}\n #\n # vals.update(self.image_id.get_vals())\n #\n # if self.parent_id:\n # parent_vals = self.parent_id.get_vals()\n # vals.update({\n # 'image_version_parent_id': parent_vals['image_version_id'],\n # 
'image_version_parent_fullpath': parent_vals['image_version_fullpath'],\n # 'image_version_parent_fullpath_localhost': parent_vals['image_version_fullpath_localhost'],\n # 'image_version_parent_registry_server_id': parent_vals['registry_server_id'],\n # })\n #\n # if self.registry_id:\n # registry_vals = self.registry_id.get_vals()\n # registry_port = registry_vals['container_ports']['registry']['hostport']\n # vals.update({\n # 'registry_id': registry_vals['container_id'],\n # 'registry_fullname': registry_vals['container_fullname'],\n # 'registry_port': registry_port,\n # 'registry_server_id': registry_vals['server_id'],\n # 'registry_server_ssh_port': registry_vals['server_ssh_port'],\n # 'registry_server_domain': registry_vals['server_domain'],\n # 'registry_server_ip': registry_vals['server_ip'],\n # })\n #\n # vals.update({\n # 'image_version_id': self.id,\n # 'image_version_name': self.name,\n # 'image_version_fullname': self.image_id.name + ':' + self.name,\n # })\n #\n # if self.registry_id:\n # vals.update({\n # 'image_version_fullpath': vals['registry_server_ip'] + ':' + vals['registry_port'] + '/' + vals['image_version_fullname'],\n # 'image_version_fullpath_localhost': 'localhost:' + vals['registry_port'] + '/' + vals['image_version_fullname']\n # })\n # else:\n # vals['image_version_fullpath'] = ''\n #\n # return vals\n\n @api.one\n def unlink(self):\n if self.container_ids:\n raise except_orm(\n _('Inherit error!'),\n _(\"A container is linked to this image version, \"\n \"you can't delete it!\"))\n return super(ClouderImageVersion, self).unlink()\n\n @api.multi\n def deploy(self):\n ssh = self.connect(self.registry_id.server_id.name)\n dir = '/tmp/' + self.image_id.name + '_' + self.fullname\n self.execute(ssh, ['mkdir', '-p', dir])\n\n dockerfile = 'FROM '\n if self.image_id.parent_id and self.parent_id:\n if self.registry_id.server_id == self.parent_id.registry_id.server_id:\n dockerfile += self.parent_id.fullpath_localhost\n else:\n dockerfile += self.parent_id.fullpath\n elif self.image_id.parent_from:\n dockerfile += self.image_id.parent_from\n else:\n raise except_orm(_('Data error!'),\n _(\"You need to specify the image to inherit!\"))\n\n dockerfile += '\\nMAINTAINER ' + self.email_sysadmin + '\\n'\n\n dockerfile += self.image_id.dockerfile\n for volume in self.image_id.volume_ids:\n dockerfile += '\\nVOLUME ' + volume.name\n\n ports = ''\n for port in self.image_id.port_ids:\n ports += port.localport + ' '\n if ports:\n dockerfile += '\\nEXPOSE ' + ports\n\n self.execute(ssh, [\n 'echo \"' + dockerfile.replace('\"', '\\\\\"') +\n '\" >> ' + dir + '/Dockerfile'])\n self.execute(ssh,\n ['sudo', 'docker', 'build', '-t', self.fullname, dir])\n self.execute(ssh, ['sudo', 'docker', 'tag', self.fullname,\n self.fullpath_localhost])\n self.execute(ssh,\n ['sudo', 'docker', 'push', self.fullpath_localhost])\n self.execute(ssh, ['sudo', 'docker', 'rmi', self.fullname])\n self.execute(ssh, ['sudo', 'docker', 'rmi', self.fullpath_localhost])\n self.execute(ssh, ['rm', '-rf', dir])\n ssh.close()\n return\n\n #In case of problems with ssh authentification\n # - Make sure the /opt/keys belong to root:root with 700 rights\n # - Make sure the user in the container can access the keys, and if possible make the key belong to the user with 700 rights\n\n @api.multi\n def purge(self):\n #TODO There is currently no way to delete an image from private registry.\n 
return\n","sub_path":"clouder/clouder_image.py","file_name":"clouder_image.py","file_ext":"py","file_size_in_byte":12027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"457403677","text":"import os\nimport json\nimport requests\n\nsse_running = False\nsse_address = \"\"\ngame_name = \"\"\ngame_friendly_name = \"\"\n\ndef json_post(url, data):\n if not sse_running:\n sse_status()\n if sse_running:\n r = requests.post(url, json=data)\n if r.status_code != 200:\n print(\"Error sending data to SteelSeries Engine. Error Code: \" + r.status_code)\n\ndef register_game(icon_id):\n if sse_running:\n game_metadata = {\n \"game\": game_name,\n \"game_display_name\": game_friendly_name,\n \"icon_color_id\": icon_id\n }\n json_post(sse_address + \"/game_metadata\", game_metadata)\n\ndef remove_game():\n if sse_running:\n game_metadata = {\n \"game\": game_name\n }\n json_post(sse_address + \"/remove_game\", game_metadata)\n\ndef register_event(event, minimum, maximum, icon_id=0, handlers=[]):\n if sse_running:\n event_data = {\n \"game\": game_name,\n \"event\": event,\n \"min_value\": minimum,\n \"max_value\": maximum,\n \"icon_id\": icon_id,\n \"handlers\": handlers # See https://github.com/SteelSeries/gamesense-sdk/blob/master/doc/api/writing-handlers-in-json.md#binding-an-event\n }\n json_post(sse_address + \"/register_game_event\", event_data)\n\ndef remove_event(event):\n if sse_running:\n event_data = {\n \"game\": game_name,\n \"event\": event\n }\n json_post(sse_address + \"/remove_game_event\", event_data)\n\ndef heartbeat():\n # Sends a heartbeat event to SSE3 so that colours stay there!\n if sse_running:\n sse_data = {\n \"game\": game_name\n }\n json_post(sse_address + \"/game_heartbeat\", sse_data)\n\ndef send_event(event, value):\n # This function sends a game event and value to SteelSeries\n # Engine 3 so that pretty colours are a thing\n if sse_running:\n sse_data = {\n \"game\": game_name,\n \"event\": event,\n \"data\": {\n \"value\": value\n }\n }\n json_post(sse_address + \"/game_event\", sse_data)\n\ndef sse_status():\n global sse_running\n global sse_address\n # coreProps file exists when SSE3 is running\n file_name = \"C:/ProgramData/SteelSeries/SteelSeries Engine 3/coreProps.json\"\n if os.path.isfile(file_name):\n sse_running = True\n with open(file_name) as sse_data_file:\n sse_data = json.load(sse_data_file)\n sse_address = \"http://\" + sse_data[\"address\"]\n else:\n sse_running = False\n return sse_running\n","sub_path":"sse_python.py","file_name":"sse_python.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"618999879","text":"# import modules\nimport os\nimport csv\n\n# set variables\nnum_months = 0\nprofit_loss_total = 0\naverage_change = 0\ngreatest_increase_date = ''\ngreatest_increase_value = 0\ngreatest_decrease_date = ''\ngreatest_decrease_value = 0\n\n# open the csv file\ncsvpath = os.path.join('Resources', 'budget_data.csv')\nwith open(csvpath, newline='') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n\n # store the csv header\n csv_header = next(csvreader)\n\n for row in csvreader:\n my_date = row[0]\n # if there is a date add 1\n if (my_date):\n num_months = num_months + 1\n\n profit_loss = int(row[1])\n # if there is an int as to total\n if (profit_loss):\n profit_loss_total = profit_loss_total + profit_loss\n\n # check if the current row contains the greatest increase/decrease\n if 
(profit_loss > greatest_increase_value):\n            greatest_increase_date = my_date\n            greatest_increase_value = profit_loss\n        elif (profit_loss < greatest_decrease_value):\n            greatest_decrease_date = my_date\n            greatest_decrease_value = profit_loss\n\n    # calculate average change if num_months is > 0\n    if (num_months > 0):\n        average_change = round(profit_loss_total / num_months, 2)\n\n# print the results\nprint('Financial Analysis')\nprint('----------------------------')\nprint(f'Total Months: {num_months}')\nprint(f'Total: ${profit_loss_total}')\nprint(f'Average Change: ${average_change}')\nprint(f'Greatest Increase in Profits: {greatest_increase_date} (${greatest_increase_value})')\nprint(f'Greatest Decrease in Profits: {greatest_decrease_date} (${greatest_decrease_value})')\n\n# print to output file\noutput_file = 'output.txt'\n\nwith open(output_file, 'w') as text:\n    text.write('Financial Analysis\\n')\n    text.write('----------------------------\\n')\n    text.write(f'Total Months: {num_months}\\n')\n    text.write(f'Total: ${profit_loss_total}\\n')\n    text.write(f'Average Change: ${average_change}\\n')\n    text.write(f'Greatest Increase in Profits: {greatest_increase_date} (${greatest_increase_value})\\n')\n    text.write(f'Greatest Decrease in Profits: {greatest_decrease_date} (${greatest_decrease_value})')","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}{"seq_id":"319263896","text":"from django.core.management.base import BaseCommand\nfrom shop.models import Product, Category\n\n\nclass Command(BaseCommand):\n    help = 'Import products from csv'\n\n    def handle(self, *args, **options):\n        \"\"\"\n        What needs to be implemented here:\n        - open the csv file\n        - read it line by line\n        - create the products\n\n        One caveat though: the category is currently mandatory,\n        so either the csv file states in every line the category the product belongs to,\n        and on import we check whether that category exists? If it exists, use the existing one; if not, create it\n\n        :param args:\n        :param options:\n        :return:\n        \"\"\"\n\n        # Example of how a category is created (saved)\n        category = Category()\n        category.name = 'Электровеники'\n        category.save()\n\n        # and a product\n        product = Product()\n        product.name = 'test1'\n        product.price = 100\n        product.stock = 12\n        product.available = True\n        product.category = category\n        product.save()\n\n        self.stdout.write('Successfully')\n","sub_path":"shop/management/commands/import_product.py","file_name":"import_product.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}{"seq_id":"386138608","text":"from Company import Company\nfrom CommercialEmployee import CommercialEmployee\nfrom ProductionEmployee import ProductionEmployee\nfrom utils.Log import Log\n\n\nlog = Log()\nlogger = log.getLoggerInstance()\n\nlogger.info(\"Start Main Script\")\nlogger.debug(\"Instance of Company Class\")\ncompany = Company()\nlogger.info(\"Amount of employees\")\namountOfEmployees = int(input(\"How many employees would you register? 
\"))\nlogger.debug(\"Amount of employees %s\", amountOfEmployees)\ntry:\n if (amountOfEmployees < 4 or amountOfEmployees > 16):\n myError = ValueError(\"The amount of employees should be no less than 3 and no more than 15\")\n logger.info(myError)\n raise myError\nexcept ValueError:\n logger.debug(\"amount of employees %s\", amountOfEmployees)\n print(myError)\nelse:\n logger.info(\"amount correct, start process\")\n for amount in range(1, amountOfEmployees + 1):\n logger.debug(\"Employee %s\", amount)\n employeeName = input(\"Employee number {}, insert name: \".format(amount))\n logger.debug(\"name %s\", employeeName)\n typeOfEmployee = input(\"Employee number {}, insert type: \".format(amount))\n logger.debug(\"type %s\", typeOfEmployee)\n if typeOfEmployee == \"CE\":\n logger.info(\"new Commercial employee\")\n amountOfPiecesSold = int(input(\"Amount of pieces sold: \"))\n logger.debug(\"pieces sold %s\", amountOfPiecesSold)\n employee = CommercialEmployee(typeOfEmployee, amountOfPiecesSold, employeeName)\n else:\n logger.info(\"new production employee\")\n amountOfPiecesProduced = int(input(\"Amount of pieces produced: \"))\n logger.debug(\"pieces produced %s\", amountOfPiecesProduced)\n amountOfPiecesDefective = int(input(\"Amount of pieces defective found: \"))\n logger.debug(\"pieces defective %s\", amountOfPiecesDefective)\n employee = ProductionEmployee(typeOfEmployee, amountOfPiecesProduced, amountOfPiecesDefective, employeeName)\n logger.info(\"fill company employees\")\n company.fillEmployees(employee)\n logger.info(\"print company employees\")\n company.printEmployeesReport()","sub_path":"ManuelValdez/Python/test/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"602988749","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom PIL import Image\nfrom io import BytesIO\n\nfrom requests.packages.urllib3.exceptions import ReadTimeoutError\n\nfrom base import spyutil\nimport time\nimport sched\nimport re\n\n# 从米铺爬取代理\nurl = 'https://proxy.mimvp.com'\n\ndef getHtmlText(url , proxies=None):\n try:\n r = requests.get(url, proxies=proxies, timeout=3)\n r.raise_for_status()\n r.encoding = 'utf-8'\n return r.text\n except Exception as e:\n return 'ERROR'\n\n\n\ndef getVerifyCode(url):\n try:\n response = requests.get(url, timeout=5)\n image = Image.open(BytesIO(response.content))\n text = spyutil.getverify1(image)\n except Exception as e:\n pass\n # print(text)\n return text\n\n\ndef getProxy(url):\n '开始爬取'\n html = getHtmlText(url)\n soup = BeautifulSoup(html, \"html.parser\")\n tbody = soup.find('div', attrs={'id': 'free_freelist_open'}).find('tbody')\n trs = tbody.find_all('tr')\n results = []\n for tr in trs:\n result = {}\n codeUrl = 'https://proxy.mimvp.com/' + tr.find('td', class_='tbl-proxy-port').find('img')['src']\n code = getVerifyCode(codeUrl)\n result['ip'] = 'http://' + tr.find('td', class_='tbl-proxy-ip').text + ':' + code\n result['type'] = tr.find('td', class_='tbl-proxy-type').text\n if getHtmlText('http://www.163.com', proxies={'http': result['ip']}) != 'ERROR':\n print('代理ip:' + result['ip'] + '已加入代理池')\n results.append(result)\n else:\n print('代理ip:' + result['ip'] + '无效')\n\n with open('proxies.txt', 'a') as f:\n for result in results:\n if not existProxy(result['ip']):\n f.write(result['ip'] + '\\n')\n\n\ndef existProxy(proxy):\n with open('proxies.txt', 'r') as f:\n lines = f.read().splitlines()\n proxySet = set(lines)\n if proxy in 
proxySet:\n return True\n else:\n return False\n\n\ndef timeTask(second):\n print('开始执行,现在时间:' + time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(time.time())))\n getProxy(url)\n s.enter(second, 1, timeTask, (second,))\n\n\ndef test():\n with open('proxies.txt', 'a') as f:\n if not existProxy('http://117.127.0.209:8080'):\n print('exist')\n\n\nif __name__ == '__main__':\n s = sched.scheduler(time.time, time.sleep)\n s.enter(0, 1, timeTask, (60,))\n s.run()\n","sub_path":"base/mipuproxy.py","file_name":"mipuproxy.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"423137620","text":"import discord\nfrom discord.ext import commands\nimport json\n\nclass afk(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n \n @commands.Cog.listener()\n async def on_ready(self):\n print(f\"{self.__class__.__name__} Cog has been loaded\\n-----\")\n\n @commands.Cog.listener()\n async def on_message(self, msg):\n if msg.author.bot:\n return\n file = open(\"afk1.json\", \"r\")\n afk = json.load(file)\n if not str(msg.guild.id) in afk:\n return\n if len(msg.mentions) > 0:\n user = msg.mentions[0]\n if user.id in afk[str(msg.guild.id)][\"to-mention-ids\"]:\n msgs = f\"{user.name} is AFK with reason : {afk[str(msg.guild.id)][str(user.id)]}\"\n await msg.channel.send(msgs)\n if msg.author.id in afk[str(msg.guild.id)][\"to-mention-ids\"]:\n index = afk[str(msg.guild.id)][\"to-mention-ids\"].index(msg.author.id)\n del afk[str(msg.guild.id)][\"to-mention-ids\"][index]\n afk[str(msg.guild.id)].pop(str(msg.author.id))\n dumps = open(\"afk1.json\", \"w\")\n json.dump(afk, dumps, indent = 4)\n await msg.channel.send(\"Welcome back {}, I removed your AFK.\".format(msg.author.mention), delete_after = 10)\n await msg.author.edit(nick=f\"{msg.author.name}\")\n\n @commands.command(aliases=[\"away_from_keyboard\"], \n description=\"Sets your AFK when you wanna let others know ur gone.\", \n usage=\"[reason]\")\n @commands.cooldown(1, 5, commands.BucketType.user)\n async def afk(self, ctx, *, message = \"AFK\"):\n file = open(\"afk1.json\", \"r\")\n afk = json.load(file)\n if not str(ctx.guild.id) in afk:\n afk[str(ctx.guild.id)] = {}\n if not str(ctx.author.id) in afk[str(ctx.guild.id)]:\n afk[str(ctx.guild.id)][str(ctx.author.id)] = {}\n if not \"to-mention-ids\" in afk[str(ctx.guild.id)]:\n afk[str(ctx.guild.id)][\"to-mention-ids\"] = []\n if not ctx.author.id in afk[str(ctx.guild.id)][\"to-mention-ids\"]:\n afk[str(ctx.guild.id)][str(ctx.author.id)] = message\n afk[str(ctx.guild.id)][\"to-mention-ids\"].append(ctx.author.id)\n dumps = open(\"afk1.json\", \"w\")\n json.dump(afk, dumps, indent = 4)\n await ctx.send(\"{}, I have set your AFK with reason : {}\".format(ctx.author.mention, message)) \n try:\n await ctx.author.edit(nick = f\"[AFK] {ctx.author.name}\")\n except discord.Forbidden:\n pass\n else:\n return\n\n\ndef setup(bot):\n bot.add_cog(afk(bot))","sub_path":"cogs/afk.py","file_name":"afk.py","file_ext":"py","file_size_in_byte":2619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"95711167","text":"def print_matrix(matrix):\n for row in range(len(matrix)):\n line = []\n for col in range(len(matrix[row])):\n if matrix[row][col] == 0:\n line.append('0')\n else:\n line.append(matrix[row][col])\n print(line)\n\ndef add_stars(matrix, pic_height, pic_width):\n local_matrix = matrix\n frame_height = pic_height + 2\n frame_width = pic_width + 2 \n\n for col in 
range(frame_width-1):\n local_matrix[0][col] = '*'\n local_matrix[frame_height-1][col] = '*'\n\n for row in range(frame_height):\n local_matrix[row][0] = '*'\n local_matrix[row][frame_width - 1] = '*'\n\n return local_matrix\n\ndef add_data(picture, frame, pic_height, pic_width):\n local_frame = frame\n frame_height = pic_height + 2\n frame_width = pic_width + 2 \n\n frame_2d = [' ' for x in range(frame_height)]\n frame_2d = list(frame_2d)\n\n # Fill in your pic\n for row in range(pic_height):\n while ' ' in frame_2d:\n frame_2d.remove(' ')\n\n for col in range(pic_width):\n local_frame[row+1][col+1] = picture[row][col]\n\n\n # Make neat final presentation \n for row in range(frame_height):\n line = ''.join(local_frame[row])\n frame_2d.append(line)\n\n print(str(type(frame_2d[0])))\n output = []\n for currline in frame_2d:\n output.append(currline)\n\n print_matrix(output)\n return frame_2d\n\n\ndef addBorder(picture):\n pic_height = len(picture)\n pic_width = len(picture[0])\n\n frame = [[0 for x in range(pic_width + 2)] for y in range(pic_height + 2)]\n frame = add_stars(frame, pic_height, pic_width)\n frame = add_data(picture, frame, pic_height, pic_width)\n\n return frame\n\n\n\npicture = [\"abc\", \n \"ded\"]\naddBorder(picture)\nprint('\\n===================\\n')\npicture = [\"a\"]\naddBorder(picture)\n","sub_path":"intro/04_exploring_the_waters/15_add_border.py3","file_name":"15_add_border.py3","file_ext":"py3","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"514405974","text":"#Import libraries\nimport sys\nimport logging\nimport getpass\nimport sleekxmpp\nimport time\nimport ssl\n\nfrom optparse import OptionParser\nfrom sleekxmpp.exceptions import IqError, IqTimeout\nfrom menu import *\n\n#Principal Class -> USER\nclass EchoBot(sleekxmpp.ClientXMPP):\n\n def __init__(self, jid, password, nick, room):\n\n sleekxmpp.ClientXMPP.__init__(self, jid, password)\n #Nick and room\n self.room = room\n self.nick = nick\n\n #Event handlers\n self.add_event_handler(\"session_start\", self.start, threaded=True)\n self.add_event_handler(\"message\", self.message)\n self.add_event_handler(\"register\", self.register, threaded=True)\n self.add_event_handler(\"groupchat_message\", self.gp_msg, threaded=True)\n self.add_event_handler(\"muc::%s::got_online\" % self.room, self.gp_chat, threaded=True)\n\n #Process event: session_start\n def start(self, event):\n print(\"\\nSession start\")\n print(\"°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°°\")\n self.send_presence()\n self.get_roster()\n\n #Receive message\n def message(self, msg):\n if msg['type'] in ('normal', 'chat'):\n print(\"\\n\", msg['from'], \"says: \")\n print(\"==> \", msg['body'],\"\\n\")\n\n #Register\n def register(self, iq):\n resp = self.Iq()\n resp['type'] = 'set'\n resp['register']['username'] = self.boundjid.user\n resp['register']['password'] = self.password\n try:\n resp.send(now=True)\n logging.info(\"Succesfull register: %s!\" % self.boundjid)\n except IqError as e:\n logging.error(\"Error: unable to register %s\" %\n e.iq['error']['text'])\n self.disconnect()\n except IqTimeout:\n logging.error(\"Error: no response from server\")\n self.disconnect()\n\n #Get all users and print them\n def get_users(self):\n print(self.client_roster)\n\n #Delete user from server\n def delete_user(self):\n resp = self.Iq()\n resp['type'] = 'set'\n resp['from'] = self.boundjid.user\n resp['register'] = ' '\n 
resp['register']['remove'] = ' '\n try:\n resp.send(now=True)\n print(\"Account deleted for %s!\" % self.boundjid)\n except IqError as e:\n logging.error(\"Could not delete account: %s\" %\n e.iq['error']['text'])\n self.disconnect()\n except IqTimeout:\n logging.error(\"No response from server.\")\n self.disconnect()\n \n def gp_msg(self, msg):\n if msg['mucnick'] != self.nick and self.nick in msg['body']:\n print (\"%(body)s\" % msg)\n\n def gp_chat(self, presence):\n if presence['muc']['nick'] != self.nick:\n self.send_message(mto=presence['from'].bare,\n mbody=\"Hello, %s %s\" % (presence['muc']['role'],\n presence['muc']['nick']),\n mtype='groupchat')\n\n #Send files to another user\n def send_files(self,receiver, filename):\n stream = self['xep_0047'].open_stream(receiver)\n with open(filename) as f:\n data = f.read()\n stream.sendall(data)\n \nif __name__ == '__main__':\n optp = OptionParser()\n\n #Verbose\n optp.add_option('-q', '--quiet', help='set logging to ERROR',\n action='store_const', dest='loglevel',\n const=logging.ERROR, default=logging.INFO)\n optp.add_option('-d', '--debug', help='set logging to DEBUG',\n action='store_const', dest='loglevel',\n const=logging.DEBUG, default=logging.INFO)\n optp.add_option('-v', '--verbose', help='set logging to COMM',\n action='store_const', dest='loglevel',\n const=5, default=logging.INFO)\n\n #Information: jid, password, to, message\n optp.add_option(\"-j\", \"--jid\", dest=\"jid\",\n help=\"JID to use\")\n optp.add_option(\"-p\", \"--password\", dest=\"password\",\n help=\"password to use\")\n optp.add_option(\"-t\", \"--to\", dest=\"to\",\n help=\"JID to send the message to\")\n optp.add_option(\"-m\", \"--message\", dest=\"message\",\n help=\"message to send\")\n optp.add_option(\"-n\", \"--nick\", dest=\"nick\",\n help=\"MUC nickname\")\n optp.add_option(\"-r\", \"--room\", dest=\"room\",\n help=\"MUC room to join\")\n\n opts, args = optp.parse_args()\n\n #Login\n logging.basicConfig(level=opts.loglevel,\n format='%(levelname)-8s %(message)s')\n\n #Call function menu()\n optmen = int(menu())\n\n #Add credentials\n username = input(\"Username: \") \n opts.jid = username+\"@redes2020.xyz\"\n opts.password = getpass.getpass(\"Password: \")\n opts.nick = input(\"Nickname: \")\n opts.room = \"jwchat@conference.redes2020.xyz\"\n\n xmpp = EchoBot(opts.jid, opts.password, opts.nick, opts.room)\n if (optmen == 2):\n xmpp.del_event_handler(\"register\", xmpp.register)\n \n #Register plugins\n xmpp.register_plugin('xep_0004') # Data forms\n xmpp.register_plugin('xep_0030') # Service Discovery\n xmpp.register_plugin('xep_0045') # Multichat\n xmpp.register_plugin('xep_0060') # PubSub\n xmpp.register_plugin('xep_0066') # Out-of-band Data\n xmpp.register_plugin('xep_0077') # In-band Registration\n xmpp.register_plugin('xep_0199') # Ping\n #xmpp['xep_0077'].force_registration = True\n\n #authentication over an unencrypted connection\n xmpp['feature_mechanisms'].unencrypted_plain = True\n xmpp.ssl_version = ssl.PROTOCOL_TLS\n\n #Server connection\n #if xmpp.connect():\n if xmpp.connect(('redes2020.xyz', 5222)):\n xmpp.process(block=False) #True or false? 
\n #Principal menu\n while(1): \n choice = int(menu_in())\n #Show all users\n if(choice == 1):\n print(\"Contacts: \\n\")\n print(xmpp.client_roster) \n \"\"\"\n i = 0 \n y = 0\n for i in range (len(xmpp.client_roster)):\n print(xmpp.client_roster[i][y])\n i = i + 1\n \"\"\"\n #Add user \n elif(choice == 2):\n new_contact = input(\"username: \\n\")\n friend = new_contact + \"@redes2020.xyz\"\n xmpp.send_presence(pto = friend, ptype ='subscribe')\n\n #Show details from an specific user\n elif(choice == 3): \n print(\"\\n \", xmpp.client_roster, \"\\n\") \n\n #Direct message\n elif(choice == 4): \n print(\"\\nPRIVATE CHAT\\n\")\n username = input(\"\\n To: \")\n user_to = username + \"@redes2020.xyz\"\n content = input(\"\\n Content: \")\n xmpp.send_message(mto=user_to, mbody = content, mtype = 'chat')\n print(\"\\n SENT \\n\")\n\n #Send presence message and change status\n elif(choice == 5):\n status = input(\"Status: \")\n flag = 0\n while(flag == 0):\n sh = int(show_menu())\n flag = 1\n if(sh == 1):\n show = \"chat\"\n elif(sh == 2):\n show = \"away\"\n elif(sh == 3):\n show = \"xa\"\n elif(sh == 4):\n show = \"dnd\"\n else: \n print(\"Please, try again\")\n flag = 0\n\n \"\"\"\n self.send_presence(pstatus=\"i'm not around right now\", pshow='xa')\n Where pstatus controls the type of icon your IM client will show, and you\n have the options of: chat, away, xa, and dnd. The value 'xa' means\n extended away and 'dnd' means do not disturb.\n \"\"\"\n xmpp.makePresence(pfrom=xmpp.jid, pstatus=status, pshow=show)\n \n #Public chat\n elif(choice == 6): \n xmpp.plugin['xep_0045'].joinMUC(xmpp.room, xmpp.nick)\n print(\"\\nPUBLIC CHAT\\n\")\n msg_all = input(\"Message: \")\n xmpp.send_message(mto='all', mbody=msg_all, mtype='groupchat')\n print(\"\\n SENT \\n\")\n\n #Send file\n elif(choice == 7): \n username = input(\"\\n To: \")\n send_to = username + \"@redes2020.xyz\"\n file = input(\"File: \")\n xmpp.send_files(send_to, file)\n\n #Exit\n elif(choice == 8): \n print(\"See you later\")\n xmpp.disconnect()\n break\n\n #Delete account\n elif(choice == 9): \n xmpp.delete_user()\n xmpp.disconnect()\n break\n\n #If the option is not between (1-10)\n else: \n print(\"Invalid option\")\n \n #Fail connection\n else:\n print(\"Unable to connect :(\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"361341826","text":"# coding=utf-8\n\nCHOICES_TIPO_PREGUNTA = (\n ('unica', u'Unica Selección'),\n ('multi','Multiples Selecciones'),\n ('texto', 'Solo texto'),\n)\nCHOICES_NRESPUESTA =(('1', 1),\n ('2', 2),\n ('3', 3),\n ('4', 4),\n ('5', 5),\n ('6', 6),\n ('7', 7),)","sub_path":"core_apps/apps/delphi/choices.py","file_name":"choices.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"27544149","text":"# ~*~ coding: utf-8 ~*~\nfrom __future__ import unicode_literals\nfrom django.urls import path\nfrom apps import views\n\n__all__ = [\"urlpatterns\"]\n\napp_name = \"apps\"\n\nurlpatterns = [\n path('index/', views.AppsView.as_view(), name='apps_index'),\n path('calendar/', views.CalendarView.as_view(), name='event_list'),\n path('jsplumb/', views.JsplumbView.as_view(), 
name='jsplumb_index'),\n\n]","sub_path":"apps/urls/views_urls.py","file_name":"views_urls.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}{"seq_id":"565686163","text":"#\r\n#Introduction to computer programming\r\n#Professor: Jucimar JR\r\n#TEAM 1\r\n#\r\n#Any Mendes Carvalho - 1615310044\r\n#Eduardo Maia Freire - 1615310003\r\n#Kid Mendes de Oliveira Neto - 1615310011\r\n#Luiz Gustavo de Rocha Melo - 1615310015\r\n#Matheus Palheta Barbosa -1615310019\r\n#Victor Rafael da Silva e Silva - 1615310025\r\n#\r\n\r\naltura = float(input(\"Entre com a sua altura: \")) # Asks for the user's height\r\nsexo = input(\"Sexo M ou F: \") # Right after that, asks for the user's sex\r\npeso = float(input(\"Qual seu peso: \")) # And lastly asks for the weight\r\n \r\nif (\"F\" == sexo): # Checks whether the sex is female; if so, computes the ideal weight\r\n    resultado = (62.1*altura) - 44.7 \r\nelse: # If male, computes the ideal weight\r\n    resultado = (72.7*altura) - 58 \r\n \r\nif (peso > resultado): # If the weight is above the ideal weight, reports being overweight\r\n    print(\"Voce esta acima do peso, seu peso ideal e %.1f kg\" % (resultado)) \r\nelse: # Otherwise, below the ideal weight\r\n    print(\"Voce esta abaixo do peso, seu peso ideal e %.1f kg\" % (resultado))\r\n","sub_path":"lista1/ipc_lista1.13.py","file_name":"ipc_lista1.13.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}{"seq_id":"636043257","text":"#!/usr/bin/env python\n\n'''\nTeam BotterHeads\n\n    Aryan Rajput\n    Ritvik Mahajan\n    Sameer Talwar\n    Shreshth Mehrotra\n'''\n\n\n### Program for printing relevant parameters of concern\n\n#Libraries\n\nfrom std_msgs.msg import Float32\nfrom sensor_msgs.msg import LaserScan, Imu, NavSatFix, JointState\nfrom geometry_msgs.msg import Quaternion,Twist\nimport rospy\nimport tf\nimport numpy as np\nimport math\n\n\n\n### The names of variables, Subscribers and callback functions are the same as those in controller.py\n\n\n\n\nclass sbb(): #Creating SBB class\n    def __init__(self):\n\n        rospy.init_node('printpose')\n\n# Initialisations:\n\n\n        #Frequency of operation.\n        self.sample_rate = 100.0 #100 Hz\n\n        #Array storing lidar distance values\n        self.lidar_ranges = [0.0]*180 #180 sample points taken.\n\n        #Pose from IMU\n        self.sbb_orientation_quaternion = [0.0, 0.0, 0.0, 0.0]\n        self.sbb_orientation_euler = [0.0, 0.0, 0.0] #roll,pitch,yaw values\n        self.sbb_angular_velocity_bike=[0.0,0.0,0.0]\n        \n        #GPS parameters for pose estimation\n        self.phi,self.lamb,self.h=0,0,0\n        self.a = 6378137 \n        self.e = 0.0818191908426\n        \n\n\n        #Distances and Angles for navigation.\n        \n        self.spawn_x = 0.0\n        self.spawn_y = 0.0\n        self.isspawned = 0 #Used for calculating initial spawn location\n\n        #Current coordinates\n        self.x = 0.0 \n        self.y = 0.0 \n\n        #Final goal\n        self.goal_x_final = 15.7\n        self.goal_y_final = -40.7\n\n        #Intermediate goal in the vicinity of the enclosure\n        self.goal_x = self.goal_x_final - 2.5\n        self.goal_y = self.goal_y_final + 0.448\n        \n        self.c = 0.0 #Coefficient for controlling rear wheel velocity\n        \n        self.e_x = 0.0 #Error from goal location's x component\n        self.e_y = 0.0 #Error from goal location's y component\n        \n        #PID parameters for navigation\n        self.distance_error = 0.0 #P error\n        self.distance_error_prev = 0.0\n        self.distance_d = 0.0 #D error\n        self.distance_i = 0.0 #I error\n        \n        self.handle_angle = 0.0 #Angle of the handle 
relative to the bike\n self.heading_angle_req = 0.0 #Angle with the x-axis , of the required direction (towards the goal)\n self.bike_angle = 0.0 #Bike yaw angle\n\n #Checking wether we've reached the (intermediate) goal \n self.final = 0 \n\n \n \n #Geometric,Inertial properties of the bike,flywheel (for balancing)\n \n self.curr_angle = 0.0 #Roll angle of the bike, which needs to be driven to 0 for balancing\n self.moi_flywheel = 0.24 #flywheel MOI\n self.moi_bike = 4 #Bike MOI about the horizontal axis (Contributions of all parts of the bike accounted for)\n self.k_bike_over_I = 12 #(K/I) constant\n self.k_bike = self.k_bike_over_I*self.moi_bike #Bike torque constant K= (K/I)*I. Defined in such a way because (K/I) value can be computed from the IMU data. \n self.w_flywheel = 0 #Flywheel angular velocity\n self.k_damp = 10 #Damping constant for torque control\n\n #stand parameters \n self.stand1_pos = 0.0\n self.stand2_pos = 0.0\n self.stand1_w = 0.0\n self.stand2_w = 0.0\n\n #Subscribers\n rospy.Subscriber('/sbb/imu', Imu, self.imu_callback)\n rospy.Subscriber('/sbb/gps', NavSatFix, self.gps_callback)\n rospy.Subscriber('/stand1/joint_state', JointState, self.stand1_callback)\n rospy.Subscriber('/stand2/joint_state', JointState, self.stand2_callback)\n rospy.Subscriber('/handle/joint_state', JointState, self.handle_callback)\n rospy.Subscriber('/sbb/distance_sensor/front', LaserScan, self.lidar_callback)\n\n def imu_callback(self, msg):\n self.sbb_orientation_quaternion[0] = msg.orientation.x\n self.sbb_orientation_quaternion[1] = msg.orientation.y\n self.sbb_orientation_quaternion[2] = msg.orientation.z\n self.sbb_orientation_quaternion[3] = msg.orientation.w\n\n #conversion of orientation quarternion to euler angles\n (self.sbb_orientation_euler[1], self.sbb_orientation_euler[0], self.sbb_orientation_euler[2]) = tf.transformations.euler_from_quaternion([self.sbb_orientation_quaternion[0], self.sbb_orientation_quaternion[1], self.sbb_orientation_quaternion[2], self.sbb_orientation_quaternion[3]])\n self.curr_angle = self.sbb_orientation_euler[1] #Bike roll angle (Balancing)\n self.bike_angle = self.sbb_orientation_euler[2] #Bike yaw angle (Navigation)\n self.sbb_angular_velocity_bike[0] = msg.angular_velocity.x #bike angular velocity (Balancing)\n\n def gps_callback(self, msg):\n self.phi= msg.latitude\n self.lamb= msg.longitude\n self.h=msg.altitude\n self.phi = np.deg2rad(self.phi)\n self.lamb = np.deg2rad(self.lamb)\n \n N = self.a / np.sqrt(1 - ((self.e**2) * (np.sin(self.phi)**2)))\n \n if self.isspawned == 0: #calculating initial pose\n\n #Converting GPS parameters to x,y values \n self.spawn_x = (N * (1 - self.e ** 2) + self.h) * np.sin(self.phi)-1100248.62908+0.1\n self.spawn_y = -1*((N + self.h) * np.cos(self.phi) * np.sin(self.lamb)-1090835.97224+0.3)\n self.c = math.sqrt((self.goal_x - self.spawn_x)*(self.goal_x - self.spawn_x) + (self.goal_y - self.spawn_y)*(self.goal_y - self.spawn_y))\n \n self.isspawned = 1 #Initial pose calculated.\n\n #Converting GPS parameters to x,y values \n self.x = (N * (1 - self.e ** 2) + self.h) * np.sin(self.phi)-1100248.62908+0.1 \n self.y = -1*((N + self.h) * np.cos(self.phi) * np.sin(self.lamb)-1090835.97224+0.3) \n \n def lidar_callback(self, msg):\n self.lidar_ranges = msg.ranges #LiDAR values\n\n #Stand roll angles\n def stand1_callback(self, msg):\n self.stand1_pos = msg.position[0]\n def stand2_callback(self, msg):\n self.stand2_pos = msg.position[0]\n\n def handle_callback(self, msg):\n self.handle_angle = msg.position[0] #Handle angle 
wrt bike axis\n\n\n\n# Function to print various parameters of concern\n def displayparams(self):\n print(\"Current location = (\"+str(self.x)+\", \"+str(self.y)+\")\")\n print(\"Intermediate Goal Location = (\"+str(self.goal_x)+\", \"+str(self.goal_y)+\")\")\n print(\"Final Goal location = (\"+str(self.goal_x_final)+\", \"+str(self.goal_y_final)+\")\")\n self.e_x = self.goal_x_final - self.x\n self.e_y = self.goal_y_final - self.y\n self.distance_error = math.sqrt((self.e_x*self.e_x)+(self.e_y*self.e_y))\n print(\"Distance from final goal = \"+str(self.distance_error))\n \n\nif __name__ == '__main__':\n\n sbb = sbb() #Creating SBB object\n r = rospy.Rate(sbb.sample_rate) #100Hz frequency\n while not rospy.is_shutdown():\n\n try:\n sbb.displayparams()\n r.sleep()\n except rospy.exceptions.ROSTimeMovedBackwardsException:\n pass\n","sub_path":"Scripts/printpose.py","file_name":"printpose.py","file_ext":"py","file_size_in_byte":6998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"15563275","text":"import tkinter, tkinter.ttk as ttk, shutil,os,subprocess,sys,tkinter,logging,send2trash\n\n\n\nclass RadioBar(tkinter.Frame):\n def __init__(self,master,modes,labeltext,orientation): #will default to first option in modes, if not provided\n tkinter.Frame.__init__(self,master,borderwidth=10,relief='sunken')\n self.master = master\n self.buttons = []\n\n self.var=tkinter.StringVar()\n self.var.set(modes[0])\n\n l=tkinter.Label(self,text='%s:'%labeltext if labeltext else '')\n l.pack()\n for m in modes:\n b=tkinter.Radiobutton(self,text=m,variable=self.var,value=m)\n b.pack(side='left') if orientation=='horizontal' else b.pack()\n self.buttons.append(b)\n def get_choice(self):\n return self.var.get()\n\n #\n # modes = ('Hot','New','Top')\n #\n # self.var = tkinter.StringVar()\n # self.var.set('Hot') #default to 'Hot'\n #\n # tkinter.Label(self,text='Select Submission Category:').pack()\n #\n # for m in modes:\n # b = tkinter.Radiobutton(self,text=m, variable = self.var, value=m)\n # b.pack(side='left')\n # self.buttons.append(b)\n #\n #\n # def get_choice(self):\n # return self.var.get()\n\n\n\nclass StartPage(tkinter.Frame):\n\n def __init__(self, controller, master):\n tkinter.Frame.__init__(self,master)\n self.controller=controller\n self.wigets()\n\n\n def wigets(self):\n\n self.pastsubredditlist = tkinter.Listbox(self,selectmode=tkinter.SINGLE,font=('Italics',12), height=len(self.controller.pastvisitedsubreddits))\n for i,option in enumerate(self.controller.pastvisitedsubreddits):\n self.pastsubredditlist.insert(i,option)\n self.pastsubredditlist.select_set(0)\n self.pastsubredditlist.bind('',self.clicklist)\n\n self.entry_box=tkinter.Entry(self)\n self.entry_box.bind('',self.clickentrybox)\n\n #####ROW 0#####\n tkinter.Label(self,text='Choose a Subreddit',font=('Italics',20)).grid(row=0,column=1)\n\n #####ROW 1#####\n tkinter.Label(self, text = 'Past Subreddits', font=('Italics',15)).grid(row=1,column=0)\n tkinter.Label(self,text='Enter New Subreddit:',font=('Italics',15)).grid(row=1,column=2)\n\n #####ROW 2#####\n self.pastsubredditlist.grid(row=2,column=0,pady=10,sticky='W')\n self.entry_box.grid(row=2,column=2)\n\n #####ROW 3#####\n categories=('Hot','New','Top')\n textlabel=('Select submission category')\n\n self.categorybar = RadioBar(self,categories,textlabel,'horizontal')\n self.categorybar.grid(row=3,column=1,pady=15)\n\n scrollermodes = ('Enable wallpaper scroller','Disable wallpaper scroller')\n self.scrollermodebar = 
RadioBar(self,scrollermodes,'','vertical')\n self.scrollermodebar.grid(row=3,column=2,pady=15,padx=15)\n\n #####ROW 4#####\n self.submitbutton = tkinter.Button(self,text='Submit Subreddit Choice',command = self.inputsubredditchoice)\n self.submitbutton.grid(row=4,column=1)\n\n #####ROW 5#####\n #self.downloadbutton\n\n ####ROW 6#####\n self.progressbar = ttk.Progressbar(self,mode='indeterminate')\n self.progressbar.grid(row=6,column=1)\n\n self.pack()\n\n def inputsubredditchoice(self):\n\n enteredtext = self.entry_box.get()\n if enteredtext:\n self.controller.setsubreddit(enteredtext)\n else:\n self.controller.setsubreddit(self.pastsubredditlist.selection_get())\n\n self.controller.setsubmissioncategory(self.categorybar.get_choice())\n self.controller.enablescroller = self.scrollermodebar.get_choice()\n\n for obj in (self.entry_box,self.pastsubredditlist):\n obj.config(state=tkinter.DISABLED)\n obj.unbind('')\n\n for buttonbar in (self.categorybar.buttons,self.scrollermodebar.buttons):\n for b in buttonbar:\n b.config(state=tkinter.DISABLED)\n self.submitbutton.config(state=tkinter.DISABLED)\n\n self.makedownloadbutton()\n\n\n def clickentrybox(self,event):\n self.pastsubredditlist.config(state=tkinter.DISABLED)\n self.entry_box.config(state=tkinter.NORMAL)\n\n def clicklist(self,event):\n self.pastsubredditlist.config(state=tkinter.NORMAL)\n self.entry_box.delete(0,tkinter.END)\n self.entry_box.config(state=tkinter.DISABLED)\n\n def makedownloadbutton(self):\n\n w = 325\n h = w/13\n self.downloadbutton = tkinter.Canvas(self,highlightcolor='dark green',bg='gray',relief='raised',width=w,height=h)\n self.downloadbutton.create_text((w/2,h/2),text='DOWNLOAD IMAGES from %s'%self.controller.selectedsubreddit,font=('Italics',15),activefill='dark green')\n self.downloadbutton.grid(row=5,column=1,pady=15)\n self.downloadbutton.bind('',lambda event:self.controller.startdownloadthread())\n\n\nclass SavePopupWindow:\n\n def __init__(self,master):\n top = self.top = tkinter.Toplevel(master)\n tkinter.Label(top, text='Enter Path to Save:').pack()\n self.master = master\n\n self.e = tkinter.Entry(top)\n self.e.pack(pady = 5)\n\n okbutton = tkinter.Button(top,text='Save',command=self.ok_save)\n okbutton.pack(side='right',padx=10)\n\n cancelbutton = tkinter.Button(top,text='Cancel',command=self.cancel_save)\n cancelbutton.pack(side='left')\n\n def cancel_save(self):\n self.top.destroy()\n\n def ok_save(self):\n path = self.e.get()\n self.master.save_wallpaper(path)\n\n\n\nclass WallpaperScroller(tkinter.Frame):\n\n def __init__(self,master, dir):\n super().__init__(master)\n self.master = master\n self.master.title('Wallpaper Scroller')\n self.starting_wallpaper = self.get_desktop_path() #current wallpaper when application is started. 
#TODO: THIS DOES NOT WORK!!!!\n self.saved_wallpaper = None #change to this upon exit of program; set in self.savewallpaper()\n self.current_wallpaper_path = self.starting_wallpaper\n assert self.starting_wallpaper, 'Current image from a temp that was not saved'\n self.directory = dir\n self.getwallpapers() #LIST of file names in self.directory\n self.current_index = -1\n\n self.widgets()\n\n # def getstartingwallpaper(self):\n # script = \"\"\"osascript<', lambda event:self.scroll(direction = 'back'))\n\n self.forward_button = tkinter.Canvas(self,width=75,height=75,highlightthickness=0,borderwidth=1, relief='raised',background='green')\n self.forward_button.create_oval((7,5,70,70),fill='white')\n self.forward_button.create_line((7,40,67,40),arrow=tkinter.LAST,fill='black',width=10,arrowshape=(15,20,5))\n self.forward_button.bind('', lambda event: self.scroll(direction ='forward'))\n\n self.image_counter = tkinter.StringVar()\n self.image_counter.set('')\n\n ######LAYOUT#####\n\n tkinter.Label(self,textvar= self.image_counter,font=('Italics',12)).grid(row=0,column=0)\n #tkinter.Label(self,text='Current Image:',font=('Italics',12)).grid(row=0,column=1)\n tkinter.Button(self,text='Exit', command = self.exit).grid(row=0,column=2)\n\n self.back_button.grid(row=2,column=0,pady=25)\n self.forward_button.grid(row=2,column=2,pady=25)\n\n #tkinter.Button(self,text='Save Wallpaper',command=self.save_wallpaper).grid(row=2,column=1)\n tkinter.Button(self,text='Save Wallpaper',command=self.save_wallpaper).grid(row=3,column=1)\n\n self.pack_propagate(0)\n self.pack()\n\n\n","sub_path":"wallpaperscrollerGUI.py","file_name":"wallpaperscrollerGUI.py","file_ext":"py","file_size_in_byte":11949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"608331219","text":"# Copyright (c) 2013, System Engineering Software Society\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the System Engineering Software Society nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED.\n# IN NO EVENT SHALL SYSTEM ENGINEERING SOFTWARE SOCIETY BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Text type constructor module.\"\"\"\nfrom . dslist import TextList as list_type\nfrom . 
dsdict import TextDict as dict_type\nfrom . dsrecord import TextRecord as record_type\nfrom . dstable import TextTable as table_type\nfrom . dstext import TextText as text_type\nfrom . dslambda import TextLambda as lambda_type\nfrom ... types.types import (TypeList,\n                             TypeDict,\n                             TypeRecord,\n                             TypeTuple,\n                             TypeTable,\n                             TypeText,\n                             TypeFunction)\n\n\ndef factory(content_type):\n    \"\"\"Return datasource constructor according to content_type.\"\"\"\n    # Construct the child element.\n    if isinstance(content_type, TypeList):\n        return list_type\n    elif isinstance(content_type, TypeDict):\n        return dict_type\n    elif isinstance(content_type, TypeRecord):\n        return record_type\n    elif isinstance(content_type, TypeTuple):\n        return record_type\n    elif isinstance(content_type, TypeTable):\n        return table_type\n    elif isinstance(content_type, TypeText):\n        return text_type\n    elif isinstance(content_type, TypeFunction):\n        return lambda_type\n    else:\n        assert(False)\n\n\ndef create_content(content_type):\n    \"\"\"Return the text group element for the given content type.\"\"\"\n    if isinstance(content_type, TypeList):\n        return []\n    else:\n        return {}\n\n\ndef create_path(container, container_type, path):\n    \"\"\"Create path in file returning the group at container[path].\"\"\"\n\n    def create_key(container_type, key):\n        \"\"\"\n        Parse string key depending on type.\n        Return parsed key.\n        \"\"\"\n        if isinstance(container_type, TypeList):\n            return int(key)\n        else:\n            return key\n\n    def set_content(container, container_type, key, value):\n        \"\"\"Set content.\"\"\"\n        if isinstance(container_type, TypeList):\n            assert int(key) == len(container)\n            container.append(value)\n        else:\n            container[key] = value\n\n    def update_content(container, container_type, key):\n        \"\"\"\n        Get value in container matching key.\n        If it does not exist create new value according to type.\n        Return tuple of value and its content type.\n        \"\"\"\n        key = create_key(container_type, key)\n        content_type = container_type[key]\n\n        try:\n            content = container[key]\n        except KeyError:\n            content = create_content(content_type)\n            set_content(container, container_type, key, content)\n        return (content, content_type)\n\n    root = container\n\n    # Adding other keys.\n    for key in [key for key in path.split(\"/\") if key != '']:\n        root, container_type = update_content(root, container_type, key)\n    return root\n\n\nclass TextFactory(object):\n    \"\"\"\n    Returns Text type constructors.\n    Creates typed instances.\n    \"\"\"\n    def factory(self, datapointer, group, content_type, can_write):\n        \"\"\"Return contained list slice.\"\"\"\n        return factory(content_type)(self.factory,\n                                     create_content,\n                                     datapointer,\n                                     group=group,\n                                     can_write=can_write)\n\n    def list_type(self, datapointer, container_type):\n        \"\"\"Return the Text list type constructor.\"\"\"\n        return list_type(self.factory,\n                         create_content,\n                         datapointer,\n                         container_type=container_type,\n                         create_path=create_path)\n\n    def dict_type(self, datapointer, container_type):\n        \"\"\"Return the Text dict type constructor.\"\"\"\n        return dict_type(self.factory,\n                         create_content,\n                         datapointer,\n                         container_type=container_type,\n                         create_path=create_path)\n\n    def record_type(self, datapointer, container_type):\n        \"\"\"Return the Text record type constructor.\"\"\"\n        return record_type(self.factory,\n                           create_content,\n                           datapointer,\n                           container_type=container_type,\n                           create_path=create_path)\n\n    def table_type(self, datapointer, container_type):\n        \"\"\"Return the Text table type constructor.\"\"\"\n        return table_type(self.factory,\n                          
create_content,\n datapointer,\n container_type=container_type,\n create_path=create_path)\n\n def text_type(self, datapointer, container_type):\n \"\"\"Return the Text text type constructor.\"\"\"\n return text_type(self.factory,\n create_content,\n datapointer,\n container_type=container_type,\n create_path=create_path)\n\n def lambda_type(self, datapointer, container_type):\n \"\"\"Return the Text lambda type constructor.\"\"\"\n return lambda_type(self.factory,\n create_content,\n datapointer,\n container_type=container_type,\n create_path=create_path)\n\n\ntypes = TextFactory()\n","sub_path":"CDE_Spark/sympathy/datasources/text/dstypes.py","file_name":"dstypes.py","file_ext":"py","file_size_in_byte":6994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"624358539","text":"import multiprocessing as mp\n\ndef func(n,a):\n\tprint(mp.cpu_count())\n\tn.value=3.1415927\n\tfor i in range(len(a)):\n\t\ta[i]=-a[i]\n\nif __name__=='__main__':\n\tnum=mp.Value('d',0.0)\n\tarr=mp.Array('i',range(10))\n\t\n\tp=mp.Process(target=func, args=(num,arr))\n\tp.start()\n\tp.join()\n\tprint(num.value)\n\tprint(arr[:])\n","sub_path":"Practice/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"331505892","text":"# Monalisa Pereira 21600560\n# 260716\n# Lab 04 - Ex 10\n\nfrom math import *\n\nn = int(input(\"Insira o valor da aproximação: \"))\npi = (1/1*3**0)\nc = 1\ne = 1\nm = 3\n\nwhile (c', views.post_details, name='post_details')\n]\n","sub_path":"feed/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"386313229","text":"#!/usr/bin/python\n# coding=utf-8\n# author: zhou yong xia\n# creation date: 2018-11-20\n\n\nimport zmq\nimport time\nimport numpy as np\nimport cv2\nimport sys\nimport threading\nimport json\nimport socket\n\n# zmq的push/pull模式\n# push 和pull,只能有一端绑定端口,比如push端绑定端口了,那pull就不能绑定端口\n# push端只能用来发送数据,pull端只能用来接收数据\n# example:\n'''\ndt1 = ZmqDataTransfer()\ndt1.init_pusher_with_bind(5556) # 绑定端口,push端作为监听器,监听pull的连接\na = [1, 2, 3]\ndt1.send(a) # 从push端发送数据给pull端\n\ndt2 = ZmqDataTransfer()\ndt2.init_puller_without_bind('localhost', 5556) # 只是连接到已经绑定端口的push端\nb, value_type = dt2.recv() # 从push端接收数据\nprint('pulled data:%s, %s' % (value_type, str(b)))\n\n'''\n\n\nclass ZmqDataTransfer:\n def __init__(self):\n self.size_length = 16 # length of size\n self.dispatch_ip = None # 转发节点的ip地址\n self.dispatch_port = None # 转发节点的端口\n self.server_ip = None # 远端服务器的ip地址\n self.server_port = None # 远端服务器的端口\n self.server_thread = None # 服务器端的线程\n self.is_registered = False # 本地服务器是否已经在转发节点上注册\n self.local_ip = None # 本地服务器ip地址\n self.local_port = None # 本地服务器端口\n self.puller = None # 接收器\n self.pusher = None # 推送器\n\n # 向dispatch节点注册本服务器信息\n def __register_function__(self, server_type=13, dispatch_ip='localhost', dispatch_port=12300):\n context = zmq.Context()\n socket = context.socket(zmq.REQ)\n socket.connect(\"tcp://%s:%d\" % (dispatch_ip, dispatch_port))\n d = {\"cmd\": 1,\n \"serverInfo\": {\"serverType\": server_type,\n \"serverIP\": self.local_ip,\n \"serverPort\": self.local_port,\n \"serverCapability\": 10}\n }\n register_json = json.dumps(d) # dict转成json字符串\n socket.send(register_json.encode('utf-8'))\n response = socket.recv() # 接收响应吗\n self.is_registered = True\n print(\"response: %s\" % response)\n\n # 
向dispatch节点注册本服务器信息\n # server_type: 13--storage, 12--feature extractor\n def register(self, server_type=13, dispatch_ip='localhost', dispatch_port=12300):\n self.dispatch_ip = dispatch_ip\n self.dispatch_port = dispatch_port\n self.local_ip = ZmqDataTransfer.get_host_ip()\n self.local_port = 14560\n\n t1 = threading.Thread(target=ZmqDataTransfer.__register_function__,\n name='RecvLoopThread',\n args=(self, server_type, dispatch_ip, dispatch_port))\n t1.setDaemon(True) # 设置为后台线程\n t1.start()\n\n tmpCnt = 40\n while (not self.is_registered) and (tmpCnt > 0):\n print(\"waiting struct register %d...\" % tmpCnt)\n tmpCnt -= 1\n if (self.is_registered is False) and (tmpCnt == 0):\n print(\"Unable to register to the cloud!\")\n sys.exit(0)\n time.sleep(1)\n t1.join()\n\n # 初始化push/pull模式的push端,并绑定到本地端口,作为监听器\n # local_port: 本地机器的端口,int\n def init_pusher_with_bind(self, local_port):\n if self.pusher:\n if self.local_port == local_port:\n return self.pusher\n\n context = zmq.Context()\n self.pusher = context.socket(zmq.PUSH)\n self.pusher.bind('tcp://*:%d' % local_port)\n print('server of pushing is working......')\n return self.pusher\n\n # 初始化push/pull模式的push端,连接到服务器,非监听器\n # server_ip: 服务器的ip地址,str\n # server_port: 服务器的端口,int\n def init_pusher_without_bind(self, server_ip, server_port):\n if self.pusher:\n if self.server_ip == server_ip and self.server_port == server_port:\n return self.pusher\n context = zmq.Context()\n self.pusher = context.socket(zmq.PUSH)\n if not server_ip:\n server_ip = '*'\n self.pusher.connect('tcp://%s:%d' % (server_ip, server_port))\n return self.pusher\n\n # 发送数据\n def send(self, data, sock=None):\n if sock is None:\n sock = self.get_socket()\n self.send_x(data, sock)\n\n # 接收数据\n def recv(self, sock=None):\n if sock is None:\n sock = self.get_socket()\n data, value_type = self.receive_x(sock)\n return data, value_type\n\n # 在dict中找图像数据,找的话,就把图像数据从list格式转换为numpy.ndarray,\n # 像素灰度值使用数据类型为uint8\n # key为'img','image','jpg','bmp' 就认为是图像数据,就自动进行数据格式转换\n # img_flag: 指定图像数据的key\n @staticmethod\n def dict_img_list_to_numpy_array(dict_data, dtype=None, img_flag=['img', 'image', 'jpg', 'bmp']):\n iterms = dict_data.items() # all items\n for (k, v) in iterms:\n if isinstance(v, list) and (k in img_flag):\n dict_data[k] = np.asarray(v, dtype=dtype)\n elif isinstance(v, dict):\n ZmqDataTransfer.dict_img_list_to_numpy_array(dict_data[k])\n\n # 接收dict:但按照json格式接收,然后转换成dict,用于接收 从C++端发送过来的数据\n # numpy array类型的图像数据类型强制变为uint8,\n # 如果需要支持其它数据类型,则把np.uint8去掉即可,\n # 但对于uint8的图像数据,可能会自动识别为int64,导致图像不能正确显示,此时需要强制转换一下,如下这条语句所示:\n # img = np.asarray(img, dtype=np.uint8)\n # 返回值:dict类型\n def recv_dict_as_json(self, sock=None):\n if sock is None:\n sock = self.get_socket()\n #content = sock.recv()\n content = sock.recv_json() # 接收json字符串,并转换成dict\n\n # 在dict中找图像数据,找的话,就把图像数据从list格式转换为numpy.ndarray,\n # 像素灰度值使用数据类型为uint8\n # key为'img','image','jpg','bmp'就认为是图像数据,就自动进行数据格式转换\n ZmqDataTransfer.dict_img_list_to_numpy_array(content, np.uint8)\n return content\n\n # 发送dict,但先转换为json格式,再发送,用于向C++端发送json数据\n # 要包含numpy array和bytes数据的话,请先转成list和str,我这里用了类MyEncoder做了这个转换工作\n def send_dict_as_json(self, data, sock=None):\n if sock is None:\n sock = self.get_socket()\n #self.send_data_type('json') # 与C++通讯时,去掉这句,除非C++接收端也先接收data_type\n js = json.dumps(data, cls=MyEncoder) # 把dict转换成json格式数据,MyEncoder用于自动把np.ndarray和bytes等\n #sock.send_json(js) # C++接收后,字符串的双引号前会多一个反斜杠\n sock.send_string(js)\n\n # 把dict中的np.ndarray 和bytes 转成list和str,暂时用不到\n def convert_data_for_zmq(self, dictionary):\n iterms = 
dictionary.items() # all items\n for (k, v) in iterms:\n # type of v is string\n if isinstance(v, dict):\n self.convert_data_for_zmq(dictionary[k])\n elif isinstance(v, np.ndarray):\n dictionary[k] = v.tolist()\n elif isinstance(v, bytes):\n dictionary[k] = str(v, encoding='utf-8')\n\n # 初始化push/pull模式的pull端,绑定本地端口,作为监听器\n def init_puller_with_bind(self, local_port):\n self.local_port = local_port\n context = zmq.Context()\n self.puller = context.socket(zmq.PULL)\n self.puller.bind('tcp://*:%d' % local_port)\n return self.puller\n\n # 初始化push/pull模式的pull端\n def init_puller_without_bind(self, server_ip, server_port):\n self.server_ip = server_ip\n self.server_port = server_port\n context = zmq.Context()\n self.puller = context.socket(zmq.PULL)\n if not server_ip:\n server_ip = '*'\n self.puller.connect('tcp://%s:%d' % (server_ip, server_port))\n return self.puller\n\n # 是否连上了服务器\n def is_connected(self):\n if not self.puller:\n return False\n return True\n\n # 发送数据的push端是否活着\n def is_push_server_alive(self):\n if not self.pusher:\n return False\n return True\n\n # 返回pusher和puller中不为空的那个socket\n def get_socket(self):\n if self.pusher:\n return self.pusher\n else:\n return self.puller\n\n # 发送整数int\n def send_int(self, integer, sock=None):\n self.send_size(integer, sock)\n\n # 发送数据长度信息:固定长度\n # size: 长度,int\n def send_size(self, size, sock=None):\n if sock is None:\n sock = self.get_socket()\n\n byes_of_size = size.to_bytes(self.size_length, byteorder='little')\n sock.send(byes_of_size)\n\n # 接收数据长度信息:固定长度\n def receive_size(self, sock=None):\n if sock is None:\n sock = self.get_socket()\n\n byes_of_size = sock.recv()\n size = int.from_bytes(byes_of_size, byteorder='little')\n return size\n\n # 接收float\n def receive_float(self, sock=None):\n if sock is None:\n sock = self.get_socket()\n\n string_data = sock.recv()\n float_data = float(string_data)\n return float_data\n\n # 发送float\n def send_float(self, float_data, sock=None):\n if sock is None:\n sock = self.get_socket()\n string_data = bytes(str(float_data), encoding='utf-8')\n sock.send(string_data)\n\n # 发送数据类型:先发送字节数,再发送用字符串表示的数据类型,其实就是send_string\n # string_type: string, 用字符串表示的数据类型,可以是'string', 'image', 'command'\n def send_data_type(self, string_type, sock=None):\n if sock is None:\n sock = self.get_socket()\n\n # 发送数据类型信息\n #bytes_type = bytes(string_type, encoding='utf-8')\n #sock.send(bytes_type)\n sock.send_string(string_type)\n\n # 接收数据类型\n # 返回值:string, 用字符串表示的数据类型,可以是'string', 'image', 'command'\n def receive_data_type(self, sock=None):\n if sock is None:\n sock = self.get_socket()\n\n #bytes_type = sock.recv()\n #string_type = str(bytes_type, encoding='utf-8')\n string_type = sock.recv_string()\n return string_type\n\n # 发送字符串:先发送字符串长度,再发送字符串内容\n def send_string(self, string_data, sock=None):\n if sock is None:\n sock = self.get_socket()\n sock.send_string(string_data)\n\n # 接收字符串:先接收字符串长度,再接收字符串内容\n def receive_string(self, sock=None):\n if sock is None:\n sock = self.get_socket()\n string_data = sock.recv_string()\n return string_data\n\n # 发送字典dic:\n # key必须是字符串\n # value 可以是int, float, string, list, numpy array, dic, opencv image\n def send_dict(self, dictionary, sock=None):\n if sock is None:\n sock = self.get_socket()\n dic_len = len(dictionary) # length of keys\n self.send_size(dic_len)\n\n iterms = dictionary.items() # all items\n for (k, v) in iterms:\n self.send_string(k) # the type of keys must be string\n # type of v is string\n if isinstance(v, str):\n self.send_data_type('string')\n self.send_string(v)\n # type 
of v is int\n elif isinstance(v, int):\n self.send_data_type('int')\n self.send_size(v)\n elif isinstance(v, float):\n self.send_data_type('float')\n self.send_float(v)\n elif isinstance(v, tuple):\n self.send_data_type('tuple')\n self.send_tuple(v)\n # type of v is a list\n elif isinstance(v, list):\n self.send_data_type('list')\n self.send_list(v)\n\n # type of v is a numpy array\n elif isinstance(v, np.ndarray):\n self.send_data_type('numpy.ndarray')\n self.send_array(v)\n\n elif isinstance(v, dict):\n self.send_data_type('dict')\n self.send_dict(v)\n else:\n self.send_data_type('error')\n err_msg = 'error in send_dict: the value type of dictionary %s is not supported!' % str(type(v))\n print(err_msg)\n self.send_string(err_msg)\n\n # 接收字典dic: key必须是字符串\n # 返回值:字典\n def receive_dict(self, sock=None):\n if sock is None:\n sock = self.get_socket()\n\n dic_len = self.receive_size() # length of keys\n\n dictionary = {}\n\n for i in range(dic_len):\n k = self.receive_string() # the type of keys must be string\n value_type = self.receive_data_type() # the type of value\n v = None\n\n if value_type.startswith('string'): # type of v is string\n v = self.receive_string(v)\n\n elif value_type.startswith('int'): # type of v is int\n v = self.receive_size()\n elif value_type.startswith('float'):\n v = self.receive_float()\n elif value_type.startswith('tuple'):\n v = self.receive_tuple()\n\n elif value_type.startswith('list'): # type of v is a list\n v = self.receive_list()\n\n elif value_type.startswith('dict'):\n v = self.receive_dict()\n\n elif value_type.startswith('numpy.ndarray'): # type of v is a numpy array\n v = self.receive_array()\n\n elif value_type.startswith('error'): # not supported data type\n err_msg = self.receive_string()\n print(err_msg)\n\n dictionary[k] = v # add one item to dictionary\n return dictionary\n\n # 发送tuple\n def send_tuple(self, t, sock=None):\n if sock is None:\n sock = self.get_socket()\n array = list(t)\n self.send_array(array)\n print('tuple is sent, length=', len(array))\n\n # 接收tuple\n def receive_tuple(self, sock=None):\n if sock is None:\n sock = self.get_socket()\n array = self.receive_array()\n t = tuple(array)\n return t\n\n # 发送list\n def send_list(self, list_data, sock=None):\n if sock is None:\n sock = self.get_socket()\n\n # 元素类型有可能不同,要逐个发送\n self.send_size(len(list_data))\n for element in list_data:\n self.send_x(element)\n\n # 接收list\n def receive_list(self, sock=None):\n if sock is None:\n sock = self.get_socket()\n\n str_size = self.receive_size()\n size = int(str_size)\n list_data = [None for row in range(size)]\n\n for i in range(size):\n list_data[i], type = self.receive_x()\n\n return list_data\n\n # 自动判断类型并发送数据,支持的数据类型有string, int, float, tuple, list, numpy.ndarray, dict\n def send_x(self, data, sock=None):\n if sock is None:\n sock = self.get_socket()\n v = data\n # type of v is string\n if isinstance(v, str):\n self.send_data_type('string')\n self.send_string(v)\n # type of v is int\n elif isinstance(v, int):\n self.send_data_type('int')\n self.send_size(v)\n elif isinstance(v, float):\n self.send_data_type('float')\n self.send_float(v)\n elif isinstance(v, tuple):\n self.send_data_type('tuple')\n self.send_tuple(v)\n # type of v is a list\n elif isinstance(v, list):\n self.send_data_type('list')\n self.send_list(v)\n\n # type of v is a numpy array\n elif isinstance(v, np.ndarray):\n\n if str(v.dtype) == 'uint8':\n self.send_data_type('image')\n self.send_image(v)\n else:\n self.send_data_type('numpy.ndarray')\n self.send_array(v)\n # type 
of v is a dict\n elif isinstance(v, dict):\n self.send_data_type('dict')\n self.send_dict(v)\n else:\n self.send_data_type('error')\n err_msg = 'error in send_dict: the value type of dictionary is not supported!'\n print(err_msg)\n self.send_string(err_msg)\n\n # 接收任意类型\n def receive_x(self, sock=None):\n if sock is None:\n sock = self.get_socket()\n\n value_type = self.receive_data_type() # the type of value\n v = None\n\n if value_type.startswith('string'): # type of v is string\n v = self.receive_string(v)\n elif value_type.startswith('int'): # type of v is int\n v = self.receive_size()\n elif value_type.startswith('float'):\n v = self.receive_float()\n elif value_type.startswith('tuple'):\n v = self.receive_tuple()\n elif value_type.startswith('list'): # type of v is a list\n v = self.receive_list()\n elif value_type.startswith('numpy.ndarray'): # type of v is a numpy array\n v = self.receive_array()\n elif value_type.startswith('array'): # type of v is a numpy array\n v = self.receive_array()\n elif value_type.startswith('dict'):\n v = self.receive_dict()\n elif value_type.startswith('image'):\n v = self.recv_image()\n elif value_type.startswith('json'):\n v = self.recv_dict_as_json()\n elif value_type.startswith('error'): # not supported data type\n err_msg = self.receive_string()\n print(err_msg)\n return v, value_type\n\n # 发送数组: 数组元素类型必须相同,可以使numpy.ndarray,也可以是list\n # 先发送数组的尺寸shape,再发送数组元素类型,数组内容转换为字节流bytes,再发送字节流长度和字节流内容\n # array: 可以是numpy array,也可以是list\n def send_array(self, array, sock=None):\n if sock is None:\n sock = self.get_socket()\n np_array = None\n if isinstance(array, list):\n np_array = np.array(array)\n elif isinstance(array, np.ndarray):\n np_array = array\n shape = np_array.shape\n shape_len = len(shape)\n self.send_size(shape_len) # 发送维数\n for element in shape:\n self.send_size(element) # 发送每一维的大小\n\n self.send_string(str(np_array.dtype)) # 发送数组元素类型\n\n bytes_data = np_array.tobytes()\n self.send_size(len(bytes_data))\n sock.send(bytes_data)\n print('array is sent, length=', len(bytes_data))\n\n # 接收数组:数组元素类型必须相同\n # 先接收数组的尺寸shape,再接收数组元素类型,再接收字节流长度和字节流内容\n # 返回值:numpy array\n def receive_array(self, sock=None):\n if sock is None:\n sock = self.get_socket()\n\n shape_len = self.receive_size() # 维数\n shape = []\n for element in range(0, shape_len):\n dim = self.receive_size() # 每一维的大小\n shape.append(dim)\n array_shape = tuple(shape)\n\n element_type = self.receive_string() # 数组元素类型\n\n size = self.receive_size() # 数组内容的总长度\n print('array length is ', size)\n\n # 当数组数据量较大时,要分多次接收\n bytes_data = b''\n received_len = 0\n while received_len < size:\n rest_size = size - received_len\n current_bytes = sock.recv(min(rest_size, 1024))\n received_len += len(current_bytes)\n bytes_data += current_bytes\n\n print('array is received, length=', received_len)\n np_array = np.fromstring(bytes_data, dtype=element_type)\n\n return np_array.reshape(array_shape)\n\n # 发送图像:先发送图像长度,再发送图像内容\n def send_image(self, frame, sock=None):\n if sock is None:\n sock = self.get_socket()\n # 对图片进行编码,因为socket不支持直接发送图片\n result, imgencode = cv2.imencode('.jpg', frame)\n np_array = np.array(imgencode)\n # stringData = np_array.tostring()\n bytes_data = np_array.tobytes()\n\n # 首先发送图片编码后的长度\n size = len(bytes_data)\n self.send_size(size, sock)\n print('sent image length: ', size)\n\n # 如果是python对python那么可以一次性发送,\n # 如果发给c++的server,则必须分开发,一个字节一个字节发送编码的内容,因为编码里面有字符串结束标志位,c++会截断\n sock.send(bytes_data)\n print('image is sent')\n\n # 接收图像:先接收图像长度,再接收图像内容\n # receive one image:\n # firstly, receive 
the size of image bytes, then receive image data\n # return the received image\n def recv_image(self, sock=None):\n if sock is None:\n sock = self.get_socket()\n size = self.receive_size(sock) # 首先接收来自客户端发送的大小信息\n print(\"image data length=\", size)\n\n # 若成功接收到大小信息,再接收整张图片\n # 当图片数据量较大时,要分多次接收\n bytes_data = b''\n received_len = 0\n\n while received_len < size:\n rest_size = size - received_len\n current_bytes = sock.recv(min(rest_size, 1024))\n received_len += len(current_bytes)\n bytes_data += current_bytes\n\n if not bytes_data:\n return None\n\n print(\"received image, actual received length=%d\" % received_len)\n #str(bytes_data, encoding='utf-8')\n np_array = np.fromstring(bytes_data, dtype='uint8')\n decimg = cv2.imdecode(np_array, 1) # 解码处理,返回mat图片\n\n return decimg\n\n # 关闭连接和停止服务器端程序\n def close(self):\n if self.puller:\n self.puller.close()\n if self.pusher:\n self.pusher.close()\n\n # 获取本机IP地址\n @staticmethod\n def get_host_ip():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect(('8.8.8.8', 80))\n ip = s.getsockname()[0]\n return ip\n\n\n# json不支持numpy array和bytes,所以必须转换成list和str\n# 这是一个编码器,可以自动把numpy array和bytes转换成list和str\n# example:\n# data = np.array([1,2,3])\n# js_data = json.dumps(data,cls=MyEncoder) # 把\nclass MyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n elif isinstance(obj, bytes):\n return str(obj, encoding='utf-8')\n return json.JSONEncoder.default(self, obj)\n\n\n","sub_path":"ZmqDataTransfer.py","file_name":"ZmqDataTransfer.py","file_ext":"py","file_size_in_byte":23706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"64612093","text":"# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/stable/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(os.pardir))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'OpenFF Fragmenter'\ncopyright = \"2018, Chaya D. Stern\"\nauthor = 'Chaya D. 
Stern'\n\n# The short X.Y version\nversion = ''\n# The full version, including alpha/beta/rc tags\nrelease = ''\n\n\n# -- General configuration ---------------------------------------------------\n\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'nbsphinx',\n 'nbsphinx_link',\n 'sphinxcontrib.bibtex',\n 'sphinxcontrib.autodoc_pydantic',\n]\n\nsource_suffix = '.rst'\n\nmaster_doc = 'index'\n\nlanguage = None\n\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'default'\n\n# Autodoc settings\nautosummary_generate = True\n\nautodoc_default_options = {\n 'member-order': 'bysource',\n}\n\nautodoc_mock_imports = [\n 'openff.toolkit',\n]\n\n# Napoleon settings\nnapoleon_numpy_docstring = True\nnapoleon_use_rtype = False\n\n# autodoc_pydantic settings\nautodoc_pydantic_show_config = False\nautodoc_pydantic_model_show_config = False\nautodoc_pydantic_show_validators = False\nautodoc_pydantic_model_show_validators = False\n\n# nbsphinx settings\nnbsphinx_execute = 'never'\n\n# sphinx bibtext settings\nbibtex_bibfiles = [\n 'index.bib'\n]\n\n# Set up the intershinx mappings.\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/', None),\n 'numpy': ('https://docs.scipy.org/doc/numpy/', None),\n 'openff.toolkit': ('https://open-forcefield-toolkit.readthedocs.io/en/latest/', None),\n}\n\n# Set up mathjax.\nmathjax_path = \"https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-svg.js\"\n\n# -- Options for HTML output -------------------------------------------------\n\nhtml_theme = 'sphinx_rtd_theme'\n\nhtml_theme_options = {\n 'prev_next_buttons_location': None,\n 'sticky_navigation': False\n}\n\nhtml_static_path = ['_static']\n\nhtml_context = {\n 'css_files': [\n '_static/css/theme_overrides.css', # override wide tables in RTD theme\n ],\n}\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'fragmenterdoc'\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n 'papersize': 'letterpaper',\n 'pointsize': '10pt',\n 'preamble': '',\n 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'fragmenter.tex', 'OpenFF Fragmenter Documentation', author, 'manual'),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'openff-fragmenter', 'OpenFF Fragmenter Documentation', [author], 1)\n]\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n#  dir menu entry, description, category)\ntexinfo_documents = [\n    (master_doc, 'openff-fragmenter', 'OpenFF Fragmenter Documentation',\n     author, 'openff-fragmenter', 'Fragment molecules for quantum mechanics torsion scans.',\n     'Miscellaneous'),\n]\n","sub_path":"docs/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":4169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"488691724","text":"import pyodbc\n\ncon_string = 'driver=MySQL ODBC 5.3 Unicode Driver;' \\\n             'server=localhost;' \\\n             'database=pythonddata;' \\\n             'uid=root;' \\\n             'pwd=1234'\n\n\n# print(con_string)\n# print(pyodbc.connect(con_string))\n\n# Create a table with python\ndef create_table():\n    with pyodbc.connect(con_string) as con:\n        sql_cmd = \"\"\"\n        create table person(\n        id int PRIMARY KEY AUTO_INCREMENT,\n        name varchar(255),\n        weight float,\n        height float\n        )\n        \"\"\"\n\n        try:\n            con.execute(sql_cmd)\n        except pyodbc.ProgrammingError:\n            print('Already have table')\n\n\n# Insert data into the table\ndef insert_data():\n    with pyodbc.connect(con_string) as con:\n        sql_cmd = \"\"\"\n        INSERT INTO person(name,weight,height)\n        VALUES('Samit',60,165);\n        \"\"\"\n        con.execute(sql_cmd)\n\n\n# Fetch the data and display it\ndef select_data():\n    with pyodbc.connect(con_string) as con:\n        sql_cmd = \"\"\"\n        SELECT * FROM person;\n        \"\"\"\n        for row in con.execute(sql_cmd):\n            print(row)\n\n\n# Call the function that creates the table\n# create_table()\n\n# Call the function that saves data into the table\n# insert_data()\n\n# Call the function that fetches and displays the data\nselect_data()\n\n# Why does the leading Thai vowel render oddly?","sub_path":"connect_mysql.py","file_name":"connect_mysql.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"317886053","text":"from datetime import datetime\r\nfrom os import path\r\n\r\nimport pandas as pd\r\nfrom gooey import Gooey, GooeyParser\r\nfrom xlsxwriter import Workbook\r\n\r\nnow = datetime.now()\r\n\r\n\r\ndef initialize_branding_grid(branding_grid_file):\r\n    grid_df = pd.DataFrame()\r\n    try:\r\n        grid_df = pd.read_excel(branding_grid_file, dtype=str)\r\n    except pd.errors.ParserError:\r\n        print(\"Unable to process Branding Grid\")\r\n\r\n    grid_df.columns = grid_df.columns.str.title().str.strip()\r\n    grid_df['Envelope'] = grid_df['Envelope'].fillna('Anthem') # Fill in envelope data that is not otherwise indicated.\r\n    grid_df = grid_df.fillna('')\r\n    grid_df = grid_df.apply(lambda x: x.str.strip())\r\n\r\n    # Merge Contract Number columns\r\n    grid_df['Contract Number'] = grid_df.apply(branding_grid_col_merge, axis=1)\r\n    grid_df.drop_duplicates(subset=['Contract Number'], inplace=True)\r\n    grid_df.set_index('Contract Number', inplace=True)\r\n    grid_df.drop(['Cms Contract', '2018 Pbp', 'Sourcegroupnumber', 'Sourcesubgrpnbr'], axis=1, inplace=True)\r\n    return grid_df\r\n\r\n\r\ndef branding_grid_col_merge(row):\r\n    contract_name = []\r\n    if row['Cms Contract']:\r\n        contract_name.append(row['Cms Contract'])\r\n    if row['2018 Pbp']:\r\n        contract_name.append(row['2018 Pbp'])\r\n    if row['Sourcegroupnumber']:\r\n        contract_name.append(row['Sourcegroupnumber'])\r\n    if row['Sourcesubgrpnbr']:\r\n        contract_name.append(row['Sourcesubgrpnbr'])\r\n    return '-'.join(contract_name)\r\n\r\n\r\ndef initialize_mailing_list(mailing_list_file, grid_df):\r\n    list_df = pd.DataFrame()\r\n    try:\r\n        list_df = pd.read_csv(mailing_list_file, dtype=str)\r\n    except 
pd.errors.ParserError:\r\n print(\"Unable to process Mailing List\")\r\n\r\n # Create Tier 3 and Tier 2 Contract Numbers for matching with the Branding Grid.\r\n list_df['LCN3'] = list_df['List Contract Number'].str.rsplit('-', 1, expand=True)[0]\r\n list_df['LCN2'] = list_df['List Contract Number'].str.rsplit('-', 2, expand=True)[0]\r\n\r\n # Find matching Contract Numbers in the Branding Grid.\r\n list_df.loc[list_df['List Contract Number'].isin(grid_df.index),\r\n 'Contract Number'] = list_df['List Contract Number']\r\n list_df.loc[list_df['LCN3'].isin(grid_df.index), 'Contract Number'] = list_df['LCN3']\r\n\r\n # Fill in remainder with Tier 2 contract numbers.\r\n list_df.loc[~list_df[['List Contract Number', 'LCN3']].isin(grid_df.index).any(axis=1),\r\n 'Contract Number'] = list_df['LCN2']\r\n\r\n # Drop temporary columns\r\n list_df.drop(['LCN3', 'LCN2'], axis=1, inplace=True)\r\n return list_df\r\n\r\n\r\ndef output_envelope_csv(merge_frame):\r\n anthem_df = merge_frame.loc[merge_frame['Envelope'] == 'Anthem']\r\n amerigroup_df = merge_frame.loc[merge_frame['Envelope'] == 'Amerigroup']\r\n anthem_df.to_csv(f'Anthem Envelope List {now:%m%d%y}.csv', header=True, index=False)\r\n amerigroup_df.to_csv(f'Amerigroup Envelope List {now:%m%d%y}.csv', header=True, index=False)\r\n\r\n\r\ndef list_summary(merge_frame):\r\n cname = {'City': 'Count'}\r\n df_group = merge_frame.rename(columns=cname).fillna('#N/A').groupby(['Envelope', 'Contract Number'],\r\n as_index=False).count()\r\n envelope_list = ['#N/A', 'Amerigroup', 'Anthem']\r\n dfs = []\r\n for idx, frame in enumerate(envelope_list):\r\n dfs.append(df_group[df_group['Envelope'] == frame]\r\n [['Envelope', 'Contract Number', 'Count']].reset_index(drop=True))\r\n\r\n with Workbook(f'Anthem Merge Summary_{now:%m%d%y}.xlsx') as wb:\r\n ws = wb.add_worksheet()\r\n fmt_header = wb.add_format({'font_size': 14, 'bold': 1, 'align': 'center'})\r\n fmt_bold = wb.add_format({'bold': 1})\r\n fmt_right = wb.add_format({'align': 'right'})\r\n col_iter = iter(range(len(dfs)))\r\n for idx, frame in enumerate(dfs):\r\n col = int(idx*2+next(col_iter))\r\n ws.set_column(col, col, 26)\r\n ws.set_column(col+1, col+1, 9, fmt_right)\r\n ws.merge_range(0, col, 0, col+1, frame['Envelope'][0], fmt_header)\r\n ws.write(1, col, 'Total', fmt_bold)\r\n ws.write(1, col+1, frame['Count'].agg(sum), fmt_bold)\r\n ws.write_row(2, col, frame.columns[1:3].values)\r\n ws.write_column(3, col, frame['Contract Number'])\r\n ws.write_column(3, col+1, frame['Count'])\r\n\r\n\r\ndef generate_proofs(working_df):\r\n contracts = working_df['Contract Number'].unique()\r\n proof_df = pd.DataFrame()\r\n for x in range(len(contracts)):\r\n proof_df = proof_df.append(working_df.loc[working_df['Contract Number'] == contracts[x]].head(2))\r\n proof_df['Proofs'] = 'Proof'\r\n proof_df = proof_df.append(working_df, ignore_index=True, sort=False)\r\n return proof_df\r\n\r\n\r\n@Gooey(program_name='Anthem Merge Program')\r\ndef main():\r\n parser = GooeyParser(description='Combine the branding grid with a processed mailing list for variable data merge')\r\n parser.add_argument('Branding_Grid',\r\n help=\"Select the Branding Grid File (.xlsx)\",\r\n widget=\"FileChooser\")\r\n parser.add_argument('Mailing_List',\r\n help=\"Select the Mailing List File (.csv)\",\r\n widget=\"FileChooser\")\r\n parser.add_argument('Purpose',\r\n choices=['Data', 'Merge'],\r\n default='Data',\r\n widget='Dropdown',\r\n help='Select purpose of merge:')\r\n\r\n args = parser.parse_args()\r\n grid_df = 
initialize_branding_grid(args.Branding_Grid)\r\n mail_df = initialize_mailing_list(args.Mailing_List, grid_df)\r\n\r\n if 'Data' in args.Purpose:\r\n env_df = grid_df['Envelope']\r\n merged_df = mail_df.join(env_df, on='Contract Number')\r\n output_envelope_csv(merged_df)\r\n list_summary(merged_df)\r\n\r\n if 'Merge' in args.Purpose:\r\n mail_df.drop(['Envelope'], axis=1, inplace=True)\r\n merged_df = mail_df.join(grid_df, on='Contract Number')\r\n final_df = generate_proofs(merged_df)\r\n filename = path.basename(args.Mailing_List).rsplit(' ', 1)[0]\r\n final_df.to_csv(f'{filename} Merged Variable.csv', index=False, encoding='ISO-8859-1')\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"anthem-merge.pyw","file_name":"anthem-merge.pyw","file_ext":"pyw","file_size_in_byte":6334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"269137162","text":"#!/usr/bin/env python\n\n\"\"\"Format to quaternion naming:\n quat_frameA_frameB := orientation of frameB\n expressed in frameA.\"\"\"\n\nimport rospy\nimport tf\nfrom geometry_msgs.msg import Quaternion, QuaternionStamped, TwistStamped\nfrom std_msgs.msg import Float64\nfrom tf import transformations as tfms\nimport math\n\n\n\ndef quat_cb(data, broadcaster):\n\n quat_ref_base = [0.707, 0, 0, 0.707] # reference orientation of shield frame relative to world frame. Base aligns with world frame\n\n quat_earth_ref = [0.7106, 0.00933, 0.02892, 0.7029] #Need to determine this emprically\n\n\n quat_earth_sensor = tfms.unit_vector([data.quaternion.x/1000,\n data.quaternion.y/1000,\n data.quaternion.z/1000,\n data.quaternion.w/1000])\n\n\n quat_ref_sensor = tfms.quaternion_multiply(quat_earth_sensor, tfms.quaternion_inverse(quat_earth_ref)) #check commutation\n\n quat_sensor_base = tfms.quaternion_multiply(quat_ref_base, tfms.quaternion_inverse(quat_ref_sensor))\n\n\n euler_sensor_base = tfms.euler_from_quaternion(quat_sensor_base)\n\n pos_x = 0 #- 0.4*math.sin(euler_sensor_base[1])\n pos_y = 0.120# - 0.4*math.sin(euler_sensor_base[0])\n pos_z = 0.70 #- 0.4*(math.sin(euler_sensor_base[1]) + math.sin(euler_sensor_base[2]))\n\n pos_sensor_base = qv_mult(quat_sensor_base, [pos_x, pos_y, pos_z])\n\n broadcaster.sendTransform((pos_sensor_base[0], pos_sensor_base[1], pos_sensor_base[2]),\n (quat_sensor_base[0], quat_sensor_base[1], quat_sensor_base[2], quat_sensor_base[3]),\n rospy.Time.now(),\n \"base_link\",\n \"motion_shield_ref\")\n\n\n\n\n\nif __name__=='__main__':\n rospy.init_node('base_link_pos')\n\n broadcaster = tf.TransformBroadcaster()\n\n rospy.Subscriber('quat_motion_shield', QuaternionStamped, quat_cb, broadcaster)\n\n\n rospy.spin()\n","sub_path":"src/ur10_cm/scripts/base_link_pose.py","file_name":"base_link_pose.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"109485341","text":"from random import randint\r\n\r\ndef rzucKostka():\r\n return randint(1, 6)\r\n\r\nsuma_rzutow = 0\r\n\r\nprint('Wyrzucone oczka: ', end='')\r\n\r\nfor _ in range(3):\r\n rzut = rzucKostka()\r\n print(f'{rzut}', end=' ')\r\n suma_rzutow += rzut\r\n\r\nprint(f'\\nSuma rzutow: {suma_rzutow}')","sub_path":"04-Subroutines/zad17.py","file_name":"zad17.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"231142409","text":"'''\r\n@Author: longfengpili\r\n@Date: 2019-12-12 11:03:01\r\n@LastEditTime : 2019-12-23 
13:13:49\r\n@github: https://github.com/longfengpili\r\n'''\r\n#!/usr/bin/env python3\r\n#-*- coding:utf-8 -*-\r\n\r\nfrom datetime import datetime\r\nfrom graphviz import Digraph\r\n\r\nfrom excel_api import ReadDataFromExcel\r\nfrom mysetting import *\r\n\r\nclass QuestTutorial(object):\r\n \r\n def __init__(self, quest_file, game_version):\r\n self.quest_file = quest_file\r\n self.game_version = game_version\r\n self.sheetname = 'Quest_AB0'\r\n self.columns = ['id', 'PreQuestIds']\r\n\r\n def get_quests_from_file(self):\r\n quests_d = {}\r\n quests_l = []\r\n r_excel = ReadDataFromExcel(self.quest_file)\r\n datas = r_excel.get_sheet_values_by_columns(self.sheetname, self.columns, header_row=2)\r\n datas = datas[2:]\r\n datas = [[q if isinstance(q, str) else f\"{q:.0f}\" for q in data] for data in datas]\r\n for data in datas:\r\n quests_d[data[0]] = data[1].split('#') if '#' in data[1] else [data[1]]\r\n if '#' in data[1]:\r\n for q in data[1].split('#'):\r\n quests_l.append([q, data[0]])\r\n else:\r\n quests_l.append([data[1], data[0]])\r\n return quests_d, quests_l\r\n\r\n def find_long_questpath(self, quests_d, start, end, path=[]):\r\n path = path +[start]\r\n if start == end:\r\n return path\r\n \r\n long_path = []\r\n for node in quests_d.get(start):\r\n if node not in path:\r\n newpath = self.find_long_questpath(quests_d, node, end, path)\r\n if newpath:\r\n if not long_path or len(newpath) > len(long_path):\r\n long_path = newpath\r\n return long_path\r\n\r\n def save_questpath(self, quests_l):\r\n today = datetime.now().strftime('%Y%m%d')\r\n filename = today + 'questtutorial'\r\n dot = Digraph(name=filename, format='pdf', directory='./output')\r\n nodes = []\r\n for qs in quests_l:\r\n for q in qs:\r\n if q not in nodes:\r\n dot.node(q)\r\n nodes.append(q)\r\n dot.edge(qs[0], qs[1])\r\n dot.attr(rankdir='LR')\r\n dot.render()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n","sub_path":"tutorials/quest_tutorial.py","file_name":"quest_tutorial.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"569654129","text":"import os\nimport filecmp\nimport sys\nimport shutil\nimport pytest\nimport virtool.jobs.aodp\n\nTEST_FILES_PATH = os.path.join(sys.path[0], \"tests\", \"test_files\")\nTEST_REF_PATH = os.path.join(TEST_FILES_PATH, \"aodp\", \"reference.fa\")\n\n\n@pytest.fixture\ndef mock_job(tmpdir, mocker, request, dbs, test_db_connection_string, test_db_name):\n index_dir = tmpdir.mkdir(\"references\").mkdir(\"foo_ref\").mkdir(\"foo_index\")\n shutil.copy(TEST_REF_PATH, index_dir.join(\"ref.fa\"))\n\n tmpdir.mkdir(\"samples\").mkdir(\"foo_sample\").mkdir(\"analysis\").mkdir(\"foo_analysis\")\n\n settings = {\n \"data_path\": str(tmpdir),\n \"db_name\": test_db_name\n }\n\n dbs.analyses.insert_one({\n \"_id\": \"foo_analysis\",\n \"workflow\": \"aodp\",\n \"ready\": False,\n \"sample\": {\n \"id\": \"foobar\"\n },\n \"subtraction\": {\n \"id\": \"Prunus persica\"\n }\n })\n\n dbs.indexes.insert_one({\n \"_id\": \"foo_index\",\n \"manifest\": {\n \"foo\": 1,\n \"bar\": 5\n },\n \"sequence_otu_map\": {\n \"foo\": \"bar\"\n }\n })\n\n dbs.samples.insert_one({\n \"_id\": \"foo_sample\",\n \"library_type\": \"amplicon\",\n \"paired\": False,\n \"quality\": {\n \"count\": 10000,\n \"length\": [78, 101]\n },\n \"subtraction\": {\n \"id\": \"foo_subtraction\"\n }\n })\n\n dbs.references.insert_one({\n \"_id\": \"foo_ref\",\n \"data_type\": \"barcode\"\n })\n\n dbs.jobs.insert_one({\n \"_id\": 
\"foo_job\",\n \"task\": \"aodp\",\n \"args\": {\n \"analysis_id\": \"foo_analysis\",\n \"index_id\": \"foo_index\",\n \"ref_id\": \"foo_ref\",\n \"sample_id\": \"foo_sample\"\n },\n \"proc\": 2,\n \"mem\": 8\n })\n\n queue = mocker.Mock()\n\n job = virtool.jobs.aodp.Job(\n test_db_connection_string,\n test_db_name,\n settings,\n \"foo_job\",\n queue\n )\n\n job.init_db()\n\n return job\n\n\ndef test_fetch_index(mock_job):\n mock_job.check_db()\n mock_job.prepare_index()\n\n assert filecmp.cmp(\n TEST_REF_PATH,\n mock_job.params[\"local_index_path\"],\n )\n","sub_path":"tests/jobs/test_aodp.py","file_name":"test_aodp.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"503587502","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Sep 15 13:58:18 2018\n\n@author: zee\n\"\"\"\n\nimport re\nimport os\nimport numpy as np\nimport pandas as pd\nimport collections\n\n\nbase_dir='data4-5/'\n\nsave_excel_path='data4-5.xlsx'\n\n#df =pd.DataFrame(columns=['AED_ID','Monitor_ID','Display','Statue_monitor','R1','R1_','R2','R2_','G1','G1_','G2','G2_','B1','B1_','B2','B2_','C1','C1_','C2','C2_'],index=None)\nfeatures=['AED_ID','Monitor_ID','Display','Statue_monitor','R1','R1_','R2','R2_','G1','G1_','G2','G2_','B1','B1_','B2','B2_','C1','C1_','C2','C2_']\nmydic=dict(zip(features,[[] for _ in range(len(features))]))\n\nfile_list=os.listdir(base_dir) \nfor name in file_list:\n path=os.path.join(base_dir,name)\n \n\nwith open(path,'r') as f:\n txt=f.readlines()\ns=''\nfor entry in txt:\n s+=entry\ns=s.replace('\\n','')\n \ndef f(s,i,pattern):\n len_s=len(s)\n len_p=len(pattern)\n if i+len_p >= len_s:\n return False\n for k in range(len_p):\n if s[i+k]!=pattern[k]:\n return False\n return True\n\n\ndef match(a,start,end):\n box=[]\n length=len(a)\n s_len=len(start)\n e_len=len(end)\n ss=-1\n\n \n \n for i,v in enumerate(a):\n if ss>0 :\n if f(a,i,end):\n box.append(a[ss:i])\n i=i+e_len\n ss=-1\n else:\n if f(a,i,start):\n ss=i+s_len\n i=ss\n else:\n if f(a,i,start):\n ss=i+s_len\n i=ss\n return box\n \n\nnn=match(s,'[',']')\nfor item in nn:\n s=s.replace('['+item+']','')\nhh=match(s,'*B,','&') \n\n\n\n\n\n\n\n\n\n\n\ndef appendDic(path):\n print (path)\n\n# path='data/7315-m1-d0.txt'\n aed_id=path.split('/')[-1].split('-')[0]\n m_id=int(path.split('m')[-1].split('-')[0])\n display=int(path.split('d')[-1].split('.')[0])\n \n with open(path,'r') as f:\n txt=f.readlines()\n s=''\n for entry in txt:\n s+=entry\n s=s.replace('\\n','')\n \n nn=match(s,'[',']')\n for item in nn:\n s=s.replace('['+item+']','')\n hh=match(s,'*B,','&') \n \n \n for xx in hh:\n xx=re.split('[,: ]',xx)\n x=[val for val in xx if val not in ['R','G','B','C','']]\n \n if len(x)<17:\n continue\n mydic['AED_ID'].append(aed_id)\n mydic['Monitor_ID'].append(m_id)\n mydic['Display'].append(display)\n# mydic['Statue_monitor'].append(x[0])\n for i in range(len(x)):\n mydic[features[i+3]].append(x[i])\n \n \nfile_list=os.listdir(base_dir) \nfor name in file_list:\n path=os.path.join(base_dir,name)\n appendDic(path)\n\ndf=pd.DataFrame(mydic,columns=features) \nc1_=df['C1_']\nc2_=df['C2_']\nc12_=zip(c1_,c2_)\ncounter=collections.Counter(c12_)\nmapper={}\nfor key in counter:\n if counter[key]!=1:\n mapper[key[0]]=key[1]\n \n\nfor i in range(df.shape[0]):\n df.loc[i,'C2_']=mapper[df.loc[i,'C1_']]\n\n\ndf['AED_ID']=df['AED_ID'].astype('str')\ndf[features[1:]]=df[features[1:]].astype('int')\ndf.to_excel(save_excel_path,index=False) 
\n\n\ndark=df[(df['AED_ID']=='73654011061') | (df['AED_ID']=='73654011061d')]\n","sub_path":"AED4/processing2.py","file_name":"processing2.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"32876215","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n######################################################\n# Dextripador\n# File: FileFormats/VDEX.py\n# Version: 0.7\n######################################################\n\nimport os\nimport sys\n\nfrom FileWork import *\nfrom DextractorException import *\nfrom FileFormats.DEX import DEXHeader\n\nVDEX_MAGIC_TYPE = c_ubyte * 4\nVDEX_VERIFIER_DEPS_VERSION_TYPE = c_ubyte * 4\nVDEX_DEX_SECTION_VERSION_TYPE = c_ubyte * 4\n\n\nclass VDEXFile():\n '''\n Parser for VDEX File Header, in this file\n we will find the DEX files after OAT version\n 124 (Android 8.0).\n\n VDEXFile {\n uint8_t magic_[4]\n uint8_t verifier_deps_version_[4]\n uint8_t dex_section_version_[4]\n uint32_t number_of_dex_files_\n uint32_t verifier_deps_size_\n uint32_t bootclasspath_checksums_size_\n uint32_t class_loader_context_size_\n }\n '''\n\n def __init__(self, file_pointer):\n self.file_p = file_pointer\n self.header_initialized = False\n self.dex_file = None\n\n self.magic = VDEX_MAGIC_TYPE()\n self.verifier_deps_version = VDEX_VERIFIER_DEPS_VERSION_TYPE()\n self.dex_section_version = VDEX_DEX_SECTION_VERSION_TYPE()\n self.number_of_dex_files = c_uint()\n self.verifier_deps_size = c_uint()\n self.bootclasspath_checksums_size = c_uint()\n self.class_loader_context_size = c_uint()\n\n def print_header(self):\n if not self.header_initialized:\n return\n\n print(\"\\n==================================\")\n print(\"VEX File Header\")\n print(\"==================================\")\n\n sys.stdout.write(\"\\nVDEX Magic: \")\n\n for i in range(ctypes.sizeof(VDEX_MAGIC_TYPE)):\n sys.stdout.write(\"%02X \" % (self.magic[i]))\n\n sys.stdout.write(\"(%s)\\n\" % ctypes.cast(\n self.magic, ctypes.c_char_p).value)\n\n sys.stdout.write(\"\\nVerifier Deps Version: \")\n\n for i in range(ctypes.sizeof(VDEX_VERIFIER_DEPS_VERSION_TYPE)):\n sys.stdout.write(\"%02X \" % (self.verifier_deps_version[i]))\n\n sys.stdout.write(\"(%s)\\n\" % ctypes.cast(\n self.verifier_deps_version, ctypes.c_char_p).value)\n\n sys.stdout.write(\"\\nDEX Section Version: \")\n\n for i in range(ctypes.sizeof(VDEX_DEX_SECTION_VERSION_TYPE)):\n sys.stdout.write(\"%02X \" % (self.dex_section_version[i]))\n\n sys.stdout.write(\"(%s)\" % ctypes.cast(\n self.dex_section_version, ctypes.c_char_p).value)\n\n sys.stdout.write(\"\\nNumber of DEX files: %d\" %\n (self.number_of_dex_files.value))\n\n sys.stdout.write(\"\\nVerifier Deps Size: %d\" %\n (self.verifier_deps_size.value))\n \n sys.stdout.write(\"\\nBootclasspath checksums size: %d\" % \n (self.bootclasspath_checksums_size.value))\n \n sys.stdout.write(\"\\nClass Loader Context Size: %d\" %\n (self.class_loader_context_size.value))\n \n def parse_header(self, offset, file_size):\n self.file_p.seek(offset, FILE_BEGIN)\n\n for i in range(ctypes.sizeof(VDEX_MAGIC_TYPE)):\n self.magic[i] = read_file_le(\n self.file_p, BYTE, BYTE_SIZE, self.file_p.tell())\n \n for i in range(ctypes.sizeof(VDEX_VERIFIER_DEPS_VERSION_TYPE)):\n self.verifier_deps_version[i] = read_file_le(\n self.file_p, BYTE, BYTE_SIZE, self.file_p.tell())\n \n for i in range(ctypes.sizeof(VDEX_DEX_SECTION_VERSION_TYPE)):\n self.dex_section_version[i] = read_file_le( \n self.file_p, BYTE, BYTE_SIZE, 
self.file_p.tell())\n        \n        self.number_of_dex_files = read_file_le(\n            self.file_p, UINTEGER, UINTEGER_SIZE, self.file_p.tell())\n        \n        self.verifier_deps_size = read_file_le(\n            self.file_p, UINTEGER, UINTEGER_SIZE, self.file_p.tell())\n        \n        self.bootclasspath_checksums_size = read_file_le(\n            self.file_p, UINTEGER, UINTEGER_SIZE, self.file_p.tell())\n\n        self.class_loader_context_size = read_file_le(\n            self.file_p, UINTEGER, UINTEGER_SIZE, self.file_p.tell())","sub_path":"FileFormats/VDEX.py","file_name":"VDEX.py","file_ext":"py","file_size_in_byte":4210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"226591178","text":"import os  # required by take_op_on_dir below\n\n\ndef num2str_with_width(number, width):\n    if number >= 10**width:\n        raise Exception(\"input out of range: {} >= {}\".format(number, 10**width))\n    if number < 0:\n        raise Exception(\"only positive input allowed\")\n    format_str = \"%0{}d\".format(width)\n    return format_str%(number)\n\n\ndef take_op_on_dir(path, operate, *operate_args, recursive=False):\n    \"\"\"take operation on target dir, recursively or not\"\"\"\n    parents = os.listdir(path)\n    for parent in parents:\n        child = os.path.join(path, parent)\n        if os.path.isdir(child):\n            if recursive:\n                take_op_on_dir(child, operate, *operate_args, recursive=recursive)\n\n        else:\n            operate(child, *operate_args)
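\n\n# Hedged usage sketch (print_size and the \"/tmp\" path are illustrative, not\n# part of the original module): apply a callable to every file under a tree.\n#\n#   def print_size(p, label):\n#       print(label, os.path.getsize(p), p)\n#\n#   take_op_on_dir(\"/tmp\", print_size, \"size:\", recursive=True)\n#\n# Every non-directory child is passed to operate along with *operate_args;\n# subdirectories are walked only when recursive=True.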
\n","sub_path":"pyUtility.py","file_name":"pyUtility.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"632465854","text":"import yaml\n\nimport module_logger  # assumed project-local logging helper; config() references it below\n\ndef config():\n    \"\"\"Provides configuration parameters, store information, api_keys\n    \"\"\"\n    try:\n        with open(\"config.yml\", 'r') as ymlfile:\n            return yaml.load(ymlfile, Loader=yaml.BaseLoader)\n    except FileNotFoundError:\n        logger = module_logger.configure('default')\n        logger.error('Could not find configuration file config.yml')\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"624554856","text":"import re\n\n# Write a Python program to search the numbers (0-9) of length\n# between 1 to 3 in a given string.\nmatchObj = re.finditer(r\"[0-9]{1,3}\", \"Exercises number 1, 12, 13, and 345 are important\")\nfor n in matchObj:\n    print(n.group(0))\n\n\n# Write a Python program to search some literal strings in a string.\n# Sample text : 'The quick brown fox jumps over the lazy dog.'\n# Searched words : 'fox', 'dog', 'horse'\nwords = ['fox', 'dog', 'horse']\ntext = \"The quick brown fox jumps over the lazy dog.\"\nfor word in words:\n    if re.search(word, text):\n        print(\"Matched!\")\n    else:\n        print('Not Matched!')\n\n\n# Write a Python program to search a literal string in a string and also find the\n# location within the original string where the pattern occurs.\n# Sample text : 'The quick brown fox jumps over the lazy dog.'\n# Searched words : 'fox'\nsearched = 'fox'\ntext = 'The quick brown fox jumps over the lazy dog.'\nmatch = re.search(searched, text)\ns = match.start()\ne = match.end()\nprint('Found \"%s\" in \"%s\" from %d to %d ' % \\\n    (match.re.pattern, match.string, s, e))\n\n\n# Write a Python program to find the substrings within a string.\n# Sample text : 'Python exercises, PHP exercises, C# exercises'\n# Pattern : 'exercises'\n# Note: There are two instances of exercises in the input string.\ntext = 'Python exercises, PHP exercises, C# exercises'\npattern = 'exercises'\nmatchedObj = re.findall(pattern, text)\nfor match in matchedObj:\n    print(match)\n\n\n# Write a Python program to find the occurrence and position of the substrings within a string.\ntext = 'Python exercises, PHP exercises, C# exercises'\npattern = 'exercises'\nmatchedObj = re.finditer(pattern, text)\nfor match in matchedObj:\n    s = match.start()\n    e = match.end()\n    print(text[s:e], s, \"-\", e)\n\n\n# Write a Python program to replace whitespaces with an underscore and vice versa.\nprint(re.sub(r'\\s', '_', \"The quick brown fox jumps over the lazy dog.\"))\n# # or\nprint(\"The quick brown fox jumps over the lazy dog.\".replace(\" \", \"_\"))\n# and vice versa:\nprint(re.sub(r'_', ' ', \"The_quick_brown_fox_jumps_over_the_lazy_dog.\"))\n\n\n# Write a Python program to extract year, month and date from an url.\ntext = \"https://www.washingtonpost.com/news/football-insider/wp/2016/09/02/odell-beckhams-fame-rests-on-one-stupid-little-ball-josh-norman-tells-author/\"\nprint(re.findall(r'(\\d{4})/(\\d{1,2})/(\\d{1,2})', text))\n\n\n# Write a Python program to convert a date of yyyy-mm-dd format to dd-mm-yyyy format.\ndate = \"2026-01-02\"\nprint(date)\nprint(re.sub(r'(\\d{4})-(\\d{1,2})-(\\d{1,2})', '\\\\3-\\\\2-\\\\1', date))\n\n\n# Write a Python program to match if two words from a list of words starting with letter 'P'.\nwords = [\"Python PHP\", \"Java JavaScript\", \"c c++\"]\nfor word in words:\n    print(re.findall(r'(P\\w+)\\W(P\\w+)', word))\n\n\n\n","sub_path":"RegEx/dev_exercises/w3resource_part2.py","file_name":"w3resource_part2.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"587796072","text":"#!/usr/bin/python\n\nfile_object = open(\"A-small-attempt1.in\", \"r\")\nfile_write = open(\"A-small-output.out\", \"w\")\nnum = int(file_object.readline())\nfor i in range(num): #number of cases\n    for j in range(2): #two inputs for each\n        answer = int(file_object.readline())\n        array = []\n        for k in range(4): #make array\n            array.append(file_object.readline().split())\n        if j == 0: #if first array, save the row\n            choice = array[answer - 1]\n        else: #if second, save the second row\n            row = array[answer - 1]\n    ans = [x for x in choice if x in row]\n    if len(ans) == 1:\n        file_write.write(\"Case #{0}: {1}\\n\".format(i + 1, ans[0]))\n    elif len(ans) == 0:\n        file_write.write(\"Case #{0}: Volunteer cheated!\\n\".format(i + 1))\n    else:\n        file_write.write(\"Case #{0}: Bad magician!\\n\".format(i + 1))\n\nfile_object.close()","sub_path":"solutions_python/Problem_135/1602.py","file_name":"1602.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"537823411","text":"# Describes images with SURF keypoint detection and description.\nimport numpy, cv2\n\n\ndef surfDescribe(image, kpCount=2000, hessian=100.0, mask=[[]]):\n    if image is None:\n        return [], []\n\n    # obtains the gray-scaled version of the given image\n    #gsImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n    gsImage = image\n    if len(image.shape) > 2:\n        gsImage = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n    # creates a mask to ignore eventual black borders\n    _, bMask = cv2.threshold(cv2.normalize(gsImage, alpha=0, beta=255, norm_type=cv2.cv.CV_MINMAX),\n                             1, 255, cv2.THRESH_BINARY)\n    bMask = cv2.convertScaleAbs(bMask)\n\n    # combines the border mask to an eventual given mask\n    if mask != [[]]:\n        mask = cv2.bitwise_and(mask, bMask)\n    else:\n        mask = bMask\n\n    # detects the SURF keypoints\n    surfDetectorDescriptor = cv2.SURF(hessian)\n    keypoints = 
surfDetectorDescriptor.detect(gsImage, mask)\n    descriptions = []\n\n    # describes the obtained keypoints\n    if len(keypoints) > 0:\n        # removes the weakest keypoints (according to hessian)\n        keypoints = sorted(keypoints, key=lambda match: match.response, reverse=True)\n        del keypoints[kpCount:]\n\n        # describes the selected keypoints\n        keypoints, descriptions = surfDetectorDescriptor.compute(gsImage, keypoints)\n\n    # returns keypoints and descriptions\n    return keypoints, descriptions\n","sub_path":"provenance/notredame/ImageDescriptor.py","file_name":"ImageDescriptor.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"615032956","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\ndef get_detected_line_y(speed):\n    # Compute the detection y-coordinate used to check for a stop line\n    # (slower speeds look further down the frame; branch order matters,\n    # since testing speed <= 50 first would shadow the other cases)\n    detected_line_y = 90\n    if speed <= 30:\n        detected_line_y += 200\n    elif speed <= 40:\n        detected_line_y += 80\n    return detected_line_y\n\n\ndef get_cross_value_count(frame, y):\n    # Count the length of each run of equal pixel values where colors\n    # cross along the row at the given y-coordinate\n    value_counts = []\n\n    # To detect the first run, initialize value with the pixel at (0, y)\n    value = frame[y][0]\n    count = 0\n\n    height, width = frame.shape[:2]\n    for x in range(width):\n        # If the pixel at (x, y) equals value, keep counting the run\n        if frame[y][x] == value:\n            count += 1\n        # Otherwise, store the run length in the list, then reset value and the count\n        else:\n            value_counts.append(count)\n            value = frame[y][x]\n            count = 1\n    value_counts.append(count)\n    value_counts2 = [i for i in value_counts if i>=3]\n    # \"\"\"\n    # Explain Matrix\n    # \"\"\"\n    # import cv2 as cv\n    # explain_image = cv.cvtColor(frame, cv.COLOR_GRAY2BGR)\n    # cv.rectangle(explain_image, (0, y-1), (explain_image.shape[1], y+1), (0, 0, 255), 1)\n    # cv.imshow(\"origin\" + str(y), explain_image)\n    return value_counts2\n\n\ndef is_detect_crossline(frame, speed, delta_y=100, cross_count_thresh=14):\n    # Compute the upper/lower y-coordinate bounds used to check for a stop line\n    detected_line_up = get_detected_line_y(speed)\n    detected_line_down = detected_line_up + delta_y\n    lens_counts = []\n    ys = []\n    cross_count_y0 = get_cross_value_count(frame, 0)\n    lens_counts.append(len(cross_count_y0))\n    height, width = frame.shape[:2]\n    cycles = (height-90)//delta_y\n\n    length_tresh = 40\n    for i in range(cycles):\n        y_detect_line = 90 + i * delta_y\n        num = get_cross_value_count(frame, y_detect_line)\n        lens_counts.append(len(num))\n        ys.append(y_detect_line)\n\n        if len(num)>cross_count_thresh:\n            j = 0\n            y_up, y_down = y_detect_line-1, y_detect_line+1\n\n            while y_down<600:\n                kkk = get_cross_value_count(frame, y_down)\n                if len(kkk) < cross_count_thresh:\n                    break\n                y_down +=1\n            while y_up>0:\n                kkk = get_cross_value_count(frame, y_up)\n                if len(kkk) < cross_count_thresh:\n                    break\n                y_up -= 1\n            if y_down - y_up >length_tresh:\n                print(\"range:\",y_up,y_down,\"list\",num)\n                return True, y_down\n\n    #print(lens_counts)\n\n\n\n    # Count the crossing value runs detected at the upper/lower bounds\n    cross_count_up = get_cross_value_count(frame, detected_line_up)\n    cross_count_down = get_cross_value_count(frame, detected_line_down)\n\n\n    '''\n    \n    # Count detections at the upper line\n    if len(cross_count_up) > cross_count_thresh:\n        print \"[STOPLINE] CROSS LINE UP\", len(cross_count_up)\n        return True, \"up\"\n    \n    if len(cross_count_down) > cross_count_thresh:\n        print \"[STOPLINE] CROSS LINE DOWN\", len(cross_count_down)\n        return True, \"down\"\n    '''\n    return False, 0\n\n\n\n# def is_stopline(y_th=240, len_th=100):\n#     lis = _detect_stopline(y_th,'stop')\n#     if len(lis)!=0 and max(lis)>=len_th:\n#         print(\"stop 
line\",max(lis))\n# return True\n# return False\n\n\n\n# if __name__ == '__main__':\n \n# speed = 50\n# da = 7\n# thres_50 = 90\n# if speed == 30:\n# #thres_y = 200\n# thres_y = thres_50 + 200\n# elif speed == 40:\n# #thres_y = 100\n# thres_y = thres_50 + 80\n# elif speed == 50:\n# thres_y = thres_50\n# stop = Stopline('174', speed=speed, thres_L=200)\n# rospy.init_node('stopline')\n# rospy.Subscriber(\"/usb_cam/image_raw\", Image, stop.camera_callback)\n# pub = rospy.Publisher('xycar_motor', xycar_motor, queue_size=1)\n\n# while not rospy.is_shutdown():\n# if stop.cam_image.size != (640*480*3):\n# print(\"not yet\")\n# continue\n\n# #is_stline = stop.is_stopline(y_th=240, len_th=300)\n# is_crossline, where = stop.is_crossline(y_th=thres_y, num_th=15)\n# if is_crossline:\n# if where =='up':\n# for _ in range(15):\n# drive(da,20,pub)\n# time.sleep(0.1)\n# #print(\"stopline\")\n# for _ in range(30): # 60\n# #print()\n# drive(da, 0, pub)\n# time.sleep(0.1)\n# for _ in range(20):\n# #drive(da,0,pub)\n# drive(da, speed*2//3, pub) # 30\n# time.sleep(0.1)\n# #speed = 0\n# else:\n# drive(da, speed ,pub)\n\n# if cv2.waitKey(1) & 0xFF == ord('q'):\n# break\n","sub_path":"soha_workspace/src/rally_3rd/src/module/stop_line/stop_line.py","file_name":"stop_line.py","file_ext":"py","file_size_in_byte":5043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"402033112","text":"import collections\nimport typing\nfrom dataclasses import dataclass\nfrom .grammar import Nt\nfrom .actions import Action\n\n@dataclass(frozen=True)\nclass Edge:\n \"\"\"An edge in a Parse table is a tuple of a source state and the term followed\n to exit this state. The destination is not saved here as it can easily be\n inferred by looking it up in the parse table.\n\n Note, the term might be `None` if no term is specified yet. This is useful\n when manipulating a list of edges and we know that we are taking transitions\n from a given state, but not yet with which term.\n\n src: Index of the state from which this directed edge is coming from.\n\n term: Edge transition value, this can be a terminal, non-terminal or an\n action to be executed on an epsilon transition.\n \"\"\"\n src: int\n term: typing.Union[str, Nt, Action]\n\n def __str__(self):\n return \"{} -- {} -->\".format(edge.src, str(edge.term))\n\n\n@dataclass(frozen=True)\nclass APS:\n # To fix inconsistencies of the grammar, we have to traverse the grammar\n # both forward by using the lookahead and backward by using the state\n # recovered from following reduce actions.\n #\n # To do so we define the notion of abstract parser state (APS), which is a\n # class which represents the known state of the parser, relative to its\n # starting point.\n #\n # An APS does not exclusively start at the parser entry point, but starts\n # from any state of the parse table by calling `APS.start`. Then we walk\n # the parse table forward, as-if we were shifting tokens or epsilon edges\n # in the parse table. 
The function `aps.shift_next(parse_table)` will\n    # explore all possible futures reachable from the starting point.\n    #\n    # As the parse table is explored, new APS are produced by\n    # `aps.shift_next(parse_table)`, which contain the new state of the\n    # parser and the history which has been seen by the APS since it started.\n    slots = ['stack', 'shift', 'lookahead', 'replay', 'history']\n\n    # This is the known stack at the location where we started investigating.\n    # As more history is discovered by resolving reduce actions, this stack\n    # would be filled with the predecessors which have been visited before\n    # reaching the starting state.\n    stack: typing.List[Edge]\n\n    # This is the stack as manipulated by an LR parser. States are shifted to\n    # it, including actions, and popped from it when visiting a reduce action.\n    shift: typing.List[Edge]\n\n    # This is the list of terminals and non-terminals encountered by shifting\n    # edges which are not replaying tokens.\n    lookahead: typing.List[typing.Union[str, Nt]]\n\n    # This is the list of lookahead terminals and non-terminals which remain\n    # to be shifted. This list corresponds to terminals and non-terminals which\n    # were necessary for removing inconsistencies, but have to be replayed\n    # after shifting the reduced non-terminals.\n    replay: typing.List[typing.Union[str, Nt]]\n\n    # This is the list of edges visited since the starting state.\n    history: typing.List[Edge]\n\n    @staticmethod\n    def start(state):\n        \"Return an Abstract Parser State starting at a given state of a parse table\"\n        edge = Edge(state, None)\n        return APS([edge], [edge], [], [], [])\n\n    def shift_next(self, pt):\n        \"\"\"Yield an APS for each state reachable from this APS in a single step,\n        by handling a single term (terminal, nonterminal, or action).\n\n        All yielded APS represent context information around the same\n        starting state as `self`, either by having additional lookahead terms,\n        or a larger stack representing the path taken to reach the starting\n        state.\n\n        For each outgoing edge, it builds a new APS which represents the state\n        of the Parser if we were to have taken this edge. Only valid APS are\n        yielded given the context provided by `self`.\n\n        For example, we cannot reduce to a path which is different than what is\n        already present in the `shift` list, or shift a term different than the\n        next term to be shifted from the `replay` list.\n\n        \"\"\"\n\n        st, sh, la, rp, hs = self.stack, self.shift, self.lookahead, self.replay, self.history\n        last_edge = sh[-1]\n        state = pt.states[last_edge.src]\n        if self.replay == []:\n            for term, to in state.shifted_edges():\n                edge = Edge(last_edge.src, term)\n                new_sh = self.shift[:-1] + [edge]\n                to = Edge(to, None)\n                yield APS(st, new_sh + [to], la + [term], rp, hs + [edge])\n        else:\n            term = self.replay[0]\n            rp = self.replay[1:]\n            if term in state:\n                edge = Edge(last_edge.src, term)\n                new_sh = self.shift[:-1] + [edge]\n                to = state[term]\n                to = Edge(to, None)\n                yield APS(st, new_sh + [to], la, rp, hs + [edge])\n\n        term = None\n        rp = self.replay\n        for a, to in state.epsilon:\n            edge = Edge(last_edge.src, a)\n            prev_sh = self.shift[:-1] + [edge]\n            # TODO: Add support for Lookahead and flag manipulation rules, as\n            # both of these would invalidate potential reduce paths.\n            if a.update_stack():\n                reducer = a.reduce_with()\n                for path, reduced_path in pt.reduce_path(prev_sh):\n                    # reduce_paths contains the chains of states shifted,\n                    # including epsilon transitions, in order to reduce the\n                    # nonterminal. 
When reducing, the stack is reset to\n                    # head, and the nonterminal `term.nt` is pushed, to resume\n                    # in the state `to`.\n\n                    # print(\"Compare shifted path, with reduced path:\\n\\tshifted = {}\\n\\treduced = {}, \\n\\taction = {},\\n\\tnew_path = {}\\n\".format(\n                    #     \" \".join(edge_str(e) for e in prev_sh),\n                    #     \" \".join(edge_str(e) for e in path),\n                    #     str(a),\n                    #     \" \".join(edge_str(e) for e in reduced_path),\n                    # ))\n                    if prev_sh[-len(path):] != path[-len(prev_sh):]:\n                        # If the reduced production does not match the shifted\n                        # state, then this reduction does not apply. This is\n                        # the equivalent result as splitting the parse table\n                        # based on the predecessor.\n                        continue\n\n                    # The stack corresponds to the stack present at the\n                    # starting point. The shift list corresponds to the actual\n                    # parser stack as we iterate through the state machine.\n                    # Each time we consume all the shift list, this implies\n                    # that we had extra stack elements which were not present\n                    # initially, and therefore we are learning about the\n                    # context.\n                    new_st = path[:max(len(path) - len(prev_sh), 0)] + st\n                    assert pt.is_valid_path(new_st)\n\n                    # The shift list corresponds to the stack which is used in\n                    # an LR parser, in addition to all the states which are\n                    # epsilon transitions. We pop from this list the reduced\n                    # path, as long as it matches. Then all popped elements are\n                    # replaced by the state that we visit after replaying the\n                    # non-terminal reduced by this action.\n                    new_sh = prev_sh[:-len(path)] + reduced_path\n                    assert pt.is_valid_path(new_sh)\n\n                    # When reducing, we replay terms which got previously\n                    # pushed on the stack as our lookahead. These terms are\n                    # computed here such that we can traverse the graph from\n                    # `to` state, using the replayed terms.\n                    new_replay = []\n                    if reducer.replay > 0:\n                        new_replay = [ edge.term for edge in path if pt.term_is_stacked(edge.term) ]\n                        new_replay = new_replay[-reducer.replay:]\n                    new_replay = new_replay + rp\n                    new_la = la[:max(len(la) - reducer.replay, 0)]\n                    yield APS(new_st, new_sh, new_la, new_replay, hs + [edge])\n            else:\n                to = Edge(to, None)\n                yield APS(st, prev_sh + [to], la, rp, hs + [edge])\n\n    def string(self, name = \"aps\"):\n        return \"\"\"{}.stack = [{}]\n{}.shift = [{}]\n{}.lookahead = [{}]\n{}.replay = [{}]\n{}.history = [{}]\n        \"\"\".format(\n            name, \" \".join(str(e) for e in self.stack),\n            name, \" \".join(str(e) for e in self.shift),\n            name, \", \".join(repr(e) for e in self.lookahead),\n            name, \", \".join(repr(e) for e in self.replay),\n            name, \" \".join(str(e) for e in self.history)\n        )\n\n    def __str__(self):\n        return self.string()\n\ndef aps_lanes_str(aps_lanes, header = \"lanes:\", name = \"\\taps\"):\n    return \"{}\\n{}\".format(header, \"\\n\".join(aps.string(name) for aps in aps_lanes))\n\n","sub_path":"jsparagus/aps.py","file_name":"aps.py","file_ext":"py","file_size_in_byte":9283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"351732371","text":"import turtle\nimport random\nfrom itertools import cycle\nimport copy\nfrom indclass import Board\n\n\nclass Population(object):\n    def __init__(self, size, generations, poolsize, mRate, given_values, interval):\n        self.populationsize = size\n        self.mRate = mRate\n        self.poolsize = poolsize\n        self.given_values = given_values\n        self.generations = generations\n        self.population = []\n        self.fitnesses = []\n        self.top_individuals = []\n        self.generation = 0\n        self.populate()\n        self.get_top_individuals()\n        # self.get_roullette()\n        self.top_fitness = 
self.top_individuals[0].get_fitness()\n # print(self.top_individuals[0].get_fitness())\n # self.create_screen()\n self.og = mRate\n self.mult = .005\n self.max = .04\n self.regenerate(interval)\n self.create_screen()\n self.update_screen()\n self.wn.exitonclick()\n\n def regenerate(self, interval): # TODO instead of retaining the overall max, just retain the max of the generation\n for i in range(self.generations):\n self.repopulate()\n self.mutate_all()\n self.get_top_individuals()\n # self.get_roullette()\n\n # print when the fitness changes\n if interval == 0:\n if self.top_fitness != self.top_individuals[0].get_fitness():\n # print(i, self.top_fitness, self.mRate)\n # self.top_fitness = self.top_individuals[0].get_fitness()\n # self.update_screen()\n pass\n elif i % interval == 0:\n # print at an interval\n print(i, self.top_individuals[0].get_fitness(), self.mRate)\n\n # update fitness, reset mult\n if self.top_fitness > self.top_individuals[0].get_fitness():\n self.top_fitness = self.top_individuals[0].get_fitness()\n print(i, self.top_fitness, self.mRate)\n # if i>1000:\n # self.update_screen()\n self.mRate = self.og\n elif self.top_individuals[0].get_fitness() == self.top_fitness:\n # increment mult towards max\n self.mRate = self.mRate + self.mult\n self.mRate = round(min(self.mRate + self.mult, self.max), 7)\n # reset mult\n if self.mRate == self.max:\n self.mRate = self.og\n if self.top_individuals[0].get_fitness() == 0:\n # solution reached\n self.solution = self.top_individuals[0].board\n break\n else: # end of generations\n self.solution = self.top_individuals[0].board\n\n def populate(self):\n for i in range(self.populationsize):\n self.population.append(Board(self.given_values, mutation=self.mRate))\n\n def repopulate(self):\n self.population = self.population[:1]\n for i in range(1, self.populationsize):\n self.crossover()\n\n def crossover(self):\n if self.poolsize == 1:\n board = copy.deepcopy(self.top_individuals[0].board)\n self.population.append(Board(self.given_values, board, self.mRate))\n return\n else:\n choices = [0, random.randint(0, self.poolsize - 1)]\n # choices = random.sample(range(0, self.poolsize),2)\n board = copy.deepcopy(self.top_individuals[choices[0]].board)\n for j in range(1):\n roworcol = random.randint(0, 1)\n index = random.randint(0, 8)\n if roworcol == 0:\n board = board[:index] + self.top_individuals[choices[1]].get_row(index) + board[index + 1:]\n else:\n col = self.top_individuals[choices[1]].get_col(index)\n for i in range(len(col)):\n board[i][index] = col[i]\n self.population.append(Board(self.given_values, board, self.mRate))\n\n def mutate_all(self):\n for i in range(len(self.population)):\n self.population[i].mutate()\n\n def get_fitnesses(self):\n self.fitnesses = [(index, x.get_fitness()) for (index, x) in enumerate(self.population)]\n\n def get_roullette(self):\n self.get_fitnesses()\n normalizer = sum(element[1] for element in self.fitnesses)\n while len(self.top_individuals) < self.poolsize:\n rando = random.choice(self.fitnesses)\n if random.uniform(0, 1) > rando[1] / normalizer:\n if self.population[rando[0]] not in self.top_individuals:\n self.top_individuals.append(self.population[rando[0]])\n\n def get_top_individuals(self):\n self.get_fitnesses()\n self.fitnesses.sort(key=lambda r: r[1])\n self.top_individuals = [self.population[self.fitnesses[i][0]] for i in range(self.poolsize)]\n\n def create_screen(self):\n self.wn = turtle.Screen()\n self.Droo = turtle.Turtle()\n self.Droo.speed(0)\n self.pos = self.Droo.pos()\n\n def 
update_screen(self):\n        self.wn.clear()\n        pcycle = cycle([(self.pos[0] + i * 20, self.pos[1] - j * 20) for j in range(0, 9) for i in range(0, 9)])\n        x, y = self.pos\n        self.Droo.penup()\n        self.Droo.goto(next(pcycle))\n        for row in self.top_individuals[0].board:\n            for cell in row:\n                self.Droo.write(cell, font=('Arial', 16, 'normal'))\n                self.Droo.goto(next(pcycle))\n        self.Droo.goto(-160, 30)\n        self.Droo.write('{}% Solved'.format(round(100 - self.top_fitness * 100, 2)), font=('Arial', 16, 'normal'))\n","sub_path":"popclass.py","file_name":"popclass.py","file_ext":"py","file_size_in_byte":5623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"64741734","text":"import random as random\n# Multiple parents, multiple children, with overlap\n\ndef func(x):\n    return x**3-2*x**2+1\n\ndef ms(h,p):\n    hijos = [0 for i in range(0,h)] # initialized to 0\n    valHijos = [0 for i in range(0, h)] # initialized to 0\n    padres = [random.uniform(0,5) for i in range(0,p)]\n    valPadres = [func(padres[i]) for i in range(0, p)]\n    K=0\n    it = int(input(\"Number of iterations: \"))\n    while(K 1 :\n        successList.sort()\n\n\n    return '\\n'.join(successList)\n\n\n\nif __name__ == '__main__':\n\n\n    tc = int(raw_input())\n    for i in xrange(tc):\n        dataStr=[]\n\n        pFilter = raw_input()\n        n = int(raw_input())\n        for j in xrange(n):\n            dataStr.append(raw_input())\n\n        print(wildCardStr(pFilter,n,dataStr))\n\n\n    '''\n\n    print(wildCardStr('he?p',3,['help','heap','helpp']))\n    print(wildCardStr('*p*',3,['help','papa','hello']))\n    print(wildCardStr('*bb*',1,['babbbc']))\n    print(wildCardStr('t*l?*o*r?ng*s',1,['thelordoftherings']))\n\n    '''","sub_path":"8.2_WILDCARD/WildCardN.py","file_name":"WildCardN.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"454664775","text":"from tkinter import *\n\nclass desenhar():\n    def __init__(self, janela):\n        self.janela = janela\n\n        self.font1 = ('Arial Black', 20, 'italic')\n        self.largura = 900\n        self.altura = 900\n        self.ultimo = [400, 400]\n\n        self.janela.title('DESENHANDO')\n\n        # creating the widgets\n        self.f1 = Frame(self.janela, bg='black')\n        self.f2 = Frame(self.janela, bg='black')\n        self.canvas = Canvas(self.f1, width=800, height=800, bg='white', cursor='star')\n        self.bt1 = Button(self.f2, text='CIMA', font=self.font1, bg='yellow', fg='blue', cursor='hand2', command=self.Cima)\n        self.bt2 = Button(self.f2, text='ESQUERDA', font=self.font1, bg='yellow', fg='blue', cursor='hand2', command=self.Esquerda)\n        self.bt3 = Button(self.f2, text='DIREITA', font=self.font1, bg='yellow', fg='blue', cursor='hand2', command=self.Direita)\n        self.bt4 = Button(self.f2, text='BAIXO', font=self.font1, bg='yellow', fg='blue', cursor='hand2', command=self.Baixo)\n        #self.canvas.create_line(400, 400, 400, 400, fill='blue')\n\n        # placing them on the screen\n        self.f1.pack()\n        self.f2.pack()\n        self.canvas.pack()\n        self.bt1.pack(side=LEFT)\n        self.bt2.pack(side=LEFT)\n        self.bt3.pack(side=LEFT)\n        self.bt4.pack(side=LEFT)\n\n        self.janela.mainloop()\n\n    def Direita(self):\n        x, y = self.ultimo[0]+ 10, self.ultimo[1]\n        self.canvas.create_line(self.ultimo, x, y, fill='blue', width=10)\n        self.ultimo = [x, y]\n\n\n    def Cima(self):\n        x, y = self.ultimo[0], self.ultimo[1] - 10\n        self.canvas.create_line(self.ultimo, x, y, fill='green', width=10)\n        self.ultimo = [x, y]\n\n    def Baixo(self):\n        x, y = self.ultimo[0], self.ultimo[1] + 10\n        self.canvas.create_line(self.ultimo, x, y, fill='black', width=10)\n        self.ultimo = [x, 
y]\n\n def Esquerda(self):\n x, y = self.ultimo[0] - 10, self.ultimo[1]\n self.canvas.create_line(self.ultimo, x, y, fill='red', width=10)\n self.ultimo = [x, y]\n\n\n","sub_path":"Projetos/App_geral/app2.py","file_name":"app2.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"607580616","text":"import os\nimport json\nfrom datetime import datetime\n\nfrom dal import autocomplete\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.shortcuts import get_object_or_404, render\nfrom django.urls import reverse\nfrom django.views import generic\nfrom django.utils import timezone\nfrom django.views import View\nfrom django.shortcuts import redirect\nfrom django.conf import settings\nfrom django.core.files.storage import FileSystemStorage\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import JsonResponse\nfrom django.contrib import messages\nfrom django.template.loader import render_to_string\nfrom django.db.models import Q\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.views.generic import CreateView\nfrom django.core import serializers\n\nfrom guardian.shortcuts import get_perms\n\nimport pandas as pd\nimport numpy as np\nimport json\nimport statistics\n\nimport uuid\nimport shutil\nfrom .models import ExpressionStudy, ExpressionData, Gene, Database\nfrom .forms import *\nfrom .graphs import getClasses, get_graph_data_full, get_graph_data_genes, getValues, getValuesExpression, get_density_graph_data_full, get_density_graph_gene_data_full, get_violin_graph_gene_data_full\n\nclass GeneAutocomplete(autocomplete.Select2QuerySetView):\n\n def get_result_value(self, result):\n return result.id\n\n def get_result_label(self, result):\n return result.symbol\n\n def get_queryset(self):\n query = self.q\n qs = Gene.objects.all()\n if query:\n qs = qs.filter(Q(symbol__icontains=query) | Q(synonyms__icontains=query)| Q(gene_id__icontains=query))\n return qs\n\ndef get_gene(request, gene_id):\n\n gene = get_object_or_404(Gene, id=gene_id)\n data = {'gene_id' : gene.gene_id, \"symbol\": gene.symbol, \"homolog_id\": gene.homolog_id, \"ensembl_id\": gene.ensemble_id}\n return JsonResponse(data)\n\ndef get_stud_db(request):\n db_ids = request.GET.getlist('db_ids[]') \n print (db_ids)\n db_ids = list(map(int, db_ids))\n print (db_ids)\n studies_list = []\n for db in db_ids :\n print(db)\n database =get_object_or_404(Database, id=db)\n print(database)\n studies = ExpressionStudy.objects.filter(database=database)\n studies_list.extend(studies)\n\n print(studies_list)\n\n table = render_to_string('studies/partial_study_table.html', {'studies': studies_list}, request)\n data = {'table_list' : table}\n\n return JsonResponse(data)\n\ndef index(request):\n columns = [\n \"article\",\n \"pmid\",\n \"ome\",\n \"technology\",\n \"species\",\n \"experimental_design\",\n \"topics\",\n \"tissues\",\n \"sex\",\n \"dev_stage\",\n \"age\",\n \"antibody\",\n \"mutant\",\n \"cell_sorted\",\n \"keywords\",\n ]\n\n all_studies = [study for study in ExpressionStudy.objects.exclude(data=None) if check_view_permissions(request.user, study)]\n studies = paginate(all_studies)\n form = ExpressionStudyFilterForm(studies=all_studies)\n table = render_to_string('studies/partial_study_table.html', {'studies': studies}, request)\n pagination = 
render_to_string('studies/partial_study_pagination.html', {'table': studies}, request)\n context = {'form': form, 'columns': columns, 'table': table, 'pagination': pagination}\n return render(request, 'studies/scatter_plot.html', context)\n\ndef document_select(request):\n\n if not \"id\" in request.GET:\n return redirect(reverse(\"studies:index\"))\n\n id_list = request.GET.getlist(\"id\")\n # Just in case\n if not all(x.isdigit() for x in id_list):\n return redirect(reverse(\"studies:index\"))\n\n studies = ExpressionStudy.objects.filter(id__in=id_list)\n if studies.count() == 0:\n return redirect(reverse(\"studies:index\"))\n\n table = render_to_string('studies/document_select.html', {'studies': studies}, request)\n data = {'table' : table}\n \n return JsonResponse(data)\n #return render(request, 'studies/document_select.html', {'studies': studies})\n\ndef show_graph(request):\n\n if not \"document_id\" in request.GET and not \"study_id\" in request.GET:\n return redirect(reverse(\"studies:index\"))\n\n document_id = request.GET[\"document_id\"]\n study_id = request.GET[\"study_id\"]\n # Just in case\n if not document_id.isdigit() or not study_id.isdigit():\n return redirect(reverse(\"studies:index\"))\n\n\n ##########################\n # File Statistique only for RGV files\n ##########################\n data = get_object_or_404(ExpressionData, id=document_id)\n\n data_stat = {}\n\n \n study = get_object_or_404(ExpressionStudy, id=study_id)\n form = GeneFilterForm()\n classes = getClasses(data)\n context = {'study': study, 'document': data, 'classes': classes, 'form': form}\n return render(request, 'studies/graph.html', context)\n\ndef get_graph_data(request):\n \n display_mode = \"scatter\"\n\n if \"mode\" in request.GET:\n display_mode = request.GET[\"mode\"]\n \n\n if not \"document_id\" in request.GET:\n return redirect(reverse(\"studies:index\"))\n\n document_id = request.GET[\"document_id\"]\n\n if not document_id.isdigit():\n return redirect(reverse(\"studies:index\"))\n\n data = get_object_or_404(ExpressionData, id=document_id)\n \n\n selected_class = request.GET.get('selected_class', None)\n\n if \"gene_id\" in request.GET:\n\n exp_list = []\n if \"|\" in request.GET['gene_id']:\n list_gene = request.GET['gene_id'].split(\"|\")\n for g_ in list_gene :\n gene = get_object_or_404(Gene, id=g_)\n exp_list.append(gene)\n else :\n gene = get_object_or_404(Gene, id=request.GET['gene_id'])\n exp_list.append(gene)\n if display_mode ==\"scatter\" :\n data = get_graph_data_genes(data,exp_list, selected_class)\n if display_mode ==\"density\" :\n data = get_density_graph_gene_data_full(data,exp_list, selected_class)\n if display_mode ==\"violin\" :\n data = get_violin_graph_gene_data_full(data,exp_list, selected_class)\n else:\n if display_mode ==\"scatter\" :\n data = get_graph_data_full(data, selected_class)\n if display_mode ==\"density\" :\n data = get_density_graph_data_full(data, selected_class)\n return JsonResponse(data)\n\ndef get_group_info(request):\n\n group = request.GET.get('group',None)\n sample = request.GET.get('sample',None)\n document_id = request.GET.get('document',None)\n \n data = get_object_or_404(ExpressionData, id=document_id)\n\n selected_class = request.GET.get('selected_class', None)\n\n expression_values_group = getValuesExpression(data, selected_class, group)\n\n\n table = render_to_string('studies/partial_group_info.html', {'genes_list': expression_values_group}, request)\n data = {'list' : expression_values_group, 'group':group}\n\n\n\n return 
JsonResponse(data)\n\n\ndef render_table(request):\n\n data = {}\n studies = ExpressionStudy.objects.exclude(data=None)\n kwargs = {}\n for key, value in request.GET.items():\n if value:\n if key == \"article\":\n kwargs[key + \"__icontains\"] = value\n elif key == \"technology\" or key == \"species\":\n kwargs[\"data__\" + key] = value\n elif key == \"page\":\n continue\n else:\n kwargs[key + \"__contains\"] = [value]\n\n \n studies = paginate([study for study in studies.filter(**kwargs).distinct() if check_view_permissions(request.user, study)], request.GET.get('page'))\n # Filter here\n table = render_to_string('studies/partial_study_table.html', {'studies': studies}, request)\n pagination = render_to_string('studies/partial_study_pagination.html', {'table': studies}, request)\n data['table'] = table\n data['pagination'] = pagination\n return JsonResponse(data)\n\ndef autocomplete_genes(request,taxonid):\n\n if request.is_ajax():\n query = request.GET.get('term','')\n qs = Gene.objects.all()\n qs = qs.filter(Q(tax_id__exact=int(taxonid)))\n qs = qs.filter(Q(symbol__icontains=query) | Q(synonyms__icontains=query)| Q(gene_id__icontains=query) & Q(tax_id__exact=int(taxonid)))\n results = []\n for gene in qs :\n results.append({'label' : gene.symbol, 'value':gene.symbol+\" (\"+str(gene.id)+\")\"})\n data = json.dumps(results[:10])\n else:\n data=\"fail\"\n mimetype = 'application/json'\n return HttpResponse(data, mimetype)\n\ndef paginate(values, query=None, count=5, is_ES=False):\n\n paginator = Paginator(values, count)\n\n try:\n val = paginator.page(query)\n except PageNotAnInteger:\n val = paginator.page(1)\n except EmptyPage:\n val = paginator.page(paginator.num_pages)\n\n return val\n\ndef check_view_permissions(user, study, strict=False):\n has_access = False\n if study.status == \"PUBLIC\" and not strict:\n has_access = True\n elif user.is_superuser:\n has_access = True\n elif user.is_authenticated and 'view_expressionstudy' in get_perms(user, study):\n has_access = True\n\n return has_access\n","sub_path":"sdap/studies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"45438057","text":"import numpy as np\nimport random as rd\nimport time\nimport math as mt\nimport sys\nimport copy\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib\nimport random\nimport globe\nimport numpy as np\nfrom gym.spaces.box import Box \n\ndef _init(globe, readDistance = False, MaxStep = 600, filename=None):\n\t#the location of UAV-RIS\n\tglobe.set_value('L_U', [80, 80, 20]) #[x, y, z]\n\t#the location of AP/BS\n\tglobe.set_value('L_AP', [0, 0, 10])\n\n\t#CSI parameters\n\tglobe.set_value('BW', mt.pow(10, 7)) #The bandwidth is 10 MHz\n\t# Noise power spectrum density is -174dBm/Hz;\n\tglobe.set_value('N_0', mt.pow(10, ((-174 / 3) / 10)))\n\tglobe.set_value('Xi', mt.pow(10, (3/10))) #the path loss at the reference distance D0 = 1m, 3dB;\n\t# urban env. 
from [Efficient 3-D Placement of an Aerial Base Station in Next Generation Cellular Networks] \n\t#and [Joint Trajectory-Task-Cache Optimization with Phase-Shift Design of RIS-Assisted UAV for MEC]\n\tglobe.set_value('a', 9.61)\n\tglobe.set_value('b', 0.16)\n\tglobe.set_value('eta_los', 1) \n\tglobe.set_value('eta_nlos', 20)\n\n\t# σ2 = -102dBm\n\tglobe.set_value('AWGN', mt.pow(10, (-102/10)))\n\t# number of RIS antenna\n\tglobe.set_value('N_ris', 100)\n\t#energy harvesting efficiency eta=0.7\n\tglobe.set_value('eta', 0.7) \n\t#path-loss exponent is α=3\n\tglobe.set_value('alpha', 3)\n\t# additional attenuation factor φ is 20 dB\n\tglobe.set_value('varphi', mt.pow(10, (20/10)))\n\t# max transmit Power from BS is 500W\n\tglobe.set_value('P_max', 5 * mt.pow(10, 5))#mt.pow(10, 43/10)\n\t#number of user \n\tglobe.set_value('N_u', 1)\n\t# carrier frequency is 750 MHz\n\tglobe.set_value('fc', 750 * mt.pow(10, 6))\n\t# speed of light\n\tglobe.set_value('c', 3 * mt.pow(10, 8))\n\t#minimal requirement of sinr 12db\n\tglobe.set_value('gamma_min', mt.pow(10, (12/10)))\n\t# the transmission power from the AP for a single user\n\tglobe.set_value('power_i', 0.5 * mt.pow(10, 3))\n\t# the length of a time slot\n\tglobe.set_value('t', int(MaxStep))\n\t#current time slot\n\tglobe.set_value('step', 0)\n\n\tglobe.set_value('kappa', mt.pow(10, (-30/10)))\n\tglobe.set_value('hat_alpha', 2.5)\n\tif readDistance == True:\n\t\tp = filename\n\t\twith open(p, encoding = 'utf-8') as f:\n\t\t\tdata = np.loadtxt(f, delimiter = \",\")\n\t\t\tdata.astype(np.int)\n\t\t\tglobe.set_value('DistanceRU', data)\n\telse :\n\t\tglobe.set_value('DistanceRU', D_RU(globe.get_value('t')))\n\t\n\t# tau is the radio of the energy-harvesting to time slot, default 0.5\n\t# lamda is the radio of the information transmit area, defalut 0.7\n\ndef _observation_space():\n # Return the observation space adjusted to match the shape of the processed\n # observations.\n box = Box(low=20, high=60, shape=(1,),\n dtype=np.float32) \n return box\n\ndef _action_space():\n # Return the actions space adjusted to match the shape of the processed\n # actions.\n box = Box(low=0, high=1, shape=(2,),\n dtype=np.float32) \n return box\n\ndef pl_BR():\n\tL_U = globe.get_value('L_U')\n\tL_AP = globe.get_value('L_AP')\n\ta = globe.get_value('a')\n\tb = globe.get_value('b')\n\tvarphi = globe.get_value('varphi')\n\talpha = globe.get_value('alpha')\n\n\ttheta = (180 / mt.pi) * mt.asin( ( (L_U[2] - L_AP[2]) / mt.sqrt(mt.pow(L_U[0], 2) + mt.pow(L_U[1], 2) + mt.pow((L_U[2] - L_AP[2]), 2))) )\n\tp_los = 1 + a * mt.exp(a * b - b * theta )\n\tp_los = 1 / p_los\n\n\tp_nlos = 1 - p_los\n\t# channel power gain (BS-RIS) with the los and nlos\n\tg_BR = (p_los + p_nlos * varphi) * mt.pow(mt.sqrt(mt.pow(L_U[0], 2) + mt.pow(L_U[1], 2) + mt.pow((L_U[2] - L_AP[2]), 2)), (0-alpha))\n\t\n\treturn g_BR\n\ndef EH(tau, lamda):\n\tpower_i = globe.get_value('power_i')\n\teta = globe.get_value('eta')\n\n\tg_BR = pl_BR()\n\tpower_total = power_i * globe.get_value('N_u')\n\tE_t = tau * eta * power_total * g_BR + (1 - tau) * (1 - lamda) * eta * power_total * g_BR\n\treturn E_t\n\ndef D_RU(num):\n\t# d_ru = [];\n\tresult = np.random.randint(20, 60, size=num)\n\t# print(\"====================\")\n\t# print(result)\n\t# for i in range(0,num):\n\t# \td_ru.append(random.randint(20,60))\n\tnp.savetxt(\"distance.csv\", result, delimiter=',')\n\treturn result\n\t\ndef capacity (distance, tau, lamda):\n\tkappa = globe.get_value('kappa')\n\that_alpha = globe.get_value('hat_alpha')\n\tpower_i = 
globe.get_value('power_i')\n\tAWGN = globe.get_value('AWGN')\n\tBW = globe.get_value('BW')\n\n\tfor x in range(0,globe.get_value('N_u')):\n\t\t# d_ru = globe.get_value('DistanceRU')\n\t\t# print(step)\n\t\t# distance = d_ru[step]\n\t\t# print(distance)\n\t\tg_BR = pl_BR()\n\t\tsignal = power_i * g_BR * lamda * kappa * mt.pow((distance/1), -hat_alpha) * (1 - tau)\n\t\tinterference = power_i * g_BR * lamda * kappa * mt.pow((distance/1), -hat_alpha) * (globe.get_value('N_u') - 1)\n\t\tif signal > 0:\n\t\t\tSINR = 10 * mt.log((signal/AWGN), 10)\n\t\telse:\n\t\t\tSINR = 0\n\t\t# print(kappa * mt.pow((20/1), -hat_alpha))\n\t\t# print(distance)\n\t\t# print(SINR)\n\t\t# tru = (1 - tau) * BW * mt.log((1+SINR), 2)\n\t\t# print(tru)\n\t\treturn SINR\n\ndef env_state(step, tau, lamda):\n\td_ru = globe.get_value('DistanceRU')\n\tif step < globe.get_value('t')-1:\n\t\tnext_dru = d_ru[step+1]\n\telse:\n\t\tnext_dru = d_ru[step]\n\n\treward = EH(tau, lamda)\n\tSINR = capacity (d_ru[step], tau, lamda)\n\tif SINR < 12:\n\t\treward = 0\n\t#radio_state = np.array([globe.get_value('power_i'), next_dru]) \n\tradio_state = np.array([next_dru])\n\t# radio_state.append(globe.get_value('power_i'))\n\t# radio_state.append(next_dru)\n\treturn reward, radio_state\n\ndef Step(a):\n\tt = globe.get_value('t')\n\ttau = a[0]\n\tlamda = a[1]\n\tstep = globe.get_value('step')\n\treward, radio_state = env_state(step, tau, lamda)\n\tdone = False\n\tif step == t - 1:\n\t\tdone = True\n\n\tglobe.set_value('step', int(step+1))\n\treturn radio_state, reward, done\n\ndef reset():\n\tglobe.set_value('step', 0)\n\td_ru = globe.get_value('DistanceRU')\n\tnext_dru = d_ru[0]\n\tradio_state = np.array([next_dru])#np.array([globe.get_value('power_i'), next_dru])\n\treturn radio_state\t\n\ndef Grid_step(action, step):\n\tt = globe.get_value('t')\n\ttau = action[0]\n\tlamda = action[1]\n\treward, radio_state = env_state(step, tau, lamda)\n\tdone = False\n\tif step == t - 1:\n\t\tdone = True\n\n\treturn reward, done\n\ndef grid_reset(filename):\n\tglobe.set_value('step', 0)\n\n\tp = filename\n\twith open(p, encoding = 'utf-8') as f:\n\t\tdata = np.loadtxt(f, delimiter = \",\")\n\t\tdata.astype(np.int)\n\t\tglobe.set_value('DistanceRU', data)\n\n\td_ru = globe.get_value('DistanceRU')\n\tnext_dru = d_ru[0]\n\tradio_state = np.array([next_dru])#np.array([globe.get_value('power_i'), next_dru])\n\treturn radio_state\n\ndef reloadData(filename):\n\twith open(filename, encoding = 'utf-8') as f:\n\t\tdata = np.loadtxt(f, delimiter = \",\")\n\t\tdata.astype(np.int)\n\t\tglobe.set_value('DistanceRU', data)\n\n","sub_path":"SingleUT/ARIS_ENV.py","file_name":"ARIS_ENV.py","file_ext":"py","file_size_in_byte":6537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"255669801","text":"import glob\nimport os\nfrom subprocess import check_call\nfrom charmhelpers.core import hookenv\nfrom charmhelpers.core.templating import render\nfrom charms.reactive import when, set_flag, clear_flag\n\n\n@when('reverseproxy.available')\ndef config_with_reverseproxy(reverseproxy):\n services = reverseproxy.services()\n cfg = hookenv.config()\n\n # workaround for Jinja templating not accepting the hyphenated keys that we're supplied here\n for i in range(len(services)):\n if services[i].get('hosts'):\n hookenv.log(\"Service {} has hosts: {}\".format(i, services[i].get('hosts')))\n services[i]['hosts'][0]['private_address'] = services[i]['hosts'][0].get('private-address')\n\n for service in services:\n service_dir = 
'/var/lib/tor/{}'.format(service['service_name'])\n        if not os.path.isdir(service_dir):\n            check_call(['install', '-d', service_dir, '-o', 'debian-tor', '-m', '700'])\n\n    bridges = []\n    for bridge in cfg.get('bridges', '').split(','):\n        fields = bridge.split()\n        if len(fields) > 1:\n            addr, fp = fields[:2]\n            bridges.append({'addr': addr, 'fingerprint': fp})\n\n    render(\n        source='torrc',\n        target='/etc/tor/torrc',\n        owner='root',\n        perms=0o644,\n        context={\n            'cfg': cfg,\n            'services': services,\n            'bridges': bridges,\n        },\n    )\n    clear_flag('reverseproxy.available')\n    set_flag('tor.start')\n\n\n@when('tor.started')\ndef update_status_hostnames():\n    hostname_files = glob.glob('/var/lib/tor/*/hostname')\n    status = ''\n    for hostname_file in hostname_files:\n        with open(hostname_file, 'r') as f:\n            servicename = hostname_file.split('/')[4]\n            hostname = f.read().strip()\n            status = status + 'service {} running on {}, '.format(servicename, hostname)\n\n    if status.endswith(', '):\n        status = status[:-2]\n\n    if status != '':\n        hookenv.status_set('active', 'tor service ready: {}'.format(status.strip()))\n","sub_path":"tor-hidden/reactive/hidden.py","file_name":"hidden.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"95580092","text":"#!/usr/bin/python3\nimport socket\nimport pickle\nimport struct\n#def send(csocket,msg):\n    #length=len(msg)\n    #csocket.send((str(length)+\"#\").encode(\"utf-8\"))\n    #csocket.send(msg)\n    \n    \n\n#def rcv(csocket):\n    #rval=bytes()\n    #length=\"\"\n    #while True:\n        #temp=csocket.recv(1)\n        #if len(temp)==0:\n            #return rval\n        #dtemp=temp.decode(\"utf-8\")\n        #if dtemp==\"#\":\n            #break\n        #length+=dtemp\n    #length=int(length)\n    #bsize=1024\n    #rem=length\n    #while rem!=0:\n        #if rem>bsize:\n            #temp=csocket.recv(bsize)\n        #else:\n            #temp=csocket.recv(rem)\n        #rem-=len(temp)\n        #rval+=temp\n    #return rval\n\ndef send(csocket,msg):\n    head=struct.pack('Q',len(msg))\n    csocket.send(head)\n    csocket.send(msg)\ndef rcv(csocket):\n    rval=bytes()\n    length=csocket.recv(8)\n    if len(length)==0:\n        return rval\n    length=struct.unpack('Q',length)[0] # unpack returns a 1-tuple; take the integer length\n    bsize=1024\n    rem=length\n    while rem!=0:\n        if rem>bsize:\n            temp=csocket.recv(bsize)\n        else:\n            temp=csocket.recv(rem)\n        rem-=len(temp)\n        rval+=temp\n    return rval\n    \n\nserversocket=socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nhost=\"0.0.0.0\"#socket.gethostname()\nport=9999\nserversocket.bind((host,port))\nserversocket.listen(5)\n\nwhile True:\n    clientsocket,addr=serversocket.accept()\n    print(\"addr:\",str(addr))\n    send(clientsocket,(\"hello\\nworld1\").encode(\"utf-8\"))\n    send(clientsocket,(\"helloworld2\").encode(\"utf-8\"))\n    send(clientsocket,(\"asdffhdgfh\").encode(\"utf-8\"))\n    send(clientsocket,(\"hellwerwe\").encode(\"utf-8\"))\n    send(clientsocket,(\"xcdbvc\").encode(\"utf-8\"))\n    send(clientsocket,(\"34234324\").encode(\"utf-8\"))\n    clientsocket.close()\n\n","sub_path":"Python/socket/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"567980427","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nimport json\nimport re\nimport statistics\nimport math\n\ndef make_corpus_from_enhanced_by_Word2Vec():\n\tprint(\"Integer dictionary (after enhancement with word2vec)\")\n\tpolarity = {}\n\tfile = open(\"corpus/corpus.txt\", \"r\").readlines()\n\tpattern = \"\\'(.*)\\':(.*),\"\n\trepatter = 
re.compile(pattern)\n\tfor i in file:\n\t\tmatchOB = repatter.search(i)\n\t\tif matchOB:\n\t\t\tpolarity[matchOB.group(1)] = int(matchOB.group(2))\n\treturn polarity\n\ndef make_corpus_from_integer_corpus():\n\tpolarity = {}\n\tfile = open(\"corpus2.txt\", \"r\").readlines()\n\tpattern = \"\\\"(.*)\\\":(.*),\"\n\trepatter = re.compile(pattern)\n\tfor i in file:\n\t\tmatchOB = repatter.search(i)\n\t\tif matchOB:\n\t\t\tpolarity[matchOB.group(1)] = int(matchOB.group(2))\n\treturn polarity\n\ndef make_corpus_from_float_corpus():\n\tprint(\"Real-number dictionary\")\n\tpolarity = {}\n\tfile = open(\"corpus/corpus3.txt\", \"r\").readlines()\n\tpattern = \"\\\"(.*)\\\":(.*)\"\n\trepatter = re.compile(pattern)\n\tfor i in file:\n\t\tmatchOB = repatter.search(i)\n\t\tif matchOB:\n\t\t\tpolarity[matchOB.group(1)] = float(matchOB.group(2))\n\treturn polarity\n\n\ndef make_corpus():\n\tpolarity = {}\n\tverb = open(\"corpus/wago.121808.pn\", \"r\")\n\tfor line in verb.readlines():\n\t\toutput = line.rstrip().split(\"\\t\")\n\t\ttry:\n\t\t\tif \"ポジ\" in output[0]:\n\t\t\t\tpolarity[output[1]] = 1\n\t\t\telse:\n\t\t\t\tpolarity[output[1]] = -1\n\t\texcept IndexError:\n\t\t\tcontinue\n\n\t# norn = open(\"corpus/norn.json\")\n\t# file = json.load(norn)\n\t# for line in file:\n\t#     if line[\"Judge\"] == \"p\":\n\t#         polarity[line[\"Word\"]] = 1\n\t#     elif line[\"Judge\"] == \"n\":\n\t#         polarity[line[\"Word\"]] = -1\n\t#     else:\n\t#         polarity[line[\"Word\"]] = 0\n\n\tnorn = open(\"corpus/pn.csv.m3.120408.trim\", \"r\")\n\tfor line in norn.readlines():\n\t\toutput = line.rstrip().split()\n\t\ttry:\n\t\t\tif output[1] == \"p\":\n\t\t\t\tpolarity[output[0]] = 1\n\t\t\telif output[1] == \"n\":\n\t\t\t\tpolarity[output[0]] = -1\n\t\t\telse:\n\t\t\t\tpolarity[output[0]] = 0\n\t\texcept IndexError:\n\t\t\tcontinue\n\n\treturn polarity\n\n\nimport re\n\n\ndef norn_nature():\n\tpattern = r\"\\((.*)\\)\"\n\trepatter = re.compile(pattern)\n\tnorn_nature = {}\n\n\tfile = open(\"corpus/pn.csv.m3.120408.trim\", \"r\")\n\n\tfor line in file.readlines():\n\t\tarray = re.split(\"\\t|\\s\", line)\n\t\tif len(array) == 4:\n\t\t\tmatchOB = repatter.findall(array[2])\n\t\telse:\n\t\t\tmatchOB = repatter.findall(array[3])\n\t\tif matchOB:\n\t\t\tnorn_nature.update({array[0]: matchOB[0]})\n\t\telse:\n\t\t\tnorn_nature.update({array[0]: \" \"})\n\treturn norn_nature\n\n\n# make_corpus()\n\ndef make_corpus_pn_ja():\n\tpolarity = {}\n\tword = open(\"corpus/pn_ja.txt\", \"r\")\n\tfor line in word.readlines():\n\t\toutput = line.rstrip().split(\":\")\n\t\ttry:\n\t\t\tpolarity[output[0]] = float(output[3])\n\t\texcept IndexError:\n\t\t\tcontinue\n\tmean = statistics.mean(polarity.values())\n\t# median = statistics.median(polarity.values())\n\n\tfor key in polarity.keys():\n\t\tpolarity[key] += math.fabs(mean)\n\t\t# print(\"\\\"\"+key+\"\\\"\"+\":\"+str(polarity[key]))\n\treturn polarity\n\n\n####################################################\n## Enhance the dictionary using Word2Vec          ##\n####################################################\n\nfrom gensim.models.word2vec import Word2Vec\n\n\ndef update_corpus(polarity):\n\t# model = Word2Vec.load(\"/Users/soufuru/pycharmProjects/word2Vec/venv/corpus/word2vec.gensim.model\")\n\tmodel = Word2Vec.load(\"/Users/soufuru/pycharmProjects/word2Vec/venv/word2vec.gensim.model\")\n\n\tfile = open(\"not_in_corpus.txt\", \"r\")\n\tfor line in file.readlines():\n\t\ttry:\n\t\t\tfor similar in model.wv.similar_by_word(line.rstrip(), 10):\n\t\t\t\tif similar[0] in polarity:\n\t\t\t\t\tpolarity[line.rstrip()] = polarity[similar[0]]\n\t\texcept 
KeyError:\n\t\t\tcontinue\n","sub_path":"venv/make_corpus.py","file_name":"make_corpus.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"205209571","text":"#\n# Copyright (c) 2020 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom src.api.types import SingleClassification, Classification, Tag\n\n\ndef test_single_classification():\n    expected_dict = {\n        'type': 'classification',\n        'subtype': 'type',\n        'classification': {\n            'tag': {\n                'value': 'car',\n                'confidence': 0.97\n            }\n        }\n    }\n    tag = Tag('car', 0.97)\n#    attribs = []\n#    attribs.append(attribute)\n    test_classification = SingleClassification(subtype_name='type', tag=tag)\n    print(test_classification.as_dict())\n    assert expected_dict == test_classification.as_dict()\n\n\ndef test_entity():\n    expected_dict = {\n        \"inferences\": [\n            {\n                \"type\": \"classification\",\n                \"subtype\": \"animal\",\n                \"classification\": {\n                    \"tag\": {\n                        \"value\": \"dog\",\n                        \"confidence\": 0.85\n                    }\n                }\n            },\n            {\n                \"type\": \"classification\",\n                \"subtype\": \"animal\",\n                \"classification\": {\n                    \"tag\": {\n                        \"value\": \"fox\",\n                        \"confidence\": 0.11\n                    }\n                }\n            }\n        ]\n    }\n\n    classifications = [\n        SingleClassification(subtype_name='animal', tag=Tag(\"dog\", 0.85)),\n        SingleClassification(subtype_name='animal', tag=Tag(\"fox\", 0.11)),\n    ]\n    classification = Classification(classifications)\n    assert expected_dict == classification.as_dict()\n","sub_path":"extras/ams_wrapper/tests/unit/test_classification_type.py","file_name":"test_classification_type.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"323143658","text":"# -*- coding: utf-8 -*-\nimport unittest\nfrom selenium import webdriver\nfrom pyvirtualdisplay import Display\ndisplay = Display(visible=0, size=(800, 600))\ndisplay.start()\n\nclass TestWebpage(unittest.TestCase):\n    \n    def setUp(self):\n        self.browser = webdriver.Firefox()\n    \n    def testTitle(self):\n        self.browser.get('http://localhost')\n        self.assertIn('Buildings', self.browser.title)\n    \n    def tearDown(self):\n        self.browser.quit()\n\n\nif __name__ == '__main__':\n    unittest.main(verbosity=2)\n","sub_path":"seltest.py","file_name":"seltest.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"72845209","text":"import socket\nimport threading\n\n\ndef client(conn, addr):\n    print('New client thread started')\n    while True:\n        data = conn.recv(1024)\n        if not data:\n            break\n        print(data.decode())\n        conn.send(data)\n\n\nsock = socket.socket()\nport = 9090\n\nwhile True:\n    try:\n        sock.bind(('', port))\n        break\n    except OSError:\n        print(f'Port {port} is busy')\n        port += 1\n\nprint(f'Server bound to port: {port}')\nsock.listen(3)\n\nwhile True:\n    conn, addr = sock.accept()\n    print(f'Client {addr} connected to the server')\n    p = threading.Thread(target = 
client, args = (conn, addr))\n    p.start()\n\nsock.close()\nprint('Server has shut down')\n","sub_path":"Python/socket/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"450310080","text":"# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport storm\n\nclass SplitSentenceBolt(storm.BasicBolt):\n    def process(self, tup):\n        words = tup.values[0].split(\" \")\n        for word in words:\n            storm.emit([word])\n\nSplitSentenceBolt().run()\n","sub_path":"lesson3/stage1/target/classes/resources/splitsentence.py","file_name":"splitsentence.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"35011642","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/kghose/.venvs/benten/lib/python3.7/site-packages/benten/code/yaml.py\n# Compiled at: 2019-10-21 21:28:14\n# Size of source mod 2**32: 2634 bytes\n\"\"\"Load the raw YAML\"\"\"\nfrom typing import Tuple, List\nfrom ruamel.yaml import YAML\nfrom ruamel.yaml.parser import ParserError\nfrom ruamel.yaml.scanner import ScannerError\nfrom ruamel.yaml.composer import ComposerError\nfrom ruamel.yaml.compat import StringIO\nfrom langserver.lspobjects import Diagnostic, DiagnosticSeverity, Range, Position\nimport logging\nlogger = logging.getLogger(__name__)\n_yaml_loader = YAML(typ='rt')\n_yaml_loader.allow_duplicate_keys = True\nfast_load = YAML(typ='safe')\nfast_load.indent(mapping=2, sequence=4, offset=2)\nfast_load.default_flow_style = False\n\ndef fast_yaml_load(txt):\n    try:\n        return fast_load.load(txt)\n    except (ParserError, ScannerError) as e:\n        try:\n            pass\n        finally:\n            e = None\n            del e\n\n\ndef yaml_to_string(v: dict):\n    s = StringIO()\n    fast_load.dump(v, s)\n    return s.getvalue()\n\n\ndef parse_yaml(text, retries=3) -> Tuple[(dict, List[Diagnostic])]:\n    problems = []\n    try:\n        cwl = _yaml_loader.load(text)\n    except (ParserError, ScannerError, ComposerError) as e:\n        try:\n            if retries:\n                if e.problem == \"could not find expected ':'\":\n                    return parse_yaml(heal_incomplete_key(text, e), retries - 1)\n                if e.problem == 'mapping values are not allowed here':\n                    return parse_yaml(heal_incomplete_key_typeB(text, e), retries - 1)\n            cwl = None\n            problems = [\n             Diagnostic(_range=Range(start=(Position(e.problem_mark.line, e.problem_mark.column)), end=(Position(e.problem_mark.line, e.problem_mark.column))),\n               message=(str(e)),\n               severity=(DiagnosticSeverity.Error),\n               code='YAML err',\n               source='Benten')]\n        finally:\n            e = None\n            del e\n\n    return (\n     cwl, problems)\n\n\ndef heal_incomplete_key(original_text, e):\n    logger.debug('Attempting to heal incomplete key')\n    lines = original_text.splitlines(keepends=False)\n    lines[e.context_mark.line] = lines[e.context_mark.line] + ':'\n    return '\\n'.join(lines)\n\n\ndef heal_incomplete_key_typeB(original_text, e):\n    logger.debug('Attempting to heal incomplete key')\n    lines = original_text.splitlines(keepends=False)\n    ln = e.problem_mark.line - 1\n    while 
ln > 0 and len(lines[ln].strip()) == 0:\n ln -= 1\n\n if len(lines[ln]):\n lines[ln] = lines[ln] + ':'\n return '\\n'.join(lines)","sub_path":"pycfiles/benten-2020.3.10.macosx-10.9-x86_64.tar/yaml.cpython-37.py","file_name":"yaml.cpython-37.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"243842835","text":"import os\nimport logging\n\nfrom django.db import IntegrityError\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\nfrom bearing.constants import BearingSource\nfrom core.mixins import CSVImportMixin, PrettyPrintCommandMixin\nfrom bearing.models import Bearing, BearingDesign, BearingType\nfrom core.utils import unique_str\n\n\nlogger = logging.getLogger('file')\n\n\nclass Command(CSVImportMixin, PrettyPrintCommandMixin, BaseCommand):\n help = 'Import rolling bearings from CSV file'\n\n @staticmethod\n def save_to_file(data):\n try:\n file_name = os.path.join(settings.BASE_DIR, '..', 'deploy', 'data', 'output.log')\n f = open(os.path.abspath(file_name), str('w+b'))\n f.truncate()\n result = []\n for key, value in data.items():\n string = '%s: %s' % (key.capitalize(), \", \".join(value))\n result.append(string)\n f.write(str(string.encode('utf-8')))\n f.close()\n except Exception as e:\n logger.error(e)\n\n def handle(self, *args, **options):\n super().handle(*args, **options)\n\n count = 0\n Bearing.objects.filter(source=BearingSource.ROLLING.value).delete()\n errors = {'bearings': [], 'designs': [], 'types': [], 'unknown': []}\n file_name = os.path.join(settings.BASE_DIR, '..', 'deploy', 'data', 'initial', 'rolling_bearings.csv')\n for d in self.parse_csv_file(file_name):\n item = Bearing(**{\n 'name': d[0].strip(),\n 'slug': unique_str(),\n 'inner_diameter': self.parse_int(d[1]),\n 'outer_diameter': self.parse_int(d[2]),\n 'depth': self.parse_int(d[3]),\n 'weight': self.parse_float(d[4]),\n 'sketch': d[5].strip(),\n 'manufacturer_string': d[6].strip(),\n 'iso_string': d[7].strip(),\n 'analog': d[8].strip()\n })\n try:\n item.save()\n count += 1\n except BearingDesign.DoesNotExist:\n errors['designs'].append('%s%s' % (item.parser.bearing_design, item.parser.bearing_type))\n errors['bearings'].append(d[0].strip())\n logger.warning('ERROR: BearingDesign %s%s does not exist'\n % (item.parser.bearing_design, item.parser.bearing_type))\n except BearingType.DoesNotExist:\n errors['types'].append(item.parser.bearing_type)\n errors['bearings'].append(d[0].strip())\n logger.warning('ERROR: BearingType %s does not exist' % item.parser.bearing_type)\n except IntegrityError as e:\n logger.warning(e)\n except Exception as e:\n errors['unknown'].append(e)\n logger.error(e)\n\n self.stdout.write('Created %d bearings' % count)\n # self.save_to_file(errors)\n","sub_path":"app/bearing/management/commands/import_rolling_bearings.py","file_name":"import_rolling_bearings.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"169133969","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nfrom mpl_toolkits.mplot3d import Axes3D\n\nINTERVAL_START_X1 = -2\nINTERVAL_STOP_X1 = 2\nINTERVAL_START_X2 = -1\nINTERVAL_STOP_X2 = 3\nSAMPLING_INTERVAL = 0.05\nA = 1\nB = 100\nMAX_ITERATIONS = 1000\nTHRESHOLDS = np.array([0.001, 0.00001, 0.0000001])\nTHRESHOLDS_NAMES = np.array(['001', '00001', '0000001'])\nSTARTING_POINTS = np.array([[-1.5,-0.5], [0.5, 
2.5]])\nCOLORS = np.array(['g', 'y'])\n\n# Function that computes the value of the function in a point x=(x1,x2)\ndef myFunction(x):\n y = (A - x[0])**2 + B*(x[1] - x[0]**2)**2\n return y\n\n# Function that computes the gradient (gx1, gx2) of the function in a point x=(x1,x2)\ndef myFunctionGradient(x):\n gradient = np.array([2*x[0] - 2*A + B*4*x[0]**3 - 4*B*x[1]*x[0], 2*B*x[1] - 2*B*x[0]**2])\n return gradient\n\n# Function that finds an alpha such that f(xk - alpha*grad(f(xk))) < f(xk)\n# alpha is found starting from 1 and dividing iteratively by 2 until the condition is satisfied \ndef findAlpha(xk, function, function_gradient):\n alpha = 1.0\n while function(xk - alpha*function_gradient(xk)) >= function(xk):\n alpha = alpha / 2\n return alpha\n\n# Function that implements the gradient descent method, with alpha that changes at every iteration.\n# It requires a starting point, alpha, a function that computes the value of a function in a specified point,\n# a function that computes the gradient of a function in a specified point, the maximum number of iterations\n# to perform and a threshold to stop the method.\n# It returns an array of points, where each point is computed using the gradient descent method.\ndef gradientDescentWithDynamicAlpha(x0, function, function_gradient, max_iterations, threshold):\n points = [x0]\n xk = x0\n alpha = findAlpha(xk, function, function_gradient)\n xk1 = xk - alpha*function_gradient(xk)\n i = 1\n while (i < max_iterations) & (abs(function(xk1) - function(xk)) > threshold):\n xk = xk1\n alpha = findAlpha(xk, function, function_gradient)\n xk1 = xk - alpha*function_gradient(xk)\n points.append(xk1)\n i = i + 1\n return np.array(points)\n\n# Create a new figure\nfig = plt.figure()\n# Split the figure in 1*1 (nrows*ncols) subaxes and create a new suplot positioned at 1 (plot_number)\nnrows = 1\nncols = 1\nplot_number = 1\nax = fig.add_subplot(nrows, ncols, plot_number, projection='3d')\n# Set the labels for the axes\nax.set_xlabel('$x_1$')\nax.set_ylabel('$x_2$')\nax.set_zlabel('$f(x_1, x_2)$')\n# Plot a surface with a solid linestyle connecting all the vertices\nX1 = np.linspace(INTERVAL_START_X1, INTERVAL_STOP_X1, ((INTERVAL_STOP_X1 - INTERVAL_START_X1) / SAMPLING_INTERVAL) + 1)\nX2 = np.linspace(INTERVAL_START_X2, INTERVAL_STOP_X2, ((INTERVAL_STOP_X2 - INTERVAL_START_X2) / SAMPLING_INTERVAL) + 1)\nX1, X2 = np.meshgrid(X1, X2)\nY = (A - X1)**2 + B*(X2 - X1**2)**2\nax.plot_surface(X1, X2, Y, rstride=1, cstride=1, linewidth=0, antialiased=False, cmap='inferno')\n# Save the current figure\nplt.savefig('../Images/01-rosenbrock-function-surface.png')\n\n# Create a new figure\nfig = plt.figure()\n# Split the figure in 1*1 (nrows*ncols) subaxes and create a new suplot positioned at 1 (plot_number)\nnrows = 1\nncols = 1\nplot_number = 1\nax = fig.add_subplot(nrows, ncols, plot_number)\n# Set the labels for the axes\nax.set_xlabel('$x_1$')\nax.set_ylabel('$x_2$')\n# Plot filled contours (up to levels_number automatically-chosen levels)\nlevels_number = 100\nax.contourf(X1, X2, Y, levels_number, cmap='inferno')\n# Plot the global minimum of the function\nax.plot(A, A**2, 'ro')\n# Save the current figure\nplt.savefig('../Images/01-rosenbrock-function-contours.png')\n\n# Apply the gradient method to the function starting at the specified points\nfor threshold_index, threshold in enumerate(THRESHOLDS):\n # Create a new figure\n fig = plt.figure()\n # Split the figure in 1*1 (nrows*ncols) subaxes and create a new suplot positioned at 1 (plot_number)\n nrows = 1\n ncols 
= 1\n plot_number = 1\n ax = fig.add_subplot(nrows, ncols, plot_number)\n # Set the labels for the axes\n ax.set_xlabel('$x_1$')\n ax.set_ylabel('$x_2$')\n # Plot filled contours (up to levels_number automatically-chosen levels)\n levels_number = 100\n ax.contourf(X1, X2, Y, levels_number, cmap='inferno')\n # Plot the global minimum of the function\n ax.plot(A, A**2, 'ro')\n for i in range(0, COLORS.size):\n x0 = STARTING_POINTS[i]\n points = gradientDescentWithDynamicAlpha(x0, myFunction, myFunctionGradient, MAX_ITERATIONS, threshold)\n ax.plot(points[:,0], points[:,1], COLORS[i])\n # Save the current figure\n plt.savefig('../Images/01-rosenbrock-function-contours-threshold-'+THRESHOLDS_NAMES[threshold_index]+'.png')","sub_path":"lab02/Scripts/01-rosenbrock-function.py","file_name":"01-rosenbrock-function.py","file_ext":"py","file_size_in_byte":4763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"140460600","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\n project: welborn productions - sitemaps - main\n @summary: provides main sitemap for sitemaps framework\n\n @author: Christopher Welborn \n@organization: welborn productions \n\n start date: Apr 3, 2013\n'''\n\nimport logging\n\n# For today's date\nfrom datetime import date\n\n# django cache stuff\nfrom django.views.decorators.cache import never_cache\n\n# xml_response.\nfrom wp_main.utilities import responses\n# Local models to build urls.\nfrom apps.models import wp_app\nfrom blogger.models import wp_blog\nfrom img.models import wp_image\nfrom misc.models import wp_misc\nfrom projects.models import wp_project\n\nlog = logging.getLogger('wp.sitemaps')\n\n\n@never_cache\ndef view_sitemap(request):\n \"\"\" Delivers sitemap for current domain using sitemap.xml template \"\"\"\n\n # return xml sitemap response\n return responses.xml_response(\n 'sitemaps/sitemap.xml',\n context={\n 'url_list': [sm_url for sm_url in build_urls(request)]\n },\n request=request,\n comments=False,\n )\n\n\n@never_cache\ndef view_blank_sitemap(request):\n \"\"\" Delivers a blank sitemap\n (for servers that don't need a sitemap like the test-server).\n \"\"\"\n\n return responses.text_response('', content_type='application/xml')\n\n\n@never_cache\ndef view_byserver(request):\n \"\"\" Decides which sitemap to deliver according to server.\n sends blank sitemap to server with names starting with 'test.'\n \"\"\"\n\n server_name = request.META['SERVER_NAME']\n if server_name.startswith('test.'):\n return view_blank_sitemap(request)\n\n # normal sitemap.\n return view_sitemap(request)\n\n\ndef build_app_urls(protocol, domain):\n \"\"\" Yields SitemapUrl()s for all web apps. \"\"\"\n # build apps urls.\n apps = wp_app.objects.filter(\n disabled=False,\n admin_only=False)\n for app in apps.order_by('name'):\n yield SitemapUrl(\n rel_location='/apps/{}'.format(app.alias),\n protocol=protocol,\n domain=domain,\n changefreq='monthly',\n lastmod=str(app.publish_date),\n priority='0.9'\n )\n\n\ndef build_blog_urls(protocol, domain):\n \"\"\" Yields SitemapUrl()s for all blog posts. \"\"\"\n for post in wp_blog.objects.filter(disabled=False).order_by('-posted'):\n yield SitemapUrl(\n rel_location='/blog/view/{}'.format(post.slug),\n protocol=protocol,\n domain=domain,\n changefreq='never',\n lastmod=str(post.posted),\n priority='0.5'\n )\n\n\ndef build_img_urls(protocol, domain):\n \"\"\" Yields SitemapUrl()s for all img posts. 
\"\"\"\n imgs = wp_image.objects.filter(disabled=False, private=False)\n for img in imgs.order_by('-publish_date'):\n yield SitemapUrl(\n rel_location='/img?id={}'.format(img.image_id),\n protocol=protocol,\n domain=domain,\n changefreq='never',\n lastmod=str(img.publish_date.date()),\n priority='0.5'\n )\n\n\ndef build_main_urls(protocol, domain):\n \"\"\" Yields SitemapUrl()s for the main pages. \"\"\"\n # Main urls and default change frequencies for them.\n url_freq = {\n '/': 'daily',\n '/about': 'monthly',\n '/apps': 'monthly',\n '/projects': 'weekly',\n '/blog': 'daily',\n '/misc': 'weekly',\n '/paste': 'daily',\n }\n today = str(date.today())\n # build basic urls for main site nav.\n for url in url_freq:\n yield SitemapUrl(\n rel_location=url,\n protocol=protocol,\n domain=domain,\n changefreq=url_freq[url],\n lastmod=today,\n priority='0.8'\n )\n\n\ndef build_misc_urls(protocol, domain):\n \"\"\" Yields SitemapUrl()s for all misc objects. \"\"\"\n # build misc urls.\n for misc in wp_misc.objects.filter(disabled=False).order_by('name'):\n yield SitemapUrl(\n rel_location='/misc/{}'.format(misc.alias),\n protocol=protocol,\n domain=domain,\n changefreq='monthly',\n lastmod=str(misc.publish_date),\n priority='0.8'\n )\n\n\ndef build_project_urls(protocol, domain):\n \"\"\" Yields SitemapUrl()s for all project pages. \"\"\"\n for proj in wp_project.objects.filter(disabled=False).order_by('name'):\n yield SitemapUrl(\n rel_location='/projects/{}'.format(proj.alias),\n protocol=protocol,\n domain=domain,\n changefreq='monthly',\n lastmod=str(proj.publish_date),\n priority='0.9'\n )\n\n\ndef build_urls(request):\n \"\"\" builds a list of SitemapUrl() containing:\n Full URL, Change Frequency, Last Modified Date\n for main site, projects, and blog sections/items.\n\n request is a WSGIRequest or HttpRequest object that was\n passed to the view. It is used to determine the protocol (http/https),\n and the domain name.\n (for building location urls: http://mysite.com/projects/myproject)\n\n returns list of SitemapUrl()\n \"\"\"\n\n try:\n # get protocol\n protocol = 'https' if request.is_secure() else 'http'\n except Exception as ex:\n errfmt = 'build_urls: unable to determine request.is_secure():\\n {}'\n log.error(errfmt.format(ex))\n else:\n # Find server name (.com or .info)\n serverattrs = (\n 'HTTP_X_FORWARDED_HOST',\n 'HTTP_X_FORWARDED_SERVER',\n 'HTTP_HOST'\n )\n domain = None\n for serverattr in serverattrs:\n if serverattr in request.META.keys():\n # get domain\n domain = request.META[serverattr]\n if domain:\n break\n\n # Unable to retrieve server name from request.\n if not domain:\n log.error('build_urls: unable to retrieve domain name!')\n else:\n # url list, consists of SitemapUrl() items containing:\n # (URL, Change Frequency, Last Modified Date)\n yield from build_main_urls(protocol, domain)\n yield from build_project_urls(protocol, domain)\n yield from build_blog_urls(protocol, domain)\n yield from build_misc_urls(protocol, domain)\n yield from build_app_urls(protocol, domain)\n yield from build_img_urls(protocol, domain)\n\n\nclass SitemapUrl(object): # noqa\n\n \"\"\" Provides info for individual sitemap urls. 
\"\"\"\n\n def __init__(\n self, location='', rel_location='', changefreq='', lastmod='',\n protocol='http', domain='', priority='0.5'):\n # changes info\n self.changefreq = changefreq\n self.lastmod = lastmod\n # priority\n self.priority = priority\n # location info\n self.rel_location = rel_location\n self.protocol = protocol\n self.domain = domain\n # build complete location on demand if needed.\n self.location = location or self.complete_url()\n\n def complete_url(self):\n \"\"\" builds complete url for this item if all info is present.\n ex:\n url = SitemapUrl(\n rel_location_='/projects',\n protocol='http',\n domain='mysite.com')\n loc = url.complete_url()\n # will return:\n # http://mysite.com/projects\n This is used to build .location when it isn't set, so\n loc = url.location\n ..will do the same thing.\n \"\"\"\n\n if (not self.domain) or (not self.protocol):\n surl = self.rel_location\n else:\n surl = ''.join((\n self.protocol,\n '://',\n self.domain,\n self.rel_location\n ))\n return surl\n","sub_path":"wp_main/sitemaps/sitemaps.py","file_name":"sitemaps.py","file_ext":"py","file_size_in_byte":7902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"31855173","text":"import io\nimport unittest\nimport unittest.mock as mock\n\nfrom extra_metrics.logs import logger\nfrom extra_metrics.config import ExtraMetricsConfiguration\nfrom extra_metrics.test.fake_mocks import FakeQueryInterface\n\nfrom extra_metrics.scripts import (\n running_on_a_fwxserver_host,\n validate_current_fw_version, ValidationExceptionCannotParseFileWaveVersion,\n ValidationExceptionWrongFileWaveVersion\n)\n\n\nclass TestConfiguration(unittest.TestCase):\n def setUp(self):\n self.cfg = ExtraMetricsConfiguration()\n self.config_text = '''\n [extra_metrics]\n fw_server_hostname = abc\n fw_server_api_key = def\n '''\n buf = io.StringIO(self.config_text)\n self.cfg.read_configuration(buf)\n\n def test_configuration_defaults(self):\n self.assertEqual(\"abc\", self.cfg.get_fw_api_server_hostname())\n self.assertEqual(\"def\", self.cfg.get_fw_api_key())\n self.assertEqual(True, self.cfg.get_verify_tls())\n\n def test_configuration_changes_are_written(self):\n self.cfg.set_fw_api_key('hello world')\n self.cfg.set_polling_delay_seconds(42)\n buf = io.StringIO()\n self.cfg.write_configuration(buf)\n value = buf.getvalue()\n self.assertTrue(\"hello world\" in value)\n self.assertTrue(\"42\" in value)\n self.assertEqual(self.cfg.get_polling_delay_seconds(), 42)\n\n self.assertEqual(True, self.cfg.get_verify_tls())\n self.cfg.set_verify_tls(False)\n self.assertEqual(False, self.cfg.get_verify_tls())\n\n self.cfg.set_verify_tls(True)\n self.assertEqual(True, self.cfg.get_verify_tls())\n\n\ndef get_unparsable_version(self):\n return None, None, None\n\n\ndef get_incorrect_version(self):\n return 13, 3, 0\n\n\ndef get_correct_version(self):\n return 14, 0, 0\n\n\nclass TestRuntimeChecks(unittest.TestCase):\n def setUp(self):\n self.fw_query = FakeQueryInterface()\n\n @mock.patch('extra_metrics.test.fake_mocks.FakeQueryInterface.get_current_fw_version_major_minor_patch', get_unparsable_version)\n def test_validation_fails_due_to_bad_parsing(self):\n with self.assertRaises(ValidationExceptionCannotParseFileWaveVersion):\n validate_current_fw_version(self.fw_query)\n\n @mock.patch('extra_metrics.test.fake_mocks.FakeQueryInterface.get_current_fw_version_major_minor_patch', get_correct_version)\n def test_validation_succeeds(self):\n validate_current_fw_version(self.fw_query)\n\n 
@mock.patch('extra_metrics.test.fake_mocks.FakeQueryInterface.get_current_fw_version_major_minor_patch', get_incorrect_version)\n def test_validation_finds_incorrect_version(self):\n with self.assertRaises(ValidationExceptionWrongFileWaveVersion):\n validate_current_fw_version(self.fw_query)\n\n def test_running_on_fwxserver_host(self):\n def my_check(yes_or_no, file_path):\n logger.info(\"checking file path:\", file_path)\n return yes_or_no\n\n self.assertTrue(running_on_a_fwxserver_host(exist_func=lambda f: my_check(True, f)))\n self.assertFalse(running_on_a_fwxserver_host(exist_func=lambda f: my_check(False, f)))","sub_path":"extra_metrics/test/test_configuration.py","file_name":"test_configuration.py","file_ext":"py","file_size_in_byte":3159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"369941384","text":"# -*- coding: utf-8 -*-\n\n# ======================================================================================================================\n# Copyright (©) 2015-2021 LCS - Laboratoire Catalyse et Spectrochimie, Caen, France. =\n# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT - See full LICENSE agreement in the root directory =\n# ======================================================================================================================\n\"\"\"\nThis module implements the DAsh callbacks.\n\"\"\"\n\n__all__ = []\n\nimport base64\nimport json\nfrom dash import no_update, callback_context\nfrom dash.dependencies import Input, Output, State\nimport dash_html_components as html\nfrom dash.exceptions import PreventUpdate\nimport dash_bootstrap_components as dbc\n\nimport spectrochempy as scp\n\n\nclass Callbacks(object):\n\n @staticmethod\n def parse_upload_contents(filename, contents, single=False):\n #\n # transform uploaded content to a NDDataset\n #\n content_type, content_string = contents.split(',')\n decoded = base64.b64decode(content_string)\n ds = scp.NDDataset.read(filename, content=decoded)\n return ds.write_json(to_string=True)\n\n @staticmethod\n def dataset_itemlist(ds):\n return [\n html.H6(f'{ds.name}'),\n html.Code(f'{str(ds)}'),\n # dcc.Markdown(f'{ds.description}'),\n # *[dcc.Markdown(f'{d}') for d in ds.history],\n ]\n\n def dataset_list(self, *datasets):\n return [dbc.ListGroupItem([\n dbc.Checkbox(id=f\"dataset-checkbox-{i}\", style={\n 'float': 'right'\n }, checked=True),\n *self.dataset_itemlist(ds)]) for i, ds in enumerate(datasets)]\n\n def uploaded_dataset_list(self, *datasets):\n #\n # TODO: active can be set to True for highligthing selected File (however for the moment we limit to one\n # component)\n # TODO: write a callback for each entry\n #\n all_datasets = [\n dbc.ListGroup(self.dataset_list(*datasets))\n ]\n return all_datasets\n\n @staticmethod\n def set_ROI_and_offset(ds, xrf, yrf, xrl, xru, yrl, yru):\n\n dimx = ds.dims[-1]\n x = getattr(ds, dimx)\n\n x.offset = float(xrf) if xrf else 0\n ds.history = f\">>> ds.{dimx}.offset = {x.offset}')\"\n x.roi = [float(xrl) if xrl else 0, float(xru) if xru else 0]\n ds.history = f\">>> ds.{dimx}.roi = {x.roi}\"\n\n if len(ds.dims) > 1:\n\n dimy = ds.dims[-2]\n y = getattr(ds, dimy)\n\n y.offset = float(yrf) if yrf else 0\n ds.history = f\">>> ds.{dimy}.offset = {y.offset}\"\n y.roi = float(yrl) if yrl else 0, float(yru) if yru else 0\n ds.history = f\">>> ds.{dimy}.roi = {y.roi}\"\n\n return ds\n\n def add_callbacks(self, app):\n \"\"\" Add all application calbacks \"\"\"\n\n # # ------------\n # # DATA UPLOAD\n # # ------------\n # 
@app.callback(Output('original-data', 'data'),\n # Input('upload-data', 'contents'),\n # State('upload-data', 'filename'))\n # def data_upload(list_of_contents, list_of_names):\n # # Store data in components\n #\n # data = {} # the data must be JSON serializable\n #\n # if list_of_contents is not None:\n # # data uploaded\n # z = zip(list_of_names, list_of_contents)\n # data = {n: self.parse_upload_contents(n, c) for n, c in z}\n # else:\n # raise PreventUpdate\n # return data\n\n # --------------\n # PROJECT UPLOAD\n # --------------\n\n @app.callback(Output('original-data', 'data'),\n Input('upload-project', 'contents'),\n State('upload-project', 'filename'))\n def project_upload(content, name):\n # Store project data in components\n\n data = {} # the data must be JSON serializable\n\n if content is not None:\n # data uploaded\n content_type, content_string = content.split(',')\n decoded = base64.b64decode(content_string)\n pj = scp.Project.load(content=decoded)\n data = pj.to_json()\n else:\n raise PreventUpdate\n return data\n\n # ----------------------\n # SAVE INTERMEDIATE DATA\n # ----------------------\n @app.callback([Output('intermediate-data', 'data'),\n Output('actual-x-roi', 'children'),\n Output('actual-y-roi', 'children'),\n Output('action-history', 'data')],\n [ # roi\n Input('x-roi-lower', 'value'),\n Input('x-roi-upper', 'value'),\n Input('y-roi-lower', 'value'),\n Input('y-roi-upper', 'value'),\n Input('x-offset', 'value'),\n Input('y-offset', 'value'),\n\n Input('original-data', 'modified_timestamp'), # new original data\n Input('confirm-mask', 'submit_n_clicks'), # modify masks\n Input('graph', 'selectedData'), # data for x-masks\n Input('graph', 'clickData'), # data for y-mask\n Input('graph-selector', 'value'), # transposed case?\n ],\n [State('original-data', 'data'),\n State('intermediate-data', 'data'),\n State('action-history', 'data')])\n def save_intermediate_data(xrl, xru, yrl, yru, xrf, yrf,\n ts, submit_mask, selectedData, clickData, selector,\n data, saved, history):\n \"\"\"\n Main callback for updating the figure and some dash components\n \"\"\"\n # no original data? probably not yet uplaoded... 
exit without updating\n if data is None:\n raise PreventUpdate\n\n project = scp.Project.from_json(data)\n # extract the project information from original data store\n # datasets = [scp.read_json(content=c.encode()) for n, c in data.items()]\n\n # show processed flag\n processed = True if 'Processed' in selector else False\n\n # if we want to show the original data:\n # -------------------------------------\n if not processed:\n ds = datasets[0]\n\n # set current ROI and offset if necessary\n self.set_ROI_and_offset(ds, xrf, yrf, xrl, xru, yrl, yru)\n\n # make output\n datasets[0] = ds\n data = {ds.filename: ds.write_json(to_string=True) for ds in datasets}\n return data, no_update, no_update, no_update\n\n # else we will output processed data\n # ----------------------------------\n if saved:\n datasets = [scp.read_json(content=c.encode()) for n, c in saved.items()]\n\n # for now we work with only one spectrum\n ds = datasets[0]\n dss = ds.copy() # for further comparison of changes\n\n # set current ROI and offset if necessary\n self.set_ROI_and_offset(ds, xrf, yrf, xrl, xru, yrl, yru)\n\n # show transposed?\n transposed = True if 'Transposed' in selector else False\n\n # apply masks\n if submit_mask and selectedData:\n # set x-masks\n range = selectedData.get('range', None)\n if range is not None:\n x_selection = range['x']\n if not transposed:\n ds[:, x_selection[1]:x_selection[0]] = scp.MASKED\n else:\n ds[x_selection[1]:x_selection[0]] = scp.MASKED\n\n # create output\n if ds == dss:\n # no change\n raise PreventUpdate\n\n datasets[0] = ds\n newdata = {ds.filename: ds.write_json(to_string=True) for ds in datasets}\n actx = f'Actual roi: {ds.x.roi_values[0]:~P} -> {ds.x.roi_values[1]:~P}'\n acty = f'Actual roi: {ds.y.roi_values[0]:~P} -> {ds.y.roi_values[1]:~P}'\n\n # update history\n # history is designed to be a list of python function,\n # able to be run later to reproduce all the transformations done\n ctx = callback_context.triggered\n if 'original-data' in ctx[0]['prop_id']:\n # Data just uploaded in Dash\n history = [f\">>> ds = scp.read('{ds.filename}')\"]\n else:\n # parameter changes\n for item in ctx:\n par = item['prop_id']\n\n return newdata, actx, acty, history\n\n # -----------------------\n # UPDATE DATA TAB CONTENT\n # -----------------------\n @app.callback([Output('current-data', 'children'),\n Output('show-project', 'style'),\n Output('show-current-data', 'style'),\n Output('show-graph', 'is_open'),\n Output('x-roi', 'children'),\n Output('y-roi', 'children'),\n Output('x-roi-lower', 'value'),\n Output('x-roi-upper', 'value'),\n Output('y-roi-lower', 'value'),\n Output('y-roi-upper', 'value'),\n Output('x-roi-units', 'children'),\n Output('y-roi-units', 'children'),\n Output('x-offset', 'value'),\n Output('y-offset', 'value')],\n [Input('original-data', 'modified_timestamp')],\n [State('original-data', 'data'),\n State('intermediate-data', 'data')])\n def update_tab_content(ts, data, saveddata):\n if ts is None:\n raise PreventUpdate\n\n dataloaded = None\n is_open = True\n xr = 'x'\n yr = 'y'\n xrl, xru = None, None\n yrl, yru = None, None\n xr_units = None\n yr_units = None\n xro = 0\n yro = 0\n style = dict({\n 'display': 'none'\n })\n\n if saveddata is not None: # take the saved data!\n data = saveddata # json.loads(saveddata)\n\n if data is not None:\n datasets = [scp.read_json(content=c.encode()) for n, c in data.items()]\n\n # for now we work with only one spectrum\n ds = datasets[0]\n dataloaded = self.uploaded_dataset_list(*datasets)\n is_open = False\n xr 
= ds.x.title # we assume homogeneous data (same kind of dimension)\n yr = ds.y.title\n xrl, xru = ds.x.roi\n yrl, yru = ds.y.roi\n xr_units = f'{ds.x.units:~P}'\n yr_units = f'{ds.y.units:~P}'\n xro = ds.x.offset\n yro = ds.y.offset\n style = dict({\n 'display': 'block'\n })\n\n return (dataloaded, style, style, not is_open,\n xr, yr, xrl, xru, yrl, yru,\n xr_units, yr_units, xro, yro)\n\n # -------------\n # UPDATE FIGURE\n # -------------\n\n @app.callback(Output('graph', 'figure'),\n [Input('intermediate-data', 'data'),\n Input('graph-selector', 'value'),\n # change on the type of figure to display (processed, transposed...)\n Input('graph-optimisation', 'value'), # change the optimisation level\n Input('zoom-reset', 'n_clicks'), # reset Zoom from button\n Input('cmap-select', 'value'),\n Input('select-mask', 'n_clicks')\n\n # Input('graph', 'relayoutData')\n ])\n def update_figure(data,\n selector,\n optim,\n zoomreset,\n cmap,\n dragmode,\n ): # relayout):\n\n if data is None:\n raise PreventUpdate\n\n datasets = [scp.read_json(content=c.encode()) for n, c in data.items()]\n\n ds = datasets[0]\n\n dragmode = 'zoom' if dragmode is None or dragmode % 2 == 0 else 'select'\n figure = ds.plot(use_plotly=True,\n selector=selector,\n optimisation=optim,\n zoomreset=zoomreset,\n cmap=cmap,\n dragmode=dragmode,\n )\n\n return figure\n\n # ------------------\n # CLEAR DATA BUTTON\n # ------------------\n @app.callback(\n [Output(\"original-data\", \"clear_data\"),\n Output(\"intermediate-data\", \"clear_data\")],\n [Input(\"close-data\", \"n_clicks\")]\n )\n def clear_data_click(n):\n if n is None:\n return False, False\n else:\n return True, True\n\n # -------------------\n # DISPLAY CONTROL\n # -------------------\n @app.callback(\n [Output(\"close-data\", \"style\"),\n Output('data-tab', 'disabled'),\n Output('graph-tab', 'disabled'),\n Output('processing-tab', 'disabled')],\n [Input('current-data', 'children')])\n def tab_display_control(children):\n if not children:\n return dict({\n 'display': 'none'\n }), True, True, True\n else:\n return dict({\n 'display': 'block'\n }), False, False, False\n\n # ----------------------------------\n # MODIFY CLOSING/OPENING CARD BUTTON\n # ----------------------------------\n for item in ['roi', 'current-data', 'mask',\n 'layout', 'xaxis', 'zaxis',\n 'baseline', 'peakpicking', 'subtraction']:\n @app.callback(\n [Output(f\"open-{item}-more\", \"is_open\"),\n Output(f\"{item}-more\", \"children\"),\n Output(f\"{item}-more\", \"color\")],\n [Input(f\"{item}-more\", \"n_clicks\")],\n [State(f\"{item}-more\", \"children\")]\n )\n def on_click(n, state):\n if n is None or state.startswith('Close'):\n return False, \"More\", \"info\"\n else:\n return True, \"Close this card\", \"warning\"\n\n # ------------\n # MENU PROJECT\n # ------------\n @app.callback(\n [Output(f\"project-name\", \"placeholder\"),\n Output(f\"project-name\", \"disabled\"),\n Output(f\"project-close\", \"disabled\"),\n Output(f\"project-save\", \"disabled\")],\n [Input(f\"project-new\", \"n_clicks\"),\n Input(f\"project-open\", \"n_clicks\")]\n )\n def on_menu_project(new, open):\n # ctx = callback_context.triggered\n if new is None and open is None:\n raise PreventUpdate\n\n ctx = callback_context.triggered\n if ctx and 'new' in ctx[0]['prop_id'] and ctx[0]['value'] is not None:\n return 'Untitled (edit to change this name)', False, False, False\n # NOTE: the original repeated the 'new' test here, leaving this branch dead;\n # 'open' is assumed to be the intended trigger.\n elif ctx and 'open' in ctx[0]['prop_id'] and ctx[0]['value'] is not None:\n return\n\n # Set masks\n # @app.callback()\n\n # GRAPH SELECTION AND
HOVER\n\n # @app.callback(\n # Output('text-data', 'children'),\n # [Input('graph', 'hoverData')])\n # def display_hover_data(hoverData):\n # return json.dumps(hoverData, indent=2)\n #\n #\n # @app.callback(\n # Output('text-data', 'children'),\n # [Input('graph', 'clickData')])\n # def display_click_data(clickData):\n # return json.dumps(clickData, indent=2)\n #\n #\n # @app.callback(\n # Output('text-data', 'children'),\n # [Input('graph', 'selectedData')])\n # def display_selected_data(selectedData):\n # return json.dumps(selectedData, indent=2)\n\n @app.callback(\n [Output('text-data', 'children'),\n Output('confirm-mask', 'displayed'),\n Output('confirm-mask', 'submit_n_clicks'),\n Output('confirm-mask', 'message')],\n [Input('graph', 'relayoutData'),\n Input('graph', 'hoverData'),\n Input('graph', 'selectedData'),\n Input('graph', 'clickData')\n ])\n def display_relayout_data(relayoutData, hoverData, selectedData, clickData):\n ctx = callback_context\n text = json.dumps(hoverData, indent=2) + json.dumps(relayoutData, indent=2) + json.dumps(selectedData,\n indent=2) + \\\n json.dumps(\n clickData, indent=2)\n confirm = False\n message = ''\n nclicks = no_update\n if ctx.triggered[0]['prop_id'] == 'graph.selectedData':\n range = selectedData.get('range', None)\n if range is not None:\n x_selection = range['x']\n message = f'Are you sure you want to mask data in the {x_selection} region?'\n confirm = True\n nclicks = 0\n\n return text, confirm, nclicks, message\n\n #\n # Mask selection button aspect\n #\n\n @app.callback(\n [Output('select-mask', 'children'),\n Output('select-mask', 'color'), ],\n [Input('select-mask', 'n_clicks'), ]\n )\n def mask_buttton_aspect(n):\n if n is None or n % 2 == 0:\n return \"Select mask\", \"secondary\"\n else:\n return \"Stop mask selection\", \"danger\"\n","sub_path":"spectrochempy/gui/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":18858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"597521198","text":"\r\n# ----------------------------------------------------------------------\r\n# iob2DynaML.py\r\n# ----------------------------------------------------------------------\r\n# Author: Kent Wheeler\r\n# Date: 17 March 2020\r\n# Purpose: Script to create DynaML stn and msr files from Geolab .iob file\r\n# ----------------------------------------------------------------------\r\n# Usage: cmd:\\> python iob2DynaML.py <*.iob>\r\n# ----------------------------------------------------------------------\r\n# future development requires capability to read 2DC geolab records\r\n\r\nimport os, sys, shutil\r\nfrom src.Python4Dyna import * \r\nfrom src.Dyna2db import * \r\nfrom src.geolab import * \r\n \r\nif __name__ == \"__main__\":\r\n script_path = os.path.abspath(os.path.realpath(__file__))\r\n script_dir, script_name = os.path.split(script_path)\r\n os.chdir(script_dir)\r\n \r\n files = os.listdir(os.getcwd())\r\n if len(sys.argv)>1: files=[sys.argv[1]]\r\n for df in files:\r\n if df.endswith(\".iob\"):\r\n network=df.replace('.iob','').replace(' ','_')\r\n print(' Converting to DynaML: ' + network)\r\n datum, geoid = iob2DynaML(df)\r\n print(' Adjusting: ' + network)\r\n LSA(network, datum, geoid)\r\n \r\n network=df.replace('.iob','.phased-stage').replace(' ','_')\r\n if os.path.exists(network + '.adj'):\r\n db='network.db'\r\n create_DynaML_db(db)\r\n adj_stats, adj_stns, adj_obs=import_adj (network + '.adj',db)\r\n if os.path.exists(network + '.apu'):\r\n 
apu_stns=import_apu(network + '.apu',db)\r\n export_lst(adj_stats,adj_stns,apu_stns,adj_obs)\r\n else:\r\n print('Lst File not printed...apu not created')\r\n coords, xyz_bases = adj2Dicts(network + '.adj')\r\n if len(xyz_bases) > 0: \r\n coords, bases = gnss_xyz2dah(coords, xyz_bases)\r\n Create_DynaBAS(network, bases)\r\n #DynaML2db(network)\r\n #db2Trivials(network+'.db')\r\n else:\r\n print('Error in Adjustment...Check Dynadjust reported errors')\r\n \r\n files = os.listdir(os.getcwd())\r\n for f in files:\r\n if f.endswith('.aml'):os.remove(f)\r\n if f.endswith('.asl'):os.remove(f)\r\n if f.endswith('.bms'):os.remove(f)\r\n if f.endswith('.bst'):os.remove(f)\r\n if f.endswith('.dbid'):os.remove(f)\r\n if f.endswith('.dnaproj'):os.remove(f)\r\n if f.endswith('.imp'):os.remove(f)\r\n if f.endswith('.map'):os.remove(f)\r\n if f.endswith('.mtx'):os.remove(f)\r\n if f.endswith('.seg'):os.remove(f)\r\n if f.endswith('.xyz'):os.remove(f)\r\n if f.endswith('.db'):os.remove(f)\r\n if f.endswith('.lg.xml'):os.remove(f)\r\n if f.endswith('.adj') or f.endswith('.apu'):\r\n shutil.move(f, f.replace('.phased-stage',''))\r\n \r\n print('\\n============== Complete ==============')\r\n","sub_path":"iob2DynaML.py","file_name":"iob2DynaML.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"486706153","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Aug 29 13:42:42 2018\r\n\r\n@author: sukandulapati\r\n\"\"\"\r\n\r\nimport os\r\nimport pandas as pd\r\n\r\nfoldernames = os.listdir(\"D:/TGS/etl3/\")\r\n# map each sentiment folder to its files (renamed to avoid shadowing the built-in 'dict')\r\nfolders = {}\r\nfor f in foldernames:\r\n folders[f] = os.listdir(\"D:/TGS/etl3/\" + f)\r\n\r\n\r\ndata1 = [[sentiment, file_name]\r\n for sentiment, files in folders.items()\r\n for file_name in files\r\n ]\r\n\r\ndf1 = pd.DataFrame(data1, columns=['sentiment', 'file_name'])\r\n\r\ndf1['file_id'], df1['rating'] = df1['file_name'].str.split('_', 1).str\r\ndf1['file_part'], _ = df1['file_name'].str.split('.', 1).str\r\ndf1.head()\r\ndf1.tail()\r\n\r\n\r\nneg_df = df1.loc[df1['sentiment'] == 'neg']\r\n\r\npos_df = df1.loc[df1['sentiment'] == 'pos']\r\n\r\n\r\ndef get_dict(files, path):\r\n my_dict = {}\r\n for file in files:\r\n with open(path+file, encoding=\"utf8\") as f:\r\n items = [i.strip() for i in f.read().split(\",\")]\r\n my_dict[file.replace(\".txt\", \"\")] = items\r\n return my_dict\r\n \r\n\r\n# df for positive sentiment\r\nfiles = os.listdir(\"D:/TGS/etl3/pos/\")\r\npath = \"D:/TGS/etl3/pos/\"\r\n\r\nmy_dict = get_dict(files,path)\r\n\r\npos_df['value']= pos_df['file_part'].map(my_dict)\r\n\r\npos_df.head()\r\n\r\n# df for negative sentiment\r\nfiles = os.listdir(\"D:/TGS/etl3/neg/\")\r\npath = \"D:/TGS/etl3/neg/\"\r\nmy_dict = get_dict(files, path)\r\n\r\nneg_df['value']= neg_df['file_part'].map(my_dict)\r\n\r\nneg_df.head()\r\n\r\n","sub_path":"data_pre_processing/etl_process.py","file_name":"etl_process.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"650062501","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 30 13:44:43 2016\n\n@author: wolfensb\n\"\"\"\n\nimport json\nimport numpy as np\n\nfrom pyradsim.hydrometeor import Hydrometeor\nfrom pyradsim.tmatrix import compute_pol_var\n\nclass Box(object):\n def __init__(self,name,box,config):\n self.name = name\n self.radar = box['radar']\n self.atmosphere = box['atmosphere']\n self.geometry = box['geometry']\n self.weight =
box['weight']\n self.hydrometeors = []\n self.config = config\n \n for k in box['hydrometeors']:\n self.hydrometeors.append(Hydrometeor(k,box['hydrometeors'][k],\n self.radar['frequency'],self.atmosphere['T'],\n self.geometry['elevation_angle'],self.config['nbins_d'])) \n \n def update(self,box,config):\n self.radar = box['radar']\n self.atmosphere = box['atmosphere']\n self.geometry = box['geometry']\n self.weight = box['weight']\n self.config = config\n for h in self.hydrometeors:\n h.update(box['hydrometeors'][h.name],\n self.radar['frequency'],self.atmosphere['T'],\n self.geometry['elevation_angle'],self.config['nbins_d']) \n \n def get_ensemble_SZ(self):\n ensemble_S = np.zeros((4,),dtype=complex) # Amplitutde matrix\n ensemble_Z = np.zeros((16,)) # Phase matrix\n \n for h in self.hydrometeors: \n print('Simulating scattering of hydrometeor '+h.name)\n # Compute scattering matrices\n integ_S, integ_Z = h.get_SZ_integrated()\n\n ensemble_S += integ_S\n ensemble_Z += integ_Z\n return ensemble_S, ensemble_Z\n \n def get_pol_vars(self):\n ensemble_S, ensemble_Z = self.get_ensemble_SZ()\n pol = compute_pol_var(ensemble_S,ensemble_Z,self.radar['frequency']) \n return pol\n \n def __str__(self):\n msg = '\\n%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\\n'\n msg += 'BOX: '+self.name+'\\n'\n msg += '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\\n\\n'\n msg += 'Hydrometeors\\n'\n msg += '-------------------\\n'\n for h in self.hydrometeors:\n msg += h.__str__()\n msg += 'Radar\\n'\n msg += '-------------------\\n'\n msg += json.dumps(self.radar,indent=3) +'\\n\\n'\n msg += 'Geometry\\n'\n msg += '-------------------\\n'\n msg += json.dumps(self.geometry,indent=3) +'\\n\\n'\n msg += 'Atmosphere\\n'\n msg += '-------------------\\n'\n msg += json.dumps(self.atmosphere,indent=3) +'\\n\\n'\n msg += 'Weight\\n'\n msg += '-------------------\\n'\n msg += json.dumps(self.weight,indent=3) +'\\n'\n \n return msg\n \n \n","sub_path":"pyradsim/box.py","file_name":"box.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"315394550","text":"import logging\nimport json\nimport asyncio\n\nfrom utils.database import db_functions as db\nfrom utils.functions_ import is_empty\n\nfrom discord import Embed, Colour\nfrom discord.ext.commands import Bot, Cog, command\n\nlog = logging.getLogger('bot.' + __name__)\n\n\nwith open('jsons/shop.json', 'r') as f:\n data = json.load(f)\n\n\nclass ShopCog(Cog, name='Shop'):\n \"\"\"Commands related to the shop.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n self.config = self.bot.config\n\n @command(name=\"shop\")\n async def shop_cmd(self, ctx):\n \"\"\"View all sellable items and their values.\"\"\"\n desc = \"Current materials and their values. \\n\"\n for item in data[0]:\n desc += f\"**{item}:** {data[0][item]} gold. 
\\n\"\n embed = Embed(title=\"Shop\", colour=Colour.blurple(), description=desc)\n embed.set_footer(text=\"Use `!sell {item}` to sell an item.\")\n return await ctx.send(embed=embed)\n\n @command(name=\"sell\")\n async def sell_item(self, ctx, item_to_sell):\n \"\"\"Sell 1 or multiple items.\"\"\"\n db_connection = await db.dbconnection()\n cursor = await db_connection.cursor()\n channel = ctx.channel\n user_id = ctx.author.id\n for item in list(data[0]):\n if item.lower() == item_to_sell.lower():\n item_to_sell = item\n item_exists = True\n if 'item_exists' in locals():\n sql = \"SELECT \" + item_to_sell.lower() + \" FROM inventory WHERE character_id = %s\"\n val = user_id\n await cursor.execute(sql, (val,))\n result = await cursor.fetchall()\n amount = result[0][0]\n if amount > 0:\n await ctx.send(f\"You have {amount} {item_to_sell}. How much would you like to sell?\")\n try:\n def check(m):\n return m.channel == channel and ctx.author == m.author\n msg = await self.bot.wait_for('message', timeout=15.0, check=check)\n except asyncio.TimeoutError:\n return await ctx.send('No response. Action cancelled.')\n amount_to_sell = msg.content\n if amount_to_sell.isdigit() is True:\n if int(amount_to_sell) > int(amount):\n return await ctx.send(f\"You don't have enough {item_to_sell} to do that.\")\n else:\n selling_value = data[0][item_to_sell]\n sold_for = int(selling_value) * int(amount_to_sell)\n sql = f\"UPDATE inventory SET {item_to_sell} = {item_to_sell} - {amount_to_sell}, gold = gold + {sold_for} WHERE character_id = %s\"\n val = user_id\n await cursor.execute(sql, (val,))\n await db_connection.commit()\n await cursor.close()\n db_connection.close()\n return await ctx.send(f\"Sold {amount_to_sell} {item_to_sell} for {sold_for} gold.\")\n else:\n return await ctx.send(\"You have to submit a valid amount.\")\n else:\n return await ctx.send(\"Sorry you do not have enough of that item in order to sell it.\")\n else:\n return await ctx.send(\"The item you are trying to sell does not exist.\")\n\n @command(name=\"autosell\")\n async def autosell_cmd(self, ctx):\n \"\"\"Used to turn autoselling on or off.\"\"\"\n user_id = ctx.author.id\n sql = \"SELECT autosell FROM `character` WHERE user_id = '%s'\"\n val = user_id\n db_connection = await db.dbconnection()\n cursor = await db_connection.cursor()\n await cursor.execute(sql, (val,))\n result = await cursor.fetchall()\n autosell = result[0][0]\n if autosell == 'true':\n sql = \"UPDATE `character` SET autosell = %s WHERE user_id = '%s'\"\n val = ('false', user_id)\n await cursor.execute(sql, val)\n await db_connection.commit()\n await cursor.close()\n db_connection.close()\n return await ctx.send(\"Turned autosell off. This command can be used any time to turn it on.\")\n if autosell == 'false':\n sql = \"UPDATE `character` SET autosell = %s\"\n val = 'true'\n await cursor.execute(sql, (val,))\n await db_connection.commit()\n await cursor.close()\n db_connection.close()\n return await ctx.send(\"Turned autosell on. 
This command can be used any time to turn it off.\")\n\n\ndef setup(bot):\n bot.add_cog(ShopCog(bot))\n log.debug('Loaded')\n","sub_path":"cogs/shop.py","file_name":"shop.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"364630204","text":"# -*- coding: utf-8 -*-\n# pylint: disable=global-statement,invalid-name,missing-docstring\n\"\"\"The lexer, which uses ply.\n\nThere are 3 lexer modes: normal (initial), string-parsing, and literal-parsing.\nLiteral means either msg (bot output) or call (divert to another command).\n\nLALR parsers do not go very well with optional newlines, so there is some\nspecial handlings of that there. One postcondition of the lexer is that it\nnever outputs two NEWLINE in a row, and also swallows newlines after most\nsymbols which cannot end statements. At some later point we can reexamine\nhow those ambiguities resolve and fix things.\n\nMajor TODOs are switching to class-based lexer, as in\nhttp://www.dabeaz.com/ply/ply.html#ply_nn17. Then most pylint exceptions can\nbe removed.\n\"\"\"\nimport re\nfrom ply import lex\n\nfrom bufscript import optypes\n\nreserved = {\n 'IF': 'if',\n 'ELSE': 'else',\n 'TRUE': 'true',\n 'FALSE': 'false',\n 'SWITCH': 'switch',\n 'CASE': 'case',\n 'DEFAULT': 'default',\n 'QUIT': 'quit',\n 'WHILE': 'while',\n}\nsymbols = {\n 'LEFT_PAREN': '(',\n 'RIGHT_PAREN': ')',\n 'LEFT_BRACE': '{',\n 'RIGHT_BRACE': '}',\n 'COMMA': ',',\n 'ASSIGN': '=',\n 'SEMICOLON': ';',\n 'COLON': ':',\n 'RANGE': '..',\n 'INCREMENT': '++',\n 'DECREMENT': '--',\n 'PLUS': '+',\n 'MINUS': '-',\n 'MUL': '*',\n 'DIV': '/',\n 'MOD': '%',\n 'AND': '&&',\n 'OR': '||',\n 'ISEQUAL': '==',\n 'NOTEQUAL': '!=',\n 'LESSTHAN': '<',\n 'LESSEQ': '<=',\n 'GREATERTHAN': '>',\n 'GREATEREQ': '>=',\n 'NOT': '!',\n}\n# Symbols that can never end statements, basically\nno_swallow_eol = [\n reserved['TRUE'],\n reserved['FALSE'],\n symbols['RIGHT_PAREN'],\n symbols['RIGHT_BRACE'],\n]\nswallow_eol = set(list(reserved.values()) + list(symbols.values())) - set(no_swallow_eol)\n\nreserved_map = dict((v, k) for k, v in list(reserved.items()))\nsymbol_map = dict((v, k) for k, v in list(symbols.items()))\n# Longer symbols first. 
Exclude semicolons because it affects lexer states, it's a separate rule.\nsymbol_re = '|'.join(re.escape(r) for r in reversed(sorted(symbols.values())) if r != ';')\n\ntokens = tuple(reserved.keys()) + tuple(symbols.keys()) + (\n # INTCONST above WORD\n 'INTCONST',\n 'WORD',\n # VAR and FLAG over anything else in strings/literals, FLAG over ANYFLAG\n 'VAR',\n 'FLAG',\n 'ANYFLAG',\n # Special symbols which change states.\n # Probably STRING should be called QUOTE.\n 'STRING',\n 'LITERAL',\n 'NEWLINE',\n # String/literal bits\n 'LITERALBIT',\n 'STRINGBIT',\n 'ESCAPE',\n 'BADESCAPE',\n)\nstates = (\n ('string', 'exclusive'),\n ('literal', 'exclusive'),\n)\n\n# Position of the last swallow_eol symbol (or last newline)\n# Once the lexer becomes a class, handling can be put in a common place.\nlast_symbol = None\n\n@lex.TOKEN(symbol_re)\ndef t_symbol(t):\n t.type = symbol_map[t.value]\n #print t.value, t.value in swallow_eol\n if t.value in swallow_eol:\n global last_symbol\n last_symbol = t.lexpos + len(t.value)\n return t\n\ndef t_INITIAL_literal_SEMICOLON(t):\n r';'\n t.lexer.begin('INITIAL')\n if t.value in swallow_eol:\n global last_symbol\n last_symbol = t.lexpos + len(t.value)\n return t\n\n# Ignore anything between a double slash and the end of the line - would this\n# break if given, for example, \"and now I use a double slash: //. End of Line\"\n# as a string? Needs investigation.\ndef t_LINE_COMMENT(t):\n r'\\#[^\\n]*'\n # Ignore complete line commands for the sake of these\n global last_symbol\n if last_symbol == t.lexpos:\n last_symbol += len(t.value)\n\ndef t_INTCONST(t):\n r'[0-9]+'\n t.value = int(t.value)\n return t\n\ndef t_WORD(t):\n r'[\\w\\d]+'\n #r'[\\w\\d]*[\\w][\\w\\d]*'\n t.type = reserved_map.get(t.value, 'WORD')\n if t.value in swallow_eol:\n global last_symbol\n last_symbol = t.lexpos + len(t.value)\n return t\n\ndef t_ANY_VAR(t):\n r'\\$[a-zA-Z][a-zA-Z0-9]*'\n return t\n\n# This is a separate token to allow @_$1_more or @_u000\n@lex.TOKEN(optypes.FLAG_RE)\ndef t_string_literal_FLAG(t):\n return t\n\n# Flag in a regular code context, validated later.\ndef t_ANY_ANYFLAG(t):\n r'@_[a-zA-Z0-9\\$\\*_]*'\n return t\n\n# Must be two characters long\ndef t_string_literal_ESCAPE(t):\n r'\\\\[;\"$\\\\@]'\n return t\n\ndef t_string_literal_BADESCAPE(t):\n r'\\\\[^;\"$\\\\@]'\n return t\n\n# TODO: Because STRINGBIT and LITERALBIT exclude @ and $, standalone instances\n# of those cause lex failures.\ndef t_string_STRINGBIT(t):\n r'[^\\\\\"\\n@\\$]+'\n return t\n\ndef t_literal_LITERALBIT(t):\n r'[^\\\\;\\n@\\$]+'\n return t\n\ndef t_INITIAL_string_STRING(t):\n r'\"'\n if t.lexer.in_string:\n t.lexer.begin('INITIAL')\n else:\n t.lexer.begin('string')\n t.lexer.in_string = not t.lexer.in_string\n return t\n\ndef t_LITERAL(t):\n r'[~`]'\n t.lexer.begin('literal')\n return t\n\ndef t_ANY_linebreak(t):\n r'\\\\\\n'\n t.lexer.line_ends.append(t.lexpos + 1)\n t.lexer.lineno += 1\n\n\ndef t_ignore_space(t):\n r'[ \\t]+'\n global last_symbol\n if last_symbol == t.lexpos:\n last_symbol += len(t.value)\n\n\ndef t_INITIAL_literal_NEWLINE(t):\n r'\\n'\n t.lexer.begin('INITIAL')\n t.lexer.line_ends.append(t.lexpos)\n t.lexer.lineno += 1\n # TODO: If the last word allowed swallowing this (can not appear at end of statement),\n # swallow this instead.\n this_symbol = t.lexpos + len(t.value)\n global last_symbol\n if last_symbol == t.lexpos:\n last_symbol = this_symbol\n return None\n else:\n last_symbol = this_symbol\n return t\n\ndef t_ANY_error(t):\n row, col = find_token_location(t)\n if 
t.value[0] == '\\n':\n print('Unexpected newline at %d:%d' % (row, col))\n else:\n print('Illegal character %s at %d:%d' % (t.value[0], row, col))\n # Maybe introduce a special state to handle this.\n if t.lexer.in_string and t.value[0] == '\\n':\n t.lexer.in_string = False\n t.lexer.begin('INITIAL')\n t.lexer.skip(1)\n\ndef t_ANY_eof(t):\n t.lexer.begin('INITIAL')\n t.lexer.in_string = False\n\ndef find_token_location(token):\n last_cr = token.lexer.line_ends[token.lineno - 1]\n return (token.lineno, token.lexpos - last_cr)\n\nlexer = lex.lex()\nlexer.line_ends = [-1]\nlexer.in_string = False\n\n# Test only methods.\n# TODO: Make this a class instead.\ndef reset():\n lexer.line_ends = [-1]\n lexer.in_string = False\n global last_symbol\n last_symbol = None\n\ndef plex(s):\n print(s)\n print('-----------------')\n lexer.input(s)\n u = True\n while u:\n u = lexer.token()\n if u:\n print(u)\n\n","sub_path":"bufscript/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":6856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"474173551","text":"# -*- coding: utf-8 -*-\n'''\nCreated on Mon Nov 11 10:15:40 2019\n\nVersion Control:\nVersion Date Person Change\n 1.1 30Dec19 JMY Added timing to check elapsed time for each function in pasture functions\n\nKnown problems:\nFixed Date ID by Problem\n\n\n@author: John\n'''\nimport numpy as np\nfrom timeit import default_timer as timer\n\ntime_list = [] ; time_was = []\ntime_list.append(timer()) ; time_was.append(\"start\")\n\nimport StructuralInputs as sinp\nimport PropertyInputs as pinp\nimport UniversalInputs as uinp\nimport Periods as per\nimport Functions as fun\nimport SeasonalFunctions as zfun\nimport Sensitivity as sen\nimport FeedsupplyFunctions as fsfun\n\ntime_list.append(timer()) ; time_was.append(\"import Universal\")\n\nimport Pasture as pas\n\ntime_list.append(timer()) ; time_was.append(\"import Pasture\")\n\nparams={}\nr_vals={}\n\n###############\n#User control #\n###############\ntrial = 4 #4 is quick test\n\n##sort exp\nexp_data, exp_group_bool, trial_pinp = fun.f_read_exp()\nexp_data = fun.f_group_exp(exp_data, exp_group_bool)\n##select property for the current trial\npinp.f_select_pinp(trial_pinp.iloc[trial])\n\n##update sensitivity values\nsen.create_sa()\nfun.f_update_sen(trial,exp_data,sen.sam,sen.saa,sen.sap,sen.sar,sen.sat,sen.sav)\n##call sa functions - assigns sa variables to relevant inputs\nsinp.f_structural_inp_sa()\nuinp.f_universal_inp_sa()\npinp.f_property_inp_sa()\n##expand p6 axis to include nodes\nsinp.f1_expand_p6()\npinp.f1_expand_p6()\n\n##Populate the nv dict with the input values for the nv cutoffs (normally are from StockGenerator)\n### create nv dict\nnv={}\n### read values from the pasture_inputs dictionary\npas_inc = np.array(pinp.general['pas_inc'])\npastures = sinp.general['pastures'][pas_inc]\nexceldata = pinp.pasture_inputs[pastures[0]] # assign to exceldata the pasture data for the first pasture type (annuals)\ni_me_maintenance_vf = exceldata['MaintenanceEff'][:, 1:].T\n##add nv params to dict for use in pasture.py\nn_non_confinement_pools=4\nconfinement_inc = False\nindex_f = np.arange(n_non_confinement_pools+confinement_inc)\n##create the upper and lower cutoffs. 
If there is a confinement slice then it will be populated with values but they never get used.\n###get association between the input fp and the node adjusted fp\na_p6std_p6z = per.f_feed_periods(option=2)\n###apply association\n####stock\nsinp.structuralsa['i_nv_upper_p6z'] = np.take_along_axis(sinp.structuralsa['i_nv_upper_p6'][:,None],a_p6std_p6z,axis=0)\nsinp.structuralsa['i_nv_lower_p6z'] = np.take_along_axis(sinp.structuralsa['i_nv_lower_p6'][:,None],a_p6std_p6z,axis=0)\n\nnv_upper_p6fz = sinp.structuralsa['i_nv_upper_p6z'][:,None,:]\nnv_upper_p6fz = zfun.f_seasonal_inp(nv_upper_p6fz,numpy=True,axis=-1)\nnv_lower_p6fz = sinp.structuralsa['i_nv_lower_p6z'][:,None,:]\nnv_lower_p6fz = zfun.f_seasonal_inp(nv_lower_p6fz,numpy=True,axis=-1)\nnv_cutoff_lower_p6fz = nv_lower_p6fz + (\n nv_upper_p6fz - nv_lower_p6fz) / n_non_confinement_pools * index_f[:,None]\nnv_cutoff_upper_p6fz = nv_lower_p6fz + (nv_upper_p6fz - nv_lower_p6fz) / n_non_confinement_pools * (\n index_f[:,None] + 1)\n###Average these values to be passed to Pasture.py for efficiency of utilising ME and add to the dict\nnv_cutoff_ave_p6fz = (nv_cutoff_lower_p6fz + nv_cutoff_upper_p6fz) / 2\nnv['nv_cutoff_ave_p6fz'] = nv_cutoff_ave_p6fz\nnv['confinement_inc'] = confinement_inc\nnv['len_nv'] = n_non_confinement_pools+confinement_inc\n\n##call pasture module\npas.f_pasture(params, r_vals, nv)\n\n\ntime_list.append(timer()) ; time_was.append(\"Pasture complete\")\n\n\n# pas.map_excel(params,r_vals)\n#pas.map_excel('Property.xlsx') # read inputs from Excel file and map to the python variables\n# time_list.append(timer()) ; time_was.append(\"init & read inputs from Excel\")\n\n\n# pas.calculate_germ_and_reseed(params) # calculate the germination for each rotation phase\n# a = pas.foo_grn_reseeding_p6lrt\n# b = a[:,4,...]\n# c = np.sum(b, axis = 1)\n# time_list.append(timer()) ; time_was.append(\"germination & reseeding\")\n\n# pas.green_and_dry(params, r_vals, nv) # calculate the FOO lost when destocked and the FOO gained when grazed after establishment\n# time_list.append(timer()) ; time_was.append(\"green feed & dry feed\")\n\n# poc_con_p6t = pas.poc(params) # calculate the pasture on crop paddocks\n# poc_md_p6t = pas.poc_md() # calculate the pasture on crop paddocks\n# poc_vol_p6t = pas.poc_vol() # calculate the pasture on crop paddocks\n# print(poc_vol_ft)\n# time_list.append(timer()) ; time_was.append(\"poc\")\n\n\n\n\n#report the timer results\ntime_prev=time_list[0]\nfor time_step, time in enumerate(time_list):\n time_elapsed = time-time_prev\n if time_elapsed > 0: print(time_was[time_step], f\"{time_elapsed:0.4f}\", \"secs\")\n time_prev=time\nprint(\"elapsed total time for pasture module\", f\"{time_list[-1] - time_list[0]:0.4f}\", \"secs\") # Time in seconds\n\n\n#test times\n#def test1():\n# annual.germ_phase_data.columns.values[range(phase_len)] = [*range(phase_len)]\n#def test2():\n# annual.germ_phase_data.columns.values[0:phase_len] = [*range(phase_len)]\n#\n#print(timeit.repeat(test1,number=5,repeat=10))\n#print(timeit.repeat(test2,number=5,repeat=10))\n","sub_path":"lib/AfoLogic/PastureTest.py","file_name":"PastureTest.py","file_ext":"py","file_size_in_byte":5307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"495538156","text":"def LCS(A,B):\n n_a=len(A)\n n_b=len(B)\n matrixOfLength=[[0]*(n_a+1) for i in range(n_b+1)]\n for i in range(1,n_b+1):\n for j in range(1,n_a+1):\n if A[j-1]==B[i-1]:\n matrixOfLength[i][j]=matrixOfLength[i-1][j-1]+1\n else:\n 
matrixOfLength[i][j]=max(matrixOfLength[i-1][j],matrixOfLength[i][j-1])\n # print(matrixOfLength)\n # print(matrixOfLength[n_b][n_a])\n return matrixOfLength[n_b][n_a]\nA=[1,2,6,3,5]\nB=[1,4,2, 5]\nLCS(A,B)","sub_path":"task2_1.py","file_name":"task2_1.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"104999288","text":"import socket\nimport sys\nimport os\nimport argparse\nimport json\nimport logging\nimport select\nimport time\nimport threading\nimport configparser\nimport logs.configs.config_server_log\nfrom common.variables import DEFAULT_PORT, DEFAULT_IP_ADDRESS, STATUS, REMOVE_CONTACT, RESPONSE_400,\\\n ACTION, TIME, USER, ACCOUNT_NAME, SENDER, PRESENCE, RESPONSE, USERS_REQUEST, RESPONSE_200, \\\n ERROR, MESSAGE, MESSAGE_TEXT, DESTINATION, EXIT, LIST_INFO, ADD_CONTACT, GET_CONTACTS, RESPONSE_202\nfrom common.utils import get_message, send_message\nfrom common.decors import log\nfrom descrptrs import Port\nfrom metaclasses import ServerMaker\nfrom server_database import ServerStorage\nfrom PyQt5.QtWidgets import QApplication, QMessageBox\nfrom PyQt5.QtCore import QTimer\nfrom server_gui import MainWindow, gui_create_model, HistoryWindow, create_stat_model, ConfigWindow\nfrom PyQt5.QtGui import QStandardItemModel, QStandardItem\n\n# Инициализация логирования сервера.\nLOGGER = logging.getLogger('server')\n\n# Флаг что был подключён новый пользователь, нужен чтобы не мучать BD постоянными запросами на обновление\nnew_connection = False\nconflag_lock = threading.Lock()\n\n\n# Парсер аргументов коммандной строки.\n@log\ndef createParser(default_port, default_address):\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--port', type=int, default=default_port, )\n parser.add_argument('-a', '--address', default=default_address, )\n args = parser.parse_args(sys.argv[1:])\n listen_port = args.port\n listen_address = args.address\n return listen_address, listen_port\n\n\n# Основной класс сервера\nclass Server(threading.Thread, metaclass=ServerMaker):\n port = Port()\n\n def __init__(self, listen_address, listen_port, database):\n # Параментры подключения\n self.addr = listen_address\n self.port = listen_port\n\n # База данных сервера\n self.database = database\n\n # Список подключённых клиентов.\n self.clients = []\n\n # Список сообщений на отправку.\n self.messages = []\n\n # Словарь содержащий сопоставленные имена и соответствующие им сокеты.\n self.names = dict()\n\n # Конструктор предка\n super().__init__()\n\n def init_socket(self):\n LOGGER.info(\n f'Запущен сервер, порт для подключений: {self.port} , адрес с которого принимаются подключения: {self.addr}. 
Если адрес не указан, принимаются соединения с любых адресов.')\n # Готовим сокет\n skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n skt.bind((self.addr, self.port))\n skt.settimeout(0.5)\n\n # Начинаем слушать сокет.\n self.sock = skt\n self.sock.listen()\n\n def run(self):\n # Инициализация Сокета\n global new_connection\n self.init_socket()\n\n # Основной цикл программы сервера\n while True:\n # Ждём подключения, если таймаут вышел, ловим исключение.\n try:\n client, client_address = self.sock.accept()\n except OSError:\n pass\n else:\n LOGGER.info(f'Установлено соедение с ПК {client_address}')\n self.clients.append(client)\n\n recv_data_lst = []\n send_data_lst = []\n err_lst = []\n # Проверяем на наличие ждущих клиентов\n try:\n if self.clients:\n recv_data_lst, send_data_lst, err_lst = select.select(self.clients, self.clients, [], 0)\n except OSError as err:\n LOGGER.error(f'Ошибка работы с сокетами: {err}')\n\n # принимаем сообщения и если ошибка, исключаем клиента.\n if recv_data_lst:\n for client_with_message in recv_data_lst:\n try:\n self.process_client_message(get_message(client_with_message), client_with_message)\n except (OSError):\n # Ищем клиента в словаре клиентов и удаляем его из него и базы подключённых\n LOGGER.info(f'Клиент {client_with_message.getpeername()} отключился от сервера.')\n for name in self.names:\n if self.names[name] == client_with_message:\n self.database.user_logout(name)\n del self.names[name]\n break\n self.clients.remove(client_with_message)\n with conflag_lock:\n new_connection = True\n\n # Если есть сообщения, обрабатываем каждое.\n for message in self.messages:\n try:\n self.process_message(message, send_data_lst)\n except (ConnectionAbortedError, ConnectionError, ConnectionResetError, ConnectionRefusedError):\n LOGGER.info(f'Связь с клиентом с именем {message[DESTINATION]} была потеряна')\n self.clients.remove(self.names[message[DESTINATION]])\n self.database.user_logout(message[DESTINATION])\n del self.names[message[DESTINATION]]\n with conflag_lock:\n new_connection = True\n self.messages.clear()\n\n # Функция адресной отправки сообщения определённому клиенту. Принимает словарь сообщение, список зарегистрированых\n # пользователей и слушающие сокеты. 
Ничего не возвращает.\n def process_message(self, message, listen_socks):\n if message[DESTINATION] in self.names and self.names[message[DESTINATION]] in listen_socks:\n send_message(self.names[message[DESTINATION]], message)\n LOGGER.info(f'Отправлено сообщение пользователю {message[DESTINATION]} от пользователя {message[SENDER]}.')\n elif message[DESTINATION] in self.names and self.names[message[DESTINATION]] not in listen_socks:\n raise ConnectionError\n else:\n LOGGER.error(\n f'Пользователь {message[DESTINATION]} не зарегистрирован на сервере, отправка сообщения невозможна.')\n\n # Обработчик сообщений от клиентов, принимает словарь - сообщение от клиента, проверяет корректность, отправляет\n # словарь-ответ в случае необходимости.\n def process_client_message(self, message, client):\n global new_connection\n LOGGER.debug(f'Разбор сообщения от клиента : {message}')\n\n # Если это сообщение о присутствии, принимаем и отвечаем\n if ACTION in message and message[ACTION] == PRESENCE and TIME in message and USER in message:\n # Если такой пользователь ещё не зарегистрирован, регистрируем, иначе отправляем ответ и завершаем соединение.\n if message[USER][ACCOUNT_NAME] not in self.names.keys():\n self.names[message[USER][ACCOUNT_NAME]] = client\n client_ip, client_port = client.getpeername()\n self.database.user_login(message[USER][ACCOUNT_NAME], client_ip, client_port)\n send_message(client, RESPONSE_200)\n with conflag_lock:\n new_connection = True\n else:\n response = RESPONSE_400\n response[ERROR] = 'Имя пользователя уже занято.'\n send_message(client, response)\n self.clients.remove(client)\n client.close()\n return\n\n # Если это сообщение, то добавляем его в очередь сообщений. проверяем наличие в сети. и отвечаем.\n elif ACTION in message and message[ACTION] == MESSAGE and DESTINATION in message and TIME in message \\\n and SENDER in message and MESSAGE_TEXT in message and self.names[message[SENDER]] == client:\n if message[DESTINATION] in self.names:\n self.messages.append(message)\n self.database.process_message(message[SENDER], message[DESTINATION])\n send_message(client, RESPONSE_200)\n else:\n response = RESPONSE_400\n response[ERROR] = 'Пользователь не зарегистрирован на сервере.'\n send_message(client, response)\n return\n\n # Если клиент выходит\n elif ACTION in message and message[ACTION] == EXIT and ACCOUNT_NAME in message \\\n and self.names[message[ACCOUNT_NAME]] == client:\n self.database.user_logout(message[ACCOUNT_NAME])\n LOGGER.info(f'Клиент {message[ACCOUNT_NAME]} корректно отключился от сервера.')\n self.clients.remove(self.names[message[ACCOUNT_NAME]])\n self.names[message[ACCOUNT_NAME]].close()\n del self.names[message[ACCOUNT_NAME]]\n with conflag_lock:\n new_connection = True\n return\n\n # Если это запрос контакт-листа\n elif ACTION in message and message[ACTION] == GET_CONTACTS and USER in message and \\\n self.names[message[USER]] == client:\n response = RESPONSE_202\n response[LIST_INFO] = self.database.get_contacts(message[USER])\n send_message(client, response)\n\n # Если это добавление контакта\n elif ACTION in message and message[ACTION] == ADD_CONTACT and ACCOUNT_NAME in message and USER in message \\\n and self.names[message[USER]] == client:\n self.database.add_contact(message[USER], message[ACCOUNT_NAME])\n send_message(client, RESPONSE_200)\n\n # Если это удаление контакта\n elif ACTION in message and message[ACTION] == REMOVE_CONTACT and ACCOUNT_NAME in message and USER in message \\\n and self.names[message[USER]] == client:\n 
\n            self.database.remove_contact(message[USER], message[ACCOUNT_NAME])\n            send_message(client, RESPONSE_200)\n\n        # If it is a request for known users\n        elif ACTION in message and message[ACTION] == USERS_REQUEST and ACCOUNT_NAME in message \\\n                and self.names[message[ACCOUNT_NAME]] == client:\n            response = RESPONSE_202\n            response[LIST_INFO] = [user[0] for user in self.database.users_list()]\n            send_message(client, response)\n\n        # Otherwise return Bad request\n        else:\n            response = RESPONSE_400\n            response[ERROR] = 'The request is invalid.'\n            send_message(client, response)\n            return\n\n\n# Loads the configuration file\ndef config_load():\n    config = configparser.ConfigParser()\n    dir_path = os.path.dirname(os.path.realpath(__file__))\n    config.read(f\"{dir_path}/{'server.ini'}\")\n    # If the config file loaded correctly, run with it; otherwise use the default config.\n    if 'SETTINGS' in config:\n        return config\n    else:\n        config.add_section('SETTINGS')\n        config.set('SETTINGS', 'Default_port', str(DEFAULT_PORT))\n        config.set('SETTINGS', 'Listen_Address', '')\n        config.set('SETTINGS', 'Database_path', '')\n        config.set('SETTINGS', 'Database_file', 'server_database.db3')\n        return config\n\n\ndef main():\n    # Load the server configuration file\n    config = config_load()\n\n    # Load the command line parameters; if none are given, use the default values.\n    listen_address, listen_port = createParser(config['SETTINGS']['Default_port'], config['SETTINGS']['Listen_Address'])\n\n    # Initialise the database\n    database = ServerStorage(os.path.join(config['SETTINGS']['Database_path'], config['SETTINGS']['Database_file']))\n\n    # Create the server class instance and start it:\n    server = Server(listen_address, listen_port, database)\n    server.daemon = True\n    server.start()\n\n    # Create the graphical environment for the server:\n    server_app = QApplication(sys.argv)\n    main_window = MainWindow()\n\n    # Initialise the window parameters\n    main_window.statusBar().showMessage('Server Working')\n    main_window.active_clients_table.setModel(gui_create_model(database))\n    main_window.active_clients_table.resizeColumnsToContents()\n    main_window.active_clients_table.resizeRowsToContents()\n\n    # Refreshes the connected-clients list: checks the connection flag and updates the list if needed\n    def list_update():\n        global new_connection\n        if new_connection:\n            main_window.active_clients_table.setModel(gui_create_model(database))\n            main_window.active_clients_table.resizeColumnsToContents()\n            main_window.active_clients_table.resizeRowsToContents()\n            with conflag_lock:\n                new_connection = False\n\n    # Creates the client statistics window\n    def show_statistics():\n        global stat_window\n        stat_window = HistoryWindow()\n        stat_window.history_table.setModel(create_stat_model(database))\n        stat_window.history_table.resizeColumnsToContents()\n        stat_window.history_table.resizeRowsToContents()\n        stat_window.show()\n\n    # Creates the server settings window.\n    def server_config():\n        global config_window\n        # Create the window and fill in the current parameters\n        config_window = ConfigWindow()\n        config_window.db_path.insert(config['SETTINGS']['Database_path'])\n        config_window.db_file.insert(config['SETTINGS']['Database_file'])\n        config_window.port.insert(config['SETTINGS']['Default_port'])\n        config_window.ip.insert(config['SETTINGS']['Listen_Address'])\n        config_window.save_btn.clicked.connect(save_server_config)\n\n    # Saves the server settings\n    def save_server_config():\n        global config_window\n        message = QMessageBox()
\n        config['SETTINGS']['Database_path'] = config_window.db_path.text()\n        config['SETTINGS']['Database_file'] = config_window.db_file.text()\n        try:\n            port = int(config_window.port.text())\n        except ValueError:\n            message.warning(config_window, 'Error', 'The port must be a number')\n        else:\n            config['SETTINGS']['Listen_Address'] = config_window.ip.text()\n            if 1023 < port < 65536:\n                config['SETTINGS']['Default_port'] = str(port)\n                dir_path = os.path.dirname(os.path.realpath(__file__))\n                with open(f\"{dir_path}/{'server.ini'}\", 'w') as conf:\n                    config.write(conf)\n                    message.information(config_window, 'OK', 'Settings saved successfully!')\n            else:\n                message.warning(config_window, 'Error', 'The port must be between 1024 and 65535')\n\n    # Timer that refreshes the client list once per second\n    timer = QTimer()\n    timer.timeout.connect(list_update)\n    timer.start(1000)\n\n    # Connect the buttons to their handlers\n    main_window.refresh_button.triggered.connect(list_update)\n    main_window.show_history_button.triggered.connect(show_statistics)\n    main_window.config_btn.triggered.connect(server_config)\n\n    # Start the GUI\n    server_app.exec_()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"home_work_2_5/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":17303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"194945651","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport unittest\n\nfrom Prototipo.Schedulers.SchedulerSJF import SchedulerSJFPreemptive\nfrom Prototipo.instructions import *\nfrom Prototipo.pcb import PCB\nfrom Prototipo.program import Program\n\n\nclass MyTestCase(unittest.TestCase):\n\n    def setUp(self):\n        # Create the PCB of the first program\n        self._program1 = Program(\"so.exe\", [CPU(2), CPU(1)], 1)\n        self._PCBProgram1 = PCB(0)\n        self._PCBProgram1.initialize(self._program1, self._program1.longitud() // 4)\n\n        # Create the PCB of the second program\n        self._program2 = Program(\"exel.exe\", [CPU(1), IO(1)], 2)\n        self._PCBProgram2 = PCB(1)\n        self._PCBProgram2.initialize(self._program2, self._program2.longitud() // 4)\n\n        # Create the PCB of the third program\n        self._program3 = Program(\"paint.exe\", [CPU(4), IO(1)], 3)\n        self._PCBProgram3 = PCB(2)\n        self._PCBProgram3.initialize(self._program3, self._program3.longitud() // 4)\n\n        # Create the SJF scheduler\n        self._shedulerSJF = SchedulerSJFPreemptive()\n        # Add the pid, the priority (unused in this case),\n        # and the first burst of programs 2 and 3\n        self._shedulerSJF.add(self._PCBProgram2)\n        self._shedulerSJF.add(self._PCBProgram3)\n\n        # Get the first burst of program 1\n        firstBurstProgram1 = self._PCBProgram1.get_firstBurst()\n        # Set the first burst of the program that is in the CPU\n        self._shedulerSJF.set_burstPCBInCPU(firstBurstProgram1)\n\n    def testSchedulerSJF(self):\n\n        # Get the first burst of program 2\n        firstBurstProgram2 = self._PCBProgram2.get_firstBurst()\n        # Check that program 2's burst is smaller than that of the program in the CPU\n        self.assertTrue(self._shedulerSJF.isChange(self._PCBProgram3, self._PCBProgram2))\n\n        # The scheduler dequeues a program\n        pid = self._shedulerSJF.pop()\n        # Check that it dequeued the pid of the program with the smallest burst\n        self.assertEqual(pid, self._PCBProgram2.get_pid())\n\n        # Set the burst of the program that is now in the CPU\n        self._shedulerSJF.set_burstPCBInCPU(firstBurstProgram2)\n\n        # Check that program 3's burst is not smaller than that of the program now in the CPU
\n        self.assertFalse(self._shedulerSJF.isChange(self._PCBProgram2, self._PCBProgram3))\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"src/TestShedulerSJF.py","file_name":"TestShedulerSJF.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"369641482","text":"import pygame\nimport sys\nimport time\nfrom random import randint\nfrom pygame.locals import *\n\npygame.init()\n# Set up the colors\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nSome = (0, 255, 0)\nGold = (255, 215, 0, 255)\n# Set the window\nwindowWidth = 800\nwindowHeight = 400\nwindowSurface = pygame.display.set_mode((windowWidth, windowHeight), 0, 32)\nsurface_rect = windowSurface.get_rect()\npygame.display.set_caption('Pong')\n\n# Fonts that will be used on screen\nfont = pygame.font.SysFont(\"comicsansms\", 30)\nbig_font = pygame.font.SysFont(\"comicsansms\", 70)\nsmall_font = pygame.font.SysFont(\"comicsansms\", 50)\n\n# For the key events later on\nup = False\ndown = False\nleft = False\nright = False\nhit_sound = pygame.mixer.Sound(\"sound.wav\")\nwin_sound = pygame.mixer.Sound(\"winning.wav\")\nlose_sound = pygame.mixer.Sound(\"losing.wav\")\npygame.mixer.music.load(\"Temple_Of_Time.mp3\")\npygame.mixer.music.play(-1, 0.0)\n\n\n# --------------------------------- Paddle ---------------------------------------------------------------------\nclass Paddle(pygame.sprite.Sprite):\n    def __init__(self, player_type):\n        pygame.sprite.Sprite.__init__(self)\n        self.player_type = player_type\n        self.image = pygame.image.load('ex.png') if self.player_type < 10 else pygame.image.load('ex2.png')\n        self.rect = self.image.get_rect()\n        self.speed = 8\n\n        if self.player_type == 1:\n            self.rect.centerx = windowSurface.get_rect().right\n            self.rect.centerx -= 20\n            self.rect.centery = windowSurface.get_rect().centery\n        elif self.player_type == 11:\n            self.rect.centery = windowSurface.get_rect().top\n            self.rect.centery += 10\n            self.rect.centerx = windowSurface.get_rect().right - windowSurface.get_rect().centerx / 2\n        elif self.player_type == 12:\n            self.rect.centery = windowSurface.get_rect().bottom\n            self.rect.centery -= 10\n            self.rect.centerx = windowSurface.get_rect().right - windowSurface.get_rect().centerx / 2\n        elif self.player_type == 2:\n            self.rect.centerx = windowSurface.get_rect().left\n            self.rect.centerx += 20\n            self.rect.centery = windowSurface.get_rect().centery\n        elif self.player_type == 21:\n            self.rect.centery = windowSurface.get_rect().top\n            self.rect.centery += 10\n            self.rect.centerx = windowSurface.get_rect().left + windowSurface.get_rect().centerx / 2\n        elif self.player_type == 22:\n            self.rect.centery = windowSurface.get_rect().bottom\n            self.rect.centery -= 10\n            self.rect.centerx = windowSurface.get_rect().left + windowSurface.get_rect().centerx / 2\n\n    def move(self):\n        if self.player_type == 1:\n            if (up is True) and (self.rect.y > 5):\n                self.rect.y -= self.speed\n            elif (down is True) and (self.rect.bottom < windowHeight - 5):\n                self.rect.y += self.speed\n        elif (self.player_type == 11) or (self.player_type == 12):\n            if (left is True) and (self.rect.x > windowSurface.get_rect().centerx):\n                self.rect.x -= self.speed\n            elif (right is True) and (self.rect.right < windowWidth - 20):\n                self.rect.x += self.speed\n\n    def cpu_movexl(self, pix):\n        self.rect.x -= pix\n        if self.rect.x < 0:\n            self.rect.x = 0\n\n    def cpu_movexr(self, pix):\n        self.rect.x += pix\n        if self.rect.x > 400:\n            self.rect.x = 400\n\n    def cpu_moveu(self, pixel):
\n        self.rect.y -= pixel\n        if self.rect.y < 0:\n            self.rect.y = 0\n\n    def cpu_moved(self, pixel):\n        self.rect.y += pixel\n        if self.rect.y > 400:\n            self.rect.y = 400\n\n\n# Create the paddles\npaddle1a = Paddle(1)\npaddle1b = Paddle(11)\npaddle1c = Paddle(12)\npaddle2a = Paddle(2)\npaddle2b = Paddle(21)\npaddle2c = Paddle(22)\n\n\n# --------------------------------- Ball ---------------------------------------------------------------------------\nclass Ball(pygame.sprite.Sprite):\n    def __init__(self):\n        pygame.sprite.Sprite.__init__(self)\n\n        self.image = pygame.image.load('final.png')\n        self.rect = self.image.get_rect()\n        self.rect.centerx = surface_rect.centerx\n        self.rect.centery = surface_rect.centery\n        self.velocity = [randint(3, 5), randint(-5, 5)]\n\n    def move(self):\n        self.rect.x += self.velocity[0]\n        self.rect.y += self.velocity[1]\n\n\n# ---------------------------------------------------------------------------------------------------------------------\n# Create the ball and render all items to screen\nball = Ball()\nall_sprites = pygame.sprite.RenderPlain(paddle1a, paddle1b, paddle1c, paddle2a, paddle2b, paddle2c, ball)\n\n# Scores for the players\nplayer_score = 0\ncpu_score = 0\nplayer_games = 0\ncpu_games = 0\n\n\n# Collisions\ndef hit():\n    if pygame.sprite.collide_rect(ball, paddle1a) or pygame.sprite.collide_rect(ball, paddle2a):\n        ball.velocity[0] = -ball.velocity[0]\n        ball.velocity[1] = randint(-8, 8)\n        hit_sound.play()\n    elif pygame.sprite.collide_rect(ball, paddle1b) or pygame.sprite.collide_rect(ball, paddle2b):\n        ball.velocity[0] = randint(4, 8)\n        ball.velocity[1] = -ball.velocity[1]\n        hit_sound.play()\n    elif pygame.sprite.collide_rect(ball, paddle1c) or pygame.sprite.collide_rect(ball, paddle2c):\n        ball.velocity[0] = randint(4, 8)\n        ball.velocity[1] = -ball.velocity[1]\n        hit_sound.play()\n\n\n# ------------------------------- Run the game loop ------------------------------------------------------------------\nwhile True:\n    if ball.rect.x > windowWidth:\n        ball.rect.centerx = surface_rect.centerx\n        ball.rect.centery = surface_rect.centery\n        ball.velocity = [randint(3, 6), randint(-5, 5)]\n        cpu_score += 1\n    elif ball.rect.x < 0:\n        ball.rect.centerx = surface_rect.centerx\n        ball.rect.centery = surface_rect.centery\n        ball.velocity = [randint(3, 5), randint(-5, 5)]\n        player_score += 1\n\n    # A game is won at 11 points with at least a two-point lead\n    if player_score == 11 and player_score >= cpu_score + 2:\n        player_games += 1\n        player_score = 0\n    elif cpu_score == 11 and cpu_score >= player_score + 2:\n        cpu_games += 1\n        cpu_score = 0\n\n    if player_games == 3:\n        text = font.render(str(\"YOU WIN\"), 1, BLACK)\n        windowSurface.blit(text, (300, 300))\n        win_sound.play()\n    elif cpu_games == 3:\n        text = font.render(str(\"YOU LOSE\"), 1, BLACK)\n        windowSurface.blit(text, (300, 300))\n        lose_sound.play()\n\n    for event in pygame.event.get():\n        if event.type == QUIT:\n            pygame.quit()\n            sys.exit()\n\n        if event.type == KEYDOWN:\n            if event.key == K_UP:\n                up = True\n                down = False\n            elif event.key == K_DOWN:\n                up = False\n                down = True\n            elif event.key == K_RIGHT:\n                right = True\n                left = False\n            elif event.key == K_LEFT:\n                left = True\n                right = False\n        elif event.type == KEYUP:\n            up = False\n            down = False\n            left = False\n            right = False\n\n    if ball.velocity[0] < 0 and ball.velocity[1] > 0:\n        paddle2a.cpu_moved(10)\n    if ball.velocity[0] < 0 and ball.velocity[1] < 0:\n        paddle2a.cpu_moveu(10)\n    if ball.velocity[0] < 0 and ball.velocity[1] > 0:\n        paddle2b.cpu_movexr(10)\n        paddle2c.cpu_movexr(10)\n    if ball.velocity[0] < 0 and ball.velocity[1] < 0:\n        paddle2b.cpu_movexl(10)
\n        paddle2c.cpu_movexl(10)\n\n    # Display the scores on the board\n    score = font.render(str(cpu_score) + \" \" + str(player_score), True, WHITE, Some)\n    score_rect = score.get_rect()\n    score_rect.centerx = surface_rect.centerx\n    score_rect.y = 10\n    games = font.render(str(cpu_games) + \" \" + str(player_games), True, WHITE, Some)\n    games_rect = games.get_rect()\n    games_rect.centerx = surface_rect.centerx\n    games_rect.y = 50\n\n    windowSurface.fill(WHITE)\n    windowSurface.blit(score, score_rect), windowSurface.blit(games, games_rect)\n\n    # Draws the net down the middle\n    net_center = surface_rect.centerx\n\n    net_rect0 = pygame.Rect(net_center, 0, 5, 5)\n    net_rect1 = pygame.Rect(net_center, 60, 5, 5)\n    net_rect2 = pygame.Rect(net_center, 120, 5, 5)\n    net_rect3 = pygame.Rect(net_center, 180, 5, 5)\n    net_rect4 = pygame.Rect(net_center, 240, 5, 5)\n    net_rect5 = pygame.Rect(net_center, 300, 5, 5)\n    net_rect6 = pygame.Rect(net_center, 360, 5, 5)\n    net_rect7 = pygame.Rect(net_center, 395, 5, 5)\n\n    pygame.draw.rect(windowSurface, Gold, (net_rect0.left, net_rect0.top, net_rect0.width, net_rect0.height))\n    pygame.draw.rect(windowSurface, Gold, (net_rect1.left, net_rect1.top, net_rect1.width, net_rect1.height))\n    pygame.draw.rect(windowSurface, Gold, (net_rect2.left, net_rect2.top, net_rect2.width, net_rect2.height))\n    pygame.draw.rect(windowSurface, Gold, (net_rect3.left, net_rect3.top, net_rect3.width, net_rect3.height))\n    pygame.draw.rect(windowSurface, Gold, (net_rect4.left, net_rect4.top, net_rect4.width, net_rect4.height))\n    pygame.draw.rect(windowSurface, Gold, (net_rect5.left, net_rect5.top, net_rect5.width, net_rect5.height))\n    pygame.draw.rect(windowSurface, Gold, (net_rect6.left, net_rect6.top, net_rect6.width, net_rect6.height))\n\n    all_sprites.draw(windowSurface)\n    paddle1a.move(), paddle1b.move(), paddle1c.move()\n    ball.move()\n    hit()\n\n    pygame.display.update()\n    time.sleep(0.02)\n","sub_path":"pong_game.py","file_name":"pong_game.py","file_ext":"py","file_size_in_byte":9611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"120652228","text":"from selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom bs4 import BeautifulSoup\nfrom time import sleep\nfrom tkinter import *\nfrom tkinter.scrolledtext import ScrolledText\nimport sqlite3\n\nbrowser = webdriver.Chrome()\n# Set the maximum wait time to 7 s; raise an exception if it is exceeded\nwait = WebDriverWait(browser, 7)\n# Create the database\ntext_db = sqlite3.connect('D:/text.db')\n# SQL statement that creates the table\n#sqlstr = \"create table tx(page varchar(5) primary key, text varchar(3000))\"\n# Execute the SQL statement\n#text_db.execute(sqlstr)\n\n\n\n\ndef index_page(url, page):\n    \"\"\"\n    Extract the text information\n    \"\"\"\n    try:\n        # URL of the page to scrape\n        browser.get(url)\n        # Scroll the page near the 'continue reading' button so it can be clicked\n        page_move = browser.find_elements_by_css_selector(\"#html-reader-go-more > div.banner-core-wrap.super-vip\")\n        browser.execute_script('arguments[0].scrollIntoView();', page_move[-1])  # scroll the element into view\n        # Click 'continue reading' to expand the full text\n        continue_read = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#html-reader-go-more div.banner-more-btn > span.moreBtn.goBtn')))\n        continue_read.click()\n        # Wait 3 seconds after expanding\n        sleep(3)\n        get_text(page)\n    except TimeoutException:\n        index_page(url, page)\n\n\ndef get_text(page):\n    \"\"\"\n    Extract the text inside the nodes\n    \"\"\"\n    # Page source\n    html = browser.page_source\n    # Initialise the BeautifulSoup object\n    soup = BeautifulSoup(html, 'lxml')\n    # Scroll the page
\n    try:\n        move = browser.find_elements_by_css_selector('#pageNo-' + str(page))\n        browser.execute_script('arguments[0].scrollIntoView();', move[-1])  # scroll the element into view\n    # Catch page-number errors\n    except IndexError:\n        print('\n' + \"Page \" + str(page) + \" not found...\")\n        return False\n    # Wait 3 seconds\n    sleep(3)\n    # s stores the text of each page\n    s = ''\n    # Find the nodes\n    for div in soup.select('#reader-container-inner-1'):\n        # Each div2 represents one page\n        for div2 in div.select('#pageNo-' + str(page)):\n            for ie in div2.find_all(class_='ie-fix'):\n                for p in ie.find_all(name='p'):\n                    p.string.replace('\n', '')\n                    p.string.replace('(', '\\(')\n                    p.string.replace(')', '\\)')\n                    p.string.replace(\"'\", \"\\'\")\n                    p.string.replace('\"','\\\"')\n                    s += p.string\n                    #contents.insert(END, p.string.replace('\n', ''))\n                    #contents.insert(END, str(page) + ':\n' + '=' * 20)\n    save_to_db(page, s)\n\n\ndef save_to_db(page, result):\n    \"\"\"\n    Save to the SQLite DB\n    \"\"\"\n\n    try:\n        cur = text_db.cursor()\n        sqlstr = \"INSERT INTO tx(page,text) VALUES(\\'\"+str(page)+\"\\',\\'\"+result+\"\\')\"\n        cur.execute(sqlstr)\n        text_db.commit()\n        print(\"Data saved successfully\")\n    except Exception as e:\n        print(e)\n        print('Saving failed')\n\ndef go(url):\n    cur = text_db.cursor()\n    cur.execute(\"DELETE FROM tx where 1 = 1\")\n    text_db.commit()\n    print(\"Old data deleted\")\n    for i in range(1, MAX_PAGE + 1):\n        index_page(url, i)\n\ndef clear(contents):\n    contents.delete('1.0', END)\n    browser.close()\n\nMAX_PAGE = 10\ndef main():\n    # Create the window\n    top = Tk()\n    # Title\n    top.title(\"Baidu Wenku Text Scraper\")\n    # Window size\n    top.geometry('750x500+500+200')\n    # Multi-line text box\n    contents = ScrolledText()\n    contents.pack(side=BOTTOM, expand=True, fill=BOTH)\n    # Single-line text box\n    url_text = Entry()\n    url_text.pack(side=LEFT, expand=True, fill=X)\n    url_text.insert(0, \"<enter the URL here>\")\n    # Three buttons\n    Button(text='Scrape data', command=lambda: go(url_text.get())).pack(side=LEFT)\n    #Button(text='Show text', command=lambda: getText()).pack(side=LEFT)\n    Button(text='Clear text', command=lambda: clear(contents)).pack(side=LEFT)\n\n    mainloop()\n \n\n\n\n\nif __name__ == '__main__':\n    main()\n\n\n\n","sub_path":"Baidu_wenku_12_17.py","file_name":"Baidu_wenku_12_17.py","file_ext":"py","file_size_in_byte":4210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"647898693","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n# @Project : Elevator Dispatch\n#\n# @Purpose : Assignment of Operating System\n#\n# @Time : 05/08/2019\n#\n# @Author : Feifan Wang\n#\n# @Student ID: 1751694\n#\n# @Contact : rootReturn0@outlook.com\n#\n# @Filename : Interface.py\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass mySlider(QtWidgets.QSlider):\n    ''' customized QSlider '''\n\n    def __init__(self, parent=None):\n        super(mySlider, self).__init__(parent)\n\n    def mousePressEvent(self, e):  # disable mouse events\n        if e.buttons():\n            return\n\n\nclass Ui_MainWindow(object):\n    def setupUi(self, MainWindow):\n        MainWindow.setObjectName(\"MainWindow\")\n        MainWindow.setWindowTitle(\"Elevator Simulator\")\n        MainWindow.resize(800, 612)\n        self.centralwidget = QtWidgets.QWidget(MainWindow)\n        self.centralwidget.setObjectName(\"centralwidget\")\n        self.outsideWidget = QtWidgets.QWidget(self.centralwidget)\n        self.outsideWidget.setGeometry(QtCore.QRect(10, 0, 121, 640))\n        self.outsideWidget.setObjectName(\"outsideWidget\")\n        self.label_6 = QtWidgets.QLabel(self.outsideWidget)\n        self.label_6.setGeometry(QtCore.QRect(20, 532, 16, 16))\n        self.label_6.setObjectName(\"label_6\")\n        self.label_12 = QtWidgets.QLabel(self.outsideWidget)\n        self.label_12.setGeometry(QtCore.QRect(17, 272, 16, 16))\n
self.label_12.setObjectName(\"label_12\")\n self.label = QtWidgets.QLabel(self.outsideWidget)\n self.label.setGeometry(QtCore.QRect(10, 8, 60, 19))\n self.label.setObjectName(\"label\")\n self.label_10 = QtWidgets.QLabel(self.outsideWidget)\n self.label_10.setGeometry(QtCore.QRect(19, 324, 16, 16))\n self.label_10.setObjectName(\"label_10\")\n self.label_19 = QtWidgets.QLabel(self.outsideWidget)\n self.label_19.setGeometry(QtCore.QRect(16, 220, 16, 16))\n self.label_19.setObjectName(\"label_19\")\n self.label_8 = QtWidgets.QLabel(self.outsideWidget)\n self.label_8.setGeometry(QtCore.QRect(19, 480, 16, 16))\n self.label_8.setObjectName(\"label_8\")\n self.label_13 = QtWidgets.QLabel(self.outsideWidget)\n self.label_13.setGeometry(QtCore.QRect(16, 246, 16, 16))\n self.label_13.setObjectName(\"label_13\")\n self.label_18 = QtWidgets.QLabel(self.outsideWidget)\n self.label_18.setGeometry(QtCore.QRect(16, 142, 16, 16))\n self.label_18.setObjectName(\"label_18\")\n self.label_9 = QtWidgets.QLabel(self.outsideWidget)\n self.label_9.setGeometry(QtCore.QRect(19, 350, 16, 16))\n self.label_9.setObjectName(\"label_9\")\n self.label_17 = QtWidgets.QLabel(self.outsideWidget)\n self.label_17.setGeometry(QtCore.QRect(16, 116, 16, 16))\n self.label_17.setObjectName(\"label_17\")\n self.label_5 = QtWidgets.QLabel(self.outsideWidget)\n self.label_5.setGeometry(QtCore.QRect(19, 454, 16, 16))\n self.label_5.setObjectName(\"label_5\")\n self.label_21 = QtWidgets.QLabel(self.outsideWidget)\n self.label_21.setGeometry(QtCore.QRect(16, 169, 16, 16))\n self.label_21.setObjectName(\"label_21\")\n self.label_3 = QtWidgets.QLabel(self.outsideWidget)\n self.label_3.setGeometry(QtCore.QRect(19, 402, 16, 16))\n self.label_3.setObjectName(\"label_3\")\n self.label_20 = QtWidgets.QLabel(self.outsideWidget)\n self.label_20.setGeometry(QtCore.QRect(16, 194, 16, 16))\n self.label_20.setObjectName(\"label_20\")\n self.label_11 = QtWidgets.QLabel(self.outsideWidget)\n self.label_11.setGeometry(QtCore.QRect(16, 298, 16, 16))\n self.label_11.setObjectName(\"label_11\")\n self.label_16 = QtWidgets.QLabel(self.outsideWidget)\n self.label_16.setGeometry(QtCore.QRect(16, 90, 16, 16))\n self.label_16.setObjectName(\"label_16\")\n self.label_7 = QtWidgets.QLabel(self.outsideWidget)\n self.label_7.setGeometry(QtCore.QRect(19, 506, 16, 16))\n self.label_7.setObjectName(\"label_7\")\n self.label_15 = QtWidgets.QLabel(self.outsideWidget)\n self.label_15.setGeometry(QtCore.QRect(16, 64, 16, 16))\n self.label_15.setObjectName(\"label_15\")\n self.label_14 = QtWidgets.QLabel(self.outsideWidget)\n self.label_14.setGeometry(QtCore.QRect(15, 38, 16, 16))\n self.label_14.setObjectName(\"label_14\")\n self.label_2 = QtWidgets.QLabel(self.outsideWidget)\n self.label_2.setGeometry(QtCore.QRect(19, 376, 16, 16))\n self.label_2.setObjectName(\"label_2\")\n self.label_4 = QtWidgets.QLabel(self.outsideWidget)\n self.label_4.setGeometry(QtCore.QRect(19, 428, 16, 16))\n self.label_4.setObjectName(\"label_4\")\n self.label_refresh = QtWidgets.QLabel(self.outsideWidget)\n self.label_refresh.setGeometry(QtCore.QRect(28, 555, 60, 16))\n self.label_refresh.setStyleSheet(\"color:rgb(78,160,107)\")\n self.label_refresh.setObjectName(\"label_refresh\")\n self.pushButton_refresh = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_refresh.setGeometry(QtCore.QRect(85, 548, 30, 30))\n self.pushButton_refresh.setStyleSheet(\"color:rgb(173,233,211)\")\n self.pushButton_refresh.setObjectName(\"pushButton_refresh\")\n self.pushButton_2d = 
QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_2d.setGeometry(QtCore.QRect(80, 500, 41, 26))\n self.pushButton_2d.setObjectName(\"pushButton_2d\")\n self.pushButton_11d = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_11d.setGeometry(QtCore.QRect(80, 266, 41, 26))\n self.pushButton_11d.setObjectName(\"pushButton_11d\")\n self.pushButton_19d = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_19d.setGeometry(QtCore.QRect(80, 58, 41, 26))\n self.pushButton_19d.setObjectName(\"pushButton_19d\")\n self.pushButton_7u = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_7u.setGeometry(QtCore.QRect(40, 370, 41, 26))\n self.pushButton_7u.setObjectName(\"pushButton_7u\")\n self.pushButton_14u = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_14u.setGeometry(QtCore.QRect(40, 188, 41, 26))\n self.pushButton_14u.setObjectName(\"pushButton_14u\")\n self.pushButton_15u = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_15u.setGeometry(QtCore.QRect(40, 162, 41, 26))\n self.pushButton_15u.setObjectName(\"pushButton_15u\")\n self.pushButton_14d = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_14d.setGeometry(QtCore.QRect(80, 188, 41, 26))\n self.pushButton_14d.setObjectName(\"pushButton_14d\")\n self.pushButton_2u = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_2u.setGeometry(QtCore.QRect(40, 500, 41, 26))\n self.pushButton_2u.setObjectName(\"pushButton_2u\")\n self.pushButton_6d = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_6d.setGeometry(QtCore.QRect(80, 396, 41, 26))\n self.pushButton_6d.setObjectName(\"pushButton_6d\")\n self.pushButton_13u = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_13u.setGeometry(QtCore.QRect(40, 214, 41, 26))\n self.pushButton_13u.setObjectName(\"pushButton_13u\")\n self.pushButton_17d = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_17d.setGeometry(QtCore.QRect(80, 110, 41, 26))\n self.pushButton_17d.setObjectName(\"pushButton_17d\")\n self.pushButton_11u = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_11u.setGeometry(QtCore.QRect(40, 266, 41, 26))\n self.pushButton_11u.setObjectName(\"pushButton_11u\")\n self.pushButton_4d = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_4d.setGeometry(QtCore.QRect(80, 448, 41, 26))\n self.pushButton_4d.setObjectName(\"pushButton_4d\")\n self.pushButton_18u = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_18u.setGeometry(QtCore.QRect(40, 84, 41, 26))\n self.pushButton_18u.setObjectName(\"pushButton_18u\")\n self.pushButton_15d = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_15d.setGeometry(QtCore.QRect(80, 162, 41, 26))\n self.pushButton_15d.setObjectName(\"pushButton_15d\")\n self.pushButton_16d = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_16d.setGeometry(QtCore.QRect(80, 136, 41, 26))\n self.pushButton_16d.setObjectName(\"pushButton_16d\")\n self.pushButton_8d = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_8d.setGeometry(QtCore.QRect(80, 344, 41, 26))\n self.pushButton_8d.setObjectName(\"pushButton_8d\")\n self.pushButton_5d = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_5d.setGeometry(QtCore.QRect(80, 422, 41, 26))\n self.pushButton_5d.setObjectName(\"pushButton_5d\")\n self.pushButton_19u = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_19u.setGeometry(QtCore.QRect(40, 58, 41, 26))\n self.pushButton_19u.setObjectName(\"pushButton_19u\")\n self.pushButton_17u = 
QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_17u.setGeometry(QtCore.QRect(40, 110, 41, 26))\n self.pushButton_17u.setObjectName(\"pushButton_17u\")\n self.pushButton_20d = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_20d.setGeometry(QtCore.QRect(80, 32, 41, 26))\n self.pushButton_20d.setObjectName(\"pushButton_20d\")\n self.pushButton_10u = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_10u.setGeometry(QtCore.QRect(40, 292, 41, 26))\n self.pushButton_10u.setObjectName(\"pushButton_10u\")\n self.pushButton_18d = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_18d.setGeometry(QtCore.QRect(80, 84, 41, 26))\n self.pushButton_18d.setObjectName(\"pushButton_18d\")\n self.pushButton_3u = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_3u.setGeometry(QtCore.QRect(40, 474, 41, 26))\n self.pushButton_3u.setObjectName(\"pushButton_3u\")\n self.pushButton_8u = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_8u.setGeometry(QtCore.QRect(40, 344, 41, 26))\n self.pushButton_8u.setObjectName(\"pushButton_8u\")\n self.pushButton_5u = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_5u.setGeometry(QtCore.QRect(40, 422, 41, 26))\n self.pushButton_5u.setObjectName(\"pushButton_5u\")\n self.pushButton_3d = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_3d.setGeometry(QtCore.QRect(80, 474, 41, 26))\n self.pushButton_3d.setObjectName(\"pushButton_3d\")\n self.pushButton_12d = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_12d.setGeometry(QtCore.QRect(80, 240, 41, 26))\n self.pushButton_12d.setObjectName(\"pushButton_12d\")\n self.pushButton_16u = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_16u.setGeometry(QtCore.QRect(40, 136, 41, 26))\n self.pushButton_16u.setObjectName(\"pushButton_16u\")\n self.pushButton_7d = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_7d.setGeometry(QtCore.QRect(80, 370, 41, 26))\n self.pushButton_7d.setObjectName(\"pushButton_7d\")\n self.pushButton_1u = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_1u.setGeometry(QtCore.QRect(40, 526, 41, 26))\n self.pushButton_1u.setObjectName(\"pushButton_1u\")\n self.pushButton_12u = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_12u.setGeometry(QtCore.QRect(40, 240, 41, 26))\n self.pushButton_12u.setObjectName(\"pushButton_12u\")\n self.pushButton_6u = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_6u.setGeometry(QtCore.QRect(40, 396, 41, 26))\n self.pushButton_6u.setObjectName(\"pushButton_6u\")\n self.pushButton_9u = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_9u.setGeometry(QtCore.QRect(40, 318, 41, 26))\n self.pushButton_9u.setObjectName(\"pushButton_9u\")\n self.pushButton_4u = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_4u.setGeometry(QtCore.QRect(40, 448, 41, 26))\n self.pushButton_4u.setObjectName(\"pushButton_4u\")\n self.pushButton_9d = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_9d.setGeometry(QtCore.QRect(80, 318, 41, 26))\n self.pushButton_9d.setObjectName(\"pushButton_9d\")\n self.pushButton_10d = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_10d.setGeometry(QtCore.QRect(80, 292, 41, 26))\n self.pushButton_10d.setObjectName(\"pushButton_10d\")\n self.pushButton_13d = QtWidgets.QPushButton(self.outsideWidget)\n self.pushButton_13d.setGeometry(QtCore.QRect(80, 214, 41, 26))\n self.pushButton_13d.setObjectName(\"pushButton_13d\")\n self.widget_elevator1 = QtWidgets.QWidget(self.centralwidget)\n 
self.widget_elevator1.setGeometry(QtCore.QRect(130, -10, 140, 640))\n self.widget_elevator1.setObjectName(\"widget_elevator1\")\n self.elevator1_5 = QtWidgets.QPushButton(self.widget_elevator1)\n self.elevator1_5.setGeometry(QtCore.QRect(10, 430, 51, 25))\n self.elevator1_5.setObjectName(\"elevator1_5\")\n self.elevator1_4 = QtWidgets.QPushButton(self.widget_elevator1)\n self.elevator1_4.setGeometry(QtCore.QRect(10, 456, 51, 25))\n self.elevator1_4.setObjectName(\"elevator1_4\")\n self.elevator1_2 = QtWidgets.QPushButton(self.widget_elevator1)\n self.elevator1_2.setGeometry(QtCore.QRect(10, 508, 51, 25))\n self.elevator1_2.setObjectName(\"elevator1_2\")\n self.elevator1_15 = QtWidgets.QPushButton(self.widget_elevator1)\n self.elevator1_15.setGeometry(QtCore.QRect(10, 170, 51, 25))\n self.elevator1_15.setObjectName(\"elevator1_15\")\n self.elevator1_13 = QtWidgets.QPushButton(self.widget_elevator1)\n self.elevator1_13.setGeometry(QtCore.QRect(10, 222, 51, 25))\n self.elevator1_13.setObjectName(\"elevator1_13\")\n self.elevator1_19 = QtWidgets.QPushButton(self.widget_elevator1)\n self.elevator1_19.setGeometry(QtCore.QRect(10, 66, 51, 25))\n self.elevator1_19.setObjectName(\"elevator1_19\")\n self.elevator1_16 = QtWidgets.QPushButton(self.widget_elevator1)\n self.elevator1_16.setGeometry(QtCore.QRect(10, 144, 51, 25))\n self.elevator1_16.setObjectName(\"elevator1_16\")\n self.elevator1_17 = QtWidgets.QPushButton(self.widget_elevator1)\n self.elevator1_17.setGeometry(QtCore.QRect(10, 118, 51, 25))\n self.elevator1_17.setObjectName(\"elevator1_17\")\n self.elevator1_7 = QtWidgets.QPushButton(self.widget_elevator1)\n self.elevator1_7.setGeometry(QtCore.QRect(10, 378, 51, 25))\n self.elevator1_7.setObjectName(\"elevator1_7\")\n self.elevator1_12 = QtWidgets.QPushButton(self.widget_elevator1)\n self.elevator1_12.setGeometry(QtCore.QRect(10, 248, 51, 25))\n self.elevator1_12.setObjectName(\"elevator1_12\")\n self.elevator1_1 = QtWidgets.QPushButton(self.widget_elevator1)\n self.elevator1_1.setGeometry(QtCore.QRect(10, 534, 51, 25))\n self.elevator1_1.setObjectName(\"elevator1_1\")\n self.elevator1_3 = QtWidgets.QPushButton(self.widget_elevator1)\n self.elevator1_3.setGeometry(QtCore.QRect(10, 482, 51, 25))\n self.elevator1_3.setObjectName(\"elevator1_3\")\n self.elevator1_9 = QtWidgets.QPushButton(self.widget_elevator1)\n self.elevator1_9.setGeometry(QtCore.QRect(10, 326, 51, 25))\n self.elevator1_9.setObjectName(\"elevator1_9\")\n self.elevator1_8 = QtWidgets.QPushButton(self.widget_elevator1)\n self.elevator1_8.setGeometry(QtCore.QRect(10, 352, 51, 25))\n self.elevator1_8.setObjectName(\"elevator1_8\")\n self.elevator1_14 = QtWidgets.QPushButton(self.widget_elevator1)\n self.elevator1_14.setGeometry(QtCore.QRect(10, 196, 51, 25))\n self.elevator1_14.setObjectName(\"elevator1_14\")\n self.elevator1_10 = QtWidgets.QPushButton(self.widget_elevator1)\n self.elevator1_10.setGeometry(QtCore.QRect(10, 300, 51, 25))\n self.elevator1_10.setObjectName(\"elevator1_10\")\n self.elevator1_6 = QtWidgets.QPushButton(self.widget_elevator1)\n self.elevator1_6.setGeometry(QtCore.QRect(10, 404, 51, 25))\n self.elevator1_6.setObjectName(\"elevator1_6\")\n self.elevator1_11 = QtWidgets.QPushButton(self.widget_elevator1)\n self.elevator1_11.setGeometry(QtCore.QRect(10, 274, 51, 25))\n self.elevator1_11.setObjectName(\"elevator1_11\")\n self.elevator1_18 = QtWidgets.QPushButton(self.widget_elevator1)\n self.elevator1_18.setGeometry(QtCore.QRect(10, 92, 51, 25))\n self.elevator1_18.setObjectName(\"elevator1_18\")\n 
self.elevator1_20 = QtWidgets.QPushButton(self.widget_elevator1)\n self.elevator1_20.setGeometry(QtCore.QRect(10, 40, 51, 25))\n self.elevator1_20.setObjectName(\"elevator1_20\")\n self.ID_e1 = QtWidgets.QLabel(self.widget_elevator1)\n self.ID_e1.setAlignment(QtCore.Qt.AlignCenter)\n self.ID_e1.setGeometry(QtCore.QRect(6, 16, 60, 22))\n self.ID_e1.setObjectName(\"ID_e1\")\n self.status_e1_closed = QtWidgets.QLabel(self.widget_elevator1)\n self.status_e1_closed.setAlignment(QtCore.Qt.AlignCenter)\n self.status_e1_closed.setGeometry(QtCore.QRect(73, 564, 60, 22))\n self.status_e1_closed.setStyleSheet(\"color: rgb(223,129,113);\")\n self.status_e1_closed.setObjectName(\"status_e1_closed\")\n self.status_e1_open = QtWidgets.QLabel(self.widget_elevator1)\n self.status_e1_open.setAlignment(QtCore.Qt.AlignCenter)\n self.status_e1_open.setGeometry(QtCore.QRect(73, 564, 60, 22))\n self.status_e1_open.setStyleSheet(\"color: rgb(99,192,135);\")\n self.status_e1_open.setObjectName(\"status_e1_open\")\n self.status_e1_running = QtWidgets.QLabel(self.widget_elevator1)\n self.status_e1_running.setAlignment(QtCore.Qt.AlignCenter)\n self.status_e1_running.setGeometry(QtCore.QRect(73, 564, 60, 22))\n self.status_e1_running.setStyleSheet(\"color: rgb(206,180,139);\")\n self.status_e1_running.setObjectName(\"status_e1_running\")\n self.floor_e1 = QtWidgets.QLabel(self.widget_elevator1)\n self.floor_e1.setAlignment(QtCore.Qt.AlignCenter)\n self.floor_e1.setGeometry(QtCore.QRect(73, 15, 55, 22))\n self.floor_e1.setStyleSheet(\"color: rgb(98, 153 ,183);\")\n self.floor_e1.setObjectName(\"floor_e1\")\n self.elevator1Slider = mySlider(self.widget_elevator1)\n self.elevator1Slider.setMinimum(0)\n self.elevator1Slider.setMaximum(190)\n self.elevator1Slider.setGeometry(QtCore.QRect(90, 40, 22, 520))\n self.elevator1Slider.setOrientation(QtCore.Qt.Vertical)\n self.elevator1Slider.setObjectName(\"elevator1Slider\")\n self.widget_elevator2 = QtWidgets.QWidget(self.centralwidget)\n self.widget_elevator2.setGeometry(QtCore.QRect(260, -10, 140, 640))\n self.widget_elevator2.setObjectName(\"widget_elevator2\")\n self.elevator2_5 = QtWidgets.QPushButton(self.widget_elevator2)\n self.elevator2_5.setGeometry(QtCore.QRect(10, 430, 51, 25))\n self.elevator2_5.setObjectName(\"elevator2_5\")\n self.elevator2_4 = QtWidgets.QPushButton(self.widget_elevator2)\n self.elevator2_4.setGeometry(QtCore.QRect(10, 456, 51, 25))\n self.elevator2_4.setObjectName(\"elevator2_4\")\n self.elevator2_2 = QtWidgets.QPushButton(self.widget_elevator2)\n self.elevator2_2.setGeometry(QtCore.QRect(10, 508, 51, 25))\n self.elevator2_2.setObjectName(\"elevator2_2\")\n self.elevator2_15 = QtWidgets.QPushButton(self.widget_elevator2)\n self.elevator2_15.setGeometry(QtCore.QRect(10, 170, 51, 25))\n self.elevator2_15.setObjectName(\"elevator2_15\")\n self.elevator2_13 = QtWidgets.QPushButton(self.widget_elevator2)\n self.elevator2_13.setGeometry(QtCore.QRect(10, 222, 51, 25))\n self.elevator2_13.setObjectName(\"elevator2_13\")\n self.elevator2_19 = QtWidgets.QPushButton(self.widget_elevator2)\n self.elevator2_19.setGeometry(QtCore.QRect(10, 66, 51, 25))\n self.elevator2_19.setObjectName(\"elevator2_19\")\n self.elevator2_16 = QtWidgets.QPushButton(self.widget_elevator2)\n self.elevator2_16.setGeometry(QtCore.QRect(10, 144, 51, 25))\n self.elevator2_16.setObjectName(\"elevator2_16\")\n self.elevator2_17 = QtWidgets.QPushButton(self.widget_elevator2)\n self.elevator2_17.setGeometry(QtCore.QRect(10, 118, 51, 25))\n 
self.elevator2_17.setObjectName(\"elevator2_17\")\n self.elevator2_7 = QtWidgets.QPushButton(self.widget_elevator2)\n self.elevator2_7.setGeometry(QtCore.QRect(10, 378, 51, 25))\n self.elevator2_7.setObjectName(\"elevator2_7\")\n self.elevator2_12 = QtWidgets.QPushButton(self.widget_elevator2)\n self.elevator2_12.setGeometry(QtCore.QRect(10, 248, 51, 25))\n self.elevator2_12.setObjectName(\"elevator2_12\")\n self.elevator2_1 = QtWidgets.QPushButton(self.widget_elevator2)\n self.elevator2_1.setGeometry(QtCore.QRect(10, 534, 51, 25))\n self.elevator2_1.setObjectName(\"elevator2_1\")\n self.elevator2_3 = QtWidgets.QPushButton(self.widget_elevator2)\n self.elevator2_3.setGeometry(QtCore.QRect(10, 482, 51, 25))\n self.elevator2_3.setObjectName(\"elevator2_3\")\n self.elevator2_9 = QtWidgets.QPushButton(self.widget_elevator2)\n self.elevator2_9.setGeometry(QtCore.QRect(10, 326, 51, 25))\n self.elevator2_9.setObjectName(\"elevator2_9\")\n self.elevator2_8 = QtWidgets.QPushButton(self.widget_elevator2)\n self.elevator2_8.setGeometry(QtCore.QRect(10, 352, 51, 25))\n self.elevator2_8.setObjectName(\"elevator2_8\")\n self.elevator2_14 = QtWidgets.QPushButton(self.widget_elevator2)\n self.elevator2_14.setGeometry(QtCore.QRect(10, 196, 51, 25))\n self.elevator2_14.setObjectName(\"elevator2_14\")\n self.elevator2_10 = QtWidgets.QPushButton(self.widget_elevator2)\n self.elevator2_10.setGeometry(QtCore.QRect(10, 300, 51, 25))\n self.elevator2_10.setObjectName(\"elevator2_10\")\n self.elevator2_6 = QtWidgets.QPushButton(self.widget_elevator2)\n self.elevator2_6.setGeometry(QtCore.QRect(10, 404, 51, 25))\n self.elevator2_6.setObjectName(\"elevator2_6\")\n self.elevator2_11 = QtWidgets.QPushButton(self.widget_elevator2)\n self.elevator2_11.setGeometry(QtCore.QRect(10, 274, 51, 25))\n self.elevator2_11.setObjectName(\"elevator2_11\")\n self.elevator2_18 = QtWidgets.QPushButton(self.widget_elevator2)\n self.elevator2_18.setGeometry(QtCore.QRect(10, 92, 51, 25))\n self.elevator2_18.setObjectName(\"elevator2_18\")\n self.elevator2_20 = QtWidgets.QPushButton(self.widget_elevator2)\n self.elevator2_20.setGeometry(QtCore.QRect(10, 40, 51, 25))\n self.elevator2_20.setObjectName(\"elevator2_20\")\n self.ID_e2 = QtWidgets.QLabel(self.widget_elevator2)\n self.ID_e2.setAlignment(QtCore.Qt.AlignCenter)\n self.ID_e2.setGeometry(QtCore.QRect(6, 16, 60, 22))\n self.ID_e2.setObjectName(\"ID_e2\")\n self.status_e2_closed = QtWidgets.QLabel(self.widget_elevator2)\n self.status_e2_closed.setAlignment(QtCore.Qt.AlignCenter)\n self.status_e2_closed.setGeometry(QtCore.QRect(73, 564, 60, 22))\n self.status_e2_closed.setStyleSheet(\"color: rgb(223,129,113);\")\n self.status_e2_closed.setObjectName(\"status_e2_closed\")\n self.status_e2_open = QtWidgets.QLabel(self.widget_elevator2)\n self.status_e2_open.setAlignment(QtCore.Qt.AlignCenter)\n self.status_e2_open.setGeometry(QtCore.QRect(73, 564, 60, 22))\n self.status_e2_open.setStyleSheet(\"color: rgb(99,192,135);\")\n self.status_e2_open.setObjectName(\"status_e2_open\")\n self.status_e2_running = QtWidgets.QLabel(self.widget_elevator2)\n self.status_e2_running.setAlignment(QtCore.Qt.AlignCenter)\n self.status_e2_running.setGeometry(QtCore.QRect(73, 564, 60, 22))\n self.status_e2_running.setStyleSheet(\"color: rgb(206,180,139);\")\n self.status_e2_running.setObjectName(\"status_e2_running\")\n self.floor_e2 = QtWidgets.QLabel(self.widget_elevator2)\n self.floor_e2.setAlignment(QtCore.Qt.AlignCenter)\n self.floor_e2.setGeometry(QtCore.QRect(73, 15, 55, 22))\n 
self.floor_e2.setStyleSheet(\"color: rgb(98, 153 ,183);\")\n self.floor_e2.setObjectName(\"floor_e2\")\n self.elevator2Slider = mySlider(self.widget_elevator2)\n self.elevator2Slider.setMinimum(0)\n self.elevator2Slider.setMaximum(190)\n self.elevator2Slider.setGeometry(QtCore.QRect(90, 40, 22, 520))\n self.elevator2Slider.setOrientation(QtCore.Qt.Vertical)\n self.elevator2Slider.setObjectName(\"elevator2Slider\")\n self.widget_elevator3 = QtWidgets.QWidget(self.centralwidget)\n self.widget_elevator3.setGeometry(QtCore.QRect(390, -10, 140, 640))\n self.widget_elevator3.setObjectName(\"widget_elevator3\")\n self.elevator3_5 = QtWidgets.QPushButton(self.widget_elevator3)\n self.elevator3_5.setGeometry(QtCore.QRect(10, 430, 51, 25))\n self.elevator3_5.setObjectName(\"elevator3_5\")\n self.elevator3_4 = QtWidgets.QPushButton(self.widget_elevator3)\n self.elevator3_4.setGeometry(QtCore.QRect(10, 456, 51, 25))\n self.elevator3_4.setObjectName(\"elevator3_4\")\n self.elevator3_2 = QtWidgets.QPushButton(self.widget_elevator3)\n self.elevator3_2.setGeometry(QtCore.QRect(10, 508, 51, 25))\n self.elevator3_2.setObjectName(\"elevator3_2\")\n self.elevator3_15 = QtWidgets.QPushButton(self.widget_elevator3)\n self.elevator3_15.setGeometry(QtCore.QRect(10, 170, 51, 25))\n self.elevator3_15.setObjectName(\"elevator3_15\")\n self.elevator3_13 = QtWidgets.QPushButton(self.widget_elevator3)\n self.elevator3_13.setGeometry(QtCore.QRect(10, 222, 51, 25))\n self.elevator3_13.setObjectName(\"elevator3_13\")\n self.elevator3_19 = QtWidgets.QPushButton(self.widget_elevator3)\n self.elevator3_19.setGeometry(QtCore.QRect(10, 66, 51, 25))\n self.elevator3_19.setObjectName(\"elevator3_19\")\n self.elevator3_16 = QtWidgets.QPushButton(self.widget_elevator3)\n self.elevator3_16.setGeometry(QtCore.QRect(10, 144, 51, 25))\n self.elevator3_16.setObjectName(\"elevator3_16\")\n self.elevator3_17 = QtWidgets.QPushButton(self.widget_elevator3)\n self.elevator3_17.setGeometry(QtCore.QRect(10, 118, 51, 25))\n self.elevator3_17.setObjectName(\"elevator3_17\")\n self.elevator3_7 = QtWidgets.QPushButton(self.widget_elevator3)\n self.elevator3_7.setGeometry(QtCore.QRect(10, 378, 51, 25))\n self.elevator3_7.setObjectName(\"elevator3_7\")\n self.elevator3_12 = QtWidgets.QPushButton(self.widget_elevator3)\n self.elevator3_12.setGeometry(QtCore.QRect(10, 248, 51, 25))\n self.elevator3_12.setObjectName(\"elevator3_12\")\n self.elevator3_1 = QtWidgets.QPushButton(self.widget_elevator3)\n self.elevator3_1.setGeometry(QtCore.QRect(10, 534, 51, 25))\n self.elevator3_1.setObjectName(\"elevator3_1\")\n self.elevator3_3 = QtWidgets.QPushButton(self.widget_elevator3)\n self.elevator3_3.setGeometry(QtCore.QRect(10, 482, 51, 25))\n self.elevator3_3.setObjectName(\"elevator3_3\")\n self.elevator3_9 = QtWidgets.QPushButton(self.widget_elevator3)\n self.elevator3_9.setGeometry(QtCore.QRect(10, 326, 51, 25))\n self.elevator3_9.setObjectName(\"elevator3_9\")\n self.elevator3_8 = QtWidgets.QPushButton(self.widget_elevator3)\n self.elevator3_8.setGeometry(QtCore.QRect(10, 352, 51, 25))\n self.elevator3_8.setObjectName(\"elevator3_8\")\n self.elevator3_14 = QtWidgets.QPushButton(self.widget_elevator3)\n self.elevator3_14.setGeometry(QtCore.QRect(10, 196, 51, 25))\n self.elevator3_14.setObjectName(\"elevator3_14\")\n self.elevator3_10 = QtWidgets.QPushButton(self.widget_elevator3)\n self.elevator3_10.setGeometry(QtCore.QRect(10, 300, 51, 25))\n self.elevator3_10.setObjectName(\"elevator3_10\")\n self.elevator3_6 = 
QtWidgets.QPushButton(self.widget_elevator3)\n self.elevator3_6.setGeometry(QtCore.QRect(10, 404, 51, 25))\n self.elevator3_6.setObjectName(\"elevator3_6\")\n self.elevator3_11 = QtWidgets.QPushButton(self.widget_elevator3)\n self.elevator3_11.setGeometry(QtCore.QRect(10, 274, 51, 25))\n self.elevator3_11.setObjectName(\"elevator3_11\")\n self.elevator3_18 = QtWidgets.QPushButton(self.widget_elevator3)\n self.elevator3_18.setGeometry(QtCore.QRect(10, 92, 51, 25))\n self.elevator3_18.setObjectName(\"elevator3_18\")\n self.elevator3_20 = QtWidgets.QPushButton(self.widget_elevator3)\n self.elevator3_20.setGeometry(QtCore.QRect(10, 40, 51, 25))\n self.elevator3_20.setObjectName(\"elevator3_20\")\n self.ID_e3 = QtWidgets.QLabel(self.widget_elevator3)\n self.ID_e3.setAlignment(QtCore.Qt.AlignCenter)\n self.ID_e3.setGeometry(QtCore.QRect(6, 16, 60, 22))\n self.ID_e3.setObjectName(\"ID_e3\")\n self.status_e3_closed = QtWidgets.QLabel(self.widget_elevator3)\n self.status_e3_closed.setAlignment(QtCore.Qt.AlignCenter)\n self.status_e3_closed.setGeometry(QtCore.QRect(73, 564, 60, 22))\n self.status_e3_closed.setStyleSheet(\"color: rgb(223,129,113);\")\n self.status_e3_closed.setObjectName(\"status_e3_closed\")\n self.status_e3_open = QtWidgets.QLabel(self.widget_elevator3)\n self.status_e3_open.setAlignment(QtCore.Qt.AlignCenter)\n self.status_e3_open.setGeometry(QtCore.QRect(73, 564, 60, 22))\n self.status_e3_open.setStyleSheet(\"color: rgb(99,192,135);\")\n self.status_e3_open.setObjectName(\"status_e3_open\")\n self.status_e3_running = QtWidgets.QLabel(self.widget_elevator3)\n self.status_e3_running.setAlignment(QtCore.Qt.AlignCenter)\n self.status_e3_running.setGeometry(QtCore.QRect(73, 564, 60, 22))\n self.status_e3_running.setStyleSheet(\"color: rgb(206,180,139);\")\n self.status_e3_running.setObjectName(\"status_e3_running\")\n self.floor_e3 = QtWidgets.QLabel(self.widget_elevator3)\n self.floor_e3.setAlignment(QtCore.Qt.AlignCenter)\n self.floor_e3.setGeometry(QtCore.QRect(73, 15, 55, 22))\n self.floor_e3.setStyleSheet(\"color: rgb(98, 153 ,183);\")\n self.floor_e3.setObjectName(\"floor_e3\")\n self.elevator3Slider = mySlider(self.widget_elevator3)\n self.elevator3Slider.setMinimum(0)\n self.elevator3Slider.setMaximum(190)\n self.elevator3Slider.setGeometry(QtCore.QRect(90, 40, 22, 520))\n self.elevator3Slider.setOrientation(QtCore.Qt.Vertical)\n self.elevator3Slider.setObjectName(\"elevator3Slider\")\n self.widget_elevator4 = QtWidgets.QWidget(self.centralwidget)\n self.widget_elevator4.setGeometry(QtCore.QRect(520, -10, 140, 640))\n self.widget_elevator4.setObjectName(\"widget_elevator4\")\n self.elevator4_5 = QtWidgets.QPushButton(self.widget_elevator4)\n self.elevator4_5.setGeometry(QtCore.QRect(10, 430, 51, 25))\n self.elevator4_5.setObjectName(\"elevator4_5\")\n self.elevator4_4 = QtWidgets.QPushButton(self.widget_elevator4)\n self.elevator4_4.setGeometry(QtCore.QRect(10, 456, 51, 25))\n self.elevator4_4.setObjectName(\"elevator4_4\")\n self.elevator4_2 = QtWidgets.QPushButton(self.widget_elevator4)\n self.elevator4_2.setGeometry(QtCore.QRect(10, 508, 51, 25))\n self.elevator4_2.setObjectName(\"elevator4_2\")\n self.elevator4_15 = QtWidgets.QPushButton(self.widget_elevator4)\n self.elevator4_15.setGeometry(QtCore.QRect(10, 170, 51, 25))\n self.elevator4_15.setObjectName(\"elevator4_15\")\n self.elevator4_13 = QtWidgets.QPushButton(self.widget_elevator4)\n self.elevator4_13.setGeometry(QtCore.QRect(10, 222, 51, 25))\n self.elevator4_13.setObjectName(\"elevator4_13\")\n self.elevator4_19 = 
QtWidgets.QPushButton(self.widget_elevator4)\n self.elevator4_19.setGeometry(QtCore.QRect(10, 66, 51, 25))\n self.elevator4_19.setObjectName(\"elevator4_19\")\n self.elevator4_16 = QtWidgets.QPushButton(self.widget_elevator4)\n self.elevator4_16.setGeometry(QtCore.QRect(10, 144, 51, 25))\n self.elevator4_16.setObjectName(\"elevator4_16\")\n self.elevator4_17 = QtWidgets.QPushButton(self.widget_elevator4)\n self.elevator4_17.setGeometry(QtCore.QRect(10, 118, 51, 25))\n self.elevator4_17.setObjectName(\"elevator4_17\")\n self.elevator4_7 = QtWidgets.QPushButton(self.widget_elevator4)\n self.elevator4_7.setGeometry(QtCore.QRect(10, 378, 51, 25))\n self.elevator4_7.setObjectName(\"elevator4_7\")\n self.elevator4_12 = QtWidgets.QPushButton(self.widget_elevator4)\n self.elevator4_12.setGeometry(QtCore.QRect(10, 248, 51, 25))\n self.elevator4_12.setObjectName(\"elevator4_12\")\n self.elevator4_1 = QtWidgets.QPushButton(self.widget_elevator4)\n self.elevator4_1.setGeometry(QtCore.QRect(10, 534, 51, 25))\n self.elevator4_1.setObjectName(\"elevator4_1\")\n self.elevator4_3 = QtWidgets.QPushButton(self.widget_elevator4)\n self.elevator4_3.setGeometry(QtCore.QRect(10, 482, 51, 25))\n self.elevator4_3.setObjectName(\"elevator4_3\")\n self.elevator4_9 = QtWidgets.QPushButton(self.widget_elevator4)\n self.elevator4_9.setGeometry(QtCore.QRect(10, 326, 51, 25))\n self.elevator4_9.setObjectName(\"elevator4_9\")\n self.elevator4_8 = QtWidgets.QPushButton(self.widget_elevator4)\n self.elevator4_8.setGeometry(QtCore.QRect(10, 352, 51, 25))\n self.elevator4_8.setObjectName(\"elevator4_8\")\n self.elevator4_14 = QtWidgets.QPushButton(self.widget_elevator4)\n self.elevator4_14.setGeometry(QtCore.QRect(10, 196, 51, 25))\n self.elevator4_14.setObjectName(\"elevator4_14\")\n self.elevator4_10 = QtWidgets.QPushButton(self.widget_elevator4)\n self.elevator4_10.setGeometry(QtCore.QRect(10, 300, 51, 25))\n self.elevator4_10.setObjectName(\"elevator4_10\")\n self.elevator4_6 = QtWidgets.QPushButton(self.widget_elevator4)\n self.elevator4_6.setGeometry(QtCore.QRect(10, 404, 51, 25))\n self.elevator4_6.setObjectName(\"elevator4_6\")\n self.elevator4_11 = QtWidgets.QPushButton(self.widget_elevator4)\n self.elevator4_11.setGeometry(QtCore.QRect(10, 274, 51, 25))\n self.elevator4_11.setObjectName(\"elevator4_11\")\n self.elevator4_18 = QtWidgets.QPushButton(self.widget_elevator4)\n self.elevator4_18.setGeometry(QtCore.QRect(10, 92, 51, 25))\n self.elevator4_18.setObjectName(\"elevator4_18\")\n self.elevator4_20 = QtWidgets.QPushButton(self.widget_elevator4)\n self.elevator4_20.setGeometry(QtCore.QRect(10, 40, 51, 25))\n self.elevator4_20.setObjectName(\"elevator4_20\")\n self.ID_e4 = QtWidgets.QLabel(self.widget_elevator4)\n self.ID_e4.setAlignment(QtCore.Qt.AlignCenter)\n self.ID_e4.setGeometry(QtCore.QRect(6, 16, 60, 22))\n self.ID_e4.setObjectName(\"ID_e4\")\n self.status_e4_closed = QtWidgets.QLabel(self.widget_elevator4)\n self.status_e4_closed.setAlignment(QtCore.Qt.AlignCenter)\n self.status_e4_closed.setGeometry(QtCore.QRect(73, 564, 60, 22))\n self.status_e4_closed.setStyleSheet(\"color: rgb(223,129,113);\")\n self.status_e4_closed.setObjectName(\"status_e4_closed\")\n self.status_e4_open = QtWidgets.QLabel(self.widget_elevator4)\n self.status_e4_open.setAlignment(QtCore.Qt.AlignCenter)\n self.status_e4_open.setGeometry(QtCore.QRect(73, 564, 60, 22))\n self.status_e4_open.setStyleSheet(\"color: rgb(99,192,135);\")\n self.status_e4_open.setObjectName(\"status_e4_open\")\n self.status_e4_running = 
QtWidgets.QLabel(self.widget_elevator4)\n self.status_e4_running.setAlignment(QtCore.Qt.AlignCenter)\n self.status_e4_running.setGeometry(QtCore.QRect(73, 564, 60, 22))\n self.status_e4_running.setStyleSheet(\"color: rgb(206,180,139);\")\n self.status_e4_running.setObjectName(\"status_e4_running\")\n self.floor_e4 = QtWidgets.QLabel(self.widget_elevator4)\n self.floor_e4.setAlignment(QtCore.Qt.AlignCenter)\n self.floor_e4.setGeometry(QtCore.QRect(73, 15, 55, 22))\n self.floor_e4.setStyleSheet(\"color: rgb(98, 153 ,183);\")\n self.floor_e4.setObjectName(\"floor_e4\")\n self.elevator4Slider = mySlider(self.widget_elevator4)\n self.elevator4Slider.setMinimum(0)\n self.elevator4Slider.setMaximum(190)\n self.elevator4Slider.setGeometry(QtCore.QRect(90, 40, 22, 520))\n self.elevator4Slider.setOrientation(QtCore.Qt.Vertical)\n self.elevator4Slider.setObjectName(\"elevator4Slider\")\n self.widget_elevator5 = QtWidgets.QWidget(self.centralwidget)\n self.widget_elevator5.setGeometry(QtCore.QRect(650, -10, 140, 640))\n self.widget_elevator5.setObjectName(\"widget_elevator5\")\n self.elevator5_5 = QtWidgets.QPushButton(self.widget_elevator5)\n self.elevator5_5.setGeometry(QtCore.QRect(10, 430, 51, 25))\n self.elevator5_5.setObjectName(\"elevator5_5\")\n self.elevator5_4 = QtWidgets.QPushButton(self.widget_elevator5)\n self.elevator5_4.setGeometry(QtCore.QRect(10, 456, 51, 25))\n self.elevator5_4.setObjectName(\"elevator5_4\")\n self.elevator5_2 = QtWidgets.QPushButton(self.widget_elevator5)\n self.elevator5_2.setGeometry(QtCore.QRect(10, 508, 51, 25))\n self.elevator5_2.setObjectName(\"elevator5_2\")\n self.elevator5_15 = QtWidgets.QPushButton(self.widget_elevator5)\n self.elevator5_15.setGeometry(QtCore.QRect(10, 170, 51, 25))\n self.elevator5_15.setObjectName(\"elevator5_15\")\n self.elevator5_13 = QtWidgets.QPushButton(self.widget_elevator5)\n self.elevator5_13.setGeometry(QtCore.QRect(10, 222, 51, 25))\n self.elevator5_13.setObjectName(\"elevator5_13\")\n self.elevator5_19 = QtWidgets.QPushButton(self.widget_elevator5)\n self.elevator5_19.setGeometry(QtCore.QRect(10, 66, 51, 25))\n self.elevator5_19.setObjectName(\"elevator5_19\")\n self.elevator5_16 = QtWidgets.QPushButton(self.widget_elevator5)\n self.elevator5_16.setGeometry(QtCore.QRect(10, 144, 51, 25))\n self.elevator5_16.setObjectName(\"elevator5_16\")\n self.elevator5_17 = QtWidgets.QPushButton(self.widget_elevator5)\n self.elevator5_17.setGeometry(QtCore.QRect(10, 118, 51, 25))\n self.elevator5_17.setObjectName(\"elevator5_17\")\n self.elevator5_7 = QtWidgets.QPushButton(self.widget_elevator5)\n self.elevator5_7.setGeometry(QtCore.QRect(10, 378, 51, 25))\n self.elevator5_7.setObjectName(\"elevator5_7\")\n self.elevator5_12 = QtWidgets.QPushButton(self.widget_elevator5)\n self.elevator5_12.setGeometry(QtCore.QRect(10, 248, 51, 25))\n self.elevator5_12.setObjectName(\"elevator5_12\")\n self.elevator5_1 = QtWidgets.QPushButton(self.widget_elevator5)\n self.elevator5_1.setGeometry(QtCore.QRect(10, 534, 51, 25))\n self.elevator5_1.setObjectName(\"elevator5_1\")\n self.elevator5_3 = QtWidgets.QPushButton(self.widget_elevator5)\n self.elevator5_3.setGeometry(QtCore.QRect(10, 482, 51, 25))\n self.elevator5_3.setObjectName(\"elevator5_3\")\n self.elevator5_9 = QtWidgets.QPushButton(self.widget_elevator5)\n self.elevator5_9.setGeometry(QtCore.QRect(10, 326, 51, 25))\n self.elevator5_9.setObjectName(\"elevator5_9\")\n self.elevator5_8 = QtWidgets.QPushButton(self.widget_elevator5)\n self.elevator5_8.setGeometry(QtCore.QRect(10, 352, 51, 25))\n 
self.elevator5_8.setObjectName(\"elevator5_8\")\n self.elevator5_14 = QtWidgets.QPushButton(self.widget_elevator5)\n self.elevator5_14.setGeometry(QtCore.QRect(10, 196, 51, 25))\n self.elevator5_14.setObjectName(\"elevator5_14\")\n self.elevator5_10 = QtWidgets.QPushButton(self.widget_elevator5)\n self.elevator5_10.setGeometry(QtCore.QRect(10, 300, 51, 25))\n self.elevator5_10.setObjectName(\"elevator5_10\")\n self.elevator5_6 = QtWidgets.QPushButton(self.widget_elevator5)\n self.elevator5_6.setGeometry(QtCore.QRect(10, 404, 51, 25))\n self.elevator5_6.setObjectName(\"elevator5_6\")\n self.elevator5_11 = QtWidgets.QPushButton(self.widget_elevator5)\n self.elevator5_11.setGeometry(QtCore.QRect(10, 274, 51, 25))\n self.elevator5_11.setObjectName(\"elevator5_11\")\n self.elevator5_18 = QtWidgets.QPushButton(self.widget_elevator5)\n self.elevator5_18.setGeometry(QtCore.QRect(10, 92, 51, 25))\n self.elevator5_18.setObjectName(\"elevator5_18\")\n self.elevator5_20 = QtWidgets.QPushButton(self.widget_elevator5)\n self.elevator5_20.setGeometry(QtCore.QRect(10, 40, 51, 25))\n self.elevator5_20.setObjectName(\"elevator5_20\")\n self.ID_e5 = QtWidgets.QLabel(self.widget_elevator5)\n self.ID_e5.setAlignment(QtCore.Qt.AlignCenter)\n self.ID_e5.setGeometry(QtCore.QRect(6, 16, 60, 22))\n self.ID_e5.setObjectName(\"ID_e5\")\n self.status_e5_closed = QtWidgets.QLabel(self.widget_elevator5)\n self.status_e5_closed.setAlignment(QtCore.Qt.AlignCenter)\n self.status_e5_closed.setGeometry(QtCore.QRect(73, 564, 60, 22))\n self.status_e5_closed.setStyleSheet(\"color: rgb(223,129,113);\")\n self.status_e5_closed.setObjectName(\"status_e5_closed\")\n self.status_e5_open = QtWidgets.QLabel(self.widget_elevator5)\n self.status_e5_open.setAlignment(QtCore.Qt.AlignCenter)\n self.status_e5_open.setGeometry(QtCore.QRect(73, 564, 60, 22))\n self.status_e5_open.setStyleSheet(\"color: rgb(99,192,135);\")\n self.status_e5_open.setObjectName(\"status_e5_open\")\n self.status_e5_running = QtWidgets.QLabel(self.widget_elevator5)\n self.status_e5_running.setAlignment(QtCore.Qt.AlignCenter)\n self.status_e5_running.setGeometry(QtCore.QRect(73, 564, 60, 22))\n self.status_e5_running.setStyleSheet(\"color: rgb(206,180,139);\")\n self.status_e5_running.setObjectName(\"status_e5_running\")\n self.floor_e5 = QtWidgets.QLabel(self.widget_elevator5)\n self.floor_e5.setAlignment(QtCore.Qt.AlignCenter)\n self.floor_e5.setGeometry(QtCore.QRect(73, 15, 55, 22))\n self.floor_e5.setStyleSheet(\"color: rgb(98, 153 ,183);\")\n self.floor_e5.setObjectName(\"floor_e5\")\n self.elevator5Slider = mySlider(self.widget_elevator5)\n self.elevator5Slider.setMinimum(0)\n self.elevator5Slider.setMaximum(190)\n self.elevator5Slider.setGeometry(QtCore.QRect(90, 40, 22, 520))\n self.elevator5Slider.setOrientation(QtCore.Qt.Vertical)\n self.elevator5Slider.setObjectName(\"elevator5Slider\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 22))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\n \"MainWindow\", \"Elevator Simulator\"))\n 
self.label_6.setText(_translate(\"MainWindow\", \"1\"))\n self.label_12.setText(_translate(\"MainWindow\", \"11\"))\n self.label.setText(_translate(\"MainWindow\", \"Floor\"))\n self.label_10.setText(_translate(\"MainWindow\", \"9\"))\n self.label_19.setText(_translate(\"MainWindow\", \"13\"))\n self.label_8.setText(_translate(\"MainWindow\", \"3\"))\n self.label_13.setText(_translate(\"MainWindow\", \"12\"))\n self.label_18.setText(_translate(\"MainWindow\", \"16\"))\n self.label_9.setText(_translate(\"MainWindow\", \"8\"))\n self.label_17.setText(_translate(\"MainWindow\", \"17\"))\n self.label_5.setText(_translate(\"MainWindow\", \"4\"))\n self.label_21.setText(_translate(\"MainWindow\", \"15\"))\n self.label_3.setText(_translate(\"MainWindow\", \"6\"))\n self.label_20.setText(_translate(\"MainWindow\", \"14\"))\n self.label_11.setText(_translate(\"MainWindow\", \"10\"))\n self.label_16.setText(_translate(\"MainWindow\", \"18\"))\n self.label_7.setText(_translate(\"MainWindow\", \"2\"))\n self.label_15.setText(_translate(\"MainWindow\", \"19\"))\n self.label_14.setText(_translate(\"MainWindow\", \"20\"))\n self.label_2.setText(_translate(\"MainWindow\", \"7\"))\n self.label_4.setText(_translate(\"MainWindow\", \"5\"))\n self.label_refresh.setText(_translate(\"MainWindow\", \"Refresh\"))\n self.pushButton_refresh.setText(_translate(\"MainWindow\", \"♻️\"))\n self.pushButton_2d.setText(_translate(\"MainWindow\", \"↓\"))\n self.pushButton_11d.setText(_translate(\"MainWindow\", \"↓\"))\n self.pushButton_19d.setText(_translate(\"MainWindow\", \"↓\"))\n self.pushButton_7u.setText(_translate(\"MainWindow\", \"↑\"))\n self.pushButton_14u.setText(_translate(\"MainWindow\", \"↑\"))\n self.pushButton_15u.setText(_translate(\"MainWindow\", \"↑\"))\n self.pushButton_14d.setText(_translate(\"MainWindow\", \"↓\"))\n self.pushButton_2u.setText(_translate(\"MainWindow\", \"↑\"))\n self.pushButton_6d.setText(_translate(\"MainWindow\", \"↓\"))\n self.pushButton_13u.setText(_translate(\"MainWindow\", \"↑\"))\n self.pushButton_17d.setText(_translate(\"MainWindow\", \"↓\"))\n self.pushButton_11u.setText(_translate(\"MainWindow\", \"↑\"))\n self.pushButton_4d.setText(_translate(\"MainWindow\", \"↓\"))\n self.pushButton_18u.setText(_translate(\"MainWindow\", \"↑\"))\n self.pushButton_15d.setText(_translate(\"MainWindow\", \"↓\"))\n self.pushButton_16d.setText(_translate(\"MainWindow\", \"↓\"))\n self.pushButton_8d.setText(_translate(\"MainWindow\", \"↓\"))\n self.pushButton_5d.setText(_translate(\"MainWindow\", \"↓\"))\n self.pushButton_19u.setText(_translate(\"MainWindow\", \"↑\"))\n self.pushButton_17u.setText(_translate(\"MainWindow\", \"↑\"))\n self.pushButton_20d.setText(_translate(\"MainWindow\", \"↓\"))\n self.pushButton_10u.setText(_translate(\"MainWindow\", \"↑\"))\n self.pushButton_18d.setText(_translate(\"MainWindow\", \"↓\"))\n self.pushButton_3u.setText(_translate(\"MainWindow\", \"↑\"))\n self.pushButton_8u.setText(_translate(\"MainWindow\", \"↑\"))\n self.pushButton_5u.setText(_translate(\"MainWindow\", \"↑\"))\n self.pushButton_3d.setText(_translate(\"MainWindow\", \"↓\"))\n self.pushButton_12d.setText(_translate(\"MainWindow\", \"↓\"))\n self.pushButton_16u.setText(_translate(\"MainWindow\", \"↑\"))\n self.pushButton_7d.setText(_translate(\"MainWindow\", \"↓\"))\n self.pushButton_1u.setText(_translate(\"MainWindow\", \"↑\"))\n self.pushButton_12u.setText(_translate(\"MainWindow\", \"↑\"))\n self.pushButton_6u.setText(_translate(\"MainWindow\", \"↑\"))\n 
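The pushButton_<N>u / pushButton_<N>d widgets being labelled here are the per-floor hall-call buttons; note there is no down call on floor 1 and no up call on floor 20. The generated file only creates and labels them, so any signal wiring happens elsewhere. A possible wiring, with the handler signature assumed for illustration:

def connect_call_buttons(ui, handler, floors=20):
    """Wire pushButton_<N>u / pushButton_<N>d to handler(floor, direction)."""
    for floor in range(1, floors + 1):
        for suffix, direction in (("u", "up"), ("d", "down")):
            btn = getattr(ui, "pushButton_%d%s" % (floor, suffix), None)
            if btn is None:
                continue  # floor 1 has no down button, floor 20 no up button
            # default arguments freeze the loop variables per connection
            btn.clicked.connect(
                lambda _checked=False, f=floor, d=direction: handler(f, d))
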
self.pushButton_9u.setText(_translate(\"MainWindow\", \"↑\"))\n self.pushButton_4u.setText(_translate(\"MainWindow\", \"↑\"))\n self.pushButton_9d.setText(_translate(\"MainWindow\", \"↓\"))\n self.pushButton_10d.setText(_translate(\"MainWindow\", \"↓\"))\n self.pushButton_13d.setText(_translate(\"MainWindow\", \"↓\"))\n self.elevator1_5.setText(_translate(\"MainWindow\", \"5\"))\n self.elevator1_4.setText(_translate(\"MainWindow\", \"4\"))\n self.elevator1_2.setText(_translate(\"MainWindow\", \"2\"))\n self.elevator1_15.setText(_translate(\"MainWindow\", \"15\"))\n self.elevator1_13.setText(_translate(\"MainWindow\", \"13\"))\n self.elevator1_19.setText(_translate(\"MainWindow\", \"19\"))\n self.elevator1_16.setText(_translate(\"MainWindow\", \"16\"))\n self.elevator1_17.setText(_translate(\"MainWindow\", \"17\"))\n self.elevator1_7.setText(_translate(\"MainWindow\", \"7\"))\n self.elevator1_12.setText(_translate(\"MainWindow\", \"12\"))\n self.elevator1_1.setText(_translate(\"MainWindow\", \"1\"))\n self.elevator1_3.setText(_translate(\"MainWindow\", \"3\"))\n self.elevator1_9.setText(_translate(\"MainWindow\", \"9\"))\n self.elevator1_8.setText(_translate(\"MainWindow\", \"8\"))\n self.elevator1_14.setText(_translate(\"MainWindow\", \"14\"))\n self.elevator1_10.setText(_translate(\"MainWindow\", \"10\"))\n self.elevator1_6.setText(_translate(\"MainWindow\", \"6\"))\n self.elevator1_11.setText(_translate(\"MainWindow\", \"11\"))\n self.elevator1_18.setText(_translate(\"MainWindow\", \"18\"))\n self.elevator1_20.setText(_translate(\"MainWindow\", \"20\"))\n self.elevator2_5.setText(_translate(\"MainWindow\", \"5\"))\n self.elevator2_4.setText(_translate(\"MainWindow\", \"4\"))\n self.elevator2_2.setText(_translate(\"MainWindow\", \"2\"))\n self.elevator2_15.setText(_translate(\"MainWindow\", \"15\"))\n self.elevator2_13.setText(_translate(\"MainWindow\", \"13\"))\n self.elevator2_19.setText(_translate(\"MainWindow\", \"19\"))\n self.elevator2_16.setText(_translate(\"MainWindow\", \"16\"))\n self.elevator2_17.setText(_translate(\"MainWindow\", \"17\"))\n self.elevator2_7.setText(_translate(\"MainWindow\", \"7\"))\n self.elevator2_12.setText(_translate(\"MainWindow\", \"12\"))\n self.elevator2_1.setText(_translate(\"MainWindow\", \"1\"))\n self.elevator2_3.setText(_translate(\"MainWindow\", \"3\"))\n self.elevator2_9.setText(_translate(\"MainWindow\", \"9\"))\n self.elevator2_8.setText(_translate(\"MainWindow\", \"8\"))\n self.elevator2_14.setText(_translate(\"MainWindow\", \"14\"))\n self.elevator2_10.setText(_translate(\"MainWindow\", \"10\"))\n self.elevator2_6.setText(_translate(\"MainWindow\", \"6\"))\n self.elevator2_11.setText(_translate(\"MainWindow\", \"11\"))\n self.elevator2_18.setText(_translate(\"MainWindow\", \"18\"))\n self.elevator2_20.setText(_translate(\"MainWindow\", \"20\"))\n self.elevator3_5.setText(_translate(\"MainWindow\", \"5\"))\n self.elevator3_4.setText(_translate(\"MainWindow\", \"4\"))\n self.elevator3_2.setText(_translate(\"MainWindow\", \"2\"))\n self.elevator3_15.setText(_translate(\"MainWindow\", \"15\"))\n self.elevator3_13.setText(_translate(\"MainWindow\", \"13\"))\n self.elevator3_19.setText(_translate(\"MainWindow\", \"19\"))\n self.elevator3_16.setText(_translate(\"MainWindow\", \"16\"))\n self.elevator3_17.setText(_translate(\"MainWindow\", \"17\"))\n self.elevator3_7.setText(_translate(\"MainWindow\", \"7\"))\n self.elevator3_12.setText(_translate(\"MainWindow\", \"12\"))\n self.elevator3_1.setText(_translate(\"MainWindow\", \"1\"))\n 
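The in-car floor buttons of all five elevators get identical numeric labels, one literal setText call per widget. pyuic emits them individually so that Qt's translation tooling can find the string literals; a hand-written loop equivalent (a sketch only, not part of this file) would be:

from PyQt5 import QtCore

def retranslate_floor_buttons(ui, elevators=5, floors=20):
    """Loop equivalent of the elevator<E>_<F>.setText(...) calls above."""
    _translate = QtCore.QCoreApplication.translate
    for e in range(1, elevators + 1):
        for f in range(1, floors + 1):
            getattr(ui, "elevator%d_%d" % (e, f)).setText(
                _translate("MainWindow", str(f)))
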
self.elevator3_3.setText(_translate(\"MainWindow\", \"3\"))\n self.elevator3_9.setText(_translate(\"MainWindow\", \"9\"))\n self.elevator3_8.setText(_translate(\"MainWindow\", \"8\"))\n self.elevator3_14.setText(_translate(\"MainWindow\", \"14\"))\n self.elevator3_10.setText(_translate(\"MainWindow\", \"10\"))\n self.elevator3_6.setText(_translate(\"MainWindow\", \"6\"))\n self.elevator3_11.setText(_translate(\"MainWindow\", \"11\"))\n self.elevator3_18.setText(_translate(\"MainWindow\", \"18\"))\n self.elevator3_20.setText(_translate(\"MainWindow\", \"20\"))\n self.elevator4_5.setText(_translate(\"MainWindow\", \"5\"))\n self.elevator4_4.setText(_translate(\"MainWindow\", \"4\"))\n self.elevator4_2.setText(_translate(\"MainWindow\", \"2\"))\n self.elevator4_15.setText(_translate(\"MainWindow\", \"15\"))\n self.elevator4_13.setText(_translate(\"MainWindow\", \"13\"))\n self.elevator4_19.setText(_translate(\"MainWindow\", \"19\"))\n self.elevator4_16.setText(_translate(\"MainWindow\", \"16\"))\n self.elevator4_17.setText(_translate(\"MainWindow\", \"17\"))\n self.elevator4_7.setText(_translate(\"MainWindow\", \"7\"))\n self.elevator4_12.setText(_translate(\"MainWindow\", \"12\"))\n self.elevator4_1.setText(_translate(\"MainWindow\", \"1\"))\n self.elevator4_3.setText(_translate(\"MainWindow\", \"3\"))\n self.elevator4_9.setText(_translate(\"MainWindow\", \"9\"))\n self.elevator4_8.setText(_translate(\"MainWindow\", \"8\"))\n self.elevator4_14.setText(_translate(\"MainWindow\", \"14\"))\n self.elevator4_10.setText(_translate(\"MainWindow\", \"10\"))\n self.elevator4_6.setText(_translate(\"MainWindow\", \"6\"))\n self.elevator4_11.setText(_translate(\"MainWindow\", \"11\"))\n self.elevator4_18.setText(_translate(\"MainWindow\", \"18\"))\n self.elevator4_20.setText(_translate(\"MainWindow\", \"20\"))\n self.elevator5_5.setText(_translate(\"MainWindow\", \"5\"))\n self.elevator5_4.setText(_translate(\"MainWindow\", \"4\"))\n self.elevator5_2.setText(_translate(\"MainWindow\", \"2\"))\n self.elevator5_15.setText(_translate(\"MainWindow\", \"15\"))\n self.elevator5_13.setText(_translate(\"MainWindow\", \"13\"))\n self.elevator5_19.setText(_translate(\"MainWindow\", \"19\"))\n self.elevator5_16.setText(_translate(\"MainWindow\", \"16\"))\n self.elevator5_17.setText(_translate(\"MainWindow\", \"17\"))\n self.elevator5_7.setText(_translate(\"MainWindow\", \"7\"))\n self.elevator5_12.setText(_translate(\"MainWindow\", \"12\"))\n self.elevator5_1.setText(_translate(\"MainWindow\", \"1\"))\n self.elevator5_3.setText(_translate(\"MainWindow\", \"3\"))\n self.elevator5_9.setText(_translate(\"MainWindow\", \"9\"))\n self.elevator5_8.setText(_translate(\"MainWindow\", \"8\"))\n self.elevator5_14.setText(_translate(\"MainWindow\", \"14\"))\n self.elevator5_10.setText(_translate(\"MainWindow\", \"10\"))\n self.elevator5_6.setText(_translate(\"MainWindow\", \"6\"))\n self.elevator5_11.setText(_translate(\"MainWindow\", \"11\"))\n self.elevator5_18.setText(_translate(\"MainWindow\", \"18\"))\n self.elevator5_20.setText(_translate(\"MainWindow\", \"20\"))\n self.status_e1_closed.setText(_translate(\"MainWindow\", \"closed\"))\n self.status_e2_closed.setText(_translate(\"MainWindow\", \"closed\"))\n self.status_e3_closed.setText(_translate(\"MainWindow\", \"closed\"))\n self.status_e4_closed.setText(_translate(\"MainWindow\", \"closed\"))\n self.status_e5_closed.setText(_translate(\"MainWindow\", \"closed\"))\n self.status_e1_open.setText(_translate(\"MainWindow\", \"open\"))\n 
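Each elevator has three status labels (closed, open, running) that were created earlier at the same QRect(73, 564, 60, 22), so only one can sensibly be visible at a time. How the application toggles them is not part of this generated file; a plausible helper, with names taken from the widgets above, might look like this:

def set_elevator_status(ui, elevator, state):
    """Show exactly one of the overlapping closed/open/running labels."""
    for name in ("closed", "open", "running"):
        label = getattr(ui, "status_e%d_%s" % (elevator, name))
        label.setVisible(name == state)

For example, set_elevator_status(ui, 5, "running") would hide status_e5_closed and status_e5_open and show status_e5_running.
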
self.status_e2_open.setText(_translate(\"MainWindow\", \"open\"))\n self.status_e3_open.setText(_translate(\"MainWindow\", \"open\"))\n self.status_e4_open.setText(_translate(\"MainWindow\", \"open\"))\n self.status_e5_open.setText(_translate(\"MainWindow\", \"open\"))\n self.status_e1_running.setText(_translate(\"MainWindow\", \"running\"))\n self.status_e2_running.setText(_translate(\"MainWindow\", \"running\"))\n self.status_e3_running.setText(_translate(\"MainWindow\", \"running\"))\n self.status_e4_running.setText(_translate(\"MainWindow\", \"running\"))\n self.status_e5_running.setText(_translate(\"MainWindow\", \"running\"))\n self.floor_e1.setText(_translate(\"MainWindow\", \"1\"))\n self.floor_e2.setText(_translate(\"MainWindow\", \"1\"))\n self.floor_e3.setText(_translate(\"MainWindow\", \"1\"))\n self.floor_e4.setText(_translate(\"MainWindow\", \"1\"))\n self.floor_e5.setText(_translate(\"MainWindow\", \"1\"))\n self.ID_e1.setText(_translate(\"MainWindow\", \"No.1\"))\n self.ID_e2.setText(_translate(\"MainWindow\", \"No.2\"))\n self.ID_e3.setText(_translate(\"MainWindow\", \"No.3\"))\n self.ID_e4.setText(_translate(\"MainWindow\", \"No.4\"))\n self.ID_e5.setText(_translate(\"MainWindow\", \"No.5\"))\n","sub_path":"elevatorInterface.py","file_name":"elevatorInterface.py","file_ext":"py","file_size_in_byte":56016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"459489825","text":"import math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\n# Complete the gemstones function below.\r\ndef gemstones(arr):\r\n\r\n common_elements = set(arr[0]).intersection(set(arr[1]))\r\n\r\n for i in range(2, len(arr)):\r\n common_elements = common_elements.intersection(set(arr[i]))\r\n\r\n return len(common_elements)\r\n\r\n\r\nn = int(input())\r\n\r\narr = []\r\n\r\nfor _ in range(n):\r\n arr_item = input()\r\n arr.append(arr_item)\r\n\r\nresult = gemstones(arr)\r\n\r\nprint(result)\r\n","sub_path":"Personal_Projects/FindCommonChars.py","file_name":"FindCommonChars.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"605005533","text":"\nimport tensorflow as tf\nimport numpy as np\n\n\nclass Network(object):\n\n def __init__(self, sentence_size, categories, vocabulary_size, embedding_size, filters, filter_size):\n\n #placeholders of input, output\n self.x = tf.placeholder(tf.int32, [None, sentence_size], name=\"x\")\n self.y = tf.placeholder(tf.float32, [None, categories], name=\"y\")\n\n #embedding layer\n with tf.name_scope(\"embedding\"):\n W_emebeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0), name=\"W_emebeddings\")\n #W_emebeddings = tf.Print(W_emebeddings, [W_emebeddings], message=\"W embedded : \")\n self.embedded_words = tf.nn.embedding_lookup(W_emebeddings, self.x)\n #self.embedded_words = tf.Print(self.embedded_words, [self.embedded_words], message=\"embedded_words chars\")\n self.embedded_words_expanded = tf.expand_dims(self.embedded_words, -1)\n\n #convolution + maxpool layer for each filter size\n total_maxpools = []\n for i, filter in enumerate(filters):\n with tf.name_scope(\"convolution-maxpool-%s\" % filter):\n filter_shape = [filter, embedding_size, 1, filter_size]\n #convolution\n W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name=\"W\")\n b = tf.Variable(tf.constant(0.1, shape=[filter_size]), name=\"b\")\n conv = tf.nn.conv2d(self.embedded_words_expanded, W, 
strides=[1, 1, 1, 1], padding=\"VALID\", name=\"convolution\")\n print(conv.get_shape())\n #activation function\n h_conv = tf.nn.relu(conv + b, name=\"relu\")\n #maxpool\n max_pool = tf.nn.max_pool(h_conv, ksize=[1, sentence_size - filter + 1, 1, 1], strides=[1, 1, 1, 1], padding='VALID', name=\"maxpool\")\n total_maxpools.append(max_pool)\n\n #combine all the maxpool features\n total_filters_size = filter_size * len(filters)\n self.h_pool = tf.concat(3, total_maxpools, name=\"concat\")\n self.h_pool_flat = tf.reshape(self.h_pool, [-1, total_filters_size])\n\n #dropout\n self.dropout_keep = tf.placeholder(tf.float32, name=\"dropout_keep\")\n with tf.name_scope(\"dropout\"):\n self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep)\n\n #output layer with (unnormalized) costs and predictions\n with tf.name_scope(\"output\"):\n W = tf.get_variable(\"W\", shape=[total_filters_size, categories], initializer=tf.contrib.layers.xavier_initializer())\n b = tf.Variable(tf.constant(0.1, shape=[categories]), name=\"b\")\n self.y_conv = tf.nn.xw_plus_b(self.h_drop, W, b, name=\"scores\")\n self.prediction = tf.argmax(self.y_conv, 1, name=\"predictions\")\n\n #cross-entropy cost\n with tf.name_scope(\"cost\"):\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(self.y_conv, self.y)\n self.cost = tf.reduce_mean(cross_entropy)\n\n #accuracy\n with tf.name_scope(\"accuracy\"):\n correct_prediction = tf.equal(self.prediction, tf.argmax(self.y, 1), name=\"correct-predictions\")\n self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name=\"accuracy\")\n","sub_path":"backend/text-classification-tf-synonyms_invocab/Network.py","file_name":"Network.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"186762309","text":"#!/usr/bin/python\r\nimport subprocess\r\n\r\n#----------------------------------------------------------------------\r\n# Object for results of an external command.\r\n#----------------------------------------------------------------------\r\nclass CommandResult:\r\n def __init__(self, code, output, error = None):\r\n self.code = code\r\n self.output = output\r\n self.error = error\r\n\r\n#----------------------------------------------------------------------\r\n# Run an external command.\r\n#----------------------------------------------------------------------\r\ndef runCommand(command, returnNoneOnSuccess = True):\r\n # Default mode - Pipe stdout and stderr to stdout\r\n # -----------------------------------------------\r\n try:\r\n commandStream = subprocess.Popen(command, shell = True,\r\n stdin = subprocess.PIPE,\r\n stdout = subprocess.PIPE,\r\n stderr = subprocess.PIPE)\r\n output, error = commandStream.communicate()\r\n code = commandStream.returncode\r\n\r\n # For downward compatibility we return None for a successful\r\n # command return code\r\n # ----------------------------------------------------------\r\n if returnNoneOnSuccess and code == 0:\r\n return CommandResult(None, output, error)\r\n except Exception as info:\r\n logwrite(\"failure running command: %s\\n%s\" % (command, str(info)))\r\n\r\n return CommandResult(code, output, error)\r\n","sub_path":"nscan-kenel/scripts/PEP8/run_util.py","file_name":"run_util.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"428920548","text":"from django.test import TestCase\n\nfrom company.models import Vacancy, 
VacancyQuestion, Choice\nfrom company.serializers import VacancyQuestionSerializer, VacancySerializer\n\n\nclass test(TestCase):\n\n def test_serializers(self):\n self.create_data()\n v = Vacancy.objects.get(id=1)\n q = VacancyQuestion.objects.get(id=1)\n c = Choice.objects.get(choice_text=\"five\")\n ser = VacancySerializer(v)\n print(ser.data)\n ser = VacancyQuestionSerializer(q)\n print(ser.data)\n\n def create_data(self):\n v = Vacancy.objects.create(company_id=1, title=\"hjfd\", description=\"kfjg\", requirements=\"kfgjfkj\", benefits=\"kfjdkg\", salary=1111, job_type=\"Full Time\", interest_field=\"kjgdfkj\")\n q = VacancyQuestion.objects.create(question_text=\"How old are you?\", vacancy=v)\n c = Choice.objects.create(choice_text=\"five\", question=q)\n","sub_path":"company/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"253746313","text":"# Copyright 2013 National Technology & Engineering Solutions of Sandia, LLC (NTESS). \r\n# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government \r\n# retains certain rights in this software.\r\n\r\n# This is a Python command line script which decomposes a set\r\n# of movies into pairwise distance matrices and trajectories\r\n# for the VideoSwarm application. The inputs to this script are\r\n# a .csv file containing meta data, a column number in the .csv file\r\n# (indexed from 1) indicating the movie file names to use for the\r\n# distance matrix calculations, and a directory name (must exist)\r\n# to write out the necessary VideoSwarm files.\r\n\r\n# S. Martin\r\n# 9/15/2017\r\n\r\n# Options have been added to create movies and run in parallel.\r\n# J. Gittinger, M. Letter \r\n# 11/2020\r\n\r\n\r\n# standard library\r\n##################\r\n\r\n# command line \r\nimport argparse\r\n\r\n# reading files, parsing file names\r\nimport os\r\nfrom os import listdir\r\nimport csv\r\nimport urllib.parse\r\n\r\n# error handling\r\nimport sys\r\nimport traceback\r\n\r\n# estimating time to complete\r\nimport time\r\nimport logging\r\nimport itertools\r\n\r\n# 3rd party libraries\r\n#####################\r\n\r\n# video processing\r\nimport imageio\r\n\r\n# parallel computations\r\nimport ipyparallel\r\n\r\n# computing distance matrices and coordinates\r\nimport numpy\r\nfrom sklearn.metrics.pairwise import euclidean_distances\r\n\r\n# create movies\r\nimport ffmpy\r\n\r\n\r\n# subroutines to set up environment\r\n###################################\r\n\r\n# parse command line arguments\r\ndef parse_command_line ():\r\n\r\n # provide description\r\n parser = argparse.ArgumentParser(\r\n description=\"Computes MDS coordinates for video frames \"\r\n \"and trajectories for use by the VideoSwarm \"\r\n \"Slycat plugin. \"\r\n \"The output files are in the VideoSwarm format \"\r\n \"with the names movies.trajectories, movies.xcoords \"\r\n \"movies.ycoords, and movies.csv.\")\r\n\r\n # mandatory arguments\r\n #####################\r\n\r\n # csv file\r\n parser.add_argument(\"--csv_file\",\r\n help=\"the .csv file containing the meta data for the videos \"\r\n \"to be processed.\")\r\n\r\n # column of frames\r\n parser.add_argument(\"--frame_col\", type=int,\r\n help=\"column number (indexed by 1) with the frame files \"\r\n \"to be processed, first frame only. 
Note that frame \"\r\n \"files are expected to be of the format *.#.*, \"\r\n \"where * is the video name (and does not vary by frame), \"\r\n \"# is the frame number in the video, \"\r\n \"and the last * is the file type extension.\")\r\n\r\n # output directory for VS files\r\n parser.add_argument(\"--output_dir\",\r\n help=\"output directory for the VideoSwarm files\")\r\n\r\n # conditional mandatory arguments\r\n #################################\r\n\r\n # decision to generate movies\r\n parser.add_argument(\"--generate_movies\",\r\n help=\"generate movies\")\r\n\r\n # if generate_movies is true, must also provide\r\n # output directory containing movies\r\n parser.add_argument(\"--movie_dir\",\r\n help=\"write movies to this directory\")\r\n\r\n # if generate_movies is false, must provide instead\r\n\r\n # column of movies\r\n parser.add_argument(\"--movie_col\", default=None,\r\n help=\"column number (indexed by 1) with the movie files \"\r\n \"(can't use with --generate_movies).\")\r\n\r\n # optional arguments\r\n ####################\r\n\r\n # log file\r\n parser.add_argument(\"--log_file\", default=None,\r\n help=\"log file for job status (optional)\")\r\n\r\n # decision to replace existing movies\r\n parser.add_argument(\"--replace_movies\", default=None,\r\n help=\"replace existing movies.\")\r\n\r\n # naming template for the simulation\r\n parser.add_argument(\"--sim_id_template\", default=None,\r\n help=\"naming template for the simulation id, consists of the \"\r\n \"first part of the frame file path, up to the simulation identifier.\")\r\n\r\n # number of dimensions to use for alignment\r\n parser.add_argument(\"--num_dim\", default=10, type=int,\r\n help=\"number of dimensions to use for alignment between frames, \"\r\n \"defaults to 10.\")\r\n\r\n # percent energy to use for alignment\r\n parser.add_argument(\"--energy\", default=None, type=float,\r\n help=\"percent of energy to use for alignment (overrides num_dim).\")\r\n\r\n # known duration of video\r\n parser.add_argument(\"--fps\", default=25, type=float,\r\n help=\"frame per second, if known. 
If unknown the script will \"\r\n \"default to 25 fps.\")\r\n\r\n # size of parallel partition\r\n parser.add_argument('--group_size', default=10, type=int,\r\n help=\"number of frames per processor for parallel computation, \"\r\n \"defaults to 50.\")\r\n\r\n # parse arguments and distribute to variables\r\n return parser.parse_args()\r\n\r\n# log file handler\r\ndef create_job_logger(file_name):\r\n \"\"\"\r\n returns a logging function with the jid.log as the file name\r\n changed to print for compatibility for hpc\r\n :param jid: job id\r\n :return:\r\n \"\"\"\r\n return lambda msg: print(msg, flush = True)\r\n\r\n# set up logging\r\ndef init_logging (args):\r\n\r\n # set up log file, or print to screen (default)\r\n if args.log_file:\r\n log = create_job_logger(args.log_file)\r\n else:\r\n def log(msg):\r\n print(msg, flush = True)\r\n \r\n return log\r\n\r\n# check command line arguments\r\ndef check_command_line_args(args, log):\r\n\r\n # check to see if mandatory arguments are present\r\n if args.csv_file == None:\r\n log(\"[VS_LOG] Error: .csv file not specified.\")\r\n sys.exit()\r\n\r\n if args.frame_col == None:\r\n log(\"[VS-LOG] Error: frame column must be specified.\")\r\n sys.exit()\r\n\r\n if args.output_dir == None:\r\n log(\"[VS-LOG] Error: output directory not specified.\")\r\n sys.exit()\r\n\r\n # check that frame col is >= 1\r\n if args.frame_col <= 0:\r\n log('[VS-LOG] Error: frame column must be >= 1.')\r\n sys.exit()\r\n\r\n # check presence of both conditional arguments\r\n if args.movie_col != 'None' and args.generate_movies == 'true':\r\n log(\"[VS-LOG] Error: can't use both --generate_movies and --movie_col.\")\r\n sys.exit()\r\n\r\n # check conditional mandatory arguments\r\n if args.generate_movies == 'true':\r\n\r\n # check that movie directory was specified\r\n if args.movie_dir == None:\r\n log(\"[VS-LOG] Error: movie directory not specified.\")\r\n sys.exit()\r\n\r\n # check if the movie directory exists\r\n if os.path.exists(args.movie_dir):\r\n\r\n # check if overwrite has been selected\r\n if not args.replace_movies:\r\n\r\n log(\"[VS-LOG] Movie directory already exists, you must use \"\r\n \"--replace_movies to proceed.\")\r\n sys.exit()\r\n\r\n # otherwise check to see if movie column was provided and >= 1\r\n # else:\r\n \r\n # movie column provided\r\n # if args.movie_col == 'None':\r\n # log('[VS-LOG] Error: must either generate movies or specify movie column.')\r\n # sys.exit()\r\n \r\n # movie col is >= 1\r\n # if args.movie_col <= 0:\r\n # log('[VS-LOG] Error: movie column must be >= 1.')\r\n # sys.exit()\r\n\r\n # check optional arguments\r\n if args.group_size <= 0:\r\n log('[VS-LOG] Error: group size must be >= 1.')\r\n sys.exit()\r\n\r\n if args.fps <= 0:\r\n log('[VS-LOG] Error: fps must be > 0.')\r\n sys.exit()\r\n\r\n# set up parallel python\r\ndef init_parallel(log):\r\n\r\n # set up ipython processor pool\r\n try:\r\n pool = ipyparallel.Client(profile=None)\r\n pool = pool.direct_view()\r\n except Exception as e:\r\n log(str(e))\r\n raise Exception(\"A running IPython parallel cluster is required to run this script.\")\r\n\r\n return pool\r\n\r\n# create output directories if they don't already exist\r\ndef init_working_dirs (args, log):\r\n\r\n # check to see if output directory exists\r\n if not os.path.exists(args.output_dir):\r\n\r\n # make diretory if it does not exist\r\n log(\"[VS-LOG] Creating working directory: \" + args.output_dir)\r\n os.makedirs(args.output_dir)\r\n\r\n # check to see if movies directory exists\r\n if 
args.generate_movies == 'true':\r\n if not os.path.exists(args.movie_dir):\r\n\r\n # make directory if it does not exist\r\n log(\"[VS-LOG] Creating movies directory: \" + args.movie_dir)\r\n os.makedirs(args.movie_dir)\r\n\r\n# set up MDS alignment parameters\r\ndef init_parameters (args, log):\r\n\r\n # check limits on number dimensions\r\n num_dim = args.num_dim\r\n if num_dim < 2:\r\n log(\"[VS-LOG] Error: number dimensions must be >= 2.\")\r\n sys.exit()\r\n\r\n # check limits on percent energy\r\n use_energy = False\r\n energy = 0\r\n if args.energy != None:\r\n energy = args.energy\r\n if energy <= 0 or energy > 100:\r\n log(\"[VS-LOG] Error: percent energy must be > 0 and <= 100.\")\r\n sys.exit()\r\n else:\r\n use_energy = True\r\n energy = float(args.energy)\r\n\r\n return num_dim, use_energy, energy\r\n\r\n# subroutines for reading and finding files\r\n###########################################\r\n\r\n# read csv file and get movie/frame file names\r\ndef read_csv(args, log):\r\n\r\n # read csv file\r\n log(\"[VS-LOG] Reading \" + str(args.csv_file) + \" ...\")\r\n csv_file = open(args.csv_file)\r\n meta_data = list(csv.reader(csv_file))\r\n csv_file.close()\r\n\r\n num_movies = len(meta_data) - 1\r\n\r\n # get file names of movies\r\n movie_files = []\r\n if args.movie_col != 'None':\r\n movie_files = [movie_file[int(args.movie_col) - 1] for movie_file in meta_data]\r\n movie_files = movie_files[1:]\r\n\r\n # get file names of frames\r\n frame_files = [frame_file[int(args.frame_col) - 1] for frame_file in meta_data]\r\n frame_files = frame_files[1:]\r\n\r\n return num_movies, movie_files, frame_files, meta_data\r\n\r\n# read and order frame files, generate movies if requested\r\ndef order_frame_files(args, log, num_movies, movie_files, frame_files, pool):\r\n\r\n # identify all frame files and order them by frame number\r\n log(\"[VS-LOG] Locating and ordering frame files ...\")\r\n\r\n num_frames = 0\r\n all_frame_files = []\r\n\r\n list_args = list(itertools.repeat(args, args.group_size))\r\n list_log = list(itertools.repeat(log, args.group_size))\r\n list_frame_files = []\r\n list_indeces = []\r\n\r\n for i in range(0, num_movies):\r\n\r\n # create movie, if requested\r\n if (args.generate_movies == 'true' and args.replace_movies == 'None') or (args.generate_movies == 'true' and args.replace_movies == 'true'):\r\n list_frame_files.append(frame_files[i])\r\n list_indeces.append(i)\r\n \r\n if i%args.group_size == 0:\r\n pool_results = pool.map_sync(create_movie,\r\n list_args,\r\n list_log,\r\n list_frame_files,\r\n list_indeces)\r\n list_frame_files = []\r\n list_indeces = []\r\n for j in range(0, len(pool_results)):\r\n movie_files.append(pool_results[j])\r\n\r\n elif i == num_movies - 1:\r\n pool_results = pool.map_sync(create_movie,\r\n list_args[0:i%args.group_size],\r\n list_log[0:i%args.group_size],\r\n list_frame_files,\r\n list_indeces)\r\n for k in range(0, len(pool_results)):\r\n movie_files.append(pool_results[k])\r\n\r\n # keep track of movie files created\r\n\r\n elif (args.generate_movies == 'false' and args.movie_dir != 'None') or (args.generate_movies == 'true' and args.replace_movies == 'false'):\r\n movie_input, movie_output, file_location, frame_file_path = create_movie_name(args, log, frame_files, i)\r\n movie_name = file_location + movie_output\r\n movie_files.append(movie_name)\r\n\r\n # isolate first frame file\r\n frame_file_path, frame_file_name = \\\r\n os.path.split(urllib.parse.urlparse(frame_files[i]).path)\r\n\r\n if args.generate_movies == 
'false' and args.movie_dir != 'None':\r\n if args.sim_id_template != None:\r\n frame_file_path_split = frame_file_path.split(args.sim_id_template)\r\n frame_file_path_split = frame_file_path_split[1].split('/')\r\n simulation_id = '.' + frame_file_path_split[0] \r\n\r\n # check for at least two dots in frame file name\r\n frame_split = frame_file_name.split('.')\r\n if len(frame_split) < 3:\r\n log(\"[VS-LOG] Error: incorrect frame file name format.\")\r\n sys.exit()\r\n\r\n # get root file name, frame #, and extension\r\n frame_ext = frame_split[-1]\r\n frame_num = frame_split[-2]\r\n frame_root = \".\".join(frame_split[0:-2])\r\n\r\n # get all files in frame path\r\n files_in_path = os.listdir(frame_file_path)\r\n\r\n # restrict to files with same root name\r\n frames_in_path = []\r\n frame_nums_in_path = []\r\n for j in range(0, len(files_in_path)):\r\n\r\n # get root file name\r\n file_split = files_in_path[j].split(\".\")\r\n\r\n # only consider files with at least two dots\r\n if len(file_split) < 3:\r\n continue\r\n\r\n # only consider files with same extension\r\n file_ext = file_split[-1]\r\n if file_ext != frame_ext:\r\n continue\r\n\r\n # get file root & frame num\r\n file_root = \".\".join(file_split[0:-2])\r\n file_num = file_split[-2]\r\n\r\n # compare to file root of frames of interest\r\n if frame_root == file_root:\r\n frames_in_path.append(files_in_path[j])\r\n frame_nums_in_path.append(int(file_num))\r\n\r\n # order frames in path by frame number\r\n all_frame_files.append([os.path.join(frame_file_path, frames_in_path[j])\r\n for j in numpy.argsort(frame_nums_in_path)])\r\n\r\n # check that all movies have same number of frames\r\n if i == 0:\r\n num_frames = len(all_frame_files[i])\r\n elif num_frames != len(all_frame_files[i]):\r\n # log(\"[VS-LOG] Error: inconsistent number of frames for video \" + str(movie_files[i]))\r\n log(\"[VS-LOG] Error: inconsistent number of frames for video\")\r\n sys.exit()\r\n\r\n # try to read image\r\n try:\r\n frame = imageio.imread(all_frame_files[0][0])\r\n except:\r\n log(\"[VS-LOG] Error: could not read frame \" + str(all_frame_files[0][0]))\r\n sys.exit()\r\n\r\n # may succeed and be empty\r\n if frame is None:\r\n log(\"[VS-LOG] Error: could not read frame \" + str(all_frame_files[0][0]))\r\n sys.exit()\r\n\r\n num_pixels = numpy.size(frame)\r\n\r\n # get duration of video based on number of frames and fps\r\n vid_duration = num_frames / float(args.fps)\r\n log(\"[VS-LOG] Estimated video duration is: \" + str(vid_duration) + \" seconds.\")\r\n\r\n return num_frames, num_pixels, all_frame_files, movie_files, vid_duration\r\n\r\ndef create_movie_name(args, log, frame_file, i):\r\n # isolate first frame file\r\n frame_file_path, frame_file_name = \\\r\n os.path.split(urllib.parse.urlparse(frame_file).path)\r\n\r\n # get simulation identifier for movie generation\r\n if args.sim_id_template != 'None':\r\n frame_file_path_split = frame_file_path.split(args.sim_id_template)\r\n frame_file_path_split = frame_file_path_split[1].split('/')\r\n simulation_id = '.' 
+ frame_file_path_split[0]\r\n else:\r\n simulation_id = '.'\r\n\r\n split_path = frame_file.split(frame_file_path)\r\n\r\n # get the frame name, including number and file extension\r\n frame_name = split_path[1].split('/')[1]\r\n\r\n # get the identifier name only\r\n identifier = frame_name.split('.')[0]\r\n\r\n file_location = split_path[0]\r\n\r\n # generate movie name\r\n if args.movie_dir[-1] == '/':\r\n movie_output = args.movie_dir + identifier + simulation_id + '.%d.mp4' % (i+1)\r\n else:\r\n movie_output = args.movie_dir + '/' + identifier + simulation_id + '.%d.mp4' % (i+1)\r\n\r\n # frames to make into movie\r\n movie_input = frame_file_path + '/' + identifier + '*.jpg'\r\n\r\n return movie_input, movie_output, file_location, frame_file_path\r\n\r\n# create movie i\r\ndef create_movie(args, log, frame_file, i):\r\n import ffmpy\r\n \r\n def create_movie_name_parallel(args, log, frame_file, i):\r\n import os\r\n import urllib.parse\r\n\r\n file_extension = frame_file[len(frame_file) - 3] + frame_file[len(frame_file) - 2] + frame_file[len(frame_file) - 1]\r\n\r\n # isolate first frame file\r\n frame_file_path, frame_file_name = \\\r\n os.path.split(urllib.parse.urlparse(frame_file).path)\r\n\r\n # get simulation identifier for movie generation\r\n if args.sim_id_template != 'None':\r\n frame_file_path_split = frame_file_path.split(args.sim_id_template)\r\n frame_file_path_split = frame_file_path_split[1].split('/')\r\n simulation_id = '.' + frame_file_path_split[0]\r\n else:\r\n simulation_id = '.'\r\n\r\n split_path = frame_file.split(frame_file_path)\r\n\r\n # get the frame name, including number and file extension\r\n frame_name = split_path[1].split('/')[1]\r\n\r\n # get the identifier name only\r\n identifier = frame_name.split('.')[0]\r\n\r\n file_location = split_path[0]\r\n\r\n # generate movie name\r\n if args.movie_dir[-1] == '/':\r\n movie_output = args.movie_dir + identifier + simulation_id + '.%d.mp4' % (i+1)\r\n else:\r\n movie_output = args.movie_dir + '/' + identifier + simulation_id + '.%d.mp4' % (i+1)\r\n\r\n # frames to make into movie\r\n if file_extension == 'png':\r\n movie_input = frame_file_path + '/' + identifier + '*.png'\r\n elif file_extension =='jpg':\r\n movie_input = frame_file_path + '/' + identifier + '*.jpg'\r\n\r\n return movie_input, movie_output, file_location, frame_file_path\r\n\r\n movie_input, movie_output, file_location, frame_file_path = create_movie_name_parallel(args, log, frame_file, i)\r\n\r\n #create the movie\r\n ff = ffmpy.FFmpeg(\r\n inputs={None: ['-y', '-pattern_type', 'glob'], movie_input: None},\r\n outputs={None: ['-force_key_frames', '0.0,0.04,0.08', '-vcodec', \r\n 'libx264', '-pix_fmt', 'yuv420p', '-acodec', 'aac'],\r\n movie_output: None}\r\n )\r\n ff.run()\r\n\r\n return file_location + movie_output\r\n\r\n# subroutines for MDS computations\r\n##################################\r\n\r\n# compute coordinates for a particular frame across all movies\r\n# this routine is run in parallel using ipyparallel so it \r\n# must be self contained in terms of imports and subroutines\r\n# returns coords as a numpy array if everything works,\r\n# otherwise returns a string with an error message\r\ndef compute_coords(frame_number, input_num_movies, input_frame_files, \r\n input_energy, input_use_energy, input_num_dim, input_num_pixels):\r\n\r\n # error handling (standard library)\r\n import sys\r\n import traceback\r\n\r\n # reading frames (3rd party library)\r\n import imageio\r\n\r\n # computations (3rd party libraries)\r\n import 
numpy\r\n from sklearn.metrics.pairwise import euclidean_distances\r\n\r\n # classical multidimensional scaling subroutine\r\n def cmdscale(D):\r\n \"\"\" \r\n Classical multidimensional scaling (MDS) \r\n\r\n Parameters \r\n ---------- \r\n D : (n, n) array \r\n Symmetric distance matrix. \r\n\r\n Returns \r\n ------- \r\n Y : (n, p) array \r\n Configuration matrix. Each column represents a dimension. Only the \r\n p dimensions corresponding to positive eigenvalues of B are returned. \r\n Note that each dimension is only determined up to an overall sign, \r\n corresponding to a reflection. \r\n\r\n e : (n,) array \r\n Eigenvalues of B. \r\n \"\"\"\r\n\r\n # Number of points\r\n n = len(D)\r\n\r\n # Centering matrix\r\n H = numpy.eye(n) - numpy.ones((n, n)) / n\r\n\r\n # YY^T\r\n B = -H.dot(D ** 2).dot(H) / 2\r\n\r\n # Diagonalize\r\n evals, evecs = numpy.linalg.eigh(B)\r\n\r\n # Sort by eigenvalue in descending order\r\n idx = numpy.argsort(evals)[::-1]\r\n evals = evals[idx]\r\n evecs = evecs[:, idx]\r\n\r\n # Compute the coordinates using positive-eigenvalued components only\r\n w, = numpy.where(evals >= 0)\r\n L = numpy.diag(numpy.sqrt(evals[w]))\r\n V = evecs[:, w]\r\n Y = V.dot(L)\r\n\r\n # if only one coordinate then add two columns of zeros\r\n if len(w) == 1:\r\n Y = numpy.append(numpy.reshape(Y, (Y.shape[0], 1)),\r\n numpy.zeros((Y.shape[0], 2)), axis=1)\r\n\r\n # if only two coordinates then add one column of zeros\r\n if len(w) == 2:\r\n Y = numpy.append(Y, numpy.zeros((Y.shape[0], 1)), axis=1)\r\n\r\n return Y, evals\r\n\r\n # start of compute_coords main code\r\n input_frames = numpy.ones((input_num_movies, input_num_pixels))\r\n \r\n # using a try/catch to show errors on remote nodes/cores\r\n try:\r\n\r\n # read in one frame for all movies\r\n for j in range(0, input_num_movies):\r\n\r\n # get frame i\r\n try:\r\n frame_i = imageio.imread(input_frame_files[j])\r\n except Exception as e:\r\n return str(e), input_num_dim\r\n\r\n # check for empty image file\r\n if frame_i is None:\r\n return input_frame_files[j] + ' is empty.', input_num_dim\r\n\r\n # save frame pixels\r\n input_frames[j, :] = frame_i.astype(float).reshape(input_num_pixels)\r\n\r\n # now compute distance for frame i\r\n dist_mat = euclidean_distances(input_frames) / 255.0\r\n\r\n # now compute MDS for frame i\r\n mds_coords, evals = cmdscale(dist_mat)\r\n\r\n # re-compute num_dim if energy is being used\r\n # this should only be done on the first call\r\n if input_use_energy:\r\n\r\n # set num_dims according to percent\r\n energy_evals = numpy.cumsum(evals) / numpy.sum(evals)\r\n energy_dim = numpy.where(energy_evals >= input_energy/100)\r\n if len(energy_dim[0]) == 0:\r\n input_num_dim = len(energy_evals)\r\n\r\n else:\r\n input_num_dim = max(1,numpy.amin(energy_dim)) + 1\r\n\r\n # truncate mds coords\r\n if mds_coords.shape[1] >= input_num_dim:\r\n curr_coords = mds_coords[:,0:input_num_dim]\r\n\r\n else:\r\n curr_coords = numpy.concatenate((mds_coords, \\\r\n numpy.zeros((input_num_movies, \r\n input_num_dim - mds_coords.shape[1]))), axis=1)\r\n\r\n return curr_coords, input_num_dim\r\n\r\n except Exception as e:\r\n return traceback.format_exc(), input_num_dim\r\n\r\n# parallel coordinate calculation\r\ndef parallel_coords(log, pool, GROUP_SIZE, num_movies, num_frames, num_pixels, \r\n all_frame_files, num_dim, use_energy, energy):\r\n\r\n # accumulate frame coords for each frame\r\n all_curr_coords = []\r\n\r\n # estimate time for entire run\r\n start_time = time.time()\r\n\r\n # organize the frame files 
for ipyparallel \r\n # (should be visited in reverse order)\r\n list_frame_files = []\r\n for frame_number in reversed(range(num_frames)):\r\n accumulator = []\r\n for j in range(0, num_movies):\r\n accumulator.append(all_frame_files[j][frame_number])\r\n list_frame_files.append(accumulator)\r\n\r\n # compute last frame to adjust num_dim if necessary\r\n curr_coords, num_dim = compute_coords(0, num_movies, \r\n list_frame_files[0], energy, use_energy, num_dim, num_pixels)\r\n\r\n # save last frame, or quit on error\r\n if type(curr_coords) == numpy.ndarray:\r\n all_curr_coords.append(curr_coords)\r\n else:\r\n log(curr_coords)\r\n sys.exit()\r\n \r\n # num_dim is now correct, so don't need to re-compute\r\n use_energy = False\r\n\r\n # organize parameters for ipyparallel (these are all constants)\r\n list_num_movies = list(itertools.repeat(num_movies, GROUP_SIZE))\r\n list_energy = list(itertools.repeat(energy, GROUP_SIZE))\r\n list_use_energy = list(itertools.repeat(use_energy, GROUP_SIZE))\r\n list_num_dim = list(itertools.repeat(num_dim, GROUP_SIZE))\r\n list_num_pixels = list(itertools.repeat(num_pixels, GROUP_SIZE))\r\n\r\n log(\"[VS-LOG] Sending compute jobs to nodes, \"\r\n \"this may take a while depending on job size ...\")\r\n \r\n # call ipyparallel in batches\r\n steps = list(range(1, num_frames, GROUP_SIZE)) + [num_frames]\r\n for i in range(len(steps)-1):\r\n\r\n # get start and stop\r\n start = steps[i]\r\n stop = steps[i+1]\r\n \r\n # use remainder in case we are at the end\r\n remainder = stop - start\r\n\r\n # frame numbers for this batch\r\n frame_numbers = list(range(start,stop))\r\n\r\n # call ipyparallel\r\n pool_results = pool.map_sync(compute_coords,\r\n frame_numbers,\r\n list_num_movies[0:remainder],\r\n list_frame_files[start:stop],\r\n list_energy[0:remainder],\r\n list_use_energy[0:remainder],\r\n list_num_dim[0:remainder],\r\n list_num_pixels[0:remainder])\r\n\r\n # if no errors accumulate results\r\n for result in pool_results:\r\n if type(result[0]) == numpy.ndarray:\r\n all_curr_coords.append(result[0])\r\n \r\n # otherwise log error message and quit\r\n else:\r\n log(result[0])\r\n sys.exit()\r\n\r\n # print progress\r\n log(\"[VS-LOG] %s/%s frames computed.\" % (stop, num_frames))\r\n \r\n # estimated percentage complete\r\n progress = round(stop / num_frames * 100.0)\r\n\r\n # make sure it's a number between 0 and 100\r\n progress = numpy.clip(progress, 0, 100)\r\n\r\n # record into log for polling code\r\n log(\"[VS-PROGRESS] \" + str(progress))\r\n\r\n log(\"[VS-PROGRESS] \" + str(100))\r\n\r\n # keeping a linear calculation example here for debugging\r\n # alt_curr_coords = [compute_coords(frame_number, num_movies, list_frame_files[frame_number], \r\n # energy, use_energy, num_dim, num_pixels)[0] for frame_number in range(num_frames)]\r\n\r\n # check that parallel and serial computation agree\r\n # check_parallel = True\r\n # for frame_number in range(num_frames):\r\n # if not numpy.array_equal(all_curr_coords[frame_number], \r\n # alt_curr_coords[frame_number]):\r\n # check_parallel = False\r\n # print(check_parallel)\r\n\r\n time_elapsed = time.time() - start_time\r\n log(\"[VS-LOG] total compute time (s): %s\" % time_elapsed)\r\n\r\n return all_curr_coords\r\n\r\n# perform frame alignment (assume that all_curr_coords is in reverse order)\r\ndef align_coords(all_curr_coords, num_frames, num_movies):\r\n\r\n # storage for x and y coords\r\n xcoords = numpy.ones((num_frames, num_movies))\r\n ycoords = numpy.ones((num_frames, num_movies))\r\n\r\n for i 
in range(num_frames):\r\n\r\n # rotate to previous coordinates\r\n if i == 0:\r\n old_coords = all_curr_coords[i]\r\n\r\n else:\r\n # do Kabsch algorithm\r\n A = all_curr_coords[i].transpose().dot(old_coords)\r\n U, S, V = numpy.linalg.svd(A)\r\n rot_mat = (V.transpose()).dot(U.transpose())\r\n\r\n # rotate to get new coordinates\r\n all_curr_coords[i] = all_curr_coords[i].dot(rot_mat.transpose())\r\n\r\n # update old coords\r\n old_coords = all_curr_coords[i]\r\n\r\n # update x,y coords\r\n xcoords[num_frames-1-i, :] = all_curr_coords[i][:, 0]\r\n ycoords[num_frames-1-i, :] = all_curr_coords[i][:, 1]\r\n\r\n return xcoords, ycoords\r\n\r\n# scale coordinates for VideoSwarm interface\r\ndef scale_coords (xcoords, ycoords):\r\n\r\n # get range for x\r\n min_x = numpy.amin(xcoords)\r\n max_x = numpy.amax(xcoords)\r\n\r\n # get range for y\r\n min_y = numpy.amin(ycoords)\r\n max_y = numpy.amax(ycoords)\r\n\r\n # scale coordinates to be in [0,1]^2\r\n # if constant assign value of 1/2\r\n xcoords = (xcoords - min_x)\r\n if max_x > min_x:\r\n xcoords = xcoords / (max_x - min_x)\r\n else:\r\n xcoords = xcoords + 0.5\r\n\r\n ycoords = (ycoords - min_y)\r\n if max_y > min_y:\r\n ycoords = ycoords / (max_y - min_y)\r\n else:\r\n ycoords = ycoords + 0.5\r\n\r\n return xcoords, ycoords\r\n\r\n\r\n# output results to VideoSwarm files\r\n####################################\r\n\r\n# write out VideoSwarm files\r\ndef output_VS_files(args, log, meta_data, movie_files, vid_duration, \r\n num_frames, xcoords, ycoords):\r\n\r\n log(\"[VS-LOG] Writing movies.csv file ...\")\r\n # add a column to the end of the csv with created movie files\r\n if args.movie_col == 'None':\r\n log(\"[VS-LOG] Creating movie column.\")\r\n for i in range(0, (len(meta_data))):\r\n if i == 0:\r\n meta_data[i].append(\"Movie Files\")\r\n else:\r\n meta_data[i].append(movie_files[i-1])\r\n\r\n # write out movies.meta (the .csv file)\r\n meta_file = open(os.path.join(args.output_dir, 'movies.csv'), 'w')\r\n csv_meta_file = csv.writer(meta_file)\r\n csv_meta_file.writerows(meta_data)\r\n meta_file.close()\r\n\r\n # write out movies.xcoords file (use only float precision)\r\n log(\"[VS-LOG] Writing movies.xcoords ...\")\r\n xcoords_file = open(os.path.join(args.output_dir, 'movies.xcoords'), 'w')\r\n csv_xcoords_file = csv.writer(xcoords_file)\r\n for i in xcoords.tolist():\r\n csv_xcoords_file.writerow(['{:f}'.format(x) for x in i])\r\n xcoords_file.close()\r\n\r\n # write out movies.ycoords file\r\n log(\"[VS-LOG] Writing movies.ycoords ...\")\r\n ycoords_file = open(os.path.join(args.output_dir, 'movies.ycoords'), 'w')\r\n csv_ycoords_file = csv.writer(ycoords_file)\r\n for i in ycoords.tolist():\r\n csv_ycoords_file.writerow(['{:f}'.format(y) for y in i])\r\n ycoords_file.close()\r\n\r\n # add time to first row of xcoords to make trajectories\r\n num_movies = len(meta_data) - 1\r\n time_row = numpy.linspace(0, vid_duration, num=num_frames)\r\n traj = numpy.ones((num_movies + 1, num_frames))\r\n traj[0, :] = time_row\r\n traj[1:, :] = xcoords.transpose()\r\n\r\n # write out movies.trajectories\r\n log(\"[VS-LOG] Writing movies.trajectories ...\")\r\n traj_file = open(os.path.join(args.output_dir, 'movies.trajectories'), 'w')\r\n csv_traj_file = csv.writer(traj_file)\r\n for i in traj.tolist():\r\n csv_traj_file.writerow(['{:f}'.format(t) for t in i])\r\n traj_file.close()\r\n\r\n log(\"[VS-LOG] All files written successfully to: \" + str(args.output_dir))\r\n log(\"[VS-FINISHED] parse_frames.py complete.\")\r\n\r\n\r\n# organize all 
the steps for computing coordinates\r\ndef main():\r\n\r\n # set up environment\r\n ####################\r\n\r\n # parse command line\r\n args = parse_command_line()\r\n\r\n # start loggging\r\n log = init_logging(args)\r\n\r\n # check command line arguments\r\n check_command_line_args(args, log)\r\n\r\n # start parallel pool\r\n pool = init_parallel(log)\r\n\r\n # create output directories\r\n init_working_dirs(args, log)\r\n\r\n # initialize alignment parameters\r\n num_dim, use_energy, energy = init_parameters(args, log)\r\n\r\n # read and parse files\r\n ######################\r\n\r\n # read csv file\r\n num_movies, movie_files, frame_files, meta_data = read_csv(args, log)\r\n\r\n # order frame files and make movies, if requested\r\n num_frames, num_pixels, all_frame_files, movie_files, vid_duration = \\\r\n order_frame_files(args, log, num_movies, movie_files, frame_files, pool)\r\n\r\n # MDS calculations\r\n ##################\r\n\r\n # perform parallel coordinate calculation\r\n all_curr_coords = parallel_coords(log, pool, args.group_size, num_movies, \r\n num_frames, num_pixels, all_frame_files, \r\n num_dim, use_energy, energy)\r\n\r\n # algin coordinates\r\n xcoords, ycoords = align_coords(all_curr_coords, num_frames, num_movies)\r\n\r\n # scale coordinates\r\n xcoords, ycoords = scale_coords(xcoords, ycoords)\r\n\r\n # write out VS files\r\n ####################\r\n\r\n output_VS_files(args, log, meta_data, movie_files, \r\n vid_duration, num_frames, xcoords, ycoords)\r\n\r\n\r\n# command line entry point\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"agent/parse_frames.py","file_name":"parse_frames.py","file_ext":"py","file_size_in_byte":34539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"371893042","text":"'''\r\nCes commandes n'ont pas une grande utilités, mais bon\r\nLast update: 16/01/2019\r\n'''\r\n# DEPENDENCES\r\nimport discord\r\nimport asyncio\r\nimport time\r\n\r\nfrom discord.ext import commands\r\n\r\n # CUSTOM\r\nfrom conf.settings import EMBED_COLOR, BOT_NAME, BOT_AVATAR_URL, EMBED_FOOTER, EMBED_FOOTER_ICON, GITHUB_ICO, PING_ICO, SAY_ICO, EMBED_FOOTER, EMBED_FOOTER_ICON\r\nfrom conf.log_editor import Edit_Commands_Logs\r\nfrom conf.readability import Simple_Embed\r\n\r\n # LANG\r\nfrom conf.lang import*\r\n\r\nclass Fun:\r\n def __init__(self, client):\r\n self.client = client\r\n \r\n @commands.command(pass_context=True)\r\n async def ping(self, ctx):\r\n '''\r\n Retourne un message : 'pong' avec une indication sur le temps de réponse\r\n '''\r\n # Informations de la commandes\r\n serverName = ctx.message.guild.name\r\n user = ctx.message.author\r\n channel = ctx.message.channel \r\n Edit_Commands_Logs('ping', serverName, channel.name, user.name)\r\n '----------------------------------------------------------------------'\r\n\r\n # Récupération du temps\r\n time_now = int(round(time.time()*1000)) # On récupère le temps en millisecondes\r\n tempsReponse = 0 \r\n resultat = 0 # Stocke le temps de réponse en MS\r\n\r\n # Envoie des messages\r\n # Définition du premier Embed à envoyer\r\n pong_Embed = discord.Embed(color=EMBED_COLOR)\r\n pong_Embed.set_author(name=BOT_NAME, icon_url=BOT_AVATAR_URL)\r\n pong_Embed.set_thumbnail(url=user.avatar_url)\r\n pong_Embed.set_footer(text=EMBED_FOOTER, icon_url=EMBED_FOOTER_ICON)\r\n \r\n pong_Embed.add_field(name=latence, value=calcul_tps_rep, inline=False)\r\n\r\n pong = await ctx.send(embed=pong_Embed)\r\n\r\n # Edition du message pour afficher le temps de 
réponse\r\n # Et calcul\r\n # Définition de l'embed de remplacement\r\n tempsReponse = int(round(time.time()*1000))\r\n resultat = tempsReponse - time_now\r\n\r\n pong_EmbedSecond = discord.Embed(color=EMBED_COLOR)\r\n pong_EmbedSecond.set_author(name=BOT_NAME, icon_url=BOT_AVATAR_URL)\r\n pong_EmbedSecond.set_thumbnail(url=user.avatar_url)\r\n pong_EmbedSecond.set_footer(text=EMBED_FOOTER, icon_url=EMBED_FOOTER_ICON)\r\n\r\n pong_EmbedSecond.add_field(name=latence, value=calculated_tps_rep.format(resultat), inline=False)\r\n\r\n # Actualisation du message\r\n await pong.edit(embed=pong_EmbedSecond)\r\n \r\n @commands.command(pass_context=True)\r\n async def github(self, ctx):\r\n '''\r\n Envoie le lien vers le Repo GitHub\r\n '''\r\n # Informations de la commandes\r\n serverName = ctx.message.guild.name\r\n user = ctx.message.author\r\n channel = ctx.message.channel \r\n Edit_Commands_Logs('github', serverName, channel.name, user.name)\r\n '----------------------------------------------------------------------'\r\n\r\n github_embed = discord.Embed(title='Github', color=EMBED_COLOR)\r\n github_embed.set_author(name=BOT_NAME, icon_url=BOT_AVATAR_URL)\r\n github_embed.set_thumbnail(url=GITHUB_ICO)\r\n github_embed.set_footer(text=EMBED_FOOTER, icon_url=EMBED_FOOTER_ICON)\r\n\r\n # Pour insérer un lien [TEXT](lien)\r\n github_embed.add_field(name='Repository de {}'.format(BOT_NAME), value='[Cliquez ici](https://github.com/DrLarck/Guibot) pour accéder au repository ``GitHub`` de {}'.format(BOT_NAME))\r\n \r\n await ctx.send(embed=github_embed)\r\n \r\n @commands.command(pass_context=True)\r\n async def say(self, ctx, *, say):\r\n '''\r\n Répète le message de l'utilisateur\r\n '''\r\n message = ctx.message\r\n embed = Simple_Embed('', SAY_ICO, 'Message:', say)\r\n embed.set_footer(text=EMBED_FOOTER, icon_url=EMBED_FOOTER_ICON)\r\n\r\n await ctx.send(embed=embed)\r\n await message.delete()\r\n \r\n '''\r\n ERRORS\r\n '''\r\n\r\n @say.error\r\n async def say_error(self, ctx, error):\r\n if isinstance(error, commands.MissingRequiredArgument):\r\n await ctx.send(error_say.format(ctx.message.author.id))\r\n \r\n\r\ndef setup(client):\r\n client.add_cog(Fun(client))\r\n","sub_path":"cogs/fun.py","file_name":"fun.py","file_ext":"py","file_size_in_byte":4226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"407643173","text":"import sys\r\nimport math\r\nfrom java.lang import System\r\nfrom net.sf.l2j.gameserver.ai import CtrlIntention\r\nfrom net.sf.l2j.gameserver.instancemanager import GrandBossManager\r\nfrom net.sf.l2j.gameserver.lib import Rnd\r\nfrom net.sf.l2j.gameserver.model import L2CharPosition\r\nfrom net.sf.l2j.gameserver.model.quest.jython import QuestJython as JQuest\r\nfrom net.sf.l2j.gameserver.serverpackets import Earthquake\r\nfrom net.sf.l2j.gameserver.serverpackets import PlaySound\r\nfrom net.sf.l2j.gameserver.serverpackets import SocialAction\r\nfrom net.sf.l2j.gameserver.serverpackets import SpecialCamera\r\n\r\n# Boss: Antharas\r\n\r\nANTHARAS = 12211\r\n\r\n#Antharas Status Tracking :\r\nDORMANT = 0 #Antharas is spawned and no one has entered yet. Entry is unlocked\r\nWAITING = 1 #Antharas is spawend and someone has entered, triggering a 30 minute window for additional people to enter\r\n #before he unleashes his attack. Entry is unlocked\r\nFIGHTING = 2 #Antharas is engaged in battle, annihilating his foes. Entry is locked\r\nDEAD = 3 #Antharas has been killed. 
Entry is locked\r\n\r\nclass Antharas(JQuest) :\r\n def __init__(self,id,name,descr):\r\n JQuest.__init__(self,id,name,descr)\r\n self.lastAction = 0\r\n self.zone = GrandBossManager.getInstance().getZone(179700,113800,-7709)\r\n info = GrandBossManager.getInstance().getStatsSet(ANTHARAS)\r\n status = GrandBossManager.getInstance().getBossStatus(ANTHARAS)\r\n if status == DEAD :\r\n # load the unlock date and time for antharas from DB\r\n temp = long(info.getLong(\"respawn_time\")) - System.currentTimeMillis()\r\n # if antharas is locked until a certain time, mark it so and start the unlock timer\r\n # the unlock time has not yet expired. Mark Antharas as currently locked. Setup a timer\r\n # to fire at the correct time (calculate the time between now and the unlock time,\r\n # setup a timer to fire after that many msec)\r\n if temp > 0 :\r\n self.startQuestTimer(\"antharas_unlock\", temp, None, None)\r\n else :\r\n # the time has already expired while the server was offline. Immediately spawn antharas in his cave.\r\n # also, the status needs to be changed to DORMANT\r\n antharas = self.addSpawn(ANTHARAS,185708,114298,-8221,32768,False,0)\r\n GrandBossManager.getInstance().setBossStatus(ANTHARAS,DORMANT)\r\n antharas.broadcastPacket(Earthquake(185708,114298,-8221,20,10))\r\n GrandBossManager.getInstance().addBoss(antharas)\r\n else :\r\n loc_x = info.getInteger(\"loc_x\")\r\n loc_y = info.getInteger(\"loc_y\")\r\n loc_z = info.getInteger(\"loc_z\")\r\n heading = info.getInteger(\"heading\")\r\n hp = info.getInteger(\"currentHP\")\r\n mp = info.getInteger(\"currentMP\")\r\n antharas = self.addSpawn(ANTHARAS,loc_x,loc_y,loc_z,heading,False,0)\r\n GrandBossManager.getInstance().addBoss(antharas)\r\n antharas.setCurrentHpMp(hp,mp)\r\n if status == WAITING :\r\n # Start timer to lock entry after 30 minutes\r\n self.startQuestTimer(\"waiting\",1800000, antharas, None)\r\n elif status == FIGHTING :\r\n self.lastAction = System.currentTimeMillis()\r\n # Start repeating timer to check for inactivity\r\n self.startQuestTimer(\"antharas_despawn\",60000, antharas, None, True)\r\n\r\n def onAdvEvent (self,event,npc,player):\r\n if npc :\r\n if event == \"waiting\" :\r\n npc.teleToLocation(185452,114835,-8221,0)\r\n npc.getAI().setIntention(CtrlIntention.AI_INTENTION_MOVE_TO, L2CharPosition(181911,114835,-7678,0))\r\n self.startQuestTimer(\"antharas_has_arrived\",2000, npc, None, True)\r\n npc.broadcastPacket(PlaySound(1, \"BS02_A\", 1, npc.getObjectId(), 185452, 114835, -8221))\r\n GrandBossManager.getInstance().setBossStatus(ANTHARAS,FIGHTING)\r\n elif event == \"camera_1\" :\r\n self.startQuestTimer(\"camera_2\",3000, npc, None)\r\n npc.broadcastPacket(SpecialCamera(npc.getObjectId(),700,13,-19,0,20000,0,0,1,0))\r\n elif event == \"camera_2\" :\r\n self.startQuestTimer(\"camera_3\",10000, npc, None)\r\n npc.broadcastPacket(SpecialCamera(npc.getObjectId(),700,13,0,6000,20000,0,0,1,0))\r\n elif event == \"camera_3\" :\r\n self.startQuestTimer(\"camera_4\",200, npc, None)\r\n npc.broadcastPacket(SpecialCamera(npc.getObjectId(),3700,0,-3,0,10000,0,0,1,0))\r\n elif event == \"camera_4\" :\r\n self.startQuestTimer(\"camera_5\",10800, npc, None)\r\n npc.broadcastPacket(SpecialCamera(npc.getObjectId(),1100,0,-3,22000,30000,0,0,1,0))\r\n elif event == \"camera_5\" :\r\n self.startQuestTimer(\"antharas_despawn\",60000, npc, None, True)\r\n npc.broadcastPacket(SpecialCamera(npc.getObjectId(),1100,0,-3,300,7000,0,0,1,0))\r\n self.lastAction = System.currentTimeMillis()\r\n elif event == \"antharas_despawn\" 
:\r\n temp = System.currentTimeMillis() - int(self.lastAction) \r\n if temp > 900000 :\r\n npc.teleToLocation(185708,114298,-8221,0)\r\n GrandBossManager.getInstance().setBossStatus(ANTHARAS,DORMANT)\r\n npc.setCurrentHpMp(npc.getMaxHp(),npc.getMaxMp())\r\n self.zone.oustAllPlayers()\r\n self.cancelQuestTimer(\"antharas_despawn\", npc, None)\r\n elif event == \"antharas_has_arrived\" :\r\n dx = abs(npc.getX() - 181911)\r\n dy = abs(npc.getY() - 114835)\r\n if dx <= 50 and dy <= 50 :\r\n self.startQuestTimer(\"camera_1\",2000, npc, None)\r\n npc.getSpawn().setLocx(181911)\r\n npc.getSpawn().setLocy(114835)\r\n npc.getSpawn().setLocz(-7678)\r\n npc.getAI().setIntention(CtrlIntention.AI_INTENTION_IDLE)\r\n self.cancelQuestTimer(\"antharas_has_arrived\", npc, None)\r\n else :\r\n npc.getAI().setIntention(CtrlIntention.AI_INTENTION_MOVE_TO, L2CharPosition(181911,114835,-7678,0))\r\n elif event == \"spawn_cubes\" :\r\n cube = self.addSpawn(12324,177615,114941,-7709,0,False,900000)\r\n radius = 1500\r\n for i in range(0,19,1) :\r\n x = radius*math.cos(i*.331) #.331~2pi/19\r\n y = radius*math.sin(i*.331)\r\n self.addSpawn(12324,177615+int(x),114941+int(y),-7709,0,False,900000)\r\n self.cancelQuestTimer(\"antharas_despawn\", npc, None)\r\n self.startQuestTimer(\"remove_players\",900000, None, None)\r\n else :\r\n if event == \"antharas_unlock\" :\r\n antharas = self.addSpawn(ANTHARAS,185708,114298,-8221,32768,False,0)\r\n GrandBossManager.getInstance().addBoss(antharas)\r\n GrandBossManager.getInstance().setBossStatus(ANTHARAS,DORMANT)\r\n antharas.broadcastPacket(Earthquake(185708,114298,-8221,20,10)) \r\n elif event == \"remove_players\" :\r\n self.zone.oustAllPlayers()\r\n return\r\n\r\n def onAttack (self,npc,player,damage,isPet):\r\n self.lastAction = System.currentTimeMillis()\r\n if GrandBossManager.getInstance().getBossStatus(ANTHARAS) != FIGHTING :\r\n player.teleToLocation(82480,149087,-3350,1)\r\n return\r\n\r\n def onKill(self,npc,player,isPet):\r\n npc.broadcastPacket(SpecialCamera(npc.getObjectId(),1200,20,-10,0,13000,0,0,1,0))\r\n npc.broadcastPacket(PlaySound(1, \"BS01_D\", 1, npc.getObjectId(), npc.getX(), npc.getY(), npc.getZ()))\r\n self.startQuestTimer(\"spawn_cubes\", 10000, npc, None)\r\n GrandBossManager.getInstance().setBossStatus(ANTHARAS,DEAD)\r\n respawnTime = long((192 + Rnd.get(145)) * 3600000)\r\n self.startQuestTimer(\"antharas_unlock\", respawnTime, None, None)\r\n # also save the respawn time so that the info is maintained past reboots\r\n info = GrandBossManager.getInstance().getStatsSet(ANTHARAS)\r\n info.set(\"respawn_time\",(long(System.currentTimeMillis()) + respawnTime))\r\n GrandBossManager.getInstance().setStatsSet(ANTHARAS,info)\r\n return\r\n\r\n# now call the constructor (starts up the ai)\r\nQUEST = Antharas(-1,\"antharas\",\"ai\")\r\n\r\nQUEST.addKillId(ANTHARAS)\r\nQUEST.addAttackId(ANTHARAS)","sub_path":"trunk/Lisvus_DataPack/data/jscript/ai/individual/antharas.py","file_name":"antharas.py","file_ext":"py","file_size_in_byte":8763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"339298990","text":"from botocore.client import BaseClient\nimport boto3\nimport os\nimport json\nfrom botocore.exceptions import ClientError\n\naccess_key = 'AWS_ACCESS_KEY_ID'\naccess_value = os.getenv(access_key)\nsecret_key = 'AWS_SECRET_ACCESS_KEY'\nsecret_value = os.getenv(secret_key)\n\n\n# uses credentials from environment\ndef s3_con() -> BaseClient:\n s3 = boto3.client(\n 's3',\n aws_access_key_id=access_value,\n 
aws_secret_access_key=secret_value,\n )\n return s3\n\ns3 = s3_con()\nresponse = s3.list_buckets()\nfor bucket in response['Buckets']:\n print(f' {bucket[\"Name\"]}')\n\nbucket = 'completed-bucket'\nkey = '1234.json'\n\n# Fetch an object or print an error if it doesn't exist.\ntry :\n response = s3.get_object(Bucket=bucket, Key=key)\n content = response['Body']\n jsonObject = json.loads(content.read())\n print(jsonObject)\nexcept ClientError as ex:\n if ex.response['Error']['Code'] == 'NoSuchKey':\n print('No object found - returning empty')\n else:\n raise\n","sub_path":"poc/AWS_S3/s3/s3_connect.py","file_name":"s3_connect.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"566449243","text":"import random\r\n\r\n# Constants defined by the problem statement\r\nmax_score = 10\r\npoints_win = 3\r\npoints_draw = 1\r\npoints_lose = 0\r\n\r\n# Structure for a team\r\nclass Team:\r\n def __init__(self, name):\r\n self.name = name # Name\r\n self.score = 0 # Points total\r\n self.wins = 0 # Number of wins\r\n self.loses = 0 # Number of losses\r\n self.draws = 0 # Number of draws\r\n self.goals_conceded = 0 # Goals conceded\r\n self.goals_scored = 0 # Goals scored\r\n\r\n# Structure for a single game\r\nclass Game:\r\n def __init__(self):\r\n self.Team1_Name = ''\r\n self.Team1_Goals = 0\r\n self.Team2_Name = ''\r\n self.Team2_Goals = 0\r\n self.result = 'Игра еще не сыграна'\r\n\r\n def play(self, Team1, Team2):\r\n # Generate a random score for each team\r\n res_Team1 = random.randint(0, max_score)\r\n res_Team2 = random.randint(0, max_score)\r\n\r\n # Cheat code for team Spartak (+1 team point was promised for a Spartak win! ;))\r\n #if Team1.name.lower() == 'спартак': res_Team1 = max_score\r\n #if Team2.name.lower() == 'спартак': res_Team2 = max_score\r\n\r\n print('Играют %s и %s. Счет: [%d:%d]' % (Team1.name, Team2.name, res_Team1, res_Team2))\r\n if res_Team1 > res_Team2: # First team wins\r\n Team1.score += points_win\r\n Team1.wins += 1\r\n Team2.score += points_lose\r\n Team2.loses += 1\r\n elif res_Team1 < res_Team2: # Second team wins\r\n Team1.score += points_lose\r\n Team1.loses += 1\r\n Team2.score += points_win\r\n Team2.wins += 1\r\n elif res_Team1 == res_Team2: # Draw\r\n Team1.score += points_draw\r\n Team1.draws += 1\r\n Team2.score += points_draw\r\n Team2.draws += 1\r\n\r\n # Record goals scored and conceded, respectively\r\n Team1.goals_scored += res_Team1\r\n Team1.goals_conceded += res_Team2\r\n Team2.goals_scored += res_Team2\r\n Team2.goals_conceded += res_Team1\r\n\r\n # Remember which teams played\r\n self.Team1_Name = Team1.name\r\n self.Team1_Goals = res_Team1\r\n self.Team2_Name = Team2.name\r\n self.Team2_Goals = res_Team2\r\n\r\n # Present the result in a readable form\r\n self.result = '%s [%d:%d] %s' % (Team1.name, res_Team1, res_Team2, Team2.name)\r\n\r\n# Print the final championship table\r\ndef print_summary(TeamList):\r\n print('\\n #', 'Название ', 'Побед', 'Поражений', 'Ничьих', 'Забито', 'Пропущено', 'Очков')\r\n number = 1\r\n for e in TeamList:\r\n print('{0:2} {1:12} {2:5} {3:9} {4:6} {5:6} {6:9} {7:5}'.format(number, e.name, e.wins, e.loses, e.draws, e.goals_scored, e.goals_conceded, e.score))\r\n number += 1\r\n\r\n# Print the history of played games\r\ndef print_gamelog(GameList):\r\n print('\\nСписок игр чемпионата')\r\n number = 1\r\n for e in GameList:\r\n print('Игра %d.
%s' % (number, e.result))\r\n number += 1\r\n\r\n# Find a match between two teams\r\ndef findMatch(GameList, Team1, Team2):\r\n for e in GameList:\r\n if e.Team1_Name.lower() == Team1.lower():\r\n if e.Team2_Name.lower() == Team2.lower():\r\n return e.result\r\n elif e.Team1_Name.lower() == Team2.lower():\r\n if e.Team2_Name.lower() == Team1.lower():\r\n return e.result\r\n\r\n# Entry point\r\nif __name__ == '__main__':\r\n Teams = [] # Create the list of teams\r\n number = 1\r\n while True: # Read team names until Enter is pressed on an empty line\r\n response = input('Введите название команды %d: ' % number)\r\n if response == '':\r\n break\r\n Teams.append(Team(response))\r\n number += 1\r\n\r\n # Check that there are enough teams\r\n if len(Teams) > 1:\r\n Games = [] # Create the list of games\r\n # Each pair of teams plays exactly once, and no team plays itself\r\n for i in range(0, len(Teams) - 1):\r\n for j in range(i + 1, len(Teams)):\r\n game = Game()\r\n game.play(Teams[i], Teams[j])\r\n Games.append(game)\r\n # Sort in descending order for a convenient presentation\r\n Teams.sort(key=lambda x: x.score, reverse=True)\r\n # Print the final score table\r\n print_summary(Teams)\r\n # Print the list of played games\r\n print_gamelog(Games)\r\n print('Чемпионат успешно завершен!')\r\n print('Победитель: команда \\\"%s\\\" с результатом %d очков!' % (Teams[0].name, Teams[0].score))\r\n\r\n # Look up a match among those already played\r\n # Input format: 'Team1 и Team2' (teams separated by ' и ')\r\n while True:\r\n response = input('\\nНайти матч между командами... ')\r\n if response == '':\r\n break\r\n q = response.split(' и ')\r\n if len(q) == 2:\r\n res = findMatch(Games, q[0], q[1])\r\n if res == None:\r\n print('Матчей между командами \\\"%s\\\" и \\\"%s\\\" не найдено.' % (q[0], q[1]))\r\n else:\r\n print(res)\r\n else:\r\n print('Неверный ввод. Команд в запросе должно быть ровно две. Формат: \\'Команда1 и Команда 2\\'.')\r\n else:\r\n print('Слишком мало команд для проведения чемпионата!')","sub_path":"championship.py","file_name":"championship.py","file_ext":"py","file_size_in_byte":6393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"368573714","text":"# -*- coding: utf-8 -*-\n\nimport scrapy\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\n\nfrom universities.items import University\n\n\nclass ChemicalEngineeringSpider(scrapy.Spider):\n \"\"\"\n Scrape all faculty members profiles from\n http://www.bse.vt.edu website\n\n \"\"\"\n name = \"chemeng\"\n allowed_domains = [\"che.vt.edu\"]\n start_urls = (\n 'http://www.che.vt.edu/people_faculty.php',\n )\n\n def parse(self, response):\n \"\"\"\n Getting links from department of Chemical Engineering\n\n \"\"\"\n sel = Selector(response)\n\n links = sel.xpath('//ul[@class=\"facultylist\"]/li/h2/a/@href').extract()\n for link in links:\n p_link = 'http://www.che.vt.edu%s' % link\n request = Request(p_link, callback=self.max_parse)\n yield request\n\n def max_parse(self, response):\n \"\"\"\n Parse profile page from Virginia Tech\n\n \"\"\"\n\n item = University()\n sel = Selector(response)\n\n name = sel.xpath('//li[@class=\"facultymember indentmore\"]/h2/text()').extract()\n if name:\n item['name'] = name\n\n title = sel.xpath('//span[@class=\"title\"]/text()').extract()\n if title:\n item['title'] = ' '.join([x.strip() for x in title[0].split('\\r\\n') if x.strip()])\n\n item['institution'] = 'Virginia Tech'\n item['department'] = 'Chemical Engineering'\n item['division'] = 'College of Engineering'\n\n email = sel.xpath('//li[contains(text(), \"Email:\")]/following-sibling::a/text()').extract()\n if email:\n
item['email'] = email[0].strip()\n\n phone = sel.xpath('//h4[contains(text(), \"Contact:\")]/following-sibling::ul/li/text()').extract()\n if phone:\n item['phone'] = phone[0].strip()\n url = sel.xpath('//li[@class=\"facultymember indentmore\"]/h2/@href').extract()\n if url:\n item['url'] = url\n return item\n","sub_path":"universities/spiders/virginia_tech/chemical_engineering.py","file_name":"chemical_engineering.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"618354132","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\n# importing useful libraries -- feel free to add any others you find necessary\nimport socket\nimport hashlib\nimport re\n\nhost = \"142.93.117.193\" # IP address or URL\nport = 7331 # port\n\n# use these to connect to the service\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((host, port))\n\n# receive some data\ndata = s.recv(1024)\ndata = data.decode('utf-8')\nwhile(\"CMSC389R\" not in data):\n regex = 'Find me the (\\w+) hash of (\\w+)'\n matches = re.finditer(regex,data, re.MULTILINE)\n hash_type = None\n to_hash = None\n for matchnum, match in enumerate(matches):\n hash_type = match.group(1)\n to_hash = match.group(2)\n print(data)\n print(\"Hash Type: \" + hash_type)\n print(\"To Hash: \" + to_hash)\n if hash_type == None or to_hash == None:\n print(\"FAILED TO FIND MATCH\")\n else:\n algo = hashlib.new(hash_type)\n algo.update(to_hash.encode('utf-8'))\n hashed = algo.hexdigest() + \"\\n\"\n print(hashed)\n s.send(hashed.encode('utf-8'))\n data = s.recv(1024)\n data = data.decode('utf-8')\nprint(data)\n# close the connection\ns.close()\n","sub_path":"week/9/writeup/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"369961432","text":"from __future__ import unicode_literals\nfrom datetime import datetime\nfrom django.db import models\nfrom django.utils import timezone\nfrom start.models import BankAccount\n\n# Create your models here.\n\n\nclass Transaction(models.Model):\n \"\"\" model for transactions\"\"\"\n bank_account = models.ForeignKey(BankAccount, on_delete=models.CASCADE)\n status = models.PositiveSmallIntegerField()\n amount = models.DecimalField(decimal_places=4, max_digits=100)\n #voucher = models.ImageField(upload_to='vouchers/%Y/%m/%d', blank=True, null=True)\n pen_to_dollars = models.BooleanField()\n change_type = models.DecimalField(max_digits=6, decimal_places=4)\n date_time = models.DateTimeField(default=timezone.now)\n date_edited = models.DateTimeField(default=timezone.now)\n valid = models.BooleanField(default=True)\n\n def __str__(self):\n money = 'soles'\n moneyy = 'dolares'\n statuss = \"\"\n if self.status == 1:\n statuss = \"Pendiente\"\n elif self.status == 2:\n statuss = \"Verificando\"\n elif self.status == 3:\n statuss = \"En proceso\"\n elif self.status == 4:\n statuss = \"Realizado\"\n if self.pen_to_dollars:\n target = str(round((self.amount/self.change_type),3))\n else:\n target = str(round((self.amount*self.change_type),3))\n money = 'dolares'\n moneyy = 'soles'\n return str(self.amount)+' '+money+' ----> '+target+' '+moneyy+' | '+\\\n datetime.strftime(self.date_time,'%d/%m/%Y') +' ('+ statuss+')'\n\n def __init__(self, *args, **kwargs):\n super(Transaction, self).__init__(*args, **kwargs)\n self.old_status = self.status\n\n def save(self, *args, **kwargs):\n if self.old_status 
!= self.status:\n self.date_edited = timezone.now()\n super(Transaction, self).save(*args, **kwargs)\n\n\nclass ImageVoucher(models.Model):\n image = models.ImageField(upload_to='vouchers/%Y/%m/%d', blank=True, null=True)\n pk_transaction = models.ForeignKey(Transaction, null=True)\n","sub_path":"transactions/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"551700927","text":"from PyQt5.QtGui import QPainter, QPen, QColor, QBrush\nfrom PyQt5.QtCore import QPointF\nfrom PyQt5.QtWidgets import QWidget\n\nfrom obstacle import TABLE_WIDTH, TABLE_HEIGHT\n\n\nclass Point:\n def __init__(self, ident, x, y):\n self.id = ident\n self.x = x\n self.y = y\n\n def get_qcolor(self):\n d = (0xff, 0xdd, 0xbb, 0x99, 0x77, 0x55)[(self.id // 6) % 6]\n r, v, b = ((d, 0, 0), (0, d, 0), (0, 0, d), (d, d, 0), (d, 0, d), (0, d, d))[self.id % 6]\n return QColor(r, v, b)\n\n def paint(self, painter, x_offset, y_offset, width, height):\n width_factor = width / TABLE_WIDTH\n height_factor = height / TABLE_HEIGHT\n painter.setBrush(QBrush(self.get_qcolor()))\n painter.setPen(QPen(self.get_qcolor()))\n painter.drawLine(QPointF(width - self.x * width_factor + x_offset - 10, self.y * height_factor + y_offset),\n QPointF(width - self.x * width_factor + x_offset + 10, self.y * height_factor + y_offset))\n painter.drawLine(QPointF(width - self.x * width_factor + x_offset, self.y * height_factor + y_offset - 10),\n QPointF(width - self.x * width_factor + x_offset, self.y * height_factor + y_offset + 10))\n\n","sub_path":"point.py","file_name":"point.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"593224546","text":"# action for nano db\n\nimport os \nimport sys\nimport re \nimport time \nimport subprocess \nfrom functools import partial, reduce # reduce must be imported from functools on Python 3\nfrom glob import glob\nimport scipy \nimport numpy as np \nimport prody \nimport tempfile\nimport xml.dom.minidom\nfrom chembl_webresource_client.new_client import new_client\nactivities = new_client.activity\n\nBLASTDB = '/core/database/dataset_libs/BS1/blastdb/chembl_23_blast.fa'\n\n\n\ndef blast(pdb_path):\n\n cdir = os.getcwd()\n tdir = tempfile.mkdtemp()\n os.chdir(tdir)\n\n receptor = os.path.basename(os.path.splitext(pdb_path)[0])\n pdbHead = prody.parsePDBHeader(pdb_path)\n pdbFile = prody.parsePDB(pdb_path)\n\n ligands = []\n for chem in pdbHead['chemicals']:\n ligands.append([chem.chain, str(chem.resnum), chem.resname, chem.name])\n \n blast_result = []\n for chain, resnum, resname, name in ligands:\n \n rec = pdbFile.select('not (chain {} resnum {})'.format(chain, resnum))\n ligand = pdbFile.select('chain {} resnum {}'.format(chain, resnum))\n\n cen_ligand = prody.calcCenter(ligand)\n\n res_coll = []\n ligCoords = ligand.getCoords()\n print('lig_size', len(ligCoords))\n\n sequence = ''\n i = 4\n while len(sequence)< 100:\n\n for center in ligCoords:\n around_atoms = rec.select('same residue as within {} of center'.format(i), center=center)\n if around_atoms is None:\n continue\n res_coll.append(around_atoms)\n #res_indices = around_atoms.getResindices()\n #print(around_atoms.getHierView()['A'].getSequence())\n #print (res_indices)\n #res_coll = res_coll | set(res_indices)\n resindices = reduce(lambda x,y: x|y, res_coll)\n sequence = resindices.getHierView()['A'].getSequence()\n print('sequence', i,len(sequence), sequence)\n i +=1\n \n\n \n\n with
open('sequence.fasta','w') as fout:\n fout.write(\">receptor\\n\" + sequence + '\\n')\n\n cmd = 'blastp -db {} -query sequence.fasta -outfmt 5 -out result'.format(BLASTDB)\n #print(os.getcwd())\n \n cl = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)\n cl.wait()\n\n #print(os.listdir(os.getcwd()))\n\n dtree = xml.dom.minidom.parse(\"result\")\n collection = dtree.documentElement\n hits = collection.getElementsByTagName(\"Hit\")\n\n hit_result = []\n \n for hit in hits:\n hit_id = hit.getElementsByTagName('Hit_id')[0].childNodes[0].data\n hsps = hit.getElementsByTagName('Hit_hsps')[0]\n identity = hsps.getElementsByTagName('Hsp_identity')[0].childNodes[0].data\n align_len = hsps.getElementsByTagName('Hsp_align-len')[0].childNodes[0].data\n qseq = hsps.getElementsByTagName('Hsp_qseq')[0].childNodes[0].data\n hseq = hsps.getElementsByTagName('Hsp_hseq')[0].childNodes[0].data\n midline = hsps.getElementsByTagName('Hsp_midline')[0].childNodes[0].data\n\n blast_result.append([receptor, hit_id, str(identity), str(align_len), str(len(sequence)),midline, hseq, sequence])\n\n return blast_result\n\nif __name__ == '__main__':\n blast('3eml')\n \n","sub_path":"affinityDB/OLD_dataset_libs/BS1/nano_action.py","file_name":"nano_action.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"53496126","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom pygame import *\r\nfrom blocks import (Platform, Spike, Ring,\r\n BackFontRing, FrontFontRing, Invisible,\r\n HRing, HBackFontRing, HFrontFontRing,\r\n HInvisible, SavePoint, BonusLife, Exit)\r\n\r\nWIDTH = 32\r\nHEIGHT = 32\r\nGRAVITY = 0.23\r\n\r\n\r\nclass Player(sprite.Sprite):\r\n\r\n def __init__(self, x_coordinate, y_coordinate):\r\n\r\n sprite.Sprite.__init__(self)\r\n self.xvel = 0\r\n self.startX = x_coordinate\r\n self.startY = y_coordinate\r\n self.image = image.load(\"images/ball.png\")\r\n self.rect = Rect(x_coordinate, y_coordinate, WIDTH, HEIGHT)\r\n self.yvel = 0\r\n self.onGround = False\r\n self.Score = \"0000000\"\r\n self.ring_count = 0\r\n self.lifes = 3\r\n self.life_image = image.load(\"images/life_image.png\")\r\n self.ring_image = image.load(\"images/ring_image.png\")\r\n self.died_image = image.load(\"images/poped.png\")\r\n self.MOVE_SPEED = 4\r\n self.JUMP_POWER = 7.5\r\n\r\n def update(self, left, right, up, platforms):\r\n\r\n if up:\r\n if self.onGround:\r\n self.yvel = -self.JUMP_POWER\r\n\r\n if left:\r\n self.xvel = -self.MOVE_SPEED\r\n\r\n if right:\r\n self.xvel = self.MOVE_SPEED\r\n\r\n if not (left or right):\r\n self.xvel = 0\r\n\r\n if not self.onGround:\r\n self.yvel += GRAVITY\r\n\r\n self.onGround = False\r\n self.rect.y += self.yvel\r\n self.collide(0, self.yvel, platforms)\r\n\r\n self.rect.x += self.xvel\r\n self.collide(self.xvel, 0, platforms)\r\n\r\n def collide(self, xvel, yvel, platforms):\r\n\r\n for platform in platforms:\r\n if sprite.collide_rect(self, platform):\r\n\r\n if isinstance(platform, Spike):\r\n self.die()\r\n return\r\n\r\n if (isinstance(platform, SavePoint) and\r\n not platform.activeted):\r\n platform.get_saved(self)\r\n new_score = str(int(self.Score) + 500)\r\n self.Score = \"0\" * (7 - len(new_score)) + new_score\r\n\r\n if (isinstance(platform, BonusLife) and\r\n not platform.activeted):\r\n platform.deactivate()\r\n new_score = str(int(self.Score) + 1000)\r\n self.Score = \"0\" * (7 - len(new_score)) + new_score\r\n self.lifes += 1\r\n\r\n if isinstance(platform, 
Ring) or isinstance(platform, HRing):\r\n if platform.active:\r\n new_score = str(int(self.Score) + 500)\r\n self.Score = \"0\" * (7 - len(new_score)) + new_score\r\n platform.deactivate()\r\n self.ring_count -= 1\r\n if self.ring_count == 0:\r\n for d in platforms:\r\n if isinstance(d, Exit):\r\n d.activate()\r\n if isinstance(platform, Exit):\r\n exit(platform)\r\n\r\n if (isinstance(platform, FrontFontRing) or\r\n isinstance(platform, HFrontFontRing)):\r\n for d in platforms:\r\n if (sprite.collide_rect(self, d) and\r\n (isinstance(d, Ring) or\r\n isinstance(d, HRing))):\r\n platform.deactivate()\r\n\r\n if (isinstance(platform, BackFontRing) or\r\n isinstance(platform, HBackFontRing)):\r\n for d in platforms:\r\n if (sprite.collide_rect(self, d) and\r\n (isinstance(d, Ring) or\r\n isinstance(d, HRing))):\r\n platform.deactivate()\r\n\r\n if (isinstance(platform, Platform) or\r\n isinstance(platform, Invisible) or\r\n isinstance(platform, HInvisible)):\r\n if xvel > 0:\r\n self.rect.right = platform.rect.left\r\n\r\n if xvel < 0:\r\n self.rect.left = platform.rect.right\r\n\r\n if yvel > 0:\r\n self.rect.bottom = platform.rect.top\r\n self.onGround = True\r\n if self.yvel < 6:\r\n self.yvel = 0\r\n else:\r\n self.yvel = - self.yvel / 2\r\n\r\n if yvel < 0:\r\n self.rect.top = platform.rect.bottom\r\n self.yvel = 0\r\n\r\n def die(self):\r\n time.wait(500)\r\n self.lifes -= 1\r\n self.teleporting(self.startX, self.startY)\r\n\r\n def teleporting(self, go_x, go_y):\r\n self.rect.x = go_x\r\n self.rect.y = go_y\r\n\r\n def is_game_over(self):\r\n return self.lifes < 1\r\n\r\n\r\nclass FastPlayer(Player):\r\n\r\n def __init__(self, x, y_coordinate):\r\n Player.__init__(self, x, y_coordinate)\r\n self.MOVE_SPEED = 10\r\n self.image = image.load(\"images/fast_ball.png\")\r\n\r\n\r\nclass HighJumpPlayer(Player):\r\n\r\n def __init__(self, x, y_coordinate):\r\n Player.__init__(self, x, y_coordinate)\r\n self.JUMP_POWER = 12\r\n self.image = image.load(\"images/high_jump_ball.png\")\r\n","sub_path":"ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":5500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"135622392","text":"\"\"\"\n\nAuthor: L.Smalbil\n\nThis tool is meant to quickly retrieve important financial and economic information for a given country in a\ngiven period.\n\n\"\"\"\n\nimport pandas as pd\nfrom indicators import Indicators\nimport re\n\nif __name__ == \"__main__\":\n # Read Data\n\n consumer_conf_data_eu = pd.read_csv('eu_consumer_conf_2000_2020.csv')\n gdp_eu = pd.read_csv('eu_gdp_2000_2020.csv')\n house_hold_debt_eu = pd.read_csv('eu_household_debt_2000_2019.csv')\n unemployement_eu = pd.read_csv('eu_unemployement_2000_2020.csv')\n disposable_income_eu = pd.read_csv('eu_disposable_income_2000_2019.csv')\n wages_eu = pd.read_csv('eu_wages_2000-2019.csv')\n eu_revenue = pd.read_csv('eu_tax_revenue.csv')\n\n while True:\n begin = str(input('Please enter the begin year as follows: yyyy-mm. '))\n if bool(re.match('[0-9]{4}-[0-9]{2}', begin)) == False:\n print('Sorry, I did not quite catch that. Please try again')\n continue\n else:\n break\n\n while True:\n end = str(input('Please enter the end time as follows: yyyy-mm. '))\n if bool(re.match('[0-9]{4}-[0-9]{2}', end)) == False:\n print('Sorry, I did not quite catch that. 
Please try again')\n continue\n else:\n break\n\n while True:\n country = str(input('Please enter the country code: '))\n if bool(re.match('[A-Z]{3}|[A-Z]{4}', country)) == False:\n print('Sorry, I did not quite catch that. Please try again')\n continue\n else:\n break\n\n # Return Information to User\n object = Indicators(country = country, begin = begin, end = end)\n\n try:\n object.consumer_confidence(data=consumer_conf_data_eu)\n print('\\n')\n except IndexError:\n print('No consumer confidence data within the specified time window.')\n\n try:\n object.gdp(data = gdp_eu)\n print('\\n')\n except IndexError:\n print('No gdp data within the specified time window.')\n\n try:\n object.household_debt(data = house_hold_debt_eu)\n print('\\n')\n except IndexError:\n print('No household debt data within the specified time window.')\n\n try:\n object.unemployement(data=unemployement_eu)\n print('\\n')\n except IndexError:\n print('No unemployement data within the specified time window.')\n\n try:\n object.disposable_income(data=disposable_income_eu)\n print('\\n')\n except IndexError:\n print('No disposable income data within the specified time window.')\n\n try:\n object.wages(data=wages_eu)\n print('\\n')\n except IndexError:\n print('No wage data within the specified time window.')\n\n try:\n object.tax_revenue(data=eu_revenue)\n print('\\n')\n except IndexError:\n print('No tax revenue data within the specified time window.')\n #raise 'No data within the specified time window.'\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"606256848","text":"import utime\n\nimport comms\nimport sprites\nimport model\n\nimport imagenes\n\nPIXELS = 54\n\nDEBUG = True\n\ntry:\n from remotepov import update\nexcept:\n import povdisplay\n povdisplay.init(PIXELS, imagenes.palette_pal)\n update = lambda: None\n if DEBUG:\n print(\"setting up fan debug\")\n import uctypes\n debug_buffer = uctypes.bytearray_at(povdisplay.getaddress(999), 32*16)\n next_loop = 1000\n def update():\n global next_loop\n now = utime.ticks_ms()\n if utime.ticks_diff(next_loop, now) < 0:\n next_loop = utime.ticks_add(now, 1000)\n comms.send(b\"debug\", debug_buffer)\n\n#gameover = sprites.get_sprite(0)\n#gameover.image_strip = 6\n## Disable Frame\n#gameover.frame = DISABLED_FRAME\n#gameover.x = -32\n#gameover.y = 2\n\n# init images\nsprites.set_imagestrip(0, imagenes.galaga_png)\nsprites.set_imagestrip(1, imagenes.numerals_png)\nsprites.set_imagestrip(3, imagenes.disparo_png)\nsprites.set_imagestrip(4, imagenes.ll9_png)\n#sprites.set_imagestrip(4, imagenes._00_galaga_png)\n#sprites.set_imagestrip(4, imagenes.crawling_png)\nsprites.set_imagestrip(5, imagenes.explosion_png)\nsprites.set_imagestrip(6, imagenes.explosion_nave_png)\nsprites.set_imagestrip(10, imagenes.tierra_flat_png)\nsprites.set_imagestrip(11, imagenes.marte_flat_png)\nsprites.set_imagestrip(12, imagenes.jupiter_flat_png)\nsprites.set_imagestrip(13, imagenes.saturno_flat_png)\nsprites.set_imagestrip(14, imagenes.sves_flat_png)\nsprites.set_imagestrip(15, imagenes.ventilastation_flat_png)\nsprites.set_imagestrip(16, imagenes.tecno_estructuras_flat_png)\n\ndef reset_game():\n\n global scene\n scene = model.Fleet()\n\nbutton_was_down = False\nreset_was_down = False\n\ndef process_input(b):\n left = bool(b & 1)\n right = bool(b & 2)\n up = bool(b & 4)\n down = bool(b & 8)\n boton = bool(b & 16)\n accel = bool(b & 32)\n decel = bool(b & 
64)\n reset = bool(b & 128)\n\n global reset_was_down\n if not reset_was_down and reset:\n reset_game()\n reset_was_down = reset\n\n if up and left:\n direction = \"↖\"\n elif up and right:\n direction = \"↗\"\n elif down and left:\n direction = \"↙\"\n elif down and right:\n direction = \"↘\"\n elif up:\n direction = \"↑\"\n elif down:\n direction = \"↓\"\n elif left:\n direction = \"←\"\n elif right:\n direction = \"→\"\n else:\n direction = \" \"\n\n scene.heading(up, down, left, right)\n\n global button_was_down\n if not button_was_down and boton:\n scene.fire()\n button_was_down = boton\n \n scene.accel(accel, decel)\n\n #text = \"\\r{0} {2} {1} {3} {4} \".format(direction, boton, int(nave.x), decel, accel)\n #sock_send(bytes(text, \"utf-8\"))\n #print(text, end=\"\")\n\n\n\ndef game_loop():\n last_val = None\n counter = 0 \n reset_game()\n \n while True:\n next_loop = utime.ticks_add(utime.ticks_ms(), 30)\n\n val = comms.receive(1)\n if val is not None:\n process_input(val[0])\n last_val = val[0]\n elif last_val is not None:\n process_input(last_val)\n\n scene.step()\n\n update()\n delay = utime.ticks_diff(next_loop, utime.ticks_ms())\n if delay > 0:\n utime.sleep_ms(delay)\n else:\n print(\"odelay:\", delay)\n\ngame_loop()\n","sub_path":"vyruss/python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"434462576","text":"class TrieNode:\n def __init__(self):\n self.children = {}\n self.endOfWord = False\n\n\nclass WordDictionary(object):\n\n def __init__(self):\n self.root = TrieNode()\n\n def addWord(self, word):\n cur = self.root\n for i in word:\n if i not in cur.children:\n cur.children[i] = TrieNode()\n cur = cur.children[i]\n cur.endOfWord = True\n\n def search(self, word):\n def dfs(j, cur):\n for i in range(j, len(word)):\n c = word[i]\n if c == \".\":\n for child in cur.children.values():\n if dfs(i + 1, child):\n return True\n return False\n else:\n if c not in cur.children:\n return False\n cur = cur.children[c]\n return cur.endOfWord\n\n return dfs(0, self.root)\n\n # Your WordDictionary object will be instantiated and called as such:\n# obj = WordDictionary()\n# obj.addWord(word)\n# param_2 = obj.search(word)\n","sub_path":"src/211-design-add-and-search-words-data-structure.py","file_name":"211-design-add-and-search-words-data-structure.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"341448045","text":"import xlwt\nimport numpy as np\n\ndef writeExcelResult(path, flag):\n\tresult = xlwt.Workbook(encoding='utf-8')\n\tsheet = result.add_sheet('Sheet 1', cell_overwrite_ok=True)\n\t# Write header\n\theader = [\n\t\t\"Image Name\", \"Corner Point 1\", \"Corner Point 2\", \"Corner Point 3\",\n\t\t\"Corner Point 4\", \"Student ID Number\", \"Phone Number\", \"Personal ID Number\"\n\t]\n\tfor i in range(len(header)):\n\t\tsheet.write(0, i, header[i])\n\t# Read result txt datas into xlsx datas\n\tsrcPath = \"../temp0/img\"\n\tlength = 10\n\tif flag :\n\t\tsrcPath = \"../temp/img\"\n\n\trawNumber = 1\n\tfor index in range(length):\n\t\t# Write Image Number\n\t\timgName = str(index + 1) + \".bmp\"\n\t\tsheet.write(rawNumber, 0, imgName)\n\t\tcolNumber = 1\n\t\t# Write corner points result\n\t\ttargetPath = srcPath + str(index+1) + \"/points.txt\"\n\t\treader = np.loadtxt(targetPath, dtype=str)\n\t\tfor j in range(len(reader)):\n\t\t\tdstStr = \"(\" + 
str(reader[j][0]) + \", \" + str(reader[j][1]) + \")\"\n\t\t\tsheet.write(rawNumber, colNumber, dstStr)\n\t\t\tcolNumber += 1\n\n\t\t# Write Number results\n\t\ttargetpath = srcPath + str(index+1) + \"/detect.txt\"\n\t\treader = np.loadtxt(targetpath, dtype=str)\n\t\tfor j in range(len(reader)):\n\t\t\tif j > 2 and j % 3 == 0:\n\t\t\t\trawNumber += 1\n\t\t\t\tcolNumber = 5\n\t\t\t\tsheet.write(rawNumber, 0, imgName)\n\t\t\ttargetStr = reader[j]\n\t\t\tsheet.write(rawNumber, colNumber, targetStr)\n\t\t\tcolNumber += 1\n\t\trawNumber += 1\n\n\tresult.save(path)\n\n\nif __name__ == '__main__':\n\tpath = \"../output/result.xlsx\"\n\twriteExcelResult(path=path, flag=True)","sub_path":"Final/Final-Project/PartI/python/WriteResult.py","file_name":"WriteResult.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"20559952","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nroot = os.path.dirname(__file__)\nif root not in sys.path:\n sys.path.insert(0, root)\n\nos.environ['DJANGO_SETTINGS_MODULE'] = 'test_settings'\n\ndef run_tests(verbosity=1):\n from django.test.simple import DjangoTestSuiteRunner\n runner = DjangoTestSuiteRunner(verbosity=verbosity)\n return runner.run_tests(['test_override_settings'])\n\nif __name__ == '__main__':\n run_tests()\n","sub_path":"tests/run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"91373793","text":"import numpy as np\r\nimport pandas as pd\r\nimport datetime\r\n# import pandas_datareader.data as web\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\n\r\n\r\ndef feedFromCsv(ticker):\r\n\tdf = pd.read_csv('csv_sources/{}_d.csv'.format(ticker))\r\n\r\n\tdef updateStock(ticker):\r\n\t\tif df[-1:]['Date'].item() == str(datetime.datetime.today().date()):\r\n\t\t\tprint('Data up to date')\r\n\t\t\treturn pd.DataFrame()\r\n\r\n\t\tresp = requests.get('https://stooq.com/q/d/?s=' + ticker)\r\n\t\tsoup = BeautifulSoup(resp.text, 'html.parser')\r\n\t\ttable = soup.find('table', { 'id': 'fth1' })\r\n\t\tif not table:\r\n\t\t\tprint('invalid ticker or site limit exceeded')\r\n\t\t\treturn pd.DataFrame()\r\n\r\n\t\trows = table.findAll('tr')[1:]\r\n\t\tdff = pd.DataFrame()\r\n\t\tfor row in rows:\r\n\t\t\trdata = row.findAll('td')\r\n\t\t\ttopDate = datetime.datetime.strptime(rdata[1].text, '%d %b %Y').date()\r\n\t\t\tif df[-1:]['Date'].item() == str(topDate):\r\n\t\t\t\tbreak\r\n\t\t\trdict = {}\r\n\t\t\trdict['Date'] = topDate\r\n\t\t\trdict['Open'] = float(rdata[2].text)\r\n\t\t\trdict['High'] = float(rdata[3].text)\r\n\t\t\trdict['Low'] = float(rdata[4].text)\r\n\t\t\trdict['Close'] = float(rdata[5].text)\r\n\t\t\trdict['Volume'] = int(rdata[8].text.replace(',', ''))\r\n\t\t\tdff = dff.append(rdict, ignore_index=True)\r\n\r\n\t\tdff = dff.iloc[::-1]\r\n\t\treturn dff\r\n\r\n\tdfn = updateStock(ticker)\r\n\tif not dfn.empty:\r\n\t\tnLen = len(dfn)\r\n\t\tdf.drop(df.index[:nLen], inplace=True)\r\n\t\tdf = df.append(dfn, ignore_index=True)\r\n\t\tdf.to_csv('csv_sources/{}_d.csv'.format(ticker), index=False)\r\n\t\tprint('Data actualized')\r\n\r\n\treturn df\r\n\r\ndef matrixOfReturns(tickers):\r\n\tmtx = np.empty((0, 1000))\r\n\tfor ticker in tickers:\r\n\t\tdf = feedFromCsv(ticker)\r\n\t\tdf['dRet'] = df['Close'].pct_change() * 100\r\n\t\tdf.dropna(inplace=True)\r\n\t\tmtx = np.append(mtx, np.asmatrix(df['dRet']), axis=0)\r\n\r\n\treturn 
mtx\r\n","sub_path":"feedFromCsv.py","file_name":"feedFromCsv.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"24998706","text":"\n\nfrom xai.brain.wordbase.nouns._piston import _PISTON\n\n#class header\nclass _PISTONS(_PISTON, ):\n\tdef __init__(self,): \n\t\t_PISTON.__init__(self)\n\t\tself.name = \"PISTONS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"piston\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_pistons.py","file_name":"_pistons.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"539096772","text":"import numpy as np\nfrom torch.utils.data import Dataset\nimport glob\n\nclass DSDDataset(Dataset):\n def __init__(self, root_dir):\n self.root_dir = root_dir\n self.length = len(glob.glob(root_dir+\"/*_X.npy\"))\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, idx):\n base_name = self.root_dir+\"/\"+str(idx)\n mix = np.load(\n base_name + \"_X.npy\"\n )\n mix = mix.astype(np.float32)\n\n mask = np.load(\n base_name + \"_Y.npy\"\n ).astype(np.float32)\n return mix, mask\n","sub_path":"RGT1/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"107602929","text":"\n#! /powerapps/share/python-anaconda-3.2019.7/bin/python3.7\n\"\"\"\n@Author: odedkushnir\n\n\"\"\"\n\nimport sys, argparse\nimport matplotlib\n# matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport subprocess\nfrom util import pbs_jobs\nimport seaborn as sns\nimport numpy as np\n\n\nclass comutations_bubble(object):\n def __init__(self, a, b, freq, pval):\n self.nodes = set([a, b])\n self.distances = {'_'.join([str(a), str(b)]): freq, '_'.join([str(b), str(a)]): freq}\n self.pvalues = {'_'.join([str(a), str(b)]): pval, '_'.join([str(b), str(a)]): pval}\n self.meandist = np.mean(list(self.distances.values()))\n\n def __len__(self):\n return len(self.nodes)\n\n def __repr__(self):\n return ':'.join([str(a) for a in [len(self), min(self.nodes), max(self.nodes)]])\n\n def can_merge(self, other, distance=10):\n return len(self.nodes.intersection(other.nodes)) > 0 and (\n (other.meandist * distance >= self.meandist) and (other.meandist / distance <= self.meandist))\n\n def union(self, other):\n self.nodes = self.nodes.union(other.nodes)\n self.distances.update(other.distances)\n self.pvalues.update(other.pvalues)\n self.meandist = np.mean(list(self.distances.values()))\n\n\ndef load_file(path, label):\n df = pd.read_csv(path, sep=\"\\t\", names=[\"start\", \"end\", \"fisher_stat\", \"pval\", \"variant_freq\"])\n df[\"sample\"] = label\n df = df[df[\"pval\"] < 1]\n return df\n\n\ndef obtain_comutations(comutations, max_pval=10 ** -9, distance=10):\n dfs = []\n sig_positions = comutations[(comutations[\"pval\"] < max_pval)]\n\n lines = sig_positions.itertuples(index=False)\n nodes = []\n for line in lines:\n a = line[0]\n b = line[1]\n pval = line[3]\n dist = line[4]\n node = comutations_bubble(a, b, dist, pval)\n nodes.append(node)\n\n results = []\n nodes = sorted(nodes, key=lambda item: -item.meandist)\n while len(nodes) > 0:\n bubble = nodes.pop(0)\n merged = False\n for item in nodes:\n if item.can_merge(bubble, distance):\n item.union(bubble)\n merged = True\n break\n if not
merged:\n results.append(bubble)\n\n if results:\n # for cluster in sorted(results, key=lambda item: -item.meandist):\n # print cluster.meandist, sorted(cluster.nodes)\n # Collate lines\n for i, cluster in zip(range(len(results)), sorted(results, key=lambda item: -item.meandist)):\n # distances=[]\n distances = map(lambda x: (int(x.split(\"_\")[0]), int(x.split(\"_\")[1]), cluster.distances[x]),\n cluster.distances.keys())\n data = pd.DataFrame(distances, columns=[\"Pos1\", \"Pos2\", \"Freq\"])\n data[\"Stretch\"] = i\n\n data[\"Sample\"] = comutations.iloc[0]['sample']\n data[\"meandist\"] = cluster.meandist\n dfs.append(data)\n\n return pd.concat(dfs)\n\n\ndef collect_cooccurs(freqs_df, comutations_df, max_pval=10 ** -9, distance=10, add_ctx=\"ADAR\"):\n sample_comutations = comutations_df\n sample_comutations = sample_comutations[sample_comutations[\"pval\"] < 0.1]\n sample_comutations = obtain_comutations(sample_comutations, max_pval=max_pval, distance=distance)\n sample_comutations = sample_comutations[[\"Pos1\", \"Stretch\", \"meandist\"]].sort_values(\n [\"Pos1\", \"Stretch\"]).drop_duplicates(['Pos1'])\n merged = freqs_df[freqs_df[\"Rank\"] != 0]\n merged = merged.merge(sample_comutations, how=\"left\", left_on=\"Pos\", right_on=\"Pos1\")\n\n merged[\"Stretch\"] = \"s\" + merged[\"Stretch\"].astype(str)\n merged[\"Stretch\"] = merged[\"Stretch\"].apply(lambda x: x.replace(\".0\", \"\"))\n merged[\"Stretch\"] = np.where(merged[\"Stretch\"] == \"snan\", '-', merged[\"Stretch\"])\n merged[\"Co-occurrences_identified\"] = np.where(merged[\"Stretch\"] == \"-\", \"No\", \"Yes\")\n merged[\"ADAR_context\"] = np.where(merged[\"Prev\"].isin([\"AA\", \"UA\"]), \"Yes\",\n np.where(merged[\"Ref\"] == \"A\", \"No\", \"Not A>G\"))\n merged[\"ADAR_reverse_context\"] = np.where(merged[\"Next\"].isin([\"UU\", \"UA\"]), \"Yes\",\n np.where(merged[\"Ref\"] == \"U\", \"No\", \"Not U>C\"))\n merged[\"APOBEC3F_context\"] = np.where(merged[\"Next\"].isin([\"GA\"]), \"Yes\",\n np.where(merged[\"Ref\"] == \"G\", \"No\", \"Not G>A\"))\n # Avoid APOBEC3G\n # merged[\"APOBEC3G context\"] = np.where(merged[\"next\"].isin([\"GG\"]), \"Yes\",np.where(merged[\"Ref\"]==\"G\",\"No\",\"Not G>A\"))\n merged[\"APOBEC3G_context\"] = \"No\"\n merged[\"Editing_context\"] = np.where(\n merged[\"ADAR_context\"] == \"Yes\",\n \"ADAR (sense)\",\n np.where(\n merged[\"ADAR_reverse_context\"] == \"Yes\",\n \"ADAR (antisense)\",\n np.where(\n merged[\"APOBEC3F_context\"] == \"Yes\",\n \"APOBEC3F\",\n np.where(\n merged[\"APOBEC3G_context\"] == \"Yes\",\n \"APOBEC3G\",\n \"No editing context\",\n ),\n ),\n ),\n )\n\n if add_ctx == \"ADAR\":\n merged[\"ADAR\"] = np.where(merged[\"Next\"].isin([\"UU\", \"UA\"]),\n \"Reverse complement\",\n np.where(merged[\"Prev\"].isin([\"AA\", \"UA\"]),\n \"Forward\",\n \"No\"\n )\n )\n return merged\n \"\"\"1. 
Create all_parts.blast, all_parts.blast.cropped, mutations_all.txt.cropped\"\"\"\ndef main():\n# cmds = \"for sample in 2_1 2_2 5_1 5_2 8_1 8_2 10_1 10_2 12_1 12_2; do cd /sternadi/home/volume3/okushnir/AccuNGS/\" \\\n# \"20201008RV-202329127/merged/passages/p${sample}/20201012_q38; cat mutations_all.txt | grep -v ref_pos > \" \\\n# \"mutations_all.txt.cropped ; for file in `ls tmp/*.blast`; do cat $file >> all_parts.blast ; done ; \" \\\n# \"cat all_parts.blast | cut -f1,2,3 > all_parts.blast.cropped ; done\"\n# cmd_file = \"/sternadi/home/volume3/okushnir/Cluster_Scripts/all_parts.cmd\"\n# pbs_jobs.create_pbs_cmd(cmd_file, alias=\"all_parts\", gmem=3, cmds=cmds, load_python=False)\n# job_id = pbs_jobs.submit(cmd_file)\n# status = pbs_jobs.check_pbs(job_id)\n# if status == \"Done\":\n# print(\"Done!\")\n# #\n# if __name__ == \"__main__\":\n# main()\n\n# def main(args):\n# sample = args.sample\n\n \"\"\"2. Run variants_on_same_read.py\"\"\"\n # section_lst = (1, 2, 3)\n # for i in section_lst:\n # cmds = \"base=$sample\\n\" \\\n # \"freqs=`ls ${base} | grep freqs`\\n\" \\\n # \"mkdir ${base}/accungs_associations\\n\" \\\n # \"python /sternadi/home/volume1/maozgelbart/variants_on_same_read.py ${base}/all_parts.blast.cropped $\" \\\n # \"{base}/mutations_all.txt.cropped $PBS_ARRAY_INDEX ${base}/${freqs} > ${base}/accungs_associations/\" \\\n # \"$PBS_ARRAY_INDEX.txt\"\n # cmd_file = \"/sternadi/home/volume3/okushnir/Cluster_Scripts/co_occur.cmd\"\n # if i == 1:\n # pbs_jobs.create_array_pbs_cmd(cmd_file, jnum=\"3522-4999\", alias=\"accungs_assoc\", gmem=3, cmds=cmds)\n # print(\"qsub -v sample='%s' %s\" % (sample, cmd_file))\n # job_id = pbs_jobs.submit(\"-v sample='%s' %s\" % (sample, cmd_file))\n # # print(job_id)\n # job_id = job_id.replace(\"[]\", \"\")\n # print(job_id)\n # status = pbs_jobs.check_pbs(job_id)\n # if status == \"Done\":\n # print(\"Done (%s/%s)\" % (str(i), str(len(section_lst))))\n # continue\n # if i == 2:\n # pbs_jobs.create_array_pbs_cmd(cmd_file, jnum=\"5000-6499\", alias=\"accungs_assoc\", gmem=3, cmds=cmds)\n # print(\"qsub -v sample='%s' %s\" % (sample, cmd_file))\n # job_id = pbs_jobs.submit(\"-v sample='%s' %s\" % (sample, cmd_file))\n # # print(job_id)\n # job_id = job_id.replace(\"[]\", \"\")\n # print(job_id)\n # status = pbs_jobs.check_pbs(job_id)\n # if status == \"Done\":\n # print(\"Done (%s/%s)\" % (str(i), str(len(section_lst))))\n # continue\n # if i == 3:\n # pbs_jobs.create_array_pbs_cmd(cmd_file, jnum=\"6500-7212\", alias=\"accungs_assoc\", gmem=3, cmds=cmds)\n # print(\"qsub -v sample='%s' %s\" % (sample, cmd_file))\n # job_id = pbs_jobs.submit(\"-v sample='%s' %s\" % (sample, cmd_file))\n # # print(job_id)\n # job_id = job_id.replace(\"[]\", \"\")\n # print(job_id)\n # status = pbs_jobs.check_pbs(job_id)\n # if status == \"Done\":\n # print(\"Done (%s/%s)\" % (str(i), str(len(section_lst))))\n # continue\n # print(\"Done!!!!\")\n\n \"\"\"3. Concatenate all the files\"\"\"\n # cmds = \"cd $sample/accungs_associations; cat *txt>all.txt\"\n # cmd_file = \"/sternadi/home/volume3/okushnir/Cluster_Scripts/cat_txt.cmd\"\n # pbs_jobs.create_pbs_cmd(cmd_file, alias=\"cat_txt\", gmem=3, cmds=cmds, load_python=False)\n # job_id = pbs_jobs.submit(\"-v sample='%s' %s\" % (sample, cmd_file))\n # print(job_id)\n # status = pbs_jobs.check_pbs(job_id)\n # if status == \"Done\":\n # print(\"Done!\")\n\n \"\"\"4. 
Run collect_cooccurs and merge it to freqs file\"\"\"\n passages_lst = [\"p2_1\", \"p2_2\", \"p2_3\", \"p5_1\", \"p5_2\", \"p5_3\", \"p8_1\", \"p8_2\", \"p8_3\", \"p10_1\", \"p10_2\", \"p10_3\",\n \"p12_1\", \"p12_2\", \"p12_3\"]\n for passage in passages_lst:\n sample = \"/sternadi/home/volume3/okushnir/AccuNGS/20201008RV-202329127/merged/passages/%s/20201012_q38\" % (\n passage)\n label = sample.split(\"/\")[-2]\n df_path = \"%s/accungs_associations/all.txt\" % sample\n df = load_file(df_path, label)\n freqs_df = pd.read_csv(\"/sternadi/home/volume3/okushnir/AccuNGS/20201008RV-202329127/merged/passages/q38_data_mutation.csv\")\n # freqs_df = pd.read_csv(\n # \"/sternadi/home/volume3/okushnir/AccuNGS/190627_RV_CV/merged/RVB14/q38_data_mutation.csv\")\n label = label.replace('_', '-')\n freqs_df = freqs_df.loc[freqs_df.label == label]\n\n merged_df = collect_cooccurs(freqs_df, df)\n merged_df = merged_df.loc[merged_df.Stretch != \"-\"]\n merged_df = merged_df.loc[(merged_df.Mutation == \"U>C\") | (merged_df.Mutation == \"A>G\") |\n (merged_df.Mutation == \"G>A\")| (merged_df.Mutation == \"C>U\")]\n merged_df[\"Pos\"] = merged_df[\"Pos\"].astype(int)\n merged_df = merged_df.sort_values(by=[\"meandist\", \"Stretch\", \"Pos\"])\n # merged_df = merged_df.loc[(merged_df.Editing_context != \"ADAR (sense)\") & (merged_df.Editing_context != \"ADAR (antisense)\")]\n\n file_name = sample + \"/co_occur_all.csv\"\n co_occur_df = merged_df[[\"Pos\", \"Base\", \"Frequency\", \"Ref\", \"Read_count\", \"Rank\", \"Prob\", \"Mutation\", \"Stretch\",\n \"meandist\", \"Co-occurrences_identified\", \"ADAR_context\", \"ADAR_reverse_context\",\n \"APOBEC3G_context\", \"APOBEC3F_context\", \"Editing_context\", \"ADAR\", \"label\"]]\n co_occur_df = co_occur_df.sort_values(by=[\"meandist\", \"Stretch\", \"Pos\"])\n co_occur_df.to_csv(file_name, sep=\",\", encoding='utf-8')\n print(merged_df)\n\n\nif __name__ == \"__main__\":\n main()\n\n# if __name__ == \"__main__\":\n# parser = argparse.ArgumentParser()\n# parser.add_argument(\"sample\", type=str, help=\"sample dir path\")\n# args = parser.parse_args(sys.argv[1:])\n# main(args)","sub_path":"Co-occur/Archive/Passages/co_occur_new_pass.py","file_name":"co_occur_new_pass.py","file_ext":"py","file_size_in_byte":11886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"254256287","text":"#!/usr/bin/env python3\n\nimport argparse\nimport os\nimport subprocess\nimport sys\n\nimport rospkg\n\n# pylint: disable=invalid-name\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Create symlink to compile_commands for ros packages\"\n )\n parser.add_argument(\"package_name\", type=str, help=\"Package name\")\n\n args = parser.parse_args()\n\n workspace = os.getenv(\"ROS_WORKSPACE\")\n\n # Get build directory\n build = subprocess.check_output([\"catkin\", \"locate\", \"-b\"], cwd=workspace)\n build = build.strip()\n build = build.decode('utf-8')\n\n # Get package to do the operation\n pkg = rospkg.RosPack()\n try:\n package = pkg.get_path(args.package_name)\n except rospkg.ResourceNotFound as e:\n print(\n \"Package {} could not be found\".format(args.package_name), file=sys.stderr\n )\n exit(1)\n\n dir_name = os.path.basename(package)\n\n # Find compile_commands.json\n full_path = \"{}/{}/compile_commands.json\".format(build, dir_name)\n print(full_path)\n if os.path.isfile(full_path):\n # Link compile commands\n symlink = \"{}/compile_commands.json\".format(package)\n answer = input(\n \"Create 
symlink from '{}' to '{}'? ([Y]/n)\\n\".format(full_path, symlink)\n )\n if answer in [\"\", \"Y\", \"y\"]:\n print(\"Creating symlink\")\n os.symlink(full_path, symlink)\n","sub_path":"scripts/find_compile_commands.py","file_name":"find_compile_commands.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"551498640","text":"def counts(a: str):\n dic = dict()\n for i in a:\n if i not in dic.keys():\n num = + 1\n dic[i] = num\n else:\n dic[i] = dic[i] + 1\n return dic\n\n\ndef comp(a, b):\n if counts(a) == counts(b):\n return True\n else:\n return False\n\n\nprint(comp(\"abacd\", \"aabcd\"))\n","sub_path":"python/leecode/比较字符串.py","file_name":"比较字符串.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"273713663","text":"class Student(object):\n\n def __init__(self, name, grade):\n self.name = name;\n self.grade = grade;\n\n def printStudent(self):\n print (\"Name: %s Grade %s\" %(self.name, self.grade))\n\n\nstuA = Student(\"Abc\", \"12\");\nstuA.name = \"Garima\"\nstuA.grade = \"pre-K\"\nstuA.printStudent();\n\n# inheritance\nclass MathStudent(Student):\n\n def __init__(self, name, grade, mathScore):\n super(MathStudent,self).__init__(name, grade)\n self.mathScore = mathScore\n\nstuM = MathStudent(\"Geek\", \"5\", \"98\")\nstuM.printStudent()\nprint(\"MathScore: %s\" %stuM.mathScore)","sub_path":"Student.py","file_name":"Student.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"596797410","text":"import rlpy\n#### Domain ####\ndomain = rlpy.Domains.InfCartPoleBalance()\n### Agent ####\nrepresentation \t= rlpy.Representations.Tabular(domain, discretization=20)\npolicy = rlpy.Policies.eGreedy(representation, epsilon=0.1)\nagent = rlpy.Agents.SARSA(policy, representation, domain.discount_factor)\n### Experiment ####\nexperiment = rlpy.Experiments.Experiment(agent, domain, max_steps=100)\nexperiment.run()\nexperiment.save()\n","sub_path":"examples/tutorial/paper_example.py","file_name":"paper_example.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"243267595","text":"from math import sin, cos, radians\n\n\ndef func_args_unpack(func, args):\n return func(*args)\n\n\ndef get_len(iterable, total):\n try:\n length = iterable.__len__()\n except AttributeError:\n length = total\n return length\n\n\ndef cpu_bench(number):\n product = 1.0\n for elem in range(number):\n angle = radians(elem)\n product *= sin(angle)**2 + cos(angle)**2\n return product\n\n\ndef fibonacci(number):\n if number <= 1:\n return number\n else:\n return fibonacci(number-2) + fibonacci(number-1)\n\n\ndef iterate_by_pack(iterable, pack_size: int = 1):\n if pack_size < 1:\n raise ValueError(\"pack_size must be greater than 0\")\n iterator = iter(iterable)\n sentinel = object()\n item = None\n while item is not sentinel:\n pack = []\n for _ in range(pack_size):\n item = next(iterator, sentinel)\n if item is sentinel:\n break\n pack.append(item)\n if pack:\n yield pack\n\n\ndef get_packs_count(array, pack_size):\n total, extra = divmod(len(array), pack_size)\n if extra:\n total += 1\n return 
total\n\n","sub_path":"parallelbar/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"645932853","text":"from com.bridgelabz.utility.Data_structure_utility import *\n\nif __name__==\"__main__\":\n\n list = OrderedList()\n list.add(30)\n list.add(31)\n list.add(27)\n list.add(100)\n list.add(101)\n print (list)\n print (list.getIndex(27))\n print (list.getItem(4))\n list.pop(4)\n print (list)\n list.insert(1, 5)\n print (list)","sub_path":"com/bridgelabz/data_structure/OrderedList.py","file_name":"OrderedList.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"291259134","text":"import pygame\nfrom math import pi, cos, sin, atan2\n\n\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nBACKGROUND = (0, 255, 255)\n\n\nwall1 = pygame.image.load('./minecraftStone.png')\nwall2 = pygame.image.load('./minecraftWood.png')\nwall3 = pygame.image.load('./diamond.png')\nwall4 = pygame.image.load('./lava.png')\nwall5 = pygame.image.load('./water.png')\n\ntextures = {\n \"1\": wall1,\n \"2\": wall2,\n \"3\": wall3,\n \"4\": wall4,\n \"5\": wall5,\n}\n\nhand = pygame.image.load('./stevehand.png')\n\nitem = pygame.image.load('./player.png')\n\nenemies = [\n {\n \"x\": 100,\n \"y\": 200,\n \"texture\": pygame.image.load('./enemy.png')\n },\n {\n \"x\": 280,\n \"y\": 190,\n \"texture\": pygame.image.load('./enemy.png')\n },\n {\n \"x\": 225,\n \"y\": 340,\n \"texture\": pygame.image.load('./enemy.png')\n },\n {\n \"x\": 220,\n \"y\": 425,\n \"texture\": pygame.image.load('./enemy.png')\n },\n {\n \"x\": 425,\n \"y\": 275,\n \"texture\": pygame.image.load('./diamante.png')\n }\n]\n\nclass Raycaster(object):\n def __init__(self, screen):\n _, _, self.width, self.height = screen.get_rect()\n self.screen = screen\n self.blocksize = 50\n self.player = {\n \"x\": self.blocksize + 20,\n \"y\": self.blocksize + 20,\n \"a\": 0,\n \"fov\": pi/3\n }\n self.map = []\n self.zbuffer = [-float('inf') for z in range(0, 500)]\n # self.clear()\n\n def clear(self):\n for x in range(self.width):\n for y in range(self.height):\n r = int((x/self.width)*255) if x/self.width < 1 else 1\n g = int((y/self.height)*255) if y/self.height < 1 else 1\n b = 0\n color = (r, g, b)\n self.point(x, y, color)\n\n def point(self, x, y, c = None):\n screen.set_at((x, y), c)\n\n def draw_rectangle(self, x, y, texture):\n for cx in range(x, x + 50):\n for cy in range(y, y + 50):\n tx = int((cx - x)*173 / 50)\n ty = int((cy - y)*173 / 50)\n c = texture.get_at((tx, ty))\n self.point(cx, cy, c)\n\n def load_map(self, filename):\n with open(filename) as f:\n for line in f.readlines():\n self.map.append(list(line))\n\n def cast_ray(self, a):\n d = 0\n while True:\n x = self.player[\"x\"] + d*cos(a)\n y = self.player[\"y\"] + d*sin(a)\n\n i = int(x/50)\n j = int(y/50)\n\n if self.map[j][i] != ' ':\n hitx = x - i*50\n hity = y - j*50\n\n if 1 < hitx < 49:\n maxhit = hitx\n else:\n maxhit = hity\n\n tx = int(maxhit * 173 / 50)\n\n return d, self.map[j][i], tx\n\n self.point(int(x), int(y), (255, 255, 255))\n\n d += 1\n\n def draw_stake(self, x, h, texture, tx):\n start = int(250 - h/2)\n end = int(250 + h/2)\n for y in range(start, end):\n ty = int(((y - start)*173)/(end - start))\n c = texture.get_at((tx, ty))\n self.point(x, y, c)\n\n def draw_sprite(self, sprite):\n sprite_a = atan2(sprite[\"y\"] - self.player[\"y\"], sprite[\"x\"] - 
self.player[\"x\"])\n\n sprite_d = ((self.player[\"x\"] - sprite[\"x\"])**2 + (self.player[\"y\"] - sprite[\"y\"])**2)**0.5\n sprite_size = (500/sprite_d) * 70\n\n sprite_x = 500 + (sprite_a - self.player[\"a\"])*500/self.player[\"fov\"] + 250 - sprite_size/2\n sprite_y = 250 - sprite_size/2\n\n sprite_x = int(sprite_x)\n sprite_y = int(sprite_y)\n sprite_size = int(sprite_size)\n\n for x in range(sprite_x, sprite_x + sprite_size):\n for y in range(sprite_y, sprite_y + sprite_size):\n if 500 < x < 1000 and self.zbuffer[x - 500] >= sprite_d:\n tx = int((x - sprite_x) * 128/sprite_size)\n ty = int((y - sprite_y) * 128/sprite_size)\n c = sprite[\"texture\"].get_at((tx, ty))\n if c != (152, 0, 136, 255):\n self.point(x, y, c)\n self.zbuffer[x - 500] = sprite_d\n\n def draw_player(self, xi, yi, w = 100, h = 100):\n for x in range(xi, xi + w):\n for y in range(yi, yi + h):\n tx = int((x - xi) * 32/w)\n ty = int((y - yi) * 32/h)\n c = hand.get_at((tx, ty))\n if c != (152, 0, 136, 255):\n self.point(x, y, c)\n\n def draw_item(self, xi, yi, w = 200, h = 200):\n for x in range(xi, xi + w):\n for y in range(yi, yi + h):\n tx = int((x - xi) * 32/w)\n ty = int((y - yi) * 32/h)\n c = item.get_at((tx, ty))\n if c != (152, 0, 136, 255):\n self.point(x, y, c)\n\n def render(self):\n for x in range(0, 500, 50):\n for y in range(0, 500, 50):\n i = int(x/50)\n j = int(y/50)\n if self.map[j][i] != ' ':\n self.draw_rectangle(x, y, textures[self.map[j][i]])\n\n self.point(self.player[\"x\"], self.player[\"y\"], (255, 255, 255))\n\n for i in range(0, 500):\n self.point(500, i, (0, 0, 0))\n self.point(501, i, (0, 0, 0))\n self.point(499, i, (0, 0, 0))\n\n for i in range(0, 500):\n a = self.player[\"a\"] - self.player[\"fov\"]/2 + self.player[\"fov\"]*i/500\n d, c, tx = self.cast_ray(a)\n x = 500 + i\n h = 500/(d*cos(a-self.player[\"a\"])) * 70\n self.draw_stake(x, h, textures[c], tx)\n self.zbuffer[i] = d\n\n for enemy in enemies:\n self.point(enemy[\"x\"], enemy[\"y\"], (0, 0, 0))\n self.draw_sprite(enemy)\n\n self.draw_player(1000\n - 50 - 128, 500 - 100)\n self.draw_item(1000-300-128,500-200)\n\n\n\npygame.init()\nsound = pygame.mixer.Sound(\"./footsteps.wav\")\n\n\ndef text_objects(text, font):\n textSurface = font.render(text, True, (0,0,0))\n return textSurface, textSurface.get_rect()\n\ndef button(msg,x,y,w,h,ic,ac, action=None):\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n print(click)\n if x+w > mouse[0] > x and y+h > mouse[1] > y:\n pygame.draw.rect(screen, ac,(x,y,w,h))\n\n if click[0] == 1 and action != None:\n if action == 'play':\n game()\n elif action == 'quit':\n pygame.quit()\n quit()\n\n else:\n pygame.draw.rect(screen, ic,(x,y,w,h))\n\n smallText = pygame.font.Font(\"freesansbold.ttf\",20)\n textSurf, textRect = text_objects(msg, smallText)\n textRect.center = ( (x+(w/2)), (y+(h/2)) )\n screen.blit(textSurf, textRect)\n \n\nscreen = pygame.display.set_mode((1000, 500))\nscreen.set_alpha(None)\nr = Raycaster(screen)\nr.load_map('./map.txt')\n\ndef gameWin():\n intro = True\n\n while intro:\n for event in pygame.event.get():\n if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):\n pygame.quit()\n quit()\n\n screen.fill((0,0,255))\n largeText = pygame.font.Font('freesansbold.ttf', 115)\n TextSurf, TextRect = text_objects('Ganaste', largeText)\n TextRect.center = ((1000/2),(500/2))\n screen.blit(TextSurf, TextRect)\n\n button('Quit',450,400,100,50,(255,0,0),(200,0,0),'quit')\n\n pygame.display.update()\n\ndef gameIntro():\n intro 
= True\n\n while intro:\n for event in pygame.event.get():\n if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):\n pygame.quit()\n quit()\n\n screen.fill((0,0,255))\n largeText = pygame.font.Font('freesansbold.ttf', 115)\n TextSurf, TextRect = text_objects('Wolfencraft', largeText)\n TextRect.center = ((1000/2),(100))\n screen.blit(TextSurf, TextRect)\n\n largeText = pygame.font.Font('freesansbold.ttf', 60)\n TextSurf, TextRect = text_objects('Encuentra el diamante', largeText)\n TextRect.center = ((1000/2),(250))\n screen.blit(TextSurf, TextRect)\n\n button('Start',450,400,100,50,(0,255,0),(0,200,0),'play')\n\n pygame.display.update()\n\ndef game():\n c = 0\n jugar = True\n pygame.mixer.music.load('./minecraftMusic.mp3')\n pygame.mixer.music.play(-1)\n while jugar:\n \n print(pygame.mouse.get_pos())\n screen.fill((113, 113, 113))\n r.render()\n\n for e in pygame.event.get():\n if e.type == pygame.QUIT or (e.type == pygame.KEYDOWN and e.key == pygame.K_ESCAPE):\n exit(0)\n if e.type == pygame.KEYDOWN:\n if e.key == pygame.K_a:\n r.player[\"a\"] -= pi/10\n elif e.key == pygame.K_d:\n r.player[\"a\"] += pi/10\n\n elif e.key == pygame.K_RIGHT:\n r.player[\"x\"] += 10\n pygame.mixer.Sound.play(sound)\n elif e.key == pygame.K_LEFT:\n r.player[\"x\"] -= 10\n pygame.mixer.Sound.play(sound)\n elif e.key == pygame.K_UP:\n r.player[\"y\"] -= 10\n pygame.mixer.Sound.play(sound)\n elif e.key == pygame.K_DOWN:\n r.player[\"y\"] += 10\n pygame.mixer.Sound.play(sound)\n\n if e.key == pygame.K_f:\n # Test the FULLSCREEN bit with a bitwise AND; the original 'and' was truthy whenever any flag was set\n if screen.get_flags() & pygame.FULLSCREEN:\n pygame.display.set_mode((1000, 500))\n else:\n pygame.display.set_mode((1000, 500), pygame.DOUBLEBUF|pygame.HWACCEL|pygame.FULLSCREEN)\n if r.player['x'] > 398 and r.player['x'] < 451 and r.player['y'] > 252 and r.player['y'] < 298:\n print('ganaste')\n gameWin()\n\n pygame.display.flip()\n\ngameIntro()\n","sub_path":"Proyecto3/cast.py","file_name":"cast.py","file_ext":"py","file_size_in_byte":8822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"160167419","text":"#!/usr/bin/env python\n\nimport roslib\nimport rospy\nimport sys\nimport argparse\nimport tf\nimport numpy as np\nfrom tf.transformations import *\nfrom math import acos, sqrt\nfrom ahbros.coordinate_transformations import *\n\n\n\ndef main(args):\n parser = argparse.ArgumentParser()\n parser.add_argument('from_tf', help='Base Frame')\n parser.add_argument('to_tf', help='Target Frame')\n args = parser.parse_args(rospy.myargv()[1:])\n from_tf, to_tf = args.from_tf, args.to_tf\n\n #print('Consider using: rosrun tf2 tf2_echo %s %s' % (from_tf, to_tf))\n\n rospy.init_node('get_relative_tf', anonymous=True)\n tf_listener = tf.TransformListener()\n\n tf_listener.waitForTransform(from_tf, to_tf, rospy.Time(), rospy.Duration(4.0))\n position, quaternion = tf_listener.lookupTransform(from_tf, to_tf, rospy.Time())\n print('Translation: %s' % str(position))\n print('Quaternion: %s' % str(quaternion))\n matrix = np.dot(translation_matrix(position), quaternion_matrix(quaternion))\n print('Matrix:\\n%s' % matrix)\n poseMsg = homogeneous2pose_msg(matrix)\n print('geometry_msgs/Pose:\\n%s' % poseMsg)\n rpy = homogeneous2rpy(matrix)\n print('RPY: %s' % str(rpy))\n axis_angle = homogeneous2axis_angle(matrix)\n print('Axis Angle: %s' % str(axis_angle))\n print('As static publisher: rosrun tf static_transform_publisher %s %s %s %s 100' % (' '.join([str(p) for p in position]), ' '.join([str(q) for q in quaternion]), from_tf, 
to_tf))\n\n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"scripts/get_relative_tf.py","file_name":"get_relative_tf.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"417989672","text":"import socket\nimport pickle\nimport multiprocessing\nimport time\nfrom transfer import _send_msg, _recv_msg\nfrom info import mds_ip, monitor_ip, storage_ip, num_objects_per_file, max_num_objects_per_pg, MSG_SIZE, HEADERSIZE\n\nSTORAGE_ID = 1\n\n\ndef recovery(node_ip, node_id):\n\t#Will call the monitor to report the down node\n\tsoc = socket.socket()\n\tsoc.settimeout(5)\n\tprint (\"Socket successfully created for Recovery: Primary\")\n\n\tmonitor_1 = monitor_ip[\"primary\"]\n\t\n\ttry :\n\t\tsoc.connect((monitor_1[\"ip\"], monitor_1[\"port\"]))\t\n\t\tsoc.settimeout(None)\n\n\t\tprint(f\"Connecting Primary monitor...\")\n\t\t\n\t\tres = {\"type\": \"FAIL\", \"ip\" : node_ip, \"id\" : node_id}\n\t\t_send_msg(soc, res)\n\t\ttime.sleep(3)\n\t\t\n\t\tmsg = _recv_msg(soc, MSG_SIZE)\n\t\tif msg == None:\n\t\t\tpass\n\t\telif msg[\"type\"] == \"ACK\": \n\t\t\treturn\n\n\texcept : \n\t\tprint(\"Didn't Connect! [Timeout] Primary Monitor is Down\")\n\n\tsoc.close()\n\n\tsoc = socket.socket()\n\tsoc.settimeout(5)\n\tprint (\"Socket successfully created for Recovery: Backup\")\n\t\n\n\tmonitor_2 = monitor_ip[\"backup\"]\n\n\ttry : \n\t\tsoc.connect((monitor_2[\"ip\"], monitor_2[\"port\"]))\t\n\t\tsoc.settimeout(None)\t\t\t\n\t\tprint(f\"Connecting Backup monitor...\")\n\t\n\t\tres = {\"type\": \"FAIL\", \"ip\" : node_ip, \"id\" : node_id}\n\t\t_send_msg(soc, res)\n\t\ttime.sleep(3)\n\n\t\tmsg = _recv_msg(soc, MSG_SIZE)\n\t\tif msg == None:\n\t\t\tpass\n\t\telif msg[\"type\"] == \"ACK\": \n\t\t\treturn\n\n\texcept:\n\t\tprint(\"MAY GOD HELP US!! WE ARE DOOMED\\n\\n\")\n\n\tsoc.close()\n\n\n\ndef gossip():\n\n\twhile True:\n\t\ttime.sleep(10)\n\t\t# Wait for 10 sec to run this protocol\n\n\t\ti=0\n\t\tfor i in range(4):\n\t\t\tif i+1 != STORAGE_ID:\n\t\t\t\tnode_ip = storage_ip[i+1][\"ip\"]\n\t\t\t\tport = storage_ip[i+1][\"port\"]\n\n\t\t\t\tsoc = socket.socket()\n\t\t\t\tsoc.settimeout(5)\n\t\t\t\tprint (\"Socket successfully created for Gossip\")\n\t\t\t\n\t\t\t\ttry :\n\t\t\t\t\tsoc.connect((node_ip, port))\n\t\t\t\t\tsoc.settimeout(None)\n\n\t\t\t\t\tprint(f\"Connecting {node_ip} storage node number {i+1}\")\n\t\t\t\t\t\n\t\t\t\t\tmsg = {\"type\": \"ALIVE\"}\n\t\t\t\t\t_send_msg(soc, msg)\n\t\t\t\t\ttime.sleep(3)\n\n\t\t\t\t\trec = _recv_msg(soc, MSG_SIZE)\n\t\t\t\t\tif rec == None: \n\t\t\t\t\t\tprint(f\"Didn't receive data from Storage {i+1} ip {node_ip}! [Timeout] \")\n\t\t\t\t\t\trecovery(node_ip, i+1)\n\n\t\t\t\t\telif rec[\"type\"] != \"ALIVE\": \n\t\t\t\t\t\trecovery(node_ip, i+1)\n\t\t\t\t\t\n\t\t\t\texcept :\t\n\t\t\t\t\tprint(f\"Didn't Connect to Storage {i+1} ip {node_ip}! [Timeout]\")\n\t\t\t\t\trecovery(node_ip, i+1)\n\t\t\t\n\t\t\t\tsoc.close()\t\n\n\n\ndef heartbeat_protocol():\n\t#This will check for incoming messages \n\t#from other nodes and reply \n\n\ts = socket.socket() \n\tprint (\"Socket successfully created for Heartbeat\")\n\tport = storage_ip[STORAGE_ID][\"port\"] \n\ts.bind(('', port)) \n\tprint (\"Socket binded to %s\" %(port)) \n\t \n\t# put the socket into listening mode \n\ts.listen(5) \n\tprint (\"Socket is listening\") \n\n\twhile True:\n\t\t# Establish connection with client. 
\n\t\tc, addr = s.accept() \n\t\tprint ('Got connection from', addr )\n\n\t\t\n\t\tmsg = _recv_msg(c, MSG_SIZE)\t\n\n\t\tif msg == None:\n\t\t\tprint(f\"Didn't receive data from ip {addr}! [Timeout] \")\n\t\t\trecovery(addr, None)\n\n\t\telif msg[\"type\"] == \"ALIVE\": \n\t\t\t# Answer the gossip ping so the sender knows this node is alive\n\t\t\tres = {\"type\": \"ALIVE\"}\n\t\t\t_send_msg(c, res)\n\n\t\tc.close()\n\n\nif __name__ == \"__main__\":\n\n\tp1 = multiprocessing.Process(name='p1', target=heartbeat_protocol)\n\tp2 = multiprocessing.Process(name='p2', target=gossip)\n\tp1.start()\n\tp2.start()","sub_path":"osd/storage_gossip.py","file_name":"storage_gossip.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"317906751","text":"import pytest\nimport sys\nimport os\n\nfrom shutil import rmtree\nfrom pytest_mock.plugin import MockerFixture\nfrom test_utils.pyspark import TestPySpark\n\nimport pandas as pd\n\nfrom data_ingestion.ingestion import Ingester\nfrom ingestion_expected import get_expected_metadata\n\nclass TestIngestion(TestPySpark):\n\n @classmethod\n def setup_class(cls):\n cls.spark = cls.start_spark()\n root_dir = os.path.dirname(os.path.realpath(__file__)).split(\"/data-ingestion/\")[0]\n cls.parameters = {\n \"co2_input_path\": f\"{root_dir}/datasets/ingestion/inputs/EmissionsByCountry.csv\",\n \"temperatures_global_input_path\": f\"{root_dir}/datasets/ingestion/inputs/GlobalTemperatures.csv\",\n \"temperatures_country_input_path\": f\"{root_dir}/datasets/ingestion/inputs/TemperaturesByCountry.csv\",\n\n \"co2_output_path\": f\"{root_dir}/data-ingestion/tmp/test/outputs/EmissionsByCountry.parquet/\",\n \"temperatures_global_output_path\": f\"{root_dir}/data-ingestion/tmp/test/outputs/GlobalTemperatures.parquet/\",\n \"temperatures_country_output_path\": f\"{root_dir}/data-ingestion/tmp/test/outputs/TemperaturesByCountry.parquet/\",\n }\n cls.ingester = Ingester(cls.spark, cls.parameters)\n return\n\n @classmethod\n def teardown_class(cls):\n cls.stop_spark()\n output_paths = cls.parameters.values()\n for path in output_paths:\n if (\"/tmp/\" in path) and os.path.exists(path):\n rmtree(path.rsplit(\"/\", 1)[0])\n\n def test_replace_invalid_chars(self):\n # BEWARE: dictionaries do not necessarily enforce order.\n # To check column names, always use sorted()\n INVALID_CHARS = [\" \", \",\", \";\", \"\\n\", \"\\t\", \"=\", \"-\", \"{\", \"}\", \"(\", \")\"]\n df = pd.DataFrame(\n {\n 'My Awesome Column': pd.Series([\"Germany\", \"New Zealand\", \"Australia\", \"UK\"]),\n '(Another) Awesome Column': pd.Series([\"Germany\", \"New Zealand\", \"Australia\", \"UK\"]),\n }\n )\n df.columns = [self.ingester.replace_invalid_chars(x) for x in df.columns]\n\n all_columns_valid = True\n for column in df.columns:\n if not all_columns_valid:\n break\n for char in INVALID_CHARS:\n if char in column:\n all_columns_valid = False\n break\n assert all_columns_valid\n assert sorted(df.columns) == sorted([\"My_Awesome_Column\", \"Another_Awesome_Column\"])\n\n def test_fix_columns(self):\n # BEWARE: dictionaries do not necessarily enforce order.\n # To check column names, always use sorted()\n pandas_df = pd.DataFrame(\n {\n 'My Awesome Column': pd.Series([\"Germany\", \"New Zealand\", \"Australia\", \"UK\"]),\n '(Another) Awesome Column': pd.Series([\"Germany\", \"New Zealand\", \"Australia\", \"UK\"]),\n }\n )\n spark_df = self.spark.createDataFrame(pandas_df)\n fixed_df = self.ingester.fix_columns(spark_df)\n assert sorted(fixed_df.columns) == 
sorted([\"My_Awesome_Column\", \"Another_Awesome_Column\"])\n\n def test_run(self, mocker: MockerFixture):\n \"\"\"High level job test: count + schema checks but nothing more granular\"\"\"\n\n # Optional - if mocking is needed:\n # mock_object = mocker.Mock()\n # mock_object.some_property.some_method(some_argument).another_method.return_value = ...\n\n # Run the job and check for _SUCCESS files for each partition\n self.ingester.run()\n\n output_path_keys = [\"temperatures_country_output_path\", \"temperatures_global_output_path\", \"co2_output_path\"]\n output_path_values = [self.parameters[k] for k in output_path_keys]\n expected_metadata_dict = get_expected_metadata()\n expected_metadata = [expected_metadata_dict[k.replace(\"_path\", \"\")] for k in output_path_keys]\n\n for (path, expected) in list(zip(output_path_values, expected_metadata)):\n files = os.listdir(path)\n snappy_parquet_files = [x for x in files if x.endswith(\".snappy.parquet\")]\n # For this exercise, we require you to control each table's partitioning to 1 parquet partition\n assert (True if len(snappy_parquet_files) == 1 else False)\n assert (True if \"_SUCCESS\" in files else False)\n\n # Check count and schema - this covers most of pyspark-test's (https://pypi.org/project/pyspark-test/) functionality already\n # No need for a full equality check (it collects everything into the driver's memory - too time/memory consuming)\n df = self.spark.read.parquet(path)\n assert df.count() == expected[\"count\"]\n assert df.schema == expected[\"schema\"]\n\nif __name__ == '__main__':\n pytest.main(sys.argv)\n","sub_path":"data-ingestion/tests/data_ingestion/test_ingestion.py","file_name":"test_ingestion.py","file_ext":"py","file_size_in_byte":4857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"304876102","text":"import scrapy\nimport csv\n\nheader = [\"Job Title\", \"Company\", \"Location\", \"Apply here\"]\nwith open(\"Jobs Listed on Indeed.csv\", 'w', encoding='UTF8') as file:\n writer = csv.writer(file)\n # write the header\n writer.writerow(header)\n\n\nclass IndeedSpider(scrapy.Spider):\n name = \"indeed\"\n allowed_domains = [\"in.indeed.com\"]\n start_urls = [\"https://in.indeed.com/software-developer-fresher-jobs\"]\n start_index = 10 # will be used to crawl the next pages\n\n def parse(self, response):\n job_cards = response.xpath(\n \"//div[contains(@class, 'jobsearch-SerpJobCard')]\")\n for job_card in job_cards:\n title = job_card.xpath(\"normalize-space(.//h2/a/@title)\").get()\n company = job_card.xpath(\n \"normalize-space(.//div/div/span[@class='company']/text())\").get()\n location = job_card.xpath(\n \"normalize-space(.//div/span[contains(@class,'location')]/text())\").get()\n\n apply = job_card.xpath(\".//h2/a/@href\").get()\n\n if(not title):\n title = \"No Job Title Found\"\n\n if(not company):\n company = \"Company Not Specified\"\n\n if(not location): # to check if it exists or not\n location = \"unspecified\"\n if(apply is None):\n apply_here = \"Couldn't find the link\"\n else:\n apply_here = f\"https://in.indeed.com{apply}\"\n\n # # yield{\n # # 'title': title,\n # # 'Company': company,\n # # 'Location': location,\n # # 'Apply here': apply_here\n # # }\n if(title != \"No Job Title Found\" and company != \"Company Not Specified\" and location != \"unspecified\" and apply_here != \"Couldn't find the link\"):\n dataRow = [title, company, location, apply_here]\n # print(dataRow)\n with open('Jobs Listed on Indeed.csv', 'a', encoding='UTF8', 
newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerow(dataRow)\n print(f\"Page {self.start_index/10} done! \")\n\n next_page = f\"https://in.indeed.com/jobs?q=software+developer+fresher&start={self.start_index}\"\n self.start_index += 10\n if(next_page):\n yield scrapy.Request(url=next_page, callback=self.parse)\n","sub_path":"jobsAggregator/spiders/indeed.py","file_name":"indeed.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"426555641","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"code_info\n@Time :2021 2021/5/10 21:10\n@Author :Hanabi55\n@File :choose7.py\n\"\"\"\nf_read = open(\"poetry.txt\", \"r\", encoding=\"utf-8\")\nf_write = open(\"poetry7.txt\", \"a\", encoding=\"utf-8\")\npakeage = []\nx = 0\nfor i in range(43659):\n line = f_read.readline()\n if len(line) == 33 and \"□\" not in line and \"(\" not in line and \"-\" not in line and line[31]==\"。\":\n num_1 = 0\n num_2 = 0\n for j in line:\n if j == \",\":\n num_1 += 1\n elif j == \"。\":\n num_2 += 1\n if num_1 + num_2 == 4 and line not in pakeage:\n x += 1\n if num_1 != 2:\n print(x)\n pakeage.append(line)\n f_write.write(line)\nf_write.close()\nf_read.close()\n","sub_path":"choose7.py","file_name":"choose7.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"654128169","text":"'''\nGiven an array of positive and negative integers {-1,6,9,-4,-10,-9,8,8,4} (repetition allowed) sort the array in a way\nsuch that the starting from a positive number, the elements should be arranged as one positive and one negative element\n maintaining insertion order. First element should be starting from positive integer and the resultant array should look\n like {6,-1,9,-4,8,-10,8,-9,4}\n'''\n\n\ndef rearrange(arr,n):\n i = -1\n for j in range(n):\n if arr[j] < 0:\n i += 1\n print(i)\n arr[i], arr[j] = arr[j], arr[i]\n print(arr)\n\ndef rearrange_insertion_order(arr, n):\n i = 0\n for j in range(n):\n if arr[j] < 0:\n temp = arr[j]\n arr.remove(arr[j])\n arr.insert(i, temp)\n i += 1\n print(arr)\n\n pos, neg = i, 0\n while (pos < n and neg < pos and arr[neg] < 0):\n arr[neg], arr[pos] = arr[pos], arr[neg]\n pos += 1\n neg += 2\n\narr = [-1, 2, -3, 4, 5, 6, -7, 8, 9]\nn = len(arr)\nrearrange_insertion_order(arr,n)\nprint(arr)\n\n","sub_path":"one_positive_one_negetive.py","file_name":"one_positive_one_negetive.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"410988513","text":"\"\"\"\nThis file contains the primary server code for the SFTP server.\n\nSee COPYING for license information.\n\"\"\"\nfrom zope import interface\n\nfrom twisted.conch.interfaces import ISFTPServer, ISession\nfrom twisted.cred import portal\nfrom twisted.python import components, log\nfrom twisted.internet import defer\n\nfrom twisted.conch import avatar\nfrom twisted.conch.ssh import session\nfrom twisted.conch.ssh.filetransfer import FileTransferServer, SFTPError, \\\n FX_FAILURE, FX_NO_SUCH_FILE\nfrom twisted.conch.ssh.common import getNS\nfrom twisted.conch.ssh.transport import SSHServerTransport\n\nfrom swftp.swift import NotFound, Conflict\nfrom swftp.sftp.swiftfile import SwiftFile\nfrom swftp.sftp.swiftdirectory import SwiftDirectory\nfrom swftp.swiftfilesystem import SwiftFileSystem, swift_stat, obj_to_path\n\n\nclass SwiftSession:\n \" Barebones 
Session that closes when a client tries to open a shell \"\n interface.implements(ISession)\n\n def __init__(self, avatar):\n self.avatar = avatar\n\n def openShell(self, proto):\n self.avatar.conn.transport.transport.loseConnection()\n\n def getPty(self, term, windowSize, modes):\n pass\n\n def closed(self):\n pass\n\n\nclass SwiftSFTPRealm:\n \"Swift SFTP Realm\"\n interface.implements(portal.IRealm)\n\n def requestAvatar(self, avatarId, mind, *interfaces):\n avatar = SwiftSFTPAvatar(avatarId)\n return interfaces[0], avatar, avatar.logout\n\n\nclass SwiftFileTransferServer(FileTransferServer):\n # Overridden to expose the session to the file object to do intellegent\n # throttling. Without this, memory bloat occurs.\n def _cbOpenFile(self, fileObj, requestId):\n fileObj.session = self.transport.session\n FileTransferServer._cbOpenFile(self, fileObj, requestId)\n\n # This is overridden because Flow was sending data that looks to be invalid\n def packet_REALPATH(self, data):\n requestId = data[:4]\n data = data[4:]\n path, data = getNS(data)\n # assert data == '', 'still have data in REALPATH: %s' % repr(data)\n d = defer.maybeDeferred(self.client.realPath, path)\n d.addCallback(self._cbReadLink, requestId) # same return format\n d.addErrback(self._ebStatus, requestId, 'realpath failed')\n\n\nclass SwiftSSHServerTransport(SSHServerTransport):\n # Overridden to set the version string.\n version = 'SwFTP'\n ourVersionString = 'SSH-2.0-SwFTP'\n\n\nclass SwiftSFTPAvatar(avatar.ConchUser):\n \"Swift SFTP Avatar\"\n def __init__(self, swiftconn):\n avatar.ConchUser.__init__(self)\n self.swiftconn = swiftconn\n\n self.channelLookup.update({\"session\": session.SSHSession})\n self.subsystemLookup.update({\"sftp\": SwiftFileTransferServer})\n\n self.cwd = ''\n\n def logout(self):\n self.log_command('logout')\n self.swiftconn = None\n\n def log_command(self, command, *args):\n arg_list = ', '.join(str(arg) for arg in args)\n log.msg(\"COMMAND: %s(%s)\" % (command, arg_list),\n system=\"SwFTP-SFTP, (%s)\" % self.swiftconn.username,\n metric='command.%s' % command)\n\n\nclass SFTPServerForSwiftConchUser:\n \"SFTP Server For a Swift User\"\n interface.implements(ISFTPServer)\n\n def __init__(self, avatar):\n self.swiftconn = avatar.swiftconn\n self.swiftfilesystem = SwiftFileSystem(self.swiftconn)\n self.avatar = avatar\n self.conn = avatar.conn\n self.log_command('login')\n\n def log_command(self, *args, **kwargs):\n return self.avatar.log_command(*args, **kwargs)\n\n def gotVersion(self, otherVersion, extData):\n return {}\n\n def openFile(self, fullpath, flags, attrs):\n self.log_command('openFile', fullpath, flags, attrs)\n f = SwiftFile(self, fullpath, flags=flags, attrs=attrs)\n d = f.checkExistance()\n\n def errback(failure):\n failure.trap(NotFound)\n raise SFTPError(FX_FAILURE, \"Container Doesn't Exist\")\n\n d.addCallback(lambda r: f)\n d.addErrback(errback)\n return d\n\n def removeFile(self, fullpath):\n self.log_command('removeFile', fullpath)\n return self.swiftfilesystem.removeFile(fullpath)\n\n def renameFile(self, oldpath, newpath):\n self.log_command('renameFile', oldpath, newpath)\n d = self.swiftfilesystem.renameFile(oldpath, newpath)\n\n def errback(failure):\n failure.trap(NotFound, Conflict)\n if failure.check(NotFound):\n raise SFTPError(FX_NO_SUCH_FILE, 'No Such File')\n if failure.check(Conflict):\n raise NotImplementedError\n\n d.addErrback(errback)\n return d\n\n def makeDirectory(self, fullpath, attrs):\n self.log_command('makeDirectory', fullpath, attrs)\n\n def 
errback(failure):\n failure.trap(NotFound)\n raise SFTPError(FX_NO_SUCH_FILE, 'Directory Not Found')\n\n d = self.swiftfilesystem.makeDirectory(fullpath, attrs)\n d.addErrback(errback)\n return d\n\n def removeDirectory(self, fullpath):\n self.log_command('removeDirectory', fullpath)\n d = self.swiftfilesystem.removeDirectory(fullpath)\n\n def errback(failure):\n failure.trap(NotFound, Conflict)\n if failure.check(NotFound):\n return\n if failure.check(Conflict):\n raise SFTPError(FX_FAILURE, 'Directory Not Empty')\n\n d.addErrback(errback)\n return d\n\n def openDirectory(self, fullpath):\n self.log_command('openDirectory', fullpath)\n directory = SwiftDirectory(self.swiftfilesystem, fullpath)\n\n def cb(*result):\n return directory\n\n def errback(failure):\n failure.trap(NotFound)\n raise SFTPError(FX_FAILURE, 'Not Found')\n\n d = directory.get_full_listing()\n d.addCallback(cb)\n d.addErrback(errback)\n return d\n\n def getAttrs(self, fullpath, followLinks=False):\n self.log_command('getAttrs', fullpath)\n d = self.swiftfilesystem.getAttrs(fullpath)\n\n def cb(result):\n return self.format_attrs(result)\n\n def errback(failure):\n failure.trap(NotFound)\n raise SFTPError(FX_NO_SUCH_FILE, 'Not Found')\n\n d.addCallback(cb)\n d.addErrback(errback)\n\n return d\n\n def format_attrs(self, result):\n s = swift_stat(**result)\n return {\n \"size\": s.st_size,\n \"uid\": s.st_uid,\n \"gid\": s.st_gid,\n \"permissions\": s.st_mode,\n \"atime\": int(s.st_atime),\n \"mtime\": int(s.st_mtime)\n }\n\n def setAttrs(self, path, attrs):\n return\n\n def readLink(self, path):\n raise NotImplementedError\n\n def makeLink(self, linkPath, targetPath):\n raise NotImplementedError\n\n def realPath(self, path):\n container, obj = obj_to_path(path)\n real_path = '/'\n if container:\n real_path += container\n if obj:\n real_path += '/' + obj\n return real_path\n\n def extendedRequest(self, extName, extData):\n raise NotImplementedError\n\ncomponents.registerAdapter(\n SFTPServerForSwiftConchUser, SwiftSFTPAvatar, ISFTPServer)\ncomponents.registerAdapter(SwiftSession, SwiftSFTPAvatar, ISession)\n","sub_path":"swftp/sftp/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":7305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"263107905","text":"\"\"\"\n\n557. 
Reverse Words in a String III\n\nGiven a string, you need to reverse the order of characters\nin each word within a sentence while still preserving whitespace\nand initial word order.\n\nExample 1:\nInput: \"Let's take LeetCode contest\"\nOutput: \"s'teL ekat edoCteeL tsetnoc\"\nNote: In the string, each word is separated by single space\nand there will not be any extra space in the string.\n\n\n\n\"\"\"\n\n\n\n\n\nclass Solution(object):\n def reverseWords(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n result = \"\"\n \n for token in s.split(' '):\n result += token[::-1] + \" \"\n \n return result[:-1]\n \ndef main():\n \n solution = Solution()\n print ( \"s'teL ekat edoCteeL tsetnoc == \" + \\\n solution.reverseWords(\"Let's take LeetCode contest\") )\n \n \n \n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n","sub_path":"reverse_string3.py","file_name":"reverse_string3.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"119778137","text":"import os\nfrom flask_script import Manager, Shell, Server\nfrom app import create_app, db\nfrom app.address import models\n\n\napp = create_app(os.getenv('FLASK_CONFIG') or 'default')\nmanager = Manager(app)\n\n\ndef make_shell_context():\n return dict(app=app, db=db, Address=models.Address)\n\nmanager.add_command('shell', Shell(make_context=make_shell_context))\nmanager.add_command('runserver', Server())\nmanager.add_default_commands\n\n\n@manager.command\ndef tests():\n \"\"\"Run the unit tests.\"\"\"\n import unittest\n\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=3).run(tests)\n\n\n@manager.command\ndef create_db():\n \"\"\"Creates the database.\"\"\"\n db.create_all()\n db.session.commit()\n\n\n@manager.command\ndef recreate_db():\n \"\"\"Recreates the database.\"\"\"\n db.drop_all()\n db.create_all()\n db.session.commit()\n\nif __name__ == '__main__':\n manager.run()\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"7182234","text":"from ..items import AmazonSpiderItem\nimport scrapy\n\nclass CrawlAmazonSpider(scrapy.Spider):\n name = \"amazon\"\n page_num = 2\n start_urls = [\n # Sci-Fi-Books\n # \"https://www.amazon.in/s?i=stripbooks&bbn=976390031&rh=n%3A976389031%2Cn%3A1402038031%2Cp_n_publication_date%3A2684819031%2Cp_n_feature_three_browse-bin%3A9141482031&dc&qid=1611460927&rnid=976390031&ref=sr_nr_n_11\"\n # Crime-Thriller-Books\n \"https://www.amazon.in/s?i=stripbooks&bbn=976390031&rh=n%3A976389031%2Cn%3A1318161031%2Cp_n_publication_date%3A2684819031%2Cp_n_feature_three_browse-bin%3A9141482031&dc&qid=1611476080&rnid=976390031&ref=sr_nr_n_9\"\n ]\n\n def parse(self, response):\n items = AmazonSpiderItem()\n\n all_books = response.css(\".s-latency-cf-section\")\n\n for book in all_books:\n book_title = book.css(\".a-color-base.a-text-normal::text\").extract()\n book_author = book.css(\".a-color-secondary .a-size-base+ .a-size-base\").css(\"::text\").extract()\n book_price = book.css(\".a-spacing-top-mini+ .a-spacing-top-mini .a-spacing-mini:nth-child(1) .a-link-normal.a-text-bold , .a-price-whole , .a-spacing-top-small .a-text-bold\").css(\"::text\").extract()\n book_imagelink = book.css(\".s-image::attr(src)\").extract()\n\n items[\"book_author\"] = \"\".join([author.strip(\"\\n\") for author in book_author])\n items[\"book_price\"] = \" 
\".join([price.strip(\"\\n\") for price in book_price])\n\n if len(book_title) == 1:\n items[\"book_title\"] = book_title[0]\n else:\n items[\"book_title\"] = book_title\n\n try:\n items[\"book_imagelink\"] = book_imagelink[0]\n except:\n items[\"book_imagelink\"] = book_imagelink\n\n yield items\n\n # next_page = f\"https://www.amazon.in/s?i=stripbooks&bbn=976390031&rh=n%3A976389031%2Cn%3A1402038031%2Cp_n_publication_date%3A2684819031%2Cp_n_feature_three_browse-bin%3A9141482031&dc&page={CrawlAmazonSpider.page_num}&qid=1611468424&rnid=976390031&ref=sr_pg_2\"\n next_page = f\"https://www.amazon.in/s?i=stripbooks&bbn=976390031&rh=n%3A976389031%2Cn%3A1318161031%2Cp_n_publication_date%3A2684819031%2Cp_n_feature_three_browse-bin%3A9141482031&dc&page={CrawlAmazonSpider.page_num}&qid=1611476083&rnid=976390031&ref=sr_pg_2\"\n\n # alternate these variables for diff genre\n # scifi_num = 100\n crime_num = 75\n if CrawlAmazonSpider.page_num <= crime_num :\n yield response.follow(next_page, callback=self.parse)\n CrawlAmazonSpider.page_num += 1\n\n\n\n","sub_path":"Python/Scrapy/amazon_spider/amazon_spider/spiders/crawl_amazon.py","file_name":"crawl_amazon.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"155951873","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\ndef triangles():\n index, List = 0, [1]\n while True:\n yield List\n List.append(0)\n while index < len(List):\n List[index] = List[index-1] + List[index]\n index = index + 1\n # List = [List[i-1] + List[i] for i in range(len(List))]\n return 'Done'\n","sub_path":"liaoxuefeng/Generator.py","file_name":"Generator.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"631190232","text":"# -*- coding: utf-8 -*-\nimport tensorflow as tf\nimport os\nimport collections\nimport numpy as np\n\n#seq length 为num_step-1\ndef batch_iter(x,y,batch_size):\n data_len=len(x)\n num_batch=int((data_len-1)/batch_size)+1\n\n for i in range(num_batch):\n start_id=i*batch_size\n end_id=min((i+1)*batch_size,data_len)\n if end_id-start_id id max_length——numsteps max_data_row——使用训练数据行数\ndef _file_to_word_ids(filename, word_to_id, max_length=None, max_data_row=None):\n data = []\n with open(filename, 'r') as f:\n lines = f.readlines()\n count =0\n for line in lines:\n word_ids=[]\n line = line.strip()\n\n words = line.split(\" \")\n if max_length and len(words) >= max_length:\n continue\n for word in words:\n if word in word_to_id:\n word_ids.append(word_to_id[word])\n else:\n word_ids.append(word_to_id['UNK'])\n word_ids.append(word_to_id['ENDMARKER'])\n if max_length:\n for i in range(max_length - len(words)-1):\n word_ids.append(word_to_id['PAD'])\n count +=1\n data.append(word_ids)\n if max_data_row and count>max_data_row:\n break\n return data\n\n\ndef raw_data(max_data_row,data_path=None, word_to_id=None, max_length=None):\n train_path = os.path.join(data_path,\"c/train.txt\")\n val_path=os.path.join(data_path,'c/val.txt')\n test_path = os.path.join(data_path, \"c/test.txt\")\n\n train_data=np.asarray(_file_to_word_ids(train_path, word_to_id, max_length, max_data_row=max_data_row))\n val_data=np.asarray(_file_to_word_ids(val_path,word_to_id,max_length))\n test_data= np.asarray(_file_to_word_ids(test_path, word_to_id, max_length))\n\n vocabulary_size = len(word_to_id)\n end_id = word_to_id['ENDMARKER']\n left_id = word_to_id['{']\n right_id = word_to_id['}']\n 
PAD_ID = word_to_id['PAD']\n return train_data,val_data,test_data,vocabulary_size, end_id, left_id, right_id, PAD_ID\n\ndef get_pair(data):\n labels=data[:,1:]\n inputs=data[:,:-1]\n return inputs,labels\n\ndef _read_words(filename):\n with tf.gfile.GFile(filename, 'r') as f:\n return f.read().replace(\"\\r\\n\", \" ENDMARKER \").split(' ')\n\ndef _build_vocab(filename,vocab_size):\n data = _read_words(filename)\n counter = collections.Counter(data)\n count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n words, values = list(zip(*count_pairs))\n words = words[0:vocab_size-2]\n word_to_id = dict(zip(words, range(len(words))))\n word_to_id['UNK'] = len(word_to_id)\n word_to_id['PAD'] = len(word_to_id)\n return word_to_id\n\n\ndef reverseDic(curDic):\n newmaplist = {}\n for key, value in curDic.items():\n newmaplist[value] = key\n return newmaplist\n\n\n","sub_path":"tools/data_reader.py","file_name":"data_reader.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"567603677","text":"#!/usr/bin/python3\nfrom common import countDivisors, triangleNumGenerator\nimport time\n\nif __name__ == '__main__':\n triangGen = triangleNumGenerator()\n\n cntr = 1\n\n start = time.time()\n\n for i in triangGen:\n divs = countDivisors(i)\n #\tprint(cntr,'<->',i,'<->', divs)\n\n if divs > 499:\n print(cntr,'<->',i,'<->', divs)\n break\n cntr += 1\n \n print(\"It was done in {0} seconds\".format(time.time() - start))\n","sub_path":"python/problem12.py","file_name":"problem12.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"597015517","text":"import base64\nimport json\nfrom Crypto.Cipher import ChaCha20\nfrom Crypto.Random import get_random_bytes\n\nplaintext = b'This is the secret message to encrypt'\n\nkey = get_random_bytes(32)\nnonce = get_random_bytes(8)\n\ncipher = ChaCha20.new(key=key,nonce=nonce)\nciphertext = cipher.encrypt(plaintext)\n\nnonceb64 = base64.b64encode(cipher.nonce).decode('utf-8')\nciphertextb64 = base64.b64encode(ciphertext).decode('utf-8')\nresult = json.dumps({'nonce':nonceb64, 'ciphertext':ciphertextb64})\nprint(result)\n\n\n#unpack and decipher\nb64 = json.loads(result)\nciphertext2 = base64.b64decode(b64['ciphertext'])\nnonce2 = base64.b64decode(b64['nonce'])\nprint(nonce2)\nprint(nonce)\n\ncipher_dec = ChaCha20.new(key=key,nonce=nonce2)\nplaintext_dec = cipher_dec.decrypt(ciphertext2)\n\n# smarter use of JSON objects even more useful when more data are saved\n# json_k = [ 'nonce', 'ciphertext']\n# json_v = [ base64.b64encode(x).decode('utf-8') for x in (cipher.nonce, ciphertext) ]\n# result2 = json.dumps(dict(zip(json_k, json_v)))\n# print(result2)\n#\n# b64 = json.loads(result2)\n# json_k = [ 'nonce', 'ciphertext']\n# jv = {k:base64.b64decode(b64[k]) for k in json_k}\n#\n# cipher_dec = ChaCha20.new(key=key,nonce=jv['nonce'])\n# plaintext_dec = cipher_dec.decrypt(jv['ciphertext'])\n#\n\nprint(plaintext_dec)\n","sub_path":"AY2021/py-basics/symmetric/7.stream_json.py","file_name":"7.stream_json.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"49741883","text":"def createtable(principal,apr):\n for year in range(1,11):\n principal = principal*(1+apr)\n print(\"%2d\"%year,end='')\n total = caculatenum(principal)\n print(\"*\"*total)\n print(\"0.0k 2.5k 5.0k 7.5k 10.0k\")\ndef 
caculatenum(principal):\n total = int(principal*4/1000.0)\n return total\ndef main():\n print(\"请输入本金和利率\")\n principal = eval(input(\"请输入本金\"))\n apr = eval(input(\"请输入利率\"))\n createtable(principal,apr)\nmain()\n","sub_path":"python/银行利息图像.py","file_name":"银行利息图像.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"652611034","text":"\"\"\"\nrc_eval.py\nStandalone script to evaluate RC values given an aimless shooting working directory and reaction coordinate definition\n\"\"\"\n\nimport sys\nimport os\nimport glob\nimport argparse\nimport pickle\nimport time\nimport math\nfrom atesa import utilities\n\n\ndef update_progress(progress, message='Progress', eta=0, quiet=False):\n \"\"\"\n Print a dynamic progress bar to stdout.\n\n Credit to Brian Khuu from stackoverflow, https://stackoverflow.com/questions/3160699/python-progress-bar\n\n Parameters\n ----------\n progress : float\n A number between 0 and 1 indicating the fractional completeness of the bar. A value under 0 represents a 'halt'.\n A value at 1 or bigger represents 100%.\n message : str\n The string to precede the progress bar (so as to indicate what is progressing)\n eta : int\n Number of seconds to display as estimated completion time (converted into HH:MM:SS)\n quiet : bool\n If True, suppresses output entirely\n\n Returns\n -------\n None\n\n \"\"\"\n\n if quiet:\n return None\n\n barLength = 10 # Modify this to change the length of the progress bar\n status = \"\"\n if isinstance(progress, int):\n progress = float(progress)\n if not isinstance(progress, float):\n progress = 0\n status = \"error: progress var must be float\\r\\n\"\n if progress < 0:\n progress = 0\n status = \"Halt...\\r\\n\"\n if progress >= 1:\n progress = 1\n status = \"Done! \\r\\n\"\n block = int(round(barLength * progress))\n if eta:\n # eta is in seconds; convert into HH:MM:SS\n eta_h = str(math.floor(eta/3600))\n eta_m = str(math.floor((eta % 3600) / 60))\n eta_s = str(math.floor((eta % 3600) % 60)) + ' '\n if len(eta_m) == 1:\n eta_m = '0' + eta_m\n if len(eta_s) == 2:\n eta_s = '0' + eta_s\n eta_str = eta_h + ':' + eta_m + ':' + eta_s\n text = \"\\r\" + message + \": [{0}] {1}% {2}\".format(\"#\" * block + \"-\" * (barLength - block), round(progress * 100, 2), status) + \" ETA: \" + eta_str\n else:\n text = \"\\r\" + message + \": [{0}] {1}% {2}\".format(\"#\" * block + \"-\" * (barLength - block), round(progress * 100, 2), status)\n sys.stdout.write(text)\n sys.stdout.flush()\n\n\ndef main(working_directory, rc_definition):\n \"\"\"\n The main function of rc_eval.py. 
Accepts an aimless shooting working directory and a reaction coordinate definition,\n producing in that directory a new file named 'rc.out' (overwriting if one already exists) identifying each shooting\n point (files in the directory whose names end in \"_init.rst7\") and its corresponding reaction coordinate value in a\n sorted list.\n\n Parameters\n ----------\n working_directory : str\n The path to the aimless shooting working directory in which to act\n rc_definition : str\n A reaction coordinate definition formatted as a string of python-readable code with \"CV[X]\" standing in for the\n Xth CV value (one-indexed); this RC definition should be in terms of reduced variables (values between 0 and 1)\n\n Returns\n -------\n None\n\n \"\"\"\n\n # Change to working directory\n os.chdir(working_directory)\n\n # Unpickle settings object for use in utilities.get_cvs\n try:\n settings = pickle.load(open('settings.pkl', 'rb'))\n except FileNotFoundError: # replace with more informative error message\n raise FileNotFoundError('the working directory must contain a valid settings.pkl file, which is generated '\n 'automatically when running ATESA, but one was not found in the working directory: '\n + working_directory)\n\n # Obtain list of shooting point coordinate files\n file_list = glob.glob('*_init.rst7')\n if not file_list:\n raise FileNotFoundError('no valid shooting point files (as given by names ending in \\'_init.rst7\\') were found '\n 'in the working directory \\'' + working_directory + '\\'. Is this an aimless shooting '\n 'working directory?')\n\n # Iterate through the list, calling evaluate_rc for each one and storing the result\n results = []\n count = 0\n count_to = len(file_list)\n update_progress(0, 'Evaluating RC values')\n speed_data = [0, 0]\n for file in file_list:\n t = time.time()\n cv_list = utilities.get_cvs(file, settings, reduce=settings.rc_reduced_cvs).split(' ')\n results.append([file + ': ', utilities.evaluate_rc(rc_definition, cv_list)])\n this_speed = time.time() - t\n speed_data = [(speed_data[1] * speed_data[0] + this_speed) / (speed_data[1] + 1), speed_data[1] + 1]\n count += 1\n eta = (count_to - count) * speed_data[0]\n update_progress(count/count_to, 'Evaluating RC values', eta=eta)\n results = sorted(results, key=lambda x: abs(float(x[1]))) # sort results by absolute value of RC\n\n # Create and write to rc.out file\n open('rc.out', 'w').close()\n with open('rc.out', 'a') as f:\n for result in results:\n f.write(result[0] + str(result[1]) + '\\n')\n f.close()\n\n\nif __name__ == '__main__':\n main(sys.argv[1], sys.argv[2])\n","sub_path":"atesa/rc_eval.py","file_name":"rc_eval.py","file_ext":"py","file_size_in_byte":5230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"325223080","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom zerothrift.context import *\nfrom zerothrift.events import *\nfrom zerothrift.core.client import *\nfrom zerothrift.core.server import *\n\n_transport = None\n_endpoint = None\n\ndef get_transport(endpoint=None, timeout=5):\n global _transport, _endpoint\n\n assert _endpoint is None or _endpoint == endpoint\n\n if not _transport:\n _endpoint = endpoint\n _transport = TZmqTransport(endpoint, zmq.DEALER, timeout=timeout)\n _transport.open()\n return _transport\n\n\ndef get_protocol(service):\n assert _transport\n return TZmqBinaryProtocol(_transport, service=service)\n\n\nRPC_DEFAULT_CONFIG = \"config.ini\"\n\nRPC_ZK = \"zk\"\nRPC_ZK_TIMEOUT = 
\"zk_session_timeout\"\n\nRPC_PRODUCT = \"product\"\nRPC_SERVICE = \"service\"\n\nRPC_FRONT_HOST = \"front_host\"\nRPC_FRONT_PORT = \"front_port\"\nRPC_IP_PREFIX = \"ip_prefix\"\n\nRPC_BACK_ADDRESS = \"back_address\"\n\nRPC_WORKER_POOL_SIZE = \"worker_pool_size\"\nRPC_PROXY_ADDRESS = \"proxy_address\"\n\n\ndef parse_config(config_path):\n config = {}\n for line in open(config_path, \"r\").readlines():\n line = line.strip()\n if not line or line.find(\"#\") != -1:\n continue\n items = line.split(\"=\")\n if len(items) >= 2:\n config[items[0]] = items[1]\n return config\n\n","sub_path":"zerothrift/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"320068504","text":"\nimport numpy as np \n\n\nnumEpocas = 10000 #70000\nnumAmostras = 6\n\npeso = np.array([113, 122, 107, 98, 115, 120])\npH = np.array([6.8, 4.7, 5.2, 3.6, 2.9, 4.2])\n\nnormaliza = False\nif normaliza:\n peso = peso/np.linalg.norm(peso)\n pH = pH/np.linalg.norm(peso)\n\n\n\nbias = 1\n\nX = np.vstack((peso, pH)) # Ou X = np.asarray([peso, pH])\nY = np.array([-1, 1, -1, -1, 1, 1])\n\n\neta = 0.1\n\ne = np.zeros(6)\n\nW = np.ones([1,3])\n\nfor j in range(numEpocas):\n for k in range(numAmostras):\n Xb = np.hstack((bias, X[:,k]))\n \n V = np.dot(W, Xb)\n \n Yr = np.sign(V)\n \n e[k] = Y[k] - Yr\n \n W = W + eta*e[k]*Xb\n \nprint(\"Vetor de errors (e) = \" + str(e))","sub_path":"cod.py","file_name":"cod.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"626873634","text":"import os\nimport pprint\n#check os type to call correct screen clear function\nif os.name == 'nt':\n clear = \"os.system('cls')\"\nelse:\n clear = \"os.system('clear')\"\n\n\nwhile True:\n chcount = {}\n msg = input(\"Insert Mesage (empty for quit): \")\n if msg == '':\n break\n else:\n for char in msg:\n chcount.setdefault(char, 0)\n chcount[char]= chcount[char] + 1\n print(chcount)\n input()\n exec(clear)","sub_path":"exericies/python.old/Automating The Boring Stuff/CharCount.py","file_name":"CharCount.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"365313325","text":"__author__ = 'onotole'\n\n\nimport requests\nfrom config import *\nREQ_GET_TANKS_ID = \"https://api.wotblitz.ru/wotb/encyclopedia/vehicles/?application_id=\"+applicationID+\\\n \"&fields=default_profile.weight\"\n\nsession = requests.Session()\nreq = REQ_GET_TANKS_ID\nr = session.get(req).json()[\"data\"]\nsum_mass = 0\nprint(r)\nfor tank_id in r:\n sum_mass += r[tank_id]['default_profile']['weight']\n\nprint(sum_mass)","sub_path":"wotBlitzStat1/dumpWOTstats/sum_mass.py","file_name":"sum_mass.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"185750094","text":"from django.shortcuts import render\n# Create your views here.\n\nimport json\nfrom django.http import HttpResponse\nfrom company.models import Company\nfrom django.utils.decorators import method_decorator\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import View\nfrom company.forms import CompanyForm\nfrom django.db.models import F\nfrom django.db.models import Q\nfrom django.db.models import Sum\n\n#无参数的方法\n@method_decorator(csrf_exempt, name='dispatch')\nclass CompanyView(View):\n\n 
#用form表单接收\n def get(self, request):\n res = CompanyForm(request.GET)\n if not res.is_valid():\n return HttpResponse(status=422)\n company = Company.objects.filter(company_name=res.data.get('company_name'))\n print(company.values())\n return HttpResponse(status=201)\n\n #把传入的json信息提取并作为表的数据内容创建新数据\n def post(self, request):\n stream = request.body.decode()\n json_data = json.loads(stream)\n company = Company.objects.create(company_name=json_data['company_name'])\n return HttpResponse(status=201, content={'company_id': company.company_id})\n\n#有参数的方法\n@method_decorator(csrf_exempt,name='dispatch')\nclass CompaniesView(View):\n\n #带参数的get方法\n def get(self,request,company_name):\n company = Company.objects.get(company_name=company_name)\n print(company.company_id)\n company1 = Company.objects.filter(company_name=company_name)\n print(company1)\n return HttpResponse(status=201)\n\n def put(self,request,company_id):\n company=Company.objects.get(company_id=company_id)\n Company.objects.get(company_id=company_id).update(company_name='zmy')\n return HttpResponse(status=201)\n\n\n def delete(self,request,company_id):\n Company.objects.filter(log_id=company_id).delete()\n return HttpResponse(status=201)\n\n\n # def get(self,request,company_id):\n # try:\n # company = Company.objects.get(company_name=company_name)\n # except Compeny.DoesNotExist:\n # return HttpResponse(company.detail_info())\n # return HttpResponse(company.detail_info())\n","sub_path":"park/company/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"374281105","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def levelOrder(self, root: TreeNode) -> List[List[int]]:\n if not root: return\n q, ret = collections.deque([root]), []\n \n while True:\n tmp, L = [], len(q)\n \n for i in range(L):\n n = q.popleft()\n if n.left: q.append(n.left)\n if n.right: q.append(n.right)\n tmp.append(n.val)\n \n ret.append(tmp)\n if not len(q): break\n \n return ret\n ","sub_path":"0102.binary-tree-level-order-traversal/binary-tree-level-order-traversal.py","file_name":"binary-tree-level-order-traversal.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"296802791","text":"import threading\r\n\r\ncount = 0\r\n\r\ndef OnTimer(): \r\n global count\r\n count += 1\r\n print(count)\r\n\r\n if count < 10:\r\n timer = threading.Timer(1, OnTimer)\r\n timer.start()\r\n\r\n #if count == 10:\r\n # print(\"Canceling timer...\")\r\n # timer.cancel()\r\n\r\nprint(\"Starting timer...\")\r\nOnTimer()","sub_path":"12/TimerStartCancel.py","file_name":"TimerStartCancel.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"384257557","text":"from manimlib.imports import *\n\n\nclass Plot1(GraphScene):\n CONFIG = {\n \"y_max\": 50,\n \"y_min\": 0,\n \"x_max\": 7,\n \"x_min\": 0,\n \"y_tick_frequency\": 5,\n \"x_tick_frequency\": 0.5,\n \"axes_color\": BLUE,\n \"y_labeled_nums\": range(0, 60, 10),\n \"x_labeled_nums\": list(np.arange(2, 7.0 + 0.5, 0.5)),\n \"x_label_decimal\": 1,\n \"y_label_direction\": RIGHT,\n \"x_label_direction\": UP,\n \"y_label_decimal\": 3\n }\n\n def construct(self):\n self.setup_axes(animate=True)\n graph = 
self.get_graph(lambda x: x**2, color=GREEN, x_min=2, x_max=4)\n self.play(ShowCreation(graph), run_time=2)\n self.wait()\n\n\nclass Plot1v2(GraphScene):\n CONFIG = {\n \"y_max\": 50,\n \"y_min\": 0,\n \"x_max\": 7,\n \"x_min\": 0,\n \"y_tick_frequency\": 5,\n \"x_tick_frequency\": 1,\n \"axes_color\": BLUE,\n \"graph_origin\": np.array((0, 0, 0))\n }\n\n def construct(self):\n self.setup_axes(animate=True)\n graph = self.get_graph(lambda x: x**2, color=GREEN, x_min=2, x_max=4)\n self.play(ShowCreation(graph), run_time=2)\n self.wait()\n\nclass Plot2(GraphScene):\n CONFIG = {\n \"y_max\" : 50,\n \"y_min\" : 0,\n \"x_max\" : 7,\n \"x_min\" : 0,\n \"y_tick_frequency\" : 5,\n \"axes_color\" : BLUE,\n \"x_axis_label\" : \"$t$\",\n \"y_axis_label\" : \"$f(t)$\",\n }\n def construct(self):\n self.setup_axes()\n graph = self.get_graph(lambda x : x**2, color = GREEN)\n self.play(\n \tShowCreation(graph),\n run_time = 2\n )\n self.wait()\n\n def setup_axes(self):\n # Add this line\n GraphScene.setup_axes(self)\n # Parametters of labels\n # For x\n init_label_x = 2\n end_label_x = 7\n step_x = 1\n # For y\n init_label_y = 20\n end_label_y = 50\n step_y = 5\n # Position of labels\n # For x\n self.x_axis.label_direction = DOWN #DOWN is default\n # For y\n self.y_axis.label_direction = LEFT\n # Add labels to graph\n # For x\n self.x_axis.add_numbers(*range(\n init_label_x,\n end_label_x+step_x,\n step_x\n ))\n # For y\n self.y_axis.add_numbers(*range(\n init_label_y,\n end_label_y+step_y,\n step_y\n ))\n # Add Animation\n self.play(\n ShowCreation(self.x_axis),\n ShowCreation(self.y_axis)\n )\n","sub_path":"EB_plot_2d.py","file_name":"EB_plot_2d.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"172611180","text":"# This code will get the number inputted and\r\n\r\nbase_number = float(input(\"Enter the base number:\")) # input base number\r\nexponent_number = float(input(\"Enter the exponent number:\")) # input exponent number\r\nresult = 1\r\ncount = 0 # count initiates on 0\r\n\r\nwhile count < exponent_number: # While true\r\n count += 1 # Add 1 to count\r\n result *= base_number \r\n\r\nprint(\"The base\", base_number, \"power with the exponent\", exponent_number, \"is equal to =\", result)\r\n","sub_path":"wiki.python.org.br/Repeat_Structure/13 Exponentiation.py","file_name":"13 Exponentiation.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"218710095","text":"from inkml_dataset import InkMLDataSet\nfrom inkml_dataset import InkMLFile\nimport inkml_dataset\nimport sys\nimport cv2\nimport json\ndef processOneFile():\n\n input_inkml = sys.argv[1]\n output_path = sys.argv[2]\n inkml_file = InkMLFile()\n inkml_file.load(input_inkml)\n\n cv2.imshow(\"xxxxx\", inkml_file.annotation_imge())\n cv2.waitKey()\n\n\ndef processDataset(file_list, json_out_file, save_image = False):\n dataset = InkMLDataSet(\"FCinkML/\")\n dataset.file_list = file_list\n dataset.load()\n\n annotation = dataset.get_annotation()\n annotation_str = json.dumps(annotation, indent=2)\n\n with open(json_out_file, \"w\") as coco_json_file:\n coco_json_file.write(annotation_str)\n\n if save_image:\n dataset.save_images(\"images\")\n\ndef processAllFile():\n processDataset(\"listInkML_Train.txt\", \"inkml_train.json\", False)\n processDataset(\"listInkML_Test.txt\", \"inkml_val.json\", False)\n\nif __name__ == \"__main__\":\n 
#processOneFile()\n inkml_dataset.set_debugging_mode(True)\n processAllFile()\n #processDataset(\"listInkML_Dev.txt\", \"inkml_dev.json\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"273987726","text":"#Mitch Zinser\n#Genetic Algorithm that evolves a desired string from a randomly created pool of candidate strings\n#Command line inputs: file.py string_to_find population_size\n#Neither are required, string defaults to Hello World, population size defaults to 1000\nimport random, string, time, sys \n#String for list of letters, time for timing operation\nletter_pool = string.ascii_letters + string.punctuation + \" \"\n\n\n#Fitness function. Takes in goal string and string to eval. Returns number for fitness (0 <= fitness <= len(goal_str))\ndef fitness(goal, str_in):\n\tfit = 0\n\tfor i in range(len(goal)):\n\t\tif goal[i] == str_in[i]:\n\t\t\tfit += 1\n\treturn fit\n\n\n#Function that populates the starting generation. Takes in number of citizens, length of each citizen's string, and goal string, returns list that is a list of citizens\ndef pop_gen(pop_num, str_len, goal_str):\n\t#List that is the generation being created. Each entry is a citizen list of [fitness func value, string]\n\tgen = []\n\tfor i in range(pop_num):\n\t\t#Choose str_len number of random letters (upper and lower)\n\t\tcitizen = \"\"\n\t\tfor j in range(str_len):\n\t\t\tcitizen += random.choice(letter_pool)\n\t\t#Start with fitness function being 0\n\t\tgen.append([fitness(goal_str, citizen), citizen])\n\treturn gen\n\n#Genetic algorithm loop. Takes in goal word, population size, crossover rate, mutation rate, and max generations. Returns list of all generations\ndef genetic(goal, pop_size, cross_rate, mutate_rate, gen_max):\n\t#Get length of goal string\n\tgoal_len = len(goal)\n\t#Get goal fitness for solution\n\tgoal_fit = fitness(goal, goal)\n\t#Get indexes of the 1/4 point\n\tparent_cutoff = int(pop_size/4)\n\t#Get index of where to split string of parent in crossover\n\tcross_index = int(goal_len*cross_rate)\n\n\t#Each generation is list of citizens (each citizen is a list of [fitness func value, string])\n\tcur_gen = []\n\tnext_gen = []\n\t#list of best fit for each generation. This allows basic archiving without wasting as much memory\n\tgenerations = []\n\t#Create starting population\n\tcur_gen = pop_gen(pop_size, goal_len, goal)\n\n\t#Start main generation loop. Each iteration creates a new generation and evaluates the current one. Run until solution found or generation limit reached\n\tfor i in range(gen_max):\n\t\t#Record best candidate in this generation\n\t\tgenerations.append(max(cur_gen))\n\n\t\t#Check if any of the generation are a perfect fit. Report and break if they are (the print must come before the break to be reachable)\n\t\tif max(cur_gen)[0] == goal_fit:\n\t\t\tprint(\"Break with\", max(cur_gen))\n\t\t\tbreak\n\t\t#Make pool of best fitness citizens and some not great fits for diversity. Do this by iterating through current generation and sorting. 
Take the top half and then\n\t\t#Fill pool with top 3/4 of this generations citizens\n\t\tparent_pool = sorted(cur_gen)[parent_cutoff:]\n\n\t\t#Population size of next generation\n\t\tnext_gen_len = 0\n\t\t#Create children until the next generation is long enough\n\t\twhile (next_gen_len < pop_size):\n\t\t\t#Choose two parents\n\t\t\tparent1 = random.choice(parent_pool)\n\t\t\tparent2 = random.choice(parent_pool)\n\t\t\t#If they aren't the same, create two children for the next generation\n\t\t\tif parent1 != parent2:\n\t\t\t\t'''Using one point crossover\n\t\t\t\tat crossover percent of parent. eg. crossover rate = 0.4, take 0.4 of first parent and 0.6 of second, then vice versa'''\n\t\t\t\t#Create two children from parent\n\t\t\t\t#Store children strings as list \n\t\t\t\t#Create first child\n\t\t\t\tchild = []\n\t\t\t\t#Run crossover with first part from parent1\n\t\t\t\tchild.extend(list(parent1[1][:cross_index]))\n\t\t\t\tchild.extend(list(parent2[1][cross_index:]))\n\t\t\t\t#Apply mutation rate to child\n\t\t\t\tfor let in range(goal_len):\n\t\t\t\t\t#Get random number, if it is below mutation rate, change letter to random letter\n\t\t\t\t\tif random.random() < mutate_rate:\n\t\t\t\t\t\tchild[let] = random.choice(letter_pool)\n\t\t\t\tchild = \"\".join(child)\n\t\t\t\t#Get fitness value of child and add to next generation\n\t\t\t\tnext_gen.append([fitness(goal, child), child])\n\t\t\t\t\n\n\t\t\t\t#Create second child\n\t\t\t\tchild = []\n\t\t\t\t#Run crossover with first part from parent2\n\t\t\t\tchild.extend(list(parent2[1][:cross_index]))\n\t\t\t\tchild.extend(list(parent1[1][cross_index:]))\n\t\t\t\t#Apply mutation rate to child\n\t\t\t\tfor let in range(goal_len):\n\t\t\t\t\t#Get random number, if it is below mutation rate, change letter to random letter\n\t\t\t\t\tif random.random() < mutate_rate:\n\t\t\t\t\t\tchild[let] = random.choice(letter_pool)\n\t\t\t\tchild = \"\".join(child)\n\t\t\t\t#Get fitness value of child and add to next generation\n\t\t\t\tnext_gen.append([fitness(goal, child), child])\n\t\t\t\t#Increment next gen size\n\t\t\t\tnext_gen_len += 2\n\n\t\t#Copy next generation into current generation and reset next generation\n\t\tcur_gen = next_gen\n\t\tnext_gen = []\n\t\t#Print progress\n\t\tif i%100 == 0:\n\t\t\tprint(\"On generation:\", i)\n\n\n\treturn generations\n\n'''Observations\nPopulation:\nHigher populations (eg 1000+) take longer to run, but require fewer generations.\nAround 100-200 runs fastest for shorter strings, but require many more generations.\nNearly linear (Time to run and generations required) from observations, not quite, but enough for estimation.\n\nCrossover Rate:\nNot much studied. Around 0.5 seems to work well, not sure how relevant is is for this algorithm since two children are created from two parents\n\nMutation rate:\n0.1 seems to high for later calculations, 0 is too low to converge. Could make it variable and decreases each generation\n0.01 Tests very well so far\n\nGeneration Max:\nNot much to be said. Would be a problem for lower populations. Would need to be increased when running low pop. 
for speed.\n\nSeeding:\nRandom number engine seeding does work for this algorithm, as expected.\n'''\n\nif __name__ == \"__main__\":\n\tif len(sys.argv) > 1:\n\t\t#String that will try to be evolved to\n\t\tgoal_string = sys.argv[1]\n\telse:\n\t\tgoal_string = \"Hello World\"\n\tif len(sys.argv) > 2:\n\t\tpop = int(sys.argv[2])\n\telse:\n\t\t#Population size\n\t\tpop = 1000\n\n\t#Crossover rate: fraction of the string taken from the 1st parent (one-point split index)\n\tcross = 0.5\n\t#Mutation rate, percent chance of each letter to be mutated\n\tmut = 0.01\n\t#Max amount of generations to create\n\tgen = 4000\n\t#Get fitness of goal string\n\tgoal_fitness = fitness(goal_string, goal_string)\n\tprint(\"Looking for: \" + goal_string + \" with max fitness of \" + str(goal_fitness))\n\n\t#Start timer\n\tstart = time.time()\n\n\t#Start genetic algorithm\n\tgeneration_done = genetic(goal_string, pop, cross, mut, gen)\n\tend = time.time()\n\t\n\n\t'''Just for writing to file'''\n\tprint(\"----------------------\")\n\tprint(\"Done in\", end-start, \"Writing to file\")\n\n\t#Create file name\n\tfilename = goal_string[:5] + \"-pop\" + str(pop) + \"-cross0,\" + str(cross)[2:] + \"-mutate0,\" + str(mut)[2:] + \"-genmax\" + str(gen) + \".txt\"\n\t#Check if solution was ever found\n\tif generation_done[len(generation_done)-1][0] == goal_fitness:\n\t\tprint(\"Solution found on generation: \" + str(len(generation_done)))\n\telse:\n\t\tprint(\"No solution found, only approximation\")\n\tprint(\"Best fit found: \" + str(generation_done[len(generation_done)-1]))\n\tprint(\"Data saved to\",filename)\n\t#Write to file\n\twith open(filename, 'w') as out_file:\n\t\t#Write header\n\t\tout_file.write(\"Looking for: \" + goal_string + \" with max fitness of \" + str(goal_fitness) + \"\\n\")\n\t\tout_file.write(\"Population size: \" + str(pop) + \"\\n\")\n\t\tout_file.write(\"Crossover rate: \" + str(cross) + \"\\n\")\n\t\tout_file.write(\"Mutation rate: \" + str(mut) + \"\\n\")\n\t\tout_file.write(\"Max number of generations: \" + str(gen) + \"\\n\")\n\t\tout_file.write(\"Time to run: \" + str(end-start) + \"\\n\")\n\t\t#Check if solution was ever found\n\t\tif generation_done[len(generation_done)-1][0] == goal_fitness:\n\t\t\tout_file.write(\"Solution found on generation: \" + str(len(generation_done)) + \"\\n\")\n\t\t#Otherwise say it could only find best fit\n\t\telse:\n\t\t\tout_file.write(\"No solution found, only best fit\" + \"\\n\")\n\t\t#Print the best fit from the last generation\n\t\tout_file.write(\"Best fit found: \" + str(generation_done[len(generation_done)-1]) + \"\\n\")\n\t\t#Print only the best fit from each generation\n\t\tfor gen_num, i in enumerate(generation_done):\n\t\t\tout_file.write(\"Generation \" + str(gen_num) + \"\\n\")\n\t\t\t#Print the best citizen of each generation\n\t\t\tout_file.write(str(i) + \"\\n\")","sub_path":"249[I] - Hello World Genetic Algorithm/Sent_Version/Zinser_Genetic_String_Evolver.py","file_name":"Zinser_Genetic_String_Evolver.py","file_ext":"py","file_size_in_byte":8086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"430275678","text":"import numpy as np, cmath, math\nimport sympy as sym\n\nsym.init_printing()\n\nzl, z0, beta, lamda, z_p = sym.symbols('Z_l Z_0 β λ Z_p')\nbeta = (sym.pi)/lamda\nz_e = z0 * ((zl + (z0*sym.tan(beta*z_p))*sym.I)/(z0+(zl*sym.tan(beta*z_p))*sym.I))\n\nz_e = z_e.subs([(z0,100),(zl,260+sym.I*180),(z_p,0.434*lamda)])\n\nprint(sym.pretty(z_e))\n
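# z_e is the transmission-line input impedance Z_e = Z_0*(Z_l + j*Z_0*tan(β*l))/(Z_0 + j*Z_l*tan(β*l)), here with l = 0.434λ\n# (note: the usual propagation constant is β = 2π/λ; β is defined as π/λ above)\n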
print(f\"Z_e = {z_e.evalf()}\")\n","sub_path":"Pylab/TO/general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"312982539","text":"'''\nConvert table data from html to CSV.\n\nUpdate log: (date / version / author : comments)\n2020-06-27 / 1.0.0 / Du Jiang : Creation\n Support Transaction\n'''\n\nimport csv\nimport getopt\nimport os\nimport sys\nfrom time import localtime, strftime, time\n\nfrom bs4 import BeautifulSoup\n\n# Global variables.\n# The value can be updated by command line options.\n__input_folder_path = None\n__input_file_prefix = None\n__output_file_path = None\n\n\ndef getDataFileList():\n expected_file_list = []\n full_file_list = os.listdir(__input_folder_path)\n for file_name in full_file_list:\n if file_name.startswith(__input_file_prefix) and file_name.endswith(\".html\"):\n expected_file_list.append(file_name)\n print(\"expected_file_list =\", expected_file_list)\n return expected_file_list\n\n\ndef process_inventory_list():\n print(\"-\" * 100)\n time_str = strftime(\"%Y-%m-%d %H:%M:%S\", localtime(time()))\n print(\"Start time =\", time_str)\n\n file_list = getDataFileList()\n\n headers = []\n records = []\n field_count = -1\n try:\n print(\"-\" * 80)\n for file_name in file_list:\n file_path = os.path.join(__input_folder_path, file_name)\n print(\"HTML data file =\", file_path)\n with open(file_path, \"r\") as file:\n html_data = file.read()\n\n print(\"HTML data size =\", len(html_data))\n document = BeautifulSoup(html_data, \"html.parser\")\n print(\"HTML title =\", document.title)\n\n header_table_section = document.find(\"table\", {\"id\": \"forLandscape\"})\n if header_table_section is None:\n print(\"Empty data, skip.\")\n continue\n\n # Only need to get headers once from the first file.\n if len(headers) == 0:\n thead = header_table_section.find(\"thead\")\n if thead is None:\n raise Exception(\"Cannot find any thead.\")\n tr = thead.find(\"tr\")\n if tr is None:\n raise Exception(\"Cannot find any thead.tr.\")\n th_list = tr.find_all(\"th\")\n if len(th_list) == 0:\n raise Exception(\"Cannot find any thead.tr.th.\")\n for th in th_list:\n field = th.find(\"label\")\n field_name = field.get_text().replace(\"\\t\", \"\").replace(\"\\n\", \" \").strip()\n field_name = ' '.join(field_name.split())\n field_name = field_name.encode('ascii', errors = 'ignore').decode()\n headers.append(field_name)\n field_count = len(headers)\n print(\"field_count =\", field_count)\n\n data_table_section = document.find(\"div\", {\"class\": \"row show-for-medium-up\"})\n if data_table_section is None:\n raise Exception(\"Cannot find any data_table_section.\")\n\n data_table = data_table_section.find(\"table\")\n if data_table is None:\n raise Exception(\"Cannot find any data_table.\")\n\n tbody = data_table.find(\"tbody\")\n if tbody is None:\n raise Exception(\"Cannot find any tbody.\")\n tr_list = tbody.find_all(\"tr\")\n if tr_list is not None:\n temp_records = []\n for tr in tr_list:\n td_list = tr.find_all(\"td\")\n if len(td_list) < field_count:\n raise Exception(\"Cannot find enough tbody.tr.td.\")\n record = []\n for td in td_list:\n field_value = td.get_text().replace(\"\\t\", \"\").replace(\"\\n\", \" \").replace(\",\", \"\").strip()\n dot_index = field_value.find(\".\")\n if (dot_index > 0):\n if field_value[0] == \"$\":\n field_value = field_value[:dot_index].replace(\"$\", \"\")\n else:\n field_value = field_value[:dot_index] + \"/\" + field_value[dot_index + 3:]
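 # the \"$\" branch above keeps whole dollars (\"$1234.50\" -> \"1234\"); this branch turns \".\" plus the next two chars into \"/\"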
\n field_value = ' '.join(field_value.split())\n field_value = field_value.encode('ascii', errors = 'ignore').decode()\n record.append(field_value)\n temp_records.append(record)\n print(\"Records from file =\", len(temp_records))\n records.extend(temp_records)\n print(\"Total records =\", len(records))\n\n print(\"-\" * 80)\n\n print(\"Total records =\", len(records))\n print(\"Process inventory list: ok.\")\n except Exception as e:\n print(\"Process inventory list: Exception = {0}\".format(e))\n\n time_str = strftime(\"%Y-%m-%d %H:%M:%S\", localtime(time()))\n print(\"Stop time =\", time_str)\n\n print(\"-\" * 100)\n\n # If given __output_file_path, output to file; otherwise, output to\n # screen.\n if __output_file_path:\n if (len(headers) == 0) or (len(records) == 0):\n raise Exception(\"No data retrieved.\")\n\n try:\n # Open output file.\n with open(__output_file_path, \"wt\", encoding = \"utf-8\") as output_file:\n print('output_file =', output_file)\n # Output file as CSV format.\n cout = csv.writer(output_file, lineterminator = \"\\n\")\n # Write header line.\n cout.writerow(headers)\n # Write record lines.\n cout.writerows(records)\n print(\"Output process results: ok\")\n except Exception as e:\n print(\"Output process results: Exception = {0}\".format(e))\n else:\n # Output screen as JSON format.\n print(\"headers =\", headers)\n print(\"records =\")\n for record in records:\n print(record)\n\n print(\"-\" * 100)\n\n\ndef usage():\n print('''\nConvert table data from html to CSV.\n\nUsage:\n-h\n-f <folder path> -i <file prefix> [-o <output file path>]\n\nOptions:\n-h : Show help.\n-f <folder path> : Source data folder path. Compulsory.\n-i <file prefix> : Source data file name prefix (HTML). Compulsory.\n-o <output file path> : Result output file path (CSV). Optional, output to screen by default.\n''')\n\n\ndef main(argv):\n '''\n Pass input arguments from command line to method.\n\n @param argv: A list of arguments\n '''\n\n global __input_folder_path\n global __input_file_prefix\n global __output_file_path\n\n print(\"argv =\", argv)\n\n __show_usage = False\n __exit_code = 0\n __error_message = None\n\n # If no options at all.\n if not argv:\n __show_usage = True\n\n # Parse command line.\n if not __show_usage:\n try:\n opts, args = getopt.getopt(argv, \"hf:i:o:\")\n print(\"opts =\", opts)\n print(\"args =\", args)\n except Exception as e:\n # There would be getopt.GetoptError.\n print(\"Parse command line: Exception = {0}\".format(e))\n __show_usage, __exit_code, __error_message = True, -1, \"Wrong command line option.\"\n\n # Check and parse each option.\n if not __show_usage:\n try:\n for opt, arg in opts:\n if opt == \"-h\":\n __show_usage, __exit_code = True, 0\n elif opt == \"-f\":\n __input_folder_path = arg\n elif opt == \"-i\":\n __input_file_prefix = arg\n elif opt == \"-o\":\n __output_file_path = arg\n else:\n __show_usage, __exit_code, __error_message = True, -\\\n 2, \"Unknown command line option.\"\n except Exception as e:\n print(\"Parse command options: Exception = {0}\".format(e))\n __show_usage, __exit_code, __error_message = True, -\\\n 3, \"Wrong value for command line option.\"\n\n print(\"show_usage =\", __show_usage)\n print(\"input_folder_path =\", __input_folder_path)\n print(\"input_file_prefix =\", __input_file_prefix)\n print(\"output_file_path =\", __output_file_path)\n\n # Check options are valid.\n if not __show_usage:\n if (__input_folder_path is None) or (__input_file_prefix is None):\n __show_usage, __exit_code, __error_message = True, -\\\n 4, \"Missing compulsory command line option.\"\n\n if not __show_usage:\n 
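# all compulsory options are present; run the conversion\n 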
process_inventory_list()\n else:\n print(\"__exit_code =\", __exit_code)\n if __error_message:\n print(\"__error_message =\", __error_message)\n print(\"\")\n usage()\n sys.exit(__exit_code)\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n","sub_path":"Python_Test/PyDataMiningSample/com/djs/learn/hdb/ConvertHtmlToRawData.py","file_name":"ConvertHtmlToRawData.py","file_ext":"py","file_size_in_byte":8645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"420928270","text":"def solve(arr):\n # timeout...\n # ans = 0\n # arr.sort()\n # while not (arr[0] == 0 and arr[1] == 0):\n # arr[1] -= 1\n # arr[-1] -= 1\n # ans += 1\n # arr.sort()\n # return ans\n\n maxArr = max(arr)\n sumArr = sum(arr)\n if sumArr <= 2*maxArr:\n return sumArr-maxArr\n else:\n return int(sumArr/2)\n\nprint(solve([12, 12, 12]))","sub_path":"casinoChipsWeek1L6.py","file_name":"casinoChipsWeek1L6.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"555147011","text":"from audio_io import read_wav, save_signals\nfrom stft_tools import stft_vanilla\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nfrom nmf_em_naive import NmfEmNaive, init_strategy\nfrom sources_retriever import to_files, extract_sources_influences\nimport numpy as np\nimport librosa.core\nimport argparse\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--n_components\", type=int, default=20)\nparser.add_argument(\"--n_sources\", type=int, default=4)\nparser.add_argument(\"--filesource\")\nparser.add_argument(\"--mode\", default='D')\nparser.add_argument(\"--directory\", default=\"./results\")\nargs = parser.parse_args()\n\n\nn_components = args.n_components\nn_sources = args.n_sources\nfilesource = args.filesource\ndirec = args.directory\nmode = args.mode\n\n# fs, x_t = read_wav(filename=filesource)\n\nfs = 16000\nx_t, fs = librosa.core.load(path=filesource, mono=False, sr=fs)\nx_t = x_t.T\n\nprint(x_t.shape)\n# x_t = x_t[:40000]\n\nfreqs, times, x_f = stft_vanilla(x_t.T, nperseg=1024)\nn_canals, n_freqs, n_bins = x_f.shape\n\n# x_f = x_f.reshape((n_freqs, n_bins, n_canals))\nx_f = x_f.transpose([1, 2, 0])\nprint(x_f.shape)\n\n# plt.plot(x_t[:, 0])\n# plt.show()\n\na0, w0, h0 = init_strategy(x_f, n_sources=n_sources, n_comps=n_components)\n\nalg = NmfEmNaive(x_f, n_components, n_sources,\n test_shapes=True, test_dots=False, mode=mode,\n a0=a0, w0=w0, h0=h0, n_jobs=1)\nprint(alg.sigma_hat_f.min(), alg.sigma_hat_f.max())\n\ncosts = []\n\nfor iterate in tqdm(range(300)):\n alg.e_step()\n alg.m_step()\n my_cost = alg.cost()\n print(my_cost)\n costs.append(my_cost)\n\n signals_iter = alg.s\n a_iter = alg.a\n if iterate % 50 == 0:\n # save_signals(signals_iter, n_freqs=n_freqs, n_bins=n_bins, fs=fs,\n # filename=direc+'/signals_iter{}'.format(iterate))\n coefs = extract_sources_influences(signals_iter, a_iter)\n to_files(coefs, nperseg=1024, fs=fs,\n filename=direc+'/signals_iter{}'.format(iterate))\n\n\nsignals = alg.s\na = alg.a\ncoefs = extract_sources_influences(signals, a)\nto_files(coefs, nperseg=1024, fs=fs,\n filename=direc+'/signals_final')\n\n\nnp.save(direc+'/final_s.npz', alg.s)\nnp.save(direc+'/final_a.npz', alg.a)\n\nplt.plot(costs)\nplt.ylabel('Loss')\nplt.xlabel('Iterations')\nplt.title('EM Algorithm 
Convergence')\nplt.savefig('/home/pierre/MVA/audio/em_convergence_4_males.png')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"649894360","text":"import json\n\nfrom flask import Blueprint, request, make_response, jsonify\nfrom flask.views import MethodView\n\nfrom project.server import bcrypt, db\nfrom project.server.models import User\n\nauth_blueprint = Blueprint('auth', __name__)\n\nclass RegisterAPI(MethodView):\n \"\"\"\n User Registration Resource\n \"\"\"\n\n def get(self):\n responseObject = {\n 'status': 'success',\n 'message': 'Request successful but please send an HTTP POST request to register the user.'\n }\n return make_response(jsonify(responseObject)), 201\n\n def post(self):\n # get the post data\n post_data = request.get_json()\n # check if user already exists\n user = User.query.filter_by(email=post_data.get('email')).first()\n if not user:\n try:\n user = User(\n email=post_data.get('email'),\n password=post_data.get('password')\n )\n\n # insert the user\n db.session.add(user)\n db.session.commit()\n # generate the auth token\n auth_token = user.encode_auth_token(user.id)\n responseObject = {\n 'status': 'success',\n 'message': 'Successfully registered.',\n 'auth_token': auth_token\n }\n return make_response(jsonify(responseObject)), 201\n except Exception as e:\n responseObject = {\n 'status': 'fail',\n 'message': 'Some error occurred. Please try again.'\n }\n return make_response(jsonify(responseObject)), 401\n else:\n responseObject = {\n 'status': 'fail',\n 'message': 'User already exists. Please log in.',\n }\n return make_response(jsonify(responseObject)), 202\n\nclass ListUsersAPI(MethodView):\n \"\"\"\n User Index Resource\n \"\"\"\n\n def get(self):\n users = User.query.all()\n all_data = []\n for user in users:\n user_l = {'admin':user.admin, 'email':user.email, 'id':user.id, 'registered_on':str(user.registered_on)}\n all_data.append(user_l)\n\n responseObject = {\n 'users': all_data\n }\n return make_response(jsonify(responseObject)), 201\n\n \n\n# define the API resources\nregistration_view = RegisterAPI.as_view('register_api')\nlist_user_view = ListUsersAPI.as_view('list_user_api')\n\n# add Rules for API Endpoints\nauth_blueprint.add_url_rule(\n '/auth/register',\n view_func=registration_view,\n methods=['POST', 'GET']\n)\n\nauth_blueprint.add_url_rule(\n '/users/index',\n view_func=list_user_view,\n methods=['GET']\n)","sub_path":"project/server/auth/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"396514872","text":"inFile = open('input.txt', 'r', encoding='utf8')\n# outFile = open('output.txt', 'w', encoding='utf8')\nk = 0\nc = []\nu = 1\ni = 1\nallL = set()\nuniqL = set()\ncAll = []\nfor line in inFile:\n f = line.strip()\n # c.append(f)\n if k < 1:\n n = int(f)\n a = set(range(1, n + 1))\n k += 1\n elif u == 1:\n m = int(f)\n u = 0\n else:\n if i <= m:\n p = set(list(f.split()))\n allL.update(p)\n b = p.copy()\n # uniqL = b - p\n c.append(f)\n # print(c)\n i += 1\n if i > m:\n cAll.append(tuple(c))\n c = []\n i = 1\n u = 1\nt1 = set(cAll[0])\nfor i in range(1, len(cAll)):\n t1.intersection_update(set(cAll[i]))\nprint(len(t1))\nfor i in range(len(t1)):\n print((list(t1))[i])\nprint(len(allL))\nfor i in range(len(allL)):\n 
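# print every distinct token collected across all input groups (the union)\n 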
print((list(allL))[i])\ninFile.close()\n","sub_path":"doneTask_07/Task09_setPoliglot.py","file_name":"Task09_setPoliglot.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"532694409","text":"\"\"\"\nDescription\n\nGiven a directed graph where each edge is represented by a tuple, such as [u, v, w] represents an edge with weight w from u to v.\nYou need to calculate the minimum number of edges that must be added so that every node is balanced. That is, the total weight of the edges pointing into a node equals the total weight of the edges pointing out of it.\n\nExample\n\nFor example:\nGiven a graph [[0,1,10],[2,0,5]]\nReturn 2\nTwo edges need to be added: [1,0,5] and [1,2,5]\n\nGiven a graph [[0,1,10],[1,0,1],[1,2,5],[2,0,5]]\nReturn 1\nOnly one edge needs to be added: [1,0,4]\n\nIdea\n\nFirst, we get the account balance on each node and exclude those that are already balanced, that is, have 0 account balance. Then, we consider each set of the remaining accounts and check whether the sum of account balances in the set is 0. If true, that set of accounts can be balanced, and if the set has n nodes we only need to connect them in a circle, so n - 1 edges balance all of them to 0. Besides, if the set can be divided into two subsets whose combined number of additional edges is smaller than the previous result, that smaller sum should be the answer for the set.\n\"\"\"\nfrom sys import maxsize\n\nclass Solution:\n \"\"\"\n @param edges: a directed graph where each edge is represented by a tuple\n @return: the number of edges\n \"\"\"\n\n def balanceGraph(self, edges):\n dept = {}\n account = []\n for e in edges:\n dept[e[0]] = dept.get(e[0], 0) - e[2]\n dept[e[1]] = dept.get(e[1], 0) + e[2]\n for v in dept.values():\n if v != 0: #exclude balanced accounts\n account.append(v)\n accountNum = len(account)\n if accountNum == 0:\n return 0\n setNum = 1 << accountNum\n dp = [maxsize for _ in range(setNum)]\n for _set in range(1, setNum):\n _sum = 0\n count = 0\n for node in range(0, accountNum):\n if ((1 << node) & _set) != 0: # node in the set\n _sum += account[node]\n count += 1\n if _sum == 0:\n dp[_set] = count - 1\n for subset in range(1, _set):\n \n if (subset & _set) == subset \\\n and dp[subset] + dp[_set - subset] < dp[_set]:\n # subset is in set and these two subsets can reduce the number of edges\n dp[_set] = dp[subset] + dp[_set - subset]\n return dp[setNum - 1]\n ","sub_path":"algorithm/dynamic-programming/OptimalAccountBalancing.py","file_name":"OptimalAccountBalancing.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"478181341","text":"#This is a Nipype generator. 
Warning, here be dragons.\n#!/usr/bin/env python\n\nimport sys\nimport nipype\nimport nipype.pipeline as pe\n\r\nimport nipype.interfaces.io as io\r\nimport nipype.interfaces.fsl as fsl\r\nimport nipype.interfaces.spm as spm\r\n\r\n#Flexibly collect data from disk to feed into workflows.\r\nio_SelectFiles = pe.Node(io.SelectFiles(templates={'anatomical':'Subject{subj_id}/Anatomical/anat.nii.gz','functional':'Subject{subj_id}/Functional/rEP3D_Session*.nii'}), name='io_SelectFiles', iterfield = ['subj_id'])\r\nio_SelectFiles.inputs.base_directory = '/project/3015003.04/FamTutorial/SubjectData/'\r\nio_SelectFiles.inputs.anatomical = 'Subject{subj_id}/Anatomical/anat.nii.gz'\r\nio_SelectFiles.inputs.functional = 'Subject{subj_id}/Functional/rEP3D_Session*.nii'\r\nio_SelectFiles.iterables = [('subj_id', ['01', '02', '03'])]\r\n\r\n#Wraps the executable command ``bet``.\r\nfsl_BET = pe.Node(interface = fsl.BET(), name='fsl_BET')\r\n\r\n#Use spm_realign for estimating within modality rigid body alignment\r\nspm_Realign = pe.Node(interface = spm.Realign(), name='spm_Realign')\r\n\r\n#Wraps the executable command ``flirt``.\r\nfsl_FLIRT = pe.Node(interface = fsl.FLIRT(), name='fsl_FLIRT')\r\n\r\n#Generic datasink module to store structured outputs\r\nio_DataSink = pe.Node(interface = io.DataSink(), name='io_DataSink')\r\nio_DataSink.inputs.base_directory = '/project/3015003.04/FamTutorial/Results/'\r\n\r\n#Create a workflow to connect all those nodes\r\nanalysisflow = nipype.Workflow('MyWorkflow')\r\nanalysisflow.connect(io_SelectFiles, \"functional\", spm_Realign, \"in_files\")\r\nanalysisflow.connect(io_SelectFiles, \"anatomical\", fsl_BET, \"in_file\")\r\nanalysisflow.connect(fsl_BET, \"out_file\", fsl_FLIRT, \"in_file\")\r\nanalysisflow.connect(spm_Realign, \"mean_image\", fsl_FLIRT, \"reference\")\r\nanalysisflow.connect(fsl_FLIRT, \"out_file\", io_DataSink, \"registered\")\r\nanalysisflow.connect(spm_Realign, \"realigned_files\", io_DataSink, \"realigned\")\r\n\n#Run the workflow\nplugin = 'MultiProc' #adjust your desired plugin here\nplugin_args = {'n_procs': 1} #adjust to your number of cores\nanalysisflow.write_graph(graph2use='flat', format='png', simple_form=False)\nanalysisflow.run(plugin=plugin, plugin_args=plugin_args)\n","sub_path":"GIRAFFE/code/workflow.py","file_name":"workflow.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"431238786","text":"from fastapi import FastAPI\nfrom fastapi.responses import HTMLResponse, FileResponse\nfrom fastapi.staticfiles import StaticFiles\nfrom starlette.middleware.cors import CORSMiddleware\n\nfrom app.core import config\nfrom app.core.logger import _logger\nfrom app.db import database\nfrom app.api.routes import api_router\n\napp = FastAPI(\n title=config.NAME\n)\n\nif config.CORS_ORIGIN:\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[str(origin) for origin in config.CORS_ORIGIN],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n\n\nasync def redirectSPA():\n return FileResponse('app/static/index.html')\n\n\n@app.middleware(\"http\")\nasync def spa_fallback(request, call_next):\n response = await call_next(request)\n if response.status_code == 404:\n return await redirectSPA()\n return response\n\n\n@app.exception_handler(404)\nasync def not_found(request, exc):\n return await redirectSPA()\n\n\n@app.on_event('startup')\nasync def event_startup():\n _logger.info('Ensuring model indexes created into 
database ...')\n from app.api.models import ensure_indexes\n try:\n await ensure_indexes()\n except Exception as err:\n _logger.critical(err)\n raise err\n _logger.info('Model indexes created')\n\n\n@app.on_event('shutdown')\nasync def event_shutdown():\n _logger.info('Closing connection to database ...')\n database.client.close()\n _logger.info('Connection closed')\n\n\napp.include_router(api_router, prefix='/api')\napp.mount('/_assets/', StaticFiles(directory='app/static/_assets'))\napp.route('/', redirectSPA)\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"548866086","text":"from flask import render_template,request,redirect,url_for,abort\nfrom flask_login import login_required\n\nfrom ..models import User\nfrom . import main\nfrom .forms import UpdateProfile\nfrom .. import db, photos\nimport cloudinary\nimport cloudinary.uploader\nfrom cloudinary.uploader import upload\nimport cloudinary.api\nfrom cloudinary.utils import cloudinary_url\nfrom ..request import get_book\n\n@main.route('/', methods = ['GET','POST'])\ndef index():\n title = 'Popular Books'\n book = get_book()\n return render_template('index.html', title=title, books=book)\n\n@main.route('/user/', methods = ['GET','POST'])\ndef profile(uname):\n user = User.query.filter_by(username = uname).first()\n\n if user is None:\n abort(404)\n form = UpdateProfile()\n\n if form.validate_on_submit():\n user.bio = form.bio.data\n\n db.session.add(user)\n db.session.commit()\n\n return render_template(\"profile/profile.html\", user = user, form=form)\n\n@main.route('/user//update',methods = ['GET','POST'])\n@login_required\ndef update_profile(uname):\n user = User.query.filter_by(username = uname).first()\n if user is None:\n abort(404)\n\n form = UpdateProfile()\n\n if form.validate_on_submit():\n user.bio = form.bio.data\n\n db.session.add(user)\n db.session.commit()\n\n return redirect(url_for('.profile',uname=user.username))\n\n return render_template('profile/update.html',form =form)\n\n@main.route('/user//update/pic',methods= ['POST'])\n@login_required\ndef update_pic(uname):\n user = User.query.filter_by(username = uname).first()\n if 'photo' in request.files:\n filename = request.files['photo']\n upload = cloudinary.uploader.upload(filename)\n path = upload.get('url')\n user.profile_pic_path = path\n db.session.commit()\n return redirect(url_for('main.profile', uname=uname))","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"214543173","text":"from pbcore.io import AlignmentSet\nimport pandas as pd\nimport numpy as np\nimport argparse\nimport logging\n\nlogging.basicConfig()\nlog = logging.getLogger(__name__)\n\ndef parseArgs():\n parser = argparse.ArgumentParser(description=('Generate AlignmentSet'\n 'summary metrics '\n 'from .pbi file.'))\n subparsers = parser.add_subparsers(help='Choose mode of operation',\n dest='command')\n summarize_parser = subparsers.add_parser('summarize')\n full_parser = subparsers.add_parser('full-mode')\n \n summarize_parser.add_argument('alignmentset',\n help='Path to AlignmentSet')\n summarize_parser.add_argument('output',\n help='Path to output CSV')\n summarize_parser.add_argument('--by-zmw',\n help=('Combine subreads from a particular ZMW \\n'\n 'when displaying readlength and template-span 
\\n'\n 'summary information. Flag does nothing when \\n'\n 'program is operated in full-mode.'),\n action=\"store_true\")\n full_parser.add_argument('alignmentset',\n help='Path to AlignmentSet')\n full_parser.add_argument('output',\n help='Path to output CSV') \n args = parser.parse_args()\n return args\n\ndef refIdToName(referenceInfoTable):\n id_to_name = {}\n for refInfo in referenceInfoTable:\n id_to_name[refInfo['ID']] = refInfo['Name']\n return id_to_name\n\ndef saveFullCsv(df, output_path):\n \"\"\"\n Save full summary to CSV\n \"\"\"\n df.drop(['qId',\n 'qStart',\n 'qEnd',\n 'tId',\n 'nM',\n 'nMM',\n 'nIns',\n 'nDel',\n 'readQual',\n 'contextFlag',\n 'virtualFileOffset',\n 'isReverseStrand',\n 'mapQV'],\n axis=1,\n inplace=True)\n log.info('Saving info to CSV...')\n df.to_csv(output_path, index=False)\n log.info('Done.')\n\ndef saveSummaryCsv(df, output_path, by_zmw):\n \"\"\"\n Save by-contig summary to CSV\n \"\"\"\n df['readlength'] = df['aEnd'] - df['aStart']\n df['templatespan'] = df['tEnd'] - df['tStart']\n df.drop(['qId',\n 'qStart',\n 'qEnd',\n 'tStart',\n 'tEnd',\n 'aEnd',\n 'aStart',\n 'tId',\n 'nM',\n 'nMM',\n 'nIns',\n 'nDel',\n 'readQual',\n 'contextFlag',\n 'virtualFileOffset',\n 'isReverseStrand',\n 'mapQV'],\n axis=1,\n inplace=True)\n if not by_zmw:\n mean = df.groupby(by=['refName']).mean()\n mean['refName'] = mean.index\n median = df.groupby(by=['refName']).median()\n median['refName'] = median.index\n count = df.groupby(by=['refName']).count()\n count['nReads'] = count['accuracy'] # choose a column, all same\n sdf = pd.merge(mean,\n median,\n on='refName',\n suffixes=['_mean', '_median'])\n sdf['nReads'] = pd.Series(count['nReads'].values)\n sdf['nZmws'] = pd.Series(df.groupby(by=['refName'])['holeNumber'].nunique().values)\n\n else: # group metrics by ZMW as well\n acc_mean = df.groupby(by=['refName'])['accuracy'].mean()\n acc_median = df.groupby(by=['refName'])['accuracy'].median()\n unique_count = df.groupby(by=['refName'])['holeNumber'].nunique()\n count = df.groupby(by=['refName'])['holeNumber'].count()\n sdf = pd.DataFrame({'refName': acc_mean.index,\n 'nReads': count,\n 'nZmws': unique_count,\n 'accuracy_mean': acc_mean,\n 'accuracy_median': acc_median})\n\n for length in ['readlength', 'templatespan']:\n lengths = df.groupby(by=['refName', 'holeNumber'])[length].sum()\n mean_length = lengths.groupby(by=['refName']).mean()\n median_length = lengths.groupby(by=['refName']).median()\n sdf[length + '_mean'] = mean_length\n sdf[length + '_median'] = median_length\n\n sdf = sdf[['refName', 'nReads', 'nZmws',\n 'accuracy_mean', 'accuracy_median',\n 'templatespan_mean', 'templatespan_median',\n 'readlength_mean', 'readlength_median']]\n sdf.to_csv(output_path, index=False)\n log.info('Done.')\n\ndef generatePbiCsv(aset_path, output_path,\n program_mode,\n by_zmw):\n \"\"\"\n Given alignmentset, generate CSV summary of\n pbi index and save to output_path\n \"\"\"\n aset = AlignmentSet(aset_path)\n log.info('Opened AlignmentSet for processing...')\n df = pd.DataFrame.from_records(aset.index)\n df['refName'] = df['tId'].map(refIdToName(aset.referenceInfoTable))\n df['accuracy'] = np.round(1. 
- np.divide(df['nDel'] + df['nMM'] + df['nIns'],\n df['tEnd'] - df['tStart'],\n dtype=float),\n decimals=2)\n if program_mode == 'full-mode':\n log.info('Saving full-mode CSV')\n saveFullCsv(df, output_path)\n elif program_mode == 'summarize':\n log.info('Saving summarize CSV')\n saveSummaryCsv(df, output_path, by_zmw)\n\ndef main():\n log.setLevel(logging.INFO)\n args = parseArgs()\n log.info('Parsed command-line arguments...')\n program_mode = args.command\n\n # Based on command-line arguments, decide\n # whether the results should be presented on\n # a per-subread basis or on a per-zmw basis.\n by_zmw = False\n if args.by_zmw:\n by_zmw = True\n\n generatePbiCsv(args.alignmentset, args.output,\n program_mode,\n by_zmw)\n\n\nif __name__ == '__main__':\n main()","sub_path":"biotk/scripts/extractIndexMetrics.py","file_name":"extractIndexMetrics.py","file_ext":"py","file_size_in_byte":6148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"107862813","text":"import time\nimport threading\nprint('deadlock')\nstart_time = time.time()\n#lock = threading.Lock()\nlock = threading.RLock() # avoids self-deadlock (a recursive lock keeps an internal counter: acquire is +1, release is -1)\ndef add():\n # acquire and release must come in matching pairs\n lock.acquire()\n print('ok')\n time.sleep(1)\n lock.acquire()\n print('ok1')\n lock.release()\n lock.release()\n\n\n","sub_path":"net/threading/threading_lock_01.py","file_name":"threading_lock_01.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"539250019","text":"from django.shortcuts import redirect, render\nfrom django.urls import reverse\nfrom django.views.generic import ListView\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.db.models import Q\n\nfrom apps.mobile_api.models import UserProfile, Transaction\nfrom ..constant import TAB_USER_PROFILE, TAB_USER_MANAGEMENT\n\n\nclass ProfileList(ListView):\n model = UserProfile\n template_name = 'user_profile/profile_list.html'\n context_object_name = 'profiles'\n paginate_by = 10\n header_text = [\"No\", \"Email\", \"Phone Number\", \"Name\", \"Surname\", \"Credit\", \"Verified Email\", \"Updated PIN\", \"Status\", \"Action\"]\n page_size_list = [10, 20, 30]\n active_tab = TAB_USER_PROFILE\n parent_tab = TAB_USER_MANAGEMENT\n\n SUSPEND_ACTION, REACTIVE_ACTION = range(2)\n\n def get_queryset(self):\n filter_query = Q()\n try:\n search_key = self.request.GET.get('search_key',)\n except KeyError:\n search_key = None\n if search_key:\n filter_query = filter_query & (Q(email__icontains=search_key) |\n Q(name__icontains=search_key) |\n Q(phone_number__icontains=search_key)\n )\n account_list = UserProfile.objects.filter(filter_query).order_by(\"-id\")\n return account_list\n\n def get_context_data(self, **kwargs):\n context = super(ProfileList, self).get_context_data(**kwargs)\n context['header_texts'] = self.header_text\n context['page_size_list'] = self.page_size_list\n context['paginate_by'] = int(self.get_paginate_by())\n context['active_tab'] = self.active_tab\n context['parent_tab'] = self.parent_tab\n context['suspend_action'] = self.SUSPEND_ACTION\n context['reactive_action'] = self.REACTIVE_ACTION\n return context\n\n def get_paginate_by(self, queryset=None):\n return self.request.GET.get('paginate_by', self.paginate_by)\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(ProfileList, self).dispatch(*args, **kwargs)\n\n 
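# the profile-list page POSTs profile_id and action_type here to suspend or re-activate an account\n 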
@method_decorator(login_required)\n def post(self, request, *args, **kwargs):\n profile_id = request.POST.get(\"profile_id\")\n action_type = int(request.POST.get(\"action_type\"))\n user_profile = UserProfile.objects.get(id=profile_id)\n if action_type == self.SUSPEND_ACTION:\n user_profile.suspend_user()\n if action_type == self.REACTIVE_ACTION:\n user_profile.re_active_user()\n return redirect(reverse('admin_v1:profile_list'))\n\n\nclass ProfileDetailView(ListView):\n model = UserProfile\n template_name = 'user_profile/profile_detail.html'\n context_object_name = 'transactions'\n paginate_by = 10\n header_text = [\"No\", \"Amount\", \"Description\", \"Created at\"]\n page_size_list = [10,20,30]\n active_tab = TAB_USER_PROFILE\n parent_tab = TAB_USER_MANAGEMENT\n\n def get_queryset(self):\n filter_query = Q()\n account_list = Transaction.objects.filter(filter_query).order_by(\"-id\")\n return account_list\n\n def get_context_data(self, **kwargs):\n context = super(ProfileDetailView, self).get_context_data(**kwargs)\n context['header_texts'] = self.header_text\n context['page_size_list'] = self.page_size_list\n context['paginate_by'] = int(self.get_paginate_by())\n context['active_tab'] = self.active_tab\n context['parent_tab'] = self.parent_tab\n context['user_id'] = self.kwargs.get(\"user_id\")\n context['user_profile'] = UserProfile.objects.get(user_id=self.kwargs.get(\"user_id\"))\n return context\n\n def get_paginate_by(self, queryset=None):\n return self.request.GET.get('paginate_by', self.paginate_by)\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(ProfileDetailView, self).dispatch(*args, **kwargs)\n\n\n@login_required()\ndef add_credit_to_user(request):\n user_profile_id = request.GET.get(\"profile_id\")\n profile = UserProfile.objects.get(id=user_profile_id)\n if request.method == \"POST\":\n amount = int(request.POST.get(\"amount\"))\n reason = request.POST.get(\"reason\")\n profile.add_credit(amount=amount, reason=reason)\n return redirect(reverse('admin_v1:profile_detail', kwargs={\"user_id\": profile.user_id}))\n context_data = {\n \"user_profile\": profile\n }\n return render(request, 'user_profile/add_credit.html', context=context_data)\n","sub_path":"loto/apps/admin_v1/views/user_profile.py","file_name":"user_profile.py","file_ext":"py","file_size_in_byte":4613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"264295908","text":"from flask import Flask, redirect, render_template\nfrom flask_pymongo import PyMongo\nfrom mars_scrape import scrape\n\napp = Flask(__name__)\n\nmongo = PyMongo(app, uri='mongodb://localhost:27017/mars_db')\nmars = mongo.db.mars\n\n@app.route('/')\ndef index():\n mars_data = mars.find_one()\n return render_template('index.html', mars=mars_data)\n\n@app.route('/scrape')\ndef scrape_mars():\n data = scrape()\n mars.update({},data,upsert=True)\n return redirect('/')\n \nif __name__=='__main__':\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"339233184","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n#\n\nimport pytest\nfrom PageObject.index import Index\nfrom PageObject.firm import FirmPage\nfrom common.readconfig import conf\nfrom common.connect_db import operation_mysql\nimport time\nimport os\n\ntest_add_firm1 = [(\"\",\"7\",\"test address\"),(\" \",\"7\",\"test address\"),(\"test firm\",\"\",\"test 
address\"),(\"\",\"\",\"test address\"),(\"\",\"7\",\"\"),\n (\"test firm\",\"\",\"\"),(\"\",\"\",\"\"),(\"智能厂家智能厂家智能厂家智能厂家智能厂家智\",\"7\",\"test address\"),\n (\"test\",\"1000\",\"test address\"),(\"test firm\",\"7\",\"厂家地址厂家地址厂家地址厂家地址厂家地址厂\")]\ntest_add_firm2 = [(\"测试厂商\",\"7\",\"test address\"),(\"t\",\"7\",\"test address\"),(\"111\",\"7\",\"test address\"),(\"!@#$%^&*()\",\"7\",\"test address\"),\n (\"测试厂商测试厂商测试厂商测试厂商测试厂商\",\"7\",\"test address\")]\nclass TestFirm:\n @pytest.fixture(scope='class', autouse=True)\n def open_index_firm(self, drivers, open_loginhtml):\n self.index = Index(drivers)\n self.index.switch_firm()\n #\n @pytest.fixture(scope='function', autouse=False)\n def delete_testfirm(self, drivers):\n yield\n firm = FirmPage(drivers)\n firm.switch_to_iframe()\n firm.click_row()\n firm.click_del()\n firm.swtich_to_default()\n firm.click_confirm()\n firm.click_confirm()\n #\n @pytest.fixture(scope='function', autouse=False)\n def add_testfirm(self, drivers):\n firm = FirmPage(drivers)\n firm.switch_to_iframe()\n firm.click_add()\n firm.input_firm_name(\"测试厂商\")\n firm.input_firm_address(\"测试厂商测试地址\")\n firm.input_firm_code(30)\n firm.click_save()\n firm.swtich_to_default()\n firm.click_confirm()\n firm.click_confirm()\n\n @pytest.mark.hello\n def test_add_firm_roolback(self, drivers):\n '''新增厂商_撤销'''\n firm = FirmPage(drivers)\n firm.switch_to_iframe()\n firm.click_add()\n firm.input_firm_name(\"测试厂商1\")\n firm.input_firm_code(6)\n firm.input_firm_address(\"测试厂商测试地址\")\n firm.click_rollback()\n firm.swtich_to_default()\n firm.click_confirm()\n\n # @pytest.mark.hello\n def test_add_firm(self, drivers, delete_testfirm):\n '''新增厂商'''\n firm = FirmPage(drivers)\n firm.switch_to_iframe()\n firm.click_add()\n firm.input_firm_name(\"测试厂商1\")\n firm.input_firm_address(\"测试厂商测试地址\")\n firm.input_firm_code(10)\n firm.click_save()\n firm.swtich_to_default()\n firm.click_confirm()\n firm.click_confirm()\n\n # @pytest.mark.hello\n @pytest.mark.parametrize(\"add_firm_name,add_firm_code,add_firm_address\",test_add_firm1)\n def test_add_firm_inputerror(self, drivers, add_firm_name, add_firm_code, add_firm_address, expect_message=\"请输入正确内容\"):\n '''新增厂商异常,厂商名称和厂商代码不能为空,名称和地址不超过20个字符,编码不超过1000'''\n firm = FirmPage(drivers)\n firm.switch_to_iframe()\n firm.click_add()\n firm.input_firm_name(add_firm_name)\n firm.input_firm_address(add_firm_address)\n firm.input_firm_code(add_firm_code)\n firm.click_save()\n firm.swtich_to_default()\n time.sleep(0.5)\n firm_error_message = firm.error_message()\n assert expect_message in firm_error_message\n time.sleep(2)\n firm.switch_to_iframe()\n firm.click_rollback()\n firm.switch_default()\n firm.click_confirm()\n time.sleep(1)\n\n # @pytest.mark.hello\n @pytest.mark.parametrize(\"add_firm_name,add_firm_code,add_firm_address\", test_add_firm2)\n def test_add_firm(self, drivers, add_firm_name, add_firm_code, add_firm_address, delete_testfirm):\n '''新增厂商正常,厂商名称支持特殊字符、中文、英文、数字'''\n firm = FirmPage(drivers)\n firm.switch_to_iframe()\n firm.click_add()\n firm.input_firm_name(add_firm_name)\n firm.input_firm_address(add_firm_address)\n firm.input_firm_code(add_firm_code)\n firm.click_save()\n firm.swtich_to_default()\n firm.click_confirm()\n firm.click_confirm()\n new_firm_name = operation_mysql().search_one(\"SELECT * FROM firm WHERE flag=0 ORDER BY created DESC LIMIT 0,1\")[\"name\"]\n assert add_firm_name == new_firm_name\n\n # @pytest.mark.hello\n def test_edit_firm(self, drivers, add_testfirm, delete_testfirm):\n '''修改厂商名称、地址'''\n firm = FirmPage(drivers)\n 
firm.switch_to_iframe()\n firm.click_add()\n firm.input_firm_name(add_firm_name)\n firm.input_firm_address(add_firm_address)\n firm.input_firm_code(add_firm_code)\n firm.click_save()\n firm.swtich_to_default()\n firm.click_confirm()\n firm.click_confirm()\n new_firm_name = operation_mysql().search_one(\"SELECT * FROM firm WHERE flag=0 ORDER BY created DESC LIMIT 0,1\")[\"name\"]\n assert add_firm_name == new_firm_name\n\n # @pytest.mark.hello\n def test_edit_firm(self, drivers, add_testfirm, delete_testfirm):\n '''Edit a firm's name and address'''\n firm = FirmPage(drivers)\n firm.switch_to_iframe()\n firm.click_row()\n firm.click_edit()\n firm.input_firm_name(\"修改测试厂商1\")\n firm.input_firm_address(\"修改测试厂商地址1\")\n firm.click_save()\n firm.swtich_to_default()\n firm.click_confirm()\n firm.click_confirm()\n firm_edit_name = operation_mysql().search_one(sql=\"SELECT * FROM firm WHERE flag=0 ORDER BY created DESC LIMIT 0,1\")['name']\n assert firm_edit_name == '修改测试厂商1'\n\n # @pytest.mark.hello\n def test_del_firm(self, drivers, add_testfirm):\n '''Delete a firm: cancel the first confirm dialog, then delete for real'''\n firm = FirmPage(drivers)\n firm.switch_to_iframe()\n firm.click_row()\n firm.click_del()\n firm.swtich_to_default()\n firm.click_cancel()\n firm.switch_to_iframe()\n firm.click_row()\n firm.click_del()\n firm.swtich_to_default()\n firm.click_confirm()\n firm.click_confirm()\n firm_number = operation_mysql().search_all(sql=\"SELECT * FROM firm WHERE flag=0\")[0]\n assert firm_number == 5\n\n\n\nif __name__ == '__main__':\n pytest.main(['test_firm.py', '-m=hello', '--maxfail=10', '--alluredir=./allure_results'])\n os.system('allure generate ./allure_results/ -o ./allure_reports --clean')","sub_path":"TestCase/test_firm.py","file_name":"test_firm.py","file_ext":"py","file_size_in_byte":5966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"291705161","text":"# Create a Fibonacci sequence from a seed list (one starting int); stop before a provided max number\n\n\ndef fib(seq, max_num):\n if len(seq) == 1:\n seq.append(seq[0])\n # recurse only while the next term still fits below max_num\n if seq[-1] + seq[-2] < max_num:\n seq.append(seq[-1] + seq[-2])\n fib(seq, max_num)\n return seq\n\n\nprint(fib([1], 10000))","sub_path":"Python/Fibonacci_recursive.py","file_name":"Fibonacci_recursive.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"169089705","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nTest the creation of ``shapely`` view factor PV arrays, and direct shading\nconditions\n\"\"\"\n\nfrom pvfactors.pvarray import Array\nfrom pvfactors.pvrow import PVRowLine\nfrom pvfactors.pvcore import LinePVArray\nimport os\nimport numpy as np\n\n\ndef test_create_array():\n \"\"\"\n Check that the pvrows know what's the index of their neighbors.\n \"\"\"\n # PV array parameters\n arguments = {\n 'n_pvrows': 3,\n 'pvrow_height': 1.5,\n 'pvrow_width': 1.,\n 'gcr': 0.3,\n 'surface_tilt': 20.,\n 'surface_azimuth': 180.\n }\n # Create vf array\n array = Array(**arguments)\n\n # Run some sanity checks on the creation of the vf array\n assert len(array.pvrows) == 3\n assert isinstance(array.pvrows[0], PVRowLine)\n assert isinstance(array.pvrows[0].lines[0], LinePVArray)\n assert array.line_registry.shape[0] == 13\n\n # Check that the expected neighbors are correct\n tol = 1e-8\n expected_pvrow_neighbors = np.array(\n [np.nan, 0., 1., np.nan, np.nan, np.nan, np.nan, np.nan,\n np.nan, np.nan, np.nan, np.nan, np.nan, 1., 2., np.nan])\n calculated_pvrow_neighbors = (array.surface_registry\n .index_pvrow_neighbor.values)\n np.testing.assert_allclose(calculated_pvrow_neighbors,\n expected_pvrow_neighbors, atol=tol, rtol=0,\n equal_nan=True)\n\n\ndef test_plotting():\n \"\"\"\n Check that the plotting functions are functional (only on local machine)\n \"\"\"\n\n is_ci = os.environ.get('CI', False)\n if not is_ci:\n import matplotlib.pyplot as plt\n from pvfactors.plot import plot_pvarray\n # Create array where sun vector is in the direction of the modules\n arguments = {\n 'n_pvrows': 3,\n 'pvrow_height': 1.5,\n 'solar_zenith': 30,\n 
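# zenith 30 with azimuth 0 lines the sun up with the rows, per the comment above\n 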
'solar_azimuth': 0.,\n 'surface_azimuth': 90.,\n 'pvrow_width': 1.,\n 'gcr': 0.3,\n 'surface_tilt': 0.\n }\n array = Array(**arguments)\n f, ax = plt.subplots(figsize=(10, 5))\n _ = plot_pvarray(ax, array)\n\n # Test with interrow forward shading\n arguments = {\n 'n_pvrows': 5,\n 'pvrow_height': 3.0,\n 'solar_zenith': 30,\n 'solar_azimuth': 180.,\n 'surface_azimuth': 270.,\n 'pvrow_width': 3.0,\n 'gcr': 0.9,\n 'surface_tilt': 20.\n }\n array = Array(**arguments)\n f, ax = plt.subplots()\n _ = plot_pvarray(ax, array)\n\n # Test with interrow backward shading\n arguments = {\n 'n_pvrows': 5,\n 'pvrow_height': 3.0,\n 'solar_zenith': 60,\n 'solar_azimuth': 0.,\n 'surface_azimuth': 90.,\n 'pvrow_width': 3.0,\n 'gcr': 0.9,\n 'surface_tilt': 20.\n }\n array = Array(**arguments)\n f, ax = plt.subplots()\n _ = plot_pvarray(ax, array)\n\n else:\n print(\"Not running 'test_plotting' in CI\")\n\n\ndef test_merge_shadows():\n \"\"\"\n When direct shading happens between pvrows, the shadow of the rows on the\n ground is supposed to form a continuous shadow (because of the overlap).\n Test that this functionally works\n \"\"\"\n # Use specific vf array configuration leading to direct shading\n arguments = {\n 'n_pvrows': 5,\n 'pvrow_height': 2.,\n 'solar_zenith': 30,\n 'solar_azimuth': 0.,\n 'surface_azimuth': 0.,\n 'pvrow_width': 3,\n 'gcr': 0.9,\n 'surface_tilt': 20.\n }\n array = Array(**arguments)\n # There should be 1 continuous shadow on the ground, but 4 distinct ground\n # shaded areas, delimited by what the front and the back of the pvrows\n # can see\n assert (array.line_registry.loc[(array.line_registry.line_type == 'ground')\n & array.line_registry.shaded]\n .shape[0] == 4)\n\n\ndef test_interrow_shading():\n \"\"\"\n Testing the ability of the model to find direct shading between pvrows\n \"\"\"\n # Forward direct shading of the pvrows\n arguments = {\n 'n_pvrows': 5,\n 'pvrow_height': 3.,\n 'solar_zenith': 30,\n 'solar_azimuth': 180.,\n 'surface_azimuth': 180.,\n 'pvrow_width': 3.,\n 'gcr': 0.9,\n 'surface_tilt': 20.\n }\n array = Array(**arguments)\n # There should be 4 pvrows with direct shading\n assert (array.surface_registry.loc[\n (array.surface_registry.line_type == 'pvrow')\n & array.surface_registry.shaded]\n .shape[0] == 4)\n\n # Backward direct shading of the pvrows (sun in the back of the modules)\n arguments = {\n 'n_pvrows': 5,\n 'pvrow_height': 3.0,\n 'solar_zenith': 80,\n 'solar_azimuth': 0.,\n 'surface_azimuth': 180.,\n 'pvrow_width': 3.0,\n 'gcr': 0.9,\n 'surface_tilt': 30.\n }\n array = Array(**arguments)\n # There should still be 4 pvrows with direct shading\n assert (array.surface_registry.loc[\n (array.surface_registry.line_type == 'pvrow')\n & array.surface_registry.shaded]\n .shape[0] == 4)\n","sub_path":"pvfactors/tests/test_array_geometry.py","file_name":"test_array_geometry.py","file_ext":"py","file_size_in_byte":5221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"511203832","text":"import pymysql\nimport paramiko\nimport pandas as pd\nfrom sshtunnel import SSHTunnelForwarder\nfrom os.path import expanduser\nimport datetime\nfrom datetime import timedelta\n# SSH tunnel and connection setup - connection parameters for the Main-DB-Read main DB\nmypkey = paramiko.RSAKey.from_private_key_file(\"D:\\\\aws\\\\us-west-spider.pem\")\nsql_hostname = \"127.0.0.1\"\nsql_username = \"read_only\"\nsql_password = \"eland1234\" # sensitive passwords normally stay out of this file, but this database uses the internal conventional one\nsql_main_database = 'dmp_stat'\nsql_port = 3306\nssh_host = \"54.212.237.220\"
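 # SSH bastion host; MySQL is reached through the tunnel opened in log_extractor below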
\"54.212.237.220\"\nssh_user = \"root\"\nssh_port = 22\n\n\n# 先 define 一個方便輸入日期範圍的功能,供後續主要功能- log_extractor 使用\ndef DateFormatter(date_string):\n '''\n :param date_string: input date in format as 20180510\n :return: datetime library readied date format\n '''\n year = str(date_string)[:4]\n month = str(date_string)[4:6]\n day = str(date_string)[-2:]\n if month[0] == 0:\n month = str(date_string)[5:6]\n if day[0] == 0:\n day = str(date_string)[-1:]\n result = datetime.date(int(year), int(month), int(day))\n return result\n\n\n# 真正撈資料的功能 input: 時間範圍、需要欄位, url_pattern也就是要 url like 的 pattern;output: 指定的 log 存成 pandas dataframe\ndef log_extractor(date_range, columns_needed, **kwargs):\n '''\n :param date_range: input the date range in format of [yyyymmdd, yyyymmdd] ex: [20190310,20190311]\n :param columns_needed: input all the columns needed as column1, column2, ...\n 還是得記得去改下面 query 以符合自己的需要唷(where 後面的語句),這邊支援 datetime的使用,會用SQL的方式自己把\n 需要的日期串上去(因為 dmp_stat 裡面是沒有日期的) ex: ['datetime', 'url', 'uid']\n :param url_pattern: 如果有要使用 url_pattern 撈 log, 則需指定 url_pattern = \"www.kimy.com.tw\"`,\n 實際上執行 query 時則會以 url like \"%www.kimy.com.tw%\" 進行\n :return: dataframe with the log file input\n '''\n print(\"The mission starts at... \" + str(datetime.datetime.now()))\n url_pattern = kwargs.get('url_pattern', '')\n d1 = DateFormatter(date_range[0])\n d2 = DateFormatter(date_range[1])\n delta = d2 - d1\n result_dataframe = pd.DataFrame()\n columns = str()\n if 'datetime' in columns_needed:\n columns_needed.remove('datetime')\n for j in columns_needed:\n columns += '\\'' + j + '\\','\n columns = columns[:-1]\n columns = columns.replace(\"\\'\", \"`\")\n for i in range(delta.days + 1):\n date = d1 + timedelta(i)\n with SSHTunnelForwarder(\n (ssh_host, ssh_port),\n ssh_username=ssh_user,\n ssh_pkey=mypkey,\n remote_bind_address=(sql_hostname, sql_port)) as tunnel:\n conn = pymysql.connect(host='127.0.0.1', user=sql_username,\n passwd=sql_password, db=sql_main_database,\n port=tunnel.local_bind_port)\n query = \"select STR_TO_DATE(CONCAT(%s,' ',`hour`,':',`minute`,':',`second`), '%%Y%%m%%d %%H:%%i:%%s') AS `datetime`, %s from dmp_stat.%s where `url` like \\\"%%%s%%\\\"\" % (\n str(date).replace(\"-\", \"\"), columns, str(date).replace(\"-\", \"\"), str(url_pattern))\n data = pd.read_sql_query(query, conn)\n result_dataframe = result_dataframe.append(data, ignore_index=True)\n conn.close()\n print(\"The mission ends at... \" + str(datetime.datetime.now()))\n return result_dataframe\n else:\n columns = str()\n for j in columns_needed:\n columns += '\\'' + j + '\\','\n columns = columns[:-1]\n columns = columns.replace(\"\\'\", \"`\")\n for i in range(delta.days + 1):\n date = d1 + timedelta(i)\n with SSHTunnelForwarder(\n (ssh_host, ssh_port),\n ssh_username=ssh_user,\n ssh_pkey=mypkey,\n remote_bind_address=(sql_hostname, sql_port)) as tunnel:\n conn = pymysql.connect(host='127.0.0.1', user=sql_username,\n passwd=sql_password, db=sql_main_database,\n port=tunnel.local_bind_port)\n\n query = '''select %s from dmp_stat.%s where `url` like \\\"%%%s%%\\\"''' % (\n columns, str(date).replace(\"-\", \"\"), str(url_pattern))\n data = pd.read_sql_query(query, conn)\n result_dataframe = result_dataframe.append(data, ignore_index=True)\n conn.close()\n print(\"The mission ends at... 
\" + str(datetime.datetime.now()))\n return result_dataframe\n","sub_path":"link to sql.py","file_name":"link to sql.py","file_ext":"py","file_size_in_byte":4807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"341597402","text":"from bottle import *\n\nfrom JwcLoginHelper import JwcLoginHelper\nfrom RuijieHelper import RuijieHelper\n\n@get(\"/jwc\")\ndef JwcLoginHander():\n stuid = request.params.get('stuid')\n pswd = request.params.get('pswd')\n host = request.params.get('host')\n\n if stuid == None or pswd == None or host == None:\n return \"Invalid params\"\n\n result = JwcLoginHelper(stuid, pswd, host).login()\n while(result['errcode'] == 3):\n result = JwcLoginHelper(stuid, pswd, host).login()\n\n return result\n\n@get(\"/rj\")\ndef RuijieLoginHandler():\n stuid = request.params.get('stuid')\n pswd = request.params.get('pswd')\n\n if stuid == None or pswd == None:\n return \"Invalid params\"\n\n return RuijieHelper(stuid, pswd).login()\n\nif __name__ == '__main__':\n debug(True)\n run(host='localhost', port=80, reloader=True)\nelse:\n app = default_app()\n # import sae\n # application = sae.create_wsgi_app(app)\n from bae.core.wsgi import WSGIApplication\n application = WSGIApplication(app)\n","sub_path":"index.wsgi","file_name":"index.wsgi","file_ext":"wsgi","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"422638377","text":"import requests\nfrom requests.exceptions import RequestException\n\n\nclass MyAPIClient(object):\n \"\"\"A simple client for the public API.\"\"\"\n\n def __init__(self, api_url=None):\n \"\"\"Constructor.\"\"\"\n if api_url is None:\n from django.conf import settings\n self._api_url = settings.MY_API_PREFIX\n else:\n self._api_url = api_url\n\n def _send_request(self, url, params=None):\n \"\"\"Send a request to the API.\"\"\"\n if params is None:\n params = {}\n try:\n resp = requests.get(url, params=params)\n except RequestException:\n return None\n\n if resp.status_code != 200:\n return None\n\n return resp.json()\n\n def test_list(self):\n url = \"{}/mychecker/\".format(self._api_url)\n return self._send_request(url)\n","sub_path":"grodt_prj/lib/api_client.py","file_name":"api_client.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"119703638","text":"# python native modules\n\n# third-party modules\nfrom PyQt5.QtWidgets import *\n\n# labtool project modules\nfrom app.designer.window.window import Ui_LabToolWindow\nfrom app.designer.oscilloscope_settings.oscilloscope_settings_dialog import OscilloscopeSettingsDialog\nfrom app.designer.generator_settings.generator_settings_dialog import GeneratorSettingsDialog\nfrom app.designer.impedance.impedance_dialog import ImpedanceDialog\nfrom app.designer.bode.bode_dialog import BodeDialog\nfrom app.designer.output.output_dialog import OutputDialog\n\nfrom labtool.algorithm.bode_algorithm import BodeAlgorithm\nfrom labtool.algorithm.impedance_algorithm import ImpedanceAlgorithm\n\nfrom labtool.base.instrument import InstrumentType\nfrom labtool.tool import LabTool\n\n# Installing devices...(?)\nfrom labtool.oscilloscope.agilent.agilent_dso6014A import AgilentDSO6014A\nfrom labtool.oscilloscope.agilent.agilent_dso7014A import AgilentDSO7014A\nfrom labtool.oscilloscope.rigol.rigol_ds4014 import RigolDS4014\n\nfrom labtool.generator.agilent.agilent_33220a import Agilent33220A\n\n\nclass 
MainWindow(QMainWindow, Ui_LabToolWindow):\n\n def __init__(self, *args, **kwargs):\n super(MainWindow, self).__init__(*args, **kwargs)\n self.setupUi(self)\n self.setWindowTitle(\"LabTool\")\n self.setFixedSize(self.size())\n\n # MainWindow internal members\n self.widget_map = {\n \"DEVICE_CONNECTION\": 0,\n \"MAIN_MENU\": 1\n }\n\n self.connected_devices = []\n self.oscilloscope = None\n self.generator = None\n\n # Children Dialogs\n self.oscilloscope_settings_dialog = OscilloscopeSettingsDialog()\n self.generator_settings_dialog = GeneratorSettingsDialog()\n self.bode_dialog = BodeDialog()\n self.impedance_dialog = ImpedanceDialog()\n\n # Slot and signal connections\n self.refresh.clicked.connect(self.on_refresh)\n self.connection.clicked.connect(self.on_connect)\n self.disconnection.clicked.connect(self.on_disconnect)\n self.oscilloscope_settings.clicked.connect(self.on_oscilloscope_settings)\n self.generator_settings.clicked.connect(self.on_generator_settings)\n self.measure_bode.clicked.connect(self.on_bode)\n self.measure_input_impedance.clicked.connect(self.on_impedance)\n\n self.go_device_connection()\n\n ##############################\n # General MainWindow Methods #\n ##############################\n def go_device_connection(self):\n self.stackedWidget.setCurrentIndex(self.widget_map[\"DEVICE_CONNECTION\"])\n\n def go_main_menu(self):\n self.stackedWidget.setCurrentIndex(self.widget_map[\"MAIN_MENU\"])\n\n ######################################\n # MainWindow Device Connection Slots #\n ######################################\n def on_refresh(self):\n \"\"\" Refreshing the oscilloscope and generator devices list \"\"\"\n self.connected_devices = []\n self.oscilloscopes.clear()\n self.generators.clear()\n\n # Status flags\n excluded_devices = 0\n\n # Updating the oscilloscope and generator detected devices\n devices = LabTool.get_devices()\n for device in devices:\n device_info = LabTool.get_device_information(device)\n device_type = LabTool.is_device_detected(device_info)\n if device_type is InstrumentType.Generator or device_type is InstrumentType.Oscilloscope:\n self.connected_devices.append(\n {\n \"device-id\": device,\n \"device-type\": device_type,\n \"device-brand\": device_info[\"brand\"],\n \"device-model\": device_info[\"model\"],\n \"device-series-number\": device_info[\"series-number\"],\n \"device-uid\": \"{} - {} - {}\".format(\n device_info[\"brand\"],\n device_info[\"model\"],\n device_info[\"series-number\"]\n )\n }\n )\n else:\n excluded_devices += 1\n\n # Loading devices to the GUI List Widgets\n self.generators.addItems(\n [\n connected_device[\"device-uid\"]\n for connected_device in self.connected_devices\n if connected_device[\"device-type\"] is InstrumentType.Generator\n ]\n )\n\n self.oscilloscopes.addItems(\n [\n connected_device[\"device-uid\"]\n for connected_device in self.connected_devices\n if connected_device[\"device-type\"] is InstrumentType.Oscilloscope\n ]\n )\n\n # UserInterface messages\n if not len(devices):\n self.connection_status.setText(\n \"No device connections detected!\"\n )\n elif excluded_devices:\n self.connection_status.setText(\n \"\"\"{} device connection(s) excluded because the instrument was not detected.\n Only supported devices will be detected!\"\"\".format(\n excluded_devices\n )\n )\n\n # Verifying if there are enough instruments selected to continue with the program\n self.connection.setEnabled(len(self.oscilloscopes.currentText()) and len(self.generators.currentText()))\n\n def on_connect(self):\n 
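# look the selected UID strings up among the connected devices and open them\n 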
self.open_device_by_uid(self.oscilloscopes.currentText())\n        self.generator = self.open_device_by_uid(self.generators.currentText())\n\n        self.oscilloscope_used.setText(self.oscilloscopes.currentText())\n        self.generator_used.setText(self.generators.currentText())\n\n        self.go_main_menu()\n\n    def open_device_by_uid(self, uid: str):\n        for connected_device in self.connected_devices:\n            if connected_device[\"device-uid\"] == uid:\n                return LabTool.open_device_by_id(connected_device[\"device-id\"])\n\n    ##############################\n    # MainWindow Main Menu Slots #\n    ##############################\n    def on_disconnect(self):\n        self.oscilloscope.close()\n        self.generator.close()\n        self.oscilloscope = self.generator = None\n        self.go_device_connection()\n\n    def on_oscilloscope_settings(self):\n        self.oscilloscope_settings_dialog.exec()\n\n    def on_generator_settings(self):\n        self.generator_settings_dialog.exec()\n\n    def on_bode(self):\n        if self.bode_dialog.exec():\n            algorithm = BodeAlgorithm(\n                self.oscilloscope,\n                self.generator,\n                self.bode_dialog.make_requirements(),\n                self.oscilloscope_settings_dialog.make_channel_setup(),\n                self.oscilloscope_settings_dialog.make_trigger_setup(),\n                self.oscilloscope_settings_dialog.make_acquire_setup(),\n                self.oscilloscope_settings_dialog.make_timebase_setup(),\n                self.generator_settings_dialog.make_generator_setup(),\n                self.generator_settings_dialog.make_preferences_setup()\n            )\n\n            dialog = OutputDialog(algorithm)\n            dialog.exec()\n\n    def on_impedance(self):\n        if self.impedance_dialog.exec():\n            algorithm = ImpedanceAlgorithm(\n                self.oscilloscope,\n                self.generator,\n                self.impedance_dialog.make_requirements(),\n                self.oscilloscope_settings_dialog.make_channel_setup(),\n                self.oscilloscope_settings_dialog.make_trigger_setup(),\n                self.oscilloscope_settings_dialog.make_acquire_setup(),\n                self.oscilloscope_settings_dialog.make_timebase_setup(),\n                self.generator_settings_dialog.make_generator_setup(),\n                self.generator_settings_dialog.make_preferences_setup()\n            )\n\n            dialog = OutputDialog(algorithm)\n            dialog.exec()\n\n\nif __name__ == \"__main__\":\n    app = QApplication([])\n    window = MainWindow()\n    window.show()\n    app.exec()\n","sub_path":"app/designer/window/main_window.py","file_name":"main_window.py","file_ext":"py","file_size_in_byte":8068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"183550643","text":"#!/bin/env python\n# -*- coding:utf8 -*-\nimport json\nimport os\nimport sys\nimport logging\nimport commands\nimport nginx\nfrom infodate.softinfo import *\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\n# Configure the logging level and format\nLogFormat='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s'\nlogging.basicConfig(level=logging.INFO,format=LogFormat)\n\ndef is_running_ng():\n    \"\"\"Instantiate fifter; if the cmd command fails, it returns 'the cmd command did not execute successfully, please check the input'\"\"\"\n    cmdline = fifter(\"ps axu|grep nginx|grep master|grep -v grep\")\n    return cmdline\n\ndef get_runing_pid():\n    \"\"\"When the server runs several nginx instances, collect each master pid into pidList\"\"\"\n    pidList = []\n    cmdline = is_running_ng()\n    result = cmdline.lookfor\n    for i in result:\n        pidList.append(i[1])\n    return pidList\n\ndef get_fifter_object():\n    \"\"\"\n    Initialize the fifter class from the softinfo package\n    :return:\n    \"\"\"\n    ps = fifter(\"ps axu|grep nginx|grep master|grep -v grep\")\n    return ps\n\ndef get_bin_file(pid):\n    \"\"\"\n    Get the absolute path of the sbin executable of the running nginx\n    :param pid:\n    :return:\n    \"\"\"\n    bin_file = get_fifter_object().exe(pid)\n    return bin_file\n\ndef get_nginx_conf(pid):\n    \"\"\"\n    Get the absolute path of the nginx.conf configuration file of the running nginx from its pid\n    :param pid: pid of the running master process\n    :return: absolute path of nginx.conf\n    
\"\"\"\n nginx_sbin = get_bin_file(pid)\n nginx_conf = nginx_sbin.replace(nginx_sbin[-10:],\"conf/nginx.conf\")\n if not os.path.exists(nginx_conf):\n return None\n else:\n return nginx_conf\n\ndef get_nginx_user(pid):\n \"\"\"\n 根据pid获取运行中的nginx的启动用户\n :param pid:\n :return:\n \"\"\"\n nginx_user = get_fifter_object().username(pid)\n return nginx_user\n\ndef get_nginx_location(pid):\n \"\"\"\n nginx的安装路径\n :param pid:\n :return:\n \"\"\"\n ngxin_location = get_nginx_conf(pid)\n return ngxin_location[0:-15]\n\ndef get_nginx_version(pid):\n \"\"\"\n 获取nginx的版本号\n :param pid:\n :return:\n \"\"\"\n nginx_sbin = get_bin_file(pid)\n cmd_nginx_sbin = nginx_sbin + \" -v 2>&1\" + \"|grep 'nginx version'\"\n ex_nginx_sbin = commands.getstatusoutput(cmd_nginx_sbin)\n checkCode(ex_nginx_sbin[0], cmd_nginx_sbin, ex_nginx_sbin[1])\n return ex_nginx_sbin[1][15:]\n\ndef remove_irr_file(conf):\n \"\"\"\n 通过commands模块对于nginx的配置文件进行去除空行和注释内容,生产临时文件存放在/tmp目录下\n :param conf: 输入通过get_nginx_conf()函数获取到的nginx配置文件路径\n :return: 临时文件的据对路径\n \"\"\"\n conf = \"/opt/nginx/conf/nginx.conf.bak\"\n cmdline = \"grep -v \\# \" + conf + \"|sed /^$/d\"\n _ex_cmdline = commands.getstatusoutput(cmdline)\n checkCode(_ex_cmdline[0], cmdline, _ex_cmdline[1])\n import datetime\n i = datetime.datetime.now()\n nginx_conf = \"/tmp/nginx_file\" + str(i.isoformat())\n if not os.path.exists(nginx_conf):\n os.mknod(nginx_conf)\n with open(nginx_conf, 'w') as f:\n f.write(_ex_cmdline[1])\n return nginx_conf\n\nclass analyze_nginx_conf():\n\n def __init__(self, ngxin_conf):\n self.ngxin_conf = ngxin_conf\n\n # @property\n # def init_analyze_conf(self):\n # \"\"\"\n # 初始化nginx.conf配置文件,格式化文件内容返回\n # :return:\n # \"\"\"\n # init_analyze = nginx.loadf(self.ngxin_conf)\n # return init_analyze\n\n def analyze_http(self, iniAnalyze):\n \"\"\"\n 获取nginx配置文件中http语句块中的所有内容\n :param iniAnalyze:\n :return:\n \"\"\"\n iniAna_dict = iniAnalyze.as_dict\n for http in iniAna_dict['conf']:\n for key,value in http.items():\n if key == 'http ':\n return http[key]\n\n def http_remove_irregular(self, ana_http):\n \"\"\"\n 对于nginx的配置文件可以有多个server,对不同的server进行加数据重新命名\n :param ana_http:\n :return:\n \"\"\"\n server_part = {}\n server_nu = 1\n for key in ana_http:\n for k,v in key.items():\n if not isinstance(key[k], list):\n server_part[k] = v\n elif isinstance(key, dict):\n if k == 'server':\n dict_key = k + str(server_nu)\n server_nu += 1\n for key1 in v:\n for ke1, va1 in key1.items():\n if not isinstance(key1[ke1], dict):\n server_part[dict_key] = v\n return server_part, server_nu\n\n def get_all_server(self, conf):\n \"\"\"\n 获取nginx配置文件中所有的server语句块的内容\n :param conf:\n :return:\n \"\"\"\n conf_http, server_nu = conf\n server = []\n if isinstance(conf_http, dict):\n for k, v in conf_http.items():\n for nu in range(server_nu):\n if k == ('server' + str(nu)):\n server.append(conf_http[k])\n return server\n\n def get_all_conf(self):\n\n _comm = \"cat \" + self.ngxin_conf\n _ex_comm = commands.getstatusoutput(_comm)\n checkCode(_ex_comm[0], _comm, _ex_comm[1])\n return _ex_comm[1]\n\n def get_all_port(self):\n\n _comm_text = \"grep \\\"listen\\\" \" + self.ngxin_conf + \"|grep -v \\#\"\n _ex_comm_text = commands.getstatusoutput(_comm_text)\n checkCode(_ex_comm_text[0], _comm_text, _ex_comm_text[1])\n port_part = [ x.replace(\";\",\"\") for x in _ex_comm_text[1].split(\"\\n\")]\n port = [ p.split()[-1] for p in port_part ]\n return port\n\n\n# class set_param2json():\n#\n# def get_server_part(self, server_part, server_num):\n# \"\"\"\n# 
Get the server block content from the nginx configuration file; each server is one list\n#         :param server_part:\n#         :param server_num:\n#         :return:\n#         \"\"\"\n#         server = []\n#         for i in range(server_num - 1):\n#             server_name = \"server\" + str(i + 1)\n#             server.append(server_part[server_name])\n#         return server\n#\n#     def get_server_port(self, server_list):\n#         \"\"\"\n#         Get all listening ports, i.e. the listen fields, and return them as a list\n#         :param server_list:\n#         :return:\n#         \"\"\"\n#         server_port = []\n#         for server_part in server_list:\n#             for server in server_part:\n#                 for server_key , server_value in server.items():\n#                     if server_key == 'listen':\n#                         server_port.append(server_value)\n#         return server_port\n#\n#     def get_server_location(self, server_port, server_list):\n#         \"\"\"\n#         Return the location field information corresponding to a listening port\n#         :param server_port:\n#         :param server_list:\n#         :return:\n#         \"\"\"\n#         localtion = []\n#         for server_part in server_list:\n#             for key in server_part:\n#                 for server_key , server_value in key.items():\n#                     if server_value == str(server_port):\n#                         for key1 in server_part:\n#                             for server_key1, server_value1 in key1.items():\n#                                 if \"location\" in server_key1:\n#                                     localtion.append(key1)\n#         return localtion\n\n\n\n    # def get_localtion_proxypass(self, localtion):\n    #     \"\"\"\n    #     Get all proxy_pass field information inside the location blocks\n    #     :param localtion:\n    #     :return:\n    #     \"\"\"\n    #     proxy_pass = []\n    #     for key in localtion:\n    #         for server_key, server_value in key.items():\n    #             for k in server_value:\n    #                 for k_key, k_value in k.items():\n    #                     if k_key == 'proxy_pass':\n    #                         proxy_pass.append(k_value)\n    #     print proxy_pass\n\n    # def get_port_of_server(self, port, server_list):\n    #     \"\"\"\n    #     Given a port number taken from the configuration file, match it against the listen field of the server blocks and return the location blocks of the whole matching server\n    #     :param port:\n    #     :param server_list:\n    #     :return:\n    #     \"\"\"\n    #     for server_part in server_list:\n    #         for server in server_part:\n    #             for server_key , server_value in server.items():\n    #                 if server_key == 'listen':\n    #                     if server[server_key] == port:\n    #                         return server_part\n\n\n# Main program entry point\nif __name__ == \"__main__\":\n    # Get the pids of the running nginx master processes\n    pid = get_runing_pid()\n    NginxInfo = []\n    for pidnu in pid:\n        user = get_nginx_user(pidnu)\n        BinFile = get_bin_file(pidnu)\n        AppName = \"nginx\"\n        InstallDir = get_nginx_location(pidnu)\n        ng_version = get_nginx_version(pidnu)\n        ng_conf = get_nginx_conf(pidnu)\n        # Strip blank lines and comments from the nginx.conf file via the commands module\n        nginx_conf_irr = remove_irr_file(ng_conf)\n        init_nginx_conf = analyze_nginx_conf(nginx_conf_irr)\n        # Format the nginx.conf configuration file via the python-nginx module\n        # nginx_module_loadf = init_nginx_conf.init_analyze_conf\n        # Get the contents of the http block in the configuration file\n        # get_nginx_http = init_nginx_conf.analyze_http(nginx_module_loadf)\n        # Number the different server blocks inside the http block\n        # get_nginx = init_nginx_conf.http_remove_irregular(get_nginx_http)\n\n        # init = set_param2json()\n        # server_all_part = init.get_server_part(get_nginx[0], get_nginx[1])\n        # server_all_port = init.get_server_port(server_all_part)\n        port_list = init_nginx_conf.get_all_port()\n\n        for port in port_list:\n            server = init_nginx_conf.get_all_conf()\n            PartInfo = {\n                \"user\": user,\n                \"BinFile\" : BinFile,\n                \"AppName\" : AppName,\n                \"port\" : port,\n                \"InstallDir\" : InstallDir,\n                \"version\" : ng_version,\n                \"NginxConf\" : ng_conf,\n                \"server\" : server\n            }\n            NginxInfo.append(PartInfo)\n    sys.stdout.write(json.dumps(NginxInfo, sort_keys=True, indent=4, separators=(',', ': '), encoding='utf8',\n                                ensure_ascii=True))\n    # Delete the temporary file left under /tmp by the commands module\n    
os.remove(nginx_conf_irr)\n","sub_path":"nginx/get_nginx_ci_backup.py","file_name":"get_nginx_ci_backup.py","file_ext":"py","file_size_in_byte":10555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"629268384","text":"\r\nimport pandas as pd\r\nimport numpy as np\r\nimport dash\r\nfrom dash.dependencies import Input, Output\r\nimport dash_core_components as dcc\r\nimport dash_html_components as html\r\n\r\n\r\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\r\n\r\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\r\n\r\nurl = 'https://raw.githubusercontent.com/AlainKuiete/DATA608ASSINGMENTS/master/Global_Mobility_Report.csv'\r\n\r\nmobility = pd.read_csv(url)\r\n\r\nmobility.columns = ['country_code', 'country_region', 'region1', 'region2', 'date', \r\n       'recreation', 'grocery', 'parks', 'transit', 'workplaces', 'residentials']\r\n\r\nmobile = mobility.copy()\r\ncr_code = mobile['country_code'].unique()\r\ncountry_region = mobile['country_region'].dropna().unique()\r\nmobile['region1'].fillna(\"NA\", inplace=True)\r\nsub_region1 = mobile['region1'].dropna().unique()\r\nmobile['region2'].fillna(\"NA\", inplace=True)\r\nsub_region2 = mobile['region2'].dropna().unique()\r\n\r\n'''places = {'recreation': 'retail_and_recreation_percent_change_from_baseline',\r\n         'grocery': 'grocery_and_pharmacy_percent_change_from_baseline',\r\n        'parks': 'parks_percent_change_from_baseline',\r\n        'transit': 'transit_stations_percent_change_from_baseline',\r\n        'workplaces': 'workplaces_percent_change_from_baseline',\r\n        'residentials': 'residential_percent_change_from_baseline'\r\n        }\r\n'''\r\nplaces = ['recreation', 'grocery', 'parks', 'transit', 'workplaces', 'residentials']\r\n# Bootstrap CSS.\r\napp.layout = html.Div(\r\n    html.Div([\r\n        html.Div(\r\n            [\r\n                html.H1(children='Community Mobility Report',\r\n                        className='nine columns'),\r\n                html.Img(\r\n                    src=\"https://icons8.com/icon/CGf5JvobsqEu/coronavirus\",\r\n                    className='three columns',\r\n                    style={\r\n                        'height': '9%',\r\n                        'width': '9%',\r\n                        'float': 'right',\r\n                        'position': 'relative',\r\n                        'padding-top': 0,\r\n                        'padding-right': 0\r\n                    },\r\n                ),\r\n                html.Div(children='''\r\n                    Each Community Mobility Report dataset is presented by location and highlights the percent change in\r\nvisits to places like grocery stores and parks within a geographic area.\r\n\r\n                ''',\r\n                        className='nine columns'\r\n                )\r\n            ], className=\"row\"\r\n        ),\r\n\r\n        html.Div(\r\n            [\r\n                html.Div(\r\n                    [\r\n                        html.P('Choose Country:'),\r\n                        dcc.Dropdown(\r\n                            id = 'CountryRegion',\r\n                            options = [{'label': i, 'value': i} for i in country_region],\r\n                            value='United States'\r\n                        ),\r\n                    ],style={'margin-top': '10', 'width': '24%', 'display': 'inline-block'}, className = \"three columns\",\r\n                    \r\n                ),\r\n                html.Div(\r\n                    [\r\n                        html.P('Choose Region:'),\r\n                        \r\n                        dcc.Dropdown(\r\n                            id='SubRegion1',\r\n                            options= [{'label': i, 'value': i} for i in sub_region1],\r\n                            value='New York'\r\n                        ), \r\n                        \r\n                    ],style={'margin-top': '10', 'width': '24%', 'display': 'inline-block'}, className = \"three columns\",\r\n                ),\r\n\r\n                html.Div(\r\n                    [\r\n                        html.P('Choose Sub-Region:'),\r\n                        dcc.Dropdown(\r\n                            id = 'SubRegion2',\r\n                            options= [{'label': k, 'value': k} for k in sub_region2],\r\n                            value='Bronx County',\r\n                        ),\r\n                    ],style={'width': '24%', 'display': 'inline-block'}, className = \"three columns\",\r\n                ),\r\n                \r\n            ], className=\"row\"\r\n        ),\r\n\r\n\r\n        html.Div([\r\n            html.P('Choose Places:'),\r\n            dcc.Checklist(\r\n                    id = 'Places',\r\n                    
options=[\r\n {'label': l, 'value': l } for l in places\r\n ], \r\n ),\r\n ], style={'margin-top': '10'}, className = 'row'\r\n ),\r\n\r\n html.Div(\r\n [\r\n html.Div([\r\n dcc.Graph(\r\n id='retail-graph'\r\n )\r\n ], className= 'six columns'\r\n ),\r\n\r\n html.Div([\r\n dcc.Graph(\r\n id='grocery-graph'\r\n )\r\n ], className= 'six columns'\r\n )\r\n ], className=\"row\"\r\n )\r\n ], className='ten columns offset-by-one')\r\n)\r\n\r\n@app.callback(\r\n Output('SubRegion1', 'options'),\r\n [Input('CountryRegion', 'value')])\r\ndef set_region1_options(selected_country):\r\n mobile['region1'].fillna(\"NA\", inplace=True)\r\n return [{'label': i, 'value': i} for i in mobile[mobile[\"country_region\"]==selected_country]['region1'].dropna().unique()]\r\n\r\n@app.callback(\r\n Output('SubRegion1', 'value'),\r\n [Input('SubRegion1', 'options')])\r\ndef set_region1_value(available_options):\r\n return available_options[0]['value']\r\n\r\n@app.callback(\r\n Output('SubRegion2', 'options'),\r\n [Input('SubRegion1', 'value')])\r\ndef set_region2_options(selected_region):\r\n mobile['region1'].fillna(\"NA\", inplace=True)\r\n return [{'label': i, 'value': i} for i in mobile[mobile[\"region1\"]==selected_region]['region2'].dropna().unique()]\r\n\r\n@app.callback(\r\n Output('SubRegion2', 'value'),\r\n [Input('SubRegion2', 'options')])\r\ndef set_region2_value(available_options):\r\n return available_options[0]['value']\r\n\r\n\r\n@app.callback(\r\n Output('retail-graph', 'figure'),\r\n [Input('SubRegion1', 'value'),\r\n Input('Places', 'value'),\r\n ])\r\ndef update_graph(region, place):\r\n\r\n \r\n cmobility = mobility[mobility[\"region2\"] == region]\r\n cmobility.reset_index(inplace=True)\r\n cmobility.set_index('date', inplace=True)\r\n y = cmobility[place]\r\n data = [\r\n {'x': cmobility.index, 'y': y, 'type': 'line', 'name': 'Global_Mobility_Report'\r\n\r\n },\r\n ]\r\n\r\n figure = {\r\n 'data': data,\r\n 'layout': {\r\n 'title': 'Mobility Trend',\r\n 'xaxis' : dict(\r\n title='x Axis',\r\n titlefont=dict(\r\n family='Courier New, monospace',\r\n size=20,\r\n color='#7f7f7f'\r\n )),\r\n 'yaxis' : dict(\r\n title='y Axis',\r\n titlefont=dict(\r\n family='Helvetica, monospace',\r\n size=20,\r\n color='#7f7f7f'\r\n ))\r\n }\r\n }\r\n return figure \r\n\r\nif __name__ == '__main__':\r\n app.run_server(debug=True)\r\n","sub_path":"project2.py","file_name":"project2.py","file_ext":"py","file_size_in_byte":7294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"526844481","text":"#!/usr/bin/env python\n\nimport sys\n\n# Usage\nif len(sys.argv) != 3:\n sys.exit(\"Usage: python {} file.[fastq|fq] part_id 1>file.part\".format(sys.argv[0]))\n\n# Get FASTQ file\nfastq_file_name = sys.argv[1]\nif not fastq_file_name.endswith(\".fastq\") and not fastq_file_name.endswith(\".fq\"):\n sys.exit(\"FASTQ file name must end with either '.fastq' or '.fq'\")\nfastq_file = open(fastq_file_name, 'r')\n\n# Get part ID\npart_id = int(sys.argv[2])\nif not 0 <= part_id <= 3:\n sys.exit(\"part_id must be in range [0,3]\")\n\n# Write part to stdout\nwhile 1:\n sequence_identifier = fastq_file.readline().rstrip('\\n')\n if not sequence_identifier:\n break # reached end of file, everything ok\n raw_sequence = fastq_file.readline().rstrip('\\n')\n description = fastq_file.readline().rstrip('\\n')\n quality_scores = fastq_file.readline().rstrip('\\n')\n\n if part_id == 0:\n sys.stdout.write(sequence_identifier + \"\\n\")\n if part_id == 1:\n sys.stdout.write(raw_sequence 
+ \"\\n\")\n if part_id == 2:\n sys.stdout.write(description + \"\\n\")\n if part_id == 3:\n sys.stdout.write(quality_scores + \"\\n\")\n\nsys.exit()\n","sub_path":"src/ngstools/xtract_part_fastq.py","file_name":"xtract_part_fastq.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"365473095","text":"'''\nPopulate the currency object to save currencies in local storage\nAvoid making constant requests to API\n'''\nimport json\nimport requests\nimport os\nimport codecs\n\ncurr_blacklist = [\n 'congo',\n 'congo, democratic republic',\n 'new caledonia',\n 'macedonia (former yug. rep.)',\n 'saint lucia',\n 'saint vincent and the grenadines',\n 'wallis and futuna islands'\n]\ncurr_addlist = [\n 'europe'\n]\nif __name__ == '__main__':\n json_obj = \"{}\\\\{}\".format(os.getcwd(), 'curr_obj.json')\n curr_img = '{}\\\\icons\\\\country'.format(os.getcwd())\n apikey = \"c262c6631c49037b623c\"\n curr_obj = {}\n\n response = requests.get('https://free.currconv.com/api/v7/countries?apiKey=' + apikey).json()\n \n for key, val in response['results'].items():\n currency_name = val['name'].lower()\n if(currency_name in curr_blacklist):\n continue\n\n for img in os.listdir(curr_img):\n img_path = img\n img_name = img_path.split('.')[0]\n #convert dashes back into spaces\n if \"-\" in img_name:\n img_name = img_name.replace('-', ' ')\n if (currency_name == img_name):\n curr_img_path = img_path\n\n curr_obj[val['name']] = {\n 'currencySymbol' : val['currencySymbol'],\n 'currencyName' : val['currencyName'],\n 'currencyID' : val['currencyId'],\n 'name': val['name'],\n 'img_path': curr_img_path \n }\n \n curr_obj['Europe'] = {\n 'currencySymbol' : \"€\",\n \"currencyName\": \"European euro\",\n \"currencyID\": \"EUR\",\n \"name\": \"Europe\",\n \"img_path\": \"european-union.png\"\n\n }\n \n with codecs.open(json_obj, 'w', encoding='utf-8') as f:\n json.dump(curr_obj, f, ensure_ascii=False, sort_keys=True)\n \n \n","sub_path":"lib/json_pop/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"5177889","text":"import json as j\nimport requests\n\nwith open ('/Users/jacksonoppenheim/Documents/MTGRNN/RNA/RNA2.json', 'r') as f:\n RNA_dict = j.loads(f.read())\n\ndef extract_values(obj, key):\n \"\"\"Pull all values of specified key from nested JSON.\"\"\"\n arr = []\n\n def extract(obj, arr, key):\n \"\"\"Recursively search for values of key in JSON tree.\"\"\"\n if isinstance(obj, dict):\n for k, v in obj.items():\n if isinstance(v, (dict, list)):\n extract(v, arr, key)\n elif k == key:\n arr.append(v)\n elif isinstance(obj, list):\n for item in obj:\n extract(item, arr, key)\n return arr\n\n results = extract(obj, arr, key)\n return results\n\nnames = extract_values('RNA2.json', 'name')\n\nendpoint = '/Users/jacksonoppenheim/Documents/MTGRNN/RNA/RNA2.json'\n\nr = requests.get(endpoint)\ncard_values = extract_values(r.json(), 'name')\n\nprint(card_values)\n","sub_path":"Cards/RNA/complexRNA.py","file_name":"complexRNA.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"} +{"seq_id":"297717285","text":"from django.template import Library\nfrom django.template.loader import get_template\n\nregister = Library()\n\n\n@register.simple_tag\ndef djandgo_pagination(orders):\n pages_range = [number for number in 
range(orders.number - 1, orders.number + 10)\n                   if 0 < number <= orders.paginator.num_pages]\n    return get_template('desktop/paginator/paginator.html').render(\n        {\n            'orders': orders,\n            'pages_range': pages_range\n        }\n    )\n","sub_path":"desktop/templatetags/djandgo_pagination.py","file_name":"djandgo_pagination.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
{"seq_id":"263736511","text":"def isprime(n):\r\n    n = abs(int(n))\r\n    if n < 2:\r\n        return False\r\n    if n == 2:\r\n        return True\r\n    if not n & 1:\r\n        return False\r\n    for x in range(3, int(n**0.5)+1, 2):\r\n        if n % x == 0:\r\n            return False\r\n    return True\r\n\r\n\r\nwhile True:\r\n    numberFloatString = input()\r\n    if numberFloatString == '0.0':\r\n        break\r\n    temp = \"\"\r\n    for i in numberFloatString:\r\n        if i != '.':\r\n            temp += i\r\n    if isprime(int(temp[:2])) or isprime(int(temp[:3])) or isprime(int(temp[:4])):\r\n        print(\"TRUE\")\r\n    else:\r\n        print(\"FALSE\")\r\n","sub_path":"floatingPrime.py","file_name":"floatingPrime.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"603070677","text":"from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n\turl(r'portal/([0-9]+)/$', views.portal, name='portal'),\n\turl(r'uploads/([0-9]+)/([0-9]+)/$', views.uploads, name='uploads'),\n\turl(r'editfile/(?P<pk>[0-9]+)/$', views.UpdateFile.as_view(), name='editfile'),\n\turl(r'deletefile/(?P<pk>[0-9]+)/$', views.DeleteFile.as_view(), name='delfile'),\n\turl(r'quasigrader/([0-9]+)/$', views.quasigrader, name='quasigrader'),\n\turl(r'idlewarn/$', views.idlewarn, name='idlewarn'),\n\turl(r'leaderboard/$', views.leaderboard, name='leaderboard'),\n\turl(r'downloads/(?P<pk>[0-9]+)/$', views.DownloadListView.as_view(), name='downloads'),\n\turl(r'past/$', views.past, name='past'),\n\turl(r'index/$', views.index, name='index'),\n]\n","sub_path":"dashboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"175345423","text":"\"\"\"\nThis program uses the data by https://www.kaggle.com/skihikingkevin/pubg-match-deaths (thanks for posting it)\nthe csv file contains: killed_by\tkiller_name killer_placement killer_position_x killer_position_y map match_id time victim_name victim_placement victim_position_x victim_position_y\nwritten by Maximilian Zangl 12.02.18\n\"\"\"\nimport numpy as np\nimport csv\nimport cv2\n\nMAP_PATH = \"erangel.jpg\"\nMAP_NAME =\"ERANGEL\"\nDATA_PATH = \"deaths\\kill_match_stats_final_0.csv\"\nTEST_PATH= \"tester.csv\"\nANNOTATION=\"2\"\n \n# functions\ndef extract_data(csv_data_path,t1,t2):\n    list_with_extracted_data = []\n    with open(csv_data_path,\"rb\") as csv_file:\n        reader = csv.reader(csv_file)\n        labels = reader.next() \n        kpi = labels.index(\"killer_placement\")\n        map_name = labels.index(\"map\")\n        time = labels.index(\"time\")\n        for row in reader:\n            #data gets filtered here:\n            if(row[kpi]!=\"1.0\" and row[map_name]==\"ERANGEL\" and t1<=int(row[time]) and int(row[time])<=t2):\n                list_with_extracted_data.append(row)\n    csv_file.close()\n    return list_with_extracted_data\n\ndef extract_death_coordinates(killed_by): \n    death_coordinates = np.zeros([len(killed_by),2])\n    i=0\n    for row in killed_by:\n        x = row[3] \n        y = row[4]\n        try:\n            death_coordinates[i]= [float(x),float(y)]\n        except ValueError:\n            death_coordinates[i]= [0,0]\n        
i=i+1\n    return death_coordinates\n\ndef transform_coordinates_map_to_pixels(deaths_coordinates):\n    f = 4096.0/812800.0 #map is 800000x800000 pixel grid of 8x8 image is 4096x4096\n    arr = np.multiply(deaths_coordinates,f)\n    deaths_pixels = arr.astype(np.int32)\n    return deaths_pixels\n\ndef draw_deaths_on_map(image_path,deaths_pixels,annotation):\n    map_grey = cv2.imread(image_path,0)\n    map = cv2.cvtColor(map_grey,cv2.COLOR_GRAY2RGB)\n    for row in deaths_pixels:\n        if (row[0]<4096 and row[1]<4096):\n            #cg = map_grey[row[1],row[0]]\n            red = 255\n            map[row[1],row[0]] = [0,0,red]\n    cv2.imwrite(\"marked\"+str(annotation)+\".png\",map)\n    return\ndef bin_timeframe (max_min,bins):#e.g.40min and bins=2: 0-20min 20-40min as array[bins,2]\n    step = max_min*60.0/bins\n    time_arr = np.zeros([bins,2],np.int32)\n    for i in range(bins):\n        time_arr[i,0] = i*step\n        time_arr[i,1] = (i+1)*step\n    return time_arr\n\ndef draw_with_time (max_mins,bins):\n    time_array = bin_timeframe(max_mins,bins)\n    for i in range(len(time_array)):\n        #print(i,time_array[i,0],time_array[i,1])\n        kbw=extract_data(DATA_PATH,time_array[i,0],time_array[i,1])\n        dc=extract_death_coordinates(kbw)\n        dp=transform_coordinates_map_to_pixels(dc)\n        annotation = \" \" + str(time_array[i,0]) +\"_\"+ str(time_array[i,1])\n        draw_deaths_on_map(MAP_PATH,dp,annotation)\n    return\n\n#main\ndraw_with_time(35,8)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"pubg.py","file_name":"pubg.py","file_ext":"py","file_size_in_byte":2997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"567104489","text":"'''\r\nDate: 19/01/2021 22.37.36 +0800\r\nAuthor: KnowsCount\r\nLastEditTime: 24/01/2021 11.02.16 +0800\r\nFilePath: /money-and-honour/src/modmerger_options.py\r\n'''\r\n# master options for modmerger framework\r\n# by sphere\r\n\r\n# -2 : print error only\r\n# -1 : print errors and warnings\r\n# 0 : print errors, warnings and info\r\n# 1 : print all\r\nDEBUG_MODE = -1\r\n\r\n# fill this in yourself with the module system you are using, so that some mods can make smarter decisions on how to merge with your source.\r\nmodule_sys_info = {\r\n    \"version\": 1127, # version number * 1000\r\n}\r\n\r\noptions = {\r\n\r\n    # for debugging. checked by modified process_scripts.py to show name of script being processed\r\n    \"process_scripts_show_script_name\": 0,\r\n}\r\n\r\n# List of active mod code names.\r\n# This is also the default order during bulk processing\r\n# The specific mod source files must be in the format \"{modname}_????.py\".\r\n# for example, the mod content corresponding to \"items\", for mod \"fc\" should be in the file \"fc_items.py\"\r\n\r\nmods_active = [\r\n    # insert the active mod names here\r\n    \"formations\", # motomataru's formations v3\r\n    \"formAI\", # motomataru's formations v3 AI extension. Comment this line out if you don't want ai to use formations\r\n    \"trees\",\r\n    \"freelancer\",\r\n    \"KAOSPolitical\", # KAOS Political (1.2) - Lazeras\r\n    \"pbod\",\r\n    \"KAOSBank\", # Kaos Bank (2.0)\r\n    \"yael_core\", ## ALWAYS NEEDED\r\n    \"yael_cattlefollow\",\r\n    \"yael_ransomtavern\",\r\n    \"yael_tournamentrewards\",\r\n    \"yael_arenarewards\",\r\n    \"yael_eqmgr\",\r\n    \"gpu\",\r\n]\r\n\r\n\r\n# Alternate process order for certain module components\r\n# Only need to be defined if order/combination is different from mods_active\r\n# Each element in it is a tuple with the following elements\r\n#\r\n# 1) mod component name (less the \"module_\" prefix), e.g. 
for \"module_items\", it will be \"items\"\r\n# 2) list of mod names in the order to be processed. The mod names should be\r\n# the ones used in mods_active, and will only be processed if they are in\r\n# mods_active.\r\n#\r\n\r\nmods_process_order = [\r\n # (\"{component_name}\", [{list of mod names}]),\r\n\r\n]\r\n\r\n\r\n# check and fill in defaults for certain required variables\r\ntry:\r\n module_sys_info[\"version\"]\r\nexcept KeyError:\r\n # assume version to be latest version that modmerger was tested on\r\n module_sys_info[\"version\"] = 1127\r\n","sub_path":"src/modmerger_options.py","file_name":"modmerger_options.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"} +{"seq_id":"82315938","text":"from django.shortcuts import render\nfrom django.views import View\nfrom django.conf import settings\nimport requests\nfrom .models import Profile\nfrom django.contrib.auth.models import User\nimport json\nfrom django.contrib.auth import login, logout\nimport pdb\nfrom django.http import HttpResponseRedirect, Http404\nfrom bs4 import BeautifulSoup\nimport urllib\nfrom .models import Product\n\n# Create your views here.\n\n\nclass handleCode(View):\n\n def get(self,request, *args, **kwargs):\n access_token=request.GET.get('access_token')\n url='https://steemconnect.com/api/oauth2/token'\n me_url='https://steemconnect.com/api/me'\n #data={'code':code, 'client_secret':settings.CLIENT_SECRET}\n #response=requests.post(url, data=data)\n #response=response.json()\n #username=response.get('username')\n #refresh_token=response.get('refresh_token')\n #access_token=response.get('access_token')\n headers={'Authorization': access_token}\n me_response=requests.get(me_url,headers=headers)\n me_response=me_response.json()\n #pdb.set_trace()\n username=me_response.get('account').get('name')\n posting_key=me_response.get('account').get('posting').get('key_auths')[0][0]\n active_key=me_response.get('account').get('active').get('key_auths')[0][0]\n memo_key=me_response.get('account').get('memo_key')\n\n try:\n user=User.objects.get(username=username)\n profile=Profile.objects.get(user=user)\n #profile.refresh_token=refresh_token\n profile.access_token=access_token\n profile.save()\n except User.DoesNotExist:\n user=User.objects.create_user(username=username)\n profile=Profile.objects.create(posting_key=posting_key, active_key=active_key,\n memo_key=memo_key, user=user, access_token=access_token)\n user=login(request, user)\n request.session['access_token'] = access_token\n return HttpResponseRedirect('/')\n\n\n #request.session['access_token']=response.get('access_token')\n #pdb.set_trace()\n\n #sportherald.pythonanywhere.com\n\n\ndef user_logout(request):\n if request.user.is_authenticated:\n logout(request)\n return HttpResponseRedirect('/')\n else:\n return Http404('Invalid url')\n\n\n\n\n\ndef search_cap_us(query='brush'):\n products = []\n header = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:32.0) Gecko/20100101 Firefox/32.0', }\n params = urllib.parse.urlencode({\"search_query\": query})\n search_url='http://shop.cap-us.com/search.php'\n response = requests.get(search_url, params, headers=header)\n soup = BeautifulSoup(response.content, \"html.parser\")\n product_lists = soup.find_all(\"li\", class_=\"ListView\")\n for item in product_lists:\n\n p = Product()\n div = item.find_all(\"div\", class_=\"ProductImage\")\n\n a = div[0].find_all(\"a\")\n p.image_url = a[0].img[\"src\"]\n p.manufacturer_url 
= a[0][\"href\"]\n #pdb.set_trace()\n detail_div = item.find_all(\"div\", class_=\"ProductDetails\")\n #pdb.set_trace()\n title_a = detail_div[0].find_all(\"a\")\n p.search_term=query\n description=item.find_all(\"p\", class_=\"ProductDescription\")\n #pdb.set_trace()\n p.details=description[0].text\n titleprice = title_a[0].text\n list_stuff=process_title(titleprice)\n p.title=list_stuff[0]\n p.price=list_stuff[1]\n p.save()\n\n\ndef process_title(titleprice):\n titlepricelist=titleprice.split('$')\n price=titlepricelist[1]\n titlepricelist[1]=float(price)\n #pdb.set_trace()\n return titlepricelist\n\n\n\ndef colparmer(query='brush'):\n scraped_products = []\n search_url='https://www.coleparmer.com/search'\n params = urllib.parse.urlencode({\"searchterm\": query})\n response = requests.get(search_url, params)\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n products_div=soup.find_all(\"div\", class_='eb-productListing')\n try:\n product_ul=products_div[0].find_all(\"ul\", recursive=False)\n except:\n return scraped_products\n product_lists=product_ul[0].find_all(\"li\", recursive=False)\n for product in product_lists:\n p = Product()\n title_div=product.find_all(\"div\", class_='eb-productname')\n atitle=title_div[0].find_all(\"a\")\n p.title=atitle[0].text\n p.manufacturer_url=atitle[0][\"href\"]\n img=product.find_all(\"img\", class_=\"lazy\")\n p.image_url=img[0]['data-original']\n #pdb.set_trace()\n #apiece=product.find_all(\"a\", class_=\"btn-add-cart\")\n #p.pieces=get_piece(apiece[0].text)\n price_range=product.find_all(\"span\", class_=\"price-range\")\n p.price=process_price(price_range[0])\n description=product.find_all(\"span\", class_=\"spec-list-value-box\")\n p.details='Product Type:'+ description[0].text\n scraped_products.append(p)\n return scraped_products\n\n\n\ndef get_piece(text):\n text_list=text.split(' ')\n return text_list[2]\n\ndef process_price(price_range):\n prices=price_range.text\n prices=prices.strip()\n prices_list=prices.split(' ')\n price=prices_list[0]\n price=price.replace('$','')\n price=float(price)\n return price\n\ndef diamonddental(query):\n scraped_products = []\n search_url = 'http://www.diamonddentalsupplies.com/search'\n params = urllib.parse.urlencode({\"q\": query})\n response=requests.get(search_url, params)\n soup=BeautifulSoup(response.content, \"html.parser\")\n product_list=soup.find_all(\"div\", class_=\"product-item\")\n for product in product_list:\n p=Product()\n p.title=product.h2.text\n imgdiv=product.find_all('div', class_='picture')\n imagea=imgdiv[0].a\n p.manufacturer_url=imagea['href']\n p.image_url=imagea.img['src']\n description=product.find_all('div' , class_='description')\n p.details=description[0].text\n price_con=product.find_all('span', class_='price')\n price=price_con[0].text\n price=price.strip()\n price_list=price.split(' ')\n if price_list[0] == 'From':\n price=price_list[1]\n else:\n price=price_list[0]\n price = price.replace('$', '')\n #pdb.set_trace()\n p.price=float(price)\n scraped_products.append(p)\n return scraped_products\n\ndef blowoutmedical(query='brush'):\n scraped_products = []\n search_url='https://www.blowoutmedical.com/catalogsearch/result/'\n params = urllib.parse.urlencode({\"q\": query})\n response = requests.get(search_url, params)\n soap=BeautifulSoup(response.content, 'html.parser')\n item_inners=soap.find_all('div', class_='item-inner')\n for product in item_inners:\n p=Product()\n imagea=product.find_all('a', class_=\"product-image\")\n p.manufacturer_url=imagea[0]['href']\n 
p.image_url=imagea[0].img['src']\n        p.title=imagea[0].img['alt']\n        price=product.find_all('span', class_='price')[0]\n        #pdb.set_trace()\n        p.price=float(price.text.strip().replace('$', ''))\n        scraped_products.append(p)\n    return scraped_products\n\n\n\ndef iqdentalsupply(query='brush'):\n    search_url='http://www.iqdentalsupply.com/Shop'\n    base_url='http://www.iqdentalsupply.com'\n    scraped_products=[]\n    params=urllib.parse.urlencode({'search':query})\n    response=requests.get(search_url, params)\n    soup=BeautifulSoup(response.content, 'html.parser')\n    product_lists=soup.find_all('div', class_='cell')\n    for product in product_lists:\n        p=Product()\n        atag=product.find_all('a', class_='thumbnail')\n        p.manufacturer_url=base_url+atag[0]['href']\n        p.image_url=base_url+atag[0].img['src']\n        p.title=product.h3.a.text\n        try:\n            tds=product.find_all('td', 'texttable')\n            p.price=float(tds[1].text.strip().replace('$',''))#use the first price in the list\n        except:\n            price=product.find_all('div', class_='price')\n            p.price=float(price[0].h3.text.strip().replace('$', ''))\n        scraped_products.append(p)\n    return scraped_products\n\n\n\ndef hygine_direct(query='brush'):\n    scraped_products=[]\n    search_url='http://www.hygiene-direct.com/search.php'\n    params=urllib.parse.urlencode({'search_query':query})\n    response=requests.get(search_url, params)\n    soup=BeautifulSoup(response.content, 'html.parser')\n    product_lists=soup.find_all('li', class_='ListView')\n    for product in product_lists:\n        #thumbnail is not on the detail search\n        p=Product()\n        atag=product.find_all('a', class_='ProductLink')\n        p.title=atag[0].text\n        p.manufacturer_url=atag[0]['href']\n        p.price=float(product.find_all('span', class_='ProductPrice')[0].text.strip().split('$').pop())\n        scraped_products.append(p)\n    return scraped_products\n\n\n\ndef mendasearch(query='brush'):\n    search_url='http://menda.descoindustries.com/Search/%s' % query\n    response=requests.get(search_url)\n    soup=BeautifulSoup(response.content, 'html.parser')\n    ul=soup.find_all('ul', class_='grid')\n    product_lists=ul[0].find_all('li', recursive=False)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"acc/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
{"seq_id":"582053891","text":"import networkx as nx # pip install networkx\r\nimport time\r\n\r\n\r\nstart_time = time.time() # take note of start time\r\n#=============================================================================\r\n# Set control variables\r\n#=============================================================================\r\n# single docu example\r\n#input_filename = [\"./input/24 Oras 05.04.2020.docx\"]\r\n#output_filename_prefix = \"24O.05.04.2020\" # common prefix of your output filename\r\n#input_mode = \"docu\"\r\n#graph_header = \"Show\" # custom cell header of output csv\r\n#graph_name = [\"24 Oras\"] # custom cell value of output csv\r\n\r\n\r\n# single docu example\r\n#input_filename = [\"./input/TV Patrol 05.04.2020.docx\"]\r\n#output_filename_prefix = \"TV Patrol 05.04.2020\" # common prefix of your output filename\r\n#input_mode = \"docu\"\r\n#graph_header = \"Show\" # custom cell header of output csv\r\n#graph_name = [\"TV Patrol\"] # custom cell value of output csv\r\n\r\n\r\n# multiple docu example (TVP vs 24Oras)\r\n#input_filename = [\"./input/24 Oras 06.01-05.2020.docx\", \"./input/TV Patrol 06.01-05.2020.docx\"]\r\n#output_filename_prefix = \"TVPvs24Oras_06.01-05.2020\" # common prefix of 
your output filename\r\n#input_filename = [\"./input/sample1.docx\", \"./input/sample2.docx\"]\r\n#output_filename_prefix = \"sample\" # common prefix of your output filename\r\n#input_mode = \"docu\"\r\n#graph_header = \"Show\" # custom cell header of output csv\r\n#graph_name = [\"24 Oras\", \"TV Patrol\"] # custom cell value of output csv\r\n\r\n\r\n# Twitter example\r\ninput_filename = [\"./input/city_tweets_2020-06-17.csv\"]\r\noutput_filename_prefix = \"city_tweets_2020-06-17\" # common prefix of your output filename\r\ninput_mode = \"twitter\"\r\ngraph_header = \"City\" # custom cell header of output csv\r\ngraph_name = [\"Caloocan City\", \"City Of Makati\", \"Manila\", \"Quezon City\", \"Taguig City\", \"Cebu City\", \"Davao City\"]\r\n\r\noutput_folder = \"./output/\" # output folder\r\ngraph_mode = \"bigrams\"\r\ncommunity_mode = \"top5\"\r\noutput_mode = \"gephi\"\r\n\r\n\r\n#=============================================================================\r\n# Load and clean the data\r\n#=============================================================================\r\n# change input mode\r\nif input_mode == \"twitter\":\r\n import tweet_input as proj_input\r\nelif input_mode == \"docu\":\r\n import docu_input as proj_input\r\nelse:\r\n print(\"Invalid input mode. Exiting...\")\r\n exit()\r\n\r\n# check if successful\r\nproj_input.check_import()\r\n\r\n\r\n# load file\r\n# note: twitter = csv file, docu = docx file\r\n# returns list of entries (tweets/sentences)\r\nprint(\"Loading files...\")\r\ninput_entries = proj_input.load_file(input_filename, graph_name)\r\nprint(\"Done. There are \" + str(len(input_entries)) + \" total entries.\\n\")\r\n\r\n# clean data (depends on the input mode)\r\nprint(\"Cleaning data...\")\r\ninput_entries = proj_input.clean_data(input_entries)\r\nprint(\"Done.\\n\")\r\n\r\n\r\n#=============================================================================\r\n# Create graph\r\n#=============================================================================\r\n# change process mode\r\nif graph_mode == \"bigrams\":\r\n import bigram_graph_process as graph_process\r\nelse:\r\n print(\"Invalid process mode. Exiting...\")\r\n exit()\r\n\r\n# check if successful\r\ngraph_process.check_import()\r\n\r\n# create a graph using the input entries (returns networkx graph object)\r\nprint(\"Creating graphs...\")\r\nGRAPHS = graph_process.create_graph(input_entries, input_mode)\r\nnum_graphs = len(GRAPHS)\r\nprint(\"Done. \" + str(num_graphs) + \" graphs created.\\n\")\r\n\r\n\r\n#=============================================================================\r\n# Create communities and color the graph\r\n#=============================================================================\r\n# change process mode\r\nif community_mode == \"top5\":\r\n import top5_community_process as community_process\r\nelif community_mode == \"cities\":\r\n import cities_coloring_process as community_process\r\nelse:\r\n print(\"Invalid process mode. 
Exiting...\")\r\n exit()\r\n\r\n# check if successful\r\ncommunity_process.check_import()\r\n\r\n# create communities and color the graph\r\nprint(\"Detecting communities...\")\r\nGRAPHS = community_process.create_communities(GRAPHS, num_graphs, graph_header, input_mode, output_filename_prefix)\r\nprint(\"Done.\\n\")\r\n\r\n\r\n#=============================================================================\r\n# Export\r\n#=============================================================================\r\n# set output mode\r\nif output_mode == \"gephi\":\r\n import gephi_output as proj_output\r\nelse:\r\n print(\"Invalid process mode. Exiting...\")\r\n exit()\r\n\r\n# check if successful\r\nproj_output.check_import()\r\n\r\n# export graph\r\nprint(\"Exporting graph...\")\r\nproj_output.export_graph(GRAPHS, output_folder, output_filename_prefix, graph_header, input_mode)\r\nprint(\"Done.\\n\")\r\n\r\n\r\n#=============================================================================\r\n# End\r\n#=============================================================================\r\n# display time elapsed\r\nprint(\"Program finished after \" + str(time.time() - start_time) + \" seconds.\")\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}